code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
import cv
import cv2
import numpy as np
import collections
def extract( image, region ):
    """Binarize the sub-image covering `region`.

    Builds a histogram of pixel values over the region, crops a copy of the
    image to the region's bounding box (plus a small hard-coded margin), then
    thresholds at 75% of the histogram-weighted average value.
    NOTE(review): `prevailing` (the modal value) is computed but never used.
    """
    histogram = dict()
    # Work on a copy so the caller's image is not modified.
    imgcp = cv.CreateImage( cv.GetSize( image ), image.depth, image.nChannels )
    cv.Copy( image, imgcp )
    # Count occurrences of each pixel value inside the region (points are (x, y)).
    for pixel in region:
        color = imgcp[pixel[1], pixel[0]]
        if color not in histogram.keys():
            histogram[color] = 1
        else:
            histogram[color] += 1
    # Modal value -- dead code below this loop.
    prevailing = histogram.keys()[0]
    for color in histogram.keys():
        if histogram[color] > histogram[prevailing]:
            prevailing = color
    start = region_start( region )
    height = region_height( region )
    width = region_width( region )
    # dirty
    # Hard-coded margin: 5 px horizontally, 2 px vertically around the bounding box.
    imgcp = cropImage( imgcp, start[0] - 5, start[1] - 2, start[0] + width + 5, start[1] + height + 2 )
    # Weighted mean of values, weights = histogram counts (assumes scalar,
    # single-channel pixel values -- TODO confirm).
    avg_color = np.average( histogram.keys(), 0, histogram.values() )
    # Binarize: anything above 75% of the average becomes 255.
    cv.Threshold( imgcp, imgcp, int( avg_color * 0.75 ), 255, 0 )
    return imgcp
def manually_convolve( img1, img2 ):
    """Pixel-wise multiply img1 in place by img2 rescaled to [0, 1].

    Both images must share dimensions; img1 is modified and also returned.
    """
    for col in range( img1.width ):
        for row in range( img1.height ):
            scale = img2[row, col] / 255.0
            img1[row, col] = img1[row, col] * scale
    return img1
# so far just red - hard-coded
def split_channels( img ):
    """Extract one channel of a multi-channel image as a new 1-channel image.

    Hard-coded to channel index 2 (red, assuming BGR order -- see comment
    above). A single-channel input is returned unchanged (same object, no copy).
    """
    if img.nChannels == 1:
        return img
    new_img = cv.CreateImage( cv.GetSize( img ), img.depth, 1 )
    for y in range( 0, img.height ):
        for x in range( 0, img.width ):
            new_img[y, x] = img[y, x][2];
    return new_img
def thicken_contour( image, edge_col=255, median_radius=1 ):
    """Thicken edges by convolving with an all-ones box kernel of side
    (2 * median_radius + 1); returns the filtered copy.

    NOTE(review): `edge_col` is accepted but never used. The kernel is filled
    via cv.Set on a full slice -- presumably equivalent to cv.Set(kernel, 1);
    confirm against the legacy cv API.
    """
    img = cv.CreateImage( cv.GetSize( image ), image.depth, image.nChannels )
    kernel = cv.CreateMat( 2 * median_radius + 1, 2 * median_radius + 1, cv.CV_8S )
    cv.Set( kernel[:, :], 1 )
    cv.Filter2D( image, img, kernel )
    return img
# sums up values of given piece of image - checks for borders
def safe_subsum( mat, startx, starty, stopx, stopy ):
    """Sum the values of the sub-rectangle [startx, stopx) x [starty, stopy)
    of `mat`, with the bounds clamped to the matrix.

    NOTE(review): the upper bounds are clamped to height-1 / width-1, so a
    full-size request silently drops the last row and column -- preserved
    here for compatibility; confirm whether intended.
    (Fix: renamed the local that shadowed the builtin `list`.)
    """
    y0 = max( starty, 0 )
    y1 = min( stopy, mat.height - 1 )
    x0 = max( startx, 0 )
    x1 = min( stopx, mat.width - 1 )
    values = np.asarray( mat[y0:y1, x0:x1] ).reshape( -1 ).tolist()
    return sum( values )
# hard-coded: margin
def get_neighbors( img, point ):
    """Return the set of in-bounds pixels in the 3x3 window centered on
    `point` (the point itself is included when in bounds)."""
    px, py = point
    result = set()
    for nx in ( px - 1, px, px + 1 ):
        if nx < 0 or nx >= img.width:
            continue
        for ny in ( py - 1, py, py + 1 ):
            if 0 <= ny < img.height:
                result.add( ( nx, ny ) )
    return result
# region consists of one element only
def create_region( img, point, cutoff=10 ):
    """Grow a connected region outward from `point`, breadth-first.

    A candidate pixel is absorbed when its value differs from the SEED
    point's value by less than `cutoff` -- the comparison is always against
    `point`, not against the adjacent already-accepted pixel (presumably
    intentional; verify against grow_regions' expectations).
    Returns the set of (x, y) points in the region.
    """
    region = set()
    region.add( point )
    neighbors = get_neighbors( img, point )
    while len( neighbors ) > 0:
        far_neighbors = set()
        for neighbor in neighbors:
            if neighbor not in region and merge_criterion( img, point, neighbor, cutoff ):
                region.add( neighbor )
                # Queue the next ring of candidates around each accepted pixel.
                for far_neighbor in get_neighbors( img, neighbor ):
                    if far_neighbor not in region:
                        far_neighbors.add( far_neighbor )
        neighbors = far_neighbors
    return region
def merge_criterion( img, pt_one, pt_two, cutoff=10 ):
    """True when the intensity difference between the two (x, y) pixels is
    strictly below `cutoff`."""
    first = img[pt_one[1], pt_one[0]]
    second = img[pt_two[1], pt_two[0]]
    return abs( first - second ) < cutoff
def region_start( region ):
    """Top-left corner (min x, min y) of the region's bounding box.

    An empty region yields the (100000, 100000) sentinel, matching the
    historical behavior.
    """
    best_x = best_y = 100000
    for px, py in region:
        if px < best_x:
            best_x = px
        if py < best_y:
            best_y = py
    return ( best_x, best_y )
def region_leanness( region ):
    """Height-to-width ratio of the region's bounding box, as a float."""
    height = region_height( region )
    width = region_width( region )
    return float( height ) / width
def region_height( region ):
    """Vertical (y-axis) extent of the region in pixels, inclusive."""
    vertical_axis = 1
    return region_span( region, vertical_axis )
def region_width( region ):
    """Horizontal (x-axis) extent of the region in pixels, inclusive."""
    horizontal_axis = 0
    return region_span( region, horizontal_axis )
def region_center( region ):
    """Centroid (mean x, mean y) of the region's pixels, as floats."""
    count = len( region )
    total_x = sum( p[0] for p in region )
    total_y = sum( p[1] for p in region )
    return ( 1.0 * total_x / count, 1.0 * total_y / count )
def region_span( region, axis_index ):
    """Inclusive extent (max - min + 1) of the region along the given axis
    (0 = x, 1 = y).

    The sentinel initial values are kept so an empty region behaves exactly
    as before instead of raising.
    """
    lo, hi = 100000, -100000
    for point in region:
        coord = point[axis_index]
        if coord < lo:
            lo = coord
        if coord > hi:
            hi = coord
    return hi - lo + 1
# returns a cropped image object
def cropImage( image, xstart, ystart, xend, yend ):
    """Return a copy of the rectangular sub-region of `image`.

    End coordinates are clamped to the image bounds; start coordinates are
    clamped only when extracting the sub-rect.
    NOTE(review): width/height are computed from the UNclamped xstart/ystart,
    so negative starts yield an oversized target image -- confirm callers
    always pass non-negative starts or accept this.
    (Fix: removed a stray bare `cv.CreateImage` expression that was a no-op.)
    """
    width = min( xend, image.width - 1 ) - xstart
    height = min( yend, image.height - 1 ) - ystart
    cropped = cv.CreateImage( ( width, height ), image.depth, image.nChannels )
    src_region = cv.GetSubRect( image, ( max( xstart, 0 ), max( ystart, 0 ), width, height ) )
    cv.Copy( src_region, cropped )
    return cropped
# under construction
def grow_regions( image, seed_color=0, eaten_color=255 ):
    """Scan for seed-colored pixels, grow a region from each, and recolor
    every visited pixel so it cannot be re-seeded.

    Regions larger than one pixel are kept and painted for inspection.
    Returns the list of selected regions (each a set of (x, y) points).
    The input image is modified in place.
    """
    print "growing regions"
    selected_regions = list()
    for y in range( 0, image.height ):
        for x in range( 0, image.width ):
            if image[y, x] == seed_color:
                # Hard-coded cutoff of 25 for the merge test.
                region = create_region( image, ( x, y ), 25 )
                if len( region ) > 1:
                    selected_regions.append( region )
                for point in region:
                    # Mark every visited pixel (kept or not) as eaten.
                    image[point[1], point[0]] = eaten_color;
    paint_regions( selected_regions, image )
    return selected_regions
def kill_the_losers( org_img, regions ):
    """Discard (in place) regions that are too small, too short, or not wide
    enough relative to their height; paint and return the survivors.

    The original implementation restarted the scan from the beginning after
    every removal (quadratic). Each criterion depends only on the region
    itself and `org_img`, so a single filtering pass produces the same
    surviving set.
    """
    min_height = org_img.height * 0.02
    min_size = 4 * optimal_radius( org_img )
    survivors = []
    for region in regions:
        # Wide, tall-enough, and heavy-enough regions survive.
        too_narrow = region_width( region ) < 1.5 * region_height( region )
        too_short = region_height( region ) < min_height
        too_small = len( region ) < min_size
        if not ( too_small or too_narrow or too_short ):
            survivors.append( region )
    # Slice-assign so the caller's list object is mutated, as before.
    regions[:] = survivors
    paint_regions( regions, org_img )
    return regions
def cluster_regions ( org_img, regions ):
    """Iteratively merge regions that are close in mass-weighted distance.

    Maintains parallel lists (starts/heights/widths/centers, indexed like
    `regions`); after each merge the pair's entries are recomputed and the
    scan restarts. The loop ends when a full pass makes no merge.
    All threshold constants (50, 40, 2x) are empirical. `regions` is
    modified in place and returned.
    """
    starts = list()
    heights = list()
    widths = list()
    centers = list()
    # init data for merging
    for region in regions:
        start = region_start( region )
        starts.append( start )
        heights.append( region_height( region ) )
        widths.append( region_width( region ) )
        centers.append( region_center( region ) )
    #
    truth = True
    iterations_counter = 0
    while truth and len( regions ) > 1:
        iterations_counter += 1
        if iterations_counter % 25 == 0:
            print "clustering regions..."
        for i in range( 0, len( regions ) ):
            truth = False
            for j in range( i + 1, len( regions ) ):
                joint_mass = len( regions[i] ) + len( regions[j] )
                # All criteria scale with the combined region mass.
                gravity_criterion = ( ( centers[i][0] - centers[j][0] ) ** 2 + ( centers[i][1] - centers[j][1] ) ** 2 ) < 50 * ( joint_mass )
                horizontal_criterion = ( centers[i][0] - centers[j][0] ) ** 2 < 40 * joint_mass
                vertical_criterion = ( centers[i][1] - centers[j][1] ) ** 2 < joint_mass
                width_criterion = widths[i] + widths[j] + abs( centers[i][0] - centers[j][0] ) < 2 * len( regions[i] ) + len( regions[j] )
                if ( gravity_criterion and width_criterion and vertical_criterion and horizontal_criterion ):
                    truth = True
                    # Merge j into i, refresh i's cached geometry, drop j everywhere.
                    regions[i] = regions[i].union( regions[j] )
                    heights[i] = region_height( regions[i] )
                    widths[i] = region_width( regions[i] )
                    starts[i] = region_start( regions[i] )
                    centers[i] = region_center( regions[i] )
                    regions.pop( j )
                    heights.pop( j )
                    widths.pop( j )
                    centers.pop( j )
                    starts.pop( j )
                    break;
            if truth:
                break
    paint_regions( regions, org_img )
    print "all suitable regions merged"
    return regions
def paint_regions( regions, image, caption="cpoo" ):
    """Render each region in a distinct color on a black canvas and display it.

    Debug-only helper: `image` supplies size and depth.
    NOTE(review): the palette contains (0, 0, 255) twice, and the first region
    gets colors[1] because the counter is incremented before use.
    """
    res = cv.CreateImage( cv.GetSize( image ), image.depth, 3 )
    # assumes 3-channeled original
    for x in range ( 0, res.width ):
        for y in range( 0, res.height ):
            res[y, x] = ( 0, 0, 0 )
    colors = [( 0, 0, 255 ), ( 0, 255, 0 ), ( 255, 0, 0 ), ( 255, 0, 255 ), ( 255, 255, 0 ), ( 0, 0, 255 ), ( 255, 255, 255 )]
    color_counter = 0;
    for region in regions:
        color_counter = ( ( color_counter + 1 ) % len( colors ) )
        for point in region:
            res[point[1], point[0]] = colors[color_counter]
    show_wait( res, caption )
def compare_pixels( p1, p2, tolerance ):
    """Return True when p1 and p2 differ by less than `tolerance`.

    Scalars are compared directly. Iterable (multi-channel) pixels sum the
    per-channel absolute differences, and the tolerance is scaled by the
    number of channels -- same behavior as before.
    (Fix: `collections.Iterable` was removed in Python 3.10; look it up via
    `collections.abc` when available, keeping Python 2 compatibility.)
    """
    abc_mod = getattr( collections, 'abc', collections )
    if isinstance( p1, abc_mod.Iterable ):
        diff = 0
        for dim in range( 0, len( p1 ) ):
            diff += abs( p1[dim] - p2[dim] )
        tolerance = tolerance * len( p1 )
    else:
        diff = abs( p1 - p2 )
    return diff < tolerance
# returns a window median_radius size for processing
def optimal_radius( picture ):
    """Processing window radius, proportional to image height (1/30th)."""
    scale = 30
    return picture.height / scale
# returns a global map of text-related energy for the picture
def text_energy_map( image ):
    """Build a single-channel "text energy" map for the image.

    Pipeline: red channel -> blurred Laplacian -> four 3x3 compass edge
    filters (0/45/90/135 degrees) -> weighted sums as an edge-density map ->
    thresholded orientation count -> pixel-wise product of density,
    orientation and Laplacian maps -> histogram equalization.
    """
    image = split_channels( image )
    radius = optimal_radius( image )
    laplac = laplacian( image )
    laplac = gaussian_blur_icl( laplac, ( radius, radius ), radius )
    # init compass results
    result_0 = cv.CreateImage( cv.GetSize( image ), image.depth, image.nChannels )
    result_45 = cv.CreateImage( cv.GetSize( image ), image.depth, image.nChannels )
    result_90 = cv.CreateImage( cv.GetSize( image ), image.depth, image.nChannels )
    result_135 = cv.CreateImage( cv.GetSize( image ), image.depth, image.nChannels )
    # compass one: operator & result (middle row boosted -> horizontal structures)
    ker_0 = cv.CreateMat( 3, 3, cv.CV_8S )
    cv.Set( ker_0, -1 )
    cv.Set( ker_0[1, :], 2 )
    cv.Filter2D( image, result_0, ker_0 )
    # compass two (main diagonal boosted -> 45-degree structures)
    ker_45 = cv.CreateMat( 3, 3, cv.CV_8S )
    cv.Set( ker_45, -1 )
    ker_45[2, 2] = ker_45[1, 1] = ker_45[0, 0] = 2;
    cv.Filter2D( image, result_45, ker_45 )
    # compass three (middle column boosted -> vertical structures)
    ker_90 = cv.CreateMat( 3, 3, cv.CV_8S )
    cv.Set( ker_90, -1 )
    cv.Set( ker_90[:, 1], 2 )
    cv.Filter2D( image, result_90, ker_90 )
    # compass four (anti-diagonal boosted -> 135-degree structures)
    ker_135 = cv.CreateMat( 3, 3, cv.CV_8S )
    cv.Set( ker_135, -1 )
    ker_135[2, 0] = ker_135[1, 1] = ker_135[0, 2] = 2;
    cv.Filter2D( image, result_135, ker_135 )
    # prepare result image and temporary helper
    density = cv.CreateImage( cv.GetSize( image ), image.depth, 1 )
    temp = cv.CreateImage( cv.GetSize( image ), image.depth, 1 )
    cv.AddWeighted( result_0, 0.5, result_90, 0.5, 0, density )
    cv.AddWeighted( result_45, 0.5, result_135, 0.5, 0, temp )
    cv.AddWeighted( temp, 0.5, density, 0.5, 0, density )
    # display obtained (weighted sum) of constituent images
    # blur the density to highlight areas
    density = gaussian_blur_icl( density, ( 3, 3 ), radius )
    # displays again
    # show_wait( density, "cpoo" )
    # create a map of pixel weights - proportional to a total of orientations within window
    orients = cv.CreateImage( cv.GetSize( image ), image.depth, image.nChannels )
    # threshold each picture - simulates summing orientations upon subsequent addition of images
    cv.Threshold( result_0, result_0, 128, 255, 0 )
    cv.Threshold( result_45, result_45, 128, 255, 0 )
    cv.Threshold( result_90, result_90, 128, 255, 0 )
    cv.Threshold( result_135, result_135, 128, 255, 0 )
    # just summing - don't like that part really
    cv.AddWeighted( result_0, 0.5, result_45, 0.5, 0, result_0 )
    cv.AddWeighted( result_90, 0.5, result_135, 0.5, 0, result_90 )
    cv.AddWeighted( result_90, 0.5, result_0, 0.5, 0, orients )
    # show( orients, "orients raw" )
    # equalizing and slightly blurring. not nice really again
    cv.EqualizeHist( orients, orients )
    # orients = gaussian_blur_icl( orients, ( 3, 3 ), radius )
    # show_wait( orients, "cpoo" )
    # manually convolve density and orients
    for x in range( 0, density.width ):
        for y in range( 0, density.height ):
            # Scale each density pixel by orientation and Laplacian responses (both normalized by 255).
            density[y, x] *= orients[y, x] * laplac[y, x] / ( 255.0 * 255.0 )
    cv.EqualizeHist( density, density )
    return density
def gaussian_blur_icl( image, ksize, sigmaX ):
    """Gaussian-blur a legacy cv image via cv2, round-tripping through numpy.

    cv2.GaussianBlur requires odd kernel sides, so even dimensions are
    bumped up by one before filtering.
    """
    kw, kh = ksize[0], ksize[1]
    if kw % 2 == 0:
        kw += 1
    if kh % 2 == 0:
        kh += 1
    blurred = cv2.GaussianBlur( cv2array( image ), ( kw, kh ), sigmaX )
    return array2cv( blurred )
def average_pixels( image, dims ):
    """Box-blur the image in place with a dims[0] x dims[1] averaging kernel;
    returns the same (modified) image object."""
    kernel = cv.CreateMat( dims[0], dims[1], cv.CV_32F )
    cv.Set( kernel, 1.0 / ( dims[0] * dims[1] ) )
    cv.Filter2D( image, image, kernel )
    return image
def laplacian( image ):
    """Apply a 3x3 Laplacian-like edge kernel and return the filtered copy.

    Kernel: corners 1, edge midpoints 2, center -12 (weights sum to zero,
    so flat areas map to zero response).
    """
    dst = cv.CreateImage( cv.GetSize( image ), image.depth, image.nChannels )
    kernel = cv.CreateMat( 3, 3, cv.CV_32F )
    cv.Set( kernel, 1 )
    kernel[0, 1] = kernel[1, 0] = kernel[2, 1] = kernel[1, 2] = 2
    kernel[1, 1] = -12
    cv.Filter2D( image, dst, kernel )
    return dst
# from openCV python wiki
def cv2array( im ):
    """Convert a legacy cv IplImage to a numpy array shaped
    (height, width, nChannels), preserving the pixel dtype.

    Adapted from the OpenCV Python wiki.
    (Fix: removed the unused local `arrdtype`.)
    """
    depth2dtype = {
        cv.IPL_DEPTH_8U: 'uint8',
        cv.IPL_DEPTH_8S: 'int8',
        cv.IPL_DEPTH_16U: 'uint16',
        cv.IPL_DEPTH_16S: 'int16',
        cv.IPL_DEPTH_32S: 'int32',
        cv.IPL_DEPTH_32F: 'float32',
        cv.IPL_DEPTH_64F: 'float64',
    }
    a = np.fromstring(
        im.tostring(),
        dtype=depth2dtype[im.depth],
        count=im.width * im.height * im.nChannels )
    a.shape = ( im.height, im.width, im.nChannels )
    return a
# from openCV python wiki
def array2cv( a ):
    """Convert a numpy array (h, w[, channels]) back to a legacy cv image.

    Adapted from the OpenCV Python wiki.
    (Fix: the bare `except:` is narrowed to IndexError -- the exception a
    missing third dimension actually raises -- so unrelated errors surface.)
    """
    dtype2depth = {
        'uint8': cv.IPL_DEPTH_8U,
        'int8': cv.IPL_DEPTH_8S,
        'uint16': cv.IPL_DEPTH_16U,
        'int16': cv.IPL_DEPTH_16S,
        'int32': cv.IPL_DEPTH_32S,
        'float32': cv.IPL_DEPTH_32F,
        'float64': cv.IPL_DEPTH_64F,
    }
    try:
        nChannels = a.shape[2]
    except IndexError:
        # 2-D array: single channel.
        nChannels = 1
    cv_im = cv.CreateImageHeader( ( a.shape[1], a.shape[0] ),
        dtype2depth[str( a.dtype )],
        nChannels )
    cv.SetData( cv_im, a.tostring(),
        a.dtype.itemsize * nChannels * a.shape[1] )
    return cv_im
def sum_orients( img, x, y, median_radius, threshold=255 ):
    """Count pixels at or above `threshold` inside the square window of the
    given radius centered at (x, y), clamped to the image bounds.

    (Fix: renamed the accumulator that shadowed the builtin `sum`.)
    """
    count = 0
    for curx in range( max( x - median_radius, 0 ), min( x + median_radius, img.width ) ):
        for cury in range( max( y - median_radius, 0 ), min( y + median_radius, img.height ) ):
            if( img[cury, curx] >= threshold ):
                count += 1
    return count
# shortcut for openCV display functions
def show( picture, desc="no_desc" ):
    """Display helper -- currently disabled.

    The bare `return` deliberately short-circuits the function, so the
    ShowImage/WaitKey calls below are intentionally dead code: a quick global
    switch to mute debug displays without deleting call sites.
    """
    return
    cv.ShowImage( desc, picture )
    cv.WaitKey()
def show_wait( picture, desc="no_desc" ):
    """Display `picture` in a window titled `desc` and block until a key is
    pressed."""
    cv.ShowImage( desc, picture )
    cv.WaitKey()
def sum_array( array ):
    """Total of every element of `array` (any shape), via numpy flattening."""
    flattened = np.asarray( array ).ravel().tolist()
    return sum( flattened )
def find_density_maximum( image ):
    """Greedy hill-climb for the sub-rectangle maximizing (mass / area)^2.

    Starting from the full image [x0, y0, x1, y1], each iteration tries four
    single-pixel moves (grow the top-left corner inward, shrink the
    bottom-right) and keeps the best improvement; stops at a local maximum
    and returns the rectangle as [x0, y0, x1, y1].
    NOTE(review): a degenerate rectangle (zero width or height) would divide
    by zero -- confirm inputs make that unreachable.
    """
    i = [0, 0, image.width, image.height]
    max_copy = i
    max_density = 0
    while True:
        # print "current density: " + str( max_density )
        for ind in range( 0, 4 ):
            copy = list( i )
            # Indices 0/1 (left/top) move inward by +1; 2/3 (right/bottom) by -1.
            if ind > 1:
                copy[ind] -= 1
            else:
                copy[ind] += 1
            density = sum_array( image[copy[1] : copy[3], copy[0] : copy[2]] ) ** 2 * 1.0 / ( ( copy[3] - copy[1] ) * ( copy[2] - copy[0] ) ) ** 2
            # print "candidate_density: " + str( density )
            if density > max_density:
                max_density = density
                max_copy = copy
        if i == max_copy:
            break
        else:
            i = max_copy
    return max_copy
| Python |
import tesseract
import sys
# Standalone driver: OCR the image file named on the command line via the
# Tesseract buffer API.
api = tesseract.TessBaseAPI()
api.Init(".","eng",tesseract.OEM_DEFAULT)
# Restrict recognition to digits plus '.' and ':' (timestamp-like strings).
api.SetVariable("tessedit_char_whitelist", "0123456789\.\:")
api.SetPageSegMode(tesseract.PSM_AUTO)
mImgFile = sys.argv[1]
# Read the raw image bytes; ProcessPagesBuffer handles decoding.
mBuffer=open(mImgFile,"rb").read()
result = tesseract.ProcessPagesBuffer(mBuffer,len(mBuffer),api)
print "result(ProcessPagesBuffer)=",result
| Python |
"""Utility functions for processing images for delivery to Tesseract"""
import os
def image_to_scratch(im, scratch_image_name):
    """Saves image in memory to scratch file. .bmp format will be read correctly by Tesseract"""
    # The 200x200 dpi hint keeps Tesseract's size estimates sensible.
    dpi_hint = (200, 200)
    im.save(scratch_image_name, dpi=dpi_hint)
def retrieve_text(scratch_text_name_root):
    """Read and return the OCR text produced for the given scratch-file root
    (reads `<root>.txt`).

    (Fix: the Python-2-only `file()` builtin is replaced with `open()` in a
    context manager, so the handle is closed even if read() raises; behavior
    is identical under Python 2.)
    """
    with open(scratch_text_name_root + '.txt') as inf:
        text = inf.read()
    return text
def perform_cleanup(scratch_image_name, scratch_text_name_root):
    """Clean up temporary files from disk.

    Best-effort removal of the scratch image, its OCR text output, and the
    Tesseract log; files that are already gone are ignored.
    """
    doomed = [scratch_image_name, scratch_text_name_root + '.txt', "tesseract.log"]
    for name in doomed:
        try:
            os.remove(name)
        except OSError:
            pass
| Python |
"""Test for exceptions raised in the tesseract.exe logfile"""
class Tesser_General_Exception(Exception):
    """Base exception for errors reported in the tesseract.exe logfile."""
    pass
class Tesser_Invalid_Filetype(Tesser_General_Exception):
    """Raised when Tesseract rejects the input file's format."""
    pass
def check_for_errors(logfile = "tesseract.log"):
    """Raise Tesser_General_Exception carrying the log contents if the
    Tesseract logfile reports an error; otherwise return None.

    (Fixes: the Python-2-only `file()` builtin and `raise Exc, value` syntax
    are replaced with `open()` + a context manager and `raise Exc(value)`,
    both of which behave identically under Python 2.)
    """
    with open(logfile) as inf:
        text = inf.read()
    # All error conditions result in "Error" somewhere in logfile
    if "Error" in text:
        raise Tesser_General_Exception(text)
import cv2
import numpy as np
# Script: draw per-channel BGR histograms of one image as polylines on a
# 300x256 canvas and display the result.
img = cv2.imread('meat/551054_346968652052693_986626722_n.jpg')
h = np.zeros((300,256,3))
bins = np.arange(256).reshape(256,1)
# One polyline color per channel, in BGR order.
color = [ (255,0,0),(0,255,0),(0,0,255) ]
for ch, col in enumerate(color):
    # 256-bin histogram of channel `ch`, min-max scaled into 0..255 for display.
    hist_item = cv2.calcHist([img],[ch],None,[256],[0,255])
    cv2.normalize(hist_item,hist_item,0,255,cv2.NORM_MINMAX)
    hist=np.int32(np.around(hist_item))
    pts = np.column_stack((bins,hist))
    cv2.polylines(h,[pts],False,col)
# Flip vertically so the histogram baseline sits at the bottom of the canvas.
h=np.flipud(h)
cv2.imshow('colorhist',h)
cv2.waitKey(0)
| Python |
from PIL import Image
import ImageEnhance
from pytesser import *
from urllib import urlretrieve
import sys
# Script: upscale, binarize, and OCR a captcha-like image named on argv[1].
im = Image.open(sys.argv[1])
nx, ny = im.size
# 5x bicubic upscale gives Tesseract more pixels to work with.
im2 = im.resize((int(nx*5), int(ny*5)), Image.BICUBIC)
im2.save("temp2.png")
enh = ImageEnhance.Contrast(im)
enh.enhance(1.3).show("30% more contrast")
imgx = Image.open('temp2.png')
imgx = imgx.convert("RGBA")
pix = imgx.load()
# Hard binarization: every pixel that is not pure opaque black becomes white.
for y in xrange(imgx.size[1]):
    for x in xrange(imgx.size[0]):
        if pix[x, y] != (0, 0, 0, 255):
            pix[x, y] = (255, 255, 255, 255)
imgx.save("bw.gif", "GIF")
original = Image.open('bw.gif')
# Downscale to a fixed size -- NOTE(review): 116x56 looks tuned to one
# specific captcha source; confirm before reuse.
bg = original.resize((116, 56), Image.NEAREST)
ext = ".tif"
bg.save("input-NEAREST" + ext)
image = Image.open('input-NEAREST.tif')
print image_to_string(image)
| Python |
import cv2.cv as cv
import tesseract
import sys
# Script: OCR a grayscale-loaded image through the Tesseract API using the
# legacy cv image bridge.
api = tesseract.TessBaseAPI()
api.Init(".","eng",tesseract.OEM_DEFAULT)
api.SetPageSegMode(tesseract.PSM_AUTO)
image=cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_GRAYSCALE)
tesseract.SetCvImage(image,api)
text=api.GetUTF8Text()
# Mean recognition confidence (0-100); computed but only the text is printed.
conf=api.MeanTextConf()
print "scanned text: "+text
| Python |
#!/usr/bin/env python
# -*- encoding:utf8 -*-
# protoc-gen-erl
# Google's Protocol Buffers project, ported to lua.
# https://code.google.com/p/protoc-gen-lua/
#
# Copyright (c) 2010 , 林卓毅 (Zhuoyi Lin) netsnail@gmail.com
# All rights reserved.
#
# Use, modification and distribution are subject to the "New BSD License"
# as listed at <url: http://www.opensource.org/licenses/bsd-license.php >.
import sys
import os.path as path
from cStringIO import StringIO
import plugin_pb2
import google.protobuf.descriptor_pb2 as descriptor_pb2
# Module-level registries populated while walking the CodeGeneratorRequest.
_packages = {}
_files = {}  # output Lua filename -> generated source text
_message = {}
# Shorthand for the proto field-descriptor message type used throughout.
FDP = plugin_pb2.descriptor_pb2.FieldDescriptorProto
# On Windows, stdin/stdout must be switched to binary mode so protobuf
# bytes round-trip without newline translation.
if sys.platform == "win32":
    import msvcrt, os
    msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
    msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
class CppType:
    """Constants mirroring protobuf's C++ FieldDescriptor::CppType enum."""
    CPPTYPE_INT32 = 1
    CPPTYPE_INT64 = 2
    CPPTYPE_UINT32 = 3
    CPPTYPE_UINT64 = 4
    CPPTYPE_DOUBLE = 5
    CPPTYPE_FLOAT = 6
    CPPTYPE_BOOL = 7
    CPPTYPE_ENUM = 8
    CPPTYPE_STRING = 9
    CPPTYPE_MESSAGE = 10
# Maps each proto wire type (FieldDescriptorProto.TYPE_*) to its C++
# storage category from CppType above.
CPP_TYPE ={
    FDP.TYPE_DOUBLE : CppType.CPPTYPE_DOUBLE,
    FDP.TYPE_FLOAT : CppType.CPPTYPE_FLOAT,
    FDP.TYPE_INT64 : CppType.CPPTYPE_INT64,
    FDP.TYPE_UINT64 : CppType.CPPTYPE_UINT64,
    FDP.TYPE_INT32 : CppType.CPPTYPE_INT32,
    FDP.TYPE_FIXED64 : CppType.CPPTYPE_UINT64,
    FDP.TYPE_FIXED32 : CppType.CPPTYPE_UINT32,
    FDP.TYPE_BOOL : CppType.CPPTYPE_BOOL,
    FDP.TYPE_STRING : CppType.CPPTYPE_STRING,
    FDP.TYPE_MESSAGE : CppType.CPPTYPE_MESSAGE,
    FDP.TYPE_BYTES : CppType.CPPTYPE_STRING,
    FDP.TYPE_UINT32 : CppType.CPPTYPE_UINT32,
    FDP.TYPE_ENUM : CppType.CPPTYPE_ENUM,
    FDP.TYPE_SFIXED32 : CppType.CPPTYPE_INT32,
    FDP.TYPE_SFIXED64 : CppType.CPPTYPE_INT64,
    FDP.TYPE_SINT32 : CppType.CPPTYPE_INT32,
    FDP.TYPE_SINT64 : CppType.CPPTYPE_INT64
}
def printerr(*args):
    """Write the arguments to stderr, space-separated and newline-terminated,
    flushing immediately so plugin diagnostics appear promptly."""
    line = " ".join(args)
    sys.stderr.write(line)
    sys.stderr.write("\n")
    sys.stderr.flush()
class TreeNode(object):
    """Node in the dotted-name tree used to resolve protobuf type references.

    Each node is one component of a dotted name and records the .proto file
    and package it came from, so cross-file references can be qualified.
    """
    def __init__(self, name, parent=None, filename=None, package=None):
        super(TreeNode, self).__init__()
        self.child = []
        self.parent = parent
        self.filename = filename
        self.package = package
        if parent:
            # Registering with the parent links this node into the tree.
            self.parent.add_child(self)
        self.name = name
    def add_child(self, child):
        """Append a child node."""
        self.child.append(child)
    def find_child(self, child_names):
        """Descend through the list of name components; raise when missing.

        NOTE(review): StandardError is Python-2-only; under Python 3 this
        line would raise NameError instead. Callers use a bare except, so
        both are caught either way.
        """
        if child_names:
            for i in self.child:
                if i.name == child_names[0]:
                    return i.find_child(child_names[1:])
            raise StandardError
        else:
            return self
    def get_child(self, child_name):
        """Return the direct child named `child_name`, or None."""
        for i in self.child:
            if i.name == child_name:
                return i
        return None
    def get_path(self, end = None):
        """Dotted path from (but excluding) `end` down to this node."""
        pos = self
        out = []
        while pos and pos != end:
            out.append(pos.name)
            pos = pos.parent
        out.reverse()
        return '.'.join(out)
    def get_global_name(self):
        """Fully qualified dotted name from the tree root."""
        return self.get_path()
    def get_local_name(self):
        """Dotted name relative to this node's package root."""
        pos = self
        while pos.parent:
            pos = pos.parent
            if self.package and pos.name == self.package[-1]:
                break
        return self.get_path(pos)
    def __str__(self):
        return self.to_string(0)
    def __repr__(self):
        return str(self)
    def to_string(self, indent = 0):
        # Debug dump of the subtree, indented by 4 spaces per level.
        return ' '*indent + '<TreeNode ' + self.name + '(\n' + \
            ','.join([i.to_string(indent + 4) for i in self.child]) + \
            ' '*indent +')>\n'
class Env(object):
    """Code-generation state: the global name tree, the current file/package
    scope, and the per-file output buffers accumulated during generation."""
    filename = None
    package = None
    extend = None
    descriptor = None
    message = None
    context = None
    register = None
    def __init__(self):
        self.message_tree = TreeNode('')
        self.scope = self.message_tree
    def get_global_name(self):
        """Fully qualified dotted name of the current scope."""
        return self.scope.get_global_name()
    def get_local_name(self):
        """Package-relative dotted name of the current scope."""
        return self.scope.get_local_name()
    def get_ref_name(self, type_name):
        """Resolve a type reference to the name the generated Lua should use.

        Types defined in another file are qualified with that file's
        generated module name (`<file>_pb.`); same-file types use their
        package-local name.
        """
        try:
            node = self.lookup_name(type_name)
        except:
            # if the child doesn't be founded, it must be in this file
            return type_name[len('.'.join(self.package)) + 2:]
        if node.filename != self.filename:
            return node.filename + '_pb.' + node.get_local_name()
        return node.get_local_name()
    def lookup_name(self, name):
        """Find a TreeNode by dotted name; a leading '.' means fully qualified."""
        names = name.split('.')
        if names[0] == '':
            return self.message_tree.find_child(names[1:])
        else:
            return self.scope.parent.find_child(names)
    def enter_package(self, package):
        """Return (creating as needed) the tree node for a dotted package name."""
        if not package:
            return self.message_tree
        names = package.split('.')
        pos = self.message_tree
        for i, name in enumerate(names):
            new_pos = pos.get_child(name)
            if new_pos:
                pos = new_pos
            else:
                return self._build_nodes(pos, names[i:])
        return pos
    def enter_file(self, filename, package):
        """Begin one .proto file: reset the buffers and set the package scope."""
        self.filename = filename
        self.package = package.split('.')
        self._init_field()
        self.scope = self.enter_package(package)
    def exit_file(self):
        """Finish the current file and pop back to its parent scope."""
        self._init_field()
        self.filename = None
        self.package = []
        self.scope = self.scope.parent
    def enter(self, message_name):
        """Push a nested message/enum scope."""
        self.scope = TreeNode(message_name, self.scope, self.filename,
                self.package)
    def exit(self):
        """Pop one message/enum scope."""
        self.scope = self.scope.parent
    def _init_field(self):
        # Per-file output buffers (fragments of generated Lua source).
        self.descriptor = []
        self.context = []
        self.message = []
        self.register = []
    def _build_nodes(self, node, names):
        # Create a chain of nodes for the remaining package components.
        parent = node
        for i in names:
            parent = TreeNode(i, parent, self.filename, self.package)
        return parent
class Writer(object):
    """Indenting string buffer for emitting generated Lua code.

    Used as a context manager: entering adds one indent level, exiting
    removes it. Calling the instance writes one indented chunk, optionally
    preceded by a fixed prefix (e.g. a descriptor variable name).
    """
    def __init__(self, prefix=None):
        self.io = StringIO()
        self.__indent = ''
        self.__prefix = prefix
    def getvalue(self):
        """Return everything written so far."""
        return self.io.getvalue()
    def __enter__(self):
        self.__indent += '    '
        return self
    def __exit__(self, type, value, trackback):
        # Drop one indent level (matches the 4 characters added in __enter__).
        self.__indent = self.__indent[:-4]
    def __call__(self, data):
        self.io.write(self.__indent)
        if self.__prefix:
            self.io.write(self.__prefix)
        self.io.write(data)
# Lua literal emitted as a field's default when the .proto supplies none,
# keyed by proto wire type.
DEFAULT_VALUE = {
    FDP.TYPE_DOUBLE : '0.0',
    FDP.TYPE_FLOAT : '0.0',
    FDP.TYPE_INT64 : '0',
    FDP.TYPE_UINT64 : '0',
    FDP.TYPE_INT32 : '0',
    FDP.TYPE_FIXED64 : '0',
    FDP.TYPE_FIXED32 : '0',
    FDP.TYPE_BOOL : 'false',
    FDP.TYPE_STRING : '""',
    FDP.TYPE_MESSAGE : 'nil',
    FDP.TYPE_BYTES : '""',
    FDP.TYPE_UINT32 : '0',
    FDP.TYPE_ENUM : '1',
    FDP.TYPE_SFIXED32 : '0',
    FDP.TYPE_SFIXED64 : '0',
    FDP.TYPE_SINT32 : '0',
    FDP.TYPE_SINT64 : '0',
}
def code_gen_enum_item(index, enum_value, env):
    """Emit the Lua EnumValueDescriptor for one enum value and return the
    generated object's name."""
    qualified = env.get_local_name() + '.' + enum_value.name
    obj_name = qualified.upper().replace('.', '_') + '_ENUM'
    declaration = "local %s = protobuf.EnumValueDescriptor();\n" % obj_name
    env.descriptor.append(declaration)
    w = Writer(obj_name)
    w('.name = "%s"\n' % enum_value.name)
    w('.index = %d\n' % index)
    w('.number = %d\n' % enum_value.number)
    env.context.append(w.getvalue())
    return obj_name
def code_gen_enum(enum_desc, env):
    """Emit the Lua EnumDescriptor (and all of its values) for one enum;
    returns the generated object's name."""
    env.enter(enum_desc.name)
    full_name = env.get_local_name()
    obj_name = full_name.upper().replace('.', '_')
    env.descriptor.append(
        "local %s = protobuf.EnumDescriptor();\n"% obj_name
    )
    context = Writer(obj_name)
    context('.name = "%s"\n' % enum_desc.name)
    context('.full_name = "%s"\n' % env.get_global_name())
    values = []
    for i, enum_value in enumerate(enum_desc.value):
        values.append(code_gen_enum_item(i, enum_value, env))
    context('.values = {%s}\n' % ','.join(values))
    env.context.append(context.getvalue())
    env.exit()
    return obj_name
def code_gen_field(index, field_desc, env):
    """Emit the Lua FieldDescriptor for one message field (or extension);
    returns the generated object's name."""
    full_name = env.get_local_name() + '.' + field_desc.name
    obj_name = full_name.upper().replace('.', '_') + '_FIELD'
    env.descriptor.append(
        "local %s = protobuf.FieldDescriptor();\n"% obj_name
    )
    context = Writer(obj_name)
    context('.name = "%s"\n' % field_desc.name)
    context('.full_name = "%s"\n' % (
        env.get_global_name() + '.' + field_desc.name))
    context('.number = %d\n' % field_desc.number)
    context('.index = %d\n' % index)
    context('.label = %d\n' % field_desc.label)
    if field_desc.HasField("default_value"):
        context('.has_default_value = true\n')
        value = field_desc.default_value
        if field_desc.type == FDP.TYPE_STRING:
            context('.default_value = "%s"\n'%value)
        else:
            context('.default_value = %s\n'%value)
    else:
        context('.has_default_value = false\n')
    # NOTE(review): .default_value is emitted again below, overwriting any
    # explicit default written above -- confirm the Lua runtime expects this.
    if field_desc.label == FDP.LABEL_REPEATED:
        default_value = "{}"
    elif field_desc.HasField('type_name'):
        default_value = "nil"
    else:
        default_value = DEFAULT_VALUE[field_desc.type]
    context('.default_value = %s\n' % default_value)
    if field_desc.HasField('type_name'):
        # Message/enum-typed field: reference the generated descriptor object.
        type_name = env.get_ref_name(field_desc.type_name).upper().replace('.', '_')
        if field_desc.type == FDP.TYPE_MESSAGE:
            context('.message_type = %s\n' % type_name)
        else:
            context('.enum_type = %s\n' % type_name)
    if field_desc.HasField('extendee'):
        # Extension field: register it on the extended message type.
        type_name = env.get_ref_name(field_desc.extendee)
        env.register.append(
            "%s.RegisterExtension(%s)\n" % (type_name, obj_name)
        )
    context('.type = %d\n' % field_desc.type)
    context('.cpp_type = %d\n\n' % CPP_TYPE[field_desc.type])
    env.context.append(context.getvalue())
    return obj_name
def code_gen_message(message_descriptor, env, containing_type = None):
    """Emit the Lua Descriptor for a message, recursing into nested messages,
    enums, fields and extensions; returns the generated object's name."""
    env.enter(message_descriptor.name)
    full_name = env.get_local_name()
    obj_name = full_name.upper().replace('.', '_')
    env.descriptor.append(
        "local %s = protobuf.Descriptor();\n"% obj_name
    )
    context = Writer(obj_name)
    context('.name = "%s"\n' % message_descriptor.name)
    context('.full_name = "%s"\n' % env.get_global_name())
    nested_types = []
    for msg_desc in message_descriptor.nested_type:
        msg_name = code_gen_message(msg_desc, env, obj_name)
        nested_types.append(msg_name)
    context('.nested_types = {%s}\n' % ', '.join(nested_types))
    enums = []
    for enum_desc in message_descriptor.enum_type:
        enums.append(code_gen_enum(enum_desc, env))
    context('.enum_types = {%s}\n' % ', '.join(enums))
    fields = []
    for i, field_desc in enumerate(message_descriptor.field):
        fields.append(code_gen_field(i, field_desc, env))
    context('.fields = {%s}\n' % ', '.join(fields))
    if len(message_descriptor.extension_range) > 0:
        context('.is_extendable = true\n')
    else:
        context('.is_extendable = false\n')
    extensions = []
    for i, field_desc in enumerate(message_descriptor.extension):
        extensions.append(code_gen_field(i, field_desc, env))
    context('.extensions = {%s}\n' % ', '.join(extensions))
    if containing_type:
        context('.containing_type = %s\n' % containing_type)
    # Expose the message constructor at module level in the generated Lua.
    env.message.append('%s = protobuf.Message(%s)\n' % (full_name,
        obj_name))
    env.context.append(context.getvalue())
    env.exit()
    return obj_name
def write_header(writer):
    """Emit the standard do-not-edit banner at the top of a generated file."""
    banner = """-- Generated By protoc-gen-lua Do not Edit
"""
    writer(banner)
def code_gen_file(proto_file, env, is_gen):
    """Walk one FileDescriptorProto: register its types in the name tree and,
    when `is_gen` is true, assemble the generated Lua module into _files."""
    filename = path.splitext(proto_file.name)[0]
    env.enter_file(filename, proto_file.package)
    includes = []
    for f in proto_file.dependency:
        inc_file = path.splitext(f)[0]
        includes.append(inc_file)
    # for field_desc in proto_file.extension:
    # code_gen_extensions(field_desc, field_desc.name, env)
    for enum_desc in proto_file.enum_type:
        code_gen_enum(enum_desc, env)
        for enum_value in enum_desc.value:
            # Top-level enum values become module-level constants in the Lua.
            env.message.append('%s = %d\n' % (enum_value.name,
                enum_value.number))
    for msg_desc in proto_file.message_type:
        code_gen_message(msg_desc, env)
    if is_gen:
        lua = Writer()
        write_header(lua)
        lua('local protobuf = require "protobuf"\n')
        for i in includes:
            lua('local %s_pb = require("%s_pb")\n' % (i, i))
        lua("module('%s_pb')\n" % env.filename)
        lua('\n\n')
        # NOTE(review): map() used for side effects only works under Python 2;
        # Python 3's lazy map would emit nothing here.
        map(lua, env.descriptor)
        lua('\n')
        map(lua, env.context)
        lua('\n')
        env.message.sort()
        map(lua, env.message)
        lua('\n')
        map(lua, env.register)
        _files[env.filename+ '_pb.lua'] = lua.getvalue()
    env.exit_file()
def main():
    """protoc plugin entry point: read a serialized CodeGeneratorRequest from
    stdin, generate Lua modules for the requested files, and write the
    CodeGeneratorResponse to stdout."""
    plugin_require_bin = sys.stdin.read()
    code_gen_req = plugin_pb2.CodeGeneratorRequest()
    code_gen_req.ParseFromString(plugin_require_bin)
    env = Env()
    # Every proto file is walked (to register types); only files explicitly
    # requested by protoc produce output.
    for proto_file in code_gen_req.proto_file:
        code_gen_file(proto_file, env,
            proto_file.name in code_gen_req.file_to_generate)
    code_generated = plugin_pb2.CodeGeneratorResponse()
    for k in _files:
        file_desc = code_generated.file.add()
        file_desc.name = k
        file_desc.content = _files[k]
    sys.stdout.write(code_generated.SerializeToString())
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/env python
"""
tesshelper.py -- Utility operations to compare, report stats, and copy
public headers for tesseract 3.0x VS2008 Project
$RCSfile: tesshelper.py,v $ $Revision: 7ca575b377aa $ $Date: 2012/03/07 17:26:31 $
"""
r"""
Requires:
python 2.7 or greater: activestate.com
http://www.activestate.com/activepython/downloads
because using the new argparse module and new literal set syntax (s={1, 2}) .
General Notes:
--------------
Format for a .vcproj file entry:
<File
RelativePath="..\src\allheaders.h"
>
</File>
"""
epilogStr = r"""
Examples:
Assume that tesshelper.py is in c:\buildfolder\tesseract-3.02\vs2008,
which is also the current directory. Then,
python tesshelper .. compare
will compare c:\buildfolder\tesseract-3.02 "library" directories to the
libtesseract Project
(c:\buildfolder\tesseract-3.02\vs2008\libtesseract\libtesseract.vcproj).
python tesshelper .. report
will display summary stats for c:\buildfolder\tesseract-3.02 "library"
directories and the libtesseract Project.
python tesshelper .. copy ..\..\include
will copy all "public" libtesseract header files to
c:\buildfolder\include.
python tesshelper .. clean
will clean the vs2008 folder of all build directories, and .user, .suo,
.ncb, and other temp files.
"""
# imports of python standard library modules
# See Python Documentation | Library Reference for details
import collections
import glob
import argparse
import os
import re
import shutil
import sys
# ====================================================================
VERSION = "1.0 %s" % "$Date: 2012/03/07 17:26:31 $".split()[1]  # version stamp derived from the RCS date keyword
PROJ_SUBDIR = r"vs2008\libtesseract"  # project directory relative to the tesseract root
PROJFILE = "libtesseract.vcproj"  # VS2008 project file name
NEWHEADERS_FILENAME = "newheaders.txt"  # output: <File> entries for headers missing from the project
NEWSOURCES_FILENAME = "newsources.txt"  # output: <File> entries for sources missing from the project
fileNodeTemplate = \
''' <File
RelativePath="..\..\%s"
>
</File>
'''
# ====================================================================
def getProjectfiles(libTessDir, libProjectFile, nTrimChars):
    """Parse the .vcproj and classify its file entries.

    Returns a 4-tuple of sets:
      projectFilesSet -- absolute, lowercased paths with the first
                         nTrimChars characters removed
      projectHFiles   -- header (.h) entries as written in the project file
      projectCFiles   -- source (.c/.cpp) entries
      projectRFiles   -- resource (.rc) entries

    (Fixes: the Python-2-only print statement is replaced with a
    single-argument print() call that behaves identically under both
    versions, and the project file is read via a context manager so the
    handle is always closed.)
    """
    projectCFiles = set()
    projectHFiles = set()
    projectRFiles = set()
    projectFilesSet = set()
    with open(libProjectFile, "r") as f:
        data = f.read()
    projectFiles = re.findall(r'(?i)RelativePath="(\.[^"]+)"', data)
    for projectFile in projectFiles:
        root, ext = os.path.splitext(projectFile.lower())
        if ext == ".c" or ext == ".cpp":
            projectCFiles.add(projectFile)
        elif ext == ".h":
            projectHFiles.add(projectFile)
        elif ext == ".rc":
            projectRFiles.add(projectFile)
        else:
            print("unknown file type: %s" % projectFile)
        relativePath = os.path.join(libTessDir, projectFile)
        relativePath = os.path.abspath(relativePath)
        relativePath = relativePath[nTrimChars:].lower()
        projectFilesSet.add(relativePath)
    return projectFilesSet, projectHFiles, projectCFiles, projectRFiles
def getTessLibFiles(tessDir, nTrimChars):
    """Collect every .c/.cpp/.h/.rc file from the libtesseract "library"
    sub-directories of tessDir.

    Paths are returned absolute, lowercased, with the first nTrimChars
    characters removed (i.e. relative to the tesseract root).
    """
    libDirs = [
        "api",
        "ccmain",
        "ccstruct",
        "ccutil",
        "classify",
        "cube",
        "cutil",
        "dict",
        r"neural_networks\runtime",
        "opencl",
        "textord",
        "viewer",
        "wordrec",
        #"training",
        r"vs2008\port",
        r"vs2008\libtesseract",
    ]
    tessFiles = set()
    for subDir in libDirs:
        base = os.path.join(tessDir, subDir)
        for extension in ("*.c", "*.cpp", "*.h", "*.rc"):
            for match in glob.glob(os.path.join(base, extension)):
                normalized = os.path.abspath(match)[nTrimChars:].lower()
                tessFiles.add(normalized)
    return tessFiles
# ====================================================================
def tessCompare(tessDir):
    '''Compare libtesseract Project files and actual "sub-library" files.

    Prints files present on disk but missing from the Project ("extra"),
    writes ready-to-paste <File> items for them to NEWHEADERS_FILENAME /
    NEWSOURCES_FILENAME, then prints Project entries whose files no
    longer exist on disk ("dead").
    '''
    vs2008Dir = os.path.join(tessDir, "vs2008")
    libTessDir = os.path.join(vs2008Dir, "libtesseract")
    libProjectFile = os.path.join(libTessDir,"libtesseract.vcproj")
    tessAbsDir = os.path.abspath(tessDir)
    # +1 so the path separator after the root is trimmed as well.
    nTrimChars = len(tessAbsDir)+1
    print 'Comparing VS2008 Project "%s" with\n "%s"' % (libProjectFile,
                                                         tessAbsDir)
    projectFilesSet, projectHFiles, projectCFiles, projectRFiles = \
        getProjectfiles(libTessDir, libProjectFile, nTrimChars)
    tessFiles = getTessLibFiles(tessDir, nTrimChars)
    # Files on disk that the Project doesn't reference.
    extraFiles = tessFiles - projectFilesSet
    print "%2d Extra files (in %s but not in Project)" % (len(extraFiles),
                                                          tessAbsDir)
    headerFiles = []
    sourceFiles = []
    sortedList = list(extraFiles)
    sortedList.sort()
    for filename in sortedList:
        root, ext = os.path.splitext(filename.lower())
        if ext == ".h":
            headerFiles.append(filename)
        else:
            # Anything that isn't a header is treated as a source item.
            sourceFiles.append(filename)
        print " %s " % filename
    print
    print "%2d new header file items written to %s" % (len(headerFiles),
                                                       NEWHEADERS_FILENAME)
    headerFiles.sort()
    with open(NEWHEADERS_FILENAME, "w") as f:
        for filename in headerFiles:
            f.write(fileNodeTemplate % filename)
    print "%2d new source file items written to %s" % (len(sourceFiles),
                                                       NEWSOURCES_FILENAME)
    sourceFiles.sort()
    with open(NEWSOURCES_FILENAME, "w") as f:
        for filename in sourceFiles:
            f.write(fileNodeTemplate % filename)
    print
    # Project entries whose files no longer exist on disk.
    deadFiles = projectFilesSet - tessFiles
    print "%2d Dead files (in Project but not in %s" % (len(deadFiles),
                                                        tessAbsDir)
    sortedList = list(deadFiles)
    sortedList.sort()
    for filename in sortedList:
        print " %s " % filename
# ====================================================================
def tessReport(tessDir):
    """Report summary stats on "sub-library" files and libtesseract Project file."""
    vs2008Dir = os.path.join(tessDir, "vs2008")
    libTessDir = os.path.join(vs2008Dir, "libtesseract")
    libProjectFile = os.path.join(libTessDir,"libtesseract.vcproj")
    tessAbsDir = os.path.abspath(tessDir)
    # +1 so the path separator after the root is trimmed as well.
    nTrimChars = len(tessAbsDir)+1
    projectFilesSet, projectHFiles, projectCFiles, projectRFiles = \
        getProjectfiles(libTessDir, libProjectFile, nTrimChars)
    tessFiles = getTessLibFiles(tessDir, nTrimChars)
    print 'Summary stats for "%s" library directories' % tessAbsDir
    # Per-directory tallies: folder -> Counter keyed by extension
    # without its leading dot ('h', 'cpp', 'c', 'rc').
    folderCounters = {}
    for tessFile in tessFiles:
        tessFile = tessFile.lower()
        folder, head = os.path.split(tessFile)
        file, ext = os.path.splitext(head)
        typeCounter = folderCounters.setdefault(folder, collections.Counter())
        typeCounter[ext[1:]] += 1
    folders = folderCounters.keys()
    folders.sort()
    totalFiles = 0
    totalH = 0
    totalCPP = 0
    totalOther = 0  # NOTE(review): never updated below -- .c/.rc counts are not totalled
    print
    print " total h cpp"
    print " ----- --- ---"
    for folder in folders:
        counters = folderCounters[folder]
        nHFiles = counters['h']
        nCPPFiles = counters['cpp']
        total = nHFiles + nCPPFiles
        totalFiles += total
        totalH += nHFiles
        totalCPP += nCPPFiles
        print " %5d %3d %3d %s" % (total, nHFiles, nCPPFiles, folder)
    print " ----- --- ---"
    print " %5d %3d %3d" % (totalFiles, totalH, totalCPP)
    print
    # Counts as recorded in the .vcproj itself, for comparison.
    print 'Summary stats for VS2008 Project "%s"' % libProjectFile
    print " %5d %s" %(len(projectHFiles), "Header files")
    print " %5d %s" % (len(projectCFiles), "Source files")
    print " %5d %s" % (len(projectRFiles), "Resource files")
    print " -----"
    print " %5d" % (len(projectHFiles) + len(projectCFiles) + len(projectRFiles), )
# ====================================================================
def copyIncludes(fileSet, description, tessDir, includeDir):
"""Copy set of files to specified include dir."""
print
print 'Copying libtesseract "%s" headers to %s' % (description, includeDir)
print
sortedList = list(fileSet)
sortedList.sort()
count = 0
errList = []
for includeFile in sortedList:
filepath = os.path.join(tessDir, includeFile)
if os.path.isfile(filepath):
shutil.copy2(filepath, includeDir)
print "Copied: %s" % includeFile
count += 1
else:
print '***Error: "%s" doesn\'t exist"' % filepath
errList.append(filepath)
print '%d header files successfully copied to "%s"' % (count, includeDir)
if len(errList):
print "The following %d files were not copied:"
for filepath in errList:
print " %s" % filepath
def tessCopy(tessDir, includeDir):
    '''Copy all "public" libtesseract Project header files to include directory.
    Preserves directory hierarchy.

    NOTE(review): copyIncludes() copies flat into its target directory,
    which doesn't obviously match the "preserves directory hierarchy"
    claim above -- confirm against actual usage.
    '''
    # Headers forming the public C/C++ API surface.
    baseIncludeSet = {
        r"api\baseapi.h",
        r"api\capi.h",
        r"api\apitypes.h",
        r"ccstruct\publictypes.h",
        r"ccmain\thresholder.h",
        r"ccutil\host.h",
        r"ccutil\basedir.h",
        r"ccutil\tesscallback.h",
        r"ccutil\unichar.h",
        r"ccutil\platform.h",
        }
    # Headers transitively required by strngs.h.
    strngIncludeSet = {
        r"ccutil\strngs.h",
        r"ccutil\memry.h",
        r"ccutil\host.h",
        r"ccutil\serialis.h",
        r"ccutil\errcode.h",
        r"ccutil\fileerr.h",
        #r"ccutil\genericvector.h",
        }
    # Headers transitively required by the result-iterator API.
    resultIteratorIncludeSet = {
        r"ccmain\ltrresultiterator.h",
        r"ccmain\pageiterator.h",
        r"ccmain\resultiterator.h",
        r"ccutil\genericvector.h",
        r"ccutil\tesscallback.h",
        r"ccutil\errcode.h",
        r"ccutil\host.h",
        r"ccutil\helpers.h",
        r"ccutil\ndminx.h",
        r"ccutil\params.h",
        r"ccutil\unicharmap.h",
        r"ccutil\unicharset.h",
        }
    # Headers transitively required by genericvector.h (currently unused;
    # see the commented-out fileSet combination below).
    genericVectorIncludeSet = {
        r"ccutil\genericvector.h",
        r"ccutil\tesscallback.h",
        r"ccutil\errcode.h",
        r"ccutil\host.h",
        r"ccutil\helpers.h",
        r"ccutil\ndminx.h",
        }
    # Headers transitively required by blobs.h (currently unused).
    blobsIncludeSet = {
        r"ccstruct\blobs.h",
        r"ccstruct\rect.h",
        r"ccstruct\points.h",
        r"ccstruct\ipoints.h",
        r"ccutil\elst.h",
        r"ccutil\host.h",
        r"ccutil\serialis.h",
        r"ccutil\lsterr.h",
        r"ccutil\ndminx.h",
        r"ccutil\tprintf.h",
        r"ccutil\params.h",
        r"viewer\scrollview.h",
        r"ccstruct\vecfuncs.h",
        }
    # Build-support files copied to includeDir itself (not tesseract/).
    extraFilesSet = {
        #r"vs2008\include\stdint.h",
        r"vs2008\include\leptonica_versionnumbers.vsprops",
        r"vs2008\include\tesseract_versionnumbers.vsprops",
        }
    tessIncludeDir = os.path.join(includeDir, "tesseract")
    if os.path.isfile(tessIncludeDir):
        print 'Aborting: "%s" is a file not a directory.' % tessIncludeDir
        return
    if not os.path.exists(tessIncludeDir):
        os.mkdir(tessIncludeDir)
    #fileSet = baseIncludeSet | strngIncludeSet | genericVectorIncludeSet | blobsIncludeSet
    fileSet = baseIncludeSet | strngIncludeSet | resultIteratorIncludeSet
    copyIncludes(fileSet, "public", tessDir, tessIncludeDir)
    copyIncludes(extraFilesSet, "extra", tessDir, includeDir)
# ====================================================================
def tessClean(tessDir):
    '''Clean vs2008 folder of all build directories and certain temp files.

    Interactively confirms, then either lists (dry run, the default) or
    actually deletes:
      - LIB_/DLL_ Release/Debug build directories (pruned recursively),
      - everything in the vs2008 root except tesseract.sln,
        tesshelper.py and readme.txt,
      - .suo/.ncb/.user and editor-backup (trailing ~) files elsewhere.
    '''
    vs2008Dir = os.path.join(tessDir, "vs2008")
    vs2008AbsDir = os.path.abspath(vs2008Dir)
    answer = raw_input(
        'Are you sure you want to clean the\n "%s" folder (Yes/No) [No]? ' %
        vs2008AbsDir)
    if answer.lower() not in ("yes",):
        return
    answer = raw_input('Only list the items to be deleted (Yes/No) [Yes]? ')
    answer = answer.strip()
    # Anything other than an explicit "no" keeps the dry-run behavior.
    listOnly = answer.lower() not in ("no",)
    for rootDir, dirs, files in os.walk(vs2008AbsDir):
        for buildDir in ("LIB_Release", "LIB_Debug", "DLL_Release", "DLL_Debug"):
            if buildDir in dirs:
                # Removing from dirs also stops os.walk descending into it.
                dirs.remove(buildDir)
                absBuildDir = os.path.join(rootDir, buildDir)
                if listOnly:
                    print "Would remove: %s" % absBuildDir
                else:
                    print "Removing: %s" % absBuildDir
                    shutil.rmtree(absBuildDir)
        if rootDir == vs2008AbsDir:
            # vs2008 root: keep only the whitelisted files.
            for file in files:
                if file.lower() not in ("tesseract.sln",
                                        "tesshelper.py",
                                        "readme.txt"):
                    absPath = os.path.join(rootDir, file)
                    if listOnly:
                        print "Would remove: %s" % absPath
                    else:
                        print "Removing: %s" % absPath
                        os.remove(absPath)
        else:
            # Subdirectories: remove VS per-user cruft and editor backups.
            for file in files:
                root, ext = os.path.splitext(file)
                if ext.lower() in (".suo",
                                   ".ncb",
                                   ".user",
                                   ) or (
                    len(ext)>0 and ext[-1] == "~"):
                    absPath = os.path.join(rootDir, file)
                    if listOnly:
                        print "Would remove: %s" % absPath
                    else:
                        print "Removing: %s" % absPath
                        os.remove(absPath)
# ====================================================================
def validateTessDir(tessDir):
    """Check that tessDir is a valid tesseract directory.

    argparse "type" callback: raises ArgumentTypeError when the
    directory or its libtesseract project file is missing, otherwise
    returns the value unchanged.
    """
    if not os.path.isdir(tessDir):
        raise argparse.ArgumentTypeError('Directory "%s" doesn\'t exist.' % tessDir)
    # PROJ_SUBDIR is a module-level constant defined earlier in the file
    # (not shown in this chunk); PROJFILE is "libtesseract.vcproj".
    projFile = os.path.join(tessDir, PROJ_SUBDIR, PROJFILE)
    if not os.path.isfile(projFile):
        raise argparse.ArgumentTypeError('Project file "%s" doesn\'t exist.' % projFile)
    return tessDir
def validateDir(dir):
    """Check that dir is a valid directory named include."""
    # argparse "type" callback: raise on invalid input, return it unchanged.
    if not os.path.isdir(dir):
        raise argparse.ArgumentTypeError('Directory "%s" doesn\'t exist.' % dir)
    tail = os.path.basename(os.path.abspath(dir))
    if tail.lower() != "include":
        raise argparse.ArgumentTypeError('Include directory "%s" must be named "include".' % tail)
    return dir
def main ():
    """Parse the command line and dispatch to the selected sub-command."""
    parser = argparse.ArgumentParser(
        epilog=epilogStr,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("--version", action="version",
                        version="%(prog)s " + VERSION)
    # tessDir is validated (and passed through) by validateTessDir.
    parser.add_argument('tessDir', type=validateTessDir,
                        help="tesseract installation directory")
    subparsers = parser.add_subparsers(
        dest="subparser_name",
        title="Commands")
    parser_changes = subparsers.add_parser('compare',
        help="compare libtesseract Project with tessDir")
    parser_changes.set_defaults(func=tessCompare)
    parser_report = subparsers.add_parser('report',
        help="report libtesseract summary stats")
    parser_report.set_defaults(func=tessReport)
    parser_copy = subparsers.add_parser('copy',
        help="copy public libtesseract header files to includeDir")
    parser_copy.add_argument('includeDir', type=validateDir,
        help="Directory to copy header files to.")
    parser_copy.set_defaults(func=tessCopy)
    parser_clean = subparsers.add_parser('clean',
        help="clean vs2008 folder of build folders and .user files")
    parser_clean.set_defaults(func=tessClean)
    #kludge because argparse has no ability to set default subparser:
    # exactly one positional (tessDir) given -> assume "compare".
    if (len(sys.argv) == 2):
        sys.argv.append("compare")
    args = parser.parse_args()
    #handle commands
    # tessCopy is the only command that takes a second argument.
    if args.func == tessCopy:
        args.func(args.tessDir, args.includeDir)
    else:
        args.func(args.tessDir)

if __name__ == '__main__' :
    main()
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Zdenko Podobný
# Author: Zdenko Podobný
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple python demo script of tesseract-ocr 3.02 c-api
"""
import os
import sys
import ctypes
# Demo variables
lang = "eng"
filename = "../phototest.tif"
libpath = "/usr/local/lib64/"
libpath_w = "../vs2008/DLL_Release/"
TESSDATA_PREFIX = os.environ.get('TESSDATA_PREFIX')
if not TESSDATA_PREFIX:
TESSDATA_PREFIX = "../"
if sys.platform == "win32":
libname = libpath_w + "libtesseract302.dll"
libname_alt = "libtesseract302.dll"
os.environ["PATH"] += os.pathsep + libpath_w
else:
libname = libpath + "libtesseract.so.3.0.2"
libname_alt = "libtesseract.so.3"
try:
tesseract = ctypes.cdll.LoadLibrary(libname)
except:
try:
tesseract = ctypes.cdll.LoadLibrary(libname_alt)
except WindowsError, err:
print("Trying to load '%s'..." % libname)
print("Trying to load '%s'..." % libname_alt)
print(err)
exit(1)
tesseract.TessVersion.restype = ctypes.c_char_p
tesseract_version = tesseract.TessVersion()[:4]
# We need to check library version because libtesseract.so.3 is symlink
# and can point to other version than 3.02
if float(tesseract_version) < 3.02:
print("Found tesseract-ocr library version %s." % tesseract_version)
print("C-API is present only in version 3.02!")
exit(2)
api = tesseract.TessBaseAPICreate()
rc = tesseract.TessBaseAPIInit3(api, TESSDATA_PREFIX, lang);
if (rc):
tesseract.TessBaseAPIDelete(api)
print("Could not initialize tesseract.\n")
exit(3)
text_out = tesseract.TessBaseAPIProcessPages(api, filename, None , 0);
result_text = ctypes.string_at(text_out)
print result_text
| Python |
#!/usr/bin/python -u
#-*- coding: UTF-8 -*-
import subprocess
import MySQLdb
import os
import re
import sys
import time
import statvfs
ip="114.80.213.44"
ping = subprocess.Popen(["ping", "-c", "2", "-w", "500", ip], shell=False)
ping.wait()
if ping.returncode != 0:
#print ping.returncode, "ERROR: failed to ping host. Please check."
conn = MySQLdb.connect(host='localhost',port=3306,user='root',passwd='root')
cursor=conn.cursor()
conn.select_db('os')
sql = "insert into down(datetime) values(%s)"
param = (time.strftime('%Y-%m-%d',time.localtime(time.time())))
n = cursor.execute(sql,param)
print n
cursor.close()
sys.exit(1)
else:
print "OK"
| Python |
#!/usr/bin/python -u
#-*- coding: UTF-8 -*-
import os
import re
import sys
import time
import statvfs
import subprocess
import MySQLdb
import datetime
# Global regex: parses /proc/meminfo lines such as "MemTotal: 12345 kB"
# into named groups 'key' and 'value'.
re_meminfo_parser = re.compile(r'^(?P<key>\S*):\s*(?P<value>\d*)\s*kB')
#
class OSstatus:
"""
result = report client status.
"""
def __init__(self, sleep=2):
"""Constructor
"""
self.sleep=sleep
def _get_mem_usage(self):
"""get mem used by percent
self.result = falot
"""
result={}
try:
fd=open('/proc/meminfo', 'r')
lines=fd.readlines()
finally:
if fd:
fd.close()
for line in lines:
match=re_meminfo_parser.match(line)
if not match:
continue # skip lines that don't parse
key, value=match.groups(['key', 'value'])
result[key]=int(value)
#print "mem :", 100*(result["MemTotal"]-result["MemFree"])/result["MemTotal"]
return 100.0*(result["MemTotal"]-result["MemFree"])/result["MemTotal"]
def get_mem_usage(self):
"""safe to call _get_memused()
self.result = falot
"""
try:
return self._get_mem_usage()
except Exception, e:
print "_get_mem_usage(self) Exception, %s"%e
return 0
def get_5m_load(self):
"""get 5 mines avg load
self.result = float
"""
try:
return (os.getloadavg())[2]
except Exception, e:
print "_get_5m_load(self) Exception, %s"%e
return 0
def _read_cpu_usage(self):
"""Read the current system cpu usage from /proc/stat."""
try:
fd = open("/proc/stat", 'r')
lines = fd.readlines()
finally:
if fd:
fd.close()
for line in lines:
l = line.split()
if len(l) < 5:
continue
if l[0].startswith('cpu'):
return l
return {}
def get_cpu_usage(self):
"""get cpu avg used by percent
"""
cpustr=self._read_cpu_usage()
if not cpustr:
return 0
#cpu usage=[(user_2 +sys_2+nice_2) - (user_1 + sys_1+nice_1)]/(total_2 - total_1)*100
usni1=long(cpustr[1])+long(cpustr[2])+long(cpustr[3])+long(cpustr[5])+long(cpustr[6])+long(cpustr[7])+long(cpustr[4])
usn1=long(cpustr[1])+long(cpustr[2])+long(cpustr[3])
#usni1=long(cpustr[1])+long(cpustr[2])+long(cpustr[3])+long(cpustr[4])
time.sleep(self.sleep)
cpustr=self._read_cpu_usage()
if not cpustr:
return 0
usni2=long(cpustr[1])+long(cpustr[2])+float(cpustr[3])+long(cpustr[5])+long(cpustr[6])+long(cpustr[7])+long(cpustr[4])
usn2=long(cpustr[1])+long(cpustr[2])+long(cpustr[3])
cpuper=(usn2-usn1)/(usni2-usni1)
return 100*cpuper
def get_os_info(self):
"""overide all functions.
"""
#return {"cpu": "%s"%round(float(self.get_cpu_usage()), 2),\
# "mem": "%s"%round(float(self.get_mem_usage()), 2),\
# "load": "%s"%round(float(self.get_5m_load()), 2),\
# }
print "%s"%round(float(self.get_cpu_usage()), 2)
print "%s"%round(float(self.get_mem_usage()), 2)
print "%s"%round(float(self.get_5m_load()), 2)
d = datetime.datetime.now()
d = d + datetime.timedelta(seconds=54000)
ping_response = subprocess.Popen(["/bin/ping", "-c1", "-w100", "114.80.213.44"], stdout=subprocess.PIPE).stdout.read()
conn2 = MySQLdb.connect(host='114.80.213.44',port=3306,user='triniti',passwd='triniti',db='faithwar')
cursor2=conn2.cursor()
cursor2.execute("select count(1) from (SELECT DISTINCT logInfo FROM tbheartbeata UNION SELECT DISTINCT logInfo FROM tbheartbeatb)as tb")
row = cursor2.fetchone()
cursor2.close()
conn2.close()
conn_output = row[0]
#print row[0]
#conn_output = subprocess.Popen(["netstat -na|grep ESTABLISHED|awk '{print $5}'|awk -F: '{print $1}'|sort|uniq -c|sort -r|wc -l"],stdout=subprocess.PIPE,shell=True).stdout.read()
conn = MySQLdb.connect(host='localhost',port=3306,user='root',passwd='root')
cursor=conn.cursor()
###select db
conn.select_db('os')
sql = "insert into os(cpu,mem,loadavg,ping,conn,datetime) values(%s,%s,%s,%s,%s,%s)"
param = ( "%s"%round(float(self.get_cpu_usage()), 2),"%s"%round(float(self.get_mem_usage()), 2),"%s"%round(float(self.get_5m_load()), 2),ping_response[115:125],conn_output,d)
#param = ( "%s"%round(float(self.get_cpu_usage()), 2),"%s"%round(float(self.get_mem_usage()), 2),"%s"%round(float(self.get_5m_load()), 2),ping_response[115:125],conn_output,time.strftime('%Y-%m-%d',time.localtime(time.time())))
n = cursor.execute(sql,param)
print n
cursor.close()
#print ping_response
###connect to databases
#conn = MySQLdb.connect(host='localhost',port=3306,user='root',passwd='root')
#cursor=conn.cursor()
###select db
#conn.select_db('os')
#cpu = '123'
#mem = '456'
#cursor.execute("INSERT INTO os(cpu,mem,loadavg,datetime) VALUES('10','20','30','2012-07-16')")
#sql = "insert into os(cpu,mem,loadavg,datetime) values(%s,%s,%s,%s)"
#param = ("aaa","bbb","ccc",int(time.time()))
#n = cursor.execute(sql,param)
#print n
#def getConn():
# host="localhost"
# username="root"
# pwd="root"
# database="os"
# return MySQLdb.connect(host=host,user=username,passwd=pwd,db=database)
#cursor.close()
#conn.commit()
#conn.close()
###############################################
#
# unittest
#
###############################################
import unittest
class clientTestCase(unittest.TestCase):
    """Smoke tests for OSstatus.

    NOTE(review): test_all expects get_os_info() to return a dict, but
    the implementation returns None (its dict return is commented out),
    so test_all fails as written -- confirm intent.  The other tests
    read /proc and sleep, so they are host- and timing-dependent.
    """
    def setUp(self):
        # No shared fixture required.
        pass
    def tearDown(self):
        pass
    def test_cpu(self):
        """Expects a float from get_cpu_usage()."""
        osinfo=OSstatus(2)
        self.assertEqual(type(osinfo.get_cpu_usage()), float)
        return
    def test_mem(self):
        """Expects a float from get_mem_usage()."""
        osinfo=OSstatus(2)
        self.assertEqual(type(osinfo.get_mem_usage()), float)
        return
    def test_load(self):
        """Expects a float from get_5m_load()."""
        osinfo=OSstatus(2)
        self.assertEqual(type(osinfo.get_5m_load()), float)
        return
    def test_all(self):
        """Expects a dict from get_os_info() (currently returns None)."""
        osinfo=OSstatus()
        self.assertEqual(type(osinfo.get_os_info()), dict)
        return
if __name__=='__test__':
    # NOTE(review): __name__ is never '__test__' during normal execution
    # (it is the module name or '__main__'), so this branch -- and the
    # unittest.main() call -- is unreachable as written.
    unittest.main()
elif __name__=='__main__':
    # Script entry point: print one status report framed by rules.
    # (get_os_info() has no return statement, so the middle print
    # emits "None".)
    print "-"*20
    print OSstatus(2).get_os_info()
    print "-"*20
| Python |
#!/usr/bin/python -u
#-*- coding: UTF-8 -*-
import subprocess
import MySQLdb
import os
import re
import sys
import time
import statvfs
ip="114.80.213.44"
ping = subprocess.Popen(["ping", "-c", "2", "-w", "500", ip], shell=False)
ping.wait()
if ping.returncode != 0:
#print ping.returncode, "ERROR: failed to ping host. Please check."
conn = MySQLdb.connect(host='localhost',port=3306,user='root',passwd='root')
cursor=conn.cursor()
conn.select_db('os')
sql = "insert into down(datetime) values(%s)"
param = (time.strftime('%Y-%m-%d',time.localtime(time.time())))
n = cursor.execute(sql,param)
print n
cursor.close()
sys.exit(1)
else:
print "OK"
| Python |
#!/usr/bin/python -u
#-*- coding: UTF-8 -*-
import os
import re
import sys
import time
import statvfs
import subprocess
import MySQLdb
import datetime
# Global regex: parses /proc/meminfo lines such as "MemTotal: 12345 kB"
# into named groups 'key' and 'value'.
re_meminfo_parser = re.compile(r'^(?P<key>\S*):\s*(?P<value>\d*)\s*kB')
#
class OSstatus:
"""
result = report client status.
"""
def __init__(self, sleep=2):
"""Constructor
"""
self.sleep=sleep
def _get_mem_usage(self):
"""get mem used by percent
self.result = falot
"""
result={}
try:
fd=open('/proc/meminfo', 'r')
lines=fd.readlines()
finally:
if fd:
fd.close()
for line in lines:
match=re_meminfo_parser.match(line)
if not match:
continue # skip lines that don't parse
key, value=match.groups(['key', 'value'])
result[key]=int(value)
#print "mem :", 100*(result["MemTotal"]-result["MemFree"])/result["MemTotal"]
return 100.0*(result["MemTotal"]-result["MemFree"])/result["MemTotal"]
def get_mem_usage(self):
"""safe to call _get_memused()
self.result = falot
"""
try:
return self._get_mem_usage()
except Exception, e:
print "_get_mem_usage(self) Exception, %s"%e
return 0
def get_5m_load(self):
"""get 5 mines avg load
self.result = float
"""
try:
return (os.getloadavg())[2]
except Exception, e:
print "_get_5m_load(self) Exception, %s"%e
return 0
def _read_cpu_usage(self):
"""Read the current system cpu usage from /proc/stat."""
try:
fd = open("/proc/stat", 'r')
lines = fd.readlines()
finally:
if fd:
fd.close()
for line in lines:
l = line.split()
if len(l) < 5:
continue
if l[0].startswith('cpu'):
return l
return {}
def get_cpu_usage(self):
"""get cpu avg used by percent
"""
cpustr=self._read_cpu_usage()
if not cpustr:
return 0
#cpu usage=[(user_2 +sys_2+nice_2) - (user_1 + sys_1+nice_1)]/(total_2 - total_1)*100
usni1=long(cpustr[1])+long(cpustr[2])+long(cpustr[3])+long(cpustr[5])+long(cpustr[6])+long(cpustr[7])+long(cpustr[4])
usn1=long(cpustr[1])+long(cpustr[2])+long(cpustr[3])
#usni1=long(cpustr[1])+long(cpustr[2])+long(cpustr[3])+long(cpustr[4])
time.sleep(self.sleep)
cpustr=self._read_cpu_usage()
if not cpustr:
return 0
usni2=long(cpustr[1])+long(cpustr[2])+float(cpustr[3])+long(cpustr[5])+long(cpustr[6])+long(cpustr[7])+long(cpustr[4])
usn2=long(cpustr[1])+long(cpustr[2])+long(cpustr[3])
cpuper=(usn2-usn1)/(usni2-usni1)
return 100*cpuper
def get_os_info(self):
"""overide all functions.
"""
#return {"cpu": "%s"%round(float(self.get_cpu_usage()), 2),\
# "mem": "%s"%round(float(self.get_mem_usage()), 2),\
# "load": "%s"%round(float(self.get_5m_load()), 2),\
# }
print "%s"%round(float(self.get_cpu_usage()), 2)
print "%s"%round(float(self.get_mem_usage()), 2)
print "%s"%round(float(self.get_5m_load()), 2)
d = datetime.datetime.now()
d = d + datetime.timedelta(seconds=54000)
ping_response = subprocess.Popen(["/bin/ping", "-c1", "-w100", "114.80.213.44"], stdout=subprocess.PIPE).stdout.read()
conn2 = MySQLdb.connect(host='114.80.213.44',port=3306,user='triniti',passwd='triniti',db='faithwar')
cursor2=conn2.cursor()
cursor2.execute("select count(1) from (SELECT DISTINCT logInfo FROM tbheartbeata UNION SELECT DISTINCT logInfo FROM tbheartbeatb)as tb")
row = cursor2.fetchone()
cursor2.close()
conn2.close()
conn_output = row[0]
#print row[0]
#conn_output = subprocess.Popen(["netstat -na|grep ESTABLISHED|awk '{print $5}'|awk -F: '{print $1}'|sort|uniq -c|sort -r|wc -l"],stdout=subprocess.PIPE,shell=True).stdout.read()
conn = MySQLdb.connect(host='localhost',port=3306,user='root',passwd='root')
cursor=conn.cursor()
###select db
conn.select_db('os')
sql = "insert into os(cpu,mem,loadavg,ping,conn,datetime) values(%s,%s,%s,%s,%s,%s)"
param = ( "%s"%round(float(self.get_cpu_usage()), 2),"%s"%round(float(self.get_mem_usage()), 2),"%s"%round(float(self.get_5m_load()), 2),ping_response[115:125],conn_output,d)
#param = ( "%s"%round(float(self.get_cpu_usage()), 2),"%s"%round(float(self.get_mem_usage()), 2),"%s"%round(float(self.get_5m_load()), 2),ping_response[115:125],conn_output,time.strftime('%Y-%m-%d',time.localtime(time.time())))
n = cursor.execute(sql,param)
print n
cursor.close()
#print ping_response
###connect to databases
#conn = MySQLdb.connect(host='localhost',port=3306,user='root',passwd='root')
#cursor=conn.cursor()
###select db
#conn.select_db('os')
#cpu = '123'
#mem = '456'
#cursor.execute("INSERT INTO os(cpu,mem,loadavg,datetime) VALUES('10','20','30','2012-07-16')")
#sql = "insert into os(cpu,mem,loadavg,datetime) values(%s,%s,%s,%s)"
#param = ("aaa","bbb","ccc",int(time.time()))
#n = cursor.execute(sql,param)
#print n
#def getConn():
# host="localhost"
# username="root"
# pwd="root"
# database="os"
# return MySQLdb.connect(host=host,user=username,passwd=pwd,db=database)
#cursor.close()
#conn.commit()
#conn.close()
###############################################
#
# unittest
#
###############################################
import unittest
class clientTestCase(unittest.TestCase):
    """Smoke tests for OSstatus.

    NOTE(review): test_all expects get_os_info() to return a dict, but
    the implementation returns None (its dict return is commented out),
    so test_all fails as written -- confirm intent.  The other tests
    read /proc and sleep, so they are host- and timing-dependent.
    """
    def setUp(self):
        # No shared fixture required.
        pass
    def tearDown(self):
        pass
    def test_cpu(self):
        """Expects a float from get_cpu_usage()."""
        osinfo=OSstatus(2)
        self.assertEqual(type(osinfo.get_cpu_usage()), float)
        return
    def test_mem(self):
        """Expects a float from get_mem_usage()."""
        osinfo=OSstatus(2)
        self.assertEqual(type(osinfo.get_mem_usage()), float)
        return
    def test_load(self):
        """Expects a float from get_5m_load()."""
        osinfo=OSstatus(2)
        self.assertEqual(type(osinfo.get_5m_load()), float)
        return
    def test_all(self):
        """Expects a dict from get_os_info() (currently returns None)."""
        osinfo=OSstatus()
        self.assertEqual(type(osinfo.get_os_info()), dict)
        return
if __name__=='__test__':
    # NOTE(review): __name__ is never '__test__' during normal execution
    # (it is the module name or '__main__'), so this branch -- and the
    # unittest.main() call -- is unreachable as written.
    unittest.main()
elif __name__=='__main__':
    # Script entry point: print one status report framed by rules.
    # (get_os_info() has no return statement, so the middle print
    # emits "None".)
    print "-"*20
    print OSstatus(2).get_os_info()
    print "-"*20
| Python |
#!/usr/bin/python -u
#-*- coding: UTF-8 -*-
import subprocess
import MySQLdb
import os
import re
import sys
import time
import statvfs
ip="114.80.213.44"
ping = subprocess.Popen(["ping", "-c", "2", "-w", "500", ip], shell=False)
ping.wait()
if ping.returncode != 0:
#print ping.returncode, "ERROR: failed to ping host. Please check."
conn = MySQLdb.connect(host='localhost',port=3306,user='root',passwd='root')
cursor=conn.cursor()
conn.select_db('os')
sql = "insert into down(datetime) values(%s)"
param = (time.strftime('%Y-%m-%d',time.localtime(time.time())))
n = cursor.execute(sql,param)
print n
cursor.close()
sys.exit(1)
else:
print "OK"
| Python |
#!/usr/bin/python -u
#-*- coding: UTF-8 -*-
import os
import re
import sys
import time
import statvfs
import subprocess
import MySQLdb
import datetime
# Global regex: parses /proc/meminfo lines such as "MemTotal: 12345 kB"
# into named groups 'key' and 'value'.
re_meminfo_parser = re.compile(r'^(?P<key>\S*):\s*(?P<value>\d*)\s*kB')
#
class OSstatus:
"""
result = report client status.
"""
def __init__(self, sleep=2):
"""Constructor
"""
self.sleep=sleep
def _get_mem_usage(self):
"""get mem used by percent
self.result = falot
"""
result={}
try:
fd=open('/proc/meminfo', 'r')
lines=fd.readlines()
finally:
if fd:
fd.close()
for line in lines:
match=re_meminfo_parser.match(line)
if not match:
continue # skip lines that don't parse
key, value=match.groups(['key', 'value'])
result[key]=int(value)
#print "mem :", 100*(result["MemTotal"]-result["MemFree"])/result["MemTotal"]
return 100.0*(result["MemTotal"]-result["MemFree"])/result["MemTotal"]
def get_mem_usage(self):
"""safe to call _get_memused()
self.result = falot
"""
try:
return self._get_mem_usage()
except Exception, e:
print "_get_mem_usage(self) Exception, %s"%e
return 0
def get_5m_load(self):
"""get 5 mines avg load
self.result = float
"""
try:
return (os.getloadavg())[2]
except Exception, e:
print "_get_5m_load(self) Exception, %s"%e
return 0
def _read_cpu_usage(self):
"""Read the current system cpu usage from /proc/stat."""
try:
fd = open("/proc/stat", 'r')
lines = fd.readlines()
finally:
if fd:
fd.close()
for line in lines:
l = line.split()
if len(l) < 5:
continue
if l[0].startswith('cpu'):
return l
return {}
def get_cpu_usage(self):
"""get cpu avg used by percent
"""
cpustr=self._read_cpu_usage()
if not cpustr:
return 0
#cpu usage=[(user_2 +sys_2+nice_2) - (user_1 + sys_1+nice_1)]/(total_2 - total_1)*100
usni1=long(cpustr[1])+long(cpustr[2])+long(cpustr[3])+long(cpustr[5])+long(cpustr[6])+long(cpustr[7])+long(cpustr[4])
usn1=long(cpustr[1])+long(cpustr[2])+long(cpustr[3])
#usni1=long(cpustr[1])+long(cpustr[2])+long(cpustr[3])+long(cpustr[4])
time.sleep(self.sleep)
cpustr=self._read_cpu_usage()
if not cpustr:
return 0
usni2=long(cpustr[1])+long(cpustr[2])+float(cpustr[3])+long(cpustr[5])+long(cpustr[6])+long(cpustr[7])+long(cpustr[4])
usn2=long(cpustr[1])+long(cpustr[2])+long(cpustr[3])
cpuper=(usn2-usn1)/(usni2-usni1)
return 100*cpuper
def get_os_info(self):
"""overide all functions.
"""
#return {"cpu": "%s"%round(float(self.get_cpu_usage()), 2),\
# "mem": "%s"%round(float(self.get_mem_usage()), 2),\
# "load": "%s"%round(float(self.get_5m_load()), 2),\
# }
print "%s"%round(float(self.get_cpu_usage()), 2)
print "%s"%round(float(self.get_mem_usage()), 2)
print "%s"%round(float(self.get_5m_load()), 2)
d = datetime.datetime.now()
d = d + datetime.timedelta(seconds=54000)
ping_response = subprocess.Popen(["/bin/ping", "-c1", "-w100", "114.80.213.44"], stdout=subprocess.PIPE).stdout.read()
conn2 = MySQLdb.connect(host='114.80.213.44',port=3306,user='triniti',passwd='triniti',db='faithwar')
cursor2=conn2.cursor()
cursor2.execute("select count(1) from (SELECT DISTINCT logInfo FROM tbheartbeata UNION SELECT DISTINCT logInfo FROM tbheartbeatb)as tb")
row = cursor2.fetchone()
cursor2.close()
conn2.close()
conn_output = row[0]
#print row[0]
#conn_output = subprocess.Popen(["netstat -na|grep ESTABLISHED|awk '{print $5}'|awk -F: '{print $1}'|sort|uniq -c|sort -r|wc -l"],stdout=subprocess.PIPE,shell=True).stdout.read()
conn = MySQLdb.connect(host='localhost',port=3306,user='root',passwd='root')
cursor=conn.cursor()
###select db
conn.select_db('os')
sql = "insert into os(cpu,mem,loadavg,ping,conn,datetime) values(%s,%s,%s,%s,%s,%s)"
param = ( "%s"%round(float(self.get_cpu_usage()), 2),"%s"%round(float(self.get_mem_usage()), 2),"%s"%round(float(self.get_5m_load()), 2),ping_response[115:125],conn_output,d)
#param = ( "%s"%round(float(self.get_cpu_usage()), 2),"%s"%round(float(self.get_mem_usage()), 2),"%s"%round(float(self.get_5m_load()), 2),ping_response[115:125],conn_output,time.strftime('%Y-%m-%d',time.localtime(time.time())))
n = cursor.execute(sql,param)
print n
cursor.close()
#print ping_response
###connect to databases
#conn = MySQLdb.connect(host='localhost',port=3306,user='root',passwd='root')
#cursor=conn.cursor()
###select db
#conn.select_db('os')
#cpu = '123'
#mem = '456'
#cursor.execute("INSERT INTO os(cpu,mem,loadavg,datetime) VALUES('10','20','30','2012-07-16')")
#sql = "insert into os(cpu,mem,loadavg,datetime) values(%s,%s,%s,%s)"
#param = ("aaa","bbb","ccc",int(time.time()))
#n = cursor.execute(sql,param)
#print n
#def getConn():
# host="localhost"
# username="root"
# pwd="root"
# database="os"
# return MySQLdb.connect(host=host,user=username,passwd=pwd,db=database)
#cursor.close()
#conn.commit()
#conn.close()
###############################################
#
# unittest
#
###############################################
import unittest
class clientTestCase(unittest.TestCase):
    """Smoke tests for OSstatus.

    NOTE(review): test_all expects get_os_info() to return a dict, but
    the implementation returns None (its dict return is commented out),
    so test_all fails as written -- confirm intent.  The other tests
    read /proc and sleep, so they are host- and timing-dependent.
    """
    def setUp(self):
        # No shared fixture required.
        pass
    def tearDown(self):
        pass
    def test_cpu(self):
        """Expects a float from get_cpu_usage()."""
        osinfo=OSstatus(2)
        self.assertEqual(type(osinfo.get_cpu_usage()), float)
        return
    def test_mem(self):
        """Expects a float from get_mem_usage()."""
        osinfo=OSstatus(2)
        self.assertEqual(type(osinfo.get_mem_usage()), float)
        return
    def test_load(self):
        """Expects a float from get_5m_load()."""
        osinfo=OSstatus(2)
        self.assertEqual(type(osinfo.get_5m_load()), float)
        return
    def test_all(self):
        """Expects a dict from get_os_info() (currently returns None)."""
        osinfo=OSstatus()
        self.assertEqual(type(osinfo.get_os_info()), dict)
        return
if __name__=='__test__':
    # NOTE(review): __name__ is never '__test__' during normal execution
    # (it is the module name or '__main__'), so this branch -- and the
    # unittest.main() call -- is unreachable as written.
    unittest.main()
elif __name__=='__main__':
    # Script entry point: print one status report framed by rules.
    # (get_os_info() has no return statement, so the middle print
    # emits "None".)
    print "-"*20
    print OSstatus(2).get_os_info()
    print "-"*20
| Python |
#!/usr/bin/python -u
#-*- coding: UTF-8 -*-
import subprocess
import MySQLdb
import os
import re
import sys
import time
import statvfs
ip="114.80.213.44"
ping = subprocess.Popen(["ping", "-c", "2", "-w", "500", ip], shell=False)
ping.wait()
if ping.returncode != 0:
#print ping.returncode, "ERROR: failed to ping host. Please check."
conn = MySQLdb.connect(host='localhost',port=3306,user='root',passwd='root')
cursor=conn.cursor()
conn.select_db('os')
sql = "insert into down(datetime) values(%s)"
param = (time.strftime('%Y-%m-%d',time.localtime(time.time())))
n = cursor.execute(sql,param)
print n
cursor.close()
sys.exit(1)
else:
print "OK"
| Python |
#!/usr/bin/python -u
#-*- coding: UTF-8 -*-
import os
import re
import sys
import time
import statvfs
import subprocess
import MySQLdb
import datetime
# Global regex: parses /proc/meminfo lines such as "MemTotal: 12345 kB"
# into named groups 'key' and 'value'.
re_meminfo_parser = re.compile(r'^(?P<key>\S*):\s*(?P<value>\d*)\s*kB')
#
class OSstatus:
"""
result = report client status.
"""
    def __init__(self, sleep=2):
        """Constructor.

        sleep -- seconds between the two /proc/stat samples taken by
                 get_cpu_usage().
        """
        self.sleep=sleep
def _get_mem_usage(self):
"""get mem used by percent
self.result = falot
"""
result={}
try:
fd=open('/proc/meminfo', 'r')
lines=fd.readlines()
finally:
if fd:
fd.close()
for line in lines:
match=re_meminfo_parser.match(line)
if not match:
continue # skip lines that don't parse
key, value=match.groups(['key', 'value'])
result[key]=int(value)
#print "mem :", 100*(result["MemTotal"]-result["MemFree"])/result["MemTotal"]
return 100.0*(result["MemTotal"]-result["MemFree"])/result["MemTotal"]
    def get_mem_usage(self):
        """Safe wrapper around _get_mem_usage(); returns 0 on any error."""
        try:
            return self._get_mem_usage()
        except Exception, e:
            print "_get_mem_usage(self) Exception, %s"%e
            return 0
def get_5m_load(self):
"""get 5 mines avg load
self.result = float
"""
try:
return (os.getloadavg())[2]
except Exception, e:
print "_get_5m_load(self) Exception, %s"%e
return 0
def _read_cpu_usage(self):
"""Read the current system cpu usage from /proc/stat."""
try:
fd = open("/proc/stat", 'r')
lines = fd.readlines()
finally:
if fd:
fd.close()
for line in lines:
l = line.split()
if len(l) < 5:
continue
if l[0].startswith('cpu'):
return l
return {}
def get_cpu_usage(self):
"""get cpu avg used by percent
"""
cpustr=self._read_cpu_usage()
if not cpustr:
return 0
#cpu usage=[(user_2 +sys_2+nice_2) - (user_1 + sys_1+nice_1)]/(total_2 - total_1)*100
usni1=long(cpustr[1])+long(cpustr[2])+long(cpustr[3])+long(cpustr[5])+long(cpustr[6])+long(cpustr[7])+long(cpustr[4])
usn1=long(cpustr[1])+long(cpustr[2])+long(cpustr[3])
#usni1=long(cpustr[1])+long(cpustr[2])+long(cpustr[3])+long(cpustr[4])
time.sleep(self.sleep)
cpustr=self._read_cpu_usage()
if not cpustr:
return 0
usni2=long(cpustr[1])+long(cpustr[2])+float(cpustr[3])+long(cpustr[5])+long(cpustr[6])+long(cpustr[7])+long(cpustr[4])
usn2=long(cpustr[1])+long(cpustr[2])+long(cpustr[3])
cpuper=(usn2-usn1)/(usni2-usni1)
return 100*cpuper
def get_os_info(self):
"""overide all functions.
"""
#return {"cpu": "%s"%round(float(self.get_cpu_usage()), 2),\
# "mem": "%s"%round(float(self.get_mem_usage()), 2),\
# "load": "%s"%round(float(self.get_5m_load()), 2),\
# }
print "%s"%round(float(self.get_cpu_usage()), 2)
print "%s"%round(float(self.get_mem_usage()), 2)
print "%s"%round(float(self.get_5m_load()), 2)
d = datetime.datetime.now()
d = d + datetime.timedelta(seconds=54000)
ping_response = subprocess.Popen(["/bin/ping", "-c1", "-w100", "114.80.213.44"], stdout=subprocess.PIPE).stdout.read()
conn2 = MySQLdb.connect(host='114.80.213.44',port=3306,user='triniti',passwd='triniti',db='faithwar')
cursor2=conn2.cursor()
cursor2.execute("select count(1) from (SELECT DISTINCT logInfo FROM tbheartbeata UNION SELECT DISTINCT logInfo FROM tbheartbeatb)as tb")
row = cursor2.fetchone()
cursor2.close()
conn2.close()
conn_output = row[0]
#print row[0]
#conn_output = subprocess.Popen(["netstat -na|grep ESTABLISHED|awk '{print $5}'|awk -F: '{print $1}'|sort|uniq -c|sort -r|wc -l"],stdout=subprocess.PIPE,shell=True).stdout.read()
conn = MySQLdb.connect(host='localhost',port=3306,user='root',passwd='root')
cursor=conn.cursor()
###select db
conn.select_db('os')
sql = "insert into os(cpu,mem,loadavg,ping,conn,datetime) values(%s,%s,%s,%s,%s,%s)"
param = ( "%s"%round(float(self.get_cpu_usage()), 2),"%s"%round(float(self.get_mem_usage()), 2),"%s"%round(float(self.get_5m_load()), 2),ping_response[115:125],conn_output,d)
#param = ( "%s"%round(float(self.get_cpu_usage()), 2),"%s"%round(float(self.get_mem_usage()), 2),"%s"%round(float(self.get_5m_load()), 2),ping_response[115:125],conn_output,time.strftime('%Y-%m-%d',time.localtime(time.time())))
n = cursor.execute(sql,param)
print n
cursor.close()
#print ping_response
###connect to databases
#conn = MySQLdb.connect(host='localhost',port=3306,user='root',passwd='root')
#cursor=conn.cursor()
###select db
#conn.select_db('os')
#cpu = '123'
#mem = '456'
#cursor.execute("INSERT INTO os(cpu,mem,loadavg,datetime) VALUES('10','20','30','2012-07-16')")
#sql = "insert into os(cpu,mem,loadavg,datetime) values(%s,%s,%s,%s)"
#param = ("aaa","bbb","ccc",int(time.time()))
#n = cursor.execute(sql,param)
#print n
#def getConn():
# host="localhost"
# username="root"
# pwd="root"
# database="os"
# return MySQLdb.connect(host=host,user=username,passwd=pwd,db=database)
#cursor.close()
#conn.commit()
#conn.close()
###############################################
#
# unittest
#
###############################################
import unittest
class clientTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_cpu(self):
"""
cpu
"""
osinfo=OSstatus(2)
self.assertEqual(type(osinfo.get_cpu_usage()), float)
return
def test_mem(self):
"""
mem
"""
osinfo=OSstatus(2)
self.assertEqual(type(osinfo.get_mem_usage()), float)
return
def test_load(self):
"""
load
"""
osinfo=OSstatus(2)
self.assertEqual(type(osinfo.get_5m_load()), float)
return
def test_all(self):
"""
load
"""
osinfo=OSstatus()
self.assertEqual(type(osinfo.get_os_info()), dict)
return
# Entry point: under the (unusual) module name '__test__' run the unit
# tests; as a plain script, print one metrics sample framed by dashes.
if __name__=='__test__':
    unittest.main()
elif __name__=='__main__':
    print "-"*20
    # get_os_info() prints the metrics itself and returns None.
    print OSstatus(2).get_os_info()
    print "-"*20
| Python |
#!/usr/bin/python
# Copyright 2011 Google, Inc. All Rights Reserved.
# simple script to walk source tree looking for third-party licenses
# dumps resulting html page to stdout
import os, re, mimetypes, sys
# read source directories to scan from command line
SOURCE = sys.argv[1:]
# regex to find /* */ style comment blocks (non-greedy so adjacent blocks
# are not merged; DOTALL lets one block span many lines)
COMMENT_BLOCK = re.compile(r"(/\*.+?\*/)", re.MULTILINE | re.DOTALL)
# regexes used to detect if a comment block is a license notice
COMMENT_LICENSE = re.compile(r"(license)", re.IGNORECASE)
COMMENT_COPYRIGHT = re.compile(r"(copyright)", re.IGNORECASE)
# MIME types that are never scanned for license comments
EXCLUDE_TYPES = [
    "application/xml",
    "image/png",
]
# list of known licenses; keys are derived by stripping all whitespace and
# forcing to lowercase to help combine multiple files that have same license.
KNOWN_LICENSES = {}
class License:
    """One distinct license text plus the files it applies to."""

    def __init__(self, license_text):
        self.license_text = license_text
        self.filenames = []

    def add_file(self, filename):
        """Record *filename* as covered by this license, ignoring duplicates."""
        if filename in self.filenames:
            return
        self.filenames.append(filename)
# Strips every non-word character so equivalent license texts compare equal.
LICENSE_KEY = re.compile(r"[^\w]")

def find_license(license_text):
    """Return the canonical License for *license_text*, creating it on first sight."""
    # TODO(alice): a lot these licenses are almost identical Apache licenses.
    # Most of them differ in origin/modifications. Consider combining similar
    # licenses.
    license_key = LICENSE_KEY.sub("", license_text).lower()
    if license_key in KNOWN_LICENSES:
        return KNOWN_LICENSES[license_key]
    license = License(license_text)
    KNOWN_LICENSES[license_key] = license
    return license
def discover_license(exact_path, filename):
    """Scan one file and register any license text found in it.

    Files named *LICENSE apply to the filename prefix; any other file is
    searched for comment blocks containing both "license" and "copyright".

    @param exact_path: full path of the file to read
    @param filename: bare file name used for MIME detection and reporting
    """
    # when filename ends with LICENSE, assume applies to filename prefixed
    if filename.endswith("LICENSE"):
        with open(exact_path) as file:
            license_text = file.read()
        target_filename = filename[:-len("LICENSE")]
        if target_filename.endswith("."): target_filename = target_filename[:-1]
        find_license(license_text).add_file(target_filename)
        return None
    # guess_type() returns a (type, encoding) tuple; the original compared
    # the whole tuple against EXCLUDE_TYPES (a list of type strings), so no
    # file was ever excluded. Compare only the type component.
    mimetype = mimetypes.guess_type(filename)[0]
    if mimetype in EXCLUDE_TYPES: return None
    with open(exact_path) as file:
        raw_file = file.read()
    # include comments that have both "license" and "copyright" in the text
    for comment in COMMENT_BLOCK.finditer(raw_file):
        comment = comment.group(1)
        if COMMENT_LICENSE.search(comment) is None: continue
        if COMMENT_COPYRIGHT.search(comment) is None: continue
        find_license(comment).add_file(filename)
# Walk every requested tree and collect license texts from each file.
for source in SOURCE:
    for root, dirs, files in os.walk(source):
        for name in files:
            discover_license(os.path.join(root, name), name)
# Emit one HTML report to stdout: a section per distinct license text,
# listing the files it covers.
print "<html><head><style> body { font-family: sans-serif; } pre { background-color: #eeeeee; padding: 1em; white-space: pre-wrap; } </style></head><body>"
for license in KNOWN_LICENSES.values():
    print "<h3>Notices for files:</h3><ul>"
    filenames = license.filenames
    filenames.sort()
    for filename in filenames:
        print "<li>%s</li>" % (filename)
    print "</ul>"
    # NOTE(review): the license text is not HTML-escaped; markup inside a
    # license would break the page.
    print "<pre>%s</pre>" % license.license_text
print "</body></html>"
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Dieses Script kontrolliert eine Lösung. Es wird ausgegeben, wie viele Farben
# verwendet wurden und wie groß das Dreieck ist.
# Funktioniert nur mit korrekter Eingabedatei (keine Leerzeichen zum trennen der
# Farben, neue Zeilen für eine neue Ebene des n-Traumrechtecks)
import copy, os
from optparse import OptionParser
# psyco JIT-accelerates the pure-Python checking loops (Python 2 only).
import psyco
psyco.full()
parser = OptionParser()
parser.add_option("-i", "--input", dest="solutionfile",
                  default= os.getcwd() + '/Beispiele/solution-4-errors.txt',
                  help="Textdatei mit der Loesung",
                  metavar="FILE")
parser.add_option("-s", "--short",
                  action="store_true", dest="short", default=False,
                  help="Die Meldungen erscheinen in Kurzform.")
parser.add_option("-e", "--errors",
                  action="store_true", dest="display_errors", default=False,
                  help="Die Fehler werden ausgegeben.")
(options, args) = parser.parse_args()
solutionfile = options.solutionfile
display_errors=options.display_errors
# Coordinate sketch from the original author (m columns, n rows):
# 12345m = x
# 2
# 3
# 4
# 5
# n = y
# Get Data######################################################################
# my_data[level - starts at the top][position - starts at the left]
def check_all_left_top(rectangle, x1, y1):
    """Return every monochromatic axis-aligned rectangle with top-left (x1, y1).

    @param rectangle: grid indexed as rectangle[row][column]
    @param x1: row index of the fixed top-left corner
    @param y1: column index of the fixed top-left corner
    @return: list of errors; each error lists the four corner coordinates
        [(x1, y1), (x1, y2), (x2, y1), (x2, y2)]
    """
    error_list = []
    links_oben = rectangle[x1][y1]
    # Bound the loops by the actual grid dimensions instead of the module
    # globals m/n: the original bounded rows by the column count and columns
    # by the row count, which only worked for square grids (IndexError or
    # missed errors otherwise). range (not py2-only xrange) keeps this
    # portable.
    for x2 in range(x1 + 1, len(rectangle)):
        rechts_oben = rectangle[x2][y1]
        for y2 in range(y1 + 1, len(rectangle[x1])):
            links_unten = rectangle[x1][y2]
            rechts_unten = rectangle[x2][y2]
            if links_oben == links_unten == rechts_oben == rechts_unten:
                error_list.append([(x1,y1),(x1,y2), (x2, y1), (x2, y2)])
    return error_list
def get_all_errors(rectangle):
    """Collect the error rectangles reported for every possible top-left corner."""
    errors = []
    for row_index, row in enumerate(rectangle):
        for col_index, _ in enumerate(row):
            errors.extend(check_all_left_top(rectangle, row_index, col_index))
    return errors
def get_data(filename):
    """Read the solution file and return one whitespace-stripped string per line.

    @param filename: path of the solution text file
    @return: list of stripped lines (one grid row per entry)
    """
    # 'with' guarantees the handle is closed; the original leaked it.
    with open(filename, 'r') as f:
        return [line.strip() for line in f]
def get_colors(my_data):
    """Return the distinct colors of the grid in first-seen order.

    @param my_data: iterable of rows, each row an iterable of color symbols
    @return: list of unique colors, ordered by first appearance
    """
    seen = set()
    colors = []
    for row in my_data:
        for color in row:
            # O(1) set membership replaces the original O(n) list scan.
            if color not in seen:
                seen.add(color)
                colors.append(color)
    return colors
# Load the solution, count the distinct colors used, and report every
# monochromatic axis-aligned rectangle as an error.
rectangle = get_data(solutionfile)
color_list = get_colors(rectangle)
# n = number of rows, m = number of columns of the grid.
n = len(rectangle)
m = len(rectangle[0])
error_list = get_all_errors(rectangle)
print str(n) + "x" + str(m)
print "Es wurden " + str(len(color_list)) + " Farben verwendet."
print "Es wurden " + str(len(error_list))+ " Fehler gefunden."
if display_errors:
    for error in error_list:
        print error
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Dieses Script kontrolliert eine Lösung. Es wird ausgegeben, wie viele Farben
# verwendet wurden und wie groß das Dreieck ist.
# Funktioniert nur mit korrekter Eingabedatei (keine Leerzeichen zum trennen der
# Farben, neue Zeilen für eine neue Ebene des n-Traumrechtecks)
import copy, os
from optparse import OptionParser
# psyco JIT-accelerates the pure-Python checking loops (Python 2 only).
import psyco
psyco.full()
parser = OptionParser()
parser.add_option("-i", "--input", dest="solutionfile",
                  default= os.getcwd() + '/Beispiele/solution-4-errors.txt',
                  help="Textdatei mit der Loesung",
                  metavar="FILE")
parser.add_option("-s", "--short",
                  action="store_true", dest="short", default=False,
                  help="Die Meldungen erscheinen in Kurzform.")
parser.add_option("-e", "--errors",
                  action="store_true", dest="display_errors", default=False,
                  help="Die Fehler werden ausgegeben.")
(options, args) = parser.parse_args()
solutionfile = options.solutionfile
display_errors=options.display_errors
# Coordinate sketch from the original author (m columns, n rows):
# 12345m = x
# 2
# 3
# 4
# 5
# n = y
# Get Data######################################################################
# my_data[level - starts at the top][position - starts at the left]
def check_all_left_top(rectangle, x1, y1):
    """Return every monochromatic axis-aligned rectangle with top-left (x1, y1).

    @param rectangle: grid indexed as rectangle[row][column]
    @param x1: row index of the fixed top-left corner
    @param y1: column index of the fixed top-left corner
    @return: list of errors; each error lists the four corner coordinates
        [(x1, y1), (x1, y2), (x2, y1), (x2, y2)]
    """
    error_list = []
    links_oben = rectangle[x1][y1]
    # Bound the loops by the actual grid dimensions instead of the module
    # globals m/n: the original bounded rows by the column count and columns
    # by the row count, which only worked for square grids (IndexError or
    # missed errors otherwise). range (not py2-only xrange) keeps this
    # portable.
    for x2 in range(x1 + 1, len(rectangle)):
        rechts_oben = rectangle[x2][y1]
        for y2 in range(y1 + 1, len(rectangle[x1])):
            links_unten = rectangle[x1][y2]
            rechts_unten = rectangle[x2][y2]
            if links_oben == links_unten == rechts_oben == rechts_unten:
                error_list.append([(x1,y1),(x1,y2), (x2, y1), (x2, y2)])
    return error_list
def get_all_errors(rectangle):
    """Run the corner check from every cell and concatenate the findings."""
    errors = []
    for x_pos, level in enumerate(rectangle):
        for y_pos in range(len(level)):
            errors += check_all_left_top(rectangle, x_pos, y_pos)
    return errors
def get_data(filename):
    """Read the solution file and return one whitespace-stripped string per line.

    @param filename: path of the solution text file
    @return: list of stripped lines (one grid row per entry)
    """
    # 'with' guarantees the handle is closed; the original leaked it.
    with open(filename, 'r') as f:
        return [line.strip() for line in f]
def get_colors(my_data):
    """Return the distinct colors of the grid in first-seen order.

    @param my_data: iterable of rows, each row an iterable of color symbols
    @return: list of unique colors, ordered by first appearance
    """
    seen = set()
    colors = []
    for row in my_data:
        for color in row:
            # O(1) set membership replaces the original O(n) list scan.
            if color not in seen:
                seen.add(color)
                colors.append(color)
    return colors
# Load the solution, count the distinct colors used, and report every
# monochromatic axis-aligned rectangle as an error.
rectangle = get_data(solutionfile)
color_list = get_colors(rectangle)
# n = number of rows, m = number of columns of the grid.
n = len(rectangle)
m = len(rectangle[0])
error_list = get_all_errors(rectangle)
print str(n) + "x" + str(m)
print "Es wurden " + str(len(color_list)) + " Farben verwendet."
print "Es wurden " + str(len(error_list))+ " Fehler gefunden."
if display_errors:
    for error in error_list:
        print error
| Python |
'''
Module which brings history information about files from Mercurial.
@author: Rodrigo Damazio
'''
import re
import subprocess
# Matches the leading 12-hex-digit changeset hash in `hg annotate -c` output.
REVISION_REGEX = re.compile(r'(?P<hash>[0-9a-f]{12}):.*')
def _GetOutputLines(args):
'''
Runs an external process and returns its output as a list of lines.
@param args: the arguments to run
'''
process = subprocess.Popen(args,
stdout=subprocess.PIPE,
universal_newlines = True,
shell = False)
output = process.communicate()[0]
return output.splitlines()
def FillMercurialRevisions(filename, parsed_file):
    '''
    Fills the revs attribute of all strings in the given parsed file with
    a list of revisions that touched the lines corresponding to that string.

    @param filename: the name of the file to get history for
    @param parsed_file: the parsed file to modify
    @raise ValueError: when `hg annotate` produces an unparseable line
        (the original raised a plain string, which is itself a TypeError
        on Python >= 2.6)
    '''
    # Take output of hg annotate to get revision of each line
    output_lines = _GetOutputLines(['hg', 'annotate', '-c', filename])
    # Create a map of line -> revision (key is list index, line 0 doesn't exist)
    line_revs = ['dummy']
    for line in output_lines:
        rev_match = REVISION_REGEX.match(line)
        if not rev_match:
            raise ValueError('Unexpected line of output from hg: %s' % line)
        rev_hash = rev_match.group('hash')
        line_revs.append(rev_hash)
    # 'string_info' instead of the original 'str', which shadowed the builtin.
    for string_info in parsed_file.itervalues():
        # Get the lines that correspond to each string
        start_line = string_info['startLine']
        end_line = string_info['endLine']
        # Get the revisions that touched those lines
        revs = []
        for line_number in range(start_line, end_line + 1):
            revs.append(line_revs[line_number])
        # Merge with any revisions that were already there
        # (for explict revision specification)
        if 'revs' in string_info:
            revs += string_info['revs']
        # Assign the revisions to the string
        string_info['revs'] = frozenset(revs)
def DoesRevisionSuperceed(filename, rev1, rev2):
    '''
    Tells whether rev1 superceeds rev2 for the given file, i.e. rev2 is an
    ancestor of rev1 or the two revisions are identical.

    @param filename: the file whose history is consulted
    @param rev1: the revision that may be superceeding the other
    @param rev2: the revision that may be superceeded
    @return: True if rev1 superceeds rev2 or they're the same
    '''
    if rev1 == rev2:
        return True
    # Short hashes of every ancestor of rev1 that touched the file.
    ancestor_hashes = _GetOutputLines(
        ['hg', 'log', '-r', 'ancestors(%s)' % rev1,
         '--template', '{node|short}\n', filename])
    return rev2 in ancestor_hashes
def NewestRevision(filename, rev1, rev2):
    '''
    Returns whichever of two revisions is closest to the head of the
    repository; when neither is an ancestor of the other, either may win.

    @param rev1: the first revision
    @param rev2: the second revision
    '''
    return rev1 if DoesRevisionSuperceed(filename, rev1, rev2) else rev2
#!/usr/bin/python
'''
Entry point for My Tracks i18n tool.
@author: Rodrigo Damazio
'''
import mytracks.files
import mytracks.translate
import mytracks.validate
import sys
def Usage():
    '''Prints command-line usage and exits with status 1.'''
    print 'Usage: %s <command> [<language> ...]\n' % sys.argv[0]
    print 'Commands are:'
    print ' cleanup'
    print ' translate'
    print ' validate'
    sys.exit(1)
def Translate(languages):
    '''
    Asks the user to interactively translate any missing or outdated strings
    from the files for the given languages.

    @param languages: the languages to translate
    '''
    validator = mytracks.validate.Validator(languages)
    validator.Validate()
    missing = validator.missing_in_lang()
    outdated = validator.outdated_in_lang()
    for lang in languages:
        # The validator only records languages that actually have problems,
        # and stores missing keys as a set but outdated keys as a list: the
        # original `missing[lang] + outdated[lang]` raised KeyError for
        # complete languages (including 'en') and TypeError on set + list.
        untranslated = list(missing.get(lang, [])) + list(outdated.get(lang, []))
        if not untranslated:
            continue
        translator = mytracks.translate.Translator(lang)
        translator.Translate(untranslated)
def Validate(languages):
    '''
    Computes and displays errors in the string files for the given languages.

    @param languages: the languages to compute for
    @return: the total number of errors found (0 when all files are OK)
    '''
    validator = mytracks.validate.Validator(languages)
    validator.Validate()
    error_count = 0
    if (validator.valid()):
        print 'All files OK'
    else:
        for lang, missing in validator.missing_in_master().iteritems():
            print 'Missing in master, present in %s: %s:' % (lang, str(missing))
            error_count = error_count + len(missing)
        for lang, missing in validator.missing_in_lang().iteritems():
            print 'Missing in %s, present in master: %s:' % (lang, str(missing))
            error_count = error_count + len(missing)
        for lang, outdated in validator.outdated_in_lang().iteritems():
            print 'Outdated in %s: %s:' % (lang, str(outdated))
            error_count = error_count + len(outdated)
    return error_count
# Dispatch the command given on the command line, optionally restricted to a
# subset of languages.
if __name__ == '__main__':
    argv = sys.argv
    argc = len(argv)
    if argc < 2:
        Usage()
    languages = mytracks.files.GetAllLanguageFiles()
    if argc == 3:
        langs = set(argv[2:])
        if not langs.issubset(languages):
            # Raising a plain string is a TypeError on modern Pythons.
            raise ValueError('Language(s) not found')
        # Filter just to the languages specified
        languages = dict((lang, lang_file)
                         for lang, lang_file in languages.iteritems()
                         if lang in langs or lang == 'en' )
    # Initialize before dispatching: the original only assigned error_count
    # on the 'validate' path, so 'translate' crashed with NameError at the
    # final print.
    error_count = 0
    cmd = argv[1]
    if cmd == 'translate':
        Translate(languages)
    elif cmd == 'validate':
        error_count = Validate(languages)
    else:
        Usage()
    print('%d errors found.' % error_count)
| Python |
'''
Module which prompts the user for translations and saves them.
TODO: implement
@author: Rodrigo Damazio
'''
class Translator(object):
    '''
    Interactive translation helper for a single language.

    TODO: implement the actual prompting and saving.
    '''

    def __init__(self, language):
        '''Remembers which language this translator works on.'''
        self._language = language

    def Translate(self, string_names):
        '''Placeholder: just echoes the names of the strings to translate.'''
        print(string_names)
'''
Module which compares languague files to the master file and detects
issues.
@author: Rodrigo Damazio
'''
import os
from mytracks.parser import StringsParser
import mytracks.history
class Validator(object):
    """Compares each language's strings.xml against the English master and
    records missing and outdated keys."""
    def __init__(self, languages):
        '''
        Builds a strings file validator.

        Parses strings.xml for every language up front and annotates each
        string with the Mercurial revisions that touched its lines.

        Params:
        @param languages: a dictionary mapping each language to its corresponding directory
        '''
        self._langs = {}
        self._master = None
        self._language_paths = languages
        parser = StringsParser()
        for lang, lang_dir in languages.iteritems():
            filename = os.path.join(lang_dir, 'strings.xml')
            parsed_file = parser.Parse(filename)
            mytracks.history.FillMercurialRevisions(filename, parsed_file)
            # 'en' is the master everything else is compared against.
            if lang == 'en':
                self._master = parsed_file
            else:
                self._langs[lang] = parsed_file
        self._Reset()
    def Validate(self):
        '''
        Computes whether all the data in the files for the given languages is valid.
        Results are exposed through valid() and the three accessor methods.
        '''
        self._Reset()
        self._ValidateMissingKeys()
        self._ValidateOutdatedKeys()
    def valid(self):
        # True when the last Validate() run found no problems at all.
        return (len(self._missing_in_master) == 0 and
                len(self._missing_in_lang) == 0 and
                len(self._outdated_in_lang) == 0)
    def missing_in_master(self):
        # dict: language -> keys present in that language but not in master.
        return self._missing_in_master
    def missing_in_lang(self):
        # dict: language -> keys present in master but not in that language.
        return self._missing_in_lang
    def outdated_in_lang(self):
        # dict: language -> keys whose master entry changed more recently.
        return self._outdated_in_lang
    def _Reset(self):
        # These are maps from language to string name list
        self._missing_in_master = {}
        self._missing_in_lang = {}
        self._outdated_in_lang = {}
    def _ValidateMissingKeys(self):
        '''
        Computes whether there are missing keys on either side.
        Only languages that actually have discrepancies get an entry.
        '''
        master_keys = frozenset(self._master.iterkeys())
        for lang, file in self._langs.iteritems():
            keys = frozenset(file.iterkeys())
            missing_in_master = keys - master_keys
            missing_in_lang = master_keys - keys
            if len(missing_in_master) > 0:
                self._missing_in_master[lang] = missing_in_master
            if len(missing_in_lang) > 0:
                self._missing_in_lang[lang] = missing_in_lang
    def _ValidateOutdatedKeys(self):
        '''
        Computes whether any of the language keys are outdated with relation to the
        master keys.
        '''
        for lang, file in self._langs.iteritems():
            outdated = []
            for key, str in file.iteritems():
                # Get all revisions that touched master and language files for this
                # string.
                master_str = self._master[key]
                master_revs = master_str['revs']
                lang_revs = str['revs']
                if not master_revs or not lang_revs:
                    print 'WARNING: No revision for %s in %s' % (key, lang)
                    continue
                master_file = os.path.join(self._language_paths['en'], 'strings.xml')
                lang_file = os.path.join(self._language_paths[lang], 'strings.xml')
                # Assume that the repository has a single head (TODO: check that),
                # and as such there is always one revision which superceeds all others.
                master_rev = reduce(
                    lambda r1, r2: mytracks.history.NewestRevision(master_file, r1, r2),
                    master_revs)
                lang_rev = reduce(
                    lambda r1, r2: mytracks.history.NewestRevision(lang_file, r1, r2),
                    lang_revs)
                # If the master version is newer than the lang version.
                # NOTE(review): DoesRevisionSuperceed() also returns True for
                # equal revisions, so a string whose latest change is shared
                # by master and language is still flagged - confirm intended.
                if mytracks.history.DoesRevisionSuperceed(lang_file, master_rev, lang_rev):
                    outdated.append(key)
            if len(outdated) > 0:
                self._outdated_in_lang[lang] = outdated
| Python |
'''
Module for dealing with resource files (but not their contents).
@author: Rodrigo Damazio
'''
import os.path
from glob import glob
import re
# Location of the Android resources relative to the project root, the master
# (English) values directory, and the glob matching translated ones.
MYTRACKS_RES_DIR = 'MyTracks/res'
ANDROID_MASTER_VALUES = 'values'
ANDROID_VALUES_MASK = 'values-*'
def GetMyTracksDir():
    '''
    Returns the directory in which the MyTracks directory is located.

    Walks upward from the current working directory until a directory
    containing MYTRACKS_RES_DIR is found.

    @raise RuntimeError: when the filesystem root is reached without a match
        (the original raised a plain string, which is itself a TypeError on
        modern Pythons)
    '''
    path = os.getcwd()
    while not os.path.isdir(os.path.join(path, MYTRACKS_RES_DIR)):
        if path == '/':
            raise RuntimeError('Not in My Tracks project')
        # Go up one level
        path = os.path.split(path)[0]
    return path
def GetAllLanguageFiles():
    '''
    Returns a mapping from all found languages to their respective directories.

    @raise RuntimeError: when no translated values-* directory exists or the
        master values directory is missing (the original raised plain
        strings, which are TypeErrors on modern Pythons)
    '''
    mytracks_path = GetMyTracksDir()
    res_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_VALUES_MASK)
    language_dirs = glob(res_dir)
    master_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_MASTER_VALUES)
    if len(language_dirs) == 0:
        raise RuntimeError('No languages found!')
    if not os.path.isdir(master_dir):
        raise RuntimeError("Couldn't find master file")
    # Extract the language code from each 'values-<code>' directory name.
    # ('lang_dir' instead of the original 'dir', which shadowed the builtin.)
    language_tuples = [(re.findall(r'.*values-([A-Za-z-]+)', lang_dir)[0], lang_dir)
                       for lang_dir in language_dirs]
    language_tuples.append(('en', master_dir))
    return dict(language_tuples)
| Python |
'''
Module which parses a string XML file.
@author: Rodrigo Damazio
'''
from xml.parsers.expat import ParserCreate
import re
#import xml.etree.ElementTree as ET
class StringsParser(object):
    '''
    Parser for string XML files.

    This object is not thread-safe and should be used for parsing a single file at
    a time, only.
    '''
    def Parse(self, file):
        '''
        Parses the given file and returns a dictionary mapping keys to an object
        with attributes for that key, such as the value, start/end line and explicit
        revisions.

        In addition to the standard XML format of the strings file, this parser
        supports an annotation inside comments, in one of these formats:
        <!-- KEEP_PARENT name="bla" -->
        <!-- KEEP_PARENT name="bla" rev="123456789012" -->
        Such an annotation indicates that we're explicitly inheriting form the
        master file (and the optional revision says that this decision is compatible
        with the master file up to that revision).

        @param file: the name of the file to parse
        '''
        self._Reset()
        # Unfortunately expat is the only parser that will give us line numbers
        self._xml_parser = ParserCreate()
        self._xml_parser.StartElementHandler = self._StartElementHandler
        self._xml_parser.EndElementHandler = self._EndElementHandler
        self._xml_parser.CharacterDataHandler = self._CharacterDataHandler
        self._xml_parser.CommentHandler = self._CommentHandler
        file_obj = open(file)
        self._xml_parser.ParseFile(file_obj)
        file_obj.close()
        return self._all_strings
    def _Reset(self):
        # Parsing state: the <string> element currently open (if any) and
        # the accumulated results.
        self._currentString = None
        self._currentStringName = None
        self._currentStringValue = None
        self._all_strings = {}
    def _StartElementHandler(self, name, attrs):
        # Opens a <string name="..."> element and starts collecting its text.
        if name != 'string':
            return
        if 'name' not in attrs:
            return
        assert not self._currentString
        assert not self._currentStringName
        self._currentString = {
            'startLine' : self._xml_parser.CurrentLineNumber,
        }
        # An explicit rev attribute seeds the string's revision list.
        if 'rev' in attrs:
            self._currentString['revs'] = [attrs['rev']]
        self._currentStringName = attrs['name']
        self._currentStringValue = ''
    def _EndElementHandler(self, name):
        # Closes the current <string> element and records it in the results.
        if name != 'string':
            return
        assert self._currentString
        assert self._currentStringName
        self._currentString['value'] = self._currentStringValue
        self._currentString['endLine'] = self._xml_parser.CurrentLineNumber
        self._all_strings[self._currentStringName] = self._currentString
        self._currentString = None
        self._currentStringName = None
        self._currentStringValue = None
    def _CharacterDataHandler(self, data):
        # expat may deliver text in multiple chunks; append them all.
        if not self._currentString:
            return
        self._currentStringValue += data
    # Matches the KEEP_PARENT annotation, with an optional 12-hex-digit rev.
    _KEEP_PARENT_REGEX = re.compile(r'\s*KEEP_PARENT\s+'
                                    r'name\s*=\s*[\'"]?(?P<name>[a-z0-9_]+)[\'"]?'
                                    r'(?:\s+rev=[\'"]?(?P<rev>[0-9a-f]{12})[\'"]?)?\s*',
                                    re.MULTILINE | re.DOTALL)
    def _CommentHandler(self, data):
        # Recognizes <!-- KEEP_PARENT ... --> annotations and records a
        # placeholder entry that inherits the master's value.
        keep_parent_match = self._KEEP_PARENT_REGEX.match(data)
        if not keep_parent_match:
            return
        name = keep_parent_match.group('name')
        self._all_strings[name] = {
            'keepParent' : True,
            'startLine' : self._xml_parser.CurrentLineNumber,
            'endLine' : self._xml_parser.CurrentLineNumber
        }
        rev = keep_parent_match.group('rev')
        if rev:
            self._all_strings[name]['revs'] = [rev]
'''
Created on 18 avr. 12
@author: peterd
'''
class CalculationError(Exception):
    """Raised when the cutting plan cannot place every custom piece.

    Derives from Exception: the original empty classic class could not be
    raised at all on Python 3, while existing `except CalculationError`
    handlers keep working.
    """
    pass
def get_1d_cutting_plan(stock_piece_length, custom_pieces, cut_thickness):
    """Computes a 1-D cutting plan fitting every requested piece onto stock.

    Phase 1 enumerates candidate "packs" (combinations of consecutive custom
    pieces that fit on one stock piece), keyed by the waste each leaves.
    Phase 2 greedily commits the packs with the least waste, reusing leftover
    length from already-committed packs when a piece fits. Phase 3 renders
    the result, charging cut_thickness to every piece that is not a full
    stock length.

    @param stock_piece_length: length of one uncut stock piece
    @param custom_pieces: list of the requested piece lengths
    @param cut_thickness: material lost per cut
    @return: dict with 'schema' (list of {'id', 'pack', 'waste'} entries),
        'total_stock_num' and 'total_waste'
    @raise CalculationError: when not every custom piece could be placed

    NOTE(review): recovered from a whitespace-mangled source; the block
    nesting was reconstructed from the control flow and should be
    double-checked against the original.
    """
    num_custom_pieces = len(custom_pieces)
    # waste -> list of packs; a pack is a list of (piece_index, piece_length).
    cutting_schema = {}
    i = 0
    while i < num_custom_pieces:
        num_elem = 1
        prev_custom_packs = []
        while num_elem <= num_custom_pieces - i:
            k = num_elem - 1
            n_elem_custom_packs = []
            custom_pack = []
            if num_elem == 1:
                # Single-piece packs are seeded directly.
                custom_pack.append((i + k, custom_pieces[i + k]))
                n_elem_custom_packs.append(custom_pack)
                # TODO: move to function
                custom_pack_waste = stock_piece_length - custom_pieces[i + k]
                if cutting_schema.has_key(custom_pack_waste):
                    if custom_pack not in cutting_schema[custom_pack_waste]:
                        cutting_schema[custom_pack_waste].append(custom_pack)
                else:
                    cutting_schema[custom_pack_waste] = [custom_pack]
            else:
                # Extend each (num_elem-1)-pack with later pieces while they
                # still fit on the stock piece.
                for prev_custom_pack in prev_custom_packs:
                    l = k
                    while l < num_custom_pieces:
                        custom_pack = list(prev_custom_pack)
                        custom_pack_used_length = 0
                        for custom_piece in custom_pack:
                            custom_pack_used_length = custom_pack_used_length + custom_piece[1]
                        n = l
                        if i + n < num_custom_pieces:
                            while len(custom_pack) < num_elem:
                                try:
                                    if custom_pack_used_length + custom_pieces[i + n] > stock_piece_length:
                                        break
                                except IndexError:
                                    pass
                                custom_pack.append((i + n, custom_pieces[i + n]))
                                custom_pack_used_length = custom_pack_used_length + custom_pieces[i + n]
                                n = n + 1
                                if i + n == num_custom_pieces:
                                    break
                            n_elem_custom_packs.append(custom_pack)
                            custom_pack_waste = stock_piece_length - custom_pack_used_length
                            # TODO: move to function
                            if cutting_schema.has_key(custom_pack_waste):
                                if custom_pack not in cutting_schema[custom_pack_waste]:
                                    cutting_schema[custom_pack_waste].append(custom_pack)
                            else:
                                cutting_schema[custom_pack_waste] = [custom_pack]
                        l = l + 1
                    k = k + 1
            num_elem = num_elem + 1
            prev_custom_packs = list(n_elem_custom_packs)
        i = i + 1
    # Phase 2: commit packs in order of increasing waste, skipping any pack
    # that reuses an already-placed piece.
    custom_pieces_ids = []
    bestfit_cutting_schema = {}
    for waste in sorted(cutting_schema.iterkeys()):
        for custom_pack in cutting_schema[waste]:
            already_packed = False
            for custom_piece in custom_pack:
                custom_piece_id = custom_piece[0]
                if custom_piece_id in custom_pieces_ids:
                    already_packed = True
                    break
            if not already_packed:
                custom_piece_lengths = []
                custom_pack_length = 0
                for custom_piece in custom_pack:
                    custom_piece_id = custom_piece[0]
                    custom_piece_length = custom_piece[1]
                    custom_pieces_ids.append(custom_piece_id)
                    # use remaining length from any of the previous schemas if possible
                    reused_bestfit_custom_pack = []
                    remaining_waste = 0
                    for bestfit_waste in sorted(bestfit_cutting_schema.iterkeys()):
                        i = 0
                        for bestfit_custom_pack in bestfit_cutting_schema[bestfit_waste]:
                            if custom_piece_length <= bestfit_waste:
                                bestfit_custom_pack.append(custom_piece_length)
                                reused_bestfit_custom_pack = list(bestfit_custom_pack)
                                remaining_waste = bestfit_waste - custom_piece_length
                                waste = waste + custom_piece_length
                                break
                            i = i + 1
                        if reused_bestfit_custom_pack:
                            break
                    if not reused_bestfit_custom_pack:
                        custom_piece_lengths.append(custom_piece_length)
                        custom_pack_length = custom_pack_length + custom_piece_length
                    else:
                        # Re-file the reused pack under its reduced leftover.
                        custom_packs = bestfit_cutting_schema[bestfit_waste]
                        if len(custom_packs) == 1:
                            del bestfit_cutting_schema[bestfit_waste]
                        else:
                            del bestfit_cutting_schema[bestfit_waste][i]
                        if bestfit_cutting_schema.has_key(remaining_waste):
                            bestfit_cutting_schema[remaining_waste].append(reused_bestfit_custom_pack)
                        else:
                            bestfit_cutting_schema[remaining_waste] = [reused_bestfit_custom_pack]
                if custom_piece_lengths:
                    if bestfit_cutting_schema.has_key(waste):
                        bestfit_cutting_schema[waste].append(custom_piece_lengths)
                    else:
                        bestfit_cutting_schema[waste] = [custom_piece_lengths]
    if len(custom_pieces_ids) != num_custom_pieces:
        raise CalculationError()
    # Phase 3: render the plan; every piece shorter than a full stock piece
    # pays for one cut.
    cutting_plan = {'schema': [] }
    i = 1
    total_waste = 0
    for bestfit_waste in sorted(bestfit_cutting_schema.iterkeys()):
        for bestfit_custom_pack in bestfit_cutting_schema[bestfit_waste]:
            augmented_custom_pack = []
            for piece in bestfit_custom_pack:
                if piece == stock_piece_length:
                    augmented_custom_pack.append(piece)
                else:
                    augmented_custom_pack.append(piece - cut_thickness)
            cutting_plan['schema'].append({'id': i, 'pack': augmented_custom_pack, 'waste': bestfit_waste})
            total_waste = total_waste + bestfit_waste
            i = i + 1
    cutting_plan['total_stock_num'] = i - 1
    cutting_plan['total_waste'] = total_waste
    return cutting_plan
| Python |
#!/usr/bin/env python
# Standard Django (pre-1.4) management entry point: locate settings.py next
# to this script and hand control to django.core.management.
from django.core.management import execute_manager
import imp
try:
    imp.find_module('settings') # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
    sys.exit(1)
import settings
if __name__ == "__main__":
    execute_manager(settings)
| Python |
from django.conf.urls.defaults import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# NOTE(review): order matters — the unanchored r'^1d-cutter/' pattern would
# also match the solver URL, so the more specific routes come first.
# The dotted view paths use the package name '1d-cutter', which contains a
# hyphen and is not a valid Python identifier — confirm it actually imports.
urlpatterns = patterns('',
    url(r'^solver$', '1d-cutter.views.cutting_plan_solver'),
    url(r'^1d-cutter/solver', '1d-cutter.views.cutting_plan_solver'),
    url(r'^1d-cutter$', '1d-cutter.views.init'),
    url(r'^1d-cutter/', '1d-cutter.views.init'),
)
| Python |
# Django settings for app project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# NOTE(review): the database backend is left unconfigured ('django.db.backends.'
# with no engine name) — the app apparently runs without persistence.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': '', # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review/security): this key is committed to source control; it should
# be regenerated and loaded from the environment for any real deployment.
SECRET_KEY = 'd_@#2nc9-&05aacj5r=z*$_lru)@c2ir%^8tb-t1qwpk6a&h+8'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = '1d-cutter.urls'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    # NOTE(review): machine-specific absolute Windows path — breaks on any
    # other checkout location; consider deriving it from __file__.
    "c:/job_stuff/prj/1d-cutter/templates",
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    '1d-cutter',
    # Uncomment the next line to enable the admin:
    # 'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
| Python |
from django.views.generic.simple import direct_to_template
from solver import *
def init(request):
    """Render the empty 1D cutting-plan input form."""
    context = {}
    return direct_to_template(request, '1d-cutter/1d_cutting_plan_form.html', context)
def cutting_plan_solver(request):
order_num = request.GET['order_num']
stock_type = request.GET['stock_type']
stock_length = int(request.GET['stock_length'])
cut_thickness = int(request.GET['cut_thickness'])
custom_lengths = []
custom_lengths_str = request.GET['custom_lengths'].replace(',', ' ')
for custom_length in custom_lengths_str.split(' '):
try:
custom_length_ext = int(custom_length) + cut_thickness
if custom_length_ext > stock_length:
custom_length_ext = stock_length
custom_lengths.append(custom_length_ext)
except ValueError:
pass
try:
cutting_plan = get_1d_cutting_plan(stock_length, custom_lengths, cut_thickness)
return direct_to_template(request, '1d-cutter/1d_cutting_plan_result.html',
{'order_num': order_num,
'stock_type': stock_type,
'cutting_plan': cutting_plan})
except CalculationError:
return direct_to_template(request, '1d-cutter/1d_cutting_plan_error.html', {'error': 'CalculationError'})
except Exception, e:
return direct_to_template(request, '1d-cutter/1d_cutting_plan_error.html', {'error': e})
| Python |
import time, urllib , sys, threading
# Shared mutable state: the live Worker threads and the URLs left to probe.
workers = []
pendingurls = []
def ex(line):
    """Extract the URL from a Pascal string-literal line and queue it.

    Lines without an http:// URL are ignored. (A commented-out filter in
    the original also restricted this to .pls/.m3u playlist links.)
    """
    if "http://" not in line:
        return
    # Take the content of the first quoted string; drop doubled quotes.
    quoted = line.split("'")[1]
    pendingurls.append(quoted.replace("''", ""))
class Worker(threading.Thread):
    # NOTE(review): `pendingurls` is shared by all workers with no lock. The
    # `while pendingurls` test and the pop() are not atomic together, so a
    # racing thread can drain the list in between and pop() then raises
    # IndexError; that lands in the bare except below, where `url` may still
    # be unbound on the first iteration (NameError) — confirm whether this
    # fires in practice.
    def run(self):
        """Consume URLs from the shared queue and print the dead ones."""
        while pendingurls:
            try:
                ok = False
                url = pendingurls.pop()
                # A live stream answers 200 with a non-HTML payload; an
                # HTML body usually means an error or landing page.
                target = urllib.urlopen(url)
                if target.getcode() == 200:
                    ok = True
                    for line in target:
                        if ("<html" in line.lower()):
                            ok = False
                            break
                if not ok:
                    print "-------------------"
                    print url
                    print "-------------------"
                #else:
                # print self.n ,"ok"
            except:
                # Bare except: any network error marks the URL as bad too.
                print "-------------------"
                print url
                print "-------------------"
        # Deregister so the main thread's wait loop can terminate.
        workers.remove(self)
# Re-initialise the queue, then load candidate URLs from the Pascal source.
pendingurls = []
print "parsing file"
plsfile = open("radios.pas")
for line in plsfile:
    ex(line)
plsfile.close()
print "starting threads"
# Ten workers drain the shared queue concurrently.
for i in range(10):
    worker = Worker()
    workers.append(worker)
    worker.start()
print "waiting threads"
# Poll until every worker has removed itself from `workers`.
while workers:
    time.sleep(1)
    print len(pendingurls),"remaining"
print "done!!!"
# Keep the console window open (handy when run by double-click on Windows).
raw_input()
| Python |
import struct, string, time, os
# XOR key used by crypt(); `dst` accumulates the output chunks.
pasw = 704
dst = []
def crypt(text):
    """Obfuscate *text* by XOR-ing each byte with a position-dependent key.

    The leading "http://" prefix is removed first; the key depends on the
    module-level `pasw` constant, the character position and the text
    length modulo 10.
    """
    text = text.replace("http://", "")
    key = len(text) % 10
    out = []
    for pos, ch in enumerate(text):
        out.append(chr((ord(ch) ^ ((pasw * (pos + 1)) + key)) % 256))
    return "".join(out)
def writeint8(num):
    """Append *num* (0-255) to the output buffer as one unsigned byte."""
    data = struct.pack("B",num)
    dst.append(data)
def writestring(text):
    """Append *text* as a one-byte-length-prefixed string.

    NOTE(review): the "B" prefix limits text to 255 characters;
    struct.pack raises struct.error for anything longer.
    """
    l = len(text)
    data = struct.pack("B" + str(l) + "s",l,text)
    dst.append(data)
def getarraysize(line):
    """Return the element count of a Pascal 'array[0..N]' declaration line."""
    upper_bound = line[line.find("..") + 2 : line.find("]")]
    return int(upper_bound) + 1
def getarraycontent(line):
    """Return the text between the outermost single quotes, un-doubling ''."""
    start = line.find("'") + 1
    stop = line.rfind("'")
    return line[start:stop].replace("''", "'")
def error(msg):
    """Report a fatal inconsistency and wait for a keypress (does not exit)."""
    print 'Houston, we have a problem'
    print msg
    raw_input()
# Parser state machine: bParse is True inside a `const ... );` section and
# iLevel tracks which of the parallel Pascal arrays is being read (map below).
bParse = False
iLevel = -2
genres = []
chn = []
pls = []
totalcount = 0
tStart = time.clock()
srcfile = open("radios.pas", "r")
# -2 genrelist array
# -1 content
# 0 chn_ array
# 1 content
# 2 pls_ array
# 3 content
for line in srcfile:
    if "// " in line: # comented line
        continue
    if "const" in line:
        bParse = True
    elif ");" in line:
        # End of an array: advance the state machine; after the third array
        # (iLevel wraps back to 0) flush one complete genre to `dst`.
        bParse = False
        if iLevel < 3:
            iLevel += 1
        else:
            iLevel = 0
            # check if both lists have same size
            if len(chn) <> len(pls):
                error("%s chn=%d pls=%d" % (genres[0], len(chn), len(pls)))
            slist = [] # a list that we will sort
            for i1, i2 in zip(chn,pls):
                slist.append((i1,i2))
            chn = []
            pls = []
            slist.sort()
            totalcount += len(slist)
            print "%s %d" % (genres[0], len(slist))
            # write to file: '+genre' header, then '-name' / '1url' pairs.
            dst.append('\n');
            dst.append('+' + genres.pop(0) + '\n')
            for i1, i2 in slist:
                dst.append('-' + i1 + '\n')
                dst.append('1' + i2 + '\n')
    elif bParse:
        if iLevel == -2:
            size = getarraysize(line)
            print "%d genres" % size
            iLevel += 1
        elif iLevel == -1:
            genres.append(getarraycontent(line))
        elif iLevel in (0,2):
            # Skip the chn_/pls_ array header lines themselves.
            iLevel += 1
        elif iLevel == 1:
            chn.append(getarraycontent(line))
        elif iLevel == 3:
            pls.append(getarraycontent(line))
# Concatenate the chunks and dump everything as plain text.
dst = "".join(dst)
srcfile.close()
dstfile = open("result.txt","w")
dstfile.writelines(dst)
dstfile.close()
print "OK, %d radios converted and saved in %fs" % (totalcount, time.clock() - tStart)
raw_input()
| Python |
import struct, string, time, os
# XOR key for the (currently disabled) crypt(); `dst` buffers the output.
pasw = 704
dst = []
def crypt(text):
    """Currently a no-op apart from stripping any 'http://' prefix.

    The XOR obfuscation used by the sibling converter is disabled here;
    only the scheme prefix is removed before the URL is stored.
    """
    return text.replace("http://", "")
def writeint8(num):
    """Append *num* (0-255) to the output buffer as one unsigned byte."""
    data = struct.pack("B",num)
    dst.append(data)
def writestring(text):
    """Append *text* as a one-byte-length-prefixed string.

    NOTE(review): the "B" prefix limits text to 255 characters;
    struct.pack raises struct.error for anything longer.
    """
    l = len(text)
    data = struct.pack("B" + str(l) + "s",l,text)
    dst.append(data)
def getarraysize(line):
    """Return the element count of a Pascal 'array[0..N]' declaration line."""
    n = line[line.find("..") + 2 : line.find("]")]
    return 1 + int(n)
def getarraycontent(line):
    """Return the text between the outermost single quotes, un-doubling ''."""
    inner = line[line.find("'") + 1 : line.rfind("'")]
    return inner.replace("''", "'")
def error(msg):
    """Report a fatal inconsistency and wait for a keypress (does not exit)."""
    print 'Houston, we have a problem'
    print msg
    raw_input()
# Parser state machine (level map below). This variant writes the packed
# binary db.dat and then re-emits the same bytes as a Pascal include file.
bParse = False
iLevel = -2
genres = []
chn = []
pls = []
totalcount = 0
tStart = time.clock()
srcfile = open("radios.pas", "r")
dstfile = open("db.dat", "wb")
# -2 genrelist array
# -1 content
# 0 chn_ array
# 1 content
# 2 pls_ array
# 3 content
for line in srcfile:
    if "// " in line: # comented line
        continue
    if "const" in line:
        bParse = True
    elif ");" in line:
        # End of an array: advance the state machine; after the third array
        # (iLevel wraps back to 0) flush one complete genre.
        bParse = False
        if iLevel < 3:
            iLevel += 1
        else:
            iLevel = 0
            # check if both lists have same size
            if len(chn) <> len(pls):
                error("%s chn=%d pls=%d" % (genres[0], len(chn), len(pls)))
            slist = [] # a list that we will sort
            for i1, i2 in zip(chn,pls):
                slist.append((i1,i2))
            chn = []
            pls = []
            slist.sort()
            totalcount += len(slist)
            print "%s %d" % (genres[0], len(slist))
            # write to file: genre name, entry count, then name/url pairs.
            writestring(genres.pop(0))
            writeint8(len(slist))
            for i1, i2 in slist:
                writestring(i1)
                writestring(crypt(i2))
    elif bParse:
        if iLevel == -2:
            size = getarraysize(line)
            print "%d genres" % size
            writeint8(size)
            iLevel += 1
        elif iLevel == -1:
            genres.append(getarraycontent(line))
        elif iLevel in (0,2):
            # Skip the chn_/pls_ array header lines themselves.
            iLevel += 1
        elif iLevel == 1:
            chn.append(getarraycontent(line))
        elif iLevel == 3:
            pls.append(getarraycontent(line))
# Flush the packed records to db.dat.
dst = "".join(dst)
dstfile.write(dst)
dstfile.close()
srcfile.close()
# Also emit the same bytes as a Pascal constant array, 12 values per line.
dstsize = len(dst)
dstfile = open("../engine/db.inc","w")
dstfile.write("const dbdata : array[0..%d] of Byte = (\n" % (dstsize -1 ,))
srcpos = 0
for c in dst:
    if srcpos > 0:
        dstfile.write(",")
    if srcpos % 12 == 0:
        dstfile.write("\n")
    dstfile.write(str(ord(c)))
    srcpos += 1
dstfile.write("\n);");
dstfile.close()
print "OK, %d radios sorted and saved in %fs" % (totalcount, time.clock() - tStart)
raw_input()
| Python |
# File extensions of build artefacts / backup files that will be deleted.
extlist = [".bk1",".bk2",".$$$",".local",".a",".tmp",".drc",".o",".cfg",".ddp",
           ".stat",".pec2bac",".identcache",".dcu",".ppu",".depend",".layout",".win"] #put extensions to delete
import sys, os, subprocess
print "START THE CLEARING PROCESS"
print "DELETING FILES WITH THE FOLLOWING EXT"
print extlist
i = 0
# Walk the current directory recursively and delete matching files.
# NOTE(review): deletion is immediate and unconditional — no dry-run mode.
for root, dirs, files in os.walk(os.getcwd()):
    for file in files: # `file` shadows the builtin; kept as in original
        #for ext in extlist:
        fileext = os.path.splitext(file)[1]
        if fileext in extlist:
            filepath = os.path.join(root,file)
            print filepath
            os.remove(filepath)
            i+=1
print "%d files found and deleted" % i
print "Exiting..."
#!/usr/bin/env python
#coding=utf-8
"""
Author: Xia Kai <xiaket@gmail.com>
Filename: models.py
Type: Class definition
Last modified: 2010-05-24 22:27
Description:
This file contains a class that would turn a dictionary containing user
information as was returned by json request into a T163User class. So we may
more convienently retrieve user information.
"""
from utils import parse_timestring
class T163UserBase(object):
    """Publicly accessible profile data for a t.163.com user.

    Built from the JSON user dictionary the API returns. Notable raw
    fields (original notes were in Chinese): the profile_image_url_*
    keys are the 80x80 / 48x48 / 24x24 avatars, screen_name is the
    display name, followers_count / friends_count are the
    who-follows-me / who-I-follow counts, statuses_count is the number
    of posts and created_at is the registration time.
    """

    def __init__(self, userdict):
        """Copy the supported fields of *userdict* onto the instance."""
        # Account identity.
        self.id = userdict['id']
        self.screen_name = userdict['screen_name']
        self.name = userdict['name']
        # Profile text.
        self.url = userdict['url']
        self.description = userdict['description']
        self.location = userdict['location']
        # Social counters.
        self.followers_count = userdict['followers_count']
        self.statuses_count = userdict['statuses_count']
        self.friends_count = userdict['friends_count']
        self.favourites_count = userdict['favourites_count']
        # Numeric gender code -> single letter; anything else is unknown.
        gender_code = userdict['gender']
        if gender_code == 0:
            self.gender = 'M'
        elif gender_code == 1:
            self.gender = 'F'
        else:
            self.gender = 'U'
        # Registration time, converted to a datetime object.
        self.created_at = parse_timestring(userdict['created_at'])
        # Fields such as 'verified', the avatar URLs, the homepage colour
        # settings and 'reply_type' exist in the dictionary but are
        # intentionally not mapped yet.
class User(T163UserBase):
    """The authenticated account itself: adds the private contact fields
    (telephone, email) that only the owner can see."""

    def __init__(self, userdict):
        T163UserBase.__init__(self, userdict)
        # Owner-only contact information.
        self.telephone = userdict['telephone']
        self.email = userdict['email']
class Follower(T163UserBase):
    """A user in a follower/friend listing, plus relationship flags.

    On top of T163UserBase it records whether that user follows you
    (followed_by), whether you follow them (following) and their most
    recent status (raw dictionary, not wrapped).
    """

    def __init__(self, userdict):
        T163UserBase.__init__(self, userdict)
        # Mutual-follow flags and the user's latest status dict.
        self.followed_by = userdict['followed_by']
        self.status = userdict['status']
        self.following = userdict['following']
class T163StatusBase(object):
    """Core fields shared by every status (post) representation.

    Built from the JSON status dictionary: author id, text, posting
    time, retweet/reply linkage, posting client and the message id.
    """

    def __init__(self, status_dict):
        """Copy the supported status fields onto the instance."""
        # Author and content.
        self.user_id = status_dict['user_id']
        self.text = status_dict['text']
        # Posting time, converted to a datetime object.
        self.created_at = parse_timestring(status_dict['created_at'])
        # Retweet / reply linkage.
        self.retweet_status_id = status_dict['retweet_status_id']
        self.in_reply_to_status_id = status_dict['in_reply_to_status_id']
        self.in_reply_to_screen_name = status_dict['in_reply_to_screen_name']
        self.in_reply_to_user_id = status_dict['in_reply_to_user_id']
        # Posting client and this message's own id.
        self.source = status_dict['source']
        self.id = status_dict['id']
        # 'truncated' and 'type' exist in the dict but are not mapped yet.
class Status(T163StatusBase):
    """Detailed status as returned by the show() API; adds the author.

    The embedded 'user' dictionary is wrapped as a T163UserBase.
    """

    def __init__(self, status_dict):
        T163StatusBase.__init__(self, status_dict)
        # Embedded author profile.
        self.user = T163UserBase(status_dict['user'])
        # 'favorited', 'favorited_at', 'in_reply_to_status_text' and
        # 'in_reply_to_user_name' exist in the dict but are not mapped yet.
class StatusWithIpaddr(T163StatusBase):
    """Status variant that also carries the poster's IP address
    (e.g. "117.84.92.50"); 'auditStatus' is present but unmapped."""

    def __init__(self, status_dict):
        T163StatusBase.__init__(self, status_dict)
        self.ipaddr = status_dict['ipaddr']
class DirectMessage(object):
    """A private message between two users.

    Wraps the raw message dictionary; sender and recipient profiles are
    wrapped as T163UserBase objects.
    """

    def __init__(self, messagedict):
        """Copy message, sender and recipient fields onto the instance."""
        # Message body.
        self.id = messagedict['id']
        self.text = messagedict['text']
        self.created_at = parse_timestring(messagedict['created_at'])
        # Sender side.
        self.sender = T163UserBase(messagedict['sender'])
        self.sender_id = messagedict['sender_id']
        self.sender_screen_name = messagedict['sender_screen_name']
        # NOTE: attribute is named `followed` although the raw key is
        # 'followed_by'; kept as-is for backward compatibility.
        self.followed = messagedict['followed_by']
        # Recipient side.
        self.recipient = T163UserBase(messagedict['recipient'])
        self.recipient_id = messagedict['recipient_id']
        self.recipient_screen_name = messagedict['recipient_screen_name']
class SearchHit(T163StatusBase):
    """One status in a search result set, including the author profile."""

    def __init__(self, status_dict):
        T163StatusBase.__init__(self, status_dict)
        # Embedded author profile.
        self.user = T163UserBase(status_dict['user'])
        # 'favorited', 'favorited_at' and 'in_reply_to_user_name' exist in
        # the dict but are not mapped yet.
class SearchResult(object):
    """A page of status search results plus the paging metadata
    (totalHits/availHits counters, query string, page cursors)."""

    def __init__(self, result_dict):
        # Hit counters.
        self.totalHits = result_dict['totalHits']
        self.availHits = result_dict['availHits']
        # Paging / query metadata.
        self.next_page = result_dict['next_page']
        self.completed_in = result_dict['completed_in']
        self.refresh_url = result_dict['refresh_url']
        self.since_id = result_dict['since_id']
        self.results_per_page = result_dict['results_per_page']
        self.query = result_dict['query']
        self.max_id = result_dict['max_id']
        self.page = result_dict['page']
        # The hits themselves, wrapped as SearchHit objects.
        self.result = [SearchHit(item) for item in result_dict['result']]
class UserSearchResult(object):
    """A page of user search results: hit counters plus the wrapped hits."""

    def __init__(self, result_dict):
        self.totalHits = result_dict['totalHits']
        self.availHits = result_dict['availHits']
        # Each hit is wrapped as a UserSearchHit.
        self.result = [UserSearchHit(item) for item in result_dict['result']]
class UserSearchHit(T163UserBase):
    """A user profile as returned by the user-search API.

    Adds the user's latest status (with poster IP) and the follow flag.
    'telephone' and 'email' are present in the raw dict but always null
    for search hits, so they are not mapped.
    """

    def __init__(self, userdict):
        T163UserBase.__init__(self, userdict)
        # Latest status (wrapped) and whether we follow this user.
        self.status = StatusWithIpaddr(userdict['status'])
        self.following = userdict['following']
| Python |
#!/usr/bin/env python
#coding=utf-8
"""
Author: Xia Kai <xiaket@gmail.com>
Filename: utils.py
Type: Utility
Last modified: 2010-07-18 14:06
Description:
Utility functions for this project.
"""
import locale
import os
import urllib2
from datetime import datetime, timedelta
##############
# Exceptions #
##############
class AuthenticationError(RuntimeError):
    """Raised when the username/password combination is rejected."""
    pass
class UserNotFound(ValueError):
    """Raised when a queried user does not exist."""
    pass
class IllegalCall(ValueError):
    """Raised for an illegal API call, e.g. deleting someone else's
    status or posting a message longer than 163 characters."""
    pass
class UnknownError(RuntimeError):
    """Raised when the server answers with an unexpected HTTP code."""
    pass
##############
# Decorators #
##############
def require_login(func):
    """Decorator: ensure the session is logged in before *func* runs.

    Inspired by Django's login_required. If the session object (first
    positional argument, conventionally `self`) is not yet logged in,
    its _login() method is invoked first.

    The original wrapped the wrapper in a second, do-nothing closure
    (`morewrapped`); that redundant layer is removed — behavior is
    unchanged.
    """
    def wrapped(kls, *args, **kwargs):
        # Lazily authenticate on first use.
        if not kls.logged_in:
            kls._login()
        return func(kls, *args, **kwargs)
    return wrapped
def check_screen_name(func):
    """Decorator: validate a screen_name argument against the API.

    The screen name must be the first positional argument after the
    session object, or be passed as the 'screen_name' keyword. When a
    (truthy) screen name is present, the user's profile page is probed;
    a 404 from the API is translated into UserNotFound. Falsy/absent
    names skip the check.

    The original wrapped the wrapper in a second, do-nothing closure
    (`morewrapped`); that redundant layer is removed — behavior is
    unchanged.
    """
    def wrapped(kls, *args, **kwargs):
        if 'screen_name' in kwargs:
            _screen_name = kwargs['screen_name']
        elif args:
            _screen_name = args[0]
        else:
            _screen_name = None
        if _screen_name:
            # Probe the user's profile; the API answers 404 for unknown
            # users, which request() turns into UserNotFound for us.
            _url = "/users/show.json?screen_name=%s" % _screen_name
            _message = "Specified user does not exist."
            _err_dict = {
                404: (UserNotFound, _message),
            }
            kls.request(_url, errors=_err_dict)
        return func(kls, *args, **kwargs)
    return wrapped
def check_status_id(func):
    """Decorator: verify a status_id argument refers to a real status.

    status_id must be the first positional argument after the session
    object, or the 'status_id' keyword. When present, kls.show_status()
    is called, which raises if the id does not exist.

    Fixes two defects of the original: (1) the docstring was copy-pasted
    from check_screen_name and talked about screen_name; (2) when the
    decorated function was called with no status id at all, _status_id
    was never assigned and the decorator crashed with NameError — the
    check is now simply skipped in that case. The redundant double
    closure (`morewrapped`) is removed as well.
    """
    def wrapped(kls, *args, **kwargs):
        if 'status_id' in kwargs:
            _status_id = kwargs['status_id']
        elif args:
            _status_id = args[0]
        else:
            _status_id = None
        if _status_id is not None:
            # show_status raises if the status does not exist.
            kls.show_status(_status_id)
        return func(kls, *args, **kwargs)
    return wrapped
def parse_timestring(timestring):
    """Parse the API timestamp format into a naive UTC datetime.

    Accepts strings like "Mon Apr 26 10:49:29 +0800 2010" and returns
    the equivalent UTC time. English month abbreviations are mapped
    explicitly, so the result no longer depends on the process locale:
    the previous implementation toggled LC_TIME process-wide (not
    thread-safe, left unrestored on error, and failed outright where
    the 'en_US.UTF-8' / 'english' locale was not installed). It also
    ignored the offset sign on the minutes component, mis-parsing
    offsets such as "-0830".

    >>> parse_timestring("Mon Apr 26 10:49:29 +0800 2010")
    datetime.datetime(2010, 4, 26, 2, 49, 29)
    >>> parse_timestring("Mon Apr 26 10:49:29 -0800 2010")
    datetime.datetime(2010, 4, 26, 18, 49, 29)
    >>> parse_timestring("Mon Apr 26 10:49:29 +0830 2010")
    datetime.datetime(2010, 4, 26, 2, 19, 29)
    """
    months = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
              'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}
    # Format: weekday month day HH:MM:SS +ZZzz year
    _weekday, month_name, day, clock, offset, year = timestring.split()
    hour, minute, second = [int(part) for part in clock.split(':')]
    created_at = datetime(int(year), months[month_name], int(day),
                          hour, minute, second)
    # Subtract the UTC offset; one sign covers both hours and minutes.
    sign = -1 if offset.startswith('-') else 1
    delta = timedelta(hours=int(offset[1:3]), minutes=int(offset[3:5]))
    return created_at - sign * delta
class RedirectHandler(urllib2.HTTPRedirectHandler):
    # Translates t.163.com's "302 to /notfound" convention back into a
    # genuine HTTP 404 so API callers can handle missing resources uniformly.
    def http_error_302(self, req, fp, code, msg, headers):
        """
        For the moment, t.163.com would not return 404 status code correctly.
        Instead, it would return a 302 and redirect user to a page that will
        display 404 information. This would make web user happy, but we have to
        do extra to make our API elegant. Thus we have this handler to
        correctly raise 404 code.
        """
        result = urllib2.HTTPRedirectHandler.http_error_302(
            self, req, fp, code, msg, headers)
        # The sentinel "not found" landing page marks a missing resource.
        if headers['location'] == 'http://t.163.com/notfound':
            raise urllib2.HTTPError(req.get_full_url(), 404, msg, headers, fp)
        return result
# Running this module directly executes the embedded doctests.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| Python |
#!/usr/bin/env python
#coding=utf-8
"""
Author: Xia Kai <xiaket@gmail.com>
Filename: session.py
Type: Class definitions
Last modified: 2010-07-18 14:10
Description:
Official APIs(2010.04.27)
-------------------------
/friendships/show.json done
/statuses/followers/{screen_name}.json?page={page} done
/statuses/friends/{screen_name}.json?page={page} done
/friendships/create/{screen_name}.json done
/friendships/destroy/{screen_name}.json done
/direct_messages/new.json done
/direct_messages/destroy/{id}.json done
/direct_messages.json?since_id={since_id}&count={count} done
/direct_messages/sent.json?since_id={since_id}&count={count} done
/search.json?q=xx&p=yy&t=zz done
/1/user/search.json?q=xx&p=yy&t=zz done
/statuses/home_timeline.json done
/statuses/mentions.json done
/statuses/user_timeline/{screen_name}.json done
/statuses/update.json done
/statuses/show/{id}.json done
/statuses/destroy/{id}.json done
/favorites/{screen_name}.json done
/favorites/create/{id}.json done
/favorites/destroy/{id}.json done
/account/verify_credentials.json done
Unofficial APIs
---------------
these url are known to be working on the web:
/account/recommend done
Features from some twitter client benchmark page which is helpful here:
Image upload future
Profile Views future
Twitter Trends future
Follower Blocking future
Evolution:
0.2c
----
Clean up json format and add block support.
0.3a
----
Image upload future
Twitter Trends future
Follower Blocking future
Profile Views future
"""
import cookielib
import json
import sys
import urllib2
from urllib import unquote, urlencode
from models import T163UserBase, User, Follower, Status, DirectMessage
from models import SearchResult, SearchHit, UserSearchResult, UserSearchHit
from utils import require_login, check_screen_name, check_status_id
from utils import RedirectHandler
from utils import AuthenticationError, UserNotFound, IllegalCall, UnknownError
# Base URL all REST endpoint paths below are appended to.
API_HOST = "http://api.t.163.com"
class T163Session(object):
"""
A netease micro-blog API implementation in python.
"""
    def __init__(self, **kwargs):
        """Set up the session and authenticate the account.

        Keyword arguments:
            username:   full email address of the account.
            password:   account password; not used when a valid cookie
                        file already exists.
            cookiefile: path to a Mozilla-format cookie file; defaults
                        to "<username>.txt".

        Authentication is triggered here (lazily, via the require_login
        decorator on _init_user) because almost every API call needs it.
        Raises AuthenticationError when neither a cookie file nor a
        username is available.
        """
        self.username = kwargs.get('username', None)
        self.password = kwargs.get('password', None)
        self.cookiefile = kwargs.get('cookiefile', None)
        if not self.cookiefile:
            if not self.username:
                # No cookie file and no username: nothing to log in with.
                raise AuthenticationError
            else:
                self.cookiefile = "%s.txt" % self.username
        self.username_checked = False
        self.logged_in = False
        self.cookiejar = cookielib.MozillaCookieJar()
        # One shared opener so cookies persist across calls and the custom
        # 302 handler (RedirectHandler) can surface real 404s.
        self.opener = urllib2.build_opener(
            urllib2.HTTPCookieProcessor(self.cookiejar),
            RedirectHandler(),
        )
        # Triggers login via decorator and caches the account's own profile.
        self.user = self._init_user()
        self.screen_name = self.user.screen_name
#####################################################
# Authentication functions. #
#####################################################
# These method do not need to be called explicitly. #
# They are called implicitly in __init__. #
#####################################################
def _has_local_cookie(self):
"""
We try to load the cookie file. If we can load it, then we are good to
go. Were there any exceptions, we consider otherwise.
"""
try:
self.cookiejar.load(self.cookiefile)
return True
except:
return False
def _check_username(self):
"""
Utility function to check whether the provided username is a valid
netease passport.
"""
_url = "http://t.163.com/account/passport/check"
_parameter = urlencode({'userName': self.username})
_response = urllib2.urlopen(_url, _parameter).readlines()[0]
_response = json.loads(_response)
if _response['status'] == '0':
self.username_checked = True
else:
_message = "User(%s) does not exist." % self.username
raise AuthenticationError, _message
    def _login(self):
        """
        login logic:
        if we can find a local cookie:
            set up certain variables.
            quit this function, since we are done.
        else:
            check username if necessary
            login.
            write cookie.
            set up flag.
        """
        if self._has_local_cookie():
            # A saved cookie is enough; skip the password round-trip.
            self.username_checked = True
            self.logged_in = True
            return
        if not self.username_checked:
            self._check_username()
        _url = 'https://reg.163.com/logins.jsp'
        _data = {
            'username': self.username,
            'password': self.password,
            'savelogin': '1',
            # After login the portal bounces us to this API URL.
            'url': API_HOST + '/session/first',
            'product': 't',
            'type': '1',
        }
        _parameter = urlencode(_data)
        _f = self.opener.open(_url, _parameter)
        def _find_next_url(lines):
            """Extract the post-login redirect URL from the response page.

            The link target is obfuscated as HTML numeric character
            references (&#NN;); this decodes them back into a URL by hand.
            NOTE(review): hardcoded page scraping — an HTML parser would
            be more robust if the portal markup ever changes.
            """
            for line in lines:
                if line.find('<a href=') != -1:
                    linkline = line
                    break
            # if the provided username and password is valid, we should see a
            # redirection page here, if not, we shall see something like:
            # <div><a href="http://reg.163.com/" target="_self"> </a></div>
            if linkline.find('reg.163.com') != -1:
                raise AuthenticationError
            start = linkline.index("ref='") + 5
            end = linkline.index("'><", start)
            reallink = ''
            for numstr in linkline[start:end].split('&#'):
                if numstr == '':
                    continue
                else:
                    # Each &#NN; chunk becomes a %-escaped byte.
                    hexstr = hex(int(numstr))
                    reallink += ('%' + hexstr.replace('0x', ''))
            return unquote(reallink)
        newurl = _find_next_url(_f.readlines())
        # Visiting the decoded URL completes the session hand-off and
        # populates the cookie jar; persist it for the next run.
        self.opener.open(newurl).read()
        self.cookiejar.save(self.cookiefile)
        self.logged_in = True
    @require_login
    def _init_user(self):
        """Fetch the authenticated account's own profile as a User object.

        This function would retrieve user's screen_name from his/her homepage.
        """
        #return User(self.json("/account/verify_credentials.json"))
        return User(self.json("/user/info.json"))
###################
# HTTP functions. #
###################
def request(self, url, **kwargs):
"""
This function would handle all http communications.
parameters:
url: The url to be retrieved.(required)
data: HTTP POST data.(optional)
errors: user/method provided exception handler.(optional)
If data is not provided, we shall set it to None and do an HTTP GET
request, if otherwise, we shall do an HTTP POST with the data provided,
even if the data is an empty dictionary.
When a method have to handle HTTP error code, they send a dictionary
here, containing the error code to be treated and the exception to be
raised.
"""
# parse kwargs:
data = kwargs.get('data', None)
errors = kwargs.get('errors', {})
try:
if data != None:
# Do an HTTP POST if data is provided.
encoded_data = urlencode(data)
_file = self.opener.open(API_HOST + url, encoded_data)
else:
# If no data is provided, we use HTTP GET instead of
# HTTP POST.
_file = self.opener.open(API_HOST + url)
return _file.readlines()
except urllib2.HTTPError, error:
httpcode = error.getcode()
if (not errors) or (httpcode not in errors):
# no exception handler provided, or provided handler do not
# mean to handle this kind of http status code. We just raise
# an UnknownError.
raise UnknownError
else:
exception_type = errors[httpcode][0]
exception_message = errors[httpcode][1]
raise exception_type, exception_message
def json(self, url, **kwargs):
"""
For json file requests, we can use this function instead of
self.request, since only one line is responsed for these requests.
We move even further by returning a parsed object instead of a string.
"""
return json.loads(self.request(url, **kwargs)[0])
#######################
# shortcut functions. #
#######################
# done
def get_statuses(self, url, count, since_id=None, max_id=None, **kwargs):
"""
We frequently need to get a json file and parse it and return it as a
list of Status objects, so here we are.
"""
if count > 200 or count < 0:
count = 30
_url = url + "?count=%s" % count
if since_id:
_url += "&since_id=%s" % since_id
if max_id:
_url += "&max_id=%s" % max_id
status_list = []
for status_dict in self.json(_url, **kwargs):
status_list.append(Status(status_dict))
return status_list
    def home_timeline_since(self, date_time):
        """
        This function would return your home timeline since a date, which is
        more friendly than the api provided below.
        Accept a python datetime object as parameter, return every message
        between now and then.
        This may take some time, you are warned!
        """
        # TODO: not implemented yet -- presumably should page through
        # home_timeline() until statuses older than date_time appear.
        pass
    def user_timeline_since(self, screen_name, date_time):
        """
        This function would return someone's timeline since a date, which is
        more friendly than the api provided below.
        Take a screen_name and a python datetime object as parameter,
        return every message between now and then.
        This may take some time, you are warned!
        """
        # TODO: not implemented yet -- presumably should page through
        # user_timeline() until statuses older than date_time appear.
        pass
    def user_home_timeline(self, screen_name):
        """
        This function is usually very time consuming.
        This function would first find who this user is following, and then
        retrieve those people's statuses, and arrange them chronologically and
        finally return them. We shall only retrieve the first page of those
        people statuses.
        """
        # TODO: not implemented yet.
        pass
# done
def i_am_following(self, screen_name):
"""
This function would return whether the current user is following some
one specified by screen_name.
"""
return show_friendship(target_screen_name=screen_name)[0]
# done
def i_am_followed_by(self, screen_name):
"""
This function would show whether the current user is followed by
someone specified by screen_name.
"""
return show_friendship(target_screen_name=screen_name)[1]
# done
def i_am_friends_with(self, screen_name):
"""
This function would show whether the current user and the user
specified by screen_name is following each other.
"""
return all(show_friendship(target_screen_name=screen_name))
    def inbox(self):
        """
        This function would return every direct message in the user's inbox,
        this may take some time...
        """
        # TODO: not implemented yet -- presumably should page through
        # direct_messages().
        pass
    def outbox(self):
        """
        This function would return every direct message sent by current user.
        this may take some time...
        """
        # TODO: not implemented yet -- presumably should page through
        # sent_direct_messages().
        pass
    def retweet(self, message_id):
        """
        This function would retweet a message.
        """
        # TODO: not implemented yet -- update() already accepts a
        # retweet_status_id parameter that this could delegate to.
        pass
def reply(self, status, message_id, source=None):
"""
This function would reply a message.
It is tested that, in order to correctly reply a status, you have to
add an @ and the user's name at the front of your status.
"""
return self.update(status, source, message_id)
    def myrecommend(self):
        """
        This function would find who your friends are following.
        """
        # TODO: not implemented yet.
        pass
###########################################################
## API specific functions: following ##
###########################################################
## /friendships/show.json ##
## /statuses/followers/{screen_name}.json?page={page} ##
## /statuses/friends/{screen_name}.json?page={page} ##
## /friendships/create/{screen_name}.json ##
## /friendships/destroy/{screen_name}.json ##
###########################################################
def show_friendship(self,
source_id=None, source_screen_name=None,
target_id=None, target_screen_name=None):
"""
This function would show the follow relationship between two people.
There are four possible of situations:
source unfo target, target unfo source: False, False
source fo target, target unfo source: True, False
source unfo target, target fo source: False, True
source fo target, target fo source: True, True
"""
_url = "/friendships/show.json?"
if (not source_id) and (not source_screen_name):
_url += "source_id=%s" % self.user.id
elif source_id:
_url += "source_id=%s" % source_id
else:
_url += "source_screen_name=%s" % source_screen_name
if (not target_id) and (not target_screen_name):
_message = "No target specified!"
raise IllegalCall, _message
elif target_id:
_url += "&target_id=%s" % target_id
else:
_url += "&target_screen_name=%s" % target_screen_name
_message = "Specified user does not exist."
_err_dict = {
404: (UserNotFound, _message),
}
_dict = self.json(_url, errors=_err_dict)
return _dict['source']['following'], _dict['source']['followed_by']
@check_screen_name
def followers(self, screen_name=None, page=None):
"""
This function would show the followers of someone specified by
screen_name.
This function would return a list of Follower objects.
Warning:
Current server implementation would return current user's followers
if the request user specified by screen_name is not found, it's a bit
odd. We worked this around by checking the screen_name before doint the
real request.
"""
if not screen_name:
screen_name = self.screen_name
if page:
_url = "/statuses/followers/%s.json?page=%s" % (screen_name, page)
else:
_url = "/statuses/followers/%s.json" % screen_name
followers_list = []
for item in self.json(_url):
followers_list.append(Follower(item))
return followers_list
@check_screen_name
def friends(self, screen_name=None, page=None):
"""
This function would show the people someone is following.
This function would return a list of Follower objects.
Warning:
Current server implementation would return current user's friends
if the request user specified by screen_name is not found, it's a bit
odd. We worked this around by checking the screen_name before doint the
real request.
"""
if not screen_name:
screen_name = self.screen_name
if page:
_url = "/statuses/friends/%s.json?page=%s" % (screen_name, page)
else:
_url = "/statuses/friends/%s.json" % screen_name
friends_list = []
for item in self.json(_url):
friends_list.append(Follower(item))
return friends_list
@check_screen_name
def create_friendship(self, screen_name):
"""
Follow someone specified by screen_name.
"""
_url = "/friendships/create/%s.json" % screen_name
_message1 = "User not logged in, but why should that happen"
_message3 = "You are blocked by this person, or I think you know why."
_err_dict = {
401: (UnknownError, _message1),
403: (IllegalCall, _message3),
}
# This request require a POST method, so we send an empty dictionary.
self.request(_url, data={}, errors=_err_dict)
@check_screen_name
def destroy_friendship(self, screen_name):
"""
Un-follow someone.
"""
_url = "/friendships/destroy/%s.json" % screen_name
_message1 = "User not logged in, but why should that happen"
_message3 = "Not following this person."
_err_dict = {
401: (UnknownError, _message1),
403: (IllegalCall, _message3),
}
self.request(_url, data={}, errors=_err_dict)
######################################################################
## API specific functions: directmessage ##
######################################################################
## /direct_messages/new.json ##
## /direct_messages/destroy/{id}.json ##
## /direct_messages.json?since_id={since_id}&count={count} ##
## /direct_messages/sent.json?since_id={since_id}&count={count} ##
######################################################################
def new_direct_message(self, screen_name, text):
"""
Send a direct message to some user specified by user's screen_name.
This function would return the sent message as a DirectMessage object.
Server would handle this API's 403 correctly.
"""
if type(text) == type("str"):
text = text.decode("UTF-8")
_url = "/direct_messages/new.json"
if len(text) > 163 or len(text) == 0:
_message = "Your message is either too long or too short."
raise IllegalCall, _message
_dict = {'text': text.encode("UTF-8"), 'user': screen_name}
_message1 = "User not logged in, or illegal message length."
_message3 = "You are not follow by receiver, or I think you know why."
_err_dict = {
401: (UnknownError, _message1),
403: (IllegalCall, _message3),
}
_message = self.json(_url, data=_dict, errors=_err_dict)
return DirectMessage(_message)
def destroy_direct_message(self, message_id):
"""
Delete a direct message specified by its id.
"""
_message1 = "User not logged in, but why should that happen"
_message3 = "Message id is invalid."
_err_dict = {
401: (UnknownError, _message1),
403: (IllegalCall, _message3),
}
_url = "/direct_messages/destroy/%s.json" % message_id
return DirectMessage(self.json(_url, data={}, errors=_err_dict))
def direct_messages(self, since_id=None, count=None):
"""
Inbox for current user's direct messages.
"""
_url = '/direct_messages.json?since_id=%s&count=%s' % (since_id, count)
_message1 = "User not logged in, but why should that happen"
_err_dict = {
401: (UnknownError, _message1),
}
direct_message_list = []
for item in self.json(_url, errors=_err_dict):
direct_message_list.append(DirectMessage(item))
return direct_message_list
def sent_direct_messages(self, since_id=None, count=None):
"""
Outbox for current user's direct messages.
"""
_url = '/direct_messages/sent.json?since_id=%s&count=%s' % (since_id, count)
_message1 = "User not logged in, but why should that happen"
_err_dict = {
401: (UnknownError, _message1),
}
direct_message_list = []
for item in self.json(_url, errors=_err_dict):
direct_message_list.append(DirectMessage(item))
return direct_message_list
######################################################################
## API specific functions: searching ##
######################################################################
## /search.json?q=xx&p=yy&t=zz ##
## /1/user/search.json?q=xx&p=yy&t=zz ##
######################################################################
def search(self, query, page=None, type=None):
"""
Search statuses by keyword.
q is for query word, p is for page.
"""
if not type:
_url = '/search.json?q=%s&t=recent' % query
else:
_url = '/search.json?q=%s&t=%s' % (query, type)
if page:
_url += '&p=%s' % page
return SearchResult(self.json(_url))
def user_search(self, query, page=None):
"""
Search username by keyword.
"""
_url = '/1/user/search.json?q=%s' % query
if page:
_url += '&p=%s' % page
return UserSearchResult(self.json(_url))
##########################################################
## API specific functions: status ##
##########################################################
## /statuses/home_timeline.json ##
## /statuses/mentions.json ##
## /statuses/user_timeline/{screen_name}.json ##
## /statuses/update.json ##
## /statuses/show/{id}.json ##
## /statuses/destroy/{id}.json ##
##########################################################
def home_timeline(self, count=30, since_id=None, max_id=None):
"""
Return the statuses on your homepage when you are using the web.
"""
_url = "/statuses/home_timeline.json"
return self.get_statuses(_url, count, since_id, max_id)
def mentions(self, count=30, since_id=None, max_id=None):
"""
This function would return a list of status objects, which is the
messages mentioned the current user.
"""
_url = "/statuses/mentions.json?count=%s"
return self.get_statuses(_url, count, since_id)
def user_timeline(self, screen_name=None, count=30, since_id=None):
"""
This function would return a user's timeline, including his/her
statuses, mentions and retweets.
"""
if not screen_name:
screen_name = self.screen_name
_url = "/statuses/user_timeline/%s.json" % screen_name
_message = "Specified user does not exist."
_err_dict = {
404: (UserNotFound, _message),
}
return self.get_statuses(_url, count, since_id, errors=_err_dict)
def update(self, status, source=None,
in_reply_to_status_id=None, retweet_status_id=None):
"""
Post a new status.
"""
# Fix possible type error here.
if type(status) == type("str"):
status = status.decode("UTF-8")
_url = "/statuses/update.json"
if len(status) > 163:
_message = "Your message is too long..."
raise IllegalCall, _message
status_dict = {'status': status.encode("UTF-8")}
if source:
status_dict['source'] = source
else:
status_dict['source'] = "WEB"
if in_reply_to_status_id:
status_dict['in_reply_to_status_id'] = in_reply_to_status_id
if retweet_status_id:
status_dict['retweet_status_id'] = retweet_status_id
_message = "Your message is too long..."
_err_dict = {
403: (IllegalCall, _message),
500: (UnknownError, "Server error?"),
}
return Status(self.json(_url, data=status_dict, errors=_err_dict))
def show_status(self, status_id):
"""
This function would return a status object specified by its id.
"""
_url = "/statuses/show/%s.json" % status_id
_err_dict = {
404: (IllegalCall, "Specified status not found."),
}
return Status(self.json(_url, errors=_err_dict))
def destroy_status(self, status_id):
"""
Delete a status specified by an id.
"""
_url = "/statuses/destroy/%s.json" % status_id
_message = "Specified status do not exist, or you do not own it."
_err_dict = {
404: (IllegalCall, _message),
}
self.request(_url, data={}, errors=_err_dict)
######################################################################
## API specific functions: favorites ##
######################################################################
## /favorites/{screen_name}.json ##
## /favorites/create/{id}.json ##
## /favorites/destroy/{id}.json ##
######################################################################
@check_screen_name
def favorites(self, screen_name=None, count=30, since_id=None):
"""
This function would return a list of status objects, which is the
messages collected by the current user.
Similar to self.followers, server would treat this request incorrectly.
So we are in need of the check_screen_name decorator again.
"""
if not screen_name:
screen_name = self.screen_name
_url = "/favorites/%s.json" % screen_name
return self.get_statuses(_url, count, since_id)
def create_favorite(self, status_id):
"""
Add a status to favorite specified by an id.
"""
_url = "/favorites/create/%s.json" % status_id
_err_dict = {
404: (IllegalCall, "Specified status is not found."),
}
self.request(_url, data={}, errors=_err_dict)
@check_status_id
def destroy_favorite(self, status_id):
"""
Delete a status from favorites specified by an id.
Since the server would return an empty list no matter what id you send
to it, we need to check the status's id manually before destroy the
favorites relationship.
"""
_url = "/favorites/destroy/%s.json" % status_id
self.request(_url, data={})
#############################################################
## API specific functions: misc ##
#############################################################
## /account/recommend ##
#############################################################
def recommend(self):
"""
The url used in this function would return a json containing recommend
users for the current user.
This function would return a list of User objects recommend for the
current user to follow.
"""
userlist = self.json("/account/recommend")['userlist']
Userlist = []
for userdict in userlist:
user = User(userdict)
Userlist.append(user)
return Userlist
| Python |
#!/usr/bin/env python
#coding=utf-8
"""
Author: Xia Kai <xiaket@gmail.com>
Filename: __init__.py
Type: Module meta information holder
Last modified: 2010-05-16 20:44
Description:
    Package metadata (author and version) for this 163 microblog client.
"""
__author__ = "xiaket"
__version__ = "0.2b"
| Python |
#!/usr/bin/env python
#coding=utf-8
"""
Author: Xia Kai <xiaket@gmail.com>
Filename: models.py
Type: Class definition
Last modified: 2010-05-24 22:27
Description:
    This file contains classes that turn the dictionaries returned by json
    requests into user/status objects (e.g. T163User), so we may more
    conveniently retrieve their information.
"""
from utils import parse_timestring
class T163UserBase(object):
    """
    Basic user information that is publicly accessible.

    The constructor consumes the raw json dictionary returned by the
    server.  Field meanings (translated from the original Chinese notes):
        id / name / screen_name        -- account identity
        url / description / location   -- profile data
        followers_count                -- people following this user
        friends_count                  -- people this user follows
        statuses_count                 -- number of statuses posted
        favourites_count               -- number of favorites
        gender                         -- 0 male / 1 female / other unknown
        created_at                     -- registration time
        profile_image_url_large/.../small -- avatars (80*80 / 48*48 / 24*24)
        verified / reply_type / time_zone / utc_offset
        profile_* colors and background -- homepage appearance
    """
    def __init__(self, userdict):
        """Populate attributes from the raw user dictionary."""
        # Account identity.
        self.id = userdict['id']
        self.name = userdict['name']
        self.screen_name = userdict['screen_name']
        # Profile fields.
        self.url = userdict['url']
        self.description = userdict['description']
        self.location = userdict['location']
        # Social counters.
        self.favourites_count = userdict['favourites_count']
        self.followers_count = userdict['followers_count']
        self.friends_count = userdict['friends_count']
        self.statuses_count = userdict['statuses_count']
        # The server encodes gender as an integer; translate it to a
        # single human readable letter (unknown codes become 'U').
        self.gender = {0: 'M', 1: 'F'}.get(userdict['gender'], 'U')
        # Registration time, parsed into a python datetime object.
        self.created_at = parse_timestring(userdict['created_at'])
        # Not parsed yet: verified, time_zone, utc_offset, the avatar
        # image urls, the homepage appearance colors and reply_type.
class User(T163UserBase):
    """
    The full account view of a user.  On top of T163UserBase this adds
    the private contact fields:
        telephone -- the user's mobile number
        email     -- the user's email address
    """
    def __init__(self, userdict):
        """Build the base user, then pull in the private contact info."""
        T163UserBase.__init__(self, userdict)
        self.email = userdict['email']
        self.telephone = userdict['telephone']
class Follower(T163UserBase):
    """
    A user as it appears in follower/friend listings.  Extra keys:
        followed_by -- whether this user is following you
        following   -- whether you are following this user
        status      -- this user's latest status (raw dictionary)
    """
    def __init__(self, userdict):
        """Build the base user, then add the relationship fields."""
        T163UserBase.__init__(self, userdict)
        self.following = userdict['following']
        self.followed_by = userdict['followed_by']
        self.status = userdict['status']
class T163StatusBase(object):
    """
    Basic status information shared by every status object.

    Expected keys in the status dictionary (translated):
        id                  -- id of this message
        user_id             -- id of the author
        text                -- message content
        created_at          -- time the message was posted
        source              -- posting client (e.g. the NetEase web site)
        retweet_status_id   -- id of the original retweeted message
        in_reply_to_status_id / in_reply_to_screen_name /
        in_reply_to_user_id -- reply threading information
        truncated / type    -- unknown, currently ignored
    """
    def __init__(self, status_dict):
        """Populate attributes from the raw status dictionary."""
        # Identity of the message and of its author.
        self.id = status_dict['id']
        self.user_id = status_dict['user_id']
        # Content and posting metadata.
        self.text = status_dict['text']
        self.source = status_dict['source']
        # Posting time, parsed into a python datetime object.
        self.created_at = parse_timestring(status_dict['created_at'])
        # Retweet and reply threading.
        self.retweet_status_id = status_dict['retweet_status_id']
        self.in_reply_to_status_id = status_dict['in_reply_to_status_id']
        self.in_reply_to_screen_name = status_dict['in_reply_to_screen_name']
        self.in_reply_to_user_id = status_dict['in_reply_to_user_id']
        # Not parsed yet: truncated, type.
class Status(T163StatusBase):
    """
    Detailed status information, as returned by e.g. the show() api.
    On top of the base fields it carries the author as a full
    T163UserBase object.
    Not parsed yet: favorited, favorited_at, in_reply_to_status_text,
    in_reply_to_user_name.
    """
    def __init__(self, status_dict):
        """Build the base status, then wrap the nested author dict."""
        T163StatusBase.__init__(self, status_dict)
        self.user = T163UserBase(status_dict['user'])
class StatusWithIpaddr(T163StatusBase):
    """
    Status variant that additionally carries the poster's ip address,
    e.g. ipaddr = '117.84.92.50'.
    The auditStatus key is not parsed yet.
    """
    def __init__(self, status_dict):
        """Build the base status, then add the ip address."""
        T163StatusBase.__init__(self, status_dict)
        self.ipaddr = status_dict['ipaddr']
class DirectMessage(object):
    """
    A private message between two users.  Sample dictionary:
        id                      7950999978748591002
        text                    message content
        created_at              Tue Apr 27 20:40:58 +0800 2010
        sender / recipient      nested user dictionaries
        sender_id               -5127315299555819730
        sender_screen_name      corleone
        recipient_id            6493809605159984224
        recipient_screen_name   xiaket
        followed_by             True
    """
    def __init__(self, messagedict):
        """Populate attributes from the raw message dictionary."""
        # Plain fields copied over verbatim.
        for key in ('id', 'text', 'sender_id', 'sender_screen_name',
                    'recipient_id', 'recipient_screen_name'):
            setattr(self, key, messagedict[key])
        # Posting time, parsed into a python datetime object.
        self.created_at = parse_timestring(messagedict['created_at'])
        self.followed = messagedict['followed_by']
        # Both endpoints come back as nested user dictionaries.
        self.sender = T163UserBase(messagedict['sender'])
        self.recipient = T163UserBase(messagedict['recipient'])
class SearchHit(T163StatusBase):
    """
    One status entry inside a SearchResult.  On top of the base fields
    it carries the author as a full T163UserBase object.
    Not parsed yet: favorited, favorited_at, in_reply_to_user_name.
    """
    def __init__(self, status_dict):
        """Build the base status, then wrap the nested author dict."""
        T163StatusBase.__init__(self, status_dict)
        self.user = T163UserBase(status_dict['user'])
class SearchResult(object):
    """
    The result page of a status search.  Sample dictionary:
        totalHits 14973, availHits 600, completed_in 0,
        results_per_page 30, page 1, since_id 0, max_id 0,
        next_page / refresh_url / query  -- strings,
        result -- list of raw status dictionaries.
    """
    def __init__(self, result_dict):
        """Populate attributes from the raw result dictionary."""
        # Scalar fields copied over verbatim.
        for key in ('totalHits', 'next_page', 'completed_in', 'availHits',
                    'refresh_url', 'since_id', 'results_per_page',
                    'query', 'max_id', 'page'):
            setattr(self, key, result_dict[key])
        # Every raw hit is wrapped in a SearchHit object.
        self.result = [SearchHit(item) for item in result_dict['result']]
class UserSearchResult(object):
    """
    The result page of a username search:
        totalHits -- number
        availHits -- number
        result    -- list of raw user dictionaries
    """
    def __init__(self, result_dict):
        """Populate attributes; every raw hit becomes a UserSearchHit."""
        self.totalHits = result_dict['totalHits']
        self.availHits = result_dict['availHits']
        self.result = [UserSearchHit(item) for item in result_dict['result']]
class UserSearchHit(T163UserBase):
    """
    One user entry inside a UserSearchResult.  Extra keys:
        status    -- the user's latest status, wrapped in StatusWithIpaddr
        following -- whether you are following this user
        telephone / email -- always null here, so not stored
    """
    def __init__(self, userdict):
        """Build the base user, then add the search-specific fields."""
        T163UserBase.__init__(self, userdict)
        self.following = userdict['following']
        self.status = StatusWithIpaddr(userdict['status'])
| Python |
#!/usr/bin/env python
#coding=utf-8
"""
Author: Xia Kai <xiaket@gmail.com>
Filename: tests.py
Type: Unit test module
Last modified: 2010-05-21 16:51
Description:
This file contains unit test cases for this project.
"""
import os
import time
import unittest
from models import T163UserBase, User, Follower, Status, DirectMessage
from models import SearchResult, SearchHit, UserSearchResult, UserSearchHit
from session import T163Session as Session
from utils import AuthenticationError, UserNotFound, IllegalCall
# This FAKE_USER should not be registered as an 163 account.
FAKE_USER = "xiakai.nju@gmail.com"
FAKE_PASS = "fakepass"
# This should be a valid account, preferably at your disposal. If you do not
# have another account, at least find some account that is following you and
# that you are not following. The following test cases assume that
# TEST_ACCOUNT is following the account that you give to __init__.
# TEST_ACCOUNT_ALT should be someone who is not following you.
TEST_ACCOUNT = "xiaket"
TEST_ACCOUNT_ALT = "zhangjiawei"
# This should be an invalid screen_name.
FAKE_SCREEN_NAME = "aslfkqwlalsdlfalkw"
# This should be an invalid user/status id.
FAKE_ID = "398066"
# Test message contents: variables prefixed FAKE are longer than the 163
# character limit, while variables prefixed TEST stay within it.
FAKE_MESSAGE_EN = u"abcdefghijklmnopqrstuvwxyz "*7
TEST_MESSAGE_EN = u"abcdefghijklmnopqrstuvwxyz "*6
FAKE_MESSAGE_CN = u"中文消息长度测试 "*19
TEST_MESSAGE_CN = u"中文消息长度测试 "*18
class SessionTests(unittest.TestCase):
"""
This class would test authentication related cases. In summary, we have:
1. No username provided.
2. Invalid username
3. Invalid username/password pair.
4. Invalid cookie file.
5. Invalid cookie file/username pair.
6. Valid cookie file removed after Session initilization.
"""
    def __init__(self, testname, username=None, password=None, fulltest=False):
        """
        Override the default __init__ method to allow injection of a
        username and password into each test case.
        Some tests are somehow dangerous (e.g. they may lock the account);
        those only run when the fulltest flag is set.
        """
        super(SessionTests, self).__init__(testname)
        self.username = username
        self.password = password
        self.fulltest = fulltest
        if fulltest:
            print "performing full test."
    def session_init(self):
        """
        Test the __init__ method of Session.
        There must be no cookie file either before or after this test case.
        """
        # If neither username nor cookiefile is given to __init__, an
        # AuthenticationError would no doubt be raised.
        self.assertRaises(AuthenticationError, Session)
        # Should raise an AuthenticationError if an invalid username is
        # provided.
        self.assertRaises(AuthenticationError, Session, username=FAKE_USER)
        # This should not raise Exceptions, since it is a valid
        # username/password pair.
        Session(username=self.username, password=self.password)
        # This should not raise Exceptions, even though neither username nor
        # password is provided: the cookie file saved above is reused.
        Session(cookiefile="%s.txt" % self.username)
        # Since we have a valid cookie file now, anything given as username or
        # password would be ignored, so the following example would work.
        Session(cookiefile="%s.txt" % self.username, username=FAKE_USER)
        # Remove existing cookie file before we continue. The cookie should
        # have been created before.
        os.remove("%s.txt" % self.username)
        # This would cause AuthenticationError, since the cookiefile provided
        # does not exist.
        self.assertRaises(
            AuthenticationError,
            Session,
            cookiefile="%s.txt" % FAKE_USER,
        )
        # This would work, since a fallback username/password scheme would
        # work. But this would save cookie to "%s.txt" % FAKE_USER, instead of
        # "%s.txt" % self.username. So we shall remove the cookiefile after
        # this test.
        Session(
            cookiefile="%s.txt" % FAKE_USER,
            username=self.username,
            password=self.password,
        )
        os.remove("%s.txt" % FAKE_USER)
        # This should raise AuthenticationError, since it is an invalid
        # username/password pair.
        # CAUTION: This is dangerous. Running the following test frequently
        # would lock your valid account up.
        if self.fulltest:
            self.assertRaises(
                AuthenticationError,
                Session,
                username=self.username,
                password=FAKE_PASS,
            )
    def relation_api(self):
        """
        Test the relationship (follow/unfollow) related APIs.
        """
        # Initialize a session.
        session = Session(username=self.username, password=self.password)
        ##########################
        # self.show_friendship #
        #------------------------#
        # /friendships/show.json #
        ##########################
        # Calling Session.show_friendship(target_screen_name=FAKE_SCREEN_NAME)
        # should cause an exception.
        self.assertRaises(
            UserNotFound,
            session.show_friendship,
            target_screen_name=FAKE_SCREEN_NAME,
        )
        # This time, the target is valid, while the source_id is invalid. It
        # should also cause an exception.
        self.assertRaises(
            UserNotFound,
            session.show_friendship,
            source_id=FAKE_ID,
            target_screen_name=TEST_ACCOUNT,
        )
        # This time, no target is provided, this should cause another
        # exception.
        self.assertRaises(
            IllegalCall,
            session.show_friendship,
            source_id=FAKE_ID,
        )
        # This should work, giving the following relationship between
        # TEST_ACCOUNT and self.username.
        fo, foed = session.show_friendship(
            target_screen_name=TEST_ACCOUNT
        )
        self.assertTrue(
            foed,
            "TEST_ACCOUNT:%s should follow %s" % (TEST_ACCOUNT, self.username),
        )
        ##########################################
        # self.followers #
        #----------------------------------------#
        # /statuses/followers/{screen_name}.json #
        ##########################################
        # Uses the current user's screen_name by default, so these should be
        # equal.
        self.assertEqual(
            [user.id for user in session.followers()],
            [user.id for user in session.followers(session.screen_name)],
        )
        # This should never return an empty list, since at least TEST_ACCOUNT
        # is following self.username. Items in the list should be an instance
        # of Follower.
        followers = session.followers(session.screen_name)
        self.assertTrue(isinstance(followers[0], Follower))
        # Test again, TEST_ACCOUNT should be following self.username
        self.assertTrue(TEST_ACCOUNT in [u.screen_name for u in followers])
        # This should raise an exception, for the screen_name is invalid.
        self.assertRaises(
            UserNotFound,
            session.followers,
            screen_name=FAKE_SCREEN_NAME,
        )
        # The above should work for both positional arguments and keyword
        # arguments.
        self.assertRaises(
            UserNotFound,
            session.followers,
            FAKE_SCREEN_NAME,
        )
        ##########################################
        # self.friends #
        #----------------------------------------#
        # /statuses/friends/{screen_name}.json #
        ##########################################
        # This should give a list of Follower objects.
        friends = session.friends(TEST_ACCOUNT)
        self.assertTrue(isinstance(friends[0], Follower))
        # Since an almost identical API is well tested(I hope!) above, I see no
        # point repeating it here.
        ############################################
        # self.create_friendship #
        #------------------------------------------#
        # /friendships/create/{screen_name}.json #
        ############################################
        # As before, this function is decorated with check_screen_name.
        # So we shall get a UserNotFound with an invalid screen_name.
        self.assertRaises(
            UserNotFound,
            session.create_friendship,
            FAKE_SCREEN_NAME,
        )
        # Following yourself would get an IllegalCall exception.
        self.assertRaises(
            IllegalCall,
            session.create_friendship,
            session.screen_name,
        )
        # Before we continue, we shall follow TEST_ACCOUNT.
        # This should work.
        session.create_friendship(TEST_ACCOUNT)
        ############################################
        # self.destroy_friendship #
        #------------------------------------------#
        # /friendships/destroy/{screen_name}.json #
        ############################################
        # This should work.
        session.destroy_friendship(TEST_ACCOUNT)
        # It cannot be done twice:
        self.assertRaises(
            IllegalCall,
            session.destroy_friendship,
            TEST_ACCOUNT,
        )
        # Nor can we unfollow someone who does not exist.
        self.assertRaises(
            UserNotFound,
            session.destroy_friendship,
            FAKE_SCREEN_NAME,
        )
    def mail_api(self):
        """
        This function would test the direct message related APIs.

        Endpoints covered: /direct_messages/new.json,
        /direct_messages/sent.json, /direct_messages/destroy/{id}.json
        and /direct_messages.json.  Requires live network access;
        TEST_ACCOUNT must be following the authenticated user, and
        TEST_ACCOUNT_ALT must NOT be (see the module-level constants).
        The calls are order-dependent: a message is sent, located in the
        outbox, then destroyed.
        """
        # Initialize a session.
        session = Session(username=self.username, password=self.password)
        ############################################
        # self.new_direct_message                  #
        #------------------------------------------#
        # /direct_messages/new.json                #
        ############################################
        # This is an IllegalCall, since the receiver is invalid.
        self.assertRaises(
            IllegalCall,
            session.new_direct_message,
            FAKE_SCREEN_NAME,
            TEST_MESSAGE_CN,
        )
        # This is an IllegalCall, since the message is too long (CN).
        self.assertRaises(
            IllegalCall,
            session.new_direct_message,
            TEST_ACCOUNT,
            FAKE_MESSAGE_CN,
        )
        # This is an IllegalCall, since the message is too long (EN).
        self.assertRaises(
            IllegalCall,
            session.new_direct_message,
            TEST_ACCOUNT,
            FAKE_MESSAGE_EN,
        )
        # This is an IllegalCall, since you cannot send a mail to yourself.
        self.assertRaises(
            IllegalCall,
            session.new_direct_message,
            session.screen_name,
            TEST_MESSAGE_EN,
        )
        # This is an IllegalCall, since TEST_ACCOUNT_ALT is not following you.
        self.assertRaises(
            IllegalCall,
            session.new_direct_message,
            TEST_ACCOUNT_ALT,
            TEST_MESSAGE_EN,
        )
        # Finally, this should work.
        message = session.new_direct_message(TEST_ACCOUNT, TEST_MESSAGE_EN)
        self.assertTrue(isinstance(message, DirectMessage))
        # A direct message is sent to TEST_ACCOUNT, we shall retrieve the first
        # message in outbox and check if it is the same one.
        ############################################
        # self.sent_direct_messages                #
        #------------------------------------------#
        # /direct_messages/sent.json               #
        ############################################
        messages = session.sent_direct_messages()
        self.assertTrue(isinstance(messages[0], DirectMessage))
        # The freshly sent message must be the newest item in the outbox.
        self.assertEqual(messages[0].id, message.id)
        ############################################
        # self.destroy_direct_message              #
        #------------------------------------------#
        # /direct_messages/destroy/{id}.json       #
        ############################################
        # This is an illegal call, since the id specified is invalid.
        self.assertRaises(
            IllegalCall,
            session.destroy_direct_message,
            FAKE_ID,
        )
        removed_message = session.destroy_direct_message(message.id)
        self.assertEqual(removed_message.id, message.id)
        # This is an illegal call, since you cannot remove the same direct
        # message twice.
        self.assertRaises(
            IllegalCall,
            session.destroy_direct_message,
            message.id,
        )
        ############################################
        # self.direct_messages                     #
        #------------------------------------------#
        # /direct_messages.json                    #
        ############################################
        # If there are direct messages in your inbox, we shall try to make sure
        # that it is a DirectMessage instance.  The inbox may legitimately be
        # empty, hence the guard.
        messages = session.direct_messages()
        if len(messages) != 0:
            self.assertTrue(isinstance(messages[0], DirectMessage))
    def search_api(self):
        """
        This function would test the search related APIs.

        Endpoints covered: /search.json and /1/user/search.json.
        Requires live network access.
        """
        # Initialize a session.
        session = Session(username=self.username, password=self.password)
        ############################################
        # self.search                              #
        #------------------------------------------#
        # /search.json                             #
        ############################################
        # Calling search with no query keyword at all is a plain Python
        # signature violation, hence TypeError rather than IllegalCall.
        self.assertRaises(
            TypeError,
            session.search,
        )
        # I hope I'm not a narcissist...
        search_result = session.search(TEST_ACCOUNT)
        self.assertTrue(isinstance(search_result, SearchResult))
        result = search_result.result[0]
        self.assertTrue(isinstance(result, SearchHit))
        ############################################
        # self.user_search                         #
        #------------------------------------------#
        # /1/user/search.json                      #
        ############################################
        search_result = session.user_search(TEST_ACCOUNT)
        self.assertTrue(isinstance(search_result, UserSearchResult))
        result = search_result.result[0]
        self.assertTrue(isinstance(result, UserSearchHit))
    def favorite_api(self):
        """
        This function would test favorite related APIs.

        Endpoints covered: /favorites/create/{id}.json,
        /favorites/{screen_name}.json and /favorites/destroy/{id}.json.
        Order-dependent: a status from TEST_ACCOUNT's timeline is
        favorited, verified, then un-favorited.  Requires network access.
        """
        # Initialize a session.
        session = Session(username=self.username, password=self.password)
        ############################################
        # self.create_favorite                     #
        #------------------------------------------#
        # /favorites/create/{id}.json              #
        ############################################
        # This is an IllegalCall, since the id is invalid.
        self.assertRaises(
            IllegalCall,
            session.create_favorite,
            FAKE_ID,
        )
        # We shall get a valid message id by looking for the first message in
        # some user's timeline.
        favorited_status = session.user_timeline(TEST_ACCOUNT)[0]
        # Now add this message as favourite.
        session.create_favorite(favorited_status.id)
        # Now we are ready to test the self.favorites API.
        ############################################
        # self.favorites                           #
        #------------------------------------------#
        # /favorites/{screen_name}.json            #
        ############################################
        # We shall find the first favorite message and compare the id.
        favorite_status = session.favorites()[0]
        self.assertTrue(isinstance(favorite_status, Status))
        self.assertEqual(favorite_status.id, favorited_status.id)
        # This is an UserNotFound, since the screen_name is invalid.
        self.assertRaises(
            UserNotFound,
            session.favorites,
            FAKE_SCREEN_NAME,
        )
        ############################################
        # self.destroy_favorite                    #
        #------------------------------------------#
        # /favorites/destroy/{id}.json             #
        ############################################
        # This is an illegal call, since the id specified is invalid.
        self.assertRaises(
            IllegalCall,
            session.destroy_favorite,
            FAKE_ID,
        )
        # This would work, restoring the account to its previous state.
        session.destroy_favorite(favorited_status.id)
    def status_api(self):
        """
        This function would test the status related APIs.

        Endpoints covered: /statuses/update.json,
        /statuses/user_timeline/{screen_name}.json,
        /statuses/show/{id}.json, /statuses/destroy/{id}.json,
        /statuses/home_timeline.json and /statuses/mentions.json.
        Timeline-refresh checks only run under self.fulltest because the
        server needs ~30s to reflect an update.  Requires network access.
        """
        # Initialize a session.
        session = Session(username=self.username, password=self.password)
        ############################################
        # self.update                              #
        #------------------------------------------#
        # /statuses/update.json                    #
        ############################################
        # This is an IllegalCall, since the message is too long (CN).
        self.assertRaises(
            IllegalCall,
            session.update,
            FAKE_MESSAGE_CN,
        )
        # This is an IllegalCall, since the message is too long (EN).
        self.assertRaises(
            IllegalCall,
            session.update,
            FAKE_MESSAGE_EN,
        )
        newstatus = session.update(TEST_MESSAGE_CN)
        self.assertTrue(isinstance(newstatus, Status))
        # TODO, when we have fully implemented reply and retweet, we have to
        # add more test case here.
        ################################################
        # self.user_timeline                           #
        #----------------------------------------------#
        # /statuses/user_timeline/{screen_name}.json   #
        ################################################
        # The following screen_name is invalid.
        self.assertRaises(
            UserNotFound,
            session.user_timeline,
            FAKE_SCREEN_NAME,
        )
        # Get the time line. I do not understand why it take so long to refresh
        # the timeline. 20 is not enough.
        # The following code would sometimes mysteriously fail.
        if self.fulltest:
            time.sleep(30)
            statuses = session.user_timeline()
            self.assertEqual(statuses[0].id, newstatus.id)
        ############################################
        # self.show_status                         #
        #------------------------------------------#
        # /statuses/show/{id}.json                 #
        ############################################
        # This is an IllegalCall, since the id is invalid.
        self.assertRaises(
            IllegalCall,
            session.show_status,
            FAKE_ID,
        )
        # The server strips trailing whitespace, hence the .strip().
        self.assertEqual(
            session.show_status(newstatus.id).text,
            TEST_MESSAGE_CN.strip(),
        )
        ############################################
        # self.destroy_status                      #
        #------------------------------------------#
        # /statuses/destroy/{id}.json              #
        ############################################
        # This is IllegalCall, since the message id is invalid.
        self.assertRaises(
            IllegalCall,
            session.destroy_status,
            FAKE_ID,
        )
        # This should work
        session.destroy_status(newstatus.id)
        # Get the time line. I do not understand why it take so long to refresh
        # the timeline. 20 is not enough. 30 would work most of the time.
        # The following code would sometimes mysteriously fail.
        if self.fulltest:
            time.sleep(30)
            statuses = session.user_timeline()
            self.assertNotEqual(statuses[0].id, newstatus.id)
        ############################################
        # self.home_timeline                       #
        # self.mentions                            #
        #------------------------------------------#
        # /statuses/home_timeline.json             #
        # /statuses/mentions.json                  #
        ############################################
        # Not much can be done for home timeline and mentions.
        statuses = session.home_timeline()
        self.assertTrue(isinstance(statuses[0], Status))
        statuses = session.mentions()
        self.assertTrue(isinstance(statuses[0], Status))
| Python |
#!/usr/bin/env python
#coding=utf-8
"""
Author: Xia Kai <xiaket@gmail.com>
Filename: utils.py
Type: Utility
Last modified: 2010-07-18 14:06
Description:
Utility functions for this project.
"""
import locale
import os
import urllib2
from datetime import datetime, timedelta
from functools import wraps
##############
# Exceptions #
##############
class AuthenticationError(RuntimeError):
    """Raised when the supplied username/password pair (or cookie file)
    cannot be used to authenticate against the service."""
class UserNotFound(ValueError):
    """Raised when an API call references a user that does not exist."""
class IllegalCall(ValueError):
    """Raised for a call the API refuses, e.g. removing another user's
    status or sending a message longer than 163 characters."""
class UnknownError(RuntimeError):
    """Raised when the server answers with an HTTP code we do not expect."""
##############
# Decorators #
##############
def require_login(func):
    """
    Decorator ensuring the session is logged in before *func* runs.

    If the session object (``kls``) is not logged in yet, ``kls._login()``
    is invoked first; the wrapped function is then called unchanged.
    Inspired by a similar decorator in Django.

    Fixed: the original wrapped the function through a redundant inner
    factory (``morewrapped``) and discarded the wrapped function's
    metadata; ``functools.wraps`` now preserves __name__/__doc__.
    """
    @wraps(func)
    def wrapped(kls, *args, **kwargs):
        # Authenticate lazily, on first API use rather than at creation.
        if not kls.logged_in:
            kls._login()
        return func(kls, *args, **kwargs)
    return wrapped
def check_screen_name(func):
    """
    Decorator validating the screen_name argument of the wrapped API call
    before the call itself is made.

    The screen name is taken from the ``screen_name`` keyword argument
    or, failing that, from the first positional argument.  When one is
    present, it is verified by requesting /users/show.json for that user;
    ``kls.request`` maps a 404 reply to UserNotFound.  When no screen
    name is supplied the check is skipped and the call proceeds.

    Fixed: dropped the redundant inner factory (``morewrapped``) and
    preserved the wrapped function's metadata with ``functools.wraps``.
    """
    @wraps(func)
    def wrapped(kls, *args, **kwargs):
        if 'screen_name' in kwargs:
            _screen_name = kwargs['screen_name']
        elif args:
            _screen_name = args[0]
        else:
            _screen_name = None
        if _screen_name:
            # Probe the user's profile; a 404 becomes UserNotFound.
            _url = "/users/show.json?screen_name=%s" % _screen_name
            _err_dict = {
                404: (UserNotFound, "Specified user does not exist."),
            }
            kls.request(_url, errors=_err_dict)
        return func(kls, *args, **kwargs)
    return wrapped
def check_status_id(func):
    """
    Decorator validating the status_id argument of the wrapped API call
    before the call itself is made.

    The status id is taken from the ``status_id`` keyword argument or,
    failing that, from the first positional argument.  When present, it
    is validated by fetching the status via ``kls.show_status``; when
    absent the check is skipped.

    Fixed: the original referenced ``_status_id`` unconditionally, so a
    call that supplied neither the keyword nor a positional id raised
    UnboundLocalError instead of reaching the wrapped function.  Also
    corrected the copy-pasted docstring that talked about screen_name.
    """
    @wraps(func)
    def wrapped(kls, *args, **kwargs):
        if 'status_id' in kwargs:
            _status_id = kwargs['status_id']
        elif args:
            _status_id = args[0]
        else:
            _status_id = None
        if _status_id is not None:
            kls.show_status(_status_id)
        return func(kls, *args, **kwargs)
    return wrapped
def parse_timestring(timestring):
    """
    Accept a time string, parse it and return a naive UTC datetime.

    The input looks like "Mon Apr 26 10:49:29 +0800 2010": an English
    ctime-style timestamp with the UTC offset spliced in before the
    year.  The offset (columns 20-25) is cut out, the remainder parsed
    with strptime, and the offset then subtracted to normalize to UTC.

    Fixed: only the hour field of the offset carried the sign, so a
    negative offset with non-zero minutes (e.g. "-0330") was treated as
    -3h +30m instead of -(3h30m).  The sign is now applied to the whole
    offset.

    >>> parse_timestring("Mon Apr 26 10:49:29 +0800 2010")
    datetime.datetime(2010, 4, 26, 2, 49, 29)
    >>> parse_timestring("Mon Apr 26 10:49:29 -0800 2010")
    datetime.datetime(2010, 4, 26, 18, 49, 29)
    >>> parse_timestring("Mon Apr 26 10:49:29 +0830 2010")
    datetime.datetime(2010, 4, 26, 2, 19, 29)
    >>> parse_timestring("Mon Apr 26 10:49:29 -0330 2010")
    datetime.datetime(2010, 4, 26, 14, 19, 29)
    """
    oldlocale = locale.getlocale(locale.LC_TIME)
    # strptime needs an English locale for the day/month names; the
    # locale name is spelled differently per platform.
    if os.name == 'nt':
        locale.setlocale(locale.LC_TIME, 'english')
    elif os.name == 'posix':
        locale.setlocale(locale.LC_TIME, 'en_US.UTF-8')
    # Remove the "+0800 " offset so strptime sees a plain
    # "%a %b %d %H:%M:%S %Y" string.
    strf = timestring[:20] + timestring[26:]
    created_at = datetime.strptime(strf, "%a %b %d %H:%M:%S %Y")
    # Restore whatever locale was active before we fiddled with it.
    locale.setlocale(locale.LC_TIME, oldlocale)
    delta = timestring[20:25]
    sign = -1 if delta[0] == '-' else 1
    hours = int(delta[1:3])
    minutes = int(delta[3:])
    return created_at - sign * timedelta(hours=hours, minutes=minutes)
class RedirectHandler(urllib2.HTTPRedirectHandler):
    """
    urllib2 redirect handler that turns t.163.com's "soft 404" redirect
    into a genuine HTTPError 404.
    """
    def http_error_302(self, req, fp, code, msg, headers):
        """
        For the moment, t.163.com would not return 404 status code correctly.
        Instead, it would return a 302 and redirect user to a page that will
        display 404 information. This would make web user happy, but we have to
        do extra to make our API elegant. Thus we have this handler to
        correctly raise 404 code.
        """
        # Let the stock handler follow the redirect first, so cookies and
        # the opener state stay consistent.
        result = urllib2.HTTPRedirectHandler.http_error_302(
            self, req, fp, code, msg, headers)
        # A redirect to the shared "notfound" page means the original
        # resource does not exist: surface it as a real 404.
        if headers['location'] == 'http://t.163.com/notfound':
            raise urllib2.HTTPError(req.get_full_url(), 404, msg, headers, fp)
        return result
if __name__ == "__main__":
    # Run the doctests embedded in this module (see parse_timestring).
    import doctest
    doctest.testmod()
| Python |
#!/usr/bin/env python
#coding=utf-8
"""
Author: Xia Kai <xiaket@gmail.com>
Filename: tests.py
Type: Unit test module
Last modified: 2010-05-21 16:51
Description:
This file contains unit test cases for this project.
"""
import os
import time
import unittest
from models import T163UserBase, User, Follower, Status, DirectMessage
from models import SearchResult, SearchHit, UserSearchResult, UserSearchHit
from session import T163Session as Session
from utils import AuthenticationError, UserNotFound, IllegalCall
# This FAKE_USER should not be available as an 163 account.
FAKE_USER = "xiakai.nju@gmail.com"
FAKE_PASS = "fakepass"
# This should be a valid account, preferably, at your disposal. If you do not
# have another account, at least find some account that is following you. and
# you are not following. The following test case would assume that the
# TEST_ACCOUNT is following the account that you give to __init__.
# TEST_ACCOUNT_ALT should be someone who is not following you.
TEST_ACCOUNT = "xiaket"
TEST_ACCOUNT_ALT = "zhangjiawei"
# This should be an invalid screen_name
FAKE_SCREEN_NAME = "aslfkqwlalsdlfalkw"
# This should be an invalid user/status id.
FAKE_ID = "398066"
# These are test message contents, variables with prefix FAKE are longer than
# 163 characters, while variables with prefix TEST lay within the constraint.
# (EN: 27 chars * 7 = 189 > 163 vs 27 * 6 = 162; CN: 9 chars * 19 = 171 > 163
# vs 9 * 18 = 162.)
FAKE_MESSAGE_EN = u"abcdefghijklmnopqrstuvwxyz "*7
TEST_MESSAGE_EN = u"abcdefghijklmnopqrstuvwxyz "*6
FAKE_MESSAGE_CN = u"中文消息长度测试 "*19
TEST_MESSAGE_CN = u"中文消息长度测试 "*18
class SessionTests(unittest.TestCase):
    """
    This class would test authentication related cases. In summary, we have:
    1. No username provided.
    2. Invalid username
    3. Invalid username/password pair.
    4. Invalid cookie file.
    5. Invalid cookie file/username pair.
    6. Valid cookie file removed after Session initilization.

    All test methods hit the live service and are order-dependent
    internally; they are selected by name via __init__'s testname.
    """
    def __init__(self, testname, username=None, password=None, fulltest=False):
        """
        Over default __init__ method to allow import of username and password.
        Setup username and passwords.
        Some tests are somehow dangerous, we do them with a fulltest flag.
        """
        super(SessionTests, self).__init__(testname)
        self.username = username
        self.password = password
        self.fulltest = fulltest
        if fulltest:
            print "performing full test."
    def session_init(self):
        """
        This function would test the __init__ method of Session.
        There sure be no cookie file both before and after this test case.
        """
        # If neither username nor cookiefile is given to __init__. An
        # AuthenticationError would no doubt be raised.
        self.assertRaises(AuthenticationError, Session)
        # Should raise an AuthenticationError if an invalid username is
        # provided.
        self.assertRaises(AuthenticationError, Session, username=FAKE_USER)
        # This should not raise Exceptions, since it is a valid
        # username/password pair.
        Session(username=self.username, password=self.password)
        # This should not raise Exceptions, even though neither username nor
        # password is provided: the cookie file created above is reused.
        Session(cookiefile="%s.txt" % self.username)
        # Since we have a valid cookie file now, anything given as username or
        # password would be ignored, so the following example would work.
        Session(cookiefile="%s.txt" % self.username, username=FAKE_USER)
        # Remove existing cookie file before we continue. The cookie should
        # have been created before.
        os.remove("%s.txt" % self.username)
        # This would cause AuthenticationError, since the cookiefile provided
        # does not exist.
        self.assertRaises(
            AuthenticationError,
            Session,
            cookiefile="%s.txt" % FAKE_USER,
        )
        # This would work, since a fallback username/password scheme would
        # work. But this would save cookie to "%s.txt" % FAKE_USER, instead of
        # "%s.txt" % self.username. So we shall remove the cookiefile after
        # this test.
        Session(
            cookiefile="%s.txt" % FAKE_USER,
            username=self.username,
            password=self.password,
        )
        os.remove("%s.txt" % FAKE_USER)
        # This should raise AuthenticationError, since it is an invalid
        # username/password pair.
        # CAUTION: This is dangerous. Frequent test of the following test would
        # lock your valid account up.
        if self.fulltest:
            self.assertRaises(
                AuthenticationError,
                Session,
                username=self.username,
                password=FAKE_PASS,
            )
    def relation_api(self):
        """
        This function would test the relationship related APIs.

        Endpoints covered: /friendships/show.json,
        /statuses/followers/{screen_name}.json,
        /statuses/friends/{screen_name}.json,
        /friendships/create/{screen_name}.json and
        /friendships/destroy/{screen_name}.json.
        """
        # Initialize a session.
        session = Session(username=self.username, password=self.password)
        ##########################
        # self.show_friendship   #
        #------------------------#
        # /friendships/show.json #
        ##########################
        # Calling Session.show_friendship(target_screen_name=FAKE_SCREEN_NAME)
        # should cause an exception.
        self.assertRaises(
            UserNotFound,
            session.show_friendship,
            target_screen_name=FAKE_SCREEN_NAME,
        )
        # This time, the target is valid, while the source_id is invalid. It
        # should also cause an exception.
        self.assertRaises(
            UserNotFound,
            session.show_friendship,
            source_id=FAKE_ID,
            target_screen_name=TEST_ACCOUNT,
        )
        # This time, no target is provided, this should cause another
        # exception.
        self.assertRaises(
            IllegalCall,
            session.show_friendship,
            source_id=FAKE_ID,
        )
        # This should work, giving the following relationship between
        # TEST_ACCOUNT and self.username.
        fo, foed = session.show_friendship(
            target_screen_name=TEST_ACCOUNT
        )
        self.assertTrue(
            foed,
            "TEST_ACCOUNT:%s should follow %s" % (TEST_ACCOUNT, self.username),
        )
        ##########################################
        # self.followers                         #
        #----------------------------------------#
        # /statuses/followers/{screen_name}.json #
        ##########################################
        # Use current user's screen_name by default, so this should equal.
        self.assertEqual(
            [user.id for user in session.followers()],
            [user.id for user in session.followers(session.screen_name)],
        )
        # This should never return an empty list, since at least TEST_ACCOUNT
        # is following self.username. Items in the list should be an instance
        # of Follower.
        followers = session.followers(session.screen_name)
        self.assertTrue(isinstance(followers[0], Follower))
        # Test again, TEST_ACCOUNT should be following self.username
        self.assertTrue(TEST_ACCOUNT in [u.screen_name for u in followers])
        # This should raise an exception, for the screen_name is invalid.
        self.assertRaises(
            UserNotFound,
            session.followers,
            screen_name=FAKE_SCREEN_NAME,
        )
        # The above should work for both positional arguments and keyword
        # arguments.
        self.assertRaises(
            UserNotFound,
            session.followers,
            FAKE_SCREEN_NAME,
        )
        ##########################################
        # self.friends                           #
        #----------------------------------------#
        # /statuses/friends/{screen_name}.json   #
        ##########################################
        # This should give a list of Follower objects.
        friends = session.friends(TEST_ACCOUNT)
        self.assertTrue(isinstance(friends[0], Follower))
        # Since an almost identical API is well tested(I hope!) above, I see no
        # point repeating it here.
        ############################################
        # self.create_friendship                   #
        #------------------------------------------#
        # /friendships/create/{screen_name}.json   #
        ############################################
        # As before, this function is decorated with check_screen_name.
        # So we shall get a UserNotFound with an invalid screen_name.
        self.assertRaises(
            UserNotFound,
            session.create_friendship,
            FAKE_SCREEN_NAME,
        )
        # Follow yourself would get an IllegalCall exception.
        self.assertRaises(
            IllegalCall,
            session.create_friendship,
            session.screen_name,
        )
        # Before we continue, we shall follow
        # This should work.
        session.create_friendship(TEST_ACCOUNT)
        ############################################
        # self.destroy_friendship                  #
        #------------------------------------------#
        # /friendships/destroy/{screen_name}.json  #
        ############################################
        # This should work.
        session.destroy_friendship(TEST_ACCOUNT)
        # It cannot be done twice:
        self.assertRaises(
            IllegalCall,
            session.destroy_friendship,
            TEST_ACCOUNT,
        )
        # Nor can we unfollow someone who do not exist.
        self.assertRaises(
            UserNotFound,
            session.destroy_friendship,
            FAKE_SCREEN_NAME,
        )
    def mail_api(self):
        """
        This function would test the direct message related APIs.

        Endpoints covered: /direct_messages/new.json,
        /direct_messages/sent.json, /direct_messages/destroy/{id}.json
        and /direct_messages.json.  TEST_ACCOUNT must be following the
        test user; TEST_ACCOUNT_ALT must not be.
        """
        # Initialize a session.
        session = Session(username=self.username, password=self.password)
        ############################################
        # self.new_direct_message                  #
        #------------------------------------------#
        # /direct_messages/new.json                #
        ############################################
        # This is an IllegalCall, since the receiver is invalid.
        self.assertRaises(
            IllegalCall,
            session.new_direct_message,
            FAKE_SCREEN_NAME,
            TEST_MESSAGE_CN,
        )
        # This is an IllegalCall, since the message is too long.
        self.assertRaises(
            IllegalCall,
            session.new_direct_message,
            TEST_ACCOUNT,
            FAKE_MESSAGE_CN,
        )
        # This is an IllegalCall, since the message is too long.
        self.assertRaises(
            IllegalCall,
            session.new_direct_message,
            TEST_ACCOUNT,
            FAKE_MESSAGE_EN,
        )
        # This is an IllegalCall, since you cannot send a mail to yourself.
        self.assertRaises(
            IllegalCall,
            session.new_direct_message,
            session.screen_name,
            TEST_MESSAGE_EN,
        )
        # This is an IllegalCall, since TEST_ACCOUNT_ALT is not following you.
        self.assertRaises(
            IllegalCall,
            session.new_direct_message,
            TEST_ACCOUNT_ALT,
            TEST_MESSAGE_EN,
        )
        # Finally, this should work.
        message = session.new_direct_message(TEST_ACCOUNT, TEST_MESSAGE_EN)
        self.assertTrue(isinstance(message, DirectMessage))
        # A direct message is sent to TEST_ACCOUNT, we shall retrieve the first
        # message in outbox and check if it is the same one.
        ############################################
        # self.sent_direct_messages                #
        #------------------------------------------#
        # /direct_messages/sent.json               #
        ############################################
        messages = session.sent_direct_messages()
        self.assertTrue(isinstance(messages[0], DirectMessage))
        self.assertEqual(messages[0].id, message.id)
        ############################################
        # self.destroy_direct_message              #
        #------------------------------------------#
        # /direct_messages/destroy/{id}.json       #
        ############################################
        # This is an illegal call, since the id specified is invalid.
        self.assertRaises(
            IllegalCall,
            session.destroy_direct_message,
            FAKE_ID,
        )
        removed_message = session.destroy_direct_message(message.id)
        self.assertEqual(removed_message.id, message.id)
        # This is an illegal call, since you cannot remove the same direct
        # message twice.
        self.assertRaises(
            IllegalCall,
            session.destroy_direct_message,
            message.id,
        )
        ############################################
        # self.direct_messages                     #
        #------------------------------------------#
        # /direct_messages.json                    #
        ############################################
        # If there are direct messages in your inbox, we shall try to make sure
        # that it is a DirectMessage instance.
        messages = session.direct_messages()
        if len(messages) != 0:
            self.assertTrue(isinstance(messages[0], DirectMessage))
    def search_api(self):
        """
        This function would test the search related APIs.

        Endpoints covered: /search.json and /1/user/search.json.
        """
        # Initialize a session.
        session = Session(username=self.username, password=self.password)
        ############################################
        # self.search                              #
        #------------------------------------------#
        # /search.json                             #
        ############################################
        # Missing the query keyword entirely is a Python signature
        # violation, hence TypeError rather than IllegalCall.
        self.assertRaises(
            TypeError,
            session.search,
        )
        # I hope I'm not a narcissist...
        search_result = session.search(TEST_ACCOUNT)
        self.assertTrue(isinstance(search_result, SearchResult))
        result = search_result.result[0]
        self.assertTrue(isinstance(result, SearchHit))
        ############################################
        # self.user_search                         #
        #------------------------------------------#
        # /1/user/search.json                      #
        ############################################
        search_result = session.user_search(TEST_ACCOUNT)
        self.assertTrue(isinstance(search_result, UserSearchResult))
        result = search_result.result[0]
        self.assertTrue(isinstance(result, UserSearchHit))
    def favorite_api(self):
        """
        This function would test favorite related APIs.

        Endpoints covered: /favorites/create/{id}.json,
        /favorites/{screen_name}.json and /favorites/destroy/{id}.json.
        """
        # Initialize a session.
        session = Session(username=self.username, password=self.password)
        ############################################
        # self.create_favorite                     #
        #------------------------------------------#
        # /favorites/create/{id}.json              #
        ############################################
        # This is an IllegalCall, since the id is invalid.
        self.assertRaises(
            IllegalCall,
            session.create_favorite,
            FAKE_ID,
        )
        # We shall get a valid message id by looking for the first message in
        # some user's timeline.
        favorited_status = session.user_timeline(TEST_ACCOUNT)[0]
        # Now add this message as favourite.
        session.create_favorite(favorited_status.id)
        # Now we are ready to test the self.favorites API.
        ############################################
        # self.favorites                           #
        #------------------------------------------#
        # /favorites/{screen_name}.json            #
        ############################################
        # We shall find the first favorite message and compare the id.
        favorite_status = session.favorites()[0]
        self.assertTrue(isinstance(favorite_status, Status))
        self.assertEqual(favorite_status.id, favorited_status.id)
        # This is an UserNotFound, since the screen_name is invalid.
        self.assertRaises(
            UserNotFound,
            session.favorites,
            FAKE_SCREEN_NAME,
        )
        ############################################
        # self.destroy_favorite                    #
        #------------------------------------------#
        # /favorites/destroy/{id}.json             #
        ############################################
        # This is an illegal call, since the id specified is invalid.
        self.assertRaises(
            IllegalCall,
            session.destroy_favorite,
            FAKE_ID,
        )
        # This would work
        session.destroy_favorite(favorited_status.id)
    def status_api(self):
        """
        This function would test the status related APIs.

        Endpoints covered: /statuses/update.json,
        /statuses/user_timeline/{screen_name}.json,
        /statuses/show/{id}.json, /statuses/destroy/{id}.json,
        /statuses/home_timeline.json and /statuses/mentions.json.
        Timeline-refresh assertions only run under self.fulltest.
        """
        # Initialize a session.
        session = Session(username=self.username, password=self.password)
        ############################################
        # self.update                              #
        #------------------------------------------#
        # /statuses/update.json                    #
        ############################################
        # This is an IllegalCall, since the message is too long.
        self.assertRaises(
            IllegalCall,
            session.update,
            FAKE_MESSAGE_CN,
        )
        # This is an IllegalCall, since the message is too long.
        self.assertRaises(
            IllegalCall,
            session.update,
            FAKE_MESSAGE_EN,
        )
        newstatus = session.update(TEST_MESSAGE_CN)
        self.assertTrue(isinstance(newstatus, Status))
        # TODO, when we have fully implemented reply and retweet, we have to
        # add more test case here.
        ################################################
        # self.user_timeline                           #
        #----------------------------------------------#
        # /statuses/user_timeline/{screen_name}.json   #
        ################################################
        # The following screen_name is invalid.
        self.assertRaises(
            UserNotFound,
            session.user_timeline,
            FAKE_SCREEN_NAME,
        )
        # Get the time line. I do not understand why it take so long to refresh
        # the timeline. 20 is not enough.
        # The following code would sometimes mysteriously fail.
        if self.fulltest:
            time.sleep(30)
            statuses = session.user_timeline()
            self.assertEqual(statuses[0].id, newstatus.id)
        ############################################
        # self.show_status                         #
        #------------------------------------------#
        # /statuses/show/{id}.json                 #
        ############################################
        # This is an IllegalCall, since the id is invalid.
        self.assertRaises(
            IllegalCall,
            session.show_status,
            FAKE_ID,
        )
        # The server strips trailing whitespace, hence the .strip().
        self.assertEqual(
            session.show_status(newstatus.id).text,
            TEST_MESSAGE_CN.strip(),
        )
        ############################################
        # self.destroy_status                      #
        #------------------------------------------#
        # /statuses/destroy/{id}.json              #
        ############################################
        # This is IllegalCall, since the message id is invalid.
        self.assertRaises(
            IllegalCall,
            session.destroy_status,
            FAKE_ID,
        )
        # This should work
        session.destroy_status(newstatus.id)
        # Get the time line. I do not understand why it take so long to refresh
        # the timeline. 20 is not enough. 30 would work most of the time.
        # The following code would sometimes mysteriously fail.
        if self.fulltest:
            time.sleep(30)
            statuses = session.user_timeline()
            self.assertNotEqual(statuses[0].id, newstatus.id)
        ############################################
        # self.home_timeline                       #
        # self.mentions                            #
        #------------------------------------------#
        # /statuses/home_timeline.json             #
        # /statuses/mentions.json                  #
        ############################################
        # Not much can be done for home timeline and mentions.
        statuses = session.home_timeline()
        self.assertTrue(isinstance(statuses[0], Status))
        statuses = session.mentions()
        self.assertTrue(isinstance(statuses[0], Status))
| Python |
#!/usr/bin/env python
#coding=utf-8
"""
Author: Xia Kai <xiaket@gmail.com>
Filename: session.py
Type: Class definitions
Last modified: 2010-07-18 14:10
Description:
Official APIs(2010.04.27)
-------------------------
/friendships/show.json done
/statuses/followers/{screen_name}.json?page={page} done
/statuses/friends/{screen_name}.json?page={page} done
/friendships/create/{screen_name}.json done
/friendships/destroy/{screen_name}.json done
/direct_messages/new.json done
/direct_messages/destroy/{id}.json done
/direct_messages.json?since_id={since_id}&count={count} done
/direct_messages/sent.json?since_id={since_id}&count={count} done
/search.json?q=xx&p=yy&t=zz done
/1/user/search.json?q=xx&p=yy&t=zz done
/statuses/home_timeline.json done
/statuses/mentions.json done
/statuses/user_timeline/{screen_name}.json done
/statuses/update.json done
/statuses/show/{id}.json done
/statuses/destroy/{id}.json done
/favorites/{screen_name}.json done
/favorites/create/{id}.json done
/favorites/destroy/{id}.json done
/account/verify_credentials.json done
Unofficial APIs
---------------
these url are known to be working on the web:
/account/recommend done
Features from some twitter client benchmark page which is helpful here:
Image upload future
Profile Views future
Twitter Trends future
Follower Blocking future
Evolution:
0.2c
----
Clean up json format and add block support.
0.3a
----
Image upload future
Twitter Trends future
Follower Blocking future
Profile Views future
"""
import cookielib
import json
import sys
import urllib2
from urllib import unquote, urlencode
from models import T163UserBase, User, Follower, Status, DirectMessage
from models import SearchResult, SearchHit, UserSearchResult, UserSearchHit
from utils import require_login, check_screen_name, check_status_id
from utils import RedirectHandler
from utils import AuthenticationError, UserNotFound, IllegalCall, UnknownError
# Base URL for all REST endpoints of the t.163.com microblog API.
API_HOST = "http://api.t.163.com"
class T163Session(object):
"""
A netease micro-blog API implementation in python.
"""
    def __init__(self, **kwargs):
        """
        Initialization method for this class.

        Keyword arguments (all optional, but username or cookiefile is
        required):
          username   -- full email address of the account.
          password   -- the account password.
          cookiefile -- path to an existing Mozilla-format cookie file.

        This init method would do the account authentication. This is not
        strictly required to use the APIs, but since most of them would
        require that the user is authenticated, I see no point not doing it
        outside the init method.
        If you do not provide a cookiefile, "<username>.txt" in the current
        directory is assumed.  If a valid cookie file exists, the password
        is not used at all.

        Raises AuthenticationError when neither a cookiefile nor a
        username is supplied.
        NOTE(review): construction triggers a network round-trip via
        self._init_user() (defined later in this class) -- confirm before
        instantiating in offline contexts.
        """
        self.username = kwargs.get('username', None)
        self.password = kwargs.get('password', None)
        self.cookiefile = kwargs.get('cookiefile', None)
        if not self.cookiefile:
            if not self.username:
                raise AuthenticationError
            else:
                # Default cookie location: "<username>.txt" in the cwd.
                self.cookiefile = "%s.txt" % self.username
        self.username_checked = False
        self.logged_in = False
        self.cookiejar = cookielib.MozillaCookieJar()
        # RedirectHandler (utils.py) converts the site's soft-404
        # redirect into a real HTTP 404.
        self.opener = urllib2.build_opener(
            urllib2.HTTPCookieProcessor(self.cookiejar),
            RedirectHandler(),
        )
        self.user = self._init_user()
        self.screen_name = self.user.screen_name
#####################################################
# Authentication functions. #
#####################################################
# These method do not need to be called explicitly. #
# They are called implicitly in __init__. #
#####################################################
def _has_local_cookie(self):
"""
We try to load the cookie file. If we can load it, then we are good to
go. Were there any exceptions, we consider otherwise.
"""
try:
self.cookiejar.load(self.cookiefile)
return True
except:
return False
def _check_username(self):
"""
Utility function to check whether the provided username is a valid
netease passport.
"""
_url = "http://t.163.com/account/passport/check"
_parameter = urlencode({'userName': self.username})
_response = urllib2.urlopen(_url, _parameter).readlines()[0]
_response = json.loads(_response)
if _response['status'] == '0':
self.username_checked = True
else:
_message = "User(%s) does not exist." % self.username
raise AuthenticationError, _message
def _login(self):
"""
login logic:
if we can find a local cookie:
set up certain variables.
quit this function, since we are done.
else:
check username if necessary
login.
write cookie.
set up flag.
The actual log process is a little dull, though.
"""
if self._has_local_cookie():
self.username_checked = True
self.logged_in = True
return
if not self.username_checked:
self._check_username()
_url = 'https://reg.163.com/logins.jsp'
_data = {
'username': self.username,
'password': self.password,
'savelogin': '1',
'url': API_HOST + '/session/first',
'product': 't',
'type': '1',
}
_parameter = urlencode(_data)
_f = self.opener.open(_url, _parameter)
def _find_next_url(lines):
"""
This function was used to tranlate junks like h into human
readable characters.
This is hardcode, I don't like this. So if you can do this the nice
way, using some official libraries in python, please tell me.
"""
for line in lines:
if line.find('<a href=') != -1:
linkline = line
break
# if the provided username and password is valid, we should see a
# redirection page here, if not, we shall see something like:
# <div><a href="http://reg.163.com/" target="_self"> </a></div>
if linkline.find('reg.163.com') != -1:
raise AuthenticationError
start = linkline.index("ref='") + 5
end = linkline.index("'><", start)
reallink = ''
for numstr in linkline[start:end].split('&#'):
if numstr == '':
continue
else:
hexstr = hex(int(numstr))
reallink += ('%' + hexstr.replace('0x', ''))
return unquote(reallink)
newurl = _find_next_url(_f.readlines())
self.opener.open(newurl).read()
self.cookiejar.save(self.cookiefile)
self.logged_in = True
@require_login
def _init_user(self):
"""
This function would retrieve user's screen_name from his/her homepage.
"""
#return User(self.json("/account/verify_credentials.json"))
return User(self.json("/user/info.json"))
###################
# HTTP functions. #
###################
def request(self, url, **kwargs):
"""
This function would handle all http communications.
parameters:
url: The url to be retrieved.(required)
data: HTTP POST data.(optional)
errors: user/method provided exception handler.(optional)
If data is not provided, we shall set it to None and do an HTTP GET
request, if otherwise, we shall do an HTTP POST with the data provided,
even if the data is an empty dictionary.
When a method have to handle HTTP error code, they send a dictionary
here, containing the error code to be treated and the exception to be
raised.
"""
# parse kwargs:
data = kwargs.get('data', None)
errors = kwargs.get('errors', {})
try:
if data != None:
# Do an HTTP POST if data is provided.
encoded_data = urlencode(data)
_file = self.opener.open(API_HOST + url, encoded_data)
else:
# If no data is provided, we use HTTP GET instead of
# HTTP POST.
_file = self.opener.open(API_HOST + url)
return _file.readlines()
except urllib2.HTTPError, error:
httpcode = error.getcode()
if (not errors) or (httpcode not in errors):
# no exception handler provided, or provided handler do not
# mean to handle this kind of http status code. We just raise
# an UnknownError.
raise UnknownError
else:
exception_type = errors[httpcode][0]
exception_message = errors[httpcode][1]
raise exception_type, exception_message
def json(self, url, **kwargs):
"""
For json file requests, we can use this function instead of
self.request, since only one line is responsed for these requests.
We move even further by returning a parsed object instead of a string.
"""
return json.loads(self.request(url, **kwargs)[0])
#######################
# shortcut functions. #
#######################
# done
def get_statuses(self, url, count, since_id=None, max_id=None, **kwargs):
"""
We frequently need to get a json file and parse it and return it as a
list of Status objects, so here we are.
"""
if count > 200 or count < 0:
count = 30
_url = url + "?count=%s" % count
if since_id:
_url += "&since_id=%s" % since_id
if max_id:
_url += "&max_id=%s" % max_id
status_list = []
for status_dict in self.json(_url, **kwargs):
status_list.append(Status(status_dict))
return status_list
def home_timeline_since(self, date_time):
"""
This function would return your home timeline since a date, which is
more friendly than the api provided below.
Accept a python datetime object as parameter, return every message
between now and then.
This may take some time, you are warned!
"""
pass
def user_timeline_since(self, screen_name, date_time):
"""
This function would return someone's timeline since a date, which is
more friendly than the api provided below.
Take a screen_name and a python datetime object as parameter,
return every message between now and then.
This may take some time, you are warned!
"""
pass
def user_home_timeline(self, screen_name):
"""
This function is usually very time consuming.
This function would first find who this user is following, and then
retrieve those people's statuses, and arrange them chronologically and
finally return them. We shall only retrieve the first page of those
people statuses.
"""
pass
# done
def i_am_following(self, screen_name):
"""
This function would return whether the current user is following some
one specified by screen_name.
"""
return show_friendship(target_screen_name=screen_name)[0]
# done
def i_am_followed_by(self, screen_name):
"""
This function would show whether the current user is followed by
someone specified by screen_name.
"""
return show_friendship(target_screen_name=screen_name)[1]
# done
def i_am_friends_with(self, screen_name):
"""
This function would show whether the current user and the user
specified by screen_name is following each other.
"""
return all(show_friendship(target_screen_name=screen_name))
def inbox(self):
"""
This function would return every direct message in the user's inbox,
this may take some time...
"""
pass
def outbox(self):
"""
This function would return every direct message sent by current user.
this may take some time...
"""
pass
def retweet(self, message_id):
"""
This function would retweet a message.
"""
pass
def reply(self, status, message_id, source=None):
"""
This function would reply a message.
It is tested that, in order to correctly reply a status, you have to
add an @ and the user's name at the front of your status.
"""
return self.update(status, source, message_id)
def myrecommend(self):
"""
This function would find who your friends are following.
"""
pass
###########################################################
## API specific functions: following ##
###########################################################
## /friendships/show.json ##
## /statuses/followers/{screen_name}.json?page={page} ##
## /statuses/friends/{screen_name}.json?page={page} ##
## /friendships/create/{screen_name}.json ##
## /friendships/destroy/{screen_name}.json ##
###########################################################
def show_friendship(self,
source_id=None, source_screen_name=None,
target_id=None, target_screen_name=None):
"""
This function would show the follow relationship between two people.
There are four possible of situations:
source unfo target, target unfo source: False, False
source fo target, target unfo source: True, False
source unfo target, target fo source: False, True
source fo target, target fo source: True, True
"""
_url = "/friendships/show.json?"
if (not source_id) and (not source_screen_name):
_url += "source_id=%s" % self.user.id
elif source_id:
_url += "source_id=%s" % source_id
else:
_url += "source_screen_name=%s" % source_screen_name
if (not target_id) and (not target_screen_name):
_message = "No target specified!"
raise IllegalCall, _message
elif target_id:
_url += "&target_id=%s" % target_id
else:
_url += "&target_screen_name=%s" % target_screen_name
_message = "Specified user does not exist."
_err_dict = {
404: (UserNotFound, _message),
}
_dict = self.json(_url, errors=_err_dict)
return _dict['source']['following'], _dict['source']['followed_by']
@check_screen_name
def followers(self, screen_name=None, page=None):
"""
This function would show the followers of someone specified by
screen_name.
This function would return a list of Follower objects.
Warning:
Current server implementation would return current user's followers
if the request user specified by screen_name is not found, it's a bit
odd. We worked this around by checking the screen_name before doint the
real request.
"""
if not screen_name:
screen_name = self.screen_name
if page:
_url = "/statuses/followers/%s.json?page=%s" % (screen_name, page)
else:
_url = "/statuses/followers/%s.json" % screen_name
followers_list = []
for item in self.json(_url):
followers_list.append(Follower(item))
return followers_list
@check_screen_name
def friends(self, screen_name=None, page=None):
"""
This function would show the people someone is following.
This function would return a list of Follower objects.
Warning:
Current server implementation would return current user's friends
if the request user specified by screen_name is not found, it's a bit
odd. We worked this around by checking the screen_name before doint the
real request.
"""
if not screen_name:
screen_name = self.screen_name
if page:
_url = "/statuses/friends/%s.json?page=%s" % (screen_name, page)
else:
_url = "/statuses/friends/%s.json" % screen_name
friends_list = []
for item in self.json(_url):
friends_list.append(Follower(item))
return friends_list
@check_screen_name
def create_friendship(self, screen_name):
"""
Follow someone specified by screen_name.
"""
_url = "/friendships/create/%s.json" % screen_name
_message1 = "User not logged in, but why should that happen"
_message3 = "You are blocked by this person, or I think you know why."
_err_dict = {
401: (UnknownError, _message1),
403: (IllegalCall, _message3),
}
# This request require a POST method, so we send an empty dictionary.
self.request(_url, data={}, errors=_err_dict)
@check_screen_name
def destroy_friendship(self, screen_name):
"""
Un-follow someone.
"""
_url = "/friendships/destroy/%s.json" % screen_name
_message1 = "User not logged in, but why should that happen"
_message3 = "Not following this person."
_err_dict = {
401: (UnknownError, _message1),
403: (IllegalCall, _message3),
}
self.request(_url, data={}, errors=_err_dict)
######################################################################
## API specific functions: directmessage ##
######################################################################
## /direct_messages/new.json ##
## /direct_messages/destroy/{id}.json ##
## /direct_messages.json?since_id={since_id}&count={count} ##
## /direct_messages/sent.json?since_id={since_id}&count={count} ##
######################################################################
def new_direct_message(self, screen_name, text):
"""
Send a direct message to some user specified by user's screen_name.
This function would return the sent message as a DirectMessage object.
Server would handle this API's 403 correctly.
"""
if type(text) == type("str"):
text = text.decode("UTF-8")
_url = "/direct_messages/new.json"
if len(text) > 163 or len(text) == 0:
_message = "Your message is either too long or too short."
raise IllegalCall, _message
_dict = {'text': text.encode("UTF-8"), 'user': screen_name}
_message1 = "User not logged in, or illegal message length."
_message3 = "You are not follow by receiver, or I think you know why."
_err_dict = {
401: (UnknownError, _message1),
403: (IllegalCall, _message3),
}
_message = self.json(_url, data=_dict, errors=_err_dict)
return DirectMessage(_message)
def destroy_direct_message(self, message_id):
"""
Delete a direct message specified by its id.
"""
_message1 = "User not logged in, but why should that happen"
_message3 = "Message id is invalid."
_err_dict = {
401: (UnknownError, _message1),
403: (IllegalCall, _message3),
}
_url = "/direct_messages/destroy/%s.json" % message_id
return DirectMessage(self.json(_url, data={}, errors=_err_dict))
def direct_messages(self, since_id=None, count=None):
"""
Inbox for current user's direct messages.
"""
_url = '/direct_messages.json?since_id=%s&count=%s' % (since_id, count)
_message1 = "User not logged in, but why should that happen"
_err_dict = {
401: (UnknownError, _message1),
}
direct_message_list = []
for item in self.json(_url, errors=_err_dict):
direct_message_list.append(DirectMessage(item))
return direct_message_list
def sent_direct_messages(self, since_id=None, count=None):
"""
Outbox for current user's direct messages.
"""
_url = '/direct_messages/sent.json?since_id=%s&count=%s' % (since_id, count)
_message1 = "User not logged in, but why should that happen"
_err_dict = {
401: (UnknownError, _message1),
}
direct_message_list = []
for item in self.json(_url, errors=_err_dict):
direct_message_list.append(DirectMessage(item))
return direct_message_list
######################################################################
## API specific functions: searching ##
######################################################################
## /search.json?q=xx&p=yy&t=zz ##
## /1/user/search.json?q=xx&p=yy&t=zz ##
######################################################################
def search(self, query, page=None, type=None):
"""
Search statuses by keyword.
q is for query word, p is for page.
"""
if not type:
_url = '/search.json?q=%s&t=recent' % query
else:
_url = '/search.json?q=%s&t=%s' % (query, type)
if page:
_url += '&p=%s' % page
return SearchResult(self.json(_url))
def user_search(self, query, page=None):
"""
Search username by keyword.
"""
_url = '/1/user/search.json?q=%s' % query
if page:
_url += '&p=%s' % page
return UserSearchResult(self.json(_url))
##########################################################
## API specific functions: status ##
##########################################################
## /statuses/home_timeline.json ##
## /statuses/mentions.json ##
## /statuses/user_timeline/{screen_name}.json ##
## /statuses/update.json ##
## /statuses/show/{id}.json ##
## /statuses/destroy/{id}.json ##
##########################################################
def home_timeline(self, count=30, since_id=None, max_id=None):
"""
Return the statuses on your homepage when you are using the web.
"""
_url = "/statuses/home_timeline.json"
return self.get_statuses(_url, count, since_id, max_id)
def mentions(self, count=30, since_id=None, max_id=None):
"""
This function would return a list of status objects, which is the
messages mentioned the current user.
"""
_url = "/statuses/mentions.json?count=%s"
return self.get_statuses(_url, count, since_id)
def user_timeline(self, screen_name=None, count=30, since_id=None):
"""
This function would return a user's timeline, including his/her
statuses, mentions and retweets.
"""
if not screen_name:
screen_name = self.screen_name
_url = "/statuses/user_timeline/%s.json" % screen_name
_message = "Specified user does not exist."
_err_dict = {
404: (UserNotFound, _message),
}
return self.get_statuses(_url, count, since_id, errors=_err_dict)
def update(self, status, source=None,
in_reply_to_status_id=None, retweet_status_id=None):
"""
Post a new status.
"""
# Fix possible type error here.
if type(status) == type("str"):
status = status.decode("UTF-8")
_url = "/statuses/update.json"
if len(status) > 163:
_message = "Your message is too long..."
raise IllegalCall, _message
status_dict = {'status': status.encode("UTF-8")}
if source:
status_dict['source'] = source
else:
status_dict['source'] = "WEB"
if in_reply_to_status_id:
status_dict['in_reply_to_status_id'] = in_reply_to_status_id
if retweet_status_id:
status_dict['retweet_status_id'] = retweet_status_id
_message = "Your message is too long..."
_err_dict = {
403: (IllegalCall, _message),
500: (UnknownError, "Server error?"),
}
return Status(self.json(_url, data=status_dict, errors=_err_dict))
def show_status(self, status_id):
"""
This function would return a status object specified by its id.
"""
_url = "/statuses/show/%s.json" % status_id
_err_dict = {
404: (IllegalCall, "Specified status not found."),
}
return Status(self.json(_url, errors=_err_dict))
def destroy_status(self, status_id):
"""
Delete a status specified by an id.
"""
_url = "/statuses/destroy/%s.json" % status_id
_message = "Specified status do not exist, or you do not own it."
_err_dict = {
404: (IllegalCall, _message),
}
self.request(_url, data={}, errors=_err_dict)
######################################################################
## API specific functions: favorites ##
######################################################################
## /favorites/{screen_name}.json ##
## /favorites/create/{id}.json ##
## /favorites/destroy/{id}.json ##
######################################################################
@check_screen_name
def favorites(self, screen_name=None, count=30, since_id=None):
"""
This function would return a list of status objects, which is the
messages collected by the current user.
Similar to self.followers, server would treat this request incorrectly.
So we are in need of the check_screen_name decorator again.
"""
if not screen_name:
screen_name = self.screen_name
_url = "/favorites/%s.json" % screen_name
return self.get_statuses(_url, count, since_id)
def create_favorite(self, status_id):
"""
Add a status to favorite specified by an id.
"""
_url = "/favorites/create/%s.json" % status_id
_err_dict = {
404: (IllegalCall, "Specified status is not found."),
}
self.request(_url, data={}, errors=_err_dict)
@check_status_id
def destroy_favorite(self, status_id):
"""
Delete a status from favorites specified by an id.
Since the server would return an empty list no matter what id you send
to it, we need to check the status's id manually before destroy the
favorites relationship.
"""
_url = "/favorites/destroy/%s.json" % status_id
self.request(_url, data={})
#############################################################
## API specific functions: misc ##
#############################################################
## /account/recommend ##
#############################################################
def recommend(self):
"""
The url used in this function would return a json containing recommend
users for the current user.
This function would return a list of User objects recommend for the
current user to follow.
"""
userlist = self.json("/account/recommend")['userlist']
Userlist = []
for userdict in userlist:
user = User(userdict)
Userlist.append(user)
return Userlist
# ---- snippet boundary (extraction artifact "| Python |" removed) ----
#!/usr/bin/env python
#coding=utf-8
"""
Author: Xia Kai <xiaket@gmail.com>
Filename: __init__.py
Type: Module meta information holder
Last modified: 2010-05-16 20:44
Description:
    Package metadata only; no runtime logic lives here.
"""
# Standard dunder metadata for introspection/packaging tools.
__author__ = "xiaket"
__version__ = "0.2b"
# ---- snippet boundary (extraction artifact "| Python |" removed) ----
import pygtk
pygtk.require('2.0')
import gtk
class TreeViewColumnExample(object):
    """PyGTK demo: one ListStore displayed through two TreeViewColumns."""

    # Window close handler: quit the GTK main loop.  Returning False lets
    # GTK continue with the default destroy behaviour.
    def delete_event(self, widget, event, data=None):
        gtk.main_quit()
        return False

    def __init__(self):
        # Create a new top-level window
        self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        self.window.set_title("TreeViewColumn Example")
        self.window.connect("delete_event", self.delete_event)
        # Model with four columns: label text, stock-icon id, description
        # text, and a boolean used to toggle the background colour of the
        # second column's cell.
        self.liststore = gtk.ListStore(str, str, str, 'gboolean')
        # create the TreeView using liststore
        self.treeview = gtk.TreeView(self.liststore)
        # create the TreeViewColumns to display the data
        self.tvcolumn = gtk.TreeViewColumn('Pixbuf and Text')
        self.tvcolumn1 = gtk.TreeViewColumn('Text Only')
        # add rows: [label, stock id, description, background-set flag]
        self.liststore.append(['Open', gtk.STOCK_OPEN, 'Open a File', True])
        self.liststore.append(['New', gtk.STOCK_NEW, 'New File', True])
        self.liststore.append(['Print', gtk.STOCK_PRINT, 'Print File', False])
        # add columns to treeview
        self.treeview.append_column(self.tvcolumn)
        self.treeview.append_column(self.tvcolumn1)
        # create CellRenderers to render the data
        self.cellpb = gtk.CellRendererPixbuf()
        self.cell = gtk.CellRendererText()
        self.cell1 = gtk.CellRendererText()
        # set background color property on each renderer
        self.cellpb.set_property('cell-background', 'yellow')
        self.cell.set_property('cell-background', 'cyan')
        self.cell1.set_property('cell-background', 'pink')
        # pack the cells into the columns - two renderers in the first
        self.tvcolumn.pack_start(self.cellpb, False)
        self.tvcolumn.pack_start(self.cell, True)
        self.tvcolumn1.pack_start(self.cell1, True)
        # map model columns onto renderer properties
        self.tvcolumn.set_attributes(self.cellpb, stock_id=1)
        self.tvcolumn.set_attributes(self.cell, text=0)
        self.tvcolumn1.set_attributes(self.cell1, text=2,
            cell_background_set=3)
        # make treeview searchable
        self.treeview.set_search_column(0)
        # Allow sorting on the first column only
        self.tvcolumn.set_sort_column_id(0)
        # Allow drag and drop reordering of rows
        self.treeview.set_reorderable(True)
        self.window.add(self.treeview)
        self.window.show_all()
def main():
    """Run the GTK main loop until gtk.main_quit() is called."""
    gtk.main()

if __name__ == "__main__":
    # BUG FIX: the last line carried a "| Python |" extraction artifact
    # that made the file unparseable; removed.
    tvcexample = TreeViewColumnExample()
    main()
# MySQL for Python
import MySQLdb
db = MySQLdb.connect (
host = 'localhost',
user = 'root',
passwd = '',
db = 'db_1clic2learn'
#bd = 'mysql'
)
cursor = db.cursor()
cursor.execute('SELECT * FROM db.PL_SQLINJECTION')
result = cursor.fetchall()
if result:
for z in result:
print z | Python |
import sys
import time, tkMessageBox
import Controller
try:
import pygtk
pygtk.require("2.0")
except:
pass
try:
import gtk
import gtk.glade
except:
sys.exit(1)
########################################################################
class JanelaPrincipal:
def __init__(self):
# Cria uma nova janela (window)
self.janela = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.janela.set_position(gtk.WIN_POS_CENTER)
self.janela.set_title('Scanner 1Clic2Learn' )
self.janela.set_size_request(1050, 700)
self.janela.set_resizable(False)
self.janela.set_icon_name('1Clic2Learn')
self.janela.set_icon_from_file('./1Clic2Learn-3-Icon.ico')
# Adicionando os wigets a janela "
self.fixed = gtk.Fixed()
self.janela.add(self.fixed)
self.fixed.put(gtk.Label('URL da Aplicacao: '), 50, 30)
self.txtURL = gtk.Entry()
self.txtURL.set_size_request(650, 30)
self.fixed.put(self.txtURL, 200, 30)
self.btExplorar = gtk.Button('Explorar' )
self.btExplorar.set_size_request(100, 30)
self.btExplorar.connect("clicked", self.IniciarExploracao)
self.fixed.put(self.btExplorar, 900, 30)
self.fixed.put(gtk.Label('Vulnerabilidades: ' ), 50, 90)
#criando combo box
self.ListVuln = gtk.ListStore(int,str)
self.ListVuln.append([1, "SQL Injection"])
self.ListVuln.append([2,"Cross-Site Scripiting"])
self.ComboVul = gtk.combo_box_new_with_model_and_entry (self.ListVuln)
self.ComboVul.set_entry_text_column(1)
self.ComboVul.connect("changed", self.on_name_combo_changed)
self.ComboVul.set_size_request(300,30)
self.ComboVul.set_active(0)
self.fixed.put(self.ComboVul, 200, 90)
#check box para criterio de parada
self.checkParada = gtk.CheckButton('Parar ao encontrar a primeira falha')
self.checkParada.set_size_request(300, 30)
self.checkParada.set_active(1)
self.fixed.put(self.checkParada, 600, 90)
##############################RESULTADO###########################################################
# Criando a janela para receber o resultado dos ataques.
self.scrollwinResult = gtk.ScrolledWindow()
self.scrollwinResult.set_size_request(535,500)
self.scrollwinResult.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.listResult = gtk.ListStore(str, str, str, str, str, 'gboolean')
# Criando Treeview com uma lista
self.treeview = gtk.TreeView(self.listResult)
self.scrollwinResult.add(self.treeview)
self.fixed.put(self.scrollwinResult,50,150)
# Criando cabecalho das colunas
self.tvcolumn = gtk.TreeViewColumn('Situacao')
self.tvcolumn.set_alignment(xalign=0.5)
self.tvcolumn1 = gtk.TreeViewColumn('Criticidade')
self.tvcolumn1.set_alignment(xalign=0.5)
self.tvcolumn2 = gtk.TreeViewColumn('URL')
self.tvcolumn2.set_alignment(xalign=0.5)
self.tvcolumn3 = gtk.TreeViewColumn('Componente Testado')
self.tvcolumn3.set_alignment(xalign=0.5)
# aadiciona colunas na treeview/tabela
self.treeview.append_column(self.tvcolumn)
self.treeview.append_column(self.tvcolumn1)
self.treeview.append_column(self.tvcolumn2)
self.treeview.append_column(self.tvcolumn3)
# criando as cedulas
self.cellFalha = gtk.CellRendererText()
self.cellCrit = gtk.CellRendererText()
self.cellCrit.set_alignment(xalign=0.5, yalign=0.5)
self.cellComp = gtk.CellRendererText()
self.cellTest = gtk.CellRendererText()
# set cor de fundo
self.cellFalha.set_property("foreground", "red")
# adicionando cedulas nas colunas
self.tvcolumn.pack_start(self.cellFalha, True)
self.tvcolumn1.pack_start(self.cellCrit, True)
self.tvcolumn2.pack_start(self.cellComp, True)
self.tvcolumn3.pack_start(self.cellTest, True)
self.tvcolumn.set_attributes(self.cellFalha, text=0)
self.tvcolumn1.set_attributes(self.cellCrit, text=1)
self.tvcolumn2.set_attributes(self.cellComp, text=2)
self.tvcolumn3.set_attributes(self.cellTest, text=3)
# setando opcao para pesquisa
self.treeview.set_search_column(0)
# Permitindo Ordenacao nas colunas
self.tvcolumn.set_sort_column_id(0)
self.tvcolumn1.set_sort_column_id(0)
self.tvcolumn2.set_sort_column_id(0)
self.tvcolumn3.set_sort_column_id(0)
self.LabelInfo = gtk.Label()
self.LabelInforServer = gtk.Label()
self.LabelResultado = gtk.Label()
self.LabelMitigacao = gtk.Label()
self.LabelInforServer.set_markup("<b>Informacoes do Servidor:</b>")
self.fixed.put(self.LabelInforServer, 600, 130)
self.LabelResultado.set_markup("<b>Resultados:</b>")
self.fixed.put(self.LabelResultado, 50, 130)
self.LabelMitigacao.set_markup("<b>Mitigacao:</b>")
self.fixed.put(self.LabelMitigacao, 600, 360)
#CRIANDO CAMPO DE INFORMACOES DO SERVIDOR
self.scrollwinServer = gtk.ScrolledWindow()
self.scrollwinServer.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.InfoServer = gtk.TextBuffer()
self.TextViewServer = gtk.TextView(self.InfoServer)
self.TextViewServer.set_editable(False)
self.TextViewServer.set_cursor_visible(False)
self.TextViewServer.set_wrap_mode(gtk.WRAP_WORD)
self.TextViewServer.set_size_request(400,170)
self.scrollwinServer.add(self.TextViewServer)
self.fixed.put(self.scrollwinServer, 600, 150)
#CRIANDO CAMPO DE INFORMACOES DE MITIGACAO
self.scrollwinMit = gtk.ScrolledWindow()
self.scrollwinMit.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.TextBufferMitigacao = gtk.TextBuffer()
self.TextViewMit = gtk.TextView(self.TextBufferMitigacao)
self.TextViewMit.set_editable(False)
self.TextViewMit.set_cursor_visible(False)
self.TextViewMit.set_wrap_mode(gtk.WRAP_WORD)
self.TextViewMit.set_size_request(400,270)
self.scrollwinMit.add(self.TextViewMit)
self.fixed.put(self.scrollwinMit,600,380)
#conectando a janela aos destrutores para finalizar o programa
self.janela.connect('delete_event', self.delete_event)
self.janela.connect('destroy', self.destroy)
self.janela.show_all()
def ShowError(self, title, mensagem):
dialog = gtk.MessageDialog(parent=self.janela,flags=gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
type=gtk.MESSAGE_INFO, buttons=gtk.BUTTONS_OK,
message_format=mensagem)
dialog.set_title(title)
dialog.set_position(gtk.WIN_POS_CENTER_ALWAYS)
# Exibe a caixa
dialog.run()
dialog.destroy()
def IniciarExploracao(self, widget, data=None):
Inicial= time.time()
print "Start : %s" %Inicial
#LIMPANDO AS VARIAVEIS DA JANELA
self.TextBufferMitigacao.set_text("")
self.listResult.clear()
#self.progressbar.destroy()
self.InfoServer.set_text("")
self.selc = self.on_name_combo_changed(self.ComboVul)
#VERIFICANDO SE EXISTE URL
if self.txtURL.get_text() == "":
# EXIBINDO UMA POPUP DE ALERTA
self.ShowError('ALERTA', 'ATENCAO: Favor preencher o campo URL da Aplicacao!')
#VERIFICANDO SE FOI SELECIONADA ALGUMA OPCAO
elif (self.selc > 2) or (self.selc == 999):
self.InfoServer.set_text("")
self.listResult.clear()
else:
# Utilizando a classe CONTROLLER
self.control = Controller.Controller(self.txtURL.get_text())
self.control.select_Plugin(self.selc,self.checkParada.state) #envia o plugin e o criterio de parada(1 - para no primeiro)
self.teste_url = []
self.teste_url = self.control.check_URL()
if len(self.teste_url) == 2: # se a lista tiver 2 colunas entao ocorreu um erro
self.ShowError(str(self.teste_url[0]), str(self.teste_url[1]))
else:
#INSERIR AS INFORMACOES DO SERVIDOR
self.a = []
self.a.append(self.control.get_Server_Info())
self.b = str()
for n in self.a:
self.b = (str(n))
self.InfoServer.set_text(self.b)
# ADICIONA O RESULTADO DO ATAQUE NA LISTA
self.resultado = []
self.resultado = self.control.send_Exploiter()
try:
if len(self.resultado[0]) == 2 and self.resultado[0] == None:
self.ShowError(str(self.resultado[0]), str(self.resultado[1]))
except:
pass
else:
for res in self.resultado:
self.listResult.append(res)
#AQUI deve passar a lista da Controller
self.treeview.set_model(self.listResult)
TempoFinal = round (time.time() - Inicial, 2)
print TempoFinal
self.ShowError("SUCESSO!", "Exploração realizado com sucesso! \nTempo de Execução: " + str(TempoFinal) + ' segundos')
#Exibindo o texto de mitigacao quando selecionar o resultado!
self.selecao = self.treeview.get_selection()
self.selecao.connect('changed', self.on_select_change)
self.janela.show_all()
# Retornando False (falso) nesta funcao o GTK ira emitir o sinal de "destroy". Se voce retornar True
# (verdadeiro),significa que voce nao quer que a janela seja fechada
def delete_event(self, widget, event, data=None):
print "Programa encerrado!"
return False
#FUNCAO CRIADA PARA VERIFICAR O ITEM SELECIONADO NA TREEVIEW SELECIONADA
def on_select_change(self, widget):
m, itr = widget.get_selected()
if itr:
#Exibe o item da posicao 4 - mitigacao se for vulnerável
if m[itr][5]:
self.TextBufferMitigacao.set_text(m[itr][4])
self.scrollwinMit.show_all()
else:
self.TextBufferMitigacao.set_text("")
self.scrollwinMit.show_all()
def on_name_combo_changed(self, combo):
tree_iter = combo.get_active_iter()
model = combo.get_model()
if tree_iter == None:
self.ShowError('ATENCAO','Nenhuma vulnerabilidade foi selecionada!')
return 999
else:
row_id, name = model[tree_iter]
return row_id
# Outro retorno
def destroy(self, widget, data=None):
gtk.main_quit()
def main(self):
gtk.main()
# When run directly (or handed to the interpreter as an argument), build
# the main window and enter the GTK loop.
if __name__ == "__main__":
    # BUG FIX: the last line carried a "| Python |" extraction artifact
    # that made the file unparseable; removed.
    Janela = JanelaPrincipal()
    Janela.main()
#!/usr/bin/env python
import urllib,requests,urllib2,socket,mechanize
from urlparse import urlsplit,urlparse,urlunparse,urljoin
from Data import Data_Access
########################################################################
# CLASSE PLUGINSQL FILHO DE PLUGINS
class PluginSQL (object):
"""
Responsavel por montar e analisar ataques de Injecao de Comandos SQL
"""
#----------------------------------------------------------------------
    def __init__(self):
        """
        Constructor.

        Binds the database accessor for the SQL-injection payload table
        and sets the static metadata describing this vulnerability class.
        """
        # Project DAO bound to the table holding SQL-injection payloads,
        # mitigations and expected responses.
        self.data_Access = Data_Access(1, "PL_SQLINJECTION")
        # Human-readable vulnerability name and its severity rating.
        self.attack_Name = "SQL Injection"
        self.attack_Criticity = "High"
#----------------------------------------------------------------------
def get_Attack (self):
"""
Retorna os ataques que estao no Banco de Dados
"""
return self.data_Access.getAttack()
#----------------------------------------------------------------------
def get_Mitigation (self):
"""
Retorna as mitigacoes que estao no Banco de Dados
"""
return self.data_Access.getMitigation()
#----------------------------------------------------------------------
    def get_Impact (self):
        """
        Return the criticity/impact level of this vulnerability class
        (e.g. "High").
        """
        # Note: returns attack_Criticity, not the vulnerability name --
        # the original docstring described the wrong attribute.
        return self.attack_Criticity
#----------------------------------------------------------------------
def get_Response (self):
"""
Retorna as respostas que estao no Banco de Dados
"""
return self.data_Access.getResponse()
#----------------------------------------------------------------------
def get_Vulnerability (self):
"""
Retorna o nome da vulnerabilidade
"""
return self.attack_Name
#----------------------------------------------------------------------
def get_Attack_Monted (self, url):
"""
Retorna a URL concatenada com o ataque
"""
self.atack_monted = []
for atack in self.getAttack():
self.atack_monted.append(url + ''.join(atack))
return self.atack_monted
#----------------------------------------------------------------------
def mount_URL_Attack (self, url):
"""
Ataque via URL GET
"""
self.url_attack_parse = urlparse(url)
self.url_parse_base = self.url_attack_parse.scheme + "://" + self.url_attack_parse.netloc + self.url_attack_parse.path + "?"
self.url_atributos_ataques = []
self.atributo_ataque = []
# Pega todos os atributos da URL e os separa em uma lista
self.urls_att = []
self.urls_att.append('&'.rpartition(self.url_attack_parse.query))
#----------------------------------------------------------------------
def gen_Form_Attack(self, list_forms, url):
""""
Realiza uma chamada ao banco de dados para obter a lista de ataques e os inclui nos formularios.
LIST_FORMS: Lista contendo os nomes dos formularios presentes na pagina testada
URL: Endereco URL testado
"""
self.list_forms = []
self.list_forms = list_forms
self.list_forms_ataques = []
self.aux = []
for ataque in self.get_Attack():
# Atribui os ataques a lista de formularios enviada como parametro
for formulario in self.list_forms:
self.aux.append([formulario,''.join(ataque)])
self.list_forms_ataques.append([url, self.aux])
self.aux = []
# Retorna uma lista de formularios ja contendo os ataques
return self.list_forms_ataques
#----------------------------------------------------------------------
def analisa_Resultado (self, html, url_ataque, atributo):
"""
Analisa o codigo HTML da aplicacao, apos o ataque desferido pela classe Exploiter.
HTML: Codigo-fonte resultante do ataque; URL_ATAQUE: Endereco URL da aplicacao testada; ATRIBUTO: Variavel que sofreu o teste
"""
self.html = str(html)
self.attack_Result = []
# Verifica no codigo-fonte HTML a presenca de Strings Error Based SQL Injection
for resp in self.get_Response():
# Existe Strings, entao eh vulneravel
if self.html.find(''.join(resp)) > 0:
# Concatena as informacoes retorno
self.attack_Result.append('VULNERAVEL')
self.attack_Result.append(self.get_Impact())
self.attack_Result.append(url_ataque)
self.attack_Result.append(atributo)
self.attack_Result.append(self.get_Mitigation())
# 1 = Ataque funcionou
self.attack_Result.append(True)
# Nao existe Strings, logo nao eh vulneravel a Error Based SQL Injection
else:
# Concatena as informacoes retorno
self.attack_Result.append('NAO VULNERAVEL')
self.attack_Result.append(self.get_Impact())
self.attack_Result.append(url_ataque)
self.attack_Result.append(atributo)
self.attack_Result.append(self.get_Mitigation())
# 0 = Ataque nao funcionou
self.attack_Result.append(False)
return self.attack_Result
#----------------------------------------------------------------------
def gerar_Ataques (self, url):
"""
Recebe por parametro uma lista de URLs com atributos e monta uma lista de ataques em cima de cada atributo contido na URL.
"""
# Define a lista de URLs
self.URL_List_Target = []
self.URL_List_Target = url
# Define a lista de ataques para cada uma das URLs
self.lista_ataques_gerados = []
for URL_Target in self.URL_List_Target:
self.url_attack_parse = urlparse(URL_Target)
self.url_parse_base = self.url_attack_parse.scheme + "://" + self.url_attack_parse.netloc + self.url_attack_parse.path + "?"
self.url_atributos_ataques = []
self.atributo_ataque = []
self.lista_var = []
self.lista_var = self.url_attack_parse.query.split('&')
# Pega todos os atributos da URL e os separa em uma lista
for atributo in self.url_attack_parse.query.split('&'):
self.str_var_fix = ""
for var_fix in self.lista_var:
if var_fix == atributo:
pass
else:
self.str_var_fix = self.str_var_fix + "&" + var_fix
# Faz uma chamada ao banco de dados para obter a String de ataque e a concatena ao atributo
for atack in self.get_Attack():
self.lista_ataques_gerados.append([self.url_parse_base + atributo + ''.join(atack) + self.str_var_fix, atributo])
# Retorna a lista de ataques gerados pelo metodo
return self.lista_ataques_gerados | Python |
import urllib,requests,urllib2,socket,time,mechanize
from Business import PluginSQL
from bs4 import BeautifulSoup
from urlparse import urlsplit,urlparse,urlunparse,urljoin
from socket import timeout
########################################################################
class Exploiter(object):
    """
    Responsible for sending the attacks and returning the generated results.
    """
    #----------------------------------------------------------------------
    def __init__(self, url):
        """
        Constructor; receives the target URL.
        """
        self.url = url
    #----------------------------------------------------------------------
    def mount_URLs (self):
        """
        Shapes the URL for urlparse, adding the HTTP scheme when the user
        did not supply one, then opens a probe connection.
        Returns the normalised URL on success, or an [error-name, exception]
        pair on failure.
        """
        self.URL_Parse_Aux = urlparse(self.url)
        if self.URL_Parse_Aux.scheme == '':
            self.url_proto = "http://" + self.url
            self.URL_Parse_Original = urlparse(self.url_proto)
        else:
            self.URL_Parse_Original = urlparse(self.url)
        # Builds the URLs:
        # - original
        # - without attributes (no query string)
        # - base (scheme + host)
        try:
            self.URL_Original = self.URL_Parse_Original.scheme + "://" + self.URL_Parse_Original.netloc + self.URL_Parse_Original.path + "?" + self.URL_Parse_Original.query
            self.URL_Sem_Atrib = self.URL_Parse_Original.scheme + "://" + self.URL_Parse_Original.netloc + self.URL_Parse_Original.path
            self.URL_Base = self.URL_Parse_Original.scheme + "://" + self.URL_Parse_Original.netloc
        except socket.error, e:
            return [("Socket Error"), (e)]
        # Opens the connection to the supplied URL (reachability probe)
        try:
            self.html = urllib2.urlopen(self.URL_Original, timeout=3)
        except socket.timeout, e:
            return [("Socket Error"), (e)]
        except urllib2.HTTPError, e:
            return [("HTTP Error"), (e)]
        except urllib2.URLError, e:
            return [("URL Error"), (e)]
        return self.URL_Original
    #----------------------------------------------------------------------
    def get_URL_Base(self):
        """
        Return the base URL (scheme + host). Valid only after mount_URLs().
        """
        return self.URL_Base
    #----------------------------------------------------------------------
    def get_URL_Sem_Atrib(self):
        """
        Return the URL without query attributes. Valid only after mount_URLs().
        """
        return self.URL_Sem_Atrib
    #----------------------------------------------------------------------
    def get_Server_Info (self):
        """
        Return the web-server response headers as one formatted string,
        or an [error-name, exception] pair on HTTP failure.
        """
        self.b = str()
        try:
            self.html = urllib2.urlopen (self.URL_Original, timeout=1)
            for header, value in self.html.headers.items():
                self.b +=header + ' : ' + value +'\n'
            return self.b
        except urllib2.HTTPError, e:
            return [("HTTP Error"), (e)]
    #----------------------------------------------------------------------
    def set_Plugin (self, plugin):
        """
        Select the attack plugin to use.
        """
        self.plugin = plugin
    #----------------------------------------------------------------------
    def send_Attack (self, url):
        """
        Send one attack URL to the server and return the resulting HTML,
        or an [error-name, exception] pair on HTTP failure.
        """
        try:
            self.html_attack = urllib2.urlopen (url, timeout=3)
            self.bs = BeautifulSoup(self.html_attack.read(), 'lxml')
            return self.bs.decode_contents()
        except urllib2.HTTPError, e:
            return [("HTTP Error"), (e)]
    #----------------------------------------------------------------------
    def get_Form(self, url):
        """
        Return the control names of the first form found at URL (for the
        plugins, via Mechanize), or an [error-name, exception] pair when
        no form can be selected.
        """
        self.br = mechanize.Browser()
        self.br.set_handle_robots(False) # Ignore robots.txt
        self.br.set_handle_refresh(False)
        self.br.open (url, timeout=3) # Opens the connection to the URL
        self.resultados = []
        try:
            self.br.select_form(nr=0)
            for control in self.br.form.controls:
                if control.name is not None: # Skip unnamed controls
                    self.resultados.append(control.name)
            return self.resultados
        except AttributeError as e: # Raised when no form is selected
            return [("AttributeError"), (e)]
        except mechanize._mechanize.FormNotFoundError as e: # Raised when no form exists
            return [("FormNotFoundError"), (e)]
        # NOTE(review): unreachable -- the try block always returns or raises
        return ["NAO_TEM"]
    #----------------------------------------------------------------------
    def get_Form_HTML(self, lista_com_forms_ataques):
        """
        Submit the form filled with the attack payloads.
        Returns [[response_html, input_list]] for analysis by the plugins.
        """
        self.form_nova_lista = []
        self.br = mechanize.Browser()
        self.br.open(lista_com_forms_ataques[0])
        self.br.select_form(nr=0)
        self.br.set_all_readonly(False)
        # NOTE(review): submit() runs inside the loop, so after the first
        # iteration the browser has navigated away from the form page;
        # confirm whether one submit per field is really intended.
        for forms_ataques in lista_com_forms_ataques[1]:
            self.br.form[forms_ataques[0]] = str(forms_ataques[1])
            self.br.submit()
        self.form_nova_lista.append([self.br.response().read(), lista_com_forms_ataques])
        return self.form_nova_lista
    #----------------------------------------------------------------------
    def gen_URL_Encode (self, url):
        """
        Print the URL-encoded attack.
        """
        # NOTE(review): self.url_encode is never assigned anywhere in this
        # class, so calling this raises AttributeError.
        print self.url_encode
    #----------------------------------------------------------------------
    def execute_Crawler (self):
        """
        Extract every link carrying a query string (GET) from the tested page.
        Returns the de-duplicated list of URLs.
        """
        self.br = mechanize.Browser()
        self.br.set_handle_robots(False) # Ignore robots.txt
        self.br.set_handle_refresh(False)
        self.br.open(self.URL_Original, timeout=3)
        self.links = []
        self.targuets = []
        for link in self.br.links():
            if link.url.startswith( '/'):
                self.links.append(self.URL_Base+link.url)
            else:
                self.links.append(self.URL_Base+"/"+link.url)
        for atrib in self.links:
            self.url_aux = urlparse(atrib)
            if self.url_aux.query != "":
                self.targuets.append (self.url_aux.scheme + "://" + self.url_aux.netloc + self.url_aux.path + "?" + self.url_aux.query)
            else:
                pass
        # Remove duplicate links
        self.targuets = list(set(self.targuets))
        return self.targuets
    #----------------------------------------------------------------------
    def get_All_Targets (self):
        """
        Collect every attack target: the original URL (when it carries
        parameters or a query string) plus the internal links found by the
        crawler. Returns the de-duplicated list.
        """
        self.lista_de_ataques = []
        self.lista_de_links = []
        # Include the original URL when it has attributes
        if (self.URL_Parse_Original.params != "") or (self.URL_Parse_Original.query != ""):
            self.lista_de_ataques.append(self.URL_Original)
        # Include the internal links found by the crawler
        self.lista_de_links = self.execute_Crawler()
        for links in self.lista_de_links:
            self.lista_de_ataques.append(links)
        # Remove duplicate links
        self.lista_de_ataques = list(set(self.lista_de_ataques))
        return self.lista_de_ataques
#!/usr/bin/env python
import urllib,requests,urllib2,socket,mechanize
from urlparse import urlsplit,urlparse,urlunparse,urljoin
from Data import Data_Access
########################################################################
# CLASSE PLUGINSQL FILHO DE PLUGINS
class PluginSQL (object):
    """
    Builds and analyses SQL command-injection attacks.

    Attack strings, error-response signatures and mitigation texts are
    read from the database through a Data_Access instance.
    """
    #----------------------------------------------------------------------
    def __init__(self):
        """
        Constructor: binds this plugin to the PL_SQLINJECTION attack table.
        """
        self.data_Access = Data_Access(1, "PL_SQLINJECTION")
        self.attack_Name = "SQL Injection"
        self.attack_Criticity = "High"
    #----------------------------------------------------------------------
    def get_Attack (self):
        """
        Return the attack strings stored in the database.
        """
        return self.data_Access.getAttack()
    #----------------------------------------------------------------------
    def get_Mitigation (self):
        """
        Return the mitigation texts stored in the database.
        """
        return self.data_Access.getMitigation()
    #----------------------------------------------------------------------
    def get_Impact (self):
        """
        Return the criticity (impact) of this vulnerability.
        """
        return self.attack_Criticity
    #----------------------------------------------------------------------
    def get_Response (self):
        """
        Return the error-response signatures stored in the database.
        """
        return self.data_Access.getResponse()
    #----------------------------------------------------------------------
    def get_Vulnerability (self):
        """
        Return the name of the vulnerability.
        """
        return self.attack_Name
    #----------------------------------------------------------------------
    def get_Attack_Monted (self, url):
        """
        Return the URL concatenated with every attack string.
        """
        self.atack_monted = []
        # BUG FIX: the accessor is named get_Attack(); calling the
        # non-existent self.getAttack() raised AttributeError.
        for atack in self.get_Attack():
            self.atack_monted.append(url + ''.join(atack))
        return self.atack_monted
    #----------------------------------------------------------------------
    def mount_URL_Attack (self, url):
        """
        GET attack via URL: splits the query attributes of URL.
        """
        self.url_attack_parse = urlparse(url)
        self.url_parse_base = self.url_attack_parse.scheme + "://" + self.url_attack_parse.netloc + self.url_attack_parse.path + "?"
        self.url_atributos_ataques = []
        self.atributo_ataque = []
        # Collect the URL attributes into a list
        self.urls_att = []
        # BUG FIX: rpartition must be called on the query string;
        # '&'.rpartition(query) partitioned the literal '&' instead.
        self.urls_att.append(self.url_attack_parse.query.rpartition('&'))
    #----------------------------------------------------------------------
    def gen_Form_Attack(self, list_forms, url):
        """
        Fetches the attack strings from the database and pairs each one
        with every form field.
        LIST_FORMS: names of the form fields found on the tested page.
        URL: tested URL address.
        Returns a list of [url, [[field, attack], ...]] entries.
        """
        self.list_forms = []
        self.list_forms = list_forms
        self.list_forms_ataques = []
        self.aux = []
        for ataque in self.get_Attack():
            # Pair this attack with every form field received as parameter
            for formulario in self.list_forms:
                self.aux.append([formulario,''.join(ataque)])
            self.list_forms_ataques.append([url, self.aux])
            self.aux = []
        # Return the list of forms already carrying the attacks
        return self.list_forms_ataques
    #----------------------------------------------------------------------
    def analisa_Resultado (self, html, url_ataque, atributo):
        """
        Analyse the application's HTML after the attack fired by Exploiter.
        HTML: page source produced by the attack; URL_ATAQUE: attacked URL;
        ATRIBUTO: parameter that was tested.
        Returns a flat list with six entries per response signature checked:
        verdict, impact, url, attribute, mitigation, vulnerable flag.
        """
        self.html = str(html)
        self.attack_Result = []
        # Look for error-based SQL-injection signatures in the page source
        for resp in self.get_Response():
            # BUG FIX: a signature sitting at index 0 was missed by
            # 'find(...) > 0'; test against -1 (not found) instead.
            if self.html.find(''.join(resp)) != -1:
                # Signature present: vulnerable
                self.attack_Result.append('VULNERAVEL')
                self.attack_Result.append(self.get_Impact())
                self.attack_Result.append(url_ataque)
                self.attack_Result.append(atributo)
                self.attack_Result.append(self.get_Mitigation())
                # True = the attack worked
                self.attack_Result.append(True)
            # Signature absent: not vulnerable to error-based SQL injection
            else:
                self.attack_Result.append('NAO VULNERAVEL')
                self.attack_Result.append(self.get_Impact())
                self.attack_Result.append(url_ataque)
                self.attack_Result.append(atributo)
                self.attack_Result.append(self.get_Mitigation())
                # False = the attack did not work
                self.attack_Result.append(False)
        return self.attack_Result
    #----------------------------------------------------------------------
    def gerar_Ataques (self, url):
        """
        Receives a list of URLs with query attributes and builds, for each
        attribute, one attack URL per attack string, keeping the remaining
        attributes untouched.
        Returns a list of [attack_url, attacked_attribute] pairs.
        """
        # Target URL list
        self.URL_List_Target = []
        self.URL_List_Target = url
        # Attack list built for every URL
        self.lista_ataques_gerados = []
        for URL_Target in self.URL_List_Target:
            self.url_attack_parse = urlparse(URL_Target)
            self.url_parse_base = self.url_attack_parse.scheme + "://" + self.url_attack_parse.netloc + self.url_attack_parse.path + "?"
            self.url_atributos_ataques = []
            self.atributo_ataque = []
            self.lista_var = []
            self.lista_var = self.url_attack_parse.query.split('&')
            # Split the URL attributes into a list
            for atributo in self.url_attack_parse.query.split('&'):
                self.str_var_fix = ""
                for var_fix in self.lista_var:
                    if var_fix == atributo:
                        pass
                    else:
                        self.str_var_fix = self.str_var_fix + "&" + var_fix
                # Fetch the attack strings from the database and append each one to the attribute
                for atack in self.get_Attack():
                    self.lista_ataques_gerados.append([self.url_parse_base + atributo + ''.join(atack) + self.str_var_fix, atributo])
        # Return the generated attack list
        return self.lista_ataques_gerados
#!/usr/bin/env python
import urllib,requests,urllib2,socket,mechanize
from urlparse import urlsplit,urlparse,urlunparse,urljoin
from Data import Data_Access
from bs4 import BeautifulSoup
########################################################################
class PluginXSS (object):
    """
    Builds and analyses Cross-Site Scripting (script injection) attacks.

    Attack strings and mitigation texts are read from the database
    through a Data_Access instance.
    """
    #----------------------------------------------------------------------
    def __init__(self):
        """
        Constructor: binds this plugin to the PL_XSS attack table.
        """
        self.data_Access = Data_Access(2, "PL_XSS")
        self.attack_Name = "XSS Injection"
        self.attack_Criticity = "High"
    #----------------------------------------------------------------------
    def get_Attack (self):
        """
        Return the attack strings stored in the database.
        """
        return self.data_Access.getAttack()
    #----------------------------------------------------------------------
    def get_Mitigation (self):
        """
        Return the mitigation texts stored in the database.
        """
        return self.data_Access.getMitigation()
    #----------------------------------------------------------------------
    def get_Impact (self):
        """
        Return the criticity (impact) of this vulnerability.
        """
        return self.attack_Criticity
    #----------------------------------------------------------------------
    def get_Response (self):
        """
        Return the response signatures stored in the database.
        """
        return self.data_Access.getResponse()
    #----------------------------------------------------------------------
    def get_Vulnerability (self):
        """
        Return the name of the vulnerability.
        """
        return self.attack_Name
    #----------------------------------------------------------------------
    def get_Attack_Monted (self, url):
        """
        Return the URL concatenated with every attack string.
        """
        self.atack_monted = []
        # BUG FIX: the accessor is named get_Attack(); calling the
        # non-existent self.getAttack() raised AttributeError.
        for atack in self.get_Attack():
            self.atack_monted.append(url + ''.join(atack))
        return self.atack_monted
    #----------------------------------------------------------------------
    def mount_URL_Attack (self, url):
        """
        GET attack via URL: splits the query attributes of URL.
        """
        self.url_attack_parse = urlparse(url)
        self.url_parse_base = self.url_attack_parse.scheme + "://" + self.url_attack_parse.netloc + self.url_attack_parse.path + "?"
        self.url_atributos_ataques = []
        self.atributo_ataque = []
        # Collect the URL attributes into a list
        self.urls_att = []
        # BUG FIX: rpartition must be called on the query string;
        # '&'.rpartition(query) partitioned the literal '&' instead.
        self.urls_att.append(self.url_attack_parse.query.rpartition('&'))
    #----------------------------------------------------------------------
    def gen_Form_Attack(self, list_forms, url):
        """
        Pair every attack string with every form field of the tested page.
        Returns a list of [url, [[field, attack], ...]] entries.
        """
        self.list_forms = []
        self.list_forms = list_forms
        self.list_forms_ataques = []
        self.aux = []
        for ataque in self.get_Attack():
            for formulario in self.list_forms:
                self.aux.append([formulario,''.join(ataque)])
            self.list_forms_ataques.append([url, self.aux])
            self.aux = []
        return self.list_forms_ataques
    #----------------------------------------------------------------------
    def analisa_Resultado (self, html, url_ataque, atributo):
        """
        Analyse the HTML page for evidence of injected script code.
        Returns a flat list with six entries per <script> tag found:
        verdict, impact, url, attribute, mitigation, vulnerable flag.
        """
        self.bsxss = BeautifulSoup(html, 'lxml')
        self.script = self.bsxss.find_all ('script')
        self.attack_Result = []
        # Check whether the injected payload is reflected inside a script tag
        for resp in self.script:
            # BUG FIX: resp is a bs4 Tag, so resp.find(payload) searched for
            # a tag *named* like the payload, always returned None, and
            # 'None != -1' flagged every page as vulnerable. Search the
            # tag's markup text instead.
            if str(resp).find('<script>alert(\'__XSS__\')</script>') != -1:
                # Payload reflected: vulnerable
                self.attack_Result.append('VULNERAVEL')
                self.attack_Result.append(self.get_Impact())
                self.attack_Result.append(url_ataque)
                self.attack_Result.append(atributo)
                self.attack_Result.append(self.get_Mitigation())
                # True = the attack worked
                self.attack_Result.append(True)
            else:
                # Payload not reflected: not vulnerable
                self.attack_Result.append('NAO VULNERAVEL')
                self.attack_Result.append(self.get_Impact())
                self.attack_Result.append(url_ataque)
                self.attack_Result.append(atributo)
                self.attack_Result.append(self.get_Mitigation())
                # False = the attack did not work
                self.attack_Result.append(False)
        return self.attack_Result
    #----------------------------------------------------------------------
    def gerar_Ataques (self, url):
        """
        Attack every parameter of the given URLs: for each query attribute,
        build one attack URL per attack string, keeping the remaining
        attributes untouched.
        Returns a list of [attack_url, attacked_attribute] pairs.
        """
        self.URL_List_Target = []
        self.URL_List_Target = url
        self.lista_ataques_gerados = []
        for URL_Target in self.URL_List_Target:
            self.url_attack_parse = urlparse(URL_Target)
            self.url_parse_base = self.url_attack_parse.scheme + "://" + self.url_attack_parse.netloc + self.url_attack_parse.path + "?"
            self.url_atributos_ataques = []
            self.atributo_ataque = []
            self.lista_var = []
            self.lista_var = self.url_attack_parse.query.split('&')
            # Split the URL attributes into a list
            for atributo in self.url_attack_parse.query.split('&'):
                self.str_var_fix = ""
                for var_fix in self.lista_var:
                    if var_fix == atributo:
                        pass
                    else:
                        self.str_var_fix = self.str_var_fix + "&" + var_fix
                # Take the attribute and append each attack string to it
                for atack in self.get_Attack():
                    self.lista_ataques_gerados.append([self.url_parse_base + atributo + ''.join(atack) + self.str_var_fix, atributo])
        return self.lista_ataques_gerados
import urllib
from Business import PluginSQL
from bs4 import BeautifulSoup
from urlparse import urlparse
# I was considering having the Exploiter perform both the attack and the analysis
########################################################################
class Exploiter(object):
    """Prototype exploiter: opens the target URL and fires the mounted attacks."""
    #----------------------------------------------------------------------
    # Constructor receives the target URL and the attack Plugin object
    #def __init__(self, url, pluginAttack):
    def __init__(self, url):
        """Constructor"""
        try:
            self.url = url
            # Opens the connection to the given URL
            self.html = urllib.urlopen (url)
            # Creates the BeautifulSoup object
            self.bs = BeautifulSoup(self.html.read(), "lxml")
            # NOTE(review): the next line replaces the parsed soup with an
            # empty one -- looks like leftover debug code; confirm intent.
            self.bs = BeautifulSoup()
        # NOTE(review): urllib has no attribute NameError, so this except
        # clause itself raises AttributeError if an error actually occurs.
        except urllib.NameError as e:
            print e
    # Shows web-server information
    # NOTE(review): self.url is a plain string here, so .headers will fail;
    # probably meant self.html.headers -- confirm.
    def getServerInfo (self):
        #for header, value in self.url.headers.items():
            #print header + ' : ' + value
        return self.url.headers.items()
    # Sends the attacks to the server and prints each response body
    def sendAttack (self, attack):
        self.pl_plugin = attack
        #self.concatenacao = []
        #self.i = 0
        for n in attack.get_Attack_Monted (self.url):
            #self.concatenacao.append(self.url+ str(n))
            self.html_attack = urllib.urlopen (n)
            # Checks whether getResponse appears in the attacked page source
            #if self.html_attack.read() != "MySQL":
            print self.html_attack.read()
            # Leave the loop and return the attack
    # METHOD THAT RETURNS THE ATTACK!!!
    # Shapes the URL
    def generate_URL (self):
        # NOTE(review): 'url' is undefined in this scope -- probably self.url.
        self.url_parse = urlparse(url)
        self.url_parse_base = self.url_parse.scheme + "://" + self.url_parse.netloc + "/"
    # Returns the URL
    def get_URL (self):
        return self.url
    # Returns the parsed URL
    def get_URL_Parse (self):
        return self.url_parse
    # Returns the parsed base URL
    def get_URL_Parse_Base (self):
        return self.url_parse_base
    # Returns the URL-encoded attack
    def gen_URL_Encode (self, url):
        #self.url_encode = urllib.urlencode(self.url : )
        # NOTE(review): self.url_encode is never assigned; this raises AttributeError.
        print self.url_encode
    # Searches the page source for an attack response
    def find_response (self, bs, response):
        self.bs_auxiliar = bs
        self.response_auxiliar = response
        #self.bs_auxiliar = BeautifulSoup(html.read(), "lxml")
#self.bs_auxiliar = BeautifulSoup(html.read(), "lxml")
#url = "http://10.37.129.4/cat.php?id=1"
#url_base = "http://10.37.129.4/cat.php"
#args = {'id' : 1}
#sql1 = PluginSQL.PluginSQL()
#temp = sql1.getAttack()
#for z in temp:
#argu = {'id' : z}
#encode_args = urllib.urlencode(argu)
#print encode_args
#print z
#url2 = urlparse(url)
#url1 = self.url_base.scheme + "://" + self.url_base.netloc + "/" + self.allLinks[self.i]['href']
#print url2.scheme
# Module-level smoke test: runs at import time and performs live HTTP
# requests against a hard-coded lab host.
# NOTE(review): consider guarding this with `if __name__ == "__main__":`.
vv = Exploiter("http://10.37.129.4/cat.php?id=1")
pl2 = PluginSQL.PluginSQL()
vv.sendAttack(pl2)
#vv.gen_URL_Encode("id=1' or '3=3")
| Python |
from urlparse import urlparse
from Business import Exploiter
from Business import PluginSQL
from Business import PluginXSS
from bs4 import BeautifulSoup
########################################################################
class Controller (object):
    """
    Wires the Exploiter (HTTP transport / crawler) to the selected attack
    plugin and collects the analysed results of a scan.
    """
    #----------------------------------------------------------------------
    def __init__(self, url):
        """
        Constructor.
        URL: target address; normalised immediately via mount_URLs().
        """
        # Base of the supplied URL
        self.exploiter = Exploiter.Exploiter(url)
        self.URL_Original = self.exploiter.mount_URLs()
    #----------------------------------------------------------------------
    def check_URL (self):
        """
        Check that the supplied URL is well formed and reachable.
        Returns the normalised URL or an [error-name, exception] pair.
        """
        return self.exploiter.mount_URLs()
    #----------------------------------------------------------------------
    def get_Server_Info (self):
        """
        Return the web-server header information.
        """
        self.get_server_Info = self.exploiter.get_Server_Info()
        return self.get_server_Info
    #----------------------------------------------------------------------
    def select_Plugin (self, plugin, stop_scan):
        """
        Select which attacks should be performed.
        PLUGIN: 1 = SQL injection, 2 = XSS.
        STOP_SCAN: 1 aborts the scan at the first vulnerable result.
        """
        self.stop_scan = stop_scan
        if plugin == 1:
            self.pl_attack = PluginSQL.PluginSQL()
        if plugin == 2:
            self.pl_attack = PluginXSS.PluginXSS()
    #----------------------------------------------------------------------
    def send_Exploiter (self):
        """
        Run the selected plugin against every discovered target (URL
        parameters first, then forms) and return the analysed results.
        """
        # Collect the target URLs (original URL plus crawler findings)
        self.targets = []
        self.targets = self.exploiter.get_All_Targets()
        # Build the attack URLs for every target
        self.url_plugin = []
        self.url_plugin = self.pl_attack.gerar_Ataques(self.targets)
        self.retorno_plugin = []
        for url_ataque in self.url_plugin:
            self.html = self.exploiter.send_Attack(url_ataque[0])
            # BUG FIX: analisa_Resultado was called twice per attack (once
            # for the result list, once for self.resultado); analyse once.
            self.resultado = self.pl_attack.analisa_Resultado(self.html, url_ataque[0], url_ataque[1])
            self.retorno_plugin.append(self.resultado)
            try:
                if (self.resultado[0] == 'VULNERAVEL') and (self.stop_scan == 1) and (self.resultado[0] != None):
                    return self.retorno_plugin
            except Exception:
                # Best effort: an empty analysis result must not abort the scan.
                pass
        # FORM attacks ######
        self.ex_get_Form = []
        self.ex_get_Form = self.exploiter.get_Form(self.URL_Original)
        # List of [URL, [[form-field, attack], ...]] entries
        self.pl_gen_Form_Attack = []
        self.pl_gen_Form_Attack = self.pl_attack.gen_Form_Attack(self.ex_get_Form, self.URL_Original)
        # Page sources produced by submitting the attacks
        self.respostas_html = []
        try:
            for n in self.pl_gen_Form_Attack:
                self.respostas_html.append(self.exploiter.get_Form_HTML(n))
            for html in self.respostas_html:
                self.atributo_form = "FORMULARIOS: "
                for formu in html[0][1][1]:
                    self.atributo_form += ''.join(formu [0]) + " "
                    self.atributo_form += "ATAQUE: " + ''.join(formu [1])
                # BUG FIX: the vulnerability check below used the stale
                # result of the URL phase; analyse and check this form's
                # own result instead.
                self.resultado = self.pl_attack.analisa_Resultado(html[0][0], html[0][1][0], self.atributo_form)
                self.retorno_plugin.append(self.resultado)
                if (self.resultado[0] == 'VULNERAVEL') and (self.stop_scan == 1):
                    return self.retorno_plugin
        except Exception:
            # Best effort: form-handling errors must not discard URL results.
            pass
        # END FORM ######
        return self.retorno_plugin
| Python |
#!/usr/bin/env python
import urllib,requests,urllib2,socket,mechanize
from urlparse import urlsplit,urlparse,urlunparse,urljoin
from Data import Data_Access
from bs4 import BeautifulSoup
########################################################################
class PluginXSS (object):
    """
    Builds and analyses Cross-Site Scripting (script injection) attacks.

    Attack strings and mitigation texts are read from the database
    through a Data_Access instance.
    """
    #----------------------------------------------------------------------
    def __init__(self):
        """
        Constructor: binds this plugin to the PL_XSS attack table.
        """
        self.data_Access = Data_Access(2, "PL_XSS")
        self.attack_Name = "XSS Injection"
        self.attack_Criticity = "High"
    #----------------------------------------------------------------------
    def get_Attack (self):
        """
        Return the attack strings stored in the database.
        """
        return self.data_Access.getAttack()
    #----------------------------------------------------------------------
    def get_Mitigation (self):
        """
        Return the mitigation texts stored in the database.
        """
        return self.data_Access.getMitigation()
    #----------------------------------------------------------------------
    def get_Impact (self):
        """
        Return the criticity (impact) of this vulnerability.
        """
        return self.attack_Criticity
    #----------------------------------------------------------------------
    def get_Response (self):
        """
        Return the response signatures stored in the database.
        """
        return self.data_Access.getResponse()
    #----------------------------------------------------------------------
    def get_Vulnerability (self):
        """
        Return the name of the vulnerability.
        """
        return self.attack_Name
    #----------------------------------------------------------------------
    def get_Attack_Monted (self, url):
        """
        Return the URL concatenated with every attack string.
        """
        self.atack_monted = []
        # BUG FIX: the accessor is named get_Attack(); calling the
        # non-existent self.getAttack() raised AttributeError.
        for atack in self.get_Attack():
            self.atack_monted.append(url + ''.join(atack))
        return self.atack_monted
    #----------------------------------------------------------------------
    def mount_URL_Attack (self, url):
        """
        GET attack via URL: splits the query attributes of URL.
        """
        self.url_attack_parse = urlparse(url)
        self.url_parse_base = self.url_attack_parse.scheme + "://" + self.url_attack_parse.netloc + self.url_attack_parse.path + "?"
        self.url_atributos_ataques = []
        self.atributo_ataque = []
        # Collect the URL attributes into a list
        self.urls_att = []
        # BUG FIX: rpartition must be called on the query string;
        # '&'.rpartition(query) partitioned the literal '&' instead.
        self.urls_att.append(self.url_attack_parse.query.rpartition('&'))
    #----------------------------------------------------------------------
    def gen_Form_Attack(self, list_forms, url):
        """
        Pair every attack string with every form field of the tested page.
        Returns a list of [url, [[field, attack], ...]] entries.
        """
        self.list_forms = []
        self.list_forms = list_forms
        self.list_forms_ataques = []
        self.aux = []
        for ataque in self.get_Attack():
            for formulario in self.list_forms:
                self.aux.append([formulario,''.join(ataque)])
            self.list_forms_ataques.append([url, self.aux])
            self.aux = []
        return self.list_forms_ataques
    #----------------------------------------------------------------------
    def analisa_Resultado (self, html, url_ataque, atributo):
        """
        Analyse the HTML page for evidence of injected script code.
        Returns a flat list with six entries per <script> tag found:
        verdict, impact, url, attribute, mitigation, vulnerable flag.
        """
        self.bsxss = BeautifulSoup(html, 'lxml')
        self.script = self.bsxss.find_all ('script')
        self.attack_Result = []
        # Check whether the injected payload is reflected inside a script tag
        for resp in self.script:
            # BUG FIX: resp is a bs4 Tag, so resp.find(payload) searched for
            # a tag *named* like the payload, always returned None, and
            # 'None != -1' flagged every page as vulnerable. Search the
            # tag's markup text instead.
            if str(resp).find('<script>alert(\'__XSS__\')</script>') != -1:
                # Payload reflected: vulnerable
                self.attack_Result.append('VULNERAVEL')
                self.attack_Result.append(self.get_Impact())
                self.attack_Result.append(url_ataque)
                self.attack_Result.append(atributo)
                self.attack_Result.append(self.get_Mitigation())
                # True = the attack worked
                self.attack_Result.append(True)
            else:
                # Payload not reflected: not vulnerable
                self.attack_Result.append('NAO VULNERAVEL')
                self.attack_Result.append(self.get_Impact())
                self.attack_Result.append(url_ataque)
                self.attack_Result.append(atributo)
                self.attack_Result.append(self.get_Mitigation())
                # False = the attack did not work
                self.attack_Result.append(False)
        return self.attack_Result
    #----------------------------------------------------------------------
    def gerar_Ataques (self, url):
        """
        Attack every parameter of the given URLs: for each query attribute,
        build one attack URL per attack string, keeping the remaining
        attributes untouched.
        Returns a list of [attack_url, attacked_attribute] pairs.
        """
        self.URL_List_Target = []
        self.URL_List_Target = url
        self.lista_ataques_gerados = []
        for URL_Target in self.URL_List_Target:
            self.url_attack_parse = urlparse(URL_Target)
            self.url_parse_base = self.url_attack_parse.scheme + "://" + self.url_attack_parse.netloc + self.url_attack_parse.path + "?"
            self.url_atributos_ataques = []
            self.atributo_ataque = []
            self.lista_var = []
            self.lista_var = self.url_attack_parse.query.split('&')
            # Split the URL attributes into a list
            for atributo in self.url_attack_parse.query.split('&'):
                self.str_var_fix = ""
                for var_fix in self.lista_var:
                    if var_fix == atributo:
                        pass
                    else:
                        self.str_var_fix = self.str_var_fix + "&" + var_fix
                # Take the attribute and append each attack string to it
                for atack in self.get_Attack():
                    self.lista_ataques_gerados.append([self.url_parse_base + atributo + ''.join(atack) + self.str_var_fix, atributo])
        return self.lista_ataques_gerados
import MySQLdb
########################################################################
class Data_Access (object):
    """
    Data-access layer: reads the attack strings, expected responses and
    mitigation texts for one vulnerability from the MySQL database.
    """
    #----------------------------------------------------------------------
    def __init__ (self, id_vulnerability, pl_attack):
        """
        Constructor: opens the connection to the database.
        ID_VULNERABILITY: numeric id of the vulnerability (used in lookups);
        PL_ATTACK: name of the table holding the attack strings.
        """
        try:
            # BUG FIX: the connection was bound to a local variable 'db',
            # leaving the query methods below with undefined names; keep
            # the connection (and cursor/results) on the instance.
            self.db = MySQLdb.connect (
                host = 'localhost',
                user = 'root',
                passwd = '',
                db = 'DB_1CLIC2LEARN'
            )
            self.id_vulnerability = id_vulnerability
            self.pl_attack = pl_attack
        except MySQLdb.OperationalError as e:
            # BUG FIX: '_mysql_exceptions' was never imported, and __init__
            # cannot return a value; report and re-raise instead.
            print(e)
            raise
    #----------------------------------------------------------------------
    def get_Attack (self):
        """
        Return the attack strings (row tuples from fetchall).
        """
        self.cursor = self.db.cursor()
        # NOTE(review): string-built SQL; safe only while pl_attack comes
        # from code, never from user input.
        self.cursor.execute('SELECT ATTACK FROM ' + self.pl_attack)
        self.result = self.cursor.fetchall()
        if self.result:
            for z in self.result:
                print(z)
        return self.result
    #----------------------------------------------------------------------
    def get_Mitigation (self):
        """
        Return the mitigation texts for the configured vulnerability.
        """
        self.cursor = self.db.cursor()
        # BUG FIX: the query concatenated the numeric id as a table name;
        # look the mitigation up by vulnerability id instead (mirrors the
        # working Data_Access implementation).
        self.cursor.execute('SELECT MITIGATION FROM MITIGATION WHERE ID_VULNERABILITY = ' + str(self.id_vulnerability))
        self.result = self.cursor.fetchall()
        if self.result:
            for z in self.result:
                print(z)
        return self.result
    #----------------------------------------------------------------------
    def get_Response (self):
        """
        Return the response signatures the vulnerability is expected to show.
        """
        self.cursor = self.db.cursor()
        # BUG FIX: same table/id mix-up as get_Mitigation -- look the
        # responses up by vulnerability id. TODO confirm table name against
        # the database schema.
        self.cursor.execute('SELECT ATTACK_RESPONSE FROM ATTACK_RESPONSE WHERE ID_VULNERABILITY = ' + str(self.id_vulnerability))
        self.result = self.cursor.fetchall()
        if self.result:
            for z in self.result:
                print(z)
        return self.result
| Python |
import MySQLdb
########################################################################
class Data_Access (object):
    """
    Data-access layer for the DB_1CLIC2LEARN MySQL database, configured
    for one selected vulnerability.
    """
    #----------------------------------------------------------------------
    def __init__(self, id_vulnerability, pl_attack):
        """
        Constructor:
        Opens the database connection and configures the instance for the
        selected vulnerability.
        """
        # The connection used to be a class attribute, which connected at
        # import time and was shared by every instance; open it per
        # instance instead.
        self.dba = MySQLdb.connect(host='localhost', user='root', passwd='', db='DB_1CLIC2LEARN')
        self.id_vulnerability = id_vulnerability
        self.pl_attack = pl_attack
    #----------------------------------------------------------------------
    def getAttack (self):
        """
        Return the attack payloads of the selected vulnerability.
        """
        # NOTE: a table name cannot be bound as a SQL parameter, so
        # pl_attack must come from trusted code only.
        self.cursor = self.dba.cursor()
        self.cursor.execute('SELECT ATTACK FROM ' + self.pl_attack)
        self.result = self.cursor.fetchall()
        return self.result
    #----------------------------------------------------------------------
    def getMitigation (self):
        """
        Return how to mitigate the selected vulnerability.
        """
        self.cursor = self.dba.cursor()
        # Bind the id as a query parameter instead of concatenating it
        # into the SQL string (prevents SQL injection).
        self.cursor.execute('SELECT MITIGATION FROM MITIGATION WHERE ID_VULNERABILITY = %s',
                            (self.id_vulnerability,))
        self.result = self.cursor.fetchone()
        return self.result
    #----------------------------------------------------------------------
    def getResponse (self):
        """
        Return the responses that the selected vulnerability presents.
        """
        self.cursor = self.dba.cursor()
        self.cursor.execute('SELECT RESPONSE FROM ATTACK_RESPONSE WHERE ID_VULNERABILITY = %s',
                            (self.id_vulnerability,))
        self.result = self.cursor.fetchall()
        return self.result
| Python |
import os
import urllib
from google.appengine.api import users
from google.appengine.ext import ndb
import jinja2
import webapp2
# Jinja2 environment rooted at this file's directory; autoescaping is on,
# so template variables are HTML-escaped by default.
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    extensions=['jinja2.ext.autoescape'],
    autoescape=True)
class MainPage(webapp2.RequestHandler):
    """Handler for the site root: renders the static index template."""
    def get(self):
        # No dynamic values yet; render index.html with an empty context.
        page = JINJA_ENVIRONMENT.get_template('index.html')
        self.response.write(page.render({}))
# WSGI entry point: map the site root to MainPage.
application = webapp2.WSGIApplication([
    ('/', MainPage),
], debug=True)
| Python |
#! /usr/bin/env python
# encoding: utf-8
# waf 1.6.10
VERSION='0.3.3'
import sys
APPNAME='p2t'
# waf convention: 'top' is the source root, 'out' the build directory
top = '.'
out = 'build'
# all sources compiled into the p2t test program
CPP_SOURCES = ['poly2tri/common/shapes.cc',
               'poly2tri/sweep/cdt.cc',
               'poly2tri/sweep/advancing_front.cc',
               'poly2tri/sweep/sweep_context.cc',
               'poly2tri/sweep/sweep.cc',
               'testbed/main.cc']
from waflib.Tools.compiler_cxx import cxx_compiler
# on Windows only try g++ (skip MSVC autodetection)
cxx_compiler['win32'] = ['g++']
#Platform specific libs
if sys.platform == 'win32':
    # MS Windows
    sys_libs = ['glfw', 'opengl32']
elif sys.platform == 'darwin':
    # Apple OSX
    sys_libs = ['glfw', 'OpenGL']
else:
    # GNU/Linux, BSD, etc
    sys_libs = ['glfw', 'GL']
def options(opt):
    """waf options step: register the C++ compiler command-line options."""
    print(' set_options')
    opt.load('compiler_cxx')
def configure(conf):
    """waf configure step: locate a C++ compiler and record build settings."""
    print(' calling the configuration')
    conf.load('compiler_cxx')
    # Optimised, fast-math release build.
    conf.env.CXXFLAGS = '-O3 -ffast-math'.split()
    conf.env.DEFINES_P2T = ['P2T']
    conf.env.LIB_P2T = sys_libs
def build(bld):
    # Build the p2t program; uselib='P2T' pulls in the DEFINES_P2T and
    # platform GL/glfw libraries recorded by configure().
    print(' building')
    bld.program(features = 'cxx cxxprogram', source=CPP_SOURCES, target = 'p2t', uselib = 'P2T')
| Python |
#!/usr/bin/python
# Copyright 2011 Google, Inc. All Rights Reserved.
# simple script to walk source tree looking for third-party licenses
# dumps resulting html page to stdout
import os, re, mimetypes, sys
# read source directories to scan from command line
SOURCE = sys.argv[1:]
# regex to find /* */ style comment blocks; non-greedy so adjacent comments
# stay separate, DOTALL lets '.' span newlines
COMMENT_BLOCK = re.compile(r"(/\*.+?\*/)", re.MULTILINE | re.DOTALL)
# regex used to detect if comment block is a license
COMMENT_LICENSE = re.compile(r"(license)", re.IGNORECASE)
COMMENT_COPYRIGHT = re.compile(r"(copyright)", re.IGNORECASE)
# MIME types whose files should never be scanned for license comments
EXCLUDE_TYPES = [
    "application/xml",
    "image/png",
]
# list of known licenses; keys are derived by stripping all whitespace and
# forcing to lowercase to help combine multiple files that have same license.
KNOWN_LICENSES = {}
class License:
    """A unique license text together with every file it covers."""
    def __init__(self, license_text):
        self.license_text = license_text
        self.filenames = []
    def add_file(self, filename):
        """Record *filename* as covered by this license, skipping duplicates."""
        if filename in self.filenames:
            return
        self.filenames.append(filename)
# key derivation: drop every non-word character and lowercase, so trivially
# reformatted copies of one license collapse to the same key
LICENSE_KEY = re.compile(r"[^\w]")
def find_license(license_text):
    """Return the shared License object for *license_text*, creating it on first sight."""
    # TODO(alice): a lot these licenses are almost identical Apache licenses.
    # Most of them differ in origin/modifications. Consider combining similar
    # licenses.
    key = LICENSE_KEY.sub("", license_text).lower()
    if key not in KNOWN_LICENSES:
        KNOWN_LICENSES[key] = License(license_text)
    return KNOWN_LICENSES[key]
def discover_license(exact_path, filename):
    """Harvest license text from one file.

    Files whose name ends with LICENSE are treated as a whole-file license
    applying to the filename prefix; all other files are scanned for
    /* */ comment blocks that mention both "license" and "copyright".
    """
    # when filename ends with LICENSE, assume applies to filename prefixed
    if filename.endswith("LICENSE"):
        with open(exact_path) as fh:
            license_text = fh.read()
        target_filename = filename[:-len("LICENSE")]
        if target_filename.endswith("."): target_filename = target_filename[:-1]
        find_license(license_text).add_file(target_filename)
        return None
    # guess_type() returns a (type, encoding) tuple; comparing the whole
    # tuple against the string list never matched, so EXCLUDE_TYPES was
    # silently ignored — compare only the type part.
    mimetype, _encoding = mimetypes.guess_type(filename)
    if mimetype in EXCLUDE_TYPES: return None
    with open(exact_path) as fh:
        raw_file = fh.read()
    # include comments that have both "license" and "copyright" in the text
    for comment in COMMENT_BLOCK.finditer(raw_file):
        comment = comment.group(1)
        if COMMENT_LICENSE.search(comment) is None: continue
        if COMMENT_COPYRIGHT.search(comment) is None: continue
        find_license(comment).add_file(filename)
# walk every requested source tree and harvest licenses from each file
for source in SOURCE:
    for root, dirs, files in os.walk(source):
        for name in files:
            discover_license(os.path.join(root, name), name)
# emit one HTML section per unique license, listing the files it covers
print "<html><head><style> body { font-family: sans-serif; } pre { background-color: #eeeeee; padding: 1em; white-space: pre-wrap; } </style></head><body>"
for license in KNOWN_LICENSES.values():
    print "<h3>Notices for files:</h3><ul>"
    filenames = license.filenames
    filenames.sort()
    for filename in filenames:
        print "<li>%s</li>" % (filename)
    print "</ul>"
    print "<pre>%s</pre>" % license.license_text
print "</body></html>"
| Python |
#!/usr/bin/env python
"""
tesshelper.py -- Utility operations to compare, report stats, and copy
public headers for tesseract 3.0x VS2008 Project
$RCSfile: tesshelper.py,v $ $Revision: 7ca575b377aa $ $Date: 2012/03/07 17:26:31 $
"""
r"""
Requires:
python 2.7 or greater: activestate.com
http://www.activestate.com/activepython/downloads
because using the new argparse module and new literal set syntax (s={1, 2}) .
General Notes:
--------------
Format for a .vcproj file entry:
<File
RelativePath="..\src\allheaders.h"
>
</File>
"""
epilogStr = r"""
Examples:
Assume that tesshelper.py is in c:\buildfolder\tesseract-3.02\vs2008,
which is also the current directory. Then,
python tesshelper .. compare
will compare c:\buildfolder\tesseract-3.02 "library" directories to the
libtesseract Project
(c:\buildfolder\tesseract-3.02\vs2008\libtesseract\libtesseract.vcproj).
python tesshelper .. report
will display summary stats for c:\buildfolder\tesseract-3.02 "library"
directories and the libtesseract Project.
python tesshelper .. copy ..\..\include
will copy all "public" libtesseract header files to
c:\buildfolder\include.
python tesshelper .. clean
will clean the vs2008 folder of all build directories, and .user, .suo,
.ncb, and other temp files.
"""
# imports of python standard library modules
# See Python Documentation | Library Reference for details
import collections
import glob
import argparse
import os
import re
import shutil
import sys
# ====================================================================
VERSION = "1.0 %s" % "$Date: 2012/03/07 17:26:31 $".split()[1]
PROJ_SUBDIR = r"vs2008\libtesseract"
PROJFILE = "libtesseract.vcproj"
NEWHEADERS_FILENAME = "newheaders.txt"
NEWSOURCES_FILENAME = "newsources.txt"
fileNodeTemplate = \
''' <File
RelativePath="..\..\%s"
>
</File>
'''
# ====================================================================
def getProjectfiles(libTessDir, libProjectFile, nTrimChars):
    """Return sets of all, c, h, and resources files in libtesseract Project.

    Scrapes every RelativePath="..." attribute out of the .vcproj XML.
    Returns (allFiles, hFiles, cFiles, rcFiles): allFiles holds lowercased
    paths relative to the tesseract root (first nTrimChars characters of
    the absolute path stripped); the other three hold the raw
    project-relative paths bucketed by extension.
    """
    #extract filenames of header & source files from the .vcproj
    projectCFiles = set()
    projectHFiles = set()
    projectRFiles = set()
    projectFilesSet = set()
    f = open(libProjectFile, "r")
    data = f.read()
    f.close()
    # (?i) = case-insensitive; only paths beginning with "." are project files
    projectFiles = re.findall(r'(?i)RelativePath="(\.[^"]+)"', data)
    for projectFile in projectFiles:
        root, ext = os.path.splitext(projectFile.lower())
        if ext == ".c" or ext == ".cpp":
            projectCFiles.add(projectFile)
        elif ext == ".h":
            projectHFiles.add(projectFile)
        elif ext == ".rc":
            projectRFiles.add(projectFile)
        else:
            print "unknown file type: %s" % projectFile
        # normalize to a lowercased path relative to the tesseract root
        relativePath = os.path.join(libTessDir, projectFile)
        relativePath = os.path.abspath(relativePath)
        relativePath = relativePath[nTrimChars:].lower()
        projectFilesSet.add(relativePath)
    return projectFilesSet, projectHFiles, projectCFiles, projectRFiles
def getTessLibFiles(tessDir, nTrimChars):
    """Return the set of all libtesseract source/header/resource files under
    *tessDir*, as lowercased paths with the first *nTrimChars* characters
    (the absolute tessDir prefix plus separator) removed."""
    # "library" directories that feed the libtesseract project
    libDirs = [
        "api",
        "ccmain",
        "ccstruct",
        "ccutil",
        "classify",
        "cube",
        "cutil",
        "dict",
        r"neural_networks\runtime",
        "opencl",
        "textord",
        "viewer",
        "wordrec",
        #"training",
        r"vs2008\port",
        r"vs2008\libtesseract",
        ]
    tessFiles = set()
    for subDir in libDirs:
        baseDir = os.path.join(tessDir, subDir)
        for wildcard in ("*.c", "*.cpp", "*.h", "*.rc"):
            for match in glob.glob(os.path.join(baseDir, wildcard)):
                absPath = os.path.abspath(match)
                tessFiles.add(absPath[nTrimChars:].lower())
    return tessFiles
# ====================================================================
def tessCompare(tessDir):
    '''Compare libtesseract Project files and actual "sub-library" files.

    Prints files present on disk but missing from the Project ("extra")
    and files listed in the Project but gone from disk ("dead"), and
    writes ready-to-paste <File> items for the extras to
    newheaders.txt / newsources.txt.
    '''
    vs2008Dir = os.path.join(tessDir, "vs2008")
    libTessDir = os.path.join(vs2008Dir, "libtesseract")
    libProjectFile = os.path.join(libTessDir,"libtesseract.vcproj")
    tessAbsDir = os.path.abspath(tessDir)
    # +1 also strips the path separator after the root prefix
    nTrimChars = len(tessAbsDir)+1
    print 'Comparing VS2008 Project "%s" with\n "%s"' % (libProjectFile,
        tessAbsDir)
    projectFilesSet, projectHFiles, projectCFiles, projectRFiles = \
        getProjectfiles(libTessDir, libProjectFile, nTrimChars)
    tessFiles = getTessLibFiles(tessDir, nTrimChars)
    # on disk but not in the project
    extraFiles = tessFiles - projectFilesSet
    print "%2d Extra files (in %s but not in Project)" % (len(extraFiles),
        tessAbsDir)
    headerFiles = []
    sourceFiles = []
    sortedList = list(extraFiles)
    sortedList.sort()
    for filename in sortedList:
        root, ext = os.path.splitext(filename.lower())
        if ext == ".h":
            headerFiles.append(filename)
        else:
            sourceFiles.append(filename)
        print " %s " % filename
    print
    print "%2d new header file items written to %s" % (len(headerFiles),
        NEWHEADERS_FILENAME)
    headerFiles.sort()
    with open(NEWHEADERS_FILENAME, "w") as f:
        for filename in headerFiles:
            f.write(fileNodeTemplate % filename)
    print "%2d new source file items written to %s" % (len(sourceFiles),
        NEWSOURCES_FILENAME)
    sourceFiles.sort()
    with open(NEWSOURCES_FILENAME, "w") as f:
        for filename in sourceFiles:
            f.write(fileNodeTemplate % filename)
    print
    # in the project but no longer on disk
    deadFiles = projectFilesSet - tessFiles
    print "%2d Dead files (in Project but not in %s" % (len(deadFiles),
        tessAbsDir)
    sortedList = list(deadFiles)
    sortedList.sort()
    for filename in sortedList:
        print " %s " % filename
# ====================================================================
def tessReport(tessDir):
    """Report summary stats on "sub-library" files and libtesseract Project file.

    Prints a per-directory table of .h/.cpp counts for the on-disk library
    directories, then header/source/resource counts for the VS2008 project.
    """
    vs2008Dir = os.path.join(tessDir, "vs2008")
    libTessDir = os.path.join(vs2008Dir, "libtesseract")
    libProjectFile = os.path.join(libTessDir,"libtesseract.vcproj")
    tessAbsDir = os.path.abspath(tessDir)
    # +1 also strips the path separator after the root prefix
    nTrimChars = len(tessAbsDir)+1
    projectFilesSet, projectHFiles, projectCFiles, projectRFiles = \
        getProjectfiles(libTessDir, libProjectFile, nTrimChars)
    tessFiles = getTessLibFiles(tessDir, nTrimChars)
    print 'Summary stats for "%s" library directories' % tessAbsDir
    # per-directory Counter of file extensions (without the leading dot)
    folderCounters = {}
    for tessFile in tessFiles:
        tessFile = tessFile.lower()
        folder, head = os.path.split(tessFile)
        file, ext = os.path.splitext(head)
        typeCounter = folderCounters.setdefault(folder, collections.Counter())
        typeCounter[ext[1:]] += 1
    # Python 2: keys() returns a list, so it can be sorted in place
    folders = folderCounters.keys()
    folders.sort()
    totalFiles = 0
    totalH = 0
    totalCPP = 0
    # NOTE(review): totalOther is never updated or printed
    totalOther = 0
    print
    print " total h cpp"
    print " ----- --- ---"
    for folder in folders:
        counters = folderCounters[folder]
        nHFiles = counters['h']
        nCPPFiles = counters['cpp']
        total = nHFiles + nCPPFiles
        totalFiles += total
        totalH += nHFiles
        totalCPP += nCPPFiles
        print " %5d %3d %3d %s" % (total, nHFiles, nCPPFiles, folder)
    print " ----- --- ---"
    print " %5d %3d %3d" % (totalFiles, totalH, totalCPP)
    print
    print 'Summary stats for VS2008 Project "%s"' % libProjectFile
    print " %5d %s" %(len(projectHFiles), "Header files")
    print " %5d %s" % (len(projectCFiles), "Source files")
    print " %5d %s" % (len(projectRFiles), "Resource files")
    print " -----"
    print " %5d" % (len(projectHFiles) + len(projectCFiles) + len(projectRFiles), )
# ====================================================================
def copyIncludes(fileSet, description, tessDir, includeDir):
"""Copy set of files to specified include dir."""
print
print 'Copying libtesseract "%s" headers to %s' % (description, includeDir)
print
sortedList = list(fileSet)
sortedList.sort()
count = 0
errList = []
for includeFile in sortedList:
filepath = os.path.join(tessDir, includeFile)
if os.path.isfile(filepath):
shutil.copy2(filepath, includeDir)
print "Copied: %s" % includeFile
count += 1
else:
print '***Error: "%s" doesn\'t exist"' % filepath
errList.append(filepath)
print '%d header files successfully copied to "%s"' % (count, includeDir)
if len(errList):
print "The following %d files were not copied:"
for filepath in errList:
print " %s" % filepath
def tessCopy(tessDir, includeDir):
    '''Copy all "public" libtesseract Project header files to include directory.
    Preserves directory hierarchy.'''
    # headers pulled in (directly or transitively) by baseapi.h / capi.h
    baseIncludeSet = {
        r"api\baseapi.h",
        r"api\capi.h",
        r"api\apitypes.h",
        r"ccstruct\publictypes.h",
        r"ccmain\thresholder.h",
        r"ccutil\host.h",
        r"ccutil\basedir.h",
        r"ccutil\tesscallback.h",
        r"ccutil\unichar.h",
        r"ccutil\platform.h",
        }
    # headers needed by users of strngs.h
    strngIncludeSet = {
        r"ccutil\strngs.h",
        r"ccutil\memry.h",
        r"ccutil\host.h",
        r"ccutil\serialis.h",
        r"ccutil\errcode.h",
        r"ccutil\fileerr.h",
        #r"ccutil\genericvector.h",
        }
    # headers needed by the result-iterator API
    resultIteratorIncludeSet = {
        r"ccmain\ltrresultiterator.h",
        r"ccmain\pageiterator.h",
        r"ccmain\resultiterator.h",
        r"ccutil\genericvector.h",
        r"ccutil\tesscallback.h",
        r"ccutil\errcode.h",
        r"ccutil\host.h",
        r"ccutil\helpers.h",
        r"ccutil\ndminx.h",
        r"ccutil\params.h",
        r"ccutil\unicharmap.h",
        r"ccutil\unicharset.h",
        }
    # headers needed by genericvector.h users (currently unused below)
    genericVectorIncludeSet = {
        r"ccutil\genericvector.h",
        r"ccutil\tesscallback.h",
        r"ccutil\errcode.h",
        r"ccutil\host.h",
        r"ccutil\helpers.h",
        r"ccutil\ndminx.h",
        }
    # headers needed by blobs.h users (currently unused below)
    blobsIncludeSet = {
        r"ccstruct\blobs.h",
        r"ccstruct\rect.h",
        r"ccstruct\points.h",
        r"ccstruct\ipoints.h",
        r"ccutil\elst.h",
        r"ccutil\host.h",
        r"ccutil\serialis.h",
        r"ccutil\lsterr.h",
        r"ccutil\ndminx.h",
        r"ccutil\tprintf.h",
        r"ccutil\params.h",
        r"viewer\scrollview.h",
        r"ccstruct\vecfuncs.h",
        }
    # non-header support files copied into includeDir itself
    extraFilesSet = {
        #r"vs2008\include\stdint.h",
        r"vs2008\include\leptonica_versionnumbers.vsprops",
        r"vs2008\include\tesseract_versionnumbers.vsprops",
        }
    # public headers go into includeDir\tesseract
    tessIncludeDir = os.path.join(includeDir, "tesseract")
    if os.path.isfile(tessIncludeDir):
        print 'Aborting: "%s" is a file not a directory.' % tessIncludeDir
        return
    if not os.path.exists(tessIncludeDir):
        os.mkdir(tessIncludeDir)
    #fileSet = baseIncludeSet | strngIncludeSet | genericVectorIncludeSet | blobsIncludeSet
    fileSet = baseIncludeSet | strngIncludeSet | resultIteratorIncludeSet
    copyIncludes(fileSet, "public", tessDir, tessIncludeDir)
    copyIncludes(extraFilesSet, "extra", tessDir, includeDir)
# ====================================================================
def tessClean(tessDir):
    '''Clean vs2008 folder of all build directories and certain temp files.

    Asks for confirmation first; a second prompt selects dry-run (list
    only) versus actual deletion.
    '''
    vs2008Dir = os.path.join(tessDir, "vs2008")
    vs2008AbsDir = os.path.abspath(vs2008Dir)
    # default answer is "No": anything but "yes" aborts
    answer = raw_input(
        'Are you sure you want to clean the\n "%s" folder (Yes/No) [No]? ' %
        vs2008AbsDir)
    if answer.lower() not in ("yes",):
        return
    answer = raw_input('Only list the items to be deleted (Yes/No) [Yes]? ')
    answer = answer.strip()
    # anything but an explicit "no" keeps this a dry run
    listOnly = answer.lower() not in ("no",)
    for rootDir, dirs, files in os.walk(vs2008AbsDir):
        for buildDir in ("LIB_Release", "LIB_Debug", "DLL_Release", "DLL_Debug"):
            if buildDir in dirs:
                # prune so os.walk doesn't descend into the removed tree
                dirs.remove(buildDir)
                absBuildDir = os.path.join(rootDir, buildDir)
                if listOnly:
                    print "Would remove: %s" % absBuildDir
                else:
                    print "Removing: %s" % absBuildDir
                    shutil.rmtree(absBuildDir)
        if rootDir == vs2008AbsDir:
            # at the top level, keep only the few known-good files
            for file in files:
                if file.lower() not in ("tesseract.sln",
                                        "tesshelper.py",
                                        "readme.txt"):
                    absPath = os.path.join(rootDir, file)
                    if listOnly:
                        print "Would remove: %s" % absPath
                    else:
                        print "Removing: %s" % absPath
                        os.remove(absPath)
        else:
            # below the top level, remove VS/editor temp files
            for file in files:
                root, ext = os.path.splitext(file)
                if ext.lower() in (".suo",
                                   ".ncb",
                                   ".user",
                                   ) or (
                    len(ext)>0 and ext[-1] == "~"):
                    absPath = os.path.join(rootDir, file)
                    if listOnly:
                        print "Would remove: %s" % absPath
                    else:
                        print "Removing: %s" % absPath
                        os.remove(absPath)
# ====================================================================
def validateTessDir(tessDir):
    """Check that tessDir is a valid tesseract directory.

    Used as an argparse type= callback: returns the value unchanged or
    raises ArgumentTypeError.
    """
    if not os.path.isdir(tessDir):
        raise argparse.ArgumentTypeError('Directory "%s" doesn\'t exist.' % tessDir)
    # the directory must also contain the libtesseract VS2008 project file
    projFile = os.path.join(tessDir, PROJ_SUBDIR, PROJFILE)
    if not os.path.isfile(projFile):
        raise argparse.ArgumentTypeError('Project file "%s" doesn\'t exist.' % projFile)
    return tessDir
def validateDir(dir):
    """Check that *dir* exists and is named ``include``.

    Used as an argparse type= callback: returns the value unchanged or
    raises ArgumentTypeError.
    """
    if not os.path.isdir(dir):
        raise argparse.ArgumentTypeError('Directory "%s" doesn\'t exist.' % dir)
    tail = os.path.basename(os.path.abspath(dir))
    if tail.lower() != "include":
        raise argparse.ArgumentTypeError('Include directory "%s" must be named "include".' % tail)
    return dir
def main ():
    """Parse the command line and dispatch to the tess* command handlers."""
    parser = argparse.ArgumentParser(
        epilog=epilogStr,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("--version", action="version",
                        version="%(prog)s " + VERSION)
    parser.add_argument('tessDir', type=validateTessDir,
                        help="tesseract installation directory")
    subparsers = parser.add_subparsers(
        dest="subparser_name",
        title="Commands")
    # each subcommand stores its handler function in args.func
    parser_changes = subparsers.add_parser('compare',
        help="compare libtesseract Project with tessDir")
    parser_changes.set_defaults(func=tessCompare)
    parser_report = subparsers.add_parser('report',
        help="report libtesseract summary stats")
    parser_report.set_defaults(func=tessReport)
    parser_copy = subparsers.add_parser('copy',
        help="copy public libtesseract header files to includeDir")
    parser_copy.add_argument('includeDir', type=validateDir,
        help="Directory to copy header files to.")
    parser_copy.set_defaults(func=tessCopy)
    parser_clean = subparsers.add_parser('clean',
        help="clean vs2008 folder of build folders and .user files")
    parser_clean.set_defaults(func=tessClean)
    #kludge because argparse has no ability to set default subparser
    if (len(sys.argv) == 2):
        sys.argv.append("compare")
    args = parser.parse_args()
    #handle commands
    if args.func == tessCopy:
        # copy is the only command with a second positional argument
        args.func(args.tessDir, args.includeDir)
    else:
        args.func(args.tessDir)
if __name__ == '__main__' :
main()
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Zdenko Podobný
# Author: Zdenko Podobný
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple python demo script of tesseract-ocr 3.02 c-api
"""
import os
import sys
import ctypes
# Demo variables
lang = "eng"
filename = "../phototest.tif"
libpath = "/usr/local/lib64/"
libpath_w = "../vs2008/DLL_Release/"
# tessdata location: environment override, else the source-tree default
TESSDATA_PREFIX = os.environ.get('TESSDATA_PREFIX')
if not TESSDATA_PREFIX:
    TESSDATA_PREFIX = "../"
if sys.platform == "win32":
    libname = libpath_w + "libtesseract302.dll"
    libname_alt = "libtesseract302.dll"
    # let Windows resolve the DLL's dependencies from the build dir
    os.environ["PATH"] += os.pathsep + libpath_w
else:
    libname = libpath + "libtesseract.so.3.0.2"
    libname_alt = "libtesseract.so.3"
# try the exact-version library first, then the generic symlink
# NOTE(review): the bare except hides the real load error, and
# WindowsError does not exist off Windows, so a second failure raises
# NameError there instead of printing the message.
try:
    tesseract = ctypes.cdll.LoadLibrary(libname)
except:
    try:
        tesseract = ctypes.cdll.LoadLibrary(libname_alt)
    except WindowsError, err:
        print("Trying to load '%s'..." % libname)
        print("Trying to load '%s'..." % libname_alt)
        print(err)
        exit(1)
tesseract.TessVersion.restype = ctypes.c_char_p
tesseract_version = tesseract.TessVersion()[:4]
# We need to check library version because libtesseract.so.3 is symlink
# and can point to other version than 3.02
if float(tesseract_version) < 3.02:
    print("Found tesseract-ocr library version %s." % tesseract_version)
    print("C-API is present only in version 3.02!")
    exit(2)
# initialise the C API with the tessdata path and language, then OCR one file
api = tesseract.TessBaseAPICreate()
rc = tesseract.TessBaseAPIInit3(api, TESSDATA_PREFIX, lang);
if (rc):
    tesseract.TessBaseAPIDelete(api)
    print("Could not initialize tesseract.\n")
    exit(3)
text_out = tesseract.TessBaseAPIProcessPages(api, filename, None , 0);
result_text = ctypes.string_at(text_out)
print result_text
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Michael Liao (askxuefeng@gmail.com)'
import os
import cgi
import time
import logging
import simplejson
from datetime import date
from google.appengine.api import xmpp
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import urlfetch
from google.appengine.runtime import apiproxy_errors
from google.appengine.api import memcache
from google.appengine.api import users
from Cheetah.Template import Template
from autogen import CompiledTemplate
import weather
import store
def get_city(request):
    """Return the city name stored in the request's 'city' cookie, or None."""
    if 'Cookie' not in request.headers:
        return None
    header = request.headers['Cookie']
    if not header:
        return None
    # cookies arrive as 'name=value' pairs separated by ';'
    for part in header.split(';'):
        part = part.strip()
        if part.startswith('city='):
            return part[len('city='):]
    return None
def fetch_weather_in_cache(city):
    """Return the weather JSON for *city*, memcached for one hour."""
    key = str(city.code)
    cached = memcache.get(key)
    if cached:
        return cached
    fresh = fetch_weather(city)
    if fresh is None:
        # network failure: do not cache, retry on the next request
        return None
    memcache.set(key, fresh, 3600)
    return fresh
def fetch_weather(city):
    """Fetch and format the weather for *city*; None on network failure."""
    rss = fetch_rss(city.code)
    if rss is None:
        return None
    # weather.Weather renders the RSS into this app's JSON string form
    return str(weather.Weather(city.name, rss))
def fetch_rss(code):
    """Download the Yahoo weather RSS for location *code*; None on any failure."""
    url = 'http://weather.yahooapis.com/forecastrss?w=%s' % code
    logging.info('Fetch RSS: %s' % url)
    try:
        result = urlfetch.fetch(url, follow_redirects=False)
    except (urlfetch.Error, apiproxy_errors.Error):
        return None
    # anything but a plain 200 (including redirects) counts as failure
    return result.content if result.status_code == 200 else None
class XmppHandler(webapp.RequestHandler):
    """Answers XMPP chat messages with a two-day forecast for the city
    named in the message body."""
    def post(self):
        message = xmpp.Message(self.request.POST)
        logging.info('XMPP from %s: %s' % (message.sender, message.body))
        # the whole message body is treated as the city name/alias
        name = message.body.strip().lower()
        if name=='':
            # empty query: ask the user to name a city
            message.reply(u'''噢,啥都不输,怎么知道您要查询的城市啊?
http://weather-china.appspot.com/
''')
            return
        city = store.find_city(name, return_default=False)
        if city is None:
            # unknown city
            message.reply(u''':( 噢,没有找到您要查询的城市 "%s"。
http://weather-china.appspot.com/
''' % name)
            return
        json = fetch_weather_in_cache(city)
        if json is None:
            # upstream fetch failed; ask the user to retry later
            return message.reply(u''':( 对不起,网络故障,暂时无法查询,请过几分钟再试试。
http://weather-china.appspot.com/
''')
        if isinstance(json, unicode):
            json = json.encode('utf-8')
        w = simplejson.loads(json, encoding='utf-8')
        # reply with today's and tomorrow's forecast plus a detail link
        return message.reply(
            u'''%s:
今日:%s,%s~%s度
明日:%s,%s~%s度
更详细的预报请查看 http://weather-china.appspot.com/?city=%s
''' % (
            w[u'name'],
            w[u'forecasts'][0][u'text'], w[u'forecasts'][0][u'low'], w[u'forecasts'][0][u'high'],
            w[u'forecasts'][1][u'text'], w[u'forecasts'][1][u'low'], w[u'forecasts'][1][u'high'],
            city.first_alias(),)
        )
class HomeHandler(webapp.RequestHandler):
    """Renders the home page for the city given by ?city=, then the
    cookie, then 'beijing' as the final fallback."""
    def get(self):
        time_1 = time.time()
        name = self.request.get('city', '')
        if not name:
            name = get_city(self.request)
        if not name:
            name = 'beijing'
        # full city list is cached for an hour
        cities = memcache.get('__cities__')
        if cities is None:
            cities = store.get_cities()
            memcache.set('__cities__', cities, 3600)
        # match against the canonical name or any alias
        city = None
        for c in cities:
            if c.name==name or name in c.aliases:
                city = c
                break
        if city is None:
            self.response.set_status(500)
            return
        # remember the chosen city in a cookie for three years
        today = date.today()
        target = date(today.year+3, today.month, today.day)
        expires = target.strftime('%a, %d-%b-%Y %H:%M:%S GMT')
        self.response.headers['Set-Cookie'] = 'city=%s; expires=%s; path=/' % (city.first_alias(), expires)
        time_2 = time.time()
        t = CompiledTemplate(searchList=[{'city' : city, 'cities' : cities}])
        self.response.out.write(t)
        time_3 = time.time()
        logging.info('Performance: %f / %f of rendering / total.' % (time_3-time_2, time_3-time_1))
class AdminHandler(webapp.RequestHandler):
    """Admin UI: list, create and delete cities. Requires GAE admin login."""
    def get(self):
        login = self.get_login_url()
        if login:
            self.redirect(login)
            return
        action = self.request.get('action', '')
        if action=='delete_city':
            key = self.request.get('key')
            store.delete_city(key)
            self.redirect_admin()
            return
        if action=='':
            # no action: render the admin page with all cities
            cities = store.get_cities()
            root = os.path.dirname(__file__)
            t = Template(file=os.path.join(root, 'admin.html'), searchList=[{'cities' : cities}])
            self.response.out.write(t)
            return
        self.response.set_status(400)
    def post(self):
        login = self.get_login_url()
        if login:
            self.redirect(login)
            return
        action = self.request.get('action')
        if action=='create_city':
            # escape user input before it reaches templates/storage
            name = cgi.escape(self.request.get('name')).strip().lower()
            aliases = [cgi.escape(x).lower() for x in self.request.get_all('aliases') if x.strip()]
            code = int(self.request.get('code'))
            store.create_city(name, aliases, code)
            self.redirect_admin()
            return
        self.response.set_status(400)
    def get_login_url(self):
        # returns a login URL when the current user is not an admin,
        # or None when access is allowed
        if not users.is_current_user_admin():
            return users.create_login_url('/admin')
        return None
    def redirect_admin(self):
        # timestamp query param defeats browser caching of the admin page
        self.redirect('/admin?t=%s' % time.time())
class ApiHandler(webapp.RequestHandler):
    """JSON/JSONP weather API; also serves the Chrome extension, which
    gets its city from the cookie instead of a query parameter."""
    CACHE_TIME = 600 # 600 seconds
    def get(self):
        callback = ''
        c = ''
        extension = self.request.get('extension', '')
        if extension=='chrome':
            # detect city from cookie:
            c = get_city(self.request)
            if not c:
                c = 'beijing'
        else:
            # optional JSONP callback plus explicit city parameter
            callback = cgi.escape(self.request.get('callback', '').strip())
            c = cgi.escape(self.request.get('city', '')).lower()
        if not c:
            return self.send_error('MISSING_PARAMETER', 'Missing parameter \'city\'')
        city = store.find_city(c, return_default=False)
        if city is None:
            return self.send_error('CITY_NOT_FOUND', 'City not found')
        weather = fetch_weather_in_cache(city)
        if weather is None:
            return self.send_error('SERVICE_UNAVAILABLE', 'Service unavailable')
        if callback:
            # JSONP: wrap the JSON payload in the caller's function
            if isinstance(callback, unicode):
                callback = callback.encode('utf-8')
            self.write_json('%s(%s);' % (callback, weather))
        else:
            self.write_json(weather)
    def send_error(self, code, msg):
        # NOTE(review): code/msg are embedded without JSON escaping; safe
        # only while callers pass fixed literals, as they do above.
        json = '{ "error" : "%s", "message" : "%s"}' % (code, msg)
        self.write_json(json)
    def write_json(self, json):
        if isinstance(json, unicode):
            json = json.encode('utf-8')
        self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
        self.response.out.write(json)
# URL routing for the weather application
application = webapp.WSGIApplication([
    ('^/$', HomeHandler),
    ('^/api$', ApiHandler),
    ('^/admin$', AdminHandler),
    ('^/_ah/xmpp/message/chat/$', XmppHandler),
], debug=True)
def main():
    # CGI entry point used by the App Engine Python runtime
    run_wsgi_app(application)
if __name__ == "__main__":
    main()
| Python |
# $Id: CheetahWrapper.py,v 1.26 2007/10/02 01:22:04 tavis_rudd Exp $
"""Cheetah command-line interface.
2002-09-03 MSO: Total rewrite.
2002-09-04 MSO: Bugfix, compile command was using wrong output ext.
2002-11-08 MSO: Another rewrite.
Meta-Data
================================================================================
Author: Tavis Rudd <tavis@damnsimple.com> and Mike Orr <sluggoster@gmail.com>>
Version: $Revision: 1.26 $
Start Date: 2001/03/30
Last Revision Date: $Date: 2007/10/02 01:22:04 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com> and Mike Orr <sluggoster@gmail.com>"
__revision__ = "$Revision: 1.26 $"[11:-2]
import getopt, glob, os, pprint, re, shutil, sys
import cPickle as pickle
from optparse import OptionParser
from Cheetah.Version import Version
from Cheetah.Template import Template, DEFAULT_COMPILER_SETTINGS
from Cheetah.Utils.Misc import mkdirsWithPyInitFiles
# strips one or two leading dashes from the command word ("--help" -> "help")
optionDashesRE = re.compile( R"^-{1,2}" )
# a legal Python identifier/module name
moduleNameRE = re.compile( R"^[a-zA-Z_][a-zA-Z_0-9]*$" )
def fprintfMessage(stream, format, *args):
    """printf-style write to *stream*.

    A newline is appended unless *format* ends with '^', which is
    stripped and suppresses the newline. With no *args*, *format* is
    written literally (no % interpolation).
    """
    if format.endswith('^'):
        format = format[:-1]
    else:
        format += '\n'
    stream.write(format % args if args else format)
class Error(Exception):
    """Generic error raised by the Cheetah command-line wrapper."""
class Bundle:
    """Wrap the source, destination and backup paths in one neat little class.
    Used by CheetahWrapper.getBundles().
    """
    def __init__(self, **kw):
        # every keyword becomes an instance attribute
        vars(self).update(kw)
    def __repr__(self):
        return "<Bundle %r>" % self.__dict__
##################################################
## USAGE FUNCTION & MESSAGES
def usage(usageMessage, errorMessage="", out=sys.stderr):
    """Write help text, an optional error message, and abort the program.

    Exits with status 1 when an error message is given, 0 otherwise.
    """
    out.write(WRAPPER_TOP)
    out.write(usageMessage)
    if errorMessage:
        out.write('\n')
        out.write("*** USAGE ERROR ***: %s\n" % errorMessage)
        sys.exit(1)
    sys.exit(0)
WRAPPER_TOP = """\
__ ____________ __
\ \/ \/ /
\/ * * \/ CHEETAH %(Version)s Command-Line Tool
\ | /
\ ==----== / by Tavis Rudd <tavis@damnsimple.com>
\__________/ and Mike Orr <sluggoster@gmail.com>
""" % globals()
HELP_PAGE1 = """\
USAGE:
------
cheetah compile [options] [FILES ...] : Compile template definitions
cheetah fill [options] [FILES ...] : Fill template definitions
cheetah help : Print this help message
cheetah options : Print options help message
cheetah test [options] : Run Cheetah's regression tests
: (same as for unittest)
cheetah version : Print Cheetah version number
You may abbreviate the command to the first letter; e.g., 'h' == 'help'.
If FILES is a single "-", read standard input and write standard output.
Run "cheetah options" for the list of valid options.
"""
##################################################
## CheetahWrapper CLASS
class CheetahWrapper(object):
    """Driver for the 'cheetah' command-line tool; one instance per run."""
    # backup policy for generated output files
    MAKE_BACKUPS = True
    BACKUP_SUFFIX = ".bak"
    # Template subclass / compiler settings overrides; None means defaults
    # (presumably filled in later from options — confirm against parseOpts)
    _templateClass = None
    _compilerSettings = None
    def __init__(self):
        # all of these are populated by main()/parseOpts() during a run
        self.progName = None
        self.command = None
        self.opts = None
        self.pathArgs = None
        self.sourceFiles = []
        self.searchList = []
        self.parser = None
##################################################
## MAIN ROUTINE
    def main(self, argv=None):
        """The main program controller.

        Picks the command from argv[1] (dashes stripped), parses the
        remaining options, and dispatches to the matching method; a
        command may be abbreviated to its first letter.
        """
        if argv is None:
            argv = sys.argv
        # Step 1: Determine the command and arguments.
        try:
            self.progName = progName = os.path.basename(argv[0])
            self.command = command = optionDashesRE.sub("", argv[1])
            if command == 'test':
                # the test command forwards its args to unittest untouched
                self.testOpts = argv[2:]
            else:
                self.parseOpts(argv[2:])
        except IndexError:
            # argv[1] missing: no command was given
            usage(HELP_PAGE1, "not enough command-line arguments")
        # Step 2: Call the command
        meths = (self.compile, self.fill, self.help, self.options,
                 self.test, self.version)
        for meth in meths:
            methName = meth.__name__
            # Or meth.im_func.func_name
            # Or meth.func_name (Python >= 2.1 only, sometimes works on 2.0)
            methInitial = methName[0]
            # accept the full command name or its first letter
            if command in (methName, methInitial):
                sys.argv[0] += (" " + methName)
                # @@MO: I don't necessarily agree sys.argv[0] should be
                # modified.
                meth()
                return
        # If none of the commands matched.
        usage(HELP_PAGE1, "unknown command '%s'" % command)
    def parseOpts(self, args):
        """Parse options for the compile/fill commands into self.opts and
        collect the remaining path arguments into self.pathArgs."""
        C, D, W = self.chatter, self.debug, self.warn
        # Any command starting with 'c' is 'compile'.
        self.isCompile = isCompile = self.command[0] == 'c'
        defaultOext = isCompile and ".py" or ".html"
        self.parser = OptionParser()
        pao = self.parser.add_option
        pao("--idir", action="store", dest="idir", default='', help='Input directory (defaults to current directory)')
        pao("--odir", action="store", dest="odir", default="", help='Output directory (defaults to current directory)')
        pao("--iext", action="store", dest="iext", default=".tmpl", help='File input extension (defaults: compile: .tmpl, fill: .tmpl)')
        pao("--oext", action="store", dest="oext", default=defaultOext, help='File output extension (defaults: compile: .py, fill: .html)')
        pao("-R", action="store_true", dest="recurse", default=False, help='Recurse through subdirectories looking for input files')
        pao("--stdout", "-p", action="store_true", dest="stdout", default=False, help='Send output to stdout instead of writing to a file')
        pao("--quiet", action="store_false", dest="verbose", default=True, help='Do not print informational messages to stdout')
        pao("--debug", action="store_true", dest="debug", default=False, help='Print diagnostic/debug information to stderr')
        pao("--env", action="store_true", dest="env", default=False, help='Pass the environment into the search list')
        pao("--pickle", action="store", dest="pickle", default="", help='Unpickle FILE and pass it through in the search list')
        pao("--flat", action="store_true", dest="flat", default=False, help='Do not build destination subdirectories')
        pao("--nobackup", action="store_true", dest="nobackup", default=False, help='Do not make backup files when generating new ones')
        pao("--settings", action="store", dest="compilerSettingsString", default=None, help='String of compiler settings to pass through, e.g. --settings="useNameMapper=False,useFilters=False"')
        pao('--print-settings', action='store_true', dest='print_settings', help='Print out the list of available compiler settings')
        pao("--templateAPIClass", action="store", dest="templateClassName", default=None, help='Name of a subclass of Cheetah.Template.Template to use for compilation, e.g. MyTemplateClass')
        pao("--parallel", action="store", type="int", dest="parallel", default=1, help='Compile/fill templates in parallel, e.g. --parallel=4')
        pao('--shbang', dest='shbang', default='#!/usr/bin/env python', help='Specify the shbang to place at the top of compiled templates, e.g. --shbang="#!/usr/bin/python2.6"')
        opts, files = self.parser.parse_args(args)
        self.opts = opts
        if sys.platform == "win32":
            # cmd.exe does not expand globs for us; do it here.
            new_files = []
            for spec in files:
                file_list = glob.glob(spec)
                if file_list:
                    new_files.extend(file_list)
                else:
                    # Keep non-matching specs so the user gets a clear
                    # "not found" message later.
                    new_files.append(spec)
            files = new_files
        self.pathArgs = files
        D("""\
cheetah compile %s
Options are
%s
Files are %s""", args, pprint.pformat(vars(opts)), files)
        if opts.print_settings:
            print()
            print('>> Available Cheetah compiler settings:')
            from Cheetah.Compiler import _DEFAULT_COMPILER_SETTINGS
            listing = _DEFAULT_COMPILER_SETTINGS
            # NOTE(review): assumes _DEFAULT_COMPILER_SETTINGS is a list of
            # (name, default, doc) tuples -- confirm against Cheetah.Compiler;
            # the in-place sort also mutates the imported list.
            listing.sort(key=lambda l: l[0][0].lower())
            for l in listing:
                print('\t%s (default: "%s")\t%s' % l)
            sys.exit(0)
        #cleanup trailing path separators
        seps = [sep for sep in [os.sep, os.altsep] if sep]
        for attr in ['idir', 'odir']:
            for sep in seps:
                path = getattr(opts, attr, None)
                if path and path.endswith(sep):
                    path = path[:-len(sep)]
                    setattr(opts, attr, path)
                    break
        self._fixExts()
        if opts.env:
            # Environment goes first so it wins over later searchList items.
            self.searchList.insert(0, os.environ)
        if opts.pickle:
            f = open(opts.pickle, 'rb')
            unpickled = pickle.load(f)
            f.close()
            self.searchList.insert(0, unpickled)
##################################################
## COMMAND METHODS
    def compile(self):
        """The 'compile' subcommand: compile template definitions."""
        self._compileOrFill()
    def fill(self):
        """The 'fill' subcommand: fill template definitions and write output.

        Installs Cheetah's import hooks first so .tmpl files named by
        #extends can be imported directly.
        """
        from Cheetah.ImportHooks import install
        install()
        self._compileOrFill()
    def help(self):
        """The 'help' subcommand: print the usage page to stdout."""
        usage(HELP_PAGE1, "", sys.stdout)
    def options(self):
        """The 'options' subcommand: print the OptionParser help message."""
        return self.parser.print_help()
    def test(self):
        """The 'test' subcommand: run Cheetah's regression test suite.

        self.testOpts (set by main()) may contain -q/-v to adjust unittest
        verbosity.
        """
        # @@MO: Ugly kludge.
        # Probe for write permission first, since the tests create temp files.
        TEST_WRITE_FILENAME = 'cheetah_test_file_creation_ability.tmp'
        try:
            f = open(TEST_WRITE_FILENAME, 'w')
        except:
            sys.exit("""\
Cannot run the tests because you don't have write permission in the current
directory. The tests need to create temporary files. Change to a directory
you do have write permission to and re-run the tests.""")
        else:
            f.close()
            os.remove(TEST_WRITE_FILENAME)
        # @@MO: End ugly kludge.
        from Cheetah.Tests import Test
        import unittest
        verbosity = 1
        if '-q' in self.testOpts:
            verbosity = 0
        if '-v' in self.testOpts:
            verbosity = 2
        runner = unittest.TextTestRunner(verbosity=verbosity)
        runner.run(unittest.TestSuite(Test.suites))
    def version(self):
        """The 'version' subcommand: print the Cheetah version number."""
        print(Version)
    # If you add a command, also add it to the 'meths' variable in main().
##################################################
## LOGGING METHODS
    def chatter(self, format, *args):
        """Print a verbose message to stdout. But don't if .opts.stdout is
        true or .opts.verbose is false.
        """
        # Suppressed under --stdout so chatter never pollutes template output.
        if self.opts.stdout or not self.opts.verbose:
            return
        fprintfMessage(sys.stdout, format, *args)
    def debug(self, format, *args):
        """Print a debugging message to stderr, but don't if .opts.debug is
        false.
        """
        if self.opts.debug:
            fprintfMessage(sys.stderr, format, *args)
    def warn(self, format, *args):
        """Always print a warning message to stderr.
        """
        fprintfMessage(sys.stderr, format, *args)
    def error(self, format, *args):
        """Always print an error message to stderr and exit with exit
        status 1.  (Never returns.)
        """
        fprintfMessage(sys.stderr, format, *args)
        sys.exit(1)
##################################################
## HELPER METHODS
def _fixExts(self):
assert self.opts.oext, "oext is empty!"
iext, oext = self.opts.iext, self.opts.oext
if iext and not iext.startswith("."):
self.opts.iext = "." + iext
if oext and not oext.startswith("."):
self.opts.oext = "." + oext
    def _compileOrFill(self):
        """Dispatch the compile/fill work: stdin, recursive discovery, or the
        explicit file list, optionally forking workers for --parallel."""
        C, D, W = self.chatter, self.debug, self.warn
        opts, files = self.opts, self.pathArgs
        if files == ["-"]:
            self._compileOrFillStdin()
            return
        elif not files and opts.recurse:
            which = opts.idir and "idir" or "current"
            C("Drilling down recursively from %s directory.", which)
            sourceFiles = []
            dir = os.path.join(self.opts.idir, os.curdir)
            # NOTE(review): os.path.walk was removed in Python 3; this branch
            # needs porting to os.walk there.
            os.path.walk(dir, self._expandSourceFilesWalk, sourceFiles)
        elif not files:
            usage(HELP_PAGE1, "Neither files nor -R specified!")
        else:
            sourceFiles = self._expandSourceFiles(files, opts.recurse, True)
        sourceFiles = [os.path.normpath(x) for x in sourceFiles]
        D("All source files found: %s", sourceFiles)
        bundles = self._getBundles(sourceFiles)
        D("All bundles: %s", pprint.pformat(bundles))
        if self.opts.flat:
            # --flat can map several sources onto one destination; abort early
            # rather than silently overwrite.
            self._checkForCollisions(bundles)
        # In parallel mode a new process is forked for each template
        # compilation, out of a pool of size self.opts.parallel. This is not
        # really optimal in all cases (e.g. probably wasteful for small
        # templates), but seems to work well in real life for me.
        #
        # It also won't work for Windows users, but I'm not going to lose any
        # sleep over that.
        if self.opts.parallel > 1:
            bad_child_exit = 0
            pid_pool = set()
            def child_wait():
                # Reap one child and return its exit status.
                pid, status = os.wait()
                pid_pool.remove(pid)
                return os.WEXITSTATUS(status)
            while bundles:
                b = bundles.pop()
                pid = os.fork()
                if pid:
                    pid_pool.add(pid)
                else:
                    # Child process: handle exactly one bundle, then exit.
                    self._compileOrFillBundle(b)
                    sys.exit(0)
                if len(pid_pool) == self.opts.parallel:
                    # Pool is full: wait for a slot, bailing on failure.
                    bad_child_exit = child_wait()
                    if bad_child_exit:
                        break
            # Drain remaining children, remembering the first failure.
            while pid_pool:
                child_exit = child_wait()
                if not bad_child_exit:
                    bad_child_exit = child_exit
            if bad_child_exit:
                sys.exit("Child process failed, exited with code %d" % bad_child_exit)
        else:
            for b in bundles:
                self._compileOrFillBundle(b)
def _checkForCollisions(self, bundles):
"""Check for multiple source paths writing to the same destination
path.
"""
C, D, W = self.chatter, self.debug, self.warn
isError = False
dstSources = {}
for b in bundles:
if b.dst in dstSources:
dstSources[b.dst].append(b.src)
else:
dstSources[b.dst] = [b.src]
keys = sorted(dstSources.keys())
for dst in keys:
sources = dstSources[dst]
if len(sources) > 1:
isError = True
sources.sort()
fmt = "Collision: multiple source files %s map to one destination file %s"
W(fmt, sources, dst)
if isError:
what = self.isCompile and "Compilation" or "Filling"
sys.exit("%s aborted due to collisions" % what)
    def _expandSourceFilesWalk(self, arg, dir, files):
        """Recursion extension for .expandSourceFiles().
        This method is a callback for os.path.walk().
        'arg' is a list to which successful paths will be appended.
        """
        # NOTE(review): os.path.walk was removed in Python 3; callers would
        # need to switch to os.walk there.
        iext = self.opts.iext
        for f in files:
            path = os.path.join(dir, f)
            if path.endswith(iext) and os.path.isfile(path):
                arg.append(path)
            elif os.path.islink(path) and os.path.isdir(path):
                # Recurse into directory symlinks explicitly -- presumably
                # because walk() does not follow them; confirm.
                os.path.walk(path, self._expandSourceFilesWalk, arg)
            # If is directory, do nothing; 'walk' will eventually get it.
    def _expandSourceFiles(self, files, recurse, addIextIfMissing):
        """Calculate source paths from 'files' by applying the
        command-line options.
        """
        # NOTE(review): the 'files' parameter is immediately shadowed by the
        # local list below; the method actually iterates self.pathArgs --
        # confirm whether callers rely on passing a different list.
        C, D, W = self.chatter, self.debug, self.warn
        idir = self.opts.idir
        iext = self.opts.iext
        files = []
        for f in self.pathArgs:
            oldFilesLen = len(files)
            D("Expanding %s", f)
            path = os.path.join(idir, f)
            pathWithExt = path + iext # May or may not be valid.
            if os.path.isdir(path):
                if recurse:
                    os.path.walk(path, self._expandSourceFilesWalk, files)
                else:
                    raise Error("source file '%s' is a directory" % path)
            elif os.path.isfile(path):
                files.append(path)
            elif (addIextIfMissing and not path.endswith(iext) and
                  os.path.isfile(pathWithExt)):
                # The user omitted the input extension; add it for them.
                files.append(pathWithExt)
                # Do not recurse directories discovered by iext appending.
            elif os.path.exists(path):
                W("Skipping source file '%s', not a plain file.", path)
            else:
                W("Skipping source file '%s', not found.", path)
            if len(files) > oldFilesLen:
                D(" ... found %s", files[oldFilesLen:])
        return files
    def _getBundles(self, sourceFiles):
        """Build a Bundle (src/dst/bak/base/basename paths) for every
        source file, applying --idir/--odir/--iext/--oext/--flat."""
        flat = self.opts.flat
        idir = self.opts.idir
        iext = self.opts.iext
        nobackup = self.opts.nobackup  # NOTE(review): read but unused here.
        odir = self.opts.odir
        oext = self.opts.oext
        idirSlash = idir + os.sep
        bundles = []
        for src in sourceFiles:
            # 'base' is the subdirectory plus basename.
            base = src
            if idir and src.startswith(idirSlash):
                base = src[len(idirSlash):]
            if iext and base.endswith(iext):
                base = base[:-len(iext)]
            basename = os.path.basename(base)
            if flat:
                dst = os.path.join(odir, basename + oext)
            else:
                dbn = basename
                if odir and base.startswith(os.sep):
                    # Absolute source path: strip the longest prefix shared
                    # with odir (or a parent of odir) before joining.
                    # NOTE(review): the '/' literal below assumes a POSIX
                    # separator -- confirm Windows behavior.
                    odd = odir
                    while odd != '':
                        idx = base.find(odd)
                        if idx == 0:
                            dbn = base[len(odd):]
                            if dbn[0] == '/':
                                dbn = dbn[1:]
                            break
                        odd = os.path.dirname(odd)
                        if odd == '/':
                            break
                    dst = os.path.join(odir, dbn + oext)
                else:
                    dst = os.path.join(odir, base + oext)
            bak = dst + self.BACKUP_SUFFIX
            b = Bundle(src=src, dst=dst, bak=bak, base=base, basename=basename)
            bundles.append(b)
        return bundles
    def _getTemplateClass(self):
        """Resolve --templateAPIClass ("module:class") to a class object,
        caching the result; defaults to Cheetah's Template."""
        C, D, W = self.chatter, self.debug, self.warn
        modname = None
        if self._templateClass:
            return self._templateClass
        modname = self.opts.templateClassName
        if not modname:
            return Template
        p = modname.rfind('.')
        if ':' not in modname:
            self.error('The value of option --templateAPIClass is invalid\n'
                       'It must be in the form "module:class", '
                       'e.g. "Cheetah.Template:Template"')
        modname, classname = modname.split(':')
        C('using --templateAPIClass=%s:%s'%(modname, classname))
        if p >= 0:
            # Dotted module path: import the parent package and fetch the leaf.
            mod = getattr(__import__(modname[:p], {}, {}, [modname[p+1:]]), modname[p+1:])
        else:
            mod = __import__(modname, {}, {}, [])
        klass = getattr(mod, classname, None)
        if klass:
            self._templateClass = klass
            return klass
        else:
            # NOTE(review): error() exits, so the "Falling back" wording is
            # misleading -- no fallback actually occurs.
            self.error('**Template class specified in option --templateAPIClass not found\n'
                       '**Falling back on Cheetah.Template:Template')
def _getCompilerSettings(self):
if self._compilerSettings:
return self._compilerSettings
def getkws(**kws):
return kws
if self.opts.compilerSettingsString:
try:
exec('settings = getkws(%s)'%self.opts.compilerSettingsString)
except:
self.error("There's an error in your --settings option."
"It must be valid Python syntax.\n"
+" --settings='%s'\n"%self.opts.compilerSettingsString
+" %s: %s"%sys.exc_info()[:2]
)
validKeys = DEFAULT_COMPILER_SETTINGS.keys()
if [k for k in settings.keys() if k not in validKeys]:
self.error(
'The --setting "%s" is not a valid compiler setting name.'%k)
self._compilerSettings = settings
return settings
else:
return {}
    def _compileOrFillStdin(self):
        """Compile or fill a single template read from stdin, writing the
        result to stdout (the FILES == "-" case)."""
        TemplateClass = self._getTemplateClass()
        compilerSettings = self._getCompilerSettings()
        if self.isCompile:
            pysrc = TemplateClass.compile(file=sys.stdin,
                                          compilerSettings=compilerSettings,
                                          returnAClass=False)
            output = pysrc
        else:
            output = str(TemplateClass(file=sys.stdin, compilerSettings=compilerSettings))
        sys.stdout.write(output)
    def _compileOrFillBundle(self, b):
        """Compile or fill one Bundle: announce, back up any existing
        destination, generate the output, and write it out."""
        C, D, W = self.chatter, self.debug, self.warn
        TemplateClass = self._getTemplateClass()
        compilerSettings = self._getCompilerSettings()
        src = b.src
        dst = b.dst
        base = b.base
        basename = b.basename
        dstDir = os.path.dirname(dst)
        what = self.isCompile and "Compiling" or "Filling"
        C("%s %s -> %s^", what, src, dst) # No trailing newline.
        if os.path.exists(dst) and not self.opts.nobackup:
            bak = b.bak
            C(" (backup %s)", bak) # On same line as previous message.
        else:
            bak = None
            C("")
        if self.isCompile:
            # Compiled output becomes an importable module, so the name must
            # follow Python module naming rules.
            if not moduleNameRE.match(basename):
                tup = basename, src
                raise Error("""\
%s: base name %s contains invalid characters. It must
be named according to the same rules as Python modules.""" % tup)
            pysrc = TemplateClass.compile(file=src, returnAClass=False,
                                          moduleName=basename,
                                          className=basename,
                                          commandlineopts=self.opts,
                                          compilerSettings=compilerSettings)
            output = pysrc
        else:
            #output = str(TemplateClass(file=src, searchList=self.searchList))
            tclass = TemplateClass.compile(file=src, compilerSettings=compilerSettings)
            output = str(tclass(searchList=self.searchList))
        if bak:
            shutil.copyfile(dst, bak)
        if dstDir and not os.path.exists(dstDir):
            if self.isCompile:
                # Compiled modules need __init__.py files up the tree.
                mkdirsWithPyInitFiles(dstDir)
            else:
                os.makedirs(dstDir)
        if self.opts.stdout:
            sys.stdout.write(output)
        else:
            # NOTE(review): opened without an explicit encoding -- output
            # encoding follows the locale; confirm that is intended.
            f = open(dst, 'w')
            f.write(output)
            f.close()
# Called when invoked as `cheetah`
def _cheetah():
    """Console-script entry point for the `cheetah` command."""
    CheetahWrapper().main()
# Called when invoked as `cheetah-compile`
def _cheetah_compile():
    """Console-script entry point for `cheetah-compile`: forces the
    'compile' subcommand before dispatching."""
    sys.argv.insert(1, "compile")
    CheetahWrapper().main()
##################################################
## if run from the command line
if __name__ == '__main__': CheetahWrapper().main()
# vim: shiftwidth=4 tabstop=4 expandtab
| Python |
'''
Provides an abstract Servlet baseclass for Cheetah's Template class
'''
import sys
import os.path
# Use Webware's Servlet base class when Webware is importable; otherwise fall
# back to a minimal stand-in so Cheetah templates work standalone.
isWebwareInstalled = False
try:
    try:
        # Prefer the ds.appserver variant if present.
        from ds.appserver.Servlet import Servlet as BaseServlet
    except:
        from WebKit.Servlet import Servlet as BaseServlet
    isWebwareInstalled = True
    if not issubclass(BaseServlet, object):
        # Re-base old-style (classic) Webware classes on object so that
        # new-style features (super(), etc.) work in subclasses.
        class NewStyleBaseServlet(BaseServlet, object):
            pass
        BaseServlet = NewStyleBaseServlet
except:
    # No Webware available: provide a do-nothing servlet exposing the same
    # lifecycle API (awake/sleep/shutdown).
    class BaseServlet(object):
        _reusable = 1
        _threadSafe = 0
        def awake(self, transaction):
            pass
        def sleep(self, transaction):
            pass
        def shutdown(self):
            pass
##################################################
## CLASSES
class Servlet(BaseServlet):
    """This class is an abstract baseclass for Cheetah.Template.Template.
    It wraps WebKit.Servlet and provides a few extra convenience methods that
    are also found in WebKit.Page. It doesn't do any of the HTTP method
    resolution that is done in WebKit.HTTPServlet
    """
    # Per-request Webware objects; populated by awake(), cleared by sleep().
    transaction = None
    application = None
    request = None
    session = None
    def __init__(self, *args, **kwargs):
        super(Servlet, self).__init__(*args, **kwargs)
        # this default will be changed by the .awake() method
        self._CHEETAH__isControlledByWebKit = False
    ## methods called by Webware during the request-response
    def awake(self, transaction):
        super(Servlet, self).awake(transaction)
        # a hack to signify that the servlet is being run directly from WebKit
        self._CHEETAH__isControlledByWebKit = True
        self.transaction = transaction
        #self.application = transaction.application
        # NOTE(review): transaction.response appears to be an accessor that
        # is stored unbound here and called below to reach .write -- confirm.
        self.response = response = transaction.response
        self.request = transaction.request
        # Temporary hack to accomodate bug in
        # WebKit.Servlet.Servlet.serverSidePath: it uses
        # self._request even though this attribute does not exist.
        # This attribute WILL disappear in the future.
        self._request = transaction.request()
        self.session = transaction.session
        self.write = response().write
        #self.writeln = response.writeln
    def respond(self, trans=None):
        # Subclasses (compiled templates) must provide the main method.
        raise NotImplementedError("""\
couldn't find the template's main method. If you are using #extends
without #implements, try adding '#implements respond' to your template
definition.""")
    def sleep(self, transaction):
        # Drop all per-request references so nothing leaks between requests.
        super(Servlet, self).sleep(transaction)
        self.session = None
        self.request = None
        self._request = None
        self.response = None
        self.transaction = None
    def shutdown(self):
        pass
    def serverSidePath(self, path=None,
                       normpath=os.path.normpath,
                       abspath=os.path.abspath
                       ):
        # Resolve *path* (or the template's own file path) to an absolute,
        # normalized server-side path; delegate to Webware when running
        # under WebKit.
        if self._CHEETAH__isControlledByWebKit:
            return super(Servlet, self).serverSidePath(path)
        elif path:
            return normpath(abspath(path.replace("\\", '/')))
        elif hasattr(self, '_filePath') and self._filePath:
            return normpath(abspath(self._filePath))
        else:
            return None
# vim: shiftwidth=4 tabstop=4 expandtab
| Python |
# $Id: TemplateCmdLineIface.py,v 1.13 2006/01/10 20:34:35 tavis_rudd Exp $
"""Provides a command line interface to compiled Cheetah template modules.
Meta-Data
================================================================================
Author: Tavis Rudd <tavis@damnsimple.com>
Version: $Revision: 1.13 $
Start Date: 2001/12/06
Last Revision Date: $Date: 2006/01/10 20:34:35 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com>"
__revision__ = "$Revision: 1.13 $"[11:-2]
import sys
import os
import getopt
import os.path
try:
from cPickle import load
except ImportError:
from pickle import load
from Cheetah.Version import Version
class Error(Exception):
    """Base exception for errors raised by this command-line interface."""
    pass
class CmdLineIface:
    """A command line interface to compiled Cheetah template modules."""
    def __init__(self, templateObj,
                 scriptName=os.path.basename(sys.argv[0]),
                 cmdLineArgs=sys.argv[1:]):
        """Remember the template object and the (default: live) argv data."""
        self._template = templateObj
        self._scriptName = scriptName
        self._cmdLineArgs = cmdLineArgs
    def run(self):
        """The main program controller."""
        self._processCmdLineArgs()
        print(self._template)
    def _processCmdLineArgs(self):
        """Parse self._cmdLineArgs; -h/--help prints usage and exits, --env
        and --pickle prepend data to the template's searchList."""
        try:
            self._opts, self._args = getopt.getopt(
                self._cmdLineArgs, 'h', ['help',
                                         'env',
                                         'pickle=',
                                         ])
        # Bug fix: `except getopt.GetoptError, v:` is Python 2-only syntax
        # and made this module a SyntaxError on Python 3.
        except getopt.GetoptError as v:
            # print help information and exit:
            print(v)
            print(self.usage())
            sys.exit(2)
        for o, a in self._opts:
            if o in ('-h', '--help'):
                print(self.usage())
                sys.exit()
            if o == '--env':
                self._template.searchList().insert(0, os.environ)
            if o == '--pickle':
                if a == '-':
                    # NOTE(review): on Python 3 this should probably read
                    # from sys.stdin.buffer (pickles are binary) -- confirm.
                    unpickled = load(sys.stdin)
                    self._template.searchList().insert(0, unpickled)
                else:
                    # Bug fix: pickle files are binary; open in 'rb' so
                    # load() works on Python 3.
                    f = open(a, 'rb')
                    unpickled = load(f)
                    f.close()
                    self._template.searchList().insert(0, unpickled)
    def usage(self):
        """Return the usage/help text for this script."""
        return """Cheetah %(Version)s template module command-line interface
Usage
-----
%(scriptName)s [OPTION]
Options
-------
-h, --help Print this help information
--env Use shell ENVIRONMENT variables to fill the
$placeholders in the template.
--pickle <file> Use a variables from a dictionary stored in Python
pickle file to fill $placeholders in the template.
If <file> is - stdin is used:
'%(scriptName)s --pickle -'
Description
-----------
This interface allows you to execute a Cheetah template from the command line
and collect the output. It can prepend the shell ENVIRONMENT or a pickled
Python dictionary to the template's $placeholder searchList, overriding the
defaults for the $placeholders.
""" % {'scriptName': self._scriptName,
       'Version': Version,
       }
# vim: shiftwidth=4 tabstop=4 expandtab
| Python |
# $Id: ErrorCatchers.py,v 1.7 2005/01/03 19:59:07 tavis_rudd Exp $
"""ErrorCatcher class for Cheetah Templates
Meta-Data
================================================================================
Author: Tavis Rudd <tavis@damnsimple.com>
Version: $Revision: 1.7 $
Start Date: 2001/08/01
Last Revision Date: $Date: 2005/01/03 19:59:07 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com>"
__revision__ = "$Revision: 1.7 $"[11:-2]
import time
from Cheetah.NameMapper import NotFound
class Error(Exception):
    """Base exception for the ErrorCatchers module."""
    pass
class ErrorCatcher:
    """Base error catcher: on a caught lookup failure, echo the raw
    placeholder code back into the output unchanged."""
    # Exception types this catcher intercepts.
    _exceptionsToCatch = (NotFound,)
    def __init__(self, templateObj):
        # The template object is not needed by the base implementation.
        pass
    def exceptions(self):
        """Return the tuple of exception types to catch."""
        return self._exceptionsToCatch
    def warn(self, exc_val, code, rawCode, lineCol):
        """Default handling: emit the raw placeholder source."""
        return rawCode
## make an alias
Echo = ErrorCatcher
class BigEcho(ErrorCatcher):
    """Echo the failed placeholder code, loudly framed by '=' bars."""
    def warn(self, exc_val, code, rawCode, lineCol):
        bar = "=" * 15
        return "%s<%s could not be found>%s" % (bar, rawCode, bar)
class KeyError(ErrorCatcher):
    """Turn a failed placeholder lookup into a builtin KeyError.

    Bug fix: this class shadows the builtin name KeyError at module level,
    so the original `raise KeyError(...)` raised *this class* -- which is
    not an exception type, producing a TypeError instead.  The builtin must
    be reached explicitly.
    """
    def warn(self, exc_val, code, rawCode, lineCol):
        import builtins
        raise builtins.KeyError("no '%s' in this Template Object's Search List" % rawCode)
class ListErrors(ErrorCatcher):
    """Accumulate a list of errors."""
    # strftime format for each error's timestamp.
    _timeFormat = "%c"
    def __init__(self, templateObj):
        ErrorCatcher.__init__(self, templateObj)
        self._errors = []
    def warn(self, exc_val, code, rawCode, lineCol):
        """Record the failure details, then echo the raw placeholder code."""
        entry = {
            'exc_val': exc_val,
            'code': code,
            'rawCode': rawCode,
            'lineCol': lineCol,
            'time': time.strftime(self._timeFormat,
                                  time.localtime(time.time())),
        }
        self._errors.append(entry)
        return rawCode
    def listErrors(self):
        """Return the list of errors."""
        return self._errors
| Python |
"""
Nothing, but in a friendly way. Good for filling in for objects you want to
hide. If $form.f1 is a RecursiveNull object, then
$form.f1.anything["you"].might("use") will resolve to the empty string.
This module was contributed by Ian Bicking.
"""
class RecursiveNull(object):
    """A friendly nothing: every attribute access, item access, and call
    returns the same object, and it renders as the empty string.

    Bug fix: the class only defined the Python 2 truth hook __nonzero__,
    so on Python 3 instances were truthy; __bool__ is now defined too.
    """
    def __getattr__(self, attr):
        return self
    def __getitem__(self, item):
        return self
    def __call__(self, *args, **kwargs):
        return self
    def __str__(self):
        return ''
    def __repr__(self):
        return ''
    def __nonzero__(self):
        # Python 2 truthiness hook (kept for backward compatibility).
        return 0
    def __bool__(self):
        # Python 3 truthiness hook.
        return False
    def __eq__(self, x):
        # Equal to anything falsy, unequal to anything truthy.
        if x:
            return False
        return True
    def __ne__(self, x):
        return x and True or False
| Python |
"""
@@TR: This code is pretty much unsupported.
MondoReport.py -- Batching module for Python and Cheetah.
Version 2001-Nov-18. Doesn't do much practical yet, but the companion
testMondoReport.py passes all its tests.
-Mike Orr (Iron)
TODO: BatchRecord.prev/next/prev_batches/next_batches/query, prev.query,
next.query.
How about Report: .page(), .all(), .summary()? Or PageBreaker.
"""
import operator
try:
from functools import reduce
except ImportError:
# If functools doesn't exist, we must be on an old
# enough version that has reduce() in builtins
pass
try:
from Cheetah.NameMapper import valueForKey as lookup_func
except ImportError:
def lookup_func(obj, name):
if hasattr(obj, name):
return getattr(obj, name)
else:
return obj[name] # Raises KeyError.
########## PUBLIC GENERIC FUNCTIONS ##############################
class NegativeError(ValueError):
    """Raised by isNonNegative() when a numeric value is negative."""
    pass
def isNumeric(v):
    """Return True when *v* is an int or a float."""
    return isinstance(v, int) or isinstance(v, float)
def isNonNegative(v):
    """Return whether *v* is numeric; raise NegativeError if it is negative.

    Bug fix: the original computed the result but never returned it (the
    function always returned None), which made it useless as the filter
    predicate it is shaped like.
    """
    ret = isNumeric(v)
    if ret and v < 0:
        raise NegativeError(v)
    return ret
def isNotNone(v):
    """Return True for every value except None."""
    return not (v is None)
def Roman(n):
    """Return the Roman-numeral string for the positive integer *n*.

    Raises TypeError/ValueError for non-integral input (via int()) and
    ValueError for n < 1.

    Bug fix: the error message concatenated a str with the int *n*, which
    raised TypeError and masked the intended ValueError.
    """
    n = int(n) # Raises TypeError.
    if n < 1:
        raise ValueError("roman numeral for zero or negative undefined: %s" % n)
    roman = ''
    # Emit the additive form first (e.g. VIIII), then rewrite the four
    # subtractive pairs (IX etc.) below.
    while n >= 1000:
        n = n - 1000
        roman = roman + 'M'
    while n >= 500:
        n = n - 500
        roman = roman + 'D'
    while n >= 100:
        n = n - 100
        roman = roman + 'C'
    while n >= 50:
        n = n - 50
        roman = roman + 'L'
    while n >= 10:
        n = n - 10
        roman = roman + 'X'
    while n >= 5:
        n = n - 5
        roman = roman + 'V'
    while n < 5 and n >= 1:
        n = n - 1
        roman = roman + 'I'
    roman = roman.replace('DCCCC', 'CM')
    roman = roman.replace('CCCC', 'CD')
    roman = roman.replace('LXXXX', 'XC')
    roman = roman.replace('XXXX', 'XL')
    roman = roman.replace('VIIII', 'IX')
    roman = roman.replace('IIII', 'IV')
    return roman
def sum(lis):
    """Add the items of *lis* together, starting from 0.

    Note: deliberately shadows the builtin ``sum`` within this module.
    """
    total = 0
    for item in lis:
        total = operator.add(total, item)
    return total
def mean(lis):
    """Always returns a floating-point number.

    An empty sequence yields 0.0 instead of raising ZeroDivisionError.
    """
    if not lis:
        return 0.0
    return float(sum(lis)) / len(lis)
def median(lis):
    """Return the middle element of the sorted values (the upper middle
    for even-length input).  Raises IndexError on an empty sequence."""
    ordered = sorted(lis)
    return ordered[len(ordered) // 2]
def variance(lis):
    # The four statistical helpers below are declared for API completeness
    # but intentionally unimplemented.
    raise NotImplementedError()
def variance_n(lis):
    raise NotImplementedError()
def standardDeviation(lis):
    raise NotImplementedError()
def standardDeviation_n(lis):
    raise NotImplementedError()
class IndexFormats:
    """Eight ways to display a subscript index.
    ("Fifty ways to leave your lover....")
    """
    def __init__(self, index, item=None):
        # _index/_number/_item are read by subclasses (e.g. RecordStats),
        # so their names are part of the contract.
        self._index = index
        self._number = index + 1
        self._item = item
    def index(self):
        """Zero-based index."""
        return self._index
    __call__ = index
    def number(self):
        """One-based ordinal."""
        return self._number
    def even(self):
        return not (self._number % 2)
    def odd(self):
        return bool(self._number % 2)
    def even_i(self):
        return not (self._index % 2)
    def odd_i(self):
        return bool(self._index % 2)
    def letter(self):
        """Lowercase letter form ('a', 'b', ...)."""
        return self.Letter().lower()
    def Letter(self):
        """Uppercase letter form ('A', 'B', ...)."""
        return chr(ord('A') + self._index)
    def roman(self):
        """Lowercase roman-numeral form of the one-based number."""
        return self.Roman().lower()
    def Roman(self):
        """Uppercase roman-numeral form of the one-based number."""
        return Roman(self._number)
    def item(self):
        """The associated item, if one was supplied."""
        return self._item
########## PRIVATE CLASSES ##############################
class ValuesGetterMixin:
    """Mixin holding the original record list and extracting values from it."""
    def __init__(self, origList):
        self._origList = origList
    def _getValues(self, field=None, criteria=None):
        """Return the records' values, optionally projected through *field*
        (via lookup_func) and filtered by the *criteria* predicate."""
        values = self._origList
        if field:
            values = [lookup_func(record, field) for record in self._origList]
        if criteria:
            values = [v for v in values if criteria(v)]
        return values
class RecordStats(IndexFormats, ValuesGetterMixin):
    """The statistics that depend on the current record.
    """
    def __init__(self, origList, index):
        record = origList[index] # Raises IndexError.
        IndexFormats.__init__(self, index, record)
        ValuesGetterMixin.__init__(self, origList)
    def length(self):
        # Length of the whole list, not of the current page.
        return len(self._origList)
    def first(self):
        return self._index == 0
    def last(self):
        return self._index >= len(self._origList) - 1
    def _firstOrLastValue(self, field, currentIndex, otherIndex):
        # True when the value at currentIndex differs from its neighbor at
        # otherIndex (or when that neighbor does not exist).
        currentValue = self._origList[currentIndex] # Raises IndexError.
        try:
            otherValue = self._origList[otherIndex]
        except IndexError:
            return True
        if field:
            currentValue = lookup_func(currentValue, field)
            otherValue = lookup_func(otherValue, field)
        return currentValue != otherValue
    def firstValue(self, field=None):
        return self._firstOrLastValue(field, self._index, self._index - 1)
    def lastValue(self, field=None):
        return self._firstOrLastValue(field, self._index, self._index + 1)
    # firstPage and lastPage not implemented. Needed?
    def percentOfTotal(self, field=None, suffix='%', default='N/A', decimals=2):
        # Current record's share of the numeric total; *default* is returned
        # when a percentage cannot be computed.
        rec = self._origList[self._index]
        if field:
            val = lookup_func(rec, field)
        else:
            val = rec
        try:
            lis = self._getValues(field, isNumeric)
        except NegativeError:
            return default
        total = sum(lis)
        if total == 0.00: # Avoid ZeroDivisionError.
            return default
        val = float(val)
        try:
            percent = (val / total) * 100
        except ZeroDivisionError:
            return default
        if decimals == 0:
            percent = int(percent)
        else:
            percent = round(percent, decimals)
        if suffix:
            return str(percent) + suffix # String.
        else:
            return percent # Numeric.
    def __call__(self): # Overrides IndexFormats.__call__
        """This instance is not callable, so we override the super method.
        """
        raise NotImplementedError()
    def prev(self):
        # NOTE(review): the prev/next paging arithmetic below looks broken
        # (length() is the whole list, so next() can only return None when
        # _index == 0); the module header says this code is unsupported.
        if self._index == 0:
            return None
        else:
            length = self.length()
            start = self._index - length
            return PrevNextPage(self._origList, length, start)
    def next(self):
        if self._index + self.length() == self.length():
            return None
        else:
            length = self.length()
            start = self._index + length
            return PrevNextPage(self._origList, length, start)
    def prevPages(self):
        raise NotImplementedError()
    def nextPages(self):
        raise NotImplementedError()
    prev_batches = prevPages
    next_batches = nextPages
    def summary(self):
        raise NotImplementedError()
    def _prevNextHelper(self, start, end, size, orphan, sequence):
        """Copied from Zope's DT_InSV.py's "opt" function.
        """
        # Normalize a (start, end, size) page window against *sequence*,
        # honoring the *orphan* minimum for a trailing partial page.
        if size < 1:
            if start > 0 and end > 0 and end >= start:
                size=end+1-start
            else: size=7
        if start > 0:
            try: sequence[start-1]
            except: start=len(sequence)
            # if start > l: start=l
            if end > 0:
                if end < start: end=start
            else:
                end=start+size-1
                try: sequence[end+orphan-1]
                except: end=len(sequence)
                # if l - end < orphan: end=l
        elif end > 0:
            try: sequence[end-1]
            except: end=len(sequence)
            # if end > l: end=l
            start=end+1-size
            if start - 1 < orphan: start=1
        else:
            start=1
            end=start+size-1
            try: sequence[end+orphan-1]
            except: end=len(sequence)
            # if l - end < orphan: end=l
        return start, end, size
class Summary(ValuesGetterMixin):
    """The summary statistics, that don't depend on the current record.
    """
    def __init__(self, origList):
        ValuesGetterMixin.__init__(self, origList)
    def sum(self, field=None):
        """Sum of the numeric values."""
        lis = self._getValues(field, isNumeric)
        return sum(lis)
    total = sum
    def count(self, field=None):
        """Number of non-None values."""
        lis = self._getValues(field, isNotNone)
        return len(lis)
    def min(self, field=None):
        lis = self._getValues(field, isNotNone)
        return min(lis) # Python builtin function min.
    def max(self, field=None):
        lis = self._getValues(field, isNotNone)
        return max(lis) # Python builtin function max.
    def mean(self, field=None):
        """Always returns a floating point number.
        """
        lis = self._getValues(field, isNumeric)
        return mean(lis)
    average = mean
    def median(self, field=None):
        lis = self._getValues(field, isNumeric)
        return median(lis)
    # Bug fix: the four stubs below read `raiseNotImplementedError()`
    # (missing space), which raised NameError instead of the intended
    # NotImplementedError.
    def variance(self, field=None):
        raise NotImplementedError()
    def variance_n(self, field=None):
        raise NotImplementedError()
    def standardDeviation(self, field=None):
        raise NotImplementedError()
    def standardDeviation_n(self, field=None):
        raise NotImplementedError()
class PrevNextPage:
    # Index formats describing the start and end of an adjacent page.
    def __init__(self, origList, size, start):
        end = start + size
        self.start = IndexFormats(start, origList[start])
        # NOTE(review): origList[end] looks off by one (end is exclusive of
        # the page), so this raises IndexError for the final page -- confirm
        # the intent before relying on it.
        self.end = IndexFormats(end, origList[end])
        self.length = size
########## MAIN PUBLIC CLASS ##############################
class MondoReport:
    """Batching front end: slice the record list into pages of (record,
    absolute-stats, batch-stats) tuples."""
    _RecordStatsClass = RecordStats
    _SummaryClass = Summary
    def __init__(self, origlist):
        self._origList = origlist
    def page(self, size, start, overlap=0, orphan=0):
        """Returns list of ($r, $a, $b)

        $r is the record, $a its stats relative to the whole list, $b its
        stats relative to the current page.  *overlap* and *orphan* are not
        implemented and must be 0.

        Bug fix: the loop iterated range(size), which raised IndexError
        whenever the final page was shorter than *size*; it now iterates
        the actual slice length.
        """
        if overlap != 0:
            raise NotImplementedError("non-zero overlap")
        if orphan != 0:
            raise NotImplementedError("non-zero orphan")
        origList = self._origList
        start = max(0, start)
        end = min(start + size, len(origList))
        mySlice = origList[start:end]
        ret = []
        for rel in range(len(mySlice)):
            abs_ = start + rel
            r = mySlice[rel]
            a = self._RecordStatsClass(origList, abs_)
            b = self._RecordStatsClass(mySlice, rel)
            ret.append((r, a, b))
        return ret
    batch = page
    def all(self):
        """One page covering every record."""
        return self.page(len(self._origList), 0, 0, 0)
    def summary(self):
        """Whole-list summary statistics."""
        return self._SummaryClass(self._origList)
"""
**********************************
Return a pageful of records from a sequence, with statistics.
in : origlist, list or tuple. The entire set of records. This is
usually a list of objects or a list of dictionaries.
page, int >= 0. Which page to display.
size, int >= 1. How many records per page.
widow, int >=0. Not implemented.
orphan, int >=0. Not implemented.
base, int >=0. Number of first page (usually 0 or 1).
out: list of (o, b) pairs. The records for the current page. 'o' is
the original element from 'origlist' unchanged. 'b' is a Batch
object containing meta-info about 'o'.
exc: IndexError if 'page' or 'size' is < 1. If 'origlist' is empty or
'page' is too high, it returns an empty list rather than raising
an error.
origlist_len = len(origlist)
start = (page + base) * size
end = min(start + size, origlist_len)
ret = []
# widow, orphan calculation: adjust 'start' and 'end' up and down,
# Set 'widow', 'orphan', 'first_nonwidow', 'first_nonorphan' attributes.
for i in range(start, end):
o = origlist[i]
b = Batch(origlist, size, i)
tup = o, b
ret.append(tup)
return ret
def prev(self):
# return a PrevNextPage or None
def next(self):
# return a PrevNextPage or None
def prev_batches(self):
# return a list of SimpleBatch for the previous batches
def next_batches(self):
# return a list of SimpleBatch for the next batches
########## PUBLIC MIXIN CLASS FOR CHEETAH TEMPLATES ##############
class MondoReportMixin:
def batch(self, origList, size=None, start=0, overlap=0, orphan=0):
bat = MondoReport(origList)
return bat.batch(size, start, overlap, orphan)
def batchstats(self, origList):
bat = MondoReport(origList)
return bat.stats()
"""
# vim: shiftwidth=4 tabstop=4 expandtab textwidth=79
| Python |
"""This package contains classes, functions, objects and packages contributed
by Cheetah users. They are not used by Cheetah itself. There is no
guarantee that this directory will be included in Cheetah releases, that
these objects will remain here forever, or that they will remain
backward-compatible.
"""
# vim: shiftwidth=4 tabstop=4 expandtab
| Python |
# $Id: CGITemplate.py,v 1.6 2006/01/29 02:09:59 tavis_rudd Exp $
"""A subclass of Cheetah.Template for use in CGI scripts.
Usage in a template:
#extends Cheetah.Tools.CGITemplate
#implements respond
$cgiHeaders#slurp
Usage in a template inheriting a Python class:
1. The template
#extends MyPythonClass
#implements respond
$cgiHeaders#slurp
2. The Python class
from Cheetah.Tools import CGITemplate
class MyPythonClass(CGITemplate):
def cgiHeadersHook(self):
return "Content-Type: text/html; charset=koi8-r\n\n"
To read GET/POST variables, use the .webInput method defined in
Cheetah.Utils.WebInputMixin (available in all templates without importing
anything), use Python's 'cgi' module, or make your own arrangements.
This class inherits from Cheetah.Template to make it usable in Cheetah's
single-inheritance model.
Meta-Data
================================================================================
Author: Mike Orr <iron@mso.oz.net>
License: This software is released for unlimited distribution under the
terms of the MIT license. See the LICENSE file.
Version: $Revision: 1.6 $
Start Date: 2001/10/03
Last Revision Date: $Date: 2006/01/29 02:09:59 $
"""
__author__ = "Mike Orr <iron@mso.oz.net>"
__revision__ = "$Revision: 1.6 $"[11:-2]
import os
from Cheetah.Template import Template
class CGITemplate(Template):
    """Methods useful in CGI scripts.

    Any class that inherits this mixin must also inherit Cheetah.Servlet.
    """

    def cgiHeaders(self):
        """Output the CGI headers if this is a CGI script.

        Usage: $cgiHeaders#slurp
        Override .cgiHeadersHook() if you want to customize the headers.
        Returns None when not running as a CGI script.
        """
        if not self.isCgi():
            return None
        return self.cgiHeadersHook()

    def cgiHeadersHook(self):
        """Hook producing the header block; override to customize."""
        return "Content-type: text/html\n\n"

    def isCgi(self):
        """True when invoked as a CGI script and not controlled by WebKit."""
        inCgiEnvironment = 'REQUEST_METHOD' in os.environ
        controlledByWebKit = self._CHEETAH__isControlledByWebKit
        return inCgiEnvironment and not controlledByWebKit
# vim: shiftwidth=4 tabstop=4 expandtab
| Python |
# $Id: SiteHierarchy.py,v 1.1 2001/10/11 03:25:54 tavis_rudd Exp $
"""Create menus and crumbs from a site hierarchy.
You define the site hierarchy as lists/tuples. Each location in the hierarchy
is a (url, description) tuple. Each list has the base URL/text in the 0
position, and all the children coming after it. Any child can be a list,
representing further depth to the hierarchy. See the end of the file for an
example hierarchy.
Use Hierarchy(contents, currentURL), where contents is this hierarchy, and
currentURL is the position you are currently in. The menubar and crumbs methods
give you the HTML output.
There are methods you can override to customize the HTML output.
"""
##################################################
## DEPENDENCIES
import string
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
##################################################
## CLASSES
class Hierarchy:
    def __init__(self, hierarchy, currentURL, prefix='', menuCSSClass=None,
                 crumbCSSClass=None):
        """
        hierarchy is described in the module docstring; currentURL should be
        somewhere in the hierarchy.  prefix will be added before all of the
        URLs (to help mitigate the problems with absolute URLs), and if
        given, the CSS classes will be used for both links *and* nonlinks.
        """
        self._contents = hierarchy
        self._currentURL = currentURL
        if menuCSSClass:
            self._menuCSSClass = ' class="%s"' % menuCSSClass
        else:
            self._menuCSSClass = ''
        if crumbCSSClass:
            self._crumbCSSClass = ' class="%s"' % crumbCSSClass
        else:
            self._crumbCSSClass = ''
        self._prefix = prefix

    ## Main output methods

    def menuList(self, menuCSSClass=None):
        """An indented menu list"""
        if menuCSSClass:
            self._menuCSSClass = ' class="%s"' % menuCSSClass
        stream = StringIO()
        for item in self._contents[1:]:
            self._menubarRecurse(item, 0, stream)
        return stream.getvalue()

    def crumbs(self, crumbCSSClass=None):
        """The home>where>you>are crumbs"""
        if crumbCSSClass:
            self._crumbCSSClass = ' class="%s"' % crumbCSSClass
        path = []
        pos = self._contents
        while True:
            ## This is not the fastest algorithm, I'm afraid.
            ## But it probably won't be for a huge hierarchy anyway.
            foundAny = False
            path.append(pos[0])
            for item in pos[1:]:
                if self._inContents(item):
                    if isinstance(item, tuple):
                        path.append(item)
                        break
                    else:
                        pos = item
                        foundAny = True
                        break
            if not foundAny:
                break
        if len(path) == 1:
            return self.emptyCrumb()
        # Fix: use str.join instead of the Python-2-only string.join so the
        # method keeps working on Python 3 (the file already uses print()).
        links = [self.crumbLink(url, text) for url, text in path]
        return self.crumbSeperator().join(links) + self.crumbTerminator()

    ## Methods to control the Aesthetics
    #  - override these methods for your own look

    def menuLink(self, url, text, indent):
        # NOTE(review): plain spaces collapse in rendered HTML; the original
        # indentation string may have been '&nbsp;' before extraction --
        # confirm before relying on visual indentation.
        if url == self._currentURL or self._prefix + url == self._currentURL:
            return '%s<B%s>%s</B> <BR>\n' % (' '*2*indent,
                                             self._menuCSSClass, text)
        else:
            return '%s<A HREF="%s%s"%s>%s</A> <BR>\n' % \
                   (' '*2*indent, self._prefix, url,
                    self._menuCSSClass, text)

    def crumbLink(self, url, text):
        if url == self._currentURL or self._prefix + url == self._currentURL:
            # Bug fix: the original interpolated (text, cssClass) -- i.e.
            # swapped -- producing '<Btext>class</B>'.  The CSS class goes
            # inside the tag, the text between the tags (cf. menuLink).
            return '<B%s>%s</B>' % (self._crumbCSSClass, text)
        else:
            return '<A HREF="%s%s"%s>%s</A>' % \
                   (self._prefix, url, self._crumbCSSClass, text)

    def crumbSeperator(self):
        return ' > '

    def crumbTerminator(self):
        return ''

    def emptyCrumb(self):
        """When you are at the homepage"""
        return ''

    ## internal methods

    def _menubarRecurse(self, contents, indent, stream):
        if isinstance(contents, tuple):
            url, text = contents
            rest = []
        else:
            url, text = contents[0]
            rest = contents[1:]
        stream.write(self.menuLink(url, text, indent))
        if self._inContents(contents):
            for item in rest:
                self._menubarRecurse(item, indent+1, stream)

    def _inContents(self, contents):
        if isinstance(contents, tuple):
            return self._currentURL == contents[0]
        for item in contents:
            if self._inContents(item):
                return True
        return False
##################################################
## from the command line
if __name__ == '__main__':
    # Demo: render the menu and the crumbs for several "current" URLs using
    # a small example hierarchy (tuples are leaves, nested lists are
    # subtrees whose first element is the subtree's own (url, text) pair).
    hierarchy = [('/', 'home'),
                 ('/about', 'About Us'),
                 [('/services', 'Services'),
                  [('/services/products', 'Products'),
                   ('/services/products/widget', 'The Widget'),
                   ('/services/products/wedge', 'The Wedge'),
                   ('/services/products/thimble', 'The Thimble'),
                   ],
                  ('/services/prices', 'Prices'),
                  ],
                 ('/contact', 'Contact Us'),
                 ]
    for url in ['/', '/services', '/services/products/widget', '/contact']:
        print('<p>', '='*50)
        print('<br> %s: <br>\n' % url)
        n = Hierarchy(hierarchy, url, menuCSSClass='menu', crumbCSSClass='crumb',
                      prefix='/here')
        print(n.menuList())
        print('<p>', '-'*50)
        print(n.crumbs())
| Python |
#
| Python |
# Package API: re-export the TurboCheetah plugin class from its
# implementation module.
from turbocheetah import cheetahsupport

TurboCheetah = cheetahsupport.TurboCheetah

# Only TurboCheetah is part of the public interface.
__all__ = ["TurboCheetah"]
"Template support for Cheetah"
import sys, os, imp
from Cheetah import Compiler
import pkg_resources
def _recompile_template(package, basename, tfile, classname):
    """Compile ``package/basename.tmpl`` into a fresh module.

    The module is registered in sys.modules under *classname* and returned.
    *tfile* is accepted for interface compatibility but is not read here;
    the template source is fetched through pkg_resources instead.
    """
    import types  # local import keeps this edit self-contained
    tmpl = pkg_resources.resource_string(package, "%s.tmpl" % basename)
    c = Compiler.Compiler(source=tmpl, mainClassName='GenTemplate')
    code = str(c)
    # types.ModuleType replaces imp.new_module: identical behavior, but the
    # 'imp' module is deprecated and was removed in Python 3.12.
    mod = types.ModuleType(classname)
    ns = dict()
    exec(code, ns)
    # The compiler names the class GenTemplate; fall back to the name used
    # by older compilers.
    tempclass = ns.get("GenTemplate",
                       ns.get('DynamicallyCompiledCheetahTemplate'))
    assert tempclass
    tempclass.__name__ = basename
    setattr(mod, basename, tempclass)
    sys.modules[classname] = mod
    return mod
class TurboCheetah:
    """TurboGears/Buffet template-engine plugin for Cheetah templates."""

    # Template source files are located with this extension.
    extension = "tmpl"

    def __init__(self, extra_vars_func=None, options=None):
        # extra_vars_func: optional zero-argument callable returning a dict
        # that is appended to every render's searchList.
        if options is None:
            options = dict()
        self.get_extra_vars = extra_vars_func
        self.options = options
        # classname -> mtime of its .tmpl source; used to detect staleness.
        self.compiledTemplates = {}
        self.search_path = []

    def load_template(self, template=None,
                      template_string=None, template_file=None,
                      loadingSite=False):
        """Searches for a template along the Python path.

        Template files must end in ".tmpl" and be in legitimate packages.
        Exactly one of template / template_string / template_file must be
        given; otherwise TypeError is raised.
        """
        given = len([_f for _f in (template, template_string, template_file) if _f])
        if given > 1:
            raise TypeError(
                "You may give only one of template, template_string, and "
                "template_file")
        if not given:
            raise TypeError(
                "You must give one of template, template_string, or "
                "template_file")
        if template:
            return self.load_template_module(template)
        elif template_string:
            return self.load_template_string(template_string)
        elif template_file:
            return self.load_template_file(template_file)

    def load_template_module(self, classname):
        # Return the template class for a dotted name, recompiling the
        # .tmpl source whenever it is newer than the cached module.
        ct = self.compiledTemplates
        divider = classname.rfind(".")
        if divider > -1:
            package = classname[0:divider]
            basename = classname[divider+1:]
        else:
            raise ValueError("All templates must be in a package")
        if not self.options.get("cheetah.precompiled", False):
            # Dynamic mode: watch the source file's mtime.
            tfile = pkg_resources.resource_filename(package,
                                                    "%s.%s" %
                                                    (basename,
                                                     self.extension))
            if classname in ct:
                mtime = os.stat(tfile).st_mtime
                if ct[classname] != mtime:
                    # Source changed: drop the stale module and recompile.
                    ct[classname] = mtime
                    del sys.modules[classname]
                    mod = _recompile_template(package, basename,
                                              tfile, classname)
                else:
                    # Unchanged: reuse the already-imported module.
                    mod = __import__(classname, dict(), dict(), [basename])
            else:
                # First use: record the mtime and compile.
                ct[classname] = os.stat(tfile).st_mtime
                mod = _recompile_template(package, basename,
                                          tfile, classname)
        else:
            # Precompiled mode: trust the installed Python module.
            mod = __import__(classname, dict(), dict(), [basename])
        tempclass = getattr(mod, basename)
        return tempclass

    def load_template_string(self, content):
        # Not supported by this engine plugin.
        raise NotImplementedError

    def load_template_file(self, filename):
        # Not supported by this engine plugin.
        raise NotImplementedError

    def render(self, info, format="html", fragment=False, template=None,
               template_string=None, template_file=None):
        """Render *info* (a dict) through the selected template.

        'fragment' selects the template's fragment() output instead of the
        full respond() output.
        """
        tclass = self.load_template(
            template=template, template_string=template_string,
            template_file=template_file)
        if self.get_extra_vars:
            extra = self.get_extra_vars()
        else:
            extra = {}
        tempobj = tclass(searchList=[info, extra])
        if fragment:
            return tempobj.fragment()
        else:
            return tempobj.respond()
| Python |
'''
Provides dummy Transaction and Response classes is used by Cheetah in place
of real Webware transactions when the Template obj is not used directly as a
Webware servlet.
Warning: This may be deprecated in the future, please do not rely on any
specific DummyTransaction or DummyResponse behavior
'''
import logging
import types
class DummyResponseFailure(Exception):
    """Raised for failures within the dummy response machinery."""
    pass
class DummyResponse(object):
'''
A dummy Response class is used by Cheetah in place of real Webware
Response objects when the Template obj is not used directly as a Webware
servlet
'''
def __init__(self):
self._outputChunks = []
def flush(self):
pass
def safeConvert(self, chunk):
# Exceptionally gross, but the safest way
# I've found to ensure I get a legit unicode object
if not chunk:
return u''
if isinstance(chunk, unicode):
return chunk
try:
return chunk.decode('utf-8', 'strict')
except UnicodeDecodeError:
try:
return chunk.decode('latin-1', 'strict')
except UnicodeDecodeError:
return chunk.decode('ascii', 'ignore')
except AttributeError:
return unicode(chunk, errors='ignore')
return chunk
def write(self, value):
self._outputChunks.append(value)
def writeln(self, txt):
write(txt)
write('\n')
def getvalue(self, outputChunks=None):
chunks = outputChunks or self._outputChunks
try:
return u''.join(chunks)
except UnicodeDecodeError, ex:
logging.debug('Trying to work around a UnicodeDecodeError in getvalue()')
logging.debug('...perhaps you could fix "%s" while you\'re debugging')
return ''.join((self.safeConvert(c) for c in chunks))
def writelines(self, *lines):
## not used
[self.writeln(ln) for ln in lines]
class DummyTransaction(object):
    '''
    Stand-in for a real Webware transaction when the Template obj is not
    used directly as a Webware servlet.

    Only the response object/method is provided; all other transaction
    methods and attributes make no sense in this context.
    '''
    def __init__(self, *args, **kwargs):
        self._response = None

    def response(self, resp=None):
        # Lazily adopt the supplied response (or create a DummyResponse)
        # on first access; later calls always return the same object.
        if self._response is None:
            self._response = resp if resp else DummyResponse()
        return self._response
class TransformerResponse(DummyResponse):
    """A DummyResponse whose getvalue() may pass through an output filter."""

    def __init__(self, *args, **kwargs):
        super(TransformerResponse, self).__init__(*args, **kwargs)
        self._filter = None

    def getvalue(self, **kwargs):
        output = super(TransformerResponse, self).getvalue(**kwargs)
        transform = self._filter
        if not transform:
            return output
        if isinstance(transform, type):
            # A filter class (rather than an instance) was assigned;
            # instantiate it before use.
            transform = transform()
        return transform.filter(output)
class TransformerTransaction(object):
    # Transaction stand-in used by the #transform machinery.
    def __init__(self, *args, **kwargs):
        self._response = None
    def response(self):
        if self._response:
            return self._response
        # NOTE(review): unlike DummyTransaction.response(), this creates a
        # fresh TransformerResponse on every call and never stores it in
        # self._response -- confirm whether that is intentional.
        return TransformerResponse()
| Python |
import Cheetah.Template
def render(template_file, **kwargs):
    '''
    Cheetah.Django.render() takes the template filename
    (the filename should be a file in your Django
    TEMPLATE_DIRS).

    Any additional keyword arguments are passed into the
    template and propagated into the template's searchList.
    '''
    # Imported lazily so importing this module does not require Django.
    import django.http
    import django.template.loader
    # NOTE(review): find_template_source() was removed in later Django
    # releases -- verify against the Django version actually in use.
    source, loader = django.template.loader.find_template_source(template_file)
    t = Cheetah.Template.Template(source, searchList=[kwargs])
    return django.http.HttpResponse(t.__str__())
| Python |
'''
Filters for the #filter directive as well as #transform
#filter results in output filters Cheetah's $placeholders .
#transform results in a filter on the entirety of the output
'''
import sys
# Additional entities WebSafe knows how to transform. No need to include
# '<', '>' or '&' since those will have been done already.
# Bug fix: the mapping had degenerated into identity no-ops
# ({' ': ' ', '"': '"'}); restore the intended HTML entities.
webSafeEntities = {' ': '&nbsp;', '"': '&quot;'}
class Filter(object):
    """A baseclass for the Cheetah Filters."""

    def __init__(self, template=None):
        """Keep a reference to the template using this filter instance.

        None of the standard filters use the reference, but it is available
        to Filter subclasses, should they need it.  Subclasses should call
        this method.
        """
        self.template = template

    def filter(self, val, encoding=None, str=str, **kw):
        '''
        Pass Unicode strings through unmolested, unless an encoding is
        specified; None becomes the empty string.
        '''
        if val is None:
            return u''
        if isinstance(val, unicode):
            # Ignore the encoding and return the unicode object untouched.
            return val
        try:
            return unicode(val)
        except UnicodeDecodeError:
            # More fallbacks could go here; instead pass the str along and
            # let DummyTransaction worry about it.
            return str(val)
# Backwards-compatible aliases: both historic names behave exactly like
# the base Filter.
RawOrEncodedUnicode = Filter

EncodeUnicode = Filter
class Markdown(EncodeUnicode):
    '''
    Markdown will change regular strings to Markdown
    (http://daringfireball.net/projects/markdown/)

    Such that:
        My Header
        =========
    Becomes:
        <h1>My Header</h1>

    and so on.

    Markdown is meant to be used with the #transform
    tag, as its usefulness with #filter is marginal at
    best
    '''
    def filter(self, value, **kwargs):
        # This is a bit of a hack to allow outright embedding of the markdown module
        try:
            import markdown
        except ImportError:
            print('>>> Exception raised importing the "markdown" module')
            print('>>> Are you sure you have the ElementTree module installed?')
            print(' http://effbot.org/downloads/#elementtree')
            raise
        # Unicode-normalize first via the base filter, then convert to HTML.
        encoded = super(Markdown, self).filter(value, **kwargs)
        return markdown.markdown(encoded)
class CodeHighlighter(EncodeUnicode):
    '''
    The CodeHighlighter filter depends on the "pygments" module which you can
    download and install from: http://pygments.org

    CodeHighlighter assumes the string it receives is source code and uses
    pygments.lexers.guess_lexer() to try to guess which parser
    to use when highlighting it.

    CodeHighlighter will return the HTML and CSS to render the code block,
    syntax highlighted, in a browser

    NOTE: I had an issue installing pygments on Linux/amd64/Python 2.6 dealing with
    importing of pygments.lexers, I was able to correct the failure by adding:
        raise ImportError
    to line 39 of pygments/plugin.py (since importing pkg_resources was causing issues)
    '''
    def filter(self, source, **kwargs):
        # Unicode-normalize first via the base filter.
        encoded = super(CodeHighlighter, self).filter(source, **kwargs)
        try:
            from pygments import highlight
            from pygments import lexers
            from pygments import formatters
        except ImportError, ex:
            # Degrade gracefully: report the problem, return the plain text.
            print('<%s> - Failed to import pygments! (%s)' % (self.__class__.__name__, ex))
            print('-- You may need to install it from: http://pygments.org')
            return encoded

        lexer = None
        try:
            # Guess the language from the raw source; fall back to Python.
            lexer = lexers.guess_lexer(source)
        except lexers.ClassNotFound:
            lexer = lexers.PythonLexer()

        formatter = formatters.HtmlFormatter(cssclass='code_highlighter')
        encoded = highlight(encoded, lexer, formatter)
        # Inline the stylesheet so the returned fragment is self-contained.
        css = formatter.get_style_defs('.code_highlighter')
        return '''<style type="text/css"><!--
%(css)s
--></style>%(source)s''' % {'css' : css, 'source' : encoded}
class MaxLen(Filter):
    """Truncate filtered output to at most kw['maxlen'] characters."""

    def filter(self, val, **kw):
        """Replace None with '' and cut off at maxlen."""
        text = super(MaxLen, self).filter(val, **kw)
        if 'maxlen' in kw and len(text) > kw['maxlen']:
            text = text[:kw['maxlen']]
        return text
class WebSafe(Filter):
    """Escape HTML entities in $placeholders.
    """
    def filter(self, val, **kw):
        s = super(WebSafe, self).filter(val, **kw)
        # These substitutions are copied from cgi.escape().
        # Bug fix: the replacement targets had degenerated into no-ops
        # (e.g. replacing '&' with '&'); restore the real HTML entities.
        s = s.replace("&", "&amp;")  # Must be done first!
        s = s.replace("<", "&lt;")
        s = s.replace(">", "&gt;")
        # Process the additional transformations if any.
        if 'also' in kw:
            also = kw['also']
            entities = webSafeEntities   # Global variable.
            for k in also:
                if k in entities:
                    v = entities[k]
                else:
                    # Unknown characters become numeric character references.
                    v = "&#%s;" % ord(k)
                s = s.replace(k, v)
        return s
class Strip(Filter):
    """Strip leading/trailing whitespace on each line, preserving newlines.

    Every input line corresponds to exactly one output line with its
    trailing newline intact; consecutive blank lines are kept, which is why
    the value is scanned newline-by-newline instead of using val.split('\\n').

    Intended to be usable both with the #filter directive and with the
    proposed #sed directive (which has not been ratified yet).
    """

    def filter(self, val, **kw):
        text = super(Strip, self).filter(val, **kw)
        pieces = []
        cursor = 0                      # current line is text[cursor:newline]
        while True:
            newline = text.find('\n', cursor)
            if newline == -1:           # no more newlines
                break
            pieces.append(text[cursor:newline].strip())
            pieces.append('\n')
            cursor = newline + 1
        # Unterminated tail after the final newline, if any.
        pieces.append(text[cursor:].strip())
        return "".join(pieces)
class StripSqueeze(Filter):
    """Canonicalize every run of whitespace to a single space.

    Leading/trailing whitespace is stripped and all newlines removed, so
    multi-line input is joined into one long line with NO trailing newline.
    """

    def filter(self, val, **kw):
        words = super(StripSqueeze, self).filter(val, **kw).split()
        return " ".join(words)
##################################################
## MAIN ROUTINE -- testing
def test():
    # Visual smoke test: print each filter's output for two sample inputs.
    s1 = "abc <=> &"
    s2 = " asdf \n\t 1 2 3\n"
    print("WebSafe INPUT:", repr(s1))
    print(" WebSafe:", repr(WebSafe().filter(s1)))
    print()
    print(" Strip INPUT:", repr(s2))
    print(" Strip:", repr(Strip().filter(s2)))
    print("StripSqueeze:", repr(StripSqueeze().filter(s2)))
    print("Unicode:", repr(EncodeUnicode().filter(u'aoeu12345\u1234')))

if __name__ == "__main__":
    # Run the smoke test when executed directly.
    test()
# vim: shiftwidth=4 tabstop=4 expandtab
| Python |
'''
Provides several CacheStore backends for Cheetah's caching framework. The
methods provided by these classes have the same semantics as those in the
python-memcached API, except for their return values:
set(key, val, time=0)
set the value unconditionally
add(key, val, time=0)
set only if the server doesn't already have this key
replace(key, val, time=0)
set only if the server already have this key
get(key, val)
returns val or raises a KeyError
delete(key)
deletes or raises a KeyError
'''
import time
from Cheetah.Utils.memcache import Client as MemcachedClient
class Error(Exception):
    """Base exception raised by the CacheStore backends."""
    pass
class AbstractCacheStore(object):
    """Interface for CacheStore backends (python-memcached-like semantics)."""

    def set(self, key, val, time=None):
        """Set the value unconditionally."""
        raise NotImplementedError

    def add(self, key, val, time=None):
        """Set only if the store doesn't already have this key."""
        raise NotImplementedError

    def replace(self, key, val, time=None):
        """Set only if the store already has this key."""
        raise NotImplementedError

    def delete(self, key):
        """Delete the value or raise KeyError."""
        raise NotImplementedError

    def get(self, key):
        """Return the value or raise KeyError."""
        raise NotImplementedError
class MemoryCacheStore(AbstractCacheStore):
    """In-memory CacheStore: values live in a dict in the current process.

    'time' arguments are treated as absolute expiry timestamps (0 = never
    expire), matching how get() compares them against time.time().
    """

    def __init__(self):
        self._data = {}

    def set(self, key, val, time=0):
        """Set the value unconditionally."""
        self._data[key] = (val, time)

    def add(self, key, val, time=0):
        """Set the value only if 'key' is not already cached."""
        if key in self._data:
            raise Error('a value for key %r is already in the cache'%key)
        self._data[key] = (val, time)

    def replace(self, key, val, time=0):
        """Set the value only if 'key' is already cached.

        Bug fix: the original raised when the key WAS present (a verbatim
        copy of add()), so replace() always failed on its intended use and
        always overwrote missing keys -- the inverse of the documented
        'set only if the server already has this key' contract.
        """
        if key not in self._data:
            raise Error('no value for key %r is in the cache'%key)
        self._data[key] = (val, time)

    def delete(self, key):
        """Delete the cached value or raise KeyError."""
        del self._data[key]

    def get(self, key):
        """Return the cached value; lazily evict and raise KeyError if stale."""
        (val, exptime) = self._data[key]
        if exptime and time.time() > exptime:
            # Expired: drop the entry on access.
            del self._data[key]
            raise KeyError(key)
        else:
            return val

    def clear(self):
        """Drop every cached value."""
        self._data.clear()
class MemcachedCacheStore(AbstractCacheStore):
    """CacheStore backend built on the python-memcached client."""

    # Bug fix: the original ('127.0.0.1:11211') had no trailing comma, so it
    # was a plain string, not the tuple of server addresses the memcached
    # Client expects.
    servers = ('127.0.0.1:11211',)

    def __init__(self, servers=None, debug=False):
        if servers is None:
            servers = self.servers
        self._client = MemcachedClient(servers, debug)

    def set(self, key, val, time=0):
        """Set the value unconditionally."""
        self._client.set(key, val, time)

    def add(self, key, val, time=0):
        """Set the value only if the server doesn't already have this key."""
        res = self._client.add(key, val, time)
        if not res:
            raise Error('a value for key %r is already in the cache'%key)
        # Bug fix: the original then assigned to self._data, an attribute
        # this class never defines (copy-paste from MemoryCacheStore),
        # guaranteeing an AttributeError on every successful add.

    def replace(self, key, val, time=0):
        """Set the value only if the server already has this key."""
        res = self._client.replace(key, val, time)
        if not res:
            # Failing replace means the key was absent (message fixed to
            # match; the stray self._data assignment is removed as above).
            raise Error('no value for key %r is in the cache'%key)

    def delete(self, key):
        """Delete the value or raise KeyError."""
        res = self._client.delete(key, time=0)
        if not res:
            raise KeyError(key)

    def get(self, key):
        """Return the value or raise KeyError."""
        val = self._client.get(key)
        if val is None:
            raise KeyError(key)
        else:
            return val

    def clear(self):
        """Flush every value from the memcached servers."""
        self._client.flush_all()
| Python |
try:
    from ds.sys.Unspecified import Unspecified
except ImportError:
    # Fallback when the 'ds' package is unavailable: a tiny singleton
    # sentinel whose repr and str both read 'Unspecified'.
    class _Unspecified:
        def __repr__(self):
            return 'Unspecified'
        __str__ = __repr__
    Unspecified = _Unspecified()
| Python |
# $Id: CacheRegion.py,v 1.3 2006/01/28 04:19:30 tavis_rudd Exp $
'''
Cache holder classes for Cheetah:
Cache regions are defined using the #cache Cheetah directive. Each
cache region can be viewed as a dictionary (keyed by cacheRegionID)
handling at least one cache item (the default one). It's possible to add
cacheItems in a region by using the `varyBy` #cache directive parameter as
in the following example::
#def getArticle
this is the article content.
#end def
#cache varyBy=$getArticleID()
$getArticle($getArticleID())
#end cache
The code above will generate a CacheRegion and add new cacheItem for each value
of $getArticleID().
'''
try:
from hashlib import md5
except ImportError:
from md5 import md5
import time
import Cheetah.CacheStore
class CacheItem(object):
    '''
    Container for one cached value, tracking:
        - cacheItemID (string): key under which the data is stored
        - refreshTime (timestamp or None): last time the cache was refreshed
        - expiryTime (timestamp, 0 = never): when the data goes stale
    The data itself lives in the supplied cacheStore.
    '''
    def __init__(self, cacheItemID, cacheStore):
        self._cacheItemID = cacheItemID
        self._cacheStore = cacheStore
        self._refreshTime = None
        self._expiryTime = 0

    def hasExpired(self):
        expiry = self._expiryTime
        return (expiry and time.time() > expiry)

    def setExpiryTime(self, time):
        # The parameter deliberately keeps the name 'time' (shadowing the
        # module here) to preserve the public keyword interface.
        self._expiryTime = time

    def getExpiryTime(self):
        return self._expiryTime

    def setData(self, data):
        # Record the refresh and push the payload into the backing store.
        self._refreshTime = time.time()
        self._cacheStore.set(self._cacheItemID, data, self._expiryTime)

    def getRefreshTime(self):
        return self._refreshTime

    def getData(self):
        # Data may only be read after it has been set at least once.
        assert self._refreshTime
        return self._cacheStore.get(self._cacheItemID)

    def renderOutput(self):
        """Can be overridden to implement edge-caching"""
        data = self.getData()
        return data if data else ""

    def clear(self):
        self._cacheStore.delete(self._cacheItemID)
        self._refreshTime = None
class _CacheDataStoreWrapper(object):
def __init__(self, dataStore, keyPrefix):
self._dataStore = dataStore
self._keyPrefix = keyPrefix
def get(self, key):
return self._dataStore.get(self._keyPrefix+key)
def delete(self, key):
self._dataStore.delete(self._keyPrefix+key)
def set(self, key, val, time=0):
self._dataStore.set(self._keyPrefix+key, val, time=time)
class CacheRegion(object):
    '''
    A `CacheRegion` stores some `CacheItem` instances.

    This implementation stores the data in the memory of the current process.
    If you need a more advanced data store, create a cacheStore class that works
    with Cheetah's CacheStore protocol and provide it as the cacheStore argument
    to __init__. For example you could use
    Cheetah.CacheStore.MemcachedCacheStore, a wrapper around the Python
    memcached API (http://www.danga.com/memcached).
    '''
    _cacheItemClass = CacheItem

    def __init__(self, regionID, templateCacheIdPrefix='', cacheStore=None):
        self._isNew = True
        self._regionID = regionID
        self._templateCacheIdPrefix = templateCacheIdPrefix
        if not cacheStore:
            cacheStore = Cheetah.CacheStore.MemoryCacheStore()
        self._cacheStore = cacheStore
        # Every key is namespaced by the template prefix and the region id.
        self._wrappedCacheDataStore = _CacheDataStoreWrapper(
            cacheStore, keyPrefix=templateCacheIdPrefix+':'+regionID+':')
        self._cacheItems = {}

    def isNew(self):
        """True until the first cache item is created in this region."""
        return self._isNew

    def clear(self):
        " drop all the caches stored in this cache region "
        # Bug fix: iterate over a snapshot of the keys -- the loop deletes
        # entries from the dict, and deleting while iterating .keys()
        # directly raises RuntimeError on Python 3 (and is fragile anyway).
        for cacheItemId in list(self._cacheItems.keys()):
            cacheItem = self._cacheItems[cacheItemId]
            cacheItem.clear()
            del self._cacheItems[cacheItemId]

    def getCacheItem(self, cacheItemID):
        """ Lazy access to a cacheItem

            Try to find a cache in the stored caches. If it doesn't
            exist, it's created.

            Returns a `CacheItem` instance.
        """
        # NOTE(review): md5() requires bytes on Python 3; str() is only
        # sufficient on Python 2 -- confirm before porting.
        cacheItemID = md5(str(cacheItemID)).hexdigest()
        if cacheItemID not in self._cacheItems:
            cacheItem = self._cacheItemClass(
                cacheItemID=cacheItemID, cacheStore=self._wrappedCacheDataStore)
            self._cacheItems[cacheItemID] = cacheItem
            self._isNew = False
        return self._cacheItems[cacheItemID]
| Python |
# $Id: NameMapper.py,v 1.32 2007/12/10 19:20:09 tavis_rudd Exp $
"""This module supports Cheetah's optional NameMapper syntax.
Overview
================================================================================
NameMapper provides a simple syntax for accessing Python data structures,
functions, and methods from Cheetah. It's called NameMapper because it 'maps'
simple 'names' in Cheetah templates to possibly more complex syntax in Python.
Its purpose is to make working with Cheetah easy for non-programmers.
Specifically, non-programmers using Cheetah should NOT need to be taught (a)
what the difference is between an object and a dictionary, (b) what functions
and methods are, and (c) what 'self' is. A further aim (d) is to buffer the
code in Cheetah templates from changes in the implementation of the Python data
structures behind them.
Consider this scenario:
You are building a customer information system. The designers with you want to
use information from your system on the client's website --AND-- they want to
understand the display code so that they can maintain it themselves.
You write a UI class with a 'customers' method that returns a dictionary of all
the customer objects. Each customer object has an 'address' method that returns
a dictionary with information about the customer's address. The designers
want to be able to access that information.
Using PSP, the display code for the website would look something like the
following, assuming your servlet subclasses the class you created for managing
customer information:
<%= self.customer()[ID].address()['city'] %> (42 chars)
Using Cheetah's NameMapper syntax it could be any of the following:
$self.customers()[$ID].address()['city'] (39 chars)
--OR--
$customers()[$ID].address()['city']
--OR--
$customers()[$ID].address().city
--OR--
$customers()[$ID].address.city
--OR--
$customers()[$ID].address.city
--OR--
$customers[$ID].address.city (27 chars)
Which of these would you prefer to explain to the designers, who have no
programming experience? The last form is 15 characters shorter than the PSP
and, conceptually, is far more accessible. With PHP or ASP, the code would be
even messier than the PSP
This is a rather extreme example and, of course, you could also just implement
'$getCustomer($ID).city' and obey the Law of Demeter (search Google for more on that).
But good object orientated design isn't the point here.
Details
================================================================================
The parenthesized letters below correspond to the aims in the second paragraph.
DICTIONARY ACCESS (a)
---------------------
NameMapper allows access to items in a dictionary using the same dotted notation
used to access object attributes in Python. This aspect of NameMapper is known
as 'Unified Dotted Notation'.
For example, with Cheetah it is possible to write:
$customers()['kerr'].address() --OR-- $customers().kerr.address()
where the second form is in NameMapper syntax.
This only works with dictionary keys that are also valid python identifiers:
regex = '[a-zA-Z_][a-zA-Z_0-9]*'
AUTOCALLING (b,d)
-----------------
NameMapper automatically detects functions and methods in Cheetah $vars and calls
them if the parentheses have been left off.
For example if 'a' is an object, 'b' is a method
$a.b
is equivalent to
$a.b()
If b returns a dictionary, then following variations are possible
$a.b.c --OR-- $a.b().c --OR-- $a.b()['c']
where 'c' is a key in the dictionary that a.b() returns.
Further notes:
* NameMapper autocalls the function or method without any arguments. Thus
autocalling can only be used with functions or methods that either have no
arguments or have default values for all arguments.
* NameMapper only autocalls functions and methods. Classes and callable object instances
will not be autocalled.
* Autocalling can be disabled using Cheetah's 'useAutocalling' setting.
LEAVING OUT 'self' (c,d)
------------------------
NameMapper makes it possible to access the attributes of a servlet in Cheetah
without needing to include 'self' in the variable names. See the NAMESPACE
CASCADING section below for details.
NAMESPACE CASCADING (d)
--------------------
...
Implementation details
================================================================================
* NameMapper's search order is dictionary keys then object attributes
* NameMapper.NotFound is raised if a value can't be found for a name.
Performance and the C version
================================================================================
Cheetah comes with both a C version and a Python version of NameMapper. The C
version is significantly faster and the exception tracebacks are much easier to
read. It's still slower than standard Python syntax, but you won't notice the
difference in realistic usage scenarios.
Cheetah uses the optimized C version (_namemapper.c) if it has
been compiled or falls back to the Python version if not.
Meta-Data
================================================================================
Authors: Tavis Rudd <tavis@damnsimple.com>,
Chuck Esterbrook <echuck@mindspring.com>
Version: $Revision: 1.32 $
Start Date: 2001/04/03
Last Revision Date: $Date: 2007/12/10 19:20:09 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com>," +\
"\nChuck Esterbrook <echuck@mindspring.com>"
__revision__ = "$Revision: 1.32 $"[11:-2]
import types
from types import StringType, InstanceType, ClassType, TypeType
from pprint import pformat
import inspect
import pdb
_INCLUDE_NAMESPACE_REPR_IN_NOTFOUND_EXCEPTIONS = False
_ALLOW_WRAPPING_OF_NOTFOUND_EXCEPTIONS = True
__all__ = ['NotFound',
'hasKey',
'valueForKey',
'valueForName',
'valueFromSearchList',
'valueFromFrameOrSearchList',
'valueFromFrame',
]
if not hasattr(inspect.imp, 'get_suffixes'):
    # This is to fix broken behavior of the inspect module under the
    # Google App Engine, see the following issue:
    # http://bugs.communitycheetah.org/view.php?id=10
    # The lambda mimics imp.get_suffixes()'s (suffix, mode, type) rows.
    # NOTE(review): relies on inspect exposing an 'imp' attribute, which is
    # a Python 2 detail -- confirm before running on Python 3.
    setattr(inspect.imp, 'get_suffixes', lambda: [('.py', 'U', 1)])
## N.B. An attempt is made at the end of this module to import C versions of
## these functions. If _namemapper.c has been compiled successfully and the
## import goes smoothly, the Python versions defined here will be replaced with
## the C versions.
class NotFound(LookupError):
    """Raised when NameMapper cannot resolve a name in any namespace."""
    pass
def _raiseNotFoundException(key, namespace):
    """Raise NotFound for *key*, optionally describing the namespace."""
    message = "cannot find '%s'" % key
    if _INCLUDE_NAMESPACE_REPR_IN_NOTFOUND_EXCEPTIONS:
        # Including the whole namespace repr can be very verbose, so it is
        # gated behind a module-level flag.
        message += ' in the namespace %s' % pformat(namespace)
    raise NotFound(message)
def _wrapNotFoundException(exc, fullName, namespace):
    # Re-raise the NotFound currently being handled, first appending the
    # full dotted name being searched to its message.  Must be called from
    # inside an 'except' block: the bare 'raise' re-raises the active
    # exception.
    if not _ALLOW_WRAPPING_OF_NOTFOUND_EXCEPTIONS:
        raise
    else:
        excStr = exc.args[0]
        if excStr.find('while searching')==-1: # only wrap once!
            excStr +=" while searching for '%s'"%fullName
            if _INCLUDE_NAMESPACE_REPR_IN_NOTFOUND_EXCEPTIONS:
                excStr += ' in the namespace %s'%pformat(namespace)
            # Mutate the exception in place so callers see the new message.
            exc.args = (excStr,)
        raise
def _isInstanceOrClass(obj):
    """Return True if *obj* looks like a class or an instance (old- or
    new-style), False for methods, functions and builtin callables.

    Used by _valueForName() to decide whether a callable intermediate
    value should be auto-called when executeCallables is enabled.
    """
    if type(obj) in (InstanceType, ClassType):
        # oldstyle
        return True
    if hasattr(obj, "__class__"):
        # newstyle
        if hasattr(obj, 'mro'):
            # type/class
            return True
        elif (hasattr(obj, 'im_func') or hasattr(obj, 'func_code') or hasattr(obj, '__self__')):
            # method, func, or builtin func
            return False
        elif hasattr(obj, '__init__'):
            # instance
            return True
    return False
def hasKey(obj, key):
    """Return True if *obj* exposes *key* as a mapping entry or attribute."""
    mapping_like = hasattr(obj, 'has_key')
    if mapping_like and key in obj:
        return True
    return hasattr(obj, key)
def valueForKey(obj, key):
    """Return obj[key] for mapping-style objects, else getattr(obj, key).

    Raises NotFound (via _raiseNotFoundException) when neither works.
    """
    if hasattr(obj, 'has_key') and key in obj:
        return obj[key]
    if hasattr(obj, key):
        return getattr(obj, key)
    _raiseNotFoundException(key, obj)
def _valueForName(obj, name, executeCallables=False):
    """Resolve the dotted *name* (e.g. 'a.b.c') chunk by chunk against *obj*.

    Each chunk is tried as an attribute first, then as an item lookup
    (obj[key]); only a TypeError from item access maps to NotFound, so a
    KeyError from a mapping propagates unchanged.  When executeCallables
    is true, a callable intermediate that is not a class or instance is
    invoked with no arguments and its result used for the next chunk.
    """
    nameChunks=name.split('.')
    for i in range(len(nameChunks)):
        key = nameChunks[i]
        ## BEGIN HACK for getattr() first, then 'has_key':
        try:
            nextObj = getattr(obj, key)
        except AttributeError:
            try:
                nextObj = obj[key]
            except TypeError:
                # obj supports neither attribute nor item access for key
                _raiseNotFoundException(key, obj)
        ## END HACK
        ## BEGIN ORIGINAL CODE
        #if hasattr(obj, 'has_key') and key in obj:
        #    nextObj = obj[key]
        #else:
        #    try:
        #        nextObj = getattr(obj, key)
        #    except AttributeError:
        #        _raiseNotFoundException(key, obj)
        ## END ORIGINAL CODE
        if executeCallables and hasattr(nextObj, '__call__') and not _isInstanceOrClass(nextObj):
            obj = nextObj()
        else:
            obj = nextObj
    return obj
def valueForName(obj, name, executeCallables=False):
    """Public wrapper around _valueForName() that annotates any NotFound
    with the full dotted name being searched before re-raising."""
    try:
        return _valueForName(obj, name, executeCallables)
    except NotFound, e:
        _wrapNotFoundException(e, fullName=name, namespace=obj)
def valueFromSearchList(searchList, name, executeCallables=False):
    """Resolve dotted *name* against the first namespace in *searchList*
    that owns its first segment; raise NotFound when none does."""
    firstKey = name.split('.')[0]
    for space in searchList:
        if not hasKey(space, firstKey):
            continue
        return _valueForName(space, name,
                             executeCallables=executeCallables)
    _raiseNotFoundException(firstKey, searchList)
def _namespaces(callerFrame, searchList=None):
    """Yield lookup namespaces in priority order: frame locals, each
    searchList entry, frame globals, then builtins."""
    yield callerFrame.f_locals
    for extra in (searchList or ()):
        yield extra
    yield callerFrame.f_globals
    yield __builtins__
def valueFromFrameOrSearchList(searchList, name, executeCallables=False,
                               frame=None):
    """Look up dotted *name* first in the caller's frame locals, then in
    the searchList namespaces, then in the frame globals and builtins.

    When *frame* is not supplied the caller's frame is captured with
    inspect.stack().  The frame reference is deleted in the finally
    clause to avoid keeping a reference cycle alive.
    """
    def __valueForName():
        # closes over the loop variable 'namespace' below
        try:
            return _valueForName(namespace, name, executeCallables=executeCallables)
        except NotFound, e:
            _wrapNotFoundException(e, fullName=name, namespace=searchList)
    try:
        if not frame:
            frame = inspect.stack()[1][0]
        key = name.split('.')[0]
        for namespace in _namespaces(frame, searchList):
            if hasKey(namespace, key):
                return __valueForName()
        _raiseNotFoundException(key, searchList)
    finally:
        del frame
def valueFromFrame(name, executeCallables=False, frame=None):
    """Like valueFromFrameOrSearchList() but with no searchList: resolve
    *name* from the caller's frame namespaces only."""
    # @@TR consider implementing the C version the same way
    # at the moment it provides a separate but mirror implementation
    # to valueFromFrameOrSearchList
    try:
        if not frame:
            frame = inspect.stack()[1][0]
        return valueFromFrameOrSearchList(searchList=None,
                                          name=name,
                                          executeCallables=executeCallables,
                                          frame=frame)
    finally:
        # drop the frame reference to avoid a reference cycle
        del frame
def hasName(obj, name):
    # Not in the C version
    """Return True when the full dotted *name* resolves against *obj*."""
    if not hasKey(obj, name.split('.')[0]):
        return False
    try:
        valueForName(obj, name)
    except NotFound:
        return False
    return True
# Try to replace the pure-Python implementations above with the compiled
# C versions from _namemapper.
try:
    from _namemapper import NotFound, valueForKey, valueForName, \
        valueFromSearchList, valueFromFrameOrSearchList, valueFromFrame
    # it is possible with Jython or Windows, for example, that _namemapper.c hasn't been compiled
    C_VERSION = True
except:
    # NOTE(review): bare except deliberately swallows any import failure so
    # the Python implementations stay in use.
    C_VERSION = False
##################################################
## CLASSES
class Mixin:
    """Mix-in adding NameMapper lookups to a class: delegates to the
    module-level valueForName()/valueForKey() with self as namespace."""
    def valueForName(self, name):
        return valueForName(self, name)
    def valueForKey(self, key):
        return valueForKey(self, key)
##################################################
## if run from the command line ##
def example():
    """Smoke-test the NameMapper functions against a small object graph of
    classes, instances, dicts, functions and local variables."""
    class A(Mixin):
        classVar = 'classVar val'
        def method(self,arg='method 1 default arg'):
            return arg
        def method2(self, arg='meth 2 default arg'):
            return {'item1':arg}
        def method3(self, arg='meth 3 default'):
            return arg
    class B(A):
        classBvar = 'classBvar val'
    a = A()
    a.one = 'valueForOne'
    def function(whichOne='default'):
        values = {
            'default': 'default output',
            'one': 'output option one',
            'two': 'output option two'
            }
        return values[whichOne]
    a.dic = {
        'func': function,
        'method': a.method3,
        'item': 'itemval',
        'subDict': {'nestedMethod':a.method3}
        }
    b = 'this is local b'
    # Exercise key, dotted-name, frame-locals and autocall lookups:
    print(valueForKey(a.dic, 'subDict'))
    print(valueForName(a, 'dic.item'))
    print(valueForName(vars(), 'b'))
    print(valueForName(__builtins__, 'dir')())
    print(valueForName(vars(), 'a.classVar'))
    print(valueForName(vars(), 'a.dic.func', executeCallables=True))
    print(valueForName(vars(), 'a.method2.item1', executeCallables=True))
if __name__ == '__main__':
    example()
| Python |
#!/usr/bin/env python
'''
Tests for the 'cheetah' command.
Besides unittest usage, recognizes the following command-line options:
--list CheetahWrapper.py
List all scenarios that are tested. The argument is the path
of this script.
--nodelete
Don't delete scratch directory at end.
--output
Show the output of each subcommand. (Normally suppressed.)
'''
import os
import os.path
import pdb
import re # Used by listTests.
import shutil
import sys
import tempfile
import unittest
from optparse import OptionParser
from Cheetah.CheetahWrapper import CheetahWrapper # Used by NoBackup.
# Prefer the modern subprocess module; emulate the legacy popen2.Popen4
# interface on top of it so the rest of this script can use a single API.
try:
    from subprocess import Popen, PIPE, STDOUT
    class Popen4(Popen):
        def __init__(self, cmd, bufsize=-1, shell=True, close_fds=True,
                     stdin=PIPE, stdout=PIPE, stderr=STDOUT, **kwargs):
            super(Popen4, self).__init__(cmd, bufsize=bufsize, shell=shell,
                    close_fds=close_fds, stdin=stdin, stdout=stdout,
                    stderr=stderr, **kwargs)
            # popen2.Popen4 compatibility aliases:
            self.tochild = self.stdin
            self.fromchild = self.stdout
            self.childerr = self.stderr
except ImportError:
    from popen2 import Popen4
DELETE = True # True to clean up after ourselves, False for debugging.
OUTPUT = False # Normally False, True for debugging.
BACKUP_SUFFIX = CheetahWrapper.BACKUP_SUFFIX
def warn(msg):
    """Emit *msg* to standard error, newline-terminated."""
    sys.stderr.write('%s\n' % msg)
class CFBase(unittest.TestCase):
    """Base class for "cheetah compile" and "cheetah fill" unit tests.

    Subclasses override the class attributes to describe the directory
    layout created in a scratch directory, then call self.go() with the
    cheetah command under test and self.checkCompile()/checkFill() on the
    expected output files.
    """
    srcDir = ''  # Nonblank to create source directory.
    subdirs = ('child', 'child/grandkid')  # Delete in reverse order.
    srcFiles = ('a.tmpl', 'child/a.tmpl', 'child/grandkid/a.tmpl')
    expectError = False  # Used by --list option.

    def inform(self, message):
        """Print *message* only when verbose mode is on."""
        if self.verbose:
            print(message)

    def setUp(self):
        """Create the top-level directories, subdirectories and .tmpl
        files.
        """
        I = self.inform
        # Step 1: Create the scratch directory and chdir into it.
        self.scratchDir = scratchDir = tempfile.mktemp()
        os.mkdir(scratchDir)
        self.origCwd = os.getcwd()
        os.chdir(scratchDir)
        if self.srcDir:
            os.mkdir(self.srcDir)
        # Step 2: Create source subdirectories.
        for dir in self.subdirs:
            os.mkdir(dir)
        # Step 3: Create the .tmpl files, each in its proper directory.
        for fil in self.srcFiles:
            f = open(fil, 'w')
            f.write("Hello, world!\n")
            f.close()

    def tearDown(self):
        """Restore the working directory and (unless --nodelete) remove
        the scratch directory."""
        os.chdir(self.origCwd)
        if DELETE:
            shutil.rmtree(self.scratchDir, True)  # Ignore errors.
            if os.path.exists(self.scratchDir):
                # BUG FIX: the original lacked the "%" argument, so the
                # warning printed a literal "%s" instead of the path.
                warn("Warning: unable to delete scratch directory %s"
                     % self.scratchDir)
        else:
            warn("Warning: not deleting scratch directory %s" % self.scratchDir)

    def _checkDestFileHelper(self, path, expected,
                             allowSurroundingText, errmsg):
        """Low-level helper to check a destination file.

        in : path, string, the destination path.
             expected, string, the expected contents.
             allowSurroundingText, bool, allow the result to contain
               additional text around the 'expected' substring?
             errmsg, string, the error message.  It may contain the
               following "%"-operator keys: path, expected, result.
        out: None
        """
        path = os.path.abspath(path)
        exists = os.path.exists(path)
        msg = "destination file missing: %s" % path
        self.failUnless(exists, msg)
        f = open(path, 'r')
        result = f.read()
        f.close()
        if allowSurroundingText:
            success = result.find(expected) != -1
        else:
            success = result == expected
        # errmsg pulls path/expected/result out of this frame's locals
        msg = errmsg % locals()
        self.failUnless(success, msg)

    def checkCompile(self, path):
        """Verify the compiled module at *path* contains the template text."""
        # Raw string to prevent "\n" from being converted to a newline.
        #expected = R"write('Hello, world!\n')"
        expected = "Hello, world!"  # might output a u'' string
        errmsg = """\
destination file %(path)s doesn't contain expected substring:
%(expected)r"""
        self._checkDestFileHelper(path, expected, True, errmsg)

    def checkFill(self, path):
        """Verify the filled file at *path* matches the template exactly."""
        expected = "Hello, world!\n"
        errmsg = """\
destination file %(path)s contains wrong result.
Expected %(expected)r
Found %(result)r"""
        self._checkDestFileHelper(path, expected, False, errmsg)

    def checkSubdirPyInit(self, path):
        """Verify a destination subdirectory exists and contains an
        __init__.py file.
        """
        exists = os.path.exists(path)
        # BUG FIX: typo "misssing" in the assertion message.
        msg = "destination subdirectory %s missing" % path
        self.failUnless(exists, msg)
        initPath = os.path.join(path, "__init__.py")
        exists = os.path.exists(initPath)
        msg = "destination init file missing: %s" % initPath
        self.failUnless(exists, msg)

    def checkNoBackup(self, path):
        """Verify 'path' does not exist. (To check --nobackup.)
        """
        exists = os.path.exists(path)
        msg = "backup file exists in spite of --nobackup: %s" % path
        self.failIf(exists, msg)

    def locate_command(self, cmd):
        """Resolve the executable in *cmd* against $PATH; return the
        command with the resolved absolute path substituted."""
        paths = os.getenv('PATH')
        if not paths:
            return cmd
        parts = cmd.split(' ')
        paths = paths.split(':')
        for p in paths:
            p = p + os.path.sep + parts[0]
            if os.path.isfile(p):
                return ' '.join([p] + parts[1:])
        return ' '.join(parts)

    def assertWin32Subprocess(self, cmd):
        """Run *cmd* via os.popen4 (Windows); return (status, output)."""
        _in, _out = os.popen4(cmd)
        _in.close()
        output = _out.read()
        rc = _out.close()
        if rc is None:
            rc = 0
        return rc, output

    def assertPosixSubprocess(self, cmd):
        """Run *cmd* via Popen4 (POSIX); return (status, output)."""
        cmd = self.locate_command(cmd)
        process = Popen4(cmd, env=os.environ)
        process.tochild.close()
        output = process.fromchild.read()
        status = process.wait()
        process.fromchild.close()
        return status, output

    def assertSubprocess(self, cmd, nonzero=False):
        """Run *cmd*, asserting a zero (or, with nonzero=True, non-zero)
        exit status; return its combined output."""
        status, output = None, None
        if sys.platform == 'win32':
            status, output = self.assertWin32Subprocess(cmd)
        else:
            status, output = self.assertPosixSubprocess(cmd)
        if not nonzero:
            self.failUnlessEqual(status, 0, '''Subprocess exited with a non-zero status (%d)
%s''' % (status, output))
        else:
            self.failIfEqual(status, 0, '''Subprocess exited with a zero status (%d)
%s''' % (status, output))
        return output

    def go(self, cmd, expectedStatus=0, expectedOutputSubstring=None):
        """Run a "cheetah compile" or "cheetah fill" subcommand.

        in : cmd, string, the command to run.
             expectedStatus, int, subcommand's expected output status.
               0 if the subcommand is expected to succeed, 1-255 otherwise.
             expectedOutputSubstring, string, substring which much appear
               in the standard output or standard error.  None to skip this
               test.
        out: None.
        """
        output = self.assertSubprocess(cmd)
        if expectedOutputSubstring is not None:
            msg = "substring %r not found in subcommand output: %s" % \
                (expectedOutputSubstring, cmd)
            substringTest = output.find(expectedOutputSubstring) != -1
            self.failUnless(substringTest, msg)
class CFIdirBase(CFBase):
    """Subclass for tests with --idir.
    """
    # Templates live under SRC/ instead of the scratch-directory root.
    srcDir = 'SRC'
    subdirs = ('SRC/child', 'SRC/child/grandkid') # Delete in reverse order.
    srcFiles = ('SRC/a.tmpl', 'SRC/child/a.tmpl', 'SRC/child/grandkid/a.tmpl')
##################################################
## TEST CASE CLASSES
class OneFile(CFBase):
    """Compile/fill a single template named with its .tmpl extension."""
    def testCompile(self):
        self.go("cheetah compile a.tmpl")
        self.checkCompile("a.py")
    def testFill(self):
        self.go("cheetah fill a.tmpl")
        self.checkFill("a.html")
    def testText(self):
        self.go("cheetah fill --oext txt a.tmpl")
        self.checkFill("a.txt")
class OneFileNoExtension(CFBase):
    """Compile/fill a single template named without its extension."""
    def testCompile(self):
        self.go("cheetah compile a")
        self.checkCompile("a.py")
    def testFill(self):
        self.go("cheetah fill a")
        self.checkFill("a.html")
    def testText(self):
        self.go("cheetah fill --oext txt a")
        self.checkFill("a.txt")
class SplatTmpl(CFBase):
    """Compile/fill templates selected by a shell glob (*.tmpl)."""
    def testCompile(self):
        self.go("cheetah compile *.tmpl")
        self.checkCompile("a.py")
    def testFill(self):
        self.go("cheetah fill *.tmpl")
        self.checkFill("a.html")
    def testText(self):
        self.go("cheetah fill --oext txt *.tmpl")
        self.checkFill("a.txt")
class ThreeFilesWithSubdirectories(CFBase):
    """Compile/fill three explicitly listed templates in nested dirs."""
    def testCompile(self):
        self.go("cheetah compile a.tmpl child/a.tmpl child/grandkid/a.tmpl")
        self.checkCompile("a.py")
        self.checkCompile("child/a.py")
        self.checkCompile("child/grandkid/a.py")
    def testFill(self):
        self.go("cheetah fill a.tmpl child/a.tmpl child/grandkid/a.tmpl")
        self.checkFill("a.html")
        self.checkFill("child/a.html")
        self.checkFill("child/grandkid/a.html")
    def testText(self):
        self.go("cheetah fill --oext txt a.tmpl child/a.tmpl child/grandkid/a.tmpl")
        self.checkFill("a.txt")
        self.checkFill("child/a.txt")
        self.checkFill("child/grandkid/a.txt")
class ThreeFilesWithSubdirectoriesNoExtension(CFBase):
    """Same as ThreeFilesWithSubdirectories, but names lack .tmpl."""
    def testCompile(self):
        self.go("cheetah compile a child/a child/grandkid/a")
        self.checkCompile("a.py")
        self.checkCompile("child/a.py")
        self.checkCompile("child/grandkid/a.py")
    def testFill(self):
        self.go("cheetah fill a child/a child/grandkid/a")
        self.checkFill("a.html")
        self.checkFill("child/a.html")
        self.checkFill("child/grandkid/a.html")
    def testText(self):
        self.go("cheetah fill --oext txt a child/a child/grandkid/a")
        self.checkFill("a.txt")
        self.checkFill("child/a.txt")
        self.checkFill("child/grandkid/a.txt")
class SplatTmplWithSubdirectories(CFBase):
    """Glob templates in several directories at once."""
    def testCompile(self):
        self.go("cheetah compile *.tmpl child/*.tmpl child/grandkid/*.tmpl")
        self.checkCompile("a.py")
        self.checkCompile("child/a.py")
        self.checkCompile("child/grandkid/a.py")
    def testFill(self):
        self.go("cheetah fill *.tmpl child/*.tmpl child/grandkid/*.tmpl")
        self.checkFill("a.html")
        self.checkFill("child/a.html")
        self.checkFill("child/grandkid/a.html")
    def testText(self):
        self.go("cheetah fill --oext txt *.tmpl child/*.tmpl child/grandkid/*.tmpl")
        self.checkFill("a.txt")
        self.checkFill("child/a.txt")
        self.checkFill("child/grandkid/a.txt")
class OneFileWithOdir(CFBase):
    """Direct output into a separate destination directory (--odir)."""
    def testCompile(self):
        self.go("cheetah compile --odir DEST a.tmpl")
        self.checkSubdirPyInit("DEST")
        self.checkCompile("DEST/a.py")
    def testFill(self):
        self.go("cheetah fill --odir DEST a.tmpl")
        self.checkFill("DEST/a.html")
    def testText(self):
        self.go("cheetah fill --odir DEST --oext txt a.tmpl")
        self.checkFill("DEST/a.txt")
class VarietyWithOdir(CFBase):
    """Mix explicit names, extensionless names and globs with --odir."""
    def testCompile(self):
        self.go("cheetah compile --odir DEST a.tmpl child/a child/grandkid/*.tmpl")
        self.checkSubdirPyInit("DEST")
        self.checkSubdirPyInit("DEST/child")
        self.checkSubdirPyInit("DEST/child/grandkid")
        self.checkCompile("DEST/a.py")
        self.checkCompile("DEST/child/a.py")
        self.checkCompile("DEST/child/grandkid/a.py")
    def testFill(self):
        self.go("cheetah fill --odir DEST a.tmpl child/a child/grandkid/*.tmpl")
        self.checkFill("DEST/a.html")
        self.checkFill("DEST/child/a.html")
        self.checkFill("DEST/child/grandkid/a.html")
    def testText(self):
        self.go("cheetah fill --odir DEST --oext txt a.tmpl child/a child/grandkid/*.tmpl")
        self.checkFill("DEST/a.txt")
        self.checkFill("DEST/child/a.txt")
        self.checkFill("DEST/child/grandkid/a.txt")
class RecurseExplicit(CFBase):
    """Recurse (-R) into an explicitly named directory."""
    def testCompile(self):
        self.go("cheetah compile -R child")
        self.checkCompile("child/a.py")
        self.checkCompile("child/grandkid/a.py")
    def testFill(self):
        self.go("cheetah fill -R child")
        self.checkFill("child/a.html")
        self.checkFill("child/grandkid/a.html")
    def testText(self):
        self.go("cheetah fill -R --oext txt child")
        self.checkFill("child/a.txt")
        self.checkFill("child/grandkid/a.txt")
class RecurseImplicit(CFBase):
    """Recurse (-R) from the current directory when none is given."""
    def testCompile(self):
        self.go("cheetah compile -R")
        self.checkCompile("child/a.py")
        self.checkCompile("child/grandkid/a.py")
    def testFill(self):
        self.go("cheetah fill -R")
        self.checkFill("a.html")
        self.checkFill("child/a.html")
        self.checkFill("child/grandkid/a.html")
    def testText(self):
        self.go("cheetah fill -R --oext txt")
        self.checkFill("a.txt")
        self.checkFill("child/a.txt")
        self.checkFill("child/grandkid/a.txt")
class RecurseExplicitWIthOdir(CFBase):
    """Recurse into a named directory while redirecting output via --odir.

    NOTE(review): class name has a capitalisation typo ("WIth"); kept as-is
    because it is public API for test discovery.
    """
    def testCompile(self):
        self.go("cheetah compile -R --odir DEST child")
        self.checkSubdirPyInit("DEST/child")
        self.checkSubdirPyInit("DEST/child/grandkid")
        self.checkCompile("DEST/child/a.py")
        self.checkCompile("DEST/child/grandkid/a.py")
    def testFill(self):
        self.go("cheetah fill -R --odir DEST child")
        self.checkFill("DEST/child/a.html")
        self.checkFill("DEST/child/grandkid/a.html")
    def testText(self):
        self.go("cheetah fill -R --odir DEST --oext txt child")
        self.checkFill("DEST/child/a.txt")
        self.checkFill("DEST/child/grandkid/a.txt")
class Flat(CFBase):
    """--flat drops the source subdirectory from the output path."""
    def testCompile(self):
        self.go("cheetah compile --flat child/a.tmpl")
        self.checkCompile("a.py")
    def testFill(self):
        self.go("cheetah fill --flat child/a.tmpl")
        self.checkFill("a.html")
    def testText(self):
        self.go("cheetah fill --flat --oext txt child/a.tmpl")
        self.checkFill("a.txt")
class FlatRecurseCollision(CFBase):
    """-R plus --flat collide (duplicate output names) and must fail."""
    expectError = True
    def testCompile(self):
        self.assertSubprocess("cheetah compile -R --flat", nonzero=True)
    def testFill(self):
        self.assertSubprocess("cheetah fill -R --flat", nonzero=True)
    def testText(self):
        self.assertSubprocess("cheetah fill -R --flat", nonzero=True)
class IdirRecurse(CFIdirBase):
    """Recurse with --idir: read from SRC/, write to the current dir."""
    def testCompile(self):
        self.go("cheetah compile -R --idir SRC child")
        self.checkSubdirPyInit("child")
        self.checkSubdirPyInit("child/grandkid")
        self.checkCompile("child/a.py")
        self.checkCompile("child/grandkid/a.py")
    def testFill(self):
        self.go("cheetah fill -R --idir SRC child")
        self.checkFill("child/a.html")
        self.checkFill("child/grandkid/a.html")
    def testText(self):
        self.go("cheetah fill -R --idir SRC --oext txt child")
        self.checkFill("child/a.txt")
        self.checkFill("child/grandkid/a.txt")
class IdirOdirRecurse(CFIdirBase):
    """Recurse with both --idir (input root) and --odir (output root)."""
    def testCompile(self):
        self.go("cheetah compile -R --idir SRC --odir DEST child")
        self.checkSubdirPyInit("DEST/child")
        self.checkSubdirPyInit("DEST/child/grandkid")
        self.checkCompile("DEST/child/a.py")
        self.checkCompile("DEST/child/grandkid/a.py")
    def testFill(self):
        self.go("cheetah fill -R --idir SRC --odir DEST child")
        self.checkFill("DEST/child/a.html")
        self.checkFill("DEST/child/grandkid/a.html")
    def testText(self):
        self.go("cheetah fill -R --idir SRC --odir DEST --oext txt child")
        self.checkFill("DEST/child/a.txt")
        self.checkFill("DEST/child/grandkid/a.txt")
class IdirFlatRecurseCollision(CFIdirBase):
    """-R plus --flat must also fail when reading via --idir."""
    expectError = True
    def testCompile(self):
        self.assertSubprocess("cheetah compile -R --flat --idir SRC", nonzero=True)
    def testFill(self):
        self.assertSubprocess("cheetah fill -R --flat --idir SRC", nonzero=True)
    def testText(self):
        self.assertSubprocess("cheetah fill -R --flat --idir SRC --oext txt", nonzero=True)
class NoBackup(CFBase):
    """Run the command twice each time and verify a backup file is
    *not* created.
    """
    def testCompile(self):
        self.go("cheetah compile --nobackup a.tmpl")
        self.go("cheetah compile --nobackup a.tmpl")
        self.checkNoBackup("a.py" + BACKUP_SUFFIX)
    def testFill(self):
        self.go("cheetah fill --nobackup a.tmpl")
        self.go("cheetah fill --nobackup a.tmpl")
        self.checkNoBackup("a.html" + BACKUP_SUFFIX)
    def testText(self):
        self.go("cheetah fill --nobackup --oext txt a.tmpl")
        self.go("cheetah fill --nobackup --oext txt a.tmpl")
        self.checkNoBackup("a.txt" + BACKUP_SUFFIX)
def listTests(cheetahWrapperFile):
    """cheetahWrapperFile, string, path of this script.

    Scan this script and print every command exercised via self.go("...").
    XXX TODO: don't print tests where expectError is true.
    """
    rx = re.compile(r'self\.go\("(.*?)"\)')
    f = open(cheetahWrapperFile)
    # try/finally (rather than a bare close) so the file handle is not
    # leaked if reading raises; iterate the file directly instead of the
    # original manual readline() loop.
    try:
        for lin in f:
            m = rx.search(lin)
            if m:
                print(m.group(1))
    finally:
        f.close()
def main():
    """Parse command-line options, then either list the tested commands
    (--list) or forward the remaining arguments to unittest.main()."""
    global DELETE, OUTPUT
    parser = OptionParser()
    parser.add_option("--list", action="store", dest="listTests")
    parser.add_option("--nodelete", action="store_true")
    parser.add_option("--output", action="store_true")
    # The following options are passed to unittest.
    parser.add_option("-e", "--explain", action="store_true")
    parser.add_option("-v", "--verbose", action="store_true")
    parser.add_option("-q", "--quiet", action="store_true")
    opts, files = parser.parse_args()
    if opts.nodelete:
        DELETE = False
    if opts.output:
        OUTPUT = True
    if opts.listTests:
        listTests(opts.listTests)
    else:
        # Eliminate script-specific command-line arguments to prevent
        # errors in unittest.
        del sys.argv[1:]
        for opt in ("explain", "verbose", "quiet"):
            if getattr(opts, opt):
                sys.argv.append("--" + opt)
        sys.argv.extend(files)
        unittest.main()
if __name__ == '__main__':
    main()
# vim: sw=4 ts=4 expandtab
| Python |
#!/usr/bin/env python
'''
Core module of Cheetah's Unit-testing framework
TODO
================================================================================
# combo tests
# negative test cases for expected exceptions
# black-box vs clear-box testing
# do some tests that run the Template for long enough to check that the refresh code works
'''
import sys
import unittest
from Cheetah.Tests import SyntaxAndOutput
from Cheetah.Tests import NameMapper
from Cheetah.Tests import Misc
from Cheetah.Tests import Filters
from Cheetah.Tests import Template
from Cheetah.Tests import Cheps
from Cheetah.Tests import Parser
from Cheetah.Tests import Regressions
from Cheetah.Tests import Unicode
from Cheetah.Tests import CheetahWrapper
from Cheetah.Tests import Analyzer
# Register the end-of-line test variants before test collection.
SyntaxAndOutput.install_eols()
# One suite per test module; Cheps is currently disabled.
suites = [
    unittest.findTestCases(SyntaxAndOutput),
    unittest.findTestCases(NameMapper),
    unittest.findTestCases(Filters),
    unittest.findTestCases(Template),
    #unittest.findTestCases(Cheps),
    unittest.findTestCases(Regressions),
    unittest.findTestCases(Unicode),
    unittest.findTestCases(Misc),
    unittest.findTestCases(Parser),
    unittest.findTestCases(Analyzer),
    ]
if not sys.platform.startswith('java'):
    # CheetahWrapper tests are skipped on Jython — presumably because they
    # drive the 'cheetah' command as a subprocess; confirm.
    suites.append(unittest.findTestCases(CheetahWrapper))
if __name__ == '__main__':
    runner = unittest.TextTestRunner()
    if 'xml' in sys.argv:
        import xmlrunner
        runner = xmlrunner.XMLTestRunner(filename='Cheetah-Tests.xml')
    results = runner.run(unittest.TestSuite(suites))
| Python |
"""
XML Test Runner for PyUnit
"""
# Written by Sebastian Rittau <srittau@jroger.in-berlin.de> and placed in
# the Public Domain. With contributions by Paolo Borelli.
__revision__ = "$Id: /private/python/stdlib/xmlrunner.py 16654 2007-11-12T12:46:35.368945Z srittau $"
import os.path
import re
import sys
import time
import traceback
import unittest
from StringIO import StringIO
from xml.sax.saxutils import escape
from StringIO import StringIO
class _TestInfo(object):
    """Information about a particular test.

    Used by _XMLTestResult.
    """
    def __init__(self, test, time):
        """Split test.id() ('package.Class.method') into class path and
        method name; failure/error info is attached afterwards by the
        create_* factory functions."""
        _pieces = test.id().split('.')
        (self._class, self._method) = ('.'.join(_pieces[:-1]), _pieces[-1])
        self._time = time
        self._error = None    # exc-info triple, set by create_error()
        self._failure = None  # exc-info triple, set by create_failure()

    def print_report(self, stream):
        """Print information about this test case in XML format to the
        supplied stream.
        """
        stream.write(' <testcase classname="%(class)s" name="%(method)s" time="%(time).4f">' % \
            {
                "class": self._class,
                "method": self._method,
                "time": self._time,
            })
        # Identity comparison instead of the original "!= None".
        if self._failure is not None:
            self._print_error(stream, 'failure', self._failure)
        if self._error is not None:
            self._print_error(stream, 'error', self._error)
        stream.write('</testcase>\n')

    def _print_error(self, stream, tagname, error):
        """Print information from a failure or error to the supplied stream."""
        text = escape(str(error[1]))
        stream.write('\n')
        # Explicit branch replaces the fragile "cond and a or b" idiom;
        # result is identical: Exception subclasses report their bare class
        # name, anything else falls back to str().
        if issubclass(error[0], Exception):
            type_name = error[0].__name__
        else:
            type_name = str(error[0])
        stream.write(' <%s type="%s">%s\n' % (tagname, type_name, text))
        tb_stream = StringIO()
        traceback.print_tb(error[2], None, tb_stream)
        stream.write(escape(tb_stream.getvalue()))
        stream.write(' </%s>\n' % tagname)
        stream.write(' ')
# Module level functions since Python 2.3 doesn't grok decorators
def create_success(test, time):
    """Create a _TestInfo instance for a successful test."""
    return _TestInfo(test, time)
def create_failure(test, time, failure):
    """Create a _TestInfo instance for a failed test.

    failure: the exc-info triple later unpacked by _print_error().
    """
    info = _TestInfo(test, time)
    info._failure = failure
    return info
def create_error(test, time, error):
    """Create a _TestInfo instance for an erroneous test.

    error: the exc-info triple later unpacked by _print_error().
    """
    info = _TestInfo(test, time)
    info._error = error
    return info
class _XMLTestResult(unittest.TestResult):
    """A test result class that stores result as XML.

    Used by XMLTestRunner.
    """
    def __init__(self, classname):
        unittest.TestResult.__init__(self)
        self._test_name = classname   # name written to the <testsuite> tag
        self._start_time = None       # set per-test in startTest()
        self._tests = []              # accumulated _TestInfo records
        self._error = None            # error of the currently running test
        self._failure = None          # failure of the currently running test
    def startTest(self, test):
        """Reset per-test state and record the wall-clock start time."""
        unittest.TestResult.startTest(self, test)
        self._error = None
        self._failure = None
        self._start_time = time.time()
    def stopTest(self, test):
        """Wrap the finished test into a _TestInfo record with its duration."""
        time_taken = time.time() - self._start_time
        unittest.TestResult.stopTest(self, test)
        if self._error:
            info = create_error(test, time_taken, self._error)
        elif self._failure:
            info = create_failure(test, time_taken, self._failure)
        else:
            info = create_success(test, time_taken)
        self._tests.append(info)
    def addError(self, test, err):
        unittest.TestResult.addError(self, test, err)
        self._error = err
    def addFailure(self, test, err):
        unittest.TestResult.addFailure(self, test, err)
        self._failure = err
    def print_report(self, stream, time_taken, out, err):
        """Prints the XML report to the supplied stream.

        The time the tests took to perform as well as the captured standard
        output and standard error streams must be passed in.
        """
        stream.write('<testsuite errors="%(e)d" failures="%(f)d" ' % \
            { "e": len(self.errors), "f": len(self.failures) })
        stream.write('name="%(n)s" tests="%(t)d" time="%(time).3f">\n' % \
            {
                "n": self._test_name,
                "t": self.testsRun,
                "time": time_taken,
            })
        for info in self._tests:
            info.print_report(stream)
        stream.write(' <system-out><![CDATA[%s]]></system-out>\n' % out)
        stream.write(' <system-err><![CDATA[%s]]></system-err>\n' % err)
        stream.write('</testsuite>\n')
class XMLTestRunner(object):
    """A test runner that stores results in XML format compatible with JUnit.

    XMLTestRunner(stream=None) -> XML test runner

    The XML file is written to the supplied stream. If stream is None, the
    results are stored in a file called TEST-<module>.<class>.xml in the
    current working directory (if not overridden with the path property),
    where <module> and <class> are the module and class name of the test class.
    """
    def __init__(self, *args, **kwargs):
        self._stream = kwargs.get('stream')      # file-like target, or None
        self._filename = kwargs.get('filename')  # explicit output file name
        self._path = "."                         # directory for generated files

    def run(self, test):
        """Run the given test case or test suite."""
        class_ = test.__class__
        classname = class_.__module__ + "." + class_.__name__
        if self._stream is None:  # identity check instead of "== None"
            filename = "TEST-%s.xml" % classname
            if self._filename:
                filename = self._filename
            # open() instead of the Python-2-only file() builtin.
            stream = open(os.path.join(self._path, filename), "w")
            stream.write('<?xml version="1.0" encoding="utf-8"?>\n')
        else:
            stream = self._stream
        result = _XMLTestResult(classname)
        start_time = time.time()
        # TODO: Python 2.5: Use the with statement
        old_stdout = sys.stdout
        old_stderr = sys.stderr
        sys.stdout = StringIO()
        sys.stderr = StringIO()
        try:
            test(result)
            try:
                out_s = sys.stdout.getvalue()
            except AttributeError:
                # a test may have replaced sys.stdout with something that
                # has no getvalue(); treat the capture as empty
                out_s = ""
            try:
                err_s = sys.stderr.getvalue()
            except AttributeError:
                err_s = ""
        finally:
            sys.stdout = old_stdout
            sys.stderr = old_stderr
        time_taken = time.time() - start_time
        result.print_report(stream, time_taken, out_s, err_s)
        if self._stream is None:
            stream.close()
        return result

    def _set_path(self, path):
        self._path = path

    path = property(lambda self: self._path, _set_path, None,
            """The path where the XML files are stored.
            This property is ignored when the XML file is written to a file
            stream.""")
class XMLTestRunnerTest(unittest.TestCase):
    """Self-tests for XMLTestRunner: each test compares the generated XML
    against a snapshot, with times and messages normalized first."""
    def setUp(self):
        self._stream = StringIO()
    def _try_test_run(self, test_class, expected):
        """Run the test suite against the supplied test class and compare the
        XML result against the expected XML string. Fail if the expected
        string doesn't match the actual string. All time attribute in the
        expected string should have the value "0.000". All error and failure
        messages are reduced to "Foobar".
        """
        runner = XMLTestRunner(self._stream)
        runner.run(unittest.makeSuite(test_class))
        got = self._stream.getvalue()
        # Replace all time="X.YYY" attributes by time="0.000" to enable a
        # simple string comparison.
        got = re.sub(r'time="\d+\.\d+"', 'time="0.000"', got)
        # Likewise, replace all failure and error messages by a simple "Foobar"
        # string.
        got = re.sub(r'(?s)<failure (.*?)>.*?</failure>', r'<failure \1>Foobar</failure>', got)
        got = re.sub(r'(?s)<error (.*?)>.*?</error>', r'<error \1>Foobar</error>', got)
        self.assertEqual(expected, got)
    def test_no_tests(self):
        """Regression test: Check whether a test run without any tests
        matches a previous run.
        """
        class TestTest(unittest.TestCase):
            pass
        self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="0" time="0.000">
 <system-out><![CDATA[]]></system-out>
 <system-err><![CDATA[]]></system-err>
</testsuite>
""")
    def test_success(self):
        """Regression test: Check whether a test run with a successful test
        matches a previous run.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                pass
        self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="1" time="0.000">
 <testcase classname="__main__.TestTest" name="test_foo" time="0.000"></testcase>
 <system-out><![CDATA[]]></system-out>
 <system-err><![CDATA[]]></system-err>
</testsuite>
""")
    def test_failure(self):
        """Regression test: Check whether a test run with a failing test
        matches a previous run.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                self.assert_(False)
        self._try_test_run(TestTest, """<testsuite errors="0" failures="1" name="unittest.TestSuite" tests="1" time="0.000">
 <testcase classname="__main__.TestTest" name="test_foo" time="0.000">
 <failure type="exceptions.AssertionError">Foobar</failure>
 </testcase>
 <system-out><![CDATA[]]></system-out>
 <system-err><![CDATA[]]></system-err>
</testsuite>
""")
    def test_error(self):
        """Regression test: Check whether a test run with a erroneous test
        matches a previous run.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                raise IndexError()
        self._try_test_run(TestTest, """<testsuite errors="1" failures="0" name="unittest.TestSuite" tests="1" time="0.000">
 <testcase classname="__main__.TestTest" name="test_foo" time="0.000">
 <error type="exceptions.IndexError">Foobar</error>
 </testcase>
 <system-out><![CDATA[]]></system-out>
 <system-err><![CDATA[]]></system-err>
</testsuite>
""")
    def test_stdout_capture(self):
        """Regression test: Check whether a test run with output to stdout
        matches a previous run.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                print("Test")
        self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="1" time="0.000">
 <testcase classname="__main__.TestTest" name="test_foo" time="0.000"></testcase>
 <system-out><![CDATA[Test
]]></system-out>
 <system-err><![CDATA[]]></system-err>
</testsuite>
""")
    def test_stderr_capture(self):
        """Regression test: Check whether a test run with output to stderr
        matches a previous run.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                sys.stderr.write('Test\n')
        self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="1" time="0.000">
 <testcase classname="__main__.TestTest" name="test_foo" time="0.000"></testcase>
 <system-out><![CDATA[]]></system-out>
 <system-err><![CDATA[Test
]]></system-err>
</testsuite>
""")
    class NullStream(object):
        """A file-like object that discards everything written to it."""
        def write(self, buffer):
            pass
    def test_unittests_changing_stdout(self):
        """Check whether the XMLTestRunner recovers gracefully from unit tests
        that change stdout, but don't change it back properly.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                sys.stdout = XMLTestRunnerTest.NullStream()
        runner = XMLTestRunner(self._stream)
        runner.run(unittest.makeSuite(TestTest))
    def test_unittests_changing_stderr(self):
        """Check whether the XMLTestRunner recovers gracefully from unit tests
        that change stderr, but don't change it back properly.
        """
        class TestTest(unittest.TestCase):
            def test_foo(self):
                sys.stderr = XMLTestRunnerTest.NullStream()
        runner = XMLTestRunner(self._stream)
        runner.run(unittest.makeSuite(TestTest))
class XMLTestProgram(unittest.TestProgram):
    """unittest.TestProgram variant that defaults to the XMLTestRunner."""
    def runTests(self):
        # Only substitute the XML runner when the caller supplied none.
        if self.testRunner is None:
            self.testRunner = XMLTestRunner()
        unittest.TestProgram.runTests(self)
# unittest-style alias so callers can use `main` exactly like unittest.main.
main = XMLTestProgram
if __name__ == "__main__":
    main(module=None)
| Python |
#!/usr/bin/env python
import unittest
import Cheetah
import Cheetah.Parser
import Cheetah.Template
class Chep_2_Conditionalized_Import_Behavior(unittest.TestCase):
def test_ModuleLevelImport(self):
''' Verify module level (traditional) import behavior '''
pass
def test_InlineImport(self):
''' Verify (new) inline import behavior works '''
template = '''
#def funky($s)
#try
#import urllib
#except ImportError
#pass
#end try
#return urllib.quote($s)
#end def
'''
try:
template = Cheetah.Template.Template.compile(template)
except Cheetah.Parser.ParseError, ex:
self.fail('Failed to properly generate code %s' % ex)
template = template()
rc = tepmlate.funky('abc def')
assert rc == 'abc+def'
def test_LegacyMode(self):
''' Verify disabling of CHEP #2 works '''
pass
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/env python
# -*- encoding: utf8 -*-
from Cheetah.Template import Template
from Cheetah import CheetahWrapper
from Cheetah import DummyTransaction
import imp
import os
import sys
import tempfile
import unittest
class CommandLineTest(unittest.TestCase):
    """Base class: compiles a template through the ``cheetah compile``
    command line and returns the generated template class.
    """
    def createAndCompile(self, source):
        # Keep drawing temp names until one contains no '-' -- the name must
        # be usable as a Python module / class identifier after compilation.
        sourcefile = '-'
        while sourcefile.find('-') != -1:
            # NOTE(review): tempfile.mktemp() is race-prone; acceptable in a
            # test helper, but mkstemp would be the safer choice.
            sourcefile = tempfile.mktemp()
        fd = open('%s.tmpl' % sourcefile, 'w')
        fd.write(source)
        fd.close()
        wrap = CheetahWrapper.CheetahWrapper()
        wrap.main(['cheetah', 'compile', '--quiet', '--nobackup', sourcefile])
        module_path, module_name = os.path.split(sourcefile)
        # Import the module `cheetah compile` just generated; the template
        # class inside it is named after the module.
        module = loadModule(module_name, [module_path])
        template = getattr(module, module_name)
        return template
class JBQ_UTF8_Test1(unittest.TestCase):
    """Nested templates where both placeholder values are ASCII-only
    unicode strings: rendering must not raise."""
    def runTest(self):
        t = Template.compile(source="""Main file with |$v|
$other""")
        otherT = Template.compile(source="Other template with |$v|")
        other = otherT()
        # Attach the inner template so `$other` renders it inline.
        t.other = other
        t.v = u'Unicode String'
        t.other.v = u'Unicode String'
        # Rendering must succeed and yield a non-empty unicode result.
        assert unicode(t())
class JBQ_UTF8_Test2(unittest.TestCase):
def runTest(self):
t = Template.compile(source="""Main file with |$v|
$other""")
otherT = Template.compile(source="Other template with |$v|")
other = otherT()
t.other = other
t.v = u'Unicode String with eacute é'
t.other.v = u'Unicode String'
assert unicode(t())
class JBQ_UTF8_Test3(unittest.TestCase):
def runTest(self):
t = Template.compile(source="""Main file with |$v|
$other""")
otherT = Template.compile(source="Other template with |$v|")
other = otherT()
t.other = other
t.v = u'Unicode String with eacute é'
t.other.v = u'Unicode String and an eacute é'
assert unicode(t())
class JBQ_UTF8_Test4(unittest.TestCase):
def runTest(self):
t = Template.compile(source="""#encoding utf-8
Main file with |$v| and eacute in the template é""")
t.v = 'Unicode String'
assert unicode(t())
class JBQ_UTF8_Test5(unittest.TestCase):
def runTest(self):
t = Template.compile(source="""#encoding utf-8
Main file with |$v| and eacute in the template é""")
t.v = u'Unicode String'
assert unicode(t())
def loadModule(moduleName, path=None):
    """Return the module named *moduleName*, importing it from *path*
    (a list of directories) when it is not already cached in sys.modules.
    """
    if path:
        assert isinstance(path, list)
    # Fast path: the module has been imported before.
    if moduleName in sys.modules:
        return sys.modules[moduleName]
    fp = None
    try:
        fp, pathname, description = imp.find_module(moduleName, path)
        mod = imp.load_module(moduleName, fp, pathname, description)
    finally:
        # find_module hands us an open file object; always close it.
        if fp:
            fp.close()
    return mod
class JBQ_UTF8_Test6(unittest.TestCase):
def runTest(self):
source = """#encoding utf-8
#set $someUnicodeString = u"Bébé"
Main file with |$v| and eacute in the template é"""
t = Template.compile(source=source)
t.v = u'Unicode String'
assert unicode(t())
class JBQ_UTF8_Test7(CommandLineTest):
def runTest(self):
source = """#encoding utf-8
#set $someUnicodeString = u"Bébé"
Main file with |$v| and eacute in the template é"""
template = self.createAndCompile(source)
template.v = u'Unicode String'
assert unicode(template())
class JBQ_UTF8_Test8(CommandLineTest):
    """A UTF-8 template body must survive both the command-line (static)
    and in-process (dynamic) compilation paths."""
    def testStaticCompile(self):
        """Compile via the cheetah command line."""
        source = """#encoding utf-8
#set $someUnicodeString = u"Bébé"
$someUnicodeString"""
        template = self.createAndCompile(source)()
        a = unicode(template).encode("utf-8")
        # assertEquals is a deprecated unittest alias; use assertEqual.
        self.assertEqual("Bébé", a)
    def testDynamicCompile(self):
        """Compile in-process with Template()."""
        source = """#encoding utf-8
#set $someUnicodeString = u"Bébé"
$someUnicodeString"""
        template = Template(source = source)
        a = unicode(template).encode("utf-8")
        self.assertEqual("Bébé", a)
class EncodeUnicodeCompatTest(unittest.TestCase):
    """
    Taken initially from Red Hat's bugzilla #529332
    https://bugzilla.redhat.com/show_bug.cgi?id=529332
    """
    def runTest(self):
        t = Template("""Foo ${var}""", filter='EncodeUnicode')
        t.var = u"Text with some non-ascii characters: åäö"
        # respond() must keep the result as unicode...
        rc = t.respond()
        assert isinstance(rc, unicode), ('Template.respond() should return unicode', rc)
        # ...while str() must yield an encoded byte string.
        rc = str(t)
        assert isinstance(rc, str), ('Template.__str__() should return a UTF-8 encoded string', rc)
class Unicode_in_SearchList_Test(CommandLineTest):
def test_BasicASCII(self):
source = '''This is $adjective'''
template = self.createAndCompile(source)
assert template and issubclass(template, Template)
template = template(searchList=[{'adjective' : u'neat'}])
assert template.respond()
def test_Thai(self):
# The string is something in Thai
source = '''This is $foo $adjective'''
template = self.createAndCompile(source)
assert template and issubclass(template, Template)
template = template(searchList=[{'foo' : 'bar',
'adjective' : u'\u0e22\u0e34\u0e19\u0e14\u0e35\u0e15\u0e49\u0e2d\u0e19\u0e23\u0e31\u0e1a'}])
assert template.respond()
def test_Thai_utf8(self):
utf8 = '\xe0\xb8\xa2\xe0\xb8\xb4\xe0\xb8\x99\xe0\xb8\x94\xe0\xb8\xb5\xe0\xb8\x95\xe0\xb9\x89\xe0\xb8\xad\xe0\xb8\x99\xe0\xb8\xa3\xe0\xb8\xb1\xe0\xb8\x9a'
source = '''This is $adjective'''
template = self.createAndCompile(source)
assert template and issubclass(template, Template)
template = template(searchList=[{'adjective' : utf8}])
assert template.respond()
class InlineSpanishTest(unittest.TestCase):
def setUp(self):
super(InlineSpanishTest, self).setUp()
self.template = '''
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<title>Pagina del vendedor</title>
</head>
<body>
$header
<h2>Bienvenido $nombre.</h2>
<br /><br /><br />
<center>
Usted tiene $numpedidos_noconf <a href="">pedidós</a> sin confirmar.
<br /><br />
Bodega tiene fecha para $numpedidos_bodega <a href="">pedidos</a>.
</center>
</body>
</html>
'''
def test_failure(self):
""" Test a template lacking a proper #encoding tag """
self.failUnlessRaises(UnicodeDecodeError, Template, self.template, searchList=[{'header' : '',
'nombre' : '', 'numpedidos_bodega' : '',
'numpedidos_noconf' : ''}])
def test_success(self):
""" Test a template with a proper #encoding tag """
template = '#encoding utf-8\n%s' % self.template
template = Template(template, searchList=[{'header' : '',
'nombre' : '', 'numpedidos_bodega' : '',
'numpedidos_noconf' : ''}])
self.assertTrue(unicode(template))
if __name__ == '__main__':
unittest.main()
| Python |
#!/usr/bin/env python
import sys
import unittest
import Cheetah.Template
import Cheetah.Filters
# Interpreter version, cached once for the feature checks below.
majorVer = sys.version_info[0]
minorVer = sys.version_info[1]
versionTuple = (majorVer, minorVer)
class BasicMarkdownFilterTest(unittest.TestCase):
'''
Test that our markdown filter works
'''
def test_BasicHeader(self):
template = '''
#from Cheetah.Filters import Markdown
#transform Markdown
$foo
Header
======
'''
expected = '''<p>bar</p>
<h1>Header</h1>'''
try:
template = Cheetah.Template.Template(template, searchList=[{'foo' : 'bar'}])
template = str(template)
assert template == expected
except ImportError, ex:
print('>>> We probably failed to import markdown, bummer %s' % ex)
return
except Exception, ex:
if ex.__class__.__name__ == 'MarkdownException' and majorVer == 2 and minorVer < 5:
print('>>> NOTE: Support for the Markdown filter will be broken for you. Markdown says: %s' % ex)
return
raise
class BasicCodeHighlighterFilterTest(unittest.TestCase):
'''
Test that our code highlighter filter works
'''
def test_Python(self):
template = '''
#from Cheetah.Filters import CodeHighlighter
#transform CodeHighlighter
def foo(self):
return '$foo'
'''
template = Cheetah.Template.Template(template, searchList=[{'foo' : 'bar'}])
template = str(template)
assert template, (template, 'We should have some content here...')
def test_Html(self):
template = '''
#from Cheetah.Filters import CodeHighlighter
#transform CodeHighlighter
<html><head></head><body>$foo</body></html>
'''
template = Cheetah.Template.Template(template, searchList=[{'foo' : 'bar'}])
template = str(template)
assert template, (template, 'We should have some content here...')
if __name__ == '__main__':
unittest.main()
| Python |
#!/usr/bin/env python
import sys
import types
import os
import os.path
import unittest
from Cheetah.NameMapper import NotFound, valueForKey, \
valueForName, valueFromSearchList, valueFromFrame, valueFromFrameOrSearchList
class DummyClass:
    """Plain class whose attributes and methods the NameMapper lookup
    tests resolve against."""

    classVar1 = 123

    def __init__(self):
        self.instanceVar1 = 123

    def __str__(self):
        return 'object'

    def meth(self, arg="arff"):
        return str(arg)

    def meth1(self, arg="doo"):
        return arg

    def meth2(self, arg1="a1", arg2="a2"):
        # Always blows up: lets the suite confirm exceptions raised inside
        # an autocalled method propagate instead of being swallowed.
        raise ValueError

    def meth3(self):
        """Tests a bug that Jeff Johnson reported on Oct 1, 2001"""
        x = 'A string'
        try:
            for i in [1, 2, 3, 4]:
                if x == 2:
                    pass
                if x == 'xx':
                    pass
            return x
        except:
            raise
def dummyFunc(arg="Scooby"):
    """Module-level callable; NameMapper autocalls it with no arguments."""
    return arg
def funcThatRaises():
    """Always raises ValueError -- used to verify exception propagation."""
    raise ValueError
testNamespace = {
'aStr': 'blarg',
'anInt': 1,
'aFloat': 1.5,
'aDict': {'one': 'item1',
'two': 'item2',
'nestedDict': {'one': 'nestedItem1',
'two': 'nestedItem2',
'funcThatRaises': funcThatRaises,
'aClass': DummyClass,
},
'nestedFunc': dummyFunc,
},
'aClass': DummyClass,
'aFunc': dummyFunc,
'anObj': DummyClass(),
'aMeth': DummyClass().meth1,
'none': None,
'emptyString': '',
'funcThatRaises': funcThatRaises,
}
autoCallResults = {'aFunc': 'Scooby',
'aMeth': 'doo',
}
results = testNamespace.copy()
results.update({'anObj.meth1': 'doo',
'aDict.one': 'item1',
'aDict.nestedDict': testNamespace['aDict']['nestedDict'],
'aDict.nestedDict.one': 'nestedItem1',
'aDict.nestedDict.aClass': DummyClass,
'aDict.nestedFunc': 'Scooby',
'aClass.classVar1': 123,
'anObj.instanceVar1': 123,
'anObj.meth3': 'A string',
})
for k in testNamespace.keys():
# put them in the globals for the valueFromFrame tests
exec('%s = testNamespace[k]'%k)
##################################################
## TEST BASE CLASSES
class NameMapperTest(unittest.TestCase):
failureException = (NotFound, AssertionError)
_testNamespace = testNamespace
_results = results
def namespace(self):
return self._testNamespace
def VFN(self, name, autocall=True):
return valueForName(self.namespace(), name, autocall)
def VFS(self, searchList, name, autocall=True):
return valueFromSearchList(searchList, name, autocall)
# alias to be overriden later
get = VFN
def check(self, name):
got = self.get(name)
if name in autoCallResults:
expected = autoCallResults[name]
else:
expected = self._results[name]
assert got == expected
##################################################
## TEST CASE CLASSES
class VFN(NameMapperTest):
def test1(self):
"""string in dict lookup"""
self.check('aStr')
def test2(self):
"""string in dict lookup in a loop"""
for i in range(10):
self.check('aStr')
def test3(self):
"""int in dict lookup"""
self.check('anInt')
def test4(self):
"""int in dict lookup in a loop"""
for i in range(10):
self.check('anInt')
def test5(self):
"""float in dict lookup"""
self.check('aFloat')
def test6(self):
"""float in dict lookup in a loop"""
for i in range(10):
self.check('aFloat')
def test7(self):
"""class in dict lookup"""
self.check('aClass')
def test8(self):
"""class in dict lookup in a loop"""
for i in range(10):
self.check('aClass')
def test9(self):
"""aFunc in dict lookup"""
self.check('aFunc')
def test10(self):
"""aFunc in dict lookup in a loop"""
for i in range(10):
self.check('aFunc')
def test11(self):
"""aMeth in dict lookup"""
self.check('aMeth')
def test12(self):
"""aMeth in dict lookup in a loop"""
for i in range(10):
self.check('aMeth')
def test13(self):
"""aMeth in dict lookup"""
self.check('aMeth')
def test14(self):
"""aMeth in dict lookup in a loop"""
for i in range(10):
self.check('aMeth')
def test15(self):
"""anObj in dict lookup"""
self.check('anObj')
def test16(self):
"""anObj in dict lookup in a loop"""
for i in range(10):
self.check('anObj')
def test17(self):
"""aDict in dict lookup"""
self.check('aDict')
def test18(self):
"""aDict in dict lookup in a loop"""
for i in range(10):
self.check('aDict')
def test17(self):
"""aDict in dict lookup"""
self.check('aDict')
def test18(self):
"""aDict in dict lookup in a loop"""
for i in range(10):
self.check('aDict')
def test19(self):
"""aClass.classVar1 in dict lookup"""
self.check('aClass.classVar1')
def test20(self):
"""aClass.classVar1 in dict lookup in a loop"""
for i in range(10):
self.check('aClass.classVar1')
def test23(self):
"""anObj.instanceVar1 in dict lookup"""
self.check('anObj.instanceVar1')
def test24(self):
"""anObj.instanceVar1 in dict lookup in a loop"""
for i in range(10):
self.check('anObj.instanceVar1')
## tests 22, 25, and 26 removed when the underscored lookup was removed
def test27(self):
"""anObj.meth1 in dict lookup"""
self.check('anObj.meth1')
def test28(self):
"""anObj.meth1 in dict lookup in a loop"""
for i in range(10):
self.check('anObj.meth1')
def test29(self):
"""aDict.one in dict lookup"""
self.check('aDict.one')
def test30(self):
"""aDict.one in dict lookup in a loop"""
for i in range(10):
self.check('aDict.one')
def test31(self):
"""aDict.nestedDict in dict lookup"""
self.check('aDict.nestedDict')
def test32(self):
"""aDict.nestedDict in dict lookup in a loop"""
for i in range(10):
self.check('aDict.nestedDict')
def test33(self):
"""aDict.nestedDict.one in dict lookup"""
self.check('aDict.nestedDict.one')
def test34(self):
"""aDict.nestedDict.one in dict lookup in a loop"""
for i in range(10):
self.check('aDict.nestedDict.one')
def test35(self):
"""aDict.nestedFunc in dict lookup"""
self.check('aDict.nestedFunc')
def test36(self):
"""aDict.nestedFunc in dict lookup in a loop"""
for i in range(10):
self.check('aDict.nestedFunc')
def test37(self):
"""aDict.nestedFunc in dict lookup - without autocalling"""
assert self.get('aDict.nestedFunc', False) == dummyFunc
def test38(self):
"""aDict.nestedFunc in dict lookup in a loop - without autocalling"""
for i in range(10):
assert self.get('aDict.nestedFunc', False) == dummyFunc
def test39(self):
"""aMeth in dict lookup - without autocalling"""
assert self.get('aMeth', False) == self.namespace()['aMeth']
def test40(self):
"""aMeth in dict lookup in a loop - without autocalling"""
for i in range(10):
assert self.get('aMeth', False) == self.namespace()['aMeth']
def test41(self):
"""anObj.meth3 in dict lookup"""
self.check('anObj.meth3')
def test42(self):
"""aMeth in dict lookup in a loop"""
for i in range(10):
self.check('anObj.meth3')
def test43(self):
"""NotFound test"""
def test(self=self):
self.get('anObj.methX')
self.assertRaises(NotFound, test)
def test44(self):
"""NotFound test in a loop"""
def test(self=self):
self.get('anObj.methX')
for i in range(10):
self.assertRaises(NotFound, test)
def test45(self):
"""Other exception from meth test"""
def test(self=self):
self.get('anObj.meth2')
self.assertRaises(ValueError, test)
def test46(self):
"""Other exception from meth test in a loop"""
def test(self=self):
self.get('anObj.meth2')
for i in range(10):
self.assertRaises(ValueError, test)
def test47(self):
"""None in dict lookup"""
self.check('none')
def test48(self):
"""None in dict lookup in a loop"""
for i in range(10):
self.check('none')
def test49(self):
"""EmptyString in dict lookup"""
self.check('emptyString')
def test50(self):
"""EmptyString in dict lookup in a loop"""
for i in range(10):
self.check('emptyString')
def test51(self):
"""Other exception from func test"""
def test(self=self):
self.get('funcThatRaises')
self.assertRaises(ValueError, test)
def test52(self):
"""Other exception from func test in a loop"""
def test(self=self):
self.get('funcThatRaises')
for i in range(10):
self.assertRaises(ValueError, test)
def test53(self):
"""Other exception from func test"""
def test(self=self):
self.get('aDict.nestedDict.funcThatRaises')
self.assertRaises(ValueError, test)
def test54(self):
"""Other exception from func test in a loop"""
def test(self=self):
self.get('aDict.nestedDict.funcThatRaises')
for i in range(10):
self.assertRaises(ValueError, test)
def test55(self):
"""aDict.nestedDict.aClass in dict lookup"""
self.check('aDict.nestedDict.aClass')
def test56(self):
"""aDict.nestedDict.aClass in dict lookup in a loop"""
for i in range(10):
self.check('aDict.nestedDict.aClass')
def test57(self):
"""aDict.nestedDict.aClass in dict lookup - without autocalling"""
assert self.get('aDict.nestedDict.aClass', False) == DummyClass
def test58(self):
"""aDict.nestedDict.aClass in dict lookup in a loop - without autocalling"""
for i in range(10):
assert self.get('aDict.nestedDict.aClass', False) == DummyClass
def test59(self):
"""Other exception from func test -- but without autocalling shouldn't raise"""
self.get('aDict.nestedDict.funcThatRaises', False)
def test60(self):
"""Other exception from func test in a loop -- but without autocalling shouldn't raise"""
for i in range(10):
self.get('aDict.nestedDict.funcThatRaises', False)
class VFS(VFN):
_searchListLength = 1
def searchList(self):
lng = self._searchListLength
if lng == 1:
return [self.namespace()]
elif lng == 2:
return [self.namespace(), {'dummy':1234}]
elif lng == 3:
# a tuple for kicks
return ({'dummy':1234}, self.namespace(), {'dummy':1234})
elif lng == 4:
# a generator for more kicks
return self.searchListGenerator()
def searchListGenerator(self):
class Test:
pass
for i in [Test(), {'dummy':1234}, self.namespace(), {'dummy':1234}]:
yield i
def get(self, name, autocall=True):
return self.VFS(self.searchList(), name, autocall)
class VFS_2namespaces(VFS):
_searchListLength = 2
class VFS_3namespaces(VFS):
_searchListLength = 3
class VFS_4namespaces(VFS):
_searchListLength = 4
class VFF(VFN):
    # Runs the whole VFN suite through valueFromFrame(): names are resolved
    # from the calling stack frame rather than from an explicit namespace.
    def get(self, name, autocall=True):
        ns = self._testNamespace
        # These locals are intentional -- valueFromFrame() is expected to
        # find 'aStr', 'aFloat' and 'none' in *this* frame's locals (setUp
        # mirrors the same values into the expected results). Do not remove.
        aStr = ns['aStr']
        aFloat = ns['aFloat']
        none = 'some'
        return valueFromFrame(name, autocall)
    def setUp(self):
        """Mod some of the data
        """
        self._testNamespace = ns = self._testNamespace.copy()
        self._results = res = self._results.copy()
        ns['aStr'] = res['aStr'] = 'BLARG'
        ns['aFloat'] = res['aFloat'] = 0.1234
        res['none'] = 'some'
        # Builtins should also be reachable through frame lookup.
        res['True'] = True
        res['False'] = False
        res['None'] = None
        res['eval'] = eval
    def test_VFF_1(self):
        """Builtins"""
        self.check('True')
        self.check('None')
        self.check('False')
        assert self.get('eval', False)==eval
        assert self.get('range', False)==range
class VFFSL(VFS):
_searchListLength = 1
def setUp(self):
"""Mod some of the data
"""
self._testNamespace = ns = self._testNamespace.copy()
self._results = res = self._results.copy()
ns['aStr'] = res['aStr'] = 'BLARG'
ns['aFloat'] = res['aFloat'] = 0.1234
res['none'] = 'some'
del ns['anInt'] # will be picked up by globals
def VFFSL(self, searchList, name, autocall=True):
anInt = 1
none = 'some'
return valueFromFrameOrSearchList(searchList, name, autocall)
def get(self, name, autocall=True):
return self.VFFSL(self.searchList(), name, autocall)
class VFFSL_2(VFFSL):
_searchListLength = 2
class VFFSL_3(VFFSL):
_searchListLength = 3
class VFFSL_4(VFFSL):
_searchListLength = 4
# Jython has no CPython-style frame introspection, so the frame-based
# lookup test classes cannot run there and are dropped.
if sys.platform.startswith('java'):
    del VFF, VFFSL, VFFSL_2, VFFSL_3, VFFSL_4
##################################################
## if run from the command line ##
if __name__ == '__main__':
unittest.main()
| Python |
#!/usr/bin/env python
import hotshot
import hotshot.stats
import os
import sys
import unittest
from test import pystone
import time
import Cheetah.NameMapper
import Cheetah.Template
# This can be turned on with the `--debug` flag when running the test
# and will cause the tests to all just dump out how long they took
# insteasd of asserting on duration
DEBUG = False
# TOLERANCE in Pystones
kPS = 1000
TOLERANCE = 0.5*kPS
class DurationError(AssertionError):
pass
# Memoized pystone calibration result -- presumably a (benchtime, stones)
# pair as returned by test.pystone.pystones(); TODO confirm.
_pystone_calibration_mark = None
def _pystone_calibration():
    """Run the pystone benchmark once and cache its result for all tests."""
    global _pystone_calibration_mark
    if not _pystone_calibration_mark:
        _pystone_calibration_mark = pystone.pystones(loops=pystone.LOOPS)
    return _pystone_calibration_mark
def perftest(max_num_pystones, current_pystone=None):
    '''
    Performance test decorator based off the 'timedtest'
    decorator found in this Active State recipe:
    http://code.activestate.com/recipes/440700/

    The wrapped callable raises DurationError when it costs more than
    ``max_num_pystones`` (plus TOLERANCE) pystones -- unless DEBUG is set,
    in which case the measured cost is only printed.
    '''
    # Local import keeps the module's import block untouched.
    from functools import wraps
    if not isinstance(max_num_pystones, float):
        max_num_pystones = float(max_num_pystones)
    if not current_pystone:
        current_pystone = _pystone_calibration()
    def _test(function):
        # wraps() preserves the decorated test's name/docstring so unittest
        # reporting (and the DEBUG print below) stays meaningful.
        @wraps(function)
        def wrapper(*args, **kw):
            start_time = time.time()
            try:
                return function(*args, **kw)
            finally:
                total_time = time.time() - start_time
                if total_time == 0:
                    pystone_total_time = 0
                else:
                    # Convert wall-clock seconds into machine-independent
                    # pystone units via the calibration run.
                    pystone_rate = current_pystone[0] / current_pystone[1]
                    pystone_total_time = total_time / pystone_rate
                global DEBUG
                if DEBUG:
                    print('The test "%s" took: %s pystones' % (function.func_name,
                        pystone_total_time))
                else:
                    if pystone_total_time > (max_num_pystones + TOLERANCE):
                        raise DurationError((('Test too long (%.2f Ps, '
                                              'need at most %.2f Ps)')
                                             % (pystone_total_time,
                                                max_num_pystones)))
        return wrapper
    return _test
class DynamicTemplatePerformanceTest(unittest.TestCase):
loops = 10
#@perftest(1200)
def test_BasicDynamic(self):
template = '''
#def foo(arg1, arg2)
#pass
#end def
'''
for i in range(self.loops):
klass = Cheetah.Template.Template.compile(template)
assert klass
test_BasicDynamic = perftest(1200)(test_BasicDynamic)
class PerformanceTest(unittest.TestCase):
iterations = 100000
display = False
save = False
def runTest(self):
self.prof = hotshot.Profile('%s.prof' % self.__class__.__name__)
self.prof.start()
for i in range(self.iterations):
if hasattr(self, 'performanceSample'):
self.display = True
self.performanceSample()
self.prof.stop()
self.prof.close()
if self.display:
print('>>> %s (%d iterations) ' % (self.__class__.__name__,
self.iterations))
stats = hotshot.stats.load('%s.prof' % self.__class__.__name__)
#stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(50)
if not self.save:
os.unlink('%s.prof' % self.__class__.__name__)
class DynamicMethodCompilationTest(PerformanceTest):
def performanceSample(self):
template = '''
#import sys
#import os
#def testMethod()
#set foo = [1, 2, 3, 4]
#return $foo[0]
#end def
'''
template = Cheetah.Template.Template.compile(template,
keepRefToGeneratedCode=False)
template = template()
value = template.testMethod()
class BunchOfWriteCalls(PerformanceTest):
iterations = 1000
def performanceSample(self):
template = '''
#import sys
#import os
#for i in range(1000)
$i
#end for
'''
template = Cheetah.Template.Template.compile(template,
keepRefToGeneratedCode=False)
template = template()
value = template.respond()
del value
class DynamicSimpleCompilationTest(PerformanceTest):
def performanceSample(self):
template = '''
#import sys
#import os
#set foo = [1,2,3,4]
Well hello there! This is basic.
Here's an array too: $foo
'''
template = Cheetah.Template.Template.compile(template,
keepRefToGeneratedCode=False)
template = template()
template = unicode(template)
class FilterTest(PerformanceTest):
template = None
def setUp(self):
super(FilterTest, self).setUp()
template = '''
#import sys
#import os
#set foo = [1, 2, 3, 4]
$foo, $foo, $foo
'''
template = Cheetah.Template.Template.compile(template,
keepRefToGeneratedCode=False)
self.template = template()
def performanceSample(self):
value = unicode(self.template)
class LongCompileTest(PerformanceTest):
''' Test the compilation on a sufficiently large template '''
def compile(self, template):
return Cheetah.Template.Template.compile(template, keepRefToGeneratedCode=False)
def performanceSample(self):
template = '''
#import sys
#import Cheetah.Template
#extends Cheetah.Template.Template
#def header()
<center><h2>This is my header</h2></center>
#end def
#def footer()
#return "Huzzah"
#end def
#def scripts()
#pass
#end def
#def respond()
<html>
<head>
<title>${title}</title>
$scripts()
</head>
<body>
$header()
#for $i in $range(10)
This is just some stupid page!
<br/>
#end for
<br/>
$footer()
</body>
</html>
#end def
'''
return self.compile(template)
class LongCompile_CompilerSettingsTest(LongCompileTest):
def compile(self, template):
return Cheetah.Template.Template.compile(template, keepRefToGeneratedCode=False,
compilerSettings={'useStackFrames' : True, 'useAutocalling' : True})
class LongCompileAndRun(LongCompileTest):
def performanceSample(self):
template = super(LongCompileAndRun, self).performanceSample()
template = template(searchList=[{'title' : 'foo'}])
template = template.respond()
if __name__ == '__main__':
    # `--debug` switches the suite to reporting durations instead of
    # asserting on them; strip the flag so unittest's CLI never sees it.
    if '--debug' in sys.argv:
        DEBUG = True
    # IDIOM FIX: `arg != '--debug'` instead of `not arg == '--debug'`.
    sys.argv = [arg for arg in sys.argv if arg != '--debug']
    unittest.main()
| Python |
#!/usr/bin/env python
import unittest
from Cheetah import DirectiveAnalyzer
class AnalyzerTests(unittest.TestCase):
    """Checks DirectiveAnalyzer.analyze()'s per-directive usage counts."""
    def test_set(self):
        """A single #set directive is counted exactly once."""
        template = '''
#set $foo = "bar"
Hello ${foo}!
'''
        calls = DirectiveAnalyzer.analyze(template)
        # assertEquals is a deprecated unittest alias; use assertEqual.
        self.assertEqual(1, calls.get('set'))
    def test_compilersettings(self):
        """A #compiler-settings block is counted under 'compiler-settings'."""
        template = '''
#compiler-settings
useNameMapper = False
#end compiler-settings
'''
        calls = DirectiveAnalyzer.analyze(template)
        self.assertEqual(1, calls.get('compiler-settings'))
if __name__ == '__main__':
unittest.main()
| Python |
#!/usr/bin/env python
import pdb
import sys
import types
import os
import os.path
import tempfile
import shutil
import unittest
from Cheetah.Template import Template
majorVer, minorVer = sys.version_info[0], sys.version_info[1]
versionTuple = (majorVer, minorVer)
class TemplateTest(unittest.TestCase):
pass
class ClassMethods_compile(TemplateTest):
"""I am using the same Cheetah source for each test to root out clashes
caused by the compile caching in Template.compile().
"""
def test_basicUsage(self):
klass = Template.compile(source='$foo')
t = klass(namespaces={'foo':1234})
assert str(t)=='1234'
def test_baseclassArg(self):
klass = Template.compile(source='$foo', baseclass=dict)
t = klass({'foo':1234})
assert str(t)=='1234'
klass2 = Template.compile(source='$foo', baseclass=klass)
t = klass2({'foo':1234})
assert str(t)=='1234'
klass3 = Template.compile(source='#implements dummy\n$bar', baseclass=klass2)
t = klass3({'foo':1234})
assert str(t)=='1234'
klass4 = Template.compile(source='$foo', baseclass='dict')
t = klass4({'foo':1234})
assert str(t)=='1234'
def test_moduleFileCaching(self):
if versionTuple < (2, 3):
return
tmpDir = tempfile.mkdtemp()
try:
#print tmpDir
assert os.path.exists(tmpDir)
klass = Template.compile(source='$foo',
cacheModuleFilesForTracebacks=True,
cacheDirForModuleFiles=tmpDir)
mod = sys.modules[klass.__module__]
#print mod.__file__
assert os.path.exists(mod.__file__)
assert os.path.dirname(mod.__file__)==tmpDir
finally:
shutil.rmtree(tmpDir, True)
def test_classNameArg(self):
klass = Template.compile(source='$foo', className='foo123')
assert klass.__name__=='foo123'
t = klass(namespaces={'foo':1234})
assert str(t)=='1234'
def test_moduleNameArg(self):
klass = Template.compile(source='$foo', moduleName='foo99')
mod = sys.modules['foo99']
assert klass.__name__=='foo99'
t = klass(namespaces={'foo':1234})
assert str(t)=='1234'
klass = Template.compile(source='$foo',
moduleName='foo1',
className='foo2')
mod = sys.modules['foo1']
assert klass.__name__=='foo2'
t = klass(namespaces={'foo':1234})
assert str(t)=='1234'
def test_mainMethodNameArg(self):
klass = Template.compile(source='$foo',
className='foo123',
mainMethodName='testMeth')
assert klass.__name__=='foo123'
t = klass(namespaces={'foo':1234})
#print t.generatedClassCode()
assert str(t)=='1234'
assert t.testMeth()=='1234'
klass = Template.compile(source='$foo',
moduleName='fooXXX',
className='foo123',
mainMethodName='testMeth',
baseclass=dict)
assert klass.__name__=='foo123'
t = klass({'foo':1234})
#print t.generatedClassCode()
assert str(t)=='1234'
assert t.testMeth()=='1234'
def test_moduleGlobalsArg(self):
klass = Template.compile(source='$foo',
moduleGlobals={'foo':1234})
t = klass()
assert str(t)=='1234'
klass2 = Template.compile(source='$foo', baseclass='Test1',
moduleGlobals={'Test1':dict})
t = klass2({'foo':1234})
assert str(t)=='1234'
klass3 = Template.compile(source='$foo', baseclass='Test1',
moduleGlobals={'Test1':dict, 'foo':1234})
t = klass3()
assert str(t)=='1234'
def test_keepRefToGeneratedCodeArg(self):
klass = Template.compile(source='$foo',
className='unique58',
cacheCompilationResults=False,
keepRefToGeneratedCode=False)
t = klass(namespaces={'foo':1234})
assert str(t)=='1234'
assert not t.generatedModuleCode()
klass2 = Template.compile(source='$foo',
className='unique58',
keepRefToGeneratedCode=True)
t = klass2(namespaces={'foo':1234})
assert str(t)=='1234'
assert t.generatedModuleCode()
klass3 = Template.compile(source='$foo',
className='unique58',
keepRefToGeneratedCode=False)
t = klass3(namespaces={'foo':1234})
assert str(t)=='1234'
# still there as this class came from the cache
assert t.generatedModuleCode()
def test_compilationCache(self):
klass = Template.compile(source='$foo',
className='unique111',
cacheCompilationResults=False)
t = klass(namespaces={'foo':1234})
assert str(t)=='1234'
assert not klass._CHEETAH_isInCompilationCache
# this time it will place it in the cache
klass = Template.compile(source='$foo',
className='unique111',
cacheCompilationResults=True)
t = klass(namespaces={'foo':1234})
assert str(t)=='1234'
assert klass._CHEETAH_isInCompilationCache
# by default it will be in the cache
klass = Template.compile(source='$foo',
className='unique999099')
t = klass(namespaces={'foo':1234})
assert str(t)=='1234'
assert klass._CHEETAH_isInCompilationCache
class ClassMethods_subclass(TemplateTest):
def test_basicUsage(self):
klass = Template.compile(source='$foo', baseclass=dict)
t = klass({'foo':1234})
assert str(t)=='1234'
klass2 = klass.subclass(source='$foo')
t = klass2({'foo':1234})
assert str(t)=='1234'
klass3 = klass2.subclass(source='#implements dummy\n$bar')
t = klass3({'foo':1234})
assert str(t)=='1234'
class Preprocessors(TemplateTest):
    """Tests for Template.compile()'s 'preprocessors' argument, which runs
    the source through an intermediate template pass (with its own token
    set) before the real compilation."""

    def test_basicUsage1(self):
        # '@'-vars and '%'-directives belong to the preprocessor pass;
        # '@a' is resolved from the preprocessor's 'namespaces' dict.
        src='''\
%set foo = @a
$(@foo*10)
@a'''
        # strip leading whitespace so directives start in column 0
        src = '\n'.join([ln.strip() for ln in src.splitlines()])
        preprocessors = {'tokens':'@ %',
                         'namespaces':{'a':99}
                         }
        klass = Template.compile(src, preprocessors=preprocessors)
        assert str(klass())=='990\n99'

    def test_normalizePreprocessorArgVariants(self):
        # Every accepted form of the 'preprocessors' argument should yield
        # the same compiled result.
        src='%set foo = 12\n%%comment\n$(@foo*10)'

        class Settings1: tokens = '@ %'
        Settings1 = Settings1()

        from Cheetah.Template import TemplatePreprocessor
        settings = Template._normalizePreprocessorSettings(Settings1)
        preprocObj = TemplatePreprocessor(settings)

        def preprocFunc(source, file):
            return '$(12*10)', None

        class TemplateSubclass(Template):
            pass

        compilerSettings = {'cheetahVarStartToken': '@',
                            'directiveStartToken': '%',
                            'commentStartToken': '%%',
                            }
        # Legal variants: token string, settings dicts, a settings object,
        # a TemplatePreprocessor instance, and a plain callable.
        for arg in ['@ %',
                    {'tokens':'@ %'},
                    {'compilerSettings':compilerSettings},
                    {'compilerSettings':compilerSettings,
                     'templateInitArgs':{}},
                    {'tokens':'@ %',
                     'templateAPIClass':TemplateSubclass},
                    Settings1,
                    preprocObj,
                    preprocFunc,
                    ]:
            klass = Template.compile(src, preprocessors=arg)
            assert str(klass())=='120'

    def test_complexUsage(self):
        # Preprocessor directives that themselves generate Cheetah
        # directives (#def) for the main compilation pass.
        src='''\
%set foo = @a
%def func1: #def func(arg): $arg("***")
%% comment
$(@foo*10)
@func1
$func(lambda x:c"--$x--@a")'''
        src = '\n'.join([ln.strip() for ln in src.splitlines()])
        for arg in [{'tokens':'@ %', 'namespaces':{'a':99} },
                    {'tokens':'@ %', 'namespaces':{'a':99} },
                    ]:
            klass = Template.compile(src, preprocessors=arg)
            t = klass()
            assert str(t)=='990\n--***--99'

    def test_i18n(self):
        # The %i18n directive apparently passes its message through
        # untranslated (expected output is the raw message twice).
        src='''\
%i18n: This is a $string that needs translation
%i18n id="foo", domain="root": This is a $string that needs translation
'''
        src = '\n'.join([ln.strip() for ln in src.splitlines()])
        klass = Template.compile(src, preprocessors='@ %', baseclass=dict)
        t = klass({'string':'bit of text'})
        #print str(t), repr(str(t))
        assert str(t)==('This is a bit of text that needs translation\n'*2)[:-1]
class TryExceptImportTest(TemplateTest):
    def test_FailCase(self):
        ''' Test situation where an inline #import statement will get relocated '''
        source = '''
#def myFunction()
Ahoy!
#try
#import sys
#except ImportError
$print "This will never happen!"
#end try
#end def
'''
        # This should raise an IndentationError (if the bug exists)
        # NOTE(review): nothing is asserted here; the test passes as long
        # as compiling and instantiating does not raise.
        klass = Template.compile(source=source, compilerSettings={'useLegacyImportMode' : False})
        t = klass(namespaces={'foo' : 1234})
class ClassMethodSupport(TemplateTest):
    """#@classmethod-decorated template methods should be callable on the
    compiled class itself."""

    def test_BasicDecorator(self):
        # Decorator syntax requires Python 2.4+.
        if sys.version_info[0] == 2 and sys.version_info[1] == 3:
            print('This version of Python doesn\'t support decorators, skipping tests')
            return
        template = '''
#@classmethod
#def myClassMethod()
#return '$foo = %s' % $foo
#end def
'''
        template = Template.compile(source=template)
        try:
            # Called on the class, not an instance.
            rc = template.myClassMethod(foo='bar')
            assert rc == '$foo = bar', (rc, 'Template class method didn\'t return what I expected')
        except AttributeError, ex:
            # NOTE: Python 2 'except E, ex' syntax -- this module is py2-only.
            self.fail(ex)
class StaticMethodSupport(TemplateTest):
    """#@staticmethod-decorated template methods should be callable on the
    compiled class without an instance."""

    def test_BasicDecorator(self):
        # Decorator syntax requires Python 2.4+.
        if sys.version_info[0] == 2 and sys.version_info[1] == 3:
            print('This version of Python doesn\'t support decorators, skipping tests')
            return
        template = '''
#@staticmethod
#def myStaticMethod()
#return '$foo = %s' % $foo
#end def
'''
        template = Template.compile(source=template)
        try:
            rc = template.myStaticMethod(foo='bar')
            assert rc == '$foo = bar', (rc, 'Template class method didn\'t return what I expected')
        except AttributeError, ex:
            # NOTE: Python 2 'except E, ex' syntax -- this module is py2-only.
            self.fail(ex)
class Useless(object):
    """Trivial mixin used by the MultipleInheritanceSupport test below."""

    def boink(self):
        """Return a fixed three-element list for templates to call."""
        return list(range(1, 4))
class MultipleInheritanceSupport(TemplateTest):
    """#extends with more than one base class should work."""

    def runTest(self):
        template = '''
#extends Template, Useless
#def foo()
#return [4,5] + $boink()
#end def
'''
        # 'Useless' must come from moduleGlobals because auto-importing
        # for #extends is turned off here.
        template = Template.compile(template,
                                    moduleGlobals={'Useless' : Useless},
                                    compilerSettings={'autoImportForExtendsDirective' : False})
        template = template()
        result = template.foo()
        # foo() combines its own list with the inherited Useless.boink().
        assert result == [4, 5, 1, 2, 3], (result, 'Unexpected result')
##################################################
## if run from the command line ##
# Run this module's tests when executed directly.
if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/env python
import unittest
from Cheetah import SettingsManager
class SettingsManagerTests(unittest.TestCase):
    def test_mergeDictionaries(self):
        # mergeNestedDictionaries() should deep-merge 'right' into 'left',
        # preserving nested dicts from 'left' untouched.
        left = {'foo' : 'bar', 'abc' : {'a' : 1, 'b' : 2, 'c' : (3,)}}
        right = {'xyz' : (10, 9)}
        expect = {'xyz': (10, 9), 'foo': 'bar', 'abc': {'a': 1, 'c': (3,), 'b': 2}}
        result = SettingsManager.mergeNestedDictionaries(left, right)
        self.assertEquals(result, expect)
# Run the tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| Python |
#
| Python |
#!/usr/bin/env python
import Cheetah.NameMapper
import Cheetah.Template
import sys
import unittest
# Cache the running interpreter's version for the Python-2.3 checks below.
majorVer, minorVer = sys.version_info[0], sys.version_info[1]
versionTuple = (majorVer, minorVer)
def isPython23():
    """True when running under Python 2.3.

    Python 2.3 is still supported by Cheetah, but it lacks decorator
    syntax, so the decorator tests skip themselves on it.
    """
    return majorVer == 2 and minorVer <= 3
class GetAttrException(Exception):
    # Distinctive exception type raised by CustomGetAttrClass.__getattr__.
    pass
class CustomGetAttrClass(object):
    # Every attribute access raises; used to check that NameMapper lets
    # the original exception propagate instead of masking it as NotFound.
    def __getattr__(self, name):
        raise GetAttrException('FAIL, %s' % name)
class GetAttrTest(unittest.TestCase):
    '''
    Test for an issue occurring when __getattr__() raises an exception
    causing NameMapper to raise a NotFound exception
    '''
    def test_ValidException(self):
        # The custom exception from __getattr__ must propagate unchanged.
        o = CustomGetAttrClass()
        try:
            print(o.attr)
        except GetAttrException:
            # expected
            return
        except:
            # Bug fix: the original referenced 'e' here, but no name was
            # ever bound in this bare-except branch (only the
            # GetAttrException branch bound one), so an unexpected
            # exception produced a NameError instead of a useful failure
            # message.  sys.exc_info() is valid in any except block and on
            # every Python version this codebase supports.
            self.fail('Invalid exception raised: %s' % (sys.exc_info()[1],))
        self.fail('Should have had an exception raised')

    def test_NotFoundException(self):
        # NameMapper must let GetAttrException through rather than
        # converting it into its own NotFound.
        template = '''
#def raiseme()
$obj.attr
#end def'''
        template = Cheetah.Template.Template.compile(template, compilerSettings={}, keepRefToGeneratedCode=True)
        template = template(searchList=[{'obj' : CustomGetAttrClass()}])
        assert template, 'We should have a valid template object by now'
        self.failUnlessRaises(GetAttrException, template.raiseme)
class InlineImportTest(unittest.TestCase):
    """Regression tests for inline #import/#from handling."""

    def test_FromFooImportThing(self):
        '''
        Verify that a bug introduced in v2.1.0 where an inline:
            #from module import class
        would result in the following code being generated:
            import class
        '''
        template = '''
#def myfunction()
#if True
#from os import path
#return 17
Hello!
#end if
#end def
'''
        template = Cheetah.Template.Template.compile(template, compilerSettings={'useLegacyImportMode' : False}, keepRefToGeneratedCode=True)
        template = template(searchList=[{}])
        assert template, 'We should have a valid template object by now'
        rc = template.myfunction()
        assert rc == 17, (template, 'Didn\'t get a proper return value')

    def test_ImportFailModule(self):
        # A failing inline #import inside #try should be catchable from
        # within the template itself.
        template = '''
#try
#import invalidmodule
#except
#set invalidmodule = dict(FOO='BAR!')
#end try

$invalidmodule.FOO
'''
        template = Cheetah.Template.Template.compile(template, compilerSettings={'useLegacyImportMode' : False}, keepRefToGeneratedCode=True)
        template = template(searchList=[{}])
        assert template, 'We should have a valid template object by now'
        assert str(template), 'We weren\'t able to properly generate the result from the template'

    def test_ProperImportOfBadModule(self):
        # With legacy import mode off, a bad top-level import should fail
        # at compile time, not at render time.
        template = '''
#from invalid import fail

This should totally $fail
'''
        self.failUnlessRaises(ImportError, Cheetah.Template.Template.compile, template, compilerSettings={'useLegacyImportMode' : False}, keepRefToGeneratedCode=True)

    def test_AutoImporting(self):
        # #extends of an unknown name triggers an auto-import that fails.
        template = '''
#extends FakeyTemplate

Boo!
'''
        self.failUnlessRaises(ImportError, Cheetah.Template.Template.compile, template)

    def test_StuffBeforeImport_Legacy(self):
        # Comments before #extends must not break legacy import handling.
        template = '''
###
### I like comments before import
###
#extends Foo
Bar
'''
        self.failUnlessRaises(ImportError, Cheetah.Template.Template.compile, template, compilerSettings={'useLegacyImportMode' : True}, keepRefToGeneratedCode=True)
class Mantis_Issue_11_Regression_Test(unittest.TestCase):
    '''
    Test case for bug outlined in Mantis issue #11:

    Output:
    Traceback (most recent call last):
      File "test.py", line 12, in <module>
        t.respond()
      File "DynamicallyCompiledCheetahTemplate.py", line 86, in respond
      File "/usr/lib64/python2.6/cgi.py", line 1035, in escape
        s = s.replace("&", "&") # Must be done first!
    '''
    def test_FailingBehavior(self):
        import cgi
        # NOTE(review): presumably $request resolves to a Template/Servlet
        # attribute rather than the searchList value, so cgi.escape() gets
        # a non-string and raises AttributeError -- confirm against
        # NameMapper's lookup order.
        template = Cheetah.Template.Template("$escape($request)", searchList=[{'escape' : cgi.escape, 'request' : 'foobar'}])
        assert template
        self.failUnlessRaises(AttributeError, template.respond)

    def test_FailingBehaviorWithSetting(self):
        import cgi
        # With prioritizeSearchListOverSelf the searchList value wins and
        # rendering succeeds.
        template = Cheetah.Template.Template("$escape($request)",
                                             searchList=[{'escape' : cgi.escape, 'request' : 'foobar'}],
                                             compilerSettings={'prioritizeSearchListOverSelf' : True})
        assert template
        assert template.respond()
class Mantis_Issue_21_Regression_Test(unittest.TestCase):
    '''
    Test case for bug outlined in issue #21

    Effectively @staticmethod and @classmethod
    decorated methods in templates don't
    properly define the _filter local, which breaks
    when using the NameMapper
    '''
    def runTest(self):
        # Decorator syntax is unavailable on Python 2.3.
        if isPython23():
            return
        template = '''
#@staticmethod
#def testMethod()
This is my $output
#end def
'''
        template = Cheetah.Template.Template.compile(template)
        assert template
        assert template.testMethod(output='bug') # raises a NameError: global name '_filter' is not defined
class Mantis_Issue_22_Regression_Test(unittest.TestCase):
    '''
    Test case for bug outlined in issue #22

    When using @staticmethod and @classmethod
    in conjunction with the #filter directive
    the generated code for the #filter is reliant
    on the `self` local, breaking the function
    '''
    def test_NoneFilter(self):
        # XXX: Disabling this test for now
        # NOTE(review): the early return below disables this test; the
        # rest of the method body is unreachable.
        return
        if isPython23():
            return
        template = '''
#@staticmethod
#def testMethod()
#filter None
This is my $output
#end filter
#end def
'''
        template = Cheetah.Template.Template.compile(template)
        assert template
        assert template.testMethod(output='bug')

    def test_DefinedFilter(self):
        # XXX: Disabling this test for now
        # NOTE(review): disabled via the early return, as above.
        return
        if isPython23():
            return
        template = '''
#@staticmethod
#def testMethod()
#filter Filter
This is my $output
#end filter
#end def
'''
        # The generated code for the template's testMethod() should look something
        # like this in the 'error' case:
        '''
        @staticmethod
        def testMethod(**KWS):
            ## CHEETAH: generated from #def testMethod() at line 3, col 13.
            trans = DummyTransaction()
            _dummyTrans = True
            write = trans.response().write
            SL = [KWS]
            _filter = lambda x, **kwargs: unicode(x)

            ########################################
            ## START - generated method body

            _orig_filter_18517345 = _filter
            filterName = u'Filter'
            if self._CHEETAH__filters.has_key("Filter"):
                _filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
            else:
                _filter = self._CHEETAH__currentFilter = \
                    self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
            write(u'        This is my ')
            _v = VFFSL(SL,"output",True) # u'$output' on line 5, col 32
            if _v is not None: write(_filter(_v, rawExpr=u'$output')) # from line 5, col 32.

            ########################################
            ## END - generated method body

            return _dummyTrans and trans.response().getvalue() or ""
        '''
        template = Cheetah.Template.Template.compile(template)
        assert template
        assert template.testMethod(output='bug')
# Run the tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/env python
import unittest
from Cheetah import Parser
class ArgListTest(unittest.TestCase):
    """Unit tests for Parser.ArgList's argument/default merging."""

    def setUp(self):
        super(ArgListTest, self).setUp()
        # A fresh ArgList for every test.
        self.al = Parser.ArgList()

    def test_merge1(self):
        '''
        Testing the ArgList case results from Template.Preprocessors.test_complexUsage
        '''
        self.al.add_argument('arg')
        # An argument with no default merges as (name, None).
        expect = [('arg', None)]
        self.assertEquals(expect, self.al.merge())

    def test_merge2(self):
        '''
        Testing the ArgList case results from SyntaxAndOutput.BlockDirective.test4
        '''
        self.al.add_argument('a')
        self.al.add_default('999')
        # next() presumably starts a new argument slot -- verify against
        # Parser.ArgList.
        self.al.next()
        self.al.add_argument('b')
        self.al.add_default('444')
        expect = [(u'a', u'999'), (u'b', u'444')]
        self.assertEquals(expect, self.al.merge())

    def test_merge3(self):
        '''
        Testing the ArgList case results from SyntaxAndOutput.BlockDirective.test13
        '''
        self.al.add_argument('arg')
        self.al.add_default("'This is my block'")
        expect = [('arg', "'This is my block'")]
        self.assertEquals(expect, self.al.merge())
# Run the tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| Python |
from glob import glob
import os
from os import listdir
import os.path
import re
from tempfile import mktemp
def _escapeRegexChars(txt,
escapeRE=re.compile(r'([\$\^\*\+\.\?\{\}\[\]\(\)\|\\])')):
return escapeRE.sub(r'\\\1', txt)
def findFiles(*args, **kw):
    """Recursively find all the files matching a glob pattern.

    Thin convenience wrapper around the FileFinder class; see its
    docstring for the accepted arguments."""
    finder = FileFinder(*args, **kw)
    return finder.files()
def replaceStrInFiles(files, theStr, repl):
    """Literal (non-regex) multi-file string replacement.

    Escapes 'theStr' so regex metacharacters match literally, then
    delegates to FindAndReplace.  Returns a dict describing the matches
    found -- like string.replace() applied across many files."""
    literalPattern = _escapeRegexChars(theStr)
    return FindAndReplace(files, literalPattern, repl).results()
def replaceRegexInFiles(files, pattern, repl):
    """Regex multi-file replacement: substitute 'repl' for every match of
    'pattern' in each file of 'files'.  Returns a dict describing the
    matches found -- like re.sub on a multi-file basis.

    Thin wrapper around the FindAndReplace class; see its docstring."""
    finder = FindAndReplace(files, pattern, repl)
    return finder.results()
##################################################
## CLASSES
class FileFinder:
    """Collect every file under *rootPath* matching one of *globPatterns*,
    skipping directories whose basename is in *ignoreBasenames* or whose
    full path is in *ignoreDirs*."""

    def __init__(self, rootPath,
                 globPatterns=('*',),
                 ignoreBasenames=('CVS', '.svn'),
                 ignoreDirs=(),
                 ):
        self._rootPath = rootPath
        self._globPatterns = globPatterns
        self._ignoreBasenames = ignoreBasenames
        self._ignoreDirs = ignoreDirs
        self._files = []
        self.walkDirTree(rootPath)

    def walkDirTree(self, dir='.',
                    listdir=os.listdir,
                    isdir=os.path.isdir,
                    join=os.path.join,
                    ):
        """Iteratively walk the tree: process each directory, then queue
        its non-filtered subdirectories."""
        toVisit = [dir]
        while toVisit:
            current = toVisit.pop()
            self.processDir(current)
            for entry in listdir(current):
                candidate = join(current, entry)
                if isdir(candidate) and self.filterDir(entry, candidate):
                    toVisit.append(candidate)

    def filterDir(self, baseName, fullPath):
        """Hook for subclasses: return False for directories to skip."""
        if baseName in self._ignoreBasenames:
            return False
        return fullPath not in self._ignoreDirs

    def processDir(self, dir, glob=glob):
        """Glob each pattern inside *dir* and accumulate the hits."""
        for pattern in self._globPatterns:
            self._files.extend(glob(os.path.join(dir, pattern)))

    def files(self):
        """Return the accumulated list of matching file paths."""
        return self._files
class _GenSubberFunc:
"""Converts a 'sub' string in the form that one feeds to re.sub (backrefs,
groups, etc.) into a function that can be used to do the substitutions in
the FindAndReplace class."""
backrefRE = re.compile(r'\\([1-9][0-9]*)')
groupRE = re.compile(r'\\g<([a-zA-Z_][a-zA-Z_]*)>')
def __init__(self, replaceStr):
self._src = replaceStr
self._pos = 0
self._codeChunks = []
self.parse()
def src(self):
return self._src
def pos(self):
return self._pos
def setPos(self, pos):
self._pos = pos
def atEnd(self):
return self._pos >= len(self._src)
def advance(self, offset=1):
self._pos += offset
def readTo(self, to, start=None):
if start == None:
start = self._pos
self._pos = to
if self.atEnd():
return self._src[start:]
else:
return self._src[start:to]
## match and get methods
def matchBackref(self):
return self.backrefRE.match(self.src(), self.pos())
def getBackref(self):
m = self.matchBackref()
self.setPos(m.end())
return m.group(1)
def matchGroup(self):
return self.groupRE.match(self.src(), self.pos())
def getGroup(self):
m = self.matchGroup()
self.setPos(m.end())
return m.group(1)
## main parse loop and the eat methods
def parse(self):
while not self.atEnd():
if self.matchBackref():
self.eatBackref()
elif self.matchGroup():
self.eatGroup()
else:
self.eatStrConst()
def eatStrConst(self):
startPos = self.pos()
while not self.atEnd():
if self.matchBackref() or self.matchGroup():
break
else:
self.advance()
strConst = self.readTo(self.pos(), start=startPos)
self.addChunk(repr(strConst))
def eatBackref(self):
self.addChunk( 'm.group(' + self.getBackref() + ')' )
def eatGroup(self):
self.addChunk( 'm.group("' + self.getGroup() + '")' )
def addChunk(self, chunk):
self._codeChunks.append(chunk)
## code wrapping methods
def codeBody(self):
return ', '.join(self._codeChunks)
def code(self):
return "def subber(m):\n\treturn ''.join([%s])\n" % (self.codeBody())
def subberFunc(self):
exec(self.code())
return subber
class FindAndReplace:
    """Find and replace all instances of 'patternOrRE' with 'replacement' for
    each file in the 'files' list. This is a multi-file version of re.sub().

    'patternOrRE' can be a raw regex pattern or
    a regex object as generated by the re module. 'replacement' can be any
    string that would work with patternOrRE.sub(replacement, fileContents).
    """
    def __init__(self, files, patternOrRE, replacement,
                 recordResults=True):
        # Accept either a pattern string or a pre-compiled regex object.
        # NOTE(review): 'basestring' makes this class Python-2-only.
        if isinstance(patternOrRE, basestring):
            self._regex = re.compile(patternOrRE)
        else:
            self._regex = patternOrRE
        # Replacement may be an re.sub()-style template string (converted
        # to a callable via _GenSubberFunc) or an already-callable subber.
        if isinstance(replacement, basestring):
            self._subber = _GenSubberFunc(replacement).subberFunc()
        else:
            self._subber = replacement

        self._pattern = pattern = self._regex.pattern
        self._files = files
        self._results = {}
        self._recordResults = recordResults

        ## see if we should use pgrep to do the file matching
        # If running bare 'pgrep' writes a usage message to stderr the
        # binary exists; then probe it with this pattern on a scratch file
        # to make sure it accepts the pattern.
        self._usePgrep = False
        if (os.popen3('pgrep')[2].read()).startswith('Usage:'):
            ## now check to make sure pgrep understands the pattern
            tmpFile = mktemp()
            open(tmpFile, 'w').write('#')
            if not (os.popen3('pgrep "' + pattern + '" ' + tmpFile)[2].read()):
                # it didn't print an error msg so we're ok
                self._usePgrep = True
            os.remove(tmpFile)

        self._run()

    def results(self):
        # Per-file match info collected by _subDispatcher (empty unless
        # recordResults was true and matches were found).
        return self._results

    def _run(self):
        regex = self._regex
        subber = self._subDispatcher
        # NOTE(review): 'usePgrep' is bound but the loop tests
        # self._usePgrep directly; the local is unused.
        usePgrep = self._usePgrep
        pattern = self._pattern
        for file in self._files:
            if not os.path.isfile(file):
                continue # skip dirs etc.
            self._currFile = file
            found = False
            # 'orig' doubles as a "file contents already read" flag below;
            # clear any value left from the previous iteration.
            if 'orig' in locals():
                del orig
            if self._usePgrep:
                if os.popen('pgrep "' + pattern + '" ' + file ).read():
                    found = True
            else:
                orig = open(file).read()
                if regex.search(orig):
                    found = True
            if found:
                # Read the file only if the pgrep path skipped doing so.
                if 'orig' not in locals():
                    orig = open(file).read()
                new = regex.sub(subber, orig)
                open(file, 'w').write(new)

    def _subDispatcher(self, match):
        # Called by re.sub() for every match: optionally record match
        # statistics, then delegate to the real subber for the text.
        if self._recordResults:
            if self._currFile not in self._results:
                res = self._results[self._currFile] = {}
                res['count'] = 0
                res['matches'] = []
            else:
                res = self._results[self._currFile]
            res['count'] += 1
            res['matches'].append({'contents': match.group(),
                                   'start': match.start(),
                                   'end': match.end(),
                                   }
                                  )
        return self._subber(match)
class SourceFileStats:
    """Compute per-file and aggregate line counts (code, blank, comment)
    for a collection of source files."""

    # Per-file stats dict, filled in by __init__.
    _fileStats = None

    def __init__(self, files):
        self._fileStats = stats = {}
        for file in files:
            stats[file] = self.getFileStats(file)

    def rawStats(self):
        """Return the per-file stats dict keyed by filename."""
        return self._fileStats

    def summary(self):
        """Return totals accumulated over all analyzed files."""
        codeLines = 0
        blankLines = 0
        commentLines = 0
        totalLines = 0
        for fileStats in self.rawStats().values():
            codeLines += fileStats['codeLines']
            blankLines += fileStats['blankLines']
            commentLines += fileStats['commentLines']
            totalLines += fileStats['totalLines']
        stats = {'codeLines': codeLines,
                 'blankLines': blankLines,
                 'commentLines': commentLines,
                 'totalLines': totalLines,
                 }
        return stats

    def printStats(self):
        # Intentionally a no-op placeholder.
        pass

    def getFileStats(self, fileName):
        """Classify every line of *fileName* as comment, blank, or code.

        Bug fix: the original regexes required at least one leading
        whitespace character (r'\\s#.*$' and '\\s$'), so comments starting
        in column 0 and completely empty lines were miscounted as code.
        Both now use '\\s*'.
        """
        codeLines = 0
        blankLines = 0
        commentLines = 0
        commentLineRe = re.compile(r'\s*#.*$')
        blankLineRe = re.compile(r'\s*$')
        lines = open(fileName).read().splitlines()
        totalLines = len(lines)

        for line in lines:
            if commentLineRe.match(line):
                commentLines += 1
            elif blankLineRe.match(line):
                blankLines += 1
            else:
                codeLines += 1

        stats = {'codeLines': codeLines,
                 'blankLines': blankLines,
                 'commentLines': commentLines,
                 'totalLines': totalLines,
                 }
        return stats
| Python |
'''
Provides the core API for Cheetah.
See the docstring in the Template class and the Users' Guide for more information
'''
################################################################################
## DEPENDENCIES
import sys # used in the error handling code
import re # used to define the internal delims regex
import new # used to bind methods and create dummy modules
import logging
import string
import os.path
import time # used in the cache refresh code
from random import randrange
import imp
import inspect
import StringIO
import traceback
import pprint
import cgi # Used by .webInput() if the template is a CGI script.
import types
from types import StringType, ClassType
try:
from types import StringTypes
except ImportError:
StringTypes = (types.StringType, types.UnicodeType)
try:
from threading import Lock
except ImportError:
class Lock:
def acquire(self):
pass
def release(self):
pass
try:
x = set()
except NameError:
# Python 2.3 compatibility
from sets import Set as set
from Cheetah.Version import convertVersionStringToTuple, MinCompatibleVersionTuple
from Cheetah.Version import MinCompatibleVersion
# Base classes for Template
from Cheetah.Servlet import Servlet
# More intra-package imports ...
from Cheetah.Parser import ParseError, SourceReader
from Cheetah.Compiler import Compiler, DEFAULT_COMPILER_SETTINGS
from Cheetah import ErrorCatchers # for placeholder tags
from Cheetah import Filters # the output filters
from Cheetah.convertTmplPathToModuleName import convertTmplPathToModuleName
from Cheetah.Utils.Misc import checkKeywords # Used in Template.__init__
from Cheetah.Utils.Indenter import Indenter # Used in Template.__init__ and for
# placeholders
from Cheetah.NameMapper import NotFound, valueFromSearchList
from Cheetah.CacheStore import MemoryCacheStore, MemcachedCacheStore
from Cheetah.CacheRegion import CacheRegion
from Cheetah.Utils.WebInputMixin import _Converter, _lookup, NonNumericInputError
from Cheetah.Unspecified import Unspecified
# Decide whether to use the file modification time in file's cache key
__checkFileMtime = True
def checkFileMtime(value):
    # Module-level toggle.  Assignment goes through globals() because a
    # plain assignment inside the function would just create a local.
    globals()['__checkFileMtime'] = value
class Error(Exception):
    # Base class for exceptions raised by this module.
    pass
class PreprocessError(Error):
    # Raised when a template preprocessor (see Template.compile) fails.
    pass
def hashList(l):
    """Return a stable hash for a list, recursively collapsing nested
    dicts and lists (which are unhashable) into their own hashes."""
    def _collapse(item):
        if isinstance(item, dict):
            return hashDict(item)
        if isinstance(item, list):
            return hashList(item)
        return item
    return hash(tuple(_collapse(v) for v in l))
def hashDict(d):
    """Return a stable hash for a dict: sort the items for determinism and
    recursively collapse nested dicts/lists into their own hashes."""
    def _collapse(value):
        if isinstance(value, dict):
            return hashDict(value)
        if isinstance(value, list):
            return hashList(value)
        return value
    pairs = tuple((k, _collapse(v)) for k, v in sorted(d.items()))
    return hash(pairs)
################################################################################
## MODULE GLOBALS AND CONSTANTS
def _genUniqueModuleName(baseModuleName):
"""The calling code is responsible for concurrency locking.
"""
if baseModuleName not in sys.modules:
finalName = baseModuleName
else:
finalName = ('cheetah_%s_%s_%s'%(baseModuleName,
str(time.time()).replace('.', '_'),
str(randrange(10000, 99999))))
return finalName
# Cache of a cgi.FieldStorage() instance, maintained by .webInput().
# This is only relevant to templates used as CGI scripts.
_formUsedByWebInput = None
def updateLinecache(filename, src):
    """Register *src* under *filename* in the linecache so tracebacks can
    show source lines from dynamically generated template modules."""
    import linecache
    # linecache entries are (size, mtime, lines, fullname) tuples.
    entry = (len(src), time.time(), src.splitlines(), filename)
    linecache.cache[filename] = entry
class CompileCacheItem(object):
    # Simple attribute bag; presumably used for compilation-cache entries
    # (no uses are visible in this part of the module).
    pass
class TemplatePreprocessor(object):
    '''
    This is used with the preprocessors argument to Template.compile().

    See the docstring for Template.compile

    ** Preprocessors are an advanced topic **
    '''
    def __init__(self, settings):
        # 'settings' is a normalized preprocessor-settings object
        # (presumably produced by Template._normalizePreprocessorSettings).
        self._settings = settings

    def preprocess(self, source, file):
        """Create an intermediate template and return the source code
        it outputs
        """
        settings = self._settings
        if not source: # @@TR: this needs improving
            if isinstance(file, (str, unicode)): # it's a filename.
                f = open(file)
                source = f.read()
                f.close()
            elif hasattr(file, 'read'):
                source = file.read()
            # The file has been consumed into 'source'; don't pass it on.
            file = None

        templateAPIClass = settings.templateAPIClass
        # Forward only the settings that compile() actually accepts.
        # NOTE: im_func/func_code introspection is Python-2-only.
        possibleKwArgs = [
            arg for arg in
            inspect.getargs(templateAPIClass.compile.im_func.func_code)[0]
            if arg not in ('klass', 'source', 'file',)]

        compileKwArgs = {}
        for arg in possibleKwArgs:
            if hasattr(settings, arg):
                compileKwArgs[arg] = getattr(settings, arg)

        # Compile and render the intermediate template; its output is the
        # source for the real compilation pass.
        tmplClass = templateAPIClass.compile(source=source, file=file, **compileKwArgs)
        tmplInstance = tmplClass(**settings.templateInitArgs)
        outputSource = settings.outputTransformer(tmplInstance)
        outputFile = None
        return outputSource, outputFile
class Template(Servlet):
'''
This class provides a) methods used by templates at runtime and b)
methods for compiling Cheetah source code into template classes.
This documentation assumes you already know Python and the basics of object
oriented programming. If you don't know Python, see the sections of the
Cheetah Users' Guide for non-programmers. It also assumes you have read
about Cheetah's syntax in the Users' Guide.
The following explains how to use Cheetah from within Python programs or via
the interpreter. If you statically compile your templates on the command
line using the 'cheetah' script, this is not relevant to you. Statically
compiled Cheetah template modules/classes (e.g. myTemplate.py:
MyTemplateClass) are just like any other Python module or class. Also note,
most Python web frameworks (Webware, Aquarium, mod_python, Turbogears,
CherryPy, Quixote, etc.) provide plugins that handle Cheetah compilation for
you.
There are several possible usage patterns:
1) tclass = Template.compile(src)
t1 = tclass() # or tclass(namespaces=[namespace,...])
t2 = tclass() # or tclass(namespaces=[namespace2,...])
outputStr = str(t1) # or outputStr = t1.aMethodYouDefined()
Template.compile provides a rich and very flexible API via its
optional arguments so there are many possible variations of this
pattern. One example is:
tclass = Template.compile('hello $name from $caller', baseclass=dict)
print tclass(name='world', caller='me')
See the Template.compile() docstring for more details.
2) tmplInstance = Template(src)
# or Template(src, namespaces=[namespace,...])
outputStr = str(tmplInstance) # or outputStr = tmplInstance.aMethodYouDefined(...args...)
Notes on the usage patterns:
usage pattern 1)
This is the most flexible, but it is slightly more verbose unless you
write a wrapper function to hide the plumbing. Under the hood, all
other usage patterns are based on this approach. Templates compiled
this way can #extend (subclass) any Python baseclass: old-style or
new-style (based on object or a builtin type).
usage pattern 2)
This was Cheetah's original usage pattern. It returns an instance,
but you can still access the generated class via
tmplInstance.__class__. If you want to use several different
namespace 'searchLists' with a single template source definition,
you're better off with Template.compile (1).
Limitations (use pattern 1 instead):
- Templates compiled this way can only #extend subclasses of the
new-style 'object' baseclass. Cheetah.Template is a subclass of
'object'. You also can not #extend dict, list, or other builtin
types.
- If your template baseclass' __init__ constructor expects args there
is currently no way to pass them in.
If you need to subclass a dynamically compiled Cheetah class, do something like this:
from Cheetah.Template import Template
T1 = Template.compile('$meth1 #def meth1: this is meth1 in T1')
T2 = Template.compile('#implements meth1\nthis is meth1 redefined in T2', baseclass=T1)
print T1, T1()
print T2, T2()
Note about class and instance attribute names:
Attributes used by Cheetah have a special prefix to avoid confusion with
the attributes of the templates themselves or those of template
baseclasses.
Class attributes which are used in class methods look like this:
klass._CHEETAH_useCompilationCache (_CHEETAH_xxx)
Instance attributes look like this:
klass._CHEETAH__globalSetVars (_CHEETAH__xxx with 2 underscores)
'''
# this is used by ._addCheetahPlumbingCodeToClass()
_CHEETAH_requiredCheetahMethods = (
'_initCheetahInstance',
'searchList',
'errorCatcher',
'getVar',
'varExists',
'getFileContents',
'i18n',
'runAsMainProgram',
'respond',
'shutdown',
'webInput',
'serverSidePath',
'generatedClassCode',
'generatedModuleCode',
'_getCacheStore',
'_getCacheStoreIdPrefix',
'_createCacheRegion',
'getCacheRegion',
'getCacheRegions',
'refreshCache',
'_handleCheetahInclude',
'_getTemplateAPIClassForIncludeDirectiveCompilation',
)
_CHEETAH_requiredCheetahClassMethods = ('subclass',)
_CHEETAH_requiredCheetahClassAttributes = ('cacheRegionClass', 'cacheStore',
'cacheStoreIdPrefix', 'cacheStoreClass')
## the following are used by .compile(). Most are documented in its docstring.
_CHEETAH_cacheModuleFilesForTracebacks = False
_CHEETAH_cacheDirForModuleFiles = None # change to a dirname
_CHEETAH_compileCache = dict() # cache store for compiled code and classes
# To do something other than simple in-memory caching you can create an
# alternative cache store. It just needs to support the basics of Python's
# mapping/dict protocol. E.g.:
# class AdvCachingTemplate(Template):
# _CHEETAH_compileCache = MemoryOrFileCache()
_CHEETAH_compileLock = Lock() # used to prevent race conditions
_CHEETAH_defaultMainMethodName = None
_CHEETAH_compilerSettings = None
_CHEETAH_compilerClass = Compiler
_CHEETAH_compilerInstance = None
_CHEETAH_cacheCompilationResults = True
_CHEETAH_useCompilationCache = True
_CHEETAH_keepRefToGeneratedCode = True
_CHEETAH_defaultBaseclassForTemplates = None
_CHEETAH_defaultClassNameForTemplates = None
# defaults to DEFAULT_COMPILER_SETTINGS['mainMethodName']:
_CHEETAH_defaultMainMethodNameForTemplates = None
_CHEETAH_defaultModuleNameForTemplates = 'DynamicallyCompiledCheetahTemplate'
_CHEETAH_defaultModuleGlobalsForTemplates = None
_CHEETAH_preprocessors = None
_CHEETAH_defaultPreprocessorClass = TemplatePreprocessor
## The following attributes are used by instance methods:
_CHEETAH_generatedModuleCode = None
NonNumericInputError = NonNumericInputError
_CHEETAH_cacheRegionClass = CacheRegion
_CHEETAH_cacheStoreClass = MemoryCacheStore
#_CHEETAH_cacheStoreClass = MemcachedCacheStore
_CHEETAH_cacheStore = None
_CHEETAH_cacheStoreIdPrefix = None
    @classmethod
    def _getCompilerClass(klass, source=None, file=None):
        # Hook: subclasses may pick a compiler class based on the source
        # or file; this base implementation ignores both arguments.
        return klass._CHEETAH_compilerClass
    @classmethod
    def _getCompilerSettings(klass, source=None, file=None):
        # Hook: subclasses may vary compiler settings based on the source
        # or file; this base implementation ignores both arguments.
        return klass._CHEETAH_compilerSettings
@classmethod
def compile(klass, source=None, file=None,
returnAClass=True,
compilerSettings=Unspecified,
compilerClass=Unspecified,
moduleName=None,
className=Unspecified,
mainMethodName=Unspecified,
baseclass=Unspecified,
moduleGlobals=Unspecified,
cacheCompilationResults=Unspecified,
useCache=Unspecified,
preprocessors=Unspecified,
cacheModuleFilesForTracebacks=Unspecified,
cacheDirForModuleFiles=Unspecified,
commandlineopts=None,
keepRefToGeneratedCode=Unspecified,
):
"""
The core API for compiling Cheetah source code into template classes.
This class method compiles Cheetah source code and returns a python
class. You then create template instances using that class. All
Cheetah's other compilation API's use this method under the hood.
Internally, this method a) parses the Cheetah source code and generates
Python code defining a module with a single class in it, b) dynamically
creates a module object with a unique name, c) execs the generated code
in that module's namespace then inserts the module into sys.modules, and
d) returns a reference to the generated class. If you want to get the
generated python source code instead, pass the argument
returnAClass=False.
It caches generated code and classes. See the descriptions of the
arguments 'cacheCompilationResults' and 'useCache' for details. This
doesn't mean that templates will automatically recompile themselves when
the source file changes. Rather, if you call Template.compile(src) or
Template.compile(file=path) repeatedly it will attempt to return a
cached class definition instead of recompiling.
Hooks are provided for template source preprocessing. See the notes on the
'preprocessors' arg.
If you are an advanced user and need to customize the way Cheetah parses
source code or outputs Python code, you should check out the
compilerSettings argument.
Arguments:
You must provide either a 'source' or 'file' arg, but not both:
- source (string or None)
- file (string path, file-like object, or None)
The rest of the arguments are strictly optional. All but the first
have defaults in attributes of the Template class which can be
overridden in subclasses of this class. Working with most of these is
an advanced topic.
- returnAClass=True
If false, return the generated module code rather than a class.
- compilerSettings (a dict)
Default: Template._CHEETAH_compilerSettings=None
a dictionary of settings to override those defined in
DEFAULT_COMPILER_SETTINGS. These can also be overridden in your
template source code with the #compiler or #compiler-settings
directives.
- compilerClass (a class)
Default: Template._CHEETAH_compilerClass=Cheetah.Compiler.Compiler
a subclass of Cheetah.Compiler.Compiler. Mucking with this is a
very advanced topic.
- moduleName (a string)
Default:
Template._CHEETAH_defaultModuleNameForTemplates
='DynamicallyCompiledCheetahTemplate'
What to name the generated Python module. If the provided value is
None and a file arg was given, the moduleName is created from the
file path. In all cases if the moduleName provided is already in
sys.modules it is passed through a filter that generates a unique
variant of the name.
- className (a string)
Default: Template._CHEETAH_defaultClassNameForTemplates=None
What to name the generated Python class. If the provided value is
None, the moduleName is used as the class name.
- mainMethodName (a string)
Default:
Template._CHEETAH_defaultMainMethodNameForTemplates
=None (and thus DEFAULT_COMPILER_SETTINGS['mainMethodName'])
What to name the main output generating method in the compiled
template class.
- baseclass (a string or a class)
Default: Template._CHEETAH_defaultBaseclassForTemplates=None
Specifies the baseclass for the template without manually
including an #extends directive in the source. The #extends
directive trumps this arg.
If the provided value is a string you must make sure that a class
reference by that name is available to your template, either by
using an #import directive or by providing it in the arg
'moduleGlobals'.
If the provided value is a class, Cheetah will handle all the
details for you.
- moduleGlobals (a dict)
Default: Template._CHEETAH_defaultModuleGlobalsForTemplates=None
A dict of vars that will be added to the global namespace of the
module the generated code is executed in, prior to the execution
of that code. This should be Python values, not code strings!
- cacheCompilationResults (True/False)
Default: Template._CHEETAH_cacheCompilationResults=True
Tells Cheetah to cache the generated code and classes so that they
can be reused if Template.compile() is called multiple times with
the same source and options.
- useCache (True/False)
Default: Template._CHEETAH_useCompilationCache=True
Should the compilation cache be used? If True and a previous
compilation created a cached template class with the same source
code, compiler settings and other options, the cached template
class will be returned.
- cacheModuleFilesForTracebacks (True/False)
Default: Template._CHEETAH_cacheModuleFilesForTracebacks=False
In earlier versions of Cheetah tracebacks from exceptions that
were raised inside dynamically compiled Cheetah templates were
opaque because Python didn't have access to a python source file
to use in the traceback:
File "xxxx.py", line 192, in getTextiledContent
content = str(template(searchList=searchList))
File "cheetah_yyyy.py", line 202, in __str__
File "cheetah_yyyy.py", line 187, in respond
File "cheetah_yyyy.py", line 139, in writeBody
ZeroDivisionError: integer division or modulo by zero
It is now possible to keep those files in a cache dir and allow
Python to include the actual source lines in tracebacks and makes
them much easier to understand:
File "xxxx.py", line 192, in getTextiledContent
content = str(template(searchList=searchList))
File "/tmp/CheetahCacheDir/cheetah_yyyy.py", line 202, in __str__
def __str__(self): return self.respond()
File "/tmp/CheetahCacheDir/cheetah_yyyy.py", line 187, in respond
self.writeBody(trans=trans)
File "/tmp/CheetahCacheDir/cheetah_yyyy.py", line 139, in writeBody
__v = 0/0 # $(0/0)
ZeroDivisionError: integer division or modulo by zero
- cacheDirForModuleFiles (a string representing a dir path)
Default: Template._CHEETAH_cacheDirForModuleFiles=None
See notes on cacheModuleFilesForTracebacks.
- preprocessors
Default: Template._CHEETAH_preprocessors=None
** THIS IS A VERY ADVANCED TOPIC **
These are used to transform the source code prior to compilation.
They provide a way to use Cheetah as a code generator for Cheetah
code. In other words, you use one Cheetah template to output the
source code for another Cheetah template.
The major expected use cases are:
a) 'compile-time caching' aka 'partial template binding',
wherein an intermediate Cheetah template is used to output
the source for the final Cheetah template. The intermediate
template is a mix of a modified Cheetah syntax (the
'preprocess syntax') and standard Cheetah syntax. The
preprocessor syntax is executed at compile time and outputs
Cheetah code which is then compiled in turn. This approach
allows one to completely soft-code all the elements in the
template which are subject to change yet have it compile to
extremely efficient Python code with everything but the
elements that must be variable at runtime (per browser
request, etc.) compiled as static strings. Examples of this
usage pattern will be added to the Cheetah Users' Guide.
The'preprocess syntax' is just Cheetah's standard one with
alternatives for the $ and # tokens:
e.g. '@' and '%' for code like this
@aPreprocessVar $aRuntimeVar
%if aCompileTimeCondition then yyy else zzz
%% preprocessor comment
#if aRunTimeCondition then aaa else bbb
## normal comment
$aRuntimeVar
b) adding #import and #extends directives dynamically based on
the source
If preprocessors are provided, Cheetah pipes the source code
through each one in the order provided. Each preprocessor should
accept the args (source, file) and should return a tuple (source,
file).
The argument value should be a list, but a single non-list value
is acceptable and will automatically be converted into a list.
Each item in the list will be passed through
Template._normalizePreprocessor(). The items should either match
one of the following forms:
- an object with a .preprocess(source, file) method
- a callable with the following signature:
source, file = f(source, file)
or one of the forms below:
- a single string denoting the 2 'tokens' for the preprocess
syntax. The tokens should be in the order (placeholderToken,
directiveToken) and should separated with a space:
e.g. '@ %'
klass = Template.compile(src, preprocessors='@ %')
# or
klass = Template.compile(src, preprocessors=['@ %'])
- a dict with the following keys or an object with the
following attributes (all are optional, but nothing will
happen if you don't provide at least one):
- tokens: same as the single string described above. You can
also provide a tuple of 2 strings.
- searchList: the searchList used for preprocess $placeholders
- compilerSettings: used in the compilation of the intermediate
template
- templateAPIClass: an optional subclass of `Template`
- outputTransformer: a simple hook for passing in a callable
which can do further transformations of the preprocessor
output, or do something else like debug logging. The
default is str().
+ any keyword arguments to Template.compile which you want to
provide for the compilation of the intermediate template.
klass = Template.compile(src,
preprocessors=[ dict(tokens='@ %', searchList=[...]) ] )
"""
errmsg = "arg '%s' must be %s"
if not isinstance(source, (types.NoneType, basestring)):
raise TypeError(errmsg % ('source', 'string or None'))
if not isinstance(file, (types.NoneType, basestring, types.FileType)):
raise TypeError(errmsg %
('file', 'string, file-like object, or None'))
if baseclass is Unspecified:
baseclass = klass._CHEETAH_defaultBaseclassForTemplates
if isinstance(baseclass, Template):
baseclass = baseclass.__class__
if not isinstance(baseclass, (types.NoneType, basestring, types.ClassType, types.TypeType)):
raise TypeError(errmsg % ('baseclass', 'string, class or None'))
if cacheCompilationResults is Unspecified:
cacheCompilationResults = klass._CHEETAH_cacheCompilationResults
if not isinstance(cacheCompilationResults, (int, bool)):
raise TypeError(errmsg % ('cacheCompilationResults', 'boolean'))
if useCache is Unspecified:
useCache = klass._CHEETAH_useCompilationCache
if not isinstance(useCache, (int, bool)):
raise TypeError(errmsg % ('useCache', 'boolean'))
if compilerSettings is Unspecified:
compilerSettings = klass._getCompilerSettings(source, file) or {}
if not isinstance(compilerSettings, dict):
raise TypeError(errmsg % ('compilerSettings', 'dictionary'))
if compilerClass is Unspecified:
compilerClass = klass._getCompilerClass(source, file)
if preprocessors is Unspecified:
preprocessors = klass._CHEETAH_preprocessors
if keepRefToGeneratedCode is Unspecified:
keepRefToGeneratedCode = klass._CHEETAH_keepRefToGeneratedCode
if not isinstance(keepRefToGeneratedCode, (int, bool)):
raise TypeError(errmsg % ('keepReftoGeneratedCode', 'boolean'))
if not isinstance(moduleName, (types.NoneType, basestring)):
raise TypeError(errmsg % ('moduleName', 'string or None'))
__orig_file__ = None
if not moduleName:
if file and isinstance(file, basestring):
moduleName = convertTmplPathToModuleName(file)
__orig_file__ = file
else:
moduleName = klass._CHEETAH_defaultModuleNameForTemplates
if className is Unspecified:
className = klass._CHEETAH_defaultClassNameForTemplates
if not isinstance(className, (types.NoneType, basestring)):
raise TypeError(errmsg % ('className', 'string or None'))
className = re.sub(r'^_+','', className or moduleName)
if mainMethodName is Unspecified:
mainMethodName = klass._CHEETAH_defaultMainMethodNameForTemplates
if not isinstance(mainMethodName, (types.NoneType, basestring)):
raise TypeError(errmsg % ('mainMethodName', 'string or None'))
if moduleGlobals is Unspecified:
moduleGlobals = klass._CHEETAH_defaultModuleGlobalsForTemplates
if cacheModuleFilesForTracebacks is Unspecified:
cacheModuleFilesForTracebacks = klass._CHEETAH_cacheModuleFilesForTracebacks
if not isinstance(cacheModuleFilesForTracebacks, (int, bool)):
raise TypeError(errmsg %
('cacheModuleFilesForTracebacks', 'boolean'))
if cacheDirForModuleFiles is Unspecified:
cacheDirForModuleFiles = klass._CHEETAH_cacheDirForModuleFiles
if not isinstance(cacheDirForModuleFiles, (types.NoneType, basestring)):
raise TypeError(errmsg %
('cacheDirForModuleFiles', 'string or None'))
##################################################
## handle any preprocessors
if preprocessors:
origSrc = source
source, file = klass._preprocessSource(source, file, preprocessors)
##################################################
## compilation, using cache if requested/possible
baseclassValue = None
baseclassName = None
if baseclass:
if isinstance(baseclass, basestring):
baseclassName = baseclass
elif isinstance(baseclass, (types.ClassType, types.TypeType)):
# @@TR: should soft-code this
baseclassName = 'CHEETAH_dynamicallyAssignedBaseClass_'+baseclass.__name__
baseclassValue = baseclass
cacheHash = None
cacheItem = None
if source or isinstance(file, basestring):
compilerSettingsHash = None
if compilerSettings:
compilerSettingsHash = hashDict(compilerSettings)
moduleGlobalsHash = None
if moduleGlobals:
moduleGlobalsHash = hashDict(moduleGlobals)
fileHash = None
if file:
fileHash = str(hash(file))
if globals()['__checkFileMtime']:
fileHash += str(os.path.getmtime(file))
try:
# @@TR: find some way to create a cacheHash that is consistent
# between process restarts. It would allow for caching the
# compiled module on disk and thereby reduce the startup time
# for applications that use a lot of dynamically compiled
# templates.
cacheHash = ''.join([str(v) for v in
[hash(source),
fileHash,
className,
moduleName,
mainMethodName,
hash(compilerClass),
hash(baseclass),
compilerSettingsHash,
moduleGlobalsHash,
hash(cacheDirForModuleFiles),
]])
except:
#@@TR: should add some logging to this
pass
outputEncoding = 'ascii'
compiler = None
if useCache and cacheHash and cacheHash in klass._CHEETAH_compileCache:
cacheItem = klass._CHEETAH_compileCache[cacheHash]
generatedModuleCode = cacheItem.code
else:
compiler = compilerClass(source, file,
moduleName=moduleName,
mainClassName=className,
baseclassName=baseclassName,
mainMethodName=mainMethodName,
settings=(compilerSettings or {}))
if commandlineopts:
compiler.setShBang(commandlineopts.shbang)
compiler.compile()
generatedModuleCode = compiler.getModuleCode()
outputEncoding = compiler.getModuleEncoding()
if not returnAClass:
# This is a bit of a hackish solution to make sure we're setting the proper
# encoding on generated code that is destined to be written to a file
if not outputEncoding == 'ascii':
generatedModuleCode = generatedModuleCode.split('\n')
generatedModuleCode.insert(1, '# -*- coding: %s -*-' % outputEncoding)
generatedModuleCode = '\n'.join(generatedModuleCode)
return generatedModuleCode.encode(outputEncoding)
else:
if cacheItem:
cacheItem.lastCheckoutTime = time.time()
return cacheItem.klass
try:
klass._CHEETAH_compileLock.acquire()
uniqueModuleName = _genUniqueModuleName(moduleName)
__file__ = uniqueModuleName+'.py' # relative file path with no dir part
if cacheModuleFilesForTracebacks:
if not os.path.exists(cacheDirForModuleFiles):
raise Exception('%s does not exist'%cacheDirForModuleFiles)
__file__ = os.path.join(cacheDirForModuleFiles, __file__)
# @@TR: might want to assert that it doesn't already exist
open(__file__, 'w').write(generatedModuleCode)
# @@TR: should probably restrict the perms, etc.
mod = new.module(str(uniqueModuleName))
if moduleGlobals:
for k, v in moduleGlobals.items():
setattr(mod, k, v)
mod.__file__ = __file__
if __orig_file__ and os.path.exists(__orig_file__):
# this is used in the WebKit filemonitoring code
mod.__orig_file__ = __orig_file__
if baseclass and baseclassValue:
setattr(mod, baseclassName, baseclassValue)
##
try:
co = compile(generatedModuleCode, __file__, 'exec')
exec(co, mod.__dict__)
except SyntaxError, e:
try:
parseError = genParserErrorFromPythonException(
source, file, generatedModuleCode, exception=e)
except:
updateLinecache(__file__, generatedModuleCode)
e.generatedModuleCode = generatedModuleCode
raise e
else:
raise parseError
except Exception, e:
updateLinecache(__file__, generatedModuleCode)
e.generatedModuleCode = generatedModuleCode
raise
##
sys.modules[uniqueModuleName] = mod
finally:
klass._CHEETAH_compileLock.release()
templateClass = getattr(mod, className)
if (cacheCompilationResults
and cacheHash
and cacheHash not in klass._CHEETAH_compileCache):
cacheItem = CompileCacheItem()
cacheItem.cacheTime = cacheItem.lastCheckoutTime = time.time()
cacheItem.code = generatedModuleCode
cacheItem.klass = templateClass
templateClass._CHEETAH_isInCompilationCache = True
klass._CHEETAH_compileCache[cacheHash] = cacheItem
else:
templateClass._CHEETAH_isInCompilationCache = False
if keepRefToGeneratedCode or cacheCompilationResults:
templateClass._CHEETAH_generatedModuleCode = generatedModuleCode
# If we have a compiler object, let's set it to the compiler class
# to help the directive analyzer code
if compiler:
templateClass._CHEETAH_compilerInstance = compiler
return templateClass
@classmethod
def subclass(klass, *args, **kws):
    """Takes the same args as the .compile() classmethod and returns a
    template that is a subclass of the template this method is called from.

      T1 = Template.compile(' foo - $meth1 - bar\n#def meth1: this is T1.meth1')
      T2 = T1.subclass('#implements meth1\n this is T2.meth1')
    """
    kws['baseclass'] = klass
    # BUGFIX: the original tested isinstance(klass, Template); since klass
    # is a *class* (this is a classmethod), that was effectively always
    # False, so the call always fell back to the plain Template API class.
    # issubclass() implements the documented intent: compile through the
    # class this method was invoked on.
    if issubclass(klass, Template):
        templateAPIClass = klass
    else:
        templateAPIClass = Template
    return templateAPIClass.compile(*args, **kws)
@classmethod
def _preprocessSource(klass, source, file, preprocessors):
"""Iterates through the .compile() classmethod's preprocessors argument
and pipes the source code through each each preprocessor.
It returns the tuple (source, file) which is then used by
Template.compile to finish the compilation.
"""
if not isinstance(preprocessors, (list, tuple)):
preprocessors = [preprocessors]
for preprocessor in preprocessors:
preprocessor = klass._normalizePreprocessorArg(preprocessor)
source, file = preprocessor.preprocess(source, file)
return source, file
@classmethod
def _normalizePreprocessorArg(klass, arg):
"""Used to convert the items in the .compile() classmethod's
preprocessors argument into real source preprocessors. This permits the
use of several shortcut forms for defining preprocessors.
"""
if hasattr(arg, 'preprocess'):
return arg
elif hasattr(arg, '__call__'):
class WrapperPreprocessor:
def preprocess(self, source, file):
return arg(source, file)
return WrapperPreprocessor()
else:
class Settings(object):
placeholderToken = None
directiveToken = None
settings = Settings()
if isinstance(arg, str) or isinstance(arg, (list, tuple)):
settings.tokens = arg
elif isinstance(arg, dict):
for k, v in arg.items():
setattr(settings, k, v)
else:
settings = arg
settings = klass._normalizePreprocessorSettings(settings)
return klass._CHEETAH_defaultPreprocessorClass(settings)
@classmethod
def _normalizePreprocessorSettings(klass, settings):
    """Normalize a preprocessor settings object in place and return it.

    Fills in derived defaults (placeholder/directive tokens, searchList,
    outputTransformer, templateAPIClass, compilerSettings) so the
    intermediate preprocessor template can be compiled and run.

    Raises TypeError when neither a 'tokens' nor a 'compilerSettings'
    attribute was supplied; raises PreprocessError for a malformed
    'tokens' value.
    """
    # Always keep the intermediate template's generated code around
    # (useful for debugging the preprocess stage).
    settings.keepRefToGeneratedCode = True
    def normalizeSearchList(searchList):
        # A single namespace is silently wrapped in a list.
        if not isinstance(searchList, (list, tuple)):
            searchList = [searchList]
        return searchList
    def normalizeTokens(tokens):
        if isinstance(tokens, str):
            return tokens.split() # space delimited string e.g.'@ %'
        elif isinstance(tokens, (list, tuple)):
            return tokens
        else:
            raise PreprocessError('invalid tokens argument: %r'%tokens)
    if hasattr(settings, 'tokens'):
        # 'tokens' is (placeholderToken, directiveToken), e.g. '@ %'.
        (settings.placeholderToken,
         settings.directiveToken) = normalizeTokens(settings.tokens)
    if (not getattr(settings, 'compilerSettings', None)
        and not getattr(settings, 'placeholderToken', None) ):
        raise TypeError(
            'Preprocessor requires either a "tokens" or a "compilerSettings" arg.'
            ' Neither was provided.')
    if not hasattr(settings, 'templateInitArgs'):
        settings.templateInitArgs = {}
    if 'searchList' not in settings.templateInitArgs:
        # 'namespaces' is accepted as an alias for 'searchList'.
        if not hasattr(settings, 'searchList') and hasattr(settings, 'namespaces'):
            settings.searchList = settings.namespaces
        elif not hasattr(settings, 'searchList'):
            settings.searchList = []
        settings.templateInitArgs['searchList'] = settings.searchList
    settings.templateInitArgs['searchList'] = (
        normalizeSearchList(settings.templateInitArgs['searchList']))
    if not hasattr(settings, 'outputTransformer'):
        # NOTE: Python 2 builtin -- preprocessor output is coerced to unicode.
        settings.outputTransformer = unicode
    if not hasattr(settings, 'templateAPIClass'):
        # Subclass on the fly so per-preprocessor tweaks don't leak into klass.
        class PreprocessTemplateAPIClass(klass): pass
        settings.templateAPIClass = PreprocessTemplateAPIClass
    if not hasattr(settings, 'compilerSettings'):
        settings.compilerSettings = {}
    # Translate the two preprocess tokens into compiler-settings overrides.
    klass._updateSettingsWithPreprocessTokens(
        compilerSettings=settings.compilerSettings,
        placeholderToken=settings.placeholderToken,
        directiveToken=settings.directiveToken
        )
    return settings
@classmethod
def _updateSettingsWithPreprocessTokens(
klass, compilerSettings, placeholderToken, directiveToken):
if (placeholderToken and 'cheetahVarStartToken' not in compilerSettings):
compilerSettings['cheetahVarStartToken'] = placeholderToken
if directiveToken:
if 'directiveStartToken' not in compilerSettings:
compilerSettings['directiveStartToken'] = directiveToken
if 'directiveEndToken' not in compilerSettings:
compilerSettings['directiveEndToken'] = directiveToken
if 'commentStartToken' not in compilerSettings:
compilerSettings['commentStartToken'] = directiveToken*2
if 'multiLineCommentStartToken' not in compilerSettings:
compilerSettings['multiLineCommentStartToken'] = (
directiveToken+'*')
if 'multiLineCommentEndToken' not in compilerSettings:
compilerSettings['multiLineCommentEndToken'] = (
'*'+directiveToken)
if 'EOLSlurpToken' not in compilerSettings:
compilerSettings['EOLSlurpToken'] = directiveToken
@classmethod
def _addCheetahPlumbingCodeToClass(klass, concreteTemplateClass):
    """If concreteTemplateClass is not a subclass of Cheetah.Template, add
    the required cheetah methods and attributes to it.

    This is called on each new template class after it has been compiled.
    If concreteTemplateClass is not a subclass of Cheetah.Template but
    already has a method with the same name as one of the required cheetah
    methods, this will skip that method.
    """
    # Copy any missing required instance methods from Template onto the
    # target class, rebound as unbound methods of that class.
    for methodname in klass._CHEETAH_requiredCheetahMethods:
        if not hasattr(concreteTemplateClass, methodname):
            method = getattr(Template, methodname)
            # Python 2 API: new.instancemethod(func, None, cls) makes an
            # unbound method for concreteTemplateClass.
            newMethod = new.instancemethod(method.im_func, None, concreteTemplateClass)
            setattr(concreteTemplateClass, methodname, newMethod)

    # Copy any missing required classmethods.
    for classMethName in klass._CHEETAH_requiredCheetahClassMethods:
        if not hasattr(concreteTemplateClass, classMethName):
            meth = getattr(klass, classMethName)
            setattr(concreteTemplateClass, classMethName, classmethod(meth.im_func))

    # Copy any missing required class attributes (all '_CHEETAH_'-prefixed).
    for attrname in klass._CHEETAH_requiredCheetahClassAttributes:
        attrname = '_CHEETAH_'+attrname
        if not hasattr(concreteTemplateClass, attrname):
            attrVal = getattr(klass, attrname)
            setattr(concreteTemplateClass, attrname, attrVal)

    # Install __str__/__unicode__ unless the class already defines its own
    # (i.e. something other than the default object.__str__).
    if (not hasattr(concreteTemplateClass, '__str__')
            or concreteTemplateClass.__str__ is object.__str__):
        mainMethNameAttr = '_mainCheetahMethod_for_'+concreteTemplateClass.__name__
        mainMethName = getattr(concreteTemplateClass, mainMethNameAttr, None)
        if mainMethName:
            # Preferred path: render via the compiled template's main method.
            def __str__(self):
                rc = getattr(self, mainMethName)()
                # Python 2: str() must return bytes, so encode unicode output.
                if isinstance(rc, unicode):
                    return rc.encode('utf-8')
                return rc
            def __unicode__(self):
                return getattr(self, mainMethName)()
        elif (hasattr(concreteTemplateClass, 'respond')
              and concreteTemplateClass.respond != Servlet.respond):
            # The class overrides Servlet.respond -- render through that.
            def __str__(self):
                rc = self.respond()
                if isinstance(rc, unicode):
                    return rc.encode('utf-8')
                return rc
            def __unicode__(self):
                return self.respond()
        else:
            # Last resort: decide at call time which renderer is available.
            def __str__(self):
                rc = None
                if hasattr(self, mainMethNameAttr):
                    rc = getattr(self, mainMethNameAttr)()
                elif hasattr(self, 'respond'):
                    rc = self.respond()
                else:
                    rc = super(self.__class__, self).__str__()
                if isinstance(rc, unicode):
                    return rc.encode('utf-8')
                return rc
            def __unicode__(self):
                if hasattr(self, mainMethNameAttr):
                    return getattr(self, mainMethNameAttr)()
                elif hasattr(self, 'respond'):
                    return self.respond()
                else:
                    return super(self.__class__, self).__unicode__()
        # Bind the chosen implementations onto the target class.
        __str__ = new.instancemethod(__str__, None, concreteTemplateClass)
        __unicode__ = new.instancemethod(__unicode__, None, concreteTemplateClass)
        setattr(concreteTemplateClass, '__str__', __str__)
        setattr(concreteTemplateClass, '__unicode__', __unicode__)
def __init__(self, source=None,
             namespaces=None, searchList=None,
             # use either or. They are aliases for the same thing.
             file=None,
             filter='RawOrEncodedUnicode', # which filter from Cheetah.Filters
             filtersLib=Filters,
             errorCatcher=None,
             compilerSettings=Unspecified, # control the behaviour of the compiler
             _globalSetVars=None, # used internally for #include'd templates
             _preBuiltSearchList=None # used internally for #include'd templates
             ):
    """a) compiles a new template OR b) instantiates an existing template.

    Read this docstring carefully as there are two distinct usage patterns.
    You should also read this class' main docstring.

    a) to compile a new template:
         t = Template(source=aSourceString)
             # or
         t = Template(file='some/path')
             # or
         t = Template(file=someFileObject)
             # or
         namespaces = [{'foo':'bar'}]
         t = Template(source=aSourceString, namespaces=namespaces)
             # or
         t = Template(file='some/path', namespaces=namespaces)

         print t

    b) to create an instance of an existing, precompiled template class:
       ## i) first you need a reference to a compiled template class:
         tclass = Template.compile(source=src) # or just Template.compile(src)
             # or
         tclass = Template.compile(file='some/path')
             # or
         tclass = Template.compile(file=someFileObject)
             # or
         # if you used the command line compiler or have Cheetah's ImportHooks
         # installed your template class is also available via Python's
         # standard import mechanism:
         from ACompileTemplate import AcompiledTemplate as tclass

       ## ii) then you create an instance
         t = tclass(namespaces=namespaces)
             # or
         t = tclass(namespaces=namespaces, filter='RawOrEncodedUnicode')
         print t

    Arguments:
      for usage pattern a)
        If you are compiling a new template, you must provide either a
        'source' or 'file' arg, but not both:
          - source (string or None)
          - file (string path, file-like object, or None)

        Optional args (see below for more) :
          - compilerSettings
           Default: Template._CHEETAH_compilerSettings=None

           a dictionary of settings to override those defined in
           DEFAULT_COMPILER_SETTINGS. See
           Cheetah.Template.DEFAULT_COMPILER_SETTINGS and the Users' Guide
           for details.

        You can pass the source arg in as a positional arg with this usage
        pattern. Use keywords for all other args.

      for usage pattern b)
        Do not use positional args with this usage pattern, unless your
        template subclasses something other than Cheetah.Template and you
        want to pass positional args to that baseclass. E.g.:
            dictTemplate = Template.compile('hello $name from $caller', baseclass=dict)
            tmplvars = dict(name='world', caller='me')
            print dictTemplate(tmplvars)
        This usage requires all Cheetah args to be passed in as keyword args.

      optional args for both usage patterns:

        - namespaces (aka 'searchList')
          Default: None
          an optional list of namespaces (dictionaries, objects, modules,
          etc.) which Cheetah will search through to find the variables
          referenced in $placeholders.

          If you provide a single namespace instead of a list, Cheetah will
          automatically convert it into a list.

          NOTE: Cheetah does NOT force you to use the namespaces search list
          and related features. It's on by default, but you can turn if off
          using the compiler settings useSearchList=False or
          useNameMapper=False.

        - filter
          Default: 'EncodeUnicode'

          Which filter should be used for output filtering. This should
          either be a string which is the name of a filter in the
          'filtersLib' or a subclass of Cheetah.Filters.Filter. . See the
          Users' Guide for more details.

        - filtersLib
          Default: Cheetah.Filters

          A module containing subclasses of Cheetah.Filters.Filter. See the
          Users' Guide for more details.

        - errorCatcher
          Default: None

          This is a debugging tool. See the Users' Guide for more details.
          Do not use this or the #errorCatcher directive with live
          production systems.

    Do NOT mess with the args _globalSetVars or _preBuiltSearchList!
    """
    errmsg = "arg '%s' must be %s"
    errmsgextra = errmsg + "\n%s"

    # -- argument type validation (Python 2 'types' based checks) --
    if not isinstance(source, (types.NoneType, basestring)):
        raise TypeError(errmsg % ('source', 'string or None'))

    # NOTE(review): this condition re-validates 'source', although the
    # error message (and symmetry with Template.compile) show that it was
    # almost certainly meant to validate the 'file' arg -- as written,
    # 'file' is never type-checked here. Left unchanged because a "fix"
    # would start rejecting non-builtin file-like objects (e.g. StringIO)
    # that currently pass through; confirm intent before changing.
    if not isinstance(source, (types.NoneType, basestring, types.FileType)):
        raise TypeError(errmsg %
                        ('file', 'string, file open for reading, or None'))

    if not isinstance(filter, (basestring, types.TypeType)) and not \
            (isinstance(filter, types.ClassType) and issubclass(filter, Filters.Filter)):
        raise TypeError(errmsgextra %
                        ('filter', 'string or class',
                         '(if class, must be subclass of Cheetah.Filters.Filter)'))
    if not isinstance(filtersLib, (basestring, types.ModuleType)):
        raise TypeError(errmsgextra %
                        ('filtersLib', 'string or module',
                         '(if module, must contain subclasses of Cheetah.Filters.Filter)'))

    if not errorCatcher is None:
        # errorCatcher must be None, a string, a new-style type, or an
        # old-style class subclassing ErrorCatchers.ErrorCatcher.
        err = True
        if isinstance(errorCatcher, (basestring, types.TypeType)):
            err = False
        if isinstance(errorCatcher, types.ClassType) and \
                issubclass(errorCatcher, ErrorCatchers.ErrorCatcher):
            err = False
        if err:
            raise TypeError(errmsgextra %
                            ('errorCatcher', 'string, class or None',
                             '(if class, must be subclass of Cheetah.ErrorCatchers.ErrorCatcher)'))
    if compilerSettings is not Unspecified:
        if not isinstance(compilerSettings, types.DictType):
            raise TypeError(errmsg %
                            ('compilerSettings', 'dictionary'))

    # 'source' and 'file' are mutually exclusive.
    if source is not None and file is not None:
        raise TypeError("you must supply either a source string or the" +
                        " 'file' keyword argument, but not both")

    ##################################################
    ## Do superclass initialization.
    super(Template, self).__init__()

    ##################################################
    ## Do required version check
    # Precompiled template modules record the Cheetah version they were
    # built with; refuse to run ones older than MinCompatibleVersion.
    if not hasattr(self, '_CHEETAH_versionTuple'):
        try:
            mod = sys.modules[self.__class__.__module__]
            compiledVersion = mod.__CHEETAH_version__
            compiledVersionTuple = convertVersionStringToTuple(compiledVersion)
            if compiledVersionTuple < MinCompatibleVersionTuple:
                raise AssertionError(
                    'This template was compiled with Cheetah version'
                    ' %s. Templates compiled before version %s must be recompiled.'%(
                    compiledVersion, MinCompatibleVersion))
        except AssertionError:
            raise
        except:
            # Best-effort check: any lookup failure (no module entry, no
            # __CHEETAH_version__ attr, unparsable version) is ignored.
            pass

    ##################################################
    ## Setup instance state attributes used during the life of template
    ## post-compile
    # Warn (via logging) about searchList keys that collide with Template
    # class members, since the NameMapper would resolve those to the
    # Template attribute rather than the user's variable.
    if searchList:
        for namespace in searchList:
            if isinstance(namespace, dict):
                intersection = self.Reserved_SearchList & set(namespace.keys())
                warn = False
                if intersection:
                    warn = True
                if isinstance(compilerSettings, dict) and compilerSettings.get('prioritizeSearchListOverSelf'):
                    warn = False
                if warn:
                    logging.info(''' The following keys are members of the Template class and will result in NameMapper collisions! ''')
                    logging.info(''' > %s ''' % ', '.join(list(intersection)))
                    logging.info(''' Please change the key's name or use the compiler setting "prioritizeSearchListOverSelf=True" to prevent the NameMapper from using ''')
                    logging.info(''' the Template member in place of your searchList variable ''')

    self._initCheetahInstance(
        searchList=searchList, namespaces=namespaces,
        filter=filter, filtersLib=filtersLib,
        errorCatcher=errorCatcher,
        _globalSetVars=_globalSetVars,
        compilerSettings=compilerSettings,
        _preBuiltSearchList=_preBuiltSearchList)

    ##################################################
    ## Now, compile if we're meant to
    if (source is not None) or (file is not None):
        self._compile(source, file, compilerSettings=compilerSettings)
def generatedModuleCode(self):
    """Return the full module source the compiler generated, or None when
    no compilation took place for this template.
    """
    code = self._CHEETAH_generatedModuleCode
    return code
def generatedClassCode(self):
    """Return just the class portion of the generated module code, or None
    if no compilation took place.

    The slice runs from the first '\\nclass ' marker up to the
    '\\n## END CLASS DEFINITION' sentinel the compiler emits.
    """
    moduleCode = self._CHEETAH_generatedModuleCode
    classStart = moduleCode.find('\nclass ')
    classEnd = moduleCode.find('\n## END CLASS DEFINITION')
    return moduleCode[classStart:classEnd]
def searchList(self):
    """Return a reference to (not a copy of) the template's searchList."""
    return self._CHEETAH__searchList
def errorCatcher(self):
    """Return a reference to the currently installed errorCatcher."""
    return self._CHEETAH__errorCatcher
## cache methods ##
def _getCacheStore(self):
if not self._CHEETAH__cacheStore:
if self._CHEETAH_cacheStore is not None:
self._CHEETAH__cacheStore = self._CHEETAH_cacheStore
else:
# @@TR: might want to provide a way to provide init args
self._CHEETAH__cacheStore = self._CHEETAH_cacheStoreClass()
return self._CHEETAH__cacheStore
def _getCacheStoreIdPrefix(self):
if self._CHEETAH_cacheStoreIdPrefix is not None:
return self._CHEETAH_cacheStoreIdPrefix
else:
return str(id(self))
def _createCacheRegion(self, regionID):
return self._CHEETAH_cacheRegionClass(
regionID=regionID,
templateCacheIdPrefix=self._getCacheStoreIdPrefix(),
cacheStore=self._getCacheStore())
def getCacheRegion(self, regionID, cacheInfo=None, create=True):
    """Return the 'cache region' registered under *regionID*.

    When the region is missing (or falsy) and *create* is true, a new one
    is created and registered first. *cacheInfo* is accepted for interface
    compatibility but unused here.
    """
    region = self._CHEETAH__cacheRegions.get(regionID)
    if create and not region:
        region = self._createCacheRegion(regionID)
        self._CHEETAH__cacheRegions[regionID] = region
    return region
def getCacheRegions(self):
    """Return a snapshot dict of this template's initialized 'cache regions'.

    Each #cache directive block or $*cachedPlaceholder is a separate
    region. A copy is returned so callers cannot mutate internal state.
    """
    return dict(self._CHEETAH__cacheRegions)
def refreshCache(self, cacheRegionId=None, cacheItemId=None):
    """Refresh a cache region or a specific cache item within a region.

    With no arguments every region is cleared; with only *cacheRegionId*
    that whole region is cleared; with both, just the one cache item.
    Unknown region ids are silently ignored.
    """
    if not cacheRegionId:
        # BUGFIX: iterating the dict itself yields only keys, so the
        # original "for key, cregion in self.getCacheRegions():" raised
        # on the 2-tuple unpack instead of clearing anything. Iterate
        # the (key, region) pairs explicitly.
        for key, cregion in self.getCacheRegions().items():
            cregion.clear()
    else:
        cregion = self._CHEETAH__cacheRegions.get(cacheRegionId)
        if not cregion:
            return
        if not cacheItemId:  # clear the desired region and all its cacheItems
            cregion.clear()
        else:  # clear one specific cache of a specific region
            cache = cregion.getCacheItem(cacheItemId)
            if cache:
                cache.clear()
## end cache methods ##
def shutdown(self):
    """Break reference cycles before discarding a servlet.

    Superclass shutdown is best-effort; afterwards the searchList and the
    whole instance __dict__ are dropped so the GC can reclaim everything.
    """
    try:
        Servlet.shutdown(self)
    except Exception:
        # BUGFIX: was a bare 'except:', which also swallowed SystemExit
        # and KeyboardInterrupt. Superclass shutdown stays best-effort,
        # but control-flow exceptions now propagate.
        pass
    self._CHEETAH__searchList = None
    self.__dict__ = {}
## utility functions ##
def getVar(self, varName, default=Unspecified, autoCall=True):
    """Look up *varName* in the searchList.

    A leading '$' in *varName* is stripped. When the variable cannot be
    found, *default* is returned if one was given; otherwise the
    NameMapper.NotFound exception propagates.
    """
    cleanName = varName.replace('$', '')
    try:
        return valueFromSearchList(self.searchList(), cleanName, autoCall)
    except NotFound:
        if default is Unspecified:
            raise
        return default
def varExists(self, varName, autoCall=True):
    """Return True if *varName* (leading '$' ignored) can be resolved
    against the searchList, False otherwise.
    """
    cleanName = varName.replace('$', '')
    try:
        valueFromSearchList(self.searchList(), cleanName, autoCall)
    except NotFound:
        return False
    return True

# Backwards-compatible alias.
hasVar = varExists
def i18n(self, message, plural=None, n=None, id=None, domain=None,
         source=None, target=None, comment=None):
    """Hook for message translation -- currently just a stub that returns
    *message* unchanged.

    plural  = the plural form of the message
    n       = a sized argument to distinguish between single and plural forms
    id      = msgid in the translation catalog
    domain  = translation domain
    source  = source lang
    target  = a specific target lang
    comment = a comment to the translation team

    See the following for some ideas:
      http://www.zope.org/DevHome/Wikis/DevSite/Projects/ComponentArchitecture/ZPTInternationalizationSupport

    Other notes:
    - There is no need to replicate the i18n:name attribute from plone / PTL,
      as cheetah placeholders serve the same purpose
    """
    return message
def getFileContents(self, path):
"""A hook for getting the contents of a file. The default
implementation just uses the Python open() function to load local files.
This method could be reimplemented to allow reading of remote files via
various protocols, as PHP allows with its 'URL fopen wrapper'
"""
fp = open(path, 'r')
output = fp.read()
fp.close()
return output
def runAsMainProgram(self):
"""Allows the Template to function as a standalone command-line program
for static page generation.
Type 'python yourtemplate.py --help to see what it's capabable of.
"""
from TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=self).run()
##################################################
## internal methods -- not to be called by end-users
    def _initCheetahInstance(self,
                             searchList=None,
                             namespaces=None,
                             filter='RawOrEncodedUnicode', # which filter from Cheetah.Filters
                             filtersLib=Filters,
                             errorCatcher=None,
                             _globalSetVars=None,
                             compilerSettings=None,
                             _preBuiltSearchList=None):
        """Sets up the instance attributes that cheetah templates use at
        run-time.

        This is automatically called by the __init__ method of compiled
        templates.

        Note that the names of instance attributes used by Cheetah are prefixed
        with '_CHEETAH__' (2 underscores), where class attributes are prefixed
        with '_CHEETAH_' (1 underscore).
        """
        # Guard against double initialization (e.g. when _compile() swaps
        # self.__class__ and re-runs __init__).
        if getattr(self, '_CHEETAH__instanceInitialized', False):
            return
        # 'namespaces' is an alias for 'searchList'; only one may be given.
        if namespaces is not None:
            assert searchList is None, (
                'Provide "namespaces" or "searchList", not both!')
            searchList = namespaces
        # A single non-sequence searchList argument is wrapped in a list.
        if searchList is not None and not isinstance(searchList, (list, tuple)):
            searchList = [searchList]
        self._CHEETAH__globalSetVars = {}
        if _globalSetVars is not None:
            # this is intended to be used internally by Nested Templates in #include's
            self._CHEETAH__globalSetVars = _globalSetVars
        if _preBuiltSearchList is not None:
            # happens with nested Template obj creation from #include's
            self._CHEETAH__searchList = list(_preBuiltSearchList)
            self._CHEETAH__searchList.append(self)
        else:
            # create our own searchList; globals first, then self, then any
            # user-supplied namespaces (unless prioritized ahead of self).
            self._CHEETAH__searchList = [self._CHEETAH__globalSetVars, self]
            if searchList is not None:
                if isinstance(compilerSettings, dict) and compilerSettings.get('prioritizeSearchListOverSelf'):
                    self._CHEETAH__searchList = searchList + self._CHEETAH__searchList
                else:
                    self._CHEETAH__searchList.extend(list(searchList))
        self._CHEETAH__cheetahIncludes = {}   # cache of compiled/raw #include's
        self._CHEETAH__cacheRegions = {}
        self._CHEETAH__indenter = Indenter()
        # @@TR: consider allowing simple callables as the filter argument
        self._CHEETAH__filtersLib = filtersLib
        self._CHEETAH__filters = {}
        # 'filter' may be a filter-class name looked up in filtersLib, or a
        # filter class itself.
        if isinstance(filter, basestring):
            filterName = filter
            klass = getattr(self._CHEETAH__filtersLib, filterName)
        else:
            klass = filter
            filterName = klass.__name__
        self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName] = klass(self).filter
        self._CHEETAH__initialFilter = self._CHEETAH__currentFilter
        self._CHEETAH__errorCatchers = {}
        if errorCatcher:
            if isinstance(errorCatcher, basestring):
                errorCatcherClass = getattr(ErrorCatchers, errorCatcher)
            elif isinstance(errorCatcher, ClassType):
                errorCatcherClass = errorCatcher
            # NOTE(review): if errorCatcher is neither a string nor a classic
            # class (e.g. a new-style class), errorCatcherClass is unbound
            # here and the next line raises UnboundLocalError -- confirm
            # whether new-style catcher classes need supporting.
            self._CHEETAH__errorCatcher = ec = errorCatcherClass(self)
            # NOTE(review): for a string errorCatcher this key is the name of
            # the *string* type ('str'), not the catcher's own name -- looks
            # unintended; verify before relying on this dict's keys.
            self._CHEETAH__errorCatchers[errorCatcher.__class__.__name__] = ec
        else:
            self._CHEETAH__errorCatcher = None
        self._CHEETAH__initErrorCatcher = self._CHEETAH__errorCatcher
        if not hasattr(self, 'transaction'):
            self.transaction = None
        self._CHEETAH__instanceInitialized = True
        self._CHEETAH__isBuffering = False
        self._CHEETAH__isControlledByWebKit = False
        # Fall back to the class-level cache store (single underscore) when
        # one was configured.
        self._CHEETAH__cacheStore = None
        if self._CHEETAH_cacheStore is not None:
            self._CHEETAH__cacheStore = self._CHEETAH_cacheStore
    def _compile(self, source=None, file=None, compilerSettings=Unspecified,
                 moduleName=None, mainMethodName=None):
        """Compile the template. This method is automatically called by
        Template.__init__ it is provided with 'file' or 'source' args.

        USERS SHOULD *NEVER* CALL THIS METHOD THEMSELVES.  Use Template.compile
        instead.
        """
        if compilerSettings is Unspecified:
            # No explicit settings: let the template source itself supply
            # them (e.g. via a #compiler-settings directive), else use {}.
            compilerSettings = self._getCompilerSettings(source, file) or {}
        mainMethodName = mainMethodName or self._CHEETAH_defaultMainMethodName
        self._fileMtime = None
        self._fileDirName = None
        self._fileBaseName = None
        if file and isinstance(file, basestring):
            # Resolve the path and record mtime/location so stale compiled
            # templates can be detected later.
            file = self.serverSidePath(file)
            self._fileMtime = os.path.getmtime(file)
            self._fileDirName, self._fileBaseName = os.path.split(file)
        self._filePath = file
        templateClass = self.compile(source, file,
                                     moduleName=moduleName,
                                     mainMethodName=mainMethodName,
                                     compilerSettings=compilerSettings,
                                     keepRefToGeneratedCode=True)
        # Swap this instance's class for the freshly compiled template class.
        self.__class__ = templateClass
        # must initialize it so instance attributes are accessible
        templateClass.__init__(self,
                               #_globalSetVars=self._CHEETAH__globalSetVars,
                               #_preBuiltSearchList=self._CHEETAH__searchList
                               )
        if not hasattr(self, 'transaction'):
            self.transaction = None
    def _handleCheetahInclude(self, srcArg, trans=None, includeFrom='file', raw=False):
        """Called at runtime to handle #include directives.

        The include is compiled (or, for raw includes, read as plain text)
        on first use and cached in self._CHEETAH__cheetahIncludes keyed by
        *srcArg*; subsequent calls reuse the cached entry.  Non-raw includes
        are nested Template instances whose output is written via respond();
        raw includes are strings written straight to the transaction.
        """
        _includeID = srcArg
        if _includeID not in self._CHEETAH__cheetahIncludes:
            if not raw:
                if includeFrom == 'file':
                    source = None
                    # srcArg may be a path string or an open file-like object.
                    if type(srcArg) in StringTypes:
                        if hasattr(self, 'serverSidePath'):
                            file = path = self.serverSidePath(srcArg)
                        else:
                            file = path = os.path.normpath(srcArg)
                    else:
                        file = srcArg ## a file-like object
                else:
                    source = srcArg
                    file = None
                # @@TR: might want to provide some syntax for specifying the
                # Template class to be used for compilation so compilerSettings
                # can be changed.
                compiler = self._getTemplateAPIClassForIncludeDirectiveCompilation(source, file)
                nestedTemplateClass = compiler.compile(source=source, file=file)
                # The nested template shares this template's searchList and
                # global #set vars.
                nestedTemplate = nestedTemplateClass(_preBuiltSearchList=self.searchList(),
                                                     _globalSetVars=self._CHEETAH__globalSetVars)
                # Set the inner template filters to the initial filter of the
                # outer template:
                # this is the only really safe way to use
                # filter='WebSafe'.
                nestedTemplate._CHEETAH__initialFilter = self._CHEETAH__initialFilter
                nestedTemplate._CHEETAH__currentFilter = self._CHEETAH__initialFilter
                self._CHEETAH__cheetahIncludes[_includeID] = nestedTemplate
            else:
                # Raw include: cache the file contents / source string as-is.
                if includeFrom == 'file':
                    path = self.serverSidePath(srcArg)
                    self._CHEETAH__cheetahIncludes[_includeID] = self.getFileContents(path)
                else:
                    self._CHEETAH__cheetahIncludes[_includeID] = srcArg
        ##
        if not raw:
            self._CHEETAH__cheetahIncludes[_includeID].respond(trans)
        else:
            trans.response().write(self._CHEETAH__cheetahIncludes[_includeID])
def _getTemplateAPIClassForIncludeDirectiveCompilation(self, source, file):
"""Returns the subclass of Template which should be used to compile
#include directives.
This abstraction allows different compiler settings to be used in the
included template than were used in the parent.
"""
if issubclass(self.__class__, Template):
return self.__class__
else:
return Template
## functions for using templates as CGI scripts
def webInput(self, names, namesMulti=(), default='', src='f',
defaultInt=0, defaultFloat=0.00, badInt=0, badFloat=0.00, debug=False):
"""Method for importing web transaction variables in bulk.
This works for GET/POST fields both in Webware servlets and in CGI
scripts, and for cookies and session variables in Webware servlets. If
you try to read a cookie or session variable in a CGI script, you'll get
a RuntimeError. 'In a CGI script' here means 'not running as a Webware
servlet'. If the CGI environment is not properly set up, Cheetah will
act like there's no input.
The public method provided is:
def webInput(self, names, namesMulti=(), default='', src='f',
defaultInt=0, defaultFloat=0.00, badInt=0, badFloat=0.00, debug=False):
This method places the specified GET/POST fields, cookies or session
variables into a dictionary, which is both returned and put at the
beginning of the searchList. It handles:
* single vs multiple values
* conversion to integer or float for specified names
* default values/exceptions for missing or bad values
* printing a snapshot of all values retrieved for debugging
All the 'default*' and 'bad*' arguments have 'use or raise' behavior,
meaning that if they're a subclass of Exception, they're raised. If
they're anything else, that value is substituted for the missing/bad
value.
The simplest usage is:
#silent $webInput(['choice'])
$choice
dic = self.webInput(['choice'])
write(dic['choice'])
Both these examples retrieves the GET/POST field 'choice' and print it.
If you leave off the'#silent', all the values would be printed too. But
a better way to preview the values is
#silent $webInput(['name'], $debug=1)
because this pretty-prints all the values inside HTML <PRE> tags.
** KLUDGE: 'debug' is supposed to insert into the template output, but it
wasn't working so I changed it to a'print' statement. So the debugging
output will appear wherever standard output is pointed, whether at the
terminal, in a Webware log file, or whatever. ***
Since we didn't specify any coversions, the value is a string. It's a
'single' value because we specified it in 'names' rather than
'namesMulti'. Single values work like this:
* If one value is found, take it.
* If several values are found, choose one arbitrarily and ignore the rest.
* If no values are found, use or raise the appropriate 'default*' value.
Multi values work like this:
* If one value is found, put it in a list.
* If several values are found, leave them in a list.
* If no values are found, use the empty list ([]). The 'default*'
arguments are *not* consulted in this case.
Example: assume 'days' came from a set of checkboxes or a multiple combo
box on a form, and the user chose'Monday', 'Tuesday' and 'Thursday'.
#silent $webInput([], ['days'])
The days you chose are: #slurp
#for $day in $days
$day #slurp
#end for
dic = self.webInput([], ['days'])
write('The days you chose are: ')
for day in dic['days']:
write(day + ' ')
Both these examples print: 'The days you chose are: Monday Tuesday Thursday'.
By default, missing strings are replaced by '' and missing/bad numbers
by zero. (A'bad number' means the converter raised an exception for
it, usually because of non-numeric characters in the value.) This
mimics Perl/PHP behavior, and simplifies coding for many applications
where missing/bad values *should* be blank/zero. In those relatively
few cases where you must distinguish between empty-string/zero on the
one hand and missing/bad on the other, change the appropriate
'default*' and 'bad*' arguments to something like:
* None
* another constant value
* $NonNumericInputError/self.NonNumericInputError
* $ValueError/ValueError
(NonNumericInputError is defined in this class and is useful for
distinguishing between bad input vs a TypeError/ValueError thrown for
some other rason.)
Here's an example using multiple values to schedule newspaper
deliveries. 'checkboxes' comes from a form with checkboxes for all the
days of the week. The days the user previously chose are preselected.
The user checks/unchecks boxes as desired and presses Submit. The value
of 'checkboxes' is a list of checkboxes that were checked when Submit
was pressed. Our task now is to turn on the days the user checked, turn
off the days he unchecked, and leave on or off the days he didn't
change.
dic = self.webInput([], ['dayCheckboxes'])
wantedDays = dic['dayCheckboxes'] # The days the user checked.
for day, on in self.getAllValues():
if not on and wantedDays.has_key(day):
self.TurnOn(day)
# ... Set a flag or insert a database record ...
elif on and not wantedDays.has_key(day):
self.TurnOff(day)
# ... Unset a flag or delete a database record ...
'source' allows you to look up the variables from a number of different
sources:
'f' fields (CGI GET/POST parameters)
'c' cookies
's' session variables
'v' 'values', meaning fields or cookies
In many forms, you're dealing only with strings, which is why the
'default' argument is third and the numeric arguments are banished to
the end. But sometimes you want automatic number conversion, so that
you can do numeric comparisions in your templates without having to
write a bunch of conversion/exception handling code. Example:
#silent $webInput(['name', 'height:int'])
$name is $height cm tall.
#if $height >= 300
Wow, you're tall!
#else
Pshaw, you're short.
#end if
dic = self.webInput(['name', 'height:int'])
name = dic[name]
height = dic[height]
write('%s is %s cm tall.' % (name, height))
if height > 300:
write('Wow, you're tall!')
else:
write('Pshaw, you're short.')
To convert a value to a number, suffix ':int' or ':float' to the name.
The method will search first for a 'height:int' variable and then for a
'height' variable. (It will be called 'height' in the final
dictionary.) If a numeric conversion fails, use or raise 'badInt' or
'badFloat'. Missing values work the same way as for strings, except the
default is 'defaultInt' or 'defaultFloat' instead of 'default'.
If a name represents an uploaded file, the entire file will be read into
memory. For more sophistocated file-upload handling, leave that name
out of the list and do your own handling, or wait for
Cheetah.Utils.UploadFileMixin.
This only in a subclass that also inherits from Webware's Servlet or
HTTPServlet. Otherwise you'll get an AttributeError on 'self.request'.
EXCEPTIONS: ValueError if 'source' is not one of the stated characters.
TypeError if a conversion suffix is not ':int' or ':float'.
FUTURE EXPANSION: a future version of this method may allow source
cascading; e.g., 'vs' would look first in 'values' and then in session
variables.
Meta-Data
================================================================================
Author: Mike Orr <iron@mso.oz.net>
License: This software is released for unlimited distribution under the
terms of the MIT license. See the LICENSE file.
Version: $Revision: 1.186 $
Start Date: 2002/03/17
Last Revision Date: $Date: 2008/03/10 04:48:11 $
"""
src = src.lower()
isCgi = not self._CHEETAH__isControlledByWebKit
if isCgi and src in ('f', 'v'):
global _formUsedByWebInput
if _formUsedByWebInput is None:
_formUsedByWebInput = cgi.FieldStorage()
source, func = 'field', _formUsedByWebInput.getvalue
elif isCgi and src == 'c':
raise RuntimeError("can't get cookies from a CGI script")
elif isCgi and src == 's':
raise RuntimeError("can't get session variables from a CGI script")
elif isCgi and src == 'v':
source, func = 'value', self.request().value
elif isCgi and src == 's':
source, func = 'session', self.request().session().value
elif src == 'f':
source, func = 'field', self.request().field
elif src == 'c':
source, func = 'cookie', self.request().cookie
elif src == 'v':
source, func = 'value', self.request().value
elif src == 's':
source, func = 'session', self.request().session().value
else:
raise TypeError("arg 'src' invalid")
sources = source + 's'
converters = {
'': _Converter('string', None, default, default ),
'int': _Converter('int', int, defaultInt, badInt ),
'float': _Converter('float', float, defaultFloat, badFloat), }
#pprint.pprint(locals()); return {}
dic = {} # Destination.
for name in names:
k, v = _lookup(name, func, False, converters)
dic[k] = v
for name in namesMulti:
k, v = _lookup(name, func, True, converters)
dic[k] = v
# At this point, 'dic' contains all the keys/values we want to keep.
# We could split the method into a superclass
# method for Webware/WebwareExperimental and a subclass for Cheetah.
# The superclass would merely 'return dic'. The subclass would
# 'dic = super(ThisClass, self).webInput(names, namesMulti, ...)'
# and then the code below.
if debug:
print("<PRE>\n" + pprint.pformat(dic) + "\n</PRE>\n\n")
self.searchList().insert(0, dic)
return dic
T = Template # Short and sweet for debugging at the >>> prompt.

# Snapshot of the attribute names Template itself defines; presumably used
# elsewhere to detect searchList/#set names that would shadow Template's own
# attributes -- TODO confirm against the compiler/NameMapper usage.
Template.Reserved_SearchList = set(dir(Template))
def genParserErrorFromPythonException(source, file, generatedPyCode, exception):
    """Convert a Python exception raised while handling the code Cheetah
    generated for a template into a ParseError that points back at the
    template source.

    Builds a small numbered excerpt of the generated Python around the
    failing line, then tries to recover the original Cheetah 'line N, col M'
    position from the traceback text (or, failing that, guesses it from
    position markers embedded in the generated code near the failing line).
    """
    #print dir(exception)
    # and/or idiom kept from the original: '' also maps to None here.
    filename = isinstance(file, (str, unicode)) and file or None
    sio = StringIO.StringIO()
    traceback.print_exc(1, sio)
    formatedExc = sio.getvalue()
    if hasattr(exception, 'lineno'):
        # SyntaxError and friends carry the line number directly.
        pyLineno = exception.lineno
    else:
        # Otherwise pull it out of the formatted traceback text.
        pyLineno = int(re.search(r'[ \t]*File.*line (\d+)', formatedExc).group(1))
    lines = generatedPyCode.splitlines()
    prevLines = [] # (lineno, content) -- up to 3 lines ending at the error line
    for i in range(1, 4):
        if pyLineno-i <= 0:
            break
        # 1-based line number N maps to lines[N-1].
        prevLines.append( (pyLineno+1-i, lines[pyLineno-i]) )
    nextLines = [] # (lineno, content) -- up to 3 lines after the error line
    for i in range(1, 4):
        if not pyLineno+i-1 < len(lines):
            break
        # BUGFIX: was lines[pyLineno+i], which displayed the line *after* the
        # one whose number was printed.  lines[pyLineno+i-1] matches the
        # 1-based -> 0-based mapping used for prevLines above.
        nextLines.append( (pyLineno+i, lines[pyLineno+i-1]) )
    nextLines.reverse()
    report = 'Line|Python Code\n'
    report += '----|-------------------------------------------------------------\n'
    while prevLines:
        lineInfo = prevLines.pop()
        report += "%(row)-4d|%(line)s\n"% {'row':lineInfo[0], 'line':lineInfo[1]}
    if hasattr(exception, 'offset'):
        # Caret under the offending column; +3 accounts for the 4-char
        # line-number gutter.
        report += ' '*(3+(exception.offset or 0)) + '^\n'
    while nextLines:
        lineInfo = nextLines.pop()
        report += "%(row)-4d|%(line)s\n"% {'row':lineInfo[0], 'line':lineInfo[1]}
    message = [
        "Error in the Python code which Cheetah generated for this template:",
        '='*80,
        '',
        str(exception),
        '',
        report,
        '='*80,
    ]
    # Preferred: the traceback text already names the Cheetah position.
    cheetahPosMatch = re.search(r'line (\d+), col (\d+)', formatedExc)
    if cheetahPosMatch:
        lineno = int(cheetahPosMatch.group(1))
        col = int(cheetahPosMatch.group(2))
        #if hasattr(exception, 'offset'):
        # col = exception.offset
        message.append('\nHere is the corresponding Cheetah code:\n')
    else:
        lineno = None
        col = None
        # Fallback: scan the generated code from just before the failing
        # line for an embedded position marker, and flag it as a guess.
        cheetahPosMatch = re.search(r'line (\d+), col (\d+)',
                                    '\n'.join(lines[max(pyLineno-2, 0):]))
        if cheetahPosMatch:
            lineno = int(cheetahPosMatch.group(1))
            col = int(cheetahPosMatch.group(2))
            message.append('\nHere is the corresponding Cheetah code.')
            message.append('** I had to guess the line & column numbers,'
                           ' so they are probably incorrect:\n')
    message = '\n'.join(message)
    reader = SourceReader(source, filename=filename)
    return ParseError(reader, message, lineno=lineno, col=col)
# vim: shiftwidth=4 tabstop=4 expandtab
| Python |
#
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.