code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
#Port test using the Ports class.
#Fix: the original `print Puertos.BuscarPorTipo("input"` line was missing its
#closing parenthesis, so this script was a SyntaxError and never ran.
from Ports import Ports

Puertos = Ports("PortConf.cf", "Constantes.cf")

# (port, role) table instead of 17 near-identical SetPortConfig calls.
_CONFIG = [
    ("PORT00", "input"), ("PORT01", "input"), ("PORT02", "input"),
    ("PORT03", "output"), ("PORT04", "undef"), ("PORT05", "undef"),
    ("PORT06", "undef"), ("PORT07", "undef"), ("PORT08", "undef"),
    ("PORT09", "undef"), ("PORT10", "undef"), ("PORT11", "undef"),
    ("PORT12", "output"), ("PORT13", "output"), ("PORT14", "output"),
    ("PORT15", "output"), ("PORT16", "sensor"),
]
for _puerto, _tipo in _CONFIG:
    Puertos.SetPortConfig(_puerto, _tipo)

# print() with a single argument is valid on both Python 2 and Python 3.
print(Puertos.BuscarPorTipo("input"))
print("")
print(Puertos.BuscarPorTipo("sensor"))
print("")
print(Puertos.BuscarPorTipo("output"))
| Python |
import subprocess
import re
import sys
import time
import datetime
class Sensor:
    """Wrapper around the local ./Adafruit_DHT helper binary for DHT11/DHT22
    temperature/humidity sensors."""

    def __init__(self, N_sensor, N_pin):
        # Sensor model (11 or 22) and GPIO pin, normalized to stripped strings
        # because they are passed as command-line arguments.
        self.Senc = str(N_sensor).strip()
        self.Pin = str(N_pin).strip()

    def Obtener_Datos(self):
        """Run the sensor binary and return "temp_humidity" (two numeric
        strings joined by '_'), or the sentinel "error" on any failure.

        Fixes vs. original: removed the unused `cadena` local and the dead
        commented-out call; narrowed the bare `except:` to `except Exception`
        so KeyboardInterrupt/SystemExit are not swallowed.
        """
        try:
            self.output = subprocess.check_output(
                ["./Adafruit_DHT", self.Senc, self.Pin])
            matches = re.search(r"Temp =\s+([0-9.]+)", self.output)
            temp = str(matches.group(1))
            matches = re.search(r"Hum =\s+([0-9.]+)", self.output)
            humidity = str(matches.group(1))
            return temp + "_" + humidity
        except Exception:
            # Binary missing, non-zero exit, or unparseable output (matches is
            # then None).  Callers compare against the "error" sentinel.
            return "error"
| Python |
#Constantes
# Logical port name -> physical Raspberry Pi header pin number.
PORT00 = 8
PORT01 = 10
PORT02 = 12
PORT03 = 16
PORT04 = 3
PORT05 = 5
PORT06 = 7
PORT07 = 11
PORT08 = 13
PORT09 = 15
PORT10 = 19
PORT11 = 21
PORT12 = 23
PORT13 = 18
PORT14 = 22
PORT15 = 24
PORT16 = 26
# DHT sensor model numbers (passed to the Adafruit_DHT binary).
DHT11 = 11
DHT22 = 22
# Fix: a stray bare name `sarasa` used to follow here; it raised NameError as
# soon as this module was imported (e.g. by `from Constantes import *`).
| Python |
#Configurator
#Este Modulo se encarga de leer y escribir las lineas del archivo
#De Configuracion
class Configurator:
    """Read and rewrite 'key=value' lines in a device configuration file."""

    def __init__(self, ConfigFile):
        # Path of the configuration file; create it with CrearArchivoDefault().
        self.ArchivoConfiguracion = ConfigFile

    def CrearArchivoDefault(self):
        """(Re)create the configuration file with factory-default values."""
        self.__NuevoArchivoConfig()

    def GetIpAddress(self):
        return self.__BuscarParmetro("IPAddress")

    def SetIpAdrees(self, IP):
        # (sic) misspelled name kept: callers elsewhere use it.
        self.__ModificarParmetro("IPAddress", IP)

    def GetNetMask(self):
        return self.__BuscarParmetro("NetMask")

    def SetNetMask(self, NetMask):
        self.__ModificarParmetro("NetMask", NetMask)

    def GetGateway(self):
        return self.__BuscarParmetro("Gateway")

    def SetGateway(self, Gateway):
        self.__ModificarParmetro("Gateway", Gateway)

    def GetNombreDispositivo(self):
        return self.__BuscarParmetro("Nombre")

    def SetNombreDispositivo(self, Nombre):
        self.__ModificarParmetro("Nombre", Nombre)

    def GetCodigoCliente(self):
        return self.__BuscarParmetro("ClientCode")

    def SetCodigoCliente(self, Code):
        self.__ModificarParmetro("ClientCode", Code)

    def GetServerAddress(self):
        return self.__BuscarParmetro("Server")

    def SetServerAddres(self, Server):
        # (sic) misspelled name kept for backward compatibility.
        self.__ModificarParmetro("Server", Server)

    #----------Private string/file handling helpers----------------------
    def __BuscarParmetro(self, Parametro):
        """Return the value stored for Parametro, or None when absent.

        Fix: the original returned from inside the loop before ever reaching
        Archivo.close(), leaking the file handle; `with` closes it always.
        """
        with open(self.ArchivoConfiguracion, "r") as Archivo:
            for Linea in Archivo:
                # partition() tolerates lines without '=' (split() raised).
                Param, _, Valor = Linea.partition("=")
                if Param.strip() == Parametro:
                    return Valor.strip()
        return None

    def __NuevoArchivoConfig(self):
        """Write the factory-default configuration file."""
        Lineas = [
            "IPAddress=192.168.1.10\n",
            "NetMask=255.255.255.0\n",
            "Gateway=192.168.1.1\n",
            "Nombre=InXource01\n",
            "ClientCode=0000000\n",
            "Server=inxource.no-ip.org",
        ]
        with open(self.ArchivoConfiguracion, "w") as Archivo:
            Archivo.writelines(Lineas)

    def __ModificarParmetro(self, Parametro, NuevoValor):
        """Rewrite the file, replacing Parametro's value with NuevoValor;
        every other line is preserved."""
        NuevoArchivo = []
        with open(self.ArchivoConfiguracion, "r") as Archivo:
            for Linea in Archivo:
                Param, _, Valor = Linea.partition("=")
                if Param.strip() == Parametro:
                    NuevoArchivo.append(Param.strip() + "=" + NuevoValor + "\n")
                else:
                    NuevoArchivo.append(Param.strip() + "=" + Valor)
        with open(self.ArchivoConfiguracion, "w") as Archivo:
            Archivo.writelines(NuevoArchivo)
#Ejemplo de Implementacion
#conf = Configurator("Config.cf") Por parametro le paso el archivo de configuracion
#IP = conf.GetIPAddress() Devuelve la direccion IP Almacenada en el archivo de configuracion
#conf.SetIpAddress("192.168.1.1")
| Python |
# 2013/5/1 version beta 0.6.4.9
# Test script: endlessly polls a set of BCM GPIO channels and prints which
# one is high ("encendido" = "on").  Python 2 (print statements); requires a
# Raspberry Pi with the RPi.GPIO library.
import RPi.GPIO as GPIO
GPIO.cleanup()            # release any channel state left by a previous run
GPIO.setmode(GPIO.BCM)    # numbers below are BCM channel numbers, not header pins
# NOTE(review): every channel is configured as an output, yet the loop below
# reads them with GPIO.input() -- that reads back the driven level.  If the
# intent is to watch external signals, these presumably should be GPIO.IN;
# confirm against the wiring.
GPIO.setup(27,GPIO.OUT)
GPIO.setup(22,GPIO.OUT)
GPIO.setup(10,GPIO.OUT)
GPIO.setup(9,GPIO.OUT)
GPIO.setup(11,GPIO.OUT)
GPIO.setup(24,GPIO.OUT)
GPIO.setup(25,GPIO.OUT)
GPIO.setup(8,GPIO.OUT)
#s = raw_input('--> ')
# Busy-poll forever; only the FIRST high channel is reported on each pass
# (elif chain).  NOTE(review): no sleep in the loop -> 100% CPU.
while True:
    if GPIO.input(27):
        print "encendido port08"
    elif GPIO.input(22):
        print "encendido port09"
    elif GPIO.input(10):
        print "encendido port10"
    elif GPIO.input(9):
        print "encendido port11"
    elif GPIO.input(11):
        print "encendido port12"
    elif GPIO.input(24):
        print "encendido port13"
    elif GPIO.input(25):
        print "encendido port14"
    elif GPIO.input(8):
        print "encendido port15"
#Ports
#Imports
#import RPi.GPIO as GPIO
class Ports:
    """GPIO port manager backed by two 'key=value' text files:

    - ArchivoConfiguracion: "PORTxx=role" lines (input/output/sensor/undef)
    - ArchivoConstantes:    "PORTxx=pin"  lines mapping logical ports to pins
    """

    def __init__(self, ConfigFile, ConstFile):
        self.ArchivoConfiguracion = ConfigFile
        self.ArchivoConstantes = ConstFile
        GPIO.setmode(GPIO.BCM)

    def SetPortConfig(self, Port, Type):
        """Record Port's role in the config file and program the GPIO pin.

        Fix: the original called int(Port.strip()) on names like "PORT00"
        (always ValueError) and configured every role as GPIO.OUT.
        """
        Pin = self.ConvertirAGPIO(Port)
        if Pin is not None:
            if Type.strip() == "input":
                GPIO.setup(int(Pin), GPIO.IN)
            elif Type.strip() == "output":
                GPIO.setup(int(Pin), GPIO.OUT)
            # "sensor"/"undef" ports are not claimed here.
        self.__ModificarPuerto(Port, Type)

    def SetPortStatus(self, Port, State):
        """Drive an output pin: State "ON" -> high, "OFF" -> low,
        anything else is ignored (original behavior preserved)."""
        if State == "ON":
            GPIO.output(Port, True)
        elif State == "OFF":
            GPIO.output(Port, False)

    def ReadPort(self, Port):
        """Return the current level of GPIO pin number Port (string accepted)."""
        return GPIO.input(int(Port.strip()))

    def BuscarPorTipo(self, Tipo):
        """Return the list of port names whose configured role equals Tipo.

        Fix: the file is now closed reliably; the original's close() call sat
        after the return statement and never executed.  Blank lines are
        skipped ("" lines still contain '\\n', so len(Linea) > 0 didn't).
        """
        ListaPorts = []
        with open(self.ArchivoConfiguracion, "r") as Archivo:
            for Linea in Archivo:
                if Linea.strip():
                    Pin, Config = Linea.split('=')
                    if Config.strip() == Tipo.strip():
                        ListaPorts.append(Pin)
        return ListaPorts

    def ConvertirAGPIO(self, Port):
        """Return the pin number (string) mapped to logical Port, or None.

        Fixes: the original leaked the file handle, returned the pin with its
        trailing newline, and silently returned the LAST line's pin when Port
        was not found at all.
        """
        with open(self.ArchivoConstantes, "r") as Archivo:
            for Linea in Archivo:
                if Linea.strip():
                    Puerto, Pin = Linea.split('=')
                    if Puerto.strip() == Port:
                        return Pin.strip()
        return None

    #-------------------------Private helpers -----------------------------
    def __ModificarPuerto(self, Port, Setting):
        """Rewrite the config file replacing Port's role with Setting."""
        NuevoArchivo = []
        with open(self.ArchivoConfiguracion, "r") as Archivo:
            for Linea in Archivo:
                Param, Valor = Linea.split("=")
                if Param.strip() == Port:
                    NuevoArchivo.append(Param.strip() + "=" + Setting + "\n")
                else:
                    NuevoArchivo.append(Param.strip() + "=" + Valor)
        with open(self.ArchivoConfiguracion, "w") as Archivo:
            Archivo.writelines(NuevoArchivo)
| Python |
#Firmware Raspberry
#Imports
import time
from datetime import datetime
from Constantes import *
from Configurator import Configurator
from Sensor import Sensor
from Database import Database
from Ports import Ports
from Script import Script
import thread
#Startup
# Wire the subsystems together and launch the worker loops.
Por = Ports("PortConf.cf", "Constantes.cf")
# Fix: BuscarPorTipo takes only the role (not file names) and returns a LIST
# of port names; the original also never unpacked that list.  The first port
# flagged "sensor" is used -- TODO confirm a sensor port is always configured.
SensorPort = Por.BuscarPorTipo("sensor")[0]
Sen = Sensor(11, Por.ConvertirAGPIO(SensorPort))
Db = Database("Mediciones.db")
Script = Script()

def LecturaTempHum(delay):
    """Read the DHT sensor every `delay` seconds; store good readings."""
    while True:
        medicion = Sen.Obtener_Datos()
        if medicion != "error":
            Temperatura, Humedad = medicion.split("_")
            # Fix: Database exposes InsertarMedicion; the original called a
            # non-existent Db.Insertar().
            Db.InsertarMedicion(Temperatura, Humedad, datetime.today())
        # Fix: `delay` was ignored, producing a 100%-CPU busy loop.
        time.sleep(delay)

def LeerScript():
    """Run the command script forever."""
    while True:
        Script.Ejecutar()

def EnvioYRecepcionDatos():
    # Fix: thread.start_new_thread(func, args_tuple) -- the original passed
    # func(args), which CALLED the function inline and blocked forever on the
    # first infinite loop instead of spawning threads.
    thread.start_new_thread(LecturaTempHum, (3,))
    thread.start_new_thread(LeerScript, ())

EnvioYRecepcionDatos()
| Python |
#Networking Class
import os
import socket
import fcntl
import struct
import sys
class Networking:
    """Query and set IPv4 settings of one interface (Linux only; the setters
    shell out to ifconfig/route via sudo)."""

    def __init__(self, interface):
        self.Interface = interface
        # ioctl request numbers from <linux/sockios.h>.
        self.SIOCGIFNETMASK = 0x891b
        self.SIOCGIFADDR = 0x8915

    def GetIPAddress(self):
        """Return the interface's IPv4 address as a dotted-quad string."""
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            packed = fcntl.ioctl(s.fileno(), self.SIOCGIFADDR,
                                 struct.pack('256s', self.Interface[:15]))
        finally:
            s.close()   # fix: the original leaked this socket
        # bytes 20..23 of the ifreq struct hold the in_addr.
        return socket.inet_ntoa(packed[20:24])

    def SetIpAdrees(self, IpAdress):
        """Change the IP address, preserving netmask and default gateway
        (ifconfig drops the default route, so it is re-added)."""
        GatewayActual = self.GetGateway()
        os.system("sudo ifconfig " + self.Interface + " " + IpAdress +
                  " netmask " + self.GetNetMask())
        os.system("sudo route add default gw " + GatewayActual + " " + self.Interface)

    def GetNetMask(self):
        """Return the interface's netmask as a dotted-quad string."""
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            packed = fcntl.ioctl(s, self.SIOCGIFNETMASK,
                                 struct.pack('256s', self.Interface))
        finally:
            s.close()   # fix: leaked in the original
        return socket.inet_ntoa(packed[20:24])

    def SetNetMask(self, NetMask):
        """Change the netmask, preserving IP address and default gateway."""
        GatewayActual = self.GetGateway()
        os.system("sudo ifconfig " + self.Interface + " " + self.GetIPAddress() +
                  " netmask " + NetMask)
        os.system("sudo route add default gw " + GatewayActual + " " + self.Interface)

    def GetGateway(self):
        """Return the default gateway for this interface ('' if none).

        Fix: the result is now strip()ped -- the trailing newline previously
        ended up in the middle of the `route add` commands built by the
        setters, corrupting them.  os.popen replaces deprecated os.popen4.
        """
        cmd = "ip route list dev " + self.Interface + " | awk ' /^default/ {print $3}'"
        return os.popen(cmd).read().strip()

    def SetGateway(self, Gateway):
        """Replace the default gateway."""
        os.system("sudo ifconfig " + self.Interface + " " + self.GetIPAddress() +
                  " netmask " + self.GetNetMask())
        os.system("sudo route del default gw " + self.GetGateway())
        os.system("sudo route add default gw " + Gateway + " " + self.Interface)
#Ejemplo de Implementacion (example usage).
#Fix: these lines were NOT commented out, so they executed on every import of
#this module -- and crashed, because they used method names that do not exist
#(GetIpAddress/SetIpAddress instead of GetIPAddress/SetIpAdrees).
#net = Networking("wlan0")
#IP = net.GetIPAddress()
#net.SetIpAdrees("192.168.1.75")
| Python |
#Metodos De Acceso a la base de datos
import sqlite3
import sys
import os
from datetime import datetime
class Database:
    """Thin sqlite3 wrapper for the Mediciones and Estados tables."""

    def __init__(self, BaseDeDatos):
        # One long-lived connection + cursor; commit after every insert.
        self.DB = sqlite3.connect(BaseDeDatos)
        self.Cursor = self.DB.cursor()

    def InsertarMedicion(self, Temperatura, Humedad, Fecha):
        """Store one temperature/humidity reading taken at datetime Fecha.

        Fix: parameterized query -- the original built the SQL by string
        concatenation, which was SQL-injection prone and broke on quotes.
        """
        FechaFormateada = Fecha.strftime("%Y/%m/%d %H:%M:%S")
        self.Cursor.execute(
            "Insert into Mediciones (Temperatura, Humedad, Hora) Values (?, ?, ?)",
            (Temperatura, Humedad, FechaFormateada))
        self.DB.commit()

    def InsertarEstado(self, Puerto, Estado, Fecha):
        """Store the state of one port at datetime Fecha (parameterized,
        same fix as InsertarMedicion; the Python-2-only debug print removed)."""
        FechaFormateada = Fecha.strftime("%Y/%m/%d %H:%M:%S")
        self.Cursor.execute(
            "Insert into Estados (Port, Estado, Hora) Values (?, ?, ?)",
            (Puerto, Estado, FechaFormateada))
        self.DB.commit()

    def BuscarUltimoEstado(self, Entrada):
        """Return the Estado of the newest row for port Entrada, or None when
        the port has no recorded states (MAX() on an empty set yields NULLs)."""
        self.Cursor.execute(
            "Select MAX(id),Estado from Estados where Port = ?", (Entrada,))
        _, Estado = self.Cursor.fetchone()
        return Estado
#Implementacion:
#Db = Database("Mediciones.db") En este caso se utiliza una base de datos sqlite3
#Db.Insertar('25','40',datetime.today())
| Python |
#!/usr/bin/python
import os
from urllib import urlopen
from bs4 import BeautifulSoup
# Where the downloaded help pages/images land (relative to the script).
output_dir = os.path.abspath('../assets/help')
# Fix: fail fast with a clear message -- a bare `assert` is silently stripped
# when Python runs with -O.
if not os.path.isdir(output_dir):
    raise IOError('output directory does not exist: %s' % output_dir)
# Image URLs discovered while rewriting wiki pages (filled by download_html).
files_list_images = []
# Wiki page names to mirror locally as <name>.html.
files_list_html = [
    'Manual',
    'FAQ',
    'StylusSupport',
    'SupportedDevices',
    'Permissions' ]
def download_raw(url):
    """Download `url` verbatim into output_dir, named after its last path
    segment.

    Fix: the urlopen response is now closed explicitly (the original leaked
    one connection per downloaded file).
    """
    response = urlopen(url)
    try:
        data = response.read()
    finally:
        response.close()
    filename = url.split('/')[-1]
    with open(os.path.join(output_dir, filename), 'wb') as f:
        f.write(data)
def download_html(name):
    # Fetch one wiki page from the Google Code site, strip its <script> tags,
    # rewrite wiki-internal links to point at the local .html mirror files,
    # replace remote image URLs with bare filenames (recording the originals
    # in files_list_images for a later download_raw pass), then save the
    # rewritten page as <output_dir>/<name>.html.
    url = 'http://code.google.com/p/android-quill/wiki/'+name+'?show=content'
    data = urlopen(url).read()
    soup = BeautifulSoup(data)
    # Scripts are useless offline and may phone home; remove them outright.
    for script in soup.find_all('script'):
        script.decompose()
    wiki_home = '/p/android-quill/wiki/'
    for link in soup.find_all('a'):
        target = link.get('href', None)
        if target is None:
            continue
        if target.startswith(wiki_home):
            target = target[len(wiki_home):]
            # "Page" -> "Page.html"; "Page#anchor" -> "Page.html#anchor".
            if target.count('#') == 0:
                target += '.html'
            elif target.count('#') == 1:
                target = target.replace('#', '.html#')
            else:
                raise ValueError('More than one pound-sign in link??')
            link['href'] = target
    for img in soup.find_all('img'):
        url = img['src']
        # Already-relative image sources are left untouched.
        if not url.startswith('http://'):
            continue
        files_list_images.append(url)
        filename = url.split('/')[-1]
        img['src'] = filename
    print soup.prettify()
    with open(os.path.join(output_dir, name+'.html'), 'wb') as f:
        f.write(str(soup))
if __name__ == '__main__':
    # Mirror every wiki page first; download_html records the image URLs it
    # encounters as a side effect...
    for page_name in files_list_html:
        download_html(page_name)
    # ...then fetch those images in a second pass.
    for image_url in files_list_images:
        download_raw(image_url)
| Python |
###
## * << Haru Free PDF Library 2.0.8 >> -- hpdf_consts.h
## *
## * URL http://libharu.org/
## *
## * Copyright (c) 1999-2006 Takeshi Kanno
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
from hpdf_types import *
#----------------------------------------------------------------------------
HPDF_TRUE =1
HPDF_FALSE =0
HPDF_OK =0
HPDF_NOERROR =0
#----- default values -------------------------------------------------------
# buffer size which is required when we convert to character string.
HPDF_TMP_BUF_SIZ =512
HPDF_SHORT_BUF_SIZ =32
HPDF_REAL_LEN =11
HPDF_INT_LEN =11
HPDF_TEXT_DEFAULT_LEN =256
HPDF_UNICODE_HEADER_LEN =2
HPDF_DATE_TIME_STR_LEN =23
# length of each item defined in PDF
HPDF_BYTE_OFFSET_LEN =10
HPDF_OBJ_ID_LEN =7
HPDF_GEN_NO_LEN =5
# default value of Graphic State
HPDF_DEF_FONT ="Helvetica"
HPDF_DEF_PAGE_LAYOUT =HPDF_PAGE_LAYOUT_SINGLE
HPDF_DEF_PAGE_MODE =HPDF_PAGE_MODE_USE_NONE
HPDF_DEF_WORDSPACE =0
HPDF_DEF_CHARSPACE =0
HPDF_DEF_FONTSIZE =10
HPDF_DEF_HSCALING =100
HPDF_DEF_LEADING =0
HPDF_DEF_RENDERING_MODE =HPDF_FILL
HPDF_DEF_RISE =0
HPDF_DEF_RAISE =HPDF_DEF_RISE
HPDF_DEF_LINEWIDTH =1
HPDF_DEF_LINECAP =HPDF_BUTT_END
HPDF_DEF_LINEJOIN =HPDF_MITER_JOIN
HPDF_DEF_MITERLIMIT =10
HPDF_DEF_FLATNESS =1
HPDF_DEF_PAGE_NUM =1
HPDF_BS_DEF_WIDTH =1
# default page-size
HPDF_DEF_PAGE_WIDTH =595.276
HPDF_DEF_PAGE_HEIGHT =841.89
HPDF_VERSION_TEXT ="2.0.8"
#---------------------------------------------------------------------------
#----- compression mode ----------------------------------------------------
HPDF_COMP_NONE =0x00
HPDF_COMP_TEXT =0x01
HPDF_COMP_IMAGE =0x02
HPDF_COMP_METADATA =0x04
HPDF_COMP_ALL =0x0F
HPDF_COMP_BEST_COMPRESS =0x10
HPDF_COMP_BEST_SPEED =0x20
HPDF_COMP_MASK =0xFF
#----------------------------------------------------------------------------
#----- permission flags (only Revision 2 is supported)-----------------------
HPDF_ENABLE_READ =0
HPDF_ENABLE_PRINT =4
HPDF_ENABLE_EDIT_ALL =8
HPDF_ENABLE_COPY =16
HPDF_ENABLE_EDIT =32
#----------------------------------------------------------------------------
#------ viewer preferences definitions --------------------------------------
HPDF_HIDE_TOOLBAR =1
HPDF_HIDE_MENUBAR =2
HPDF_HIDE_WINDOW_UI =4
HPDF_FIT_WINDOW =8
HPDF_CENTER_WINDOW =16
#---------------------------------------------------------------------------
#------ limitation of object implementation (PDF1.4) -----------------------
HPDF_LIMIT_MAX_INT =2147483647
HPDF_LIMIT_MIN_INT =-2147483647
HPDF_LIMIT_MAX_REAL =32767
HPDF_LIMIT_MIN_REAL =-32767
HPDF_LIMIT_MAX_STRING_LEN =65535
HPDF_LIMIT_MAX_NAME_LEN =127
HPDF_LIMIT_MAX_ARRAY =8191
HPDF_LIMIT_MAX_DICT_ELEMENT =4095
HPDF_LIMIT_MAX_XREF_ELEMENT =8388607
HPDF_LIMIT_MAX_GSTATE =28
HPDF_LIMIT_MAX_DEVICE_N =8
HPDF_LIMIT_MAX_DEVICE_N_V15 =32
HPDF_LIMIT_MAX_CID =65535
HPDF_MAX_GENERATION_NUM =65535
HPDF_MIN_PAGE_HEIGHT =3
HPDF_MIN_PAGE_WIDTH =3
HPDF_MAX_PAGE_HEIGHT =14400
HPDF_MAX_PAGE_WIDTH =14400
HPDF_MIN_MAGNIFICATION_FACTOR =8
HPDF_MAX_MAGNIFICATION_FACTOR =3200
#---------------------------------------------------------------------------
#------ limitation of various properties -----------------------------------
HPDF_MIN_PAGE_SIZE =3
HPDF_MAX_PAGE_SIZE =14400
HPDF_MIN_HORIZONTALSCALING =10
HPDF_MAX_HORIZONTALSCALING =300
HPDF_MIN_WORDSPACE =-30
HPDF_MAX_WORDSPACE =300
HPDF_MIN_CHARSPACE =-30
HPDF_MAX_CHARSPACE =300
HPDF_MAX_FONTSIZE =300
HPDF_MAX_ZOOMSIZE =10
HPDF_MAX_LEADING =300
HPDF_MAX_LINEWIDTH =100
HPDF_MAX_DASH_PATTERN =100
HPDF_MAX_JWW_NUM =128
#----------------------------------------------------------------------------
#----- country code definition ----------------------------------------------
HPDF_COUNTRY_AF ="AF" # AFGHANISTAN
HPDF_COUNTRY_AL ="AL" # ALBANIA
HPDF_COUNTRY_DZ ="DZ" # ALGERIA
HPDF_COUNTRY_AS ="AS" # AMERICAN SAMOA
HPDF_COUNTRY_AD ="AD" # ANDORRA
HPDF_COUNTRY_AO ="AO" # ANGOLA
HPDF_COUNTRY_AI ="AI" # ANGUILLA
HPDF_COUNTRY_AQ ="AQ" # ANTARCTICA
HPDF_COUNTRY_AG ="AG" # ANTIGUA AND BARBUDA
HPDF_COUNTRY_AR ="AR" # ARGENTINA
HPDF_COUNTRY_AM ="AM" # ARMENIA
HPDF_COUNTRY_AW ="AW" # ARUBA
HPDF_COUNTRY_AU ="AU" # AUSTRALIA
HPDF_COUNTRY_AT ="AT" # AUSTRIA
HPDF_COUNTRY_AZ ="AZ" # AZERBAIJAN
HPDF_COUNTRY_BS ="BS" # BAHAMAS
HPDF_COUNTRY_BH ="BH" # BAHRAIN
HPDF_COUNTRY_BD ="BD" # BANGLADESH
HPDF_COUNTRY_BB ="BB" # BARBADOS
HPDF_COUNTRY_BY ="BY" # BELARUS
HPDF_COUNTRY_BE ="BE" # BELGIUM
HPDF_COUNTRY_BZ ="BZ" # BELIZE
HPDF_COUNTRY_BJ ="BJ" # BENIN
HPDF_COUNTRY_BM ="BM" # BERMUDA
HPDF_COUNTRY_BT ="BT" # BHUTAN
HPDF_COUNTRY_BO ="BO" # BOLIVIA
HPDF_COUNTRY_BA ="BA" # BOSNIA AND HERZEGOWINA
HPDF_COUNTRY_BW ="BW" # BOTSWANA
HPDF_COUNTRY_BV ="BV" # BOUVET ISLAND
HPDF_COUNTRY_BR ="BR" # BRAZIL
HPDF_COUNTRY_IO ="IO" # BRITISH INDIAN OCEAN TERRITORY
HPDF_COUNTRY_BN ="BN" # BRUNEI DARUSSALAM
HPDF_COUNTRY_BG ="BG" # BULGARIA
HPDF_COUNTRY_BF ="BF" # BURKINA FASO
HPDF_COUNTRY_BI ="BI" # BURUNDI
HPDF_COUNTRY_KH ="KH" # CAMBODIA
HPDF_COUNTRY_CM ="CM" # CAMEROON
HPDF_COUNTRY_CA ="CA" # CANADA
HPDF_COUNTRY_CV ="CV" # CAPE VERDE
HPDF_COUNTRY_KY ="KY" # CAYMAN ISLANDS
HPDF_COUNTRY_CF ="CF" # CENTRAL AFRICAN REPUBLIC
HPDF_COUNTRY_TD ="TD" # CHAD
HPDF_COUNTRY_CL ="CL" # CHILE
HPDF_COUNTRY_CN ="CN" # CHINA
HPDF_COUNTRY_CX ="CX" # CHRISTMAS ISLAND
HPDF_COUNTRY_CC ="CC" # COCOS (KEELING) ISLANDS
HPDF_COUNTRY_CO ="CO" # COLOMBIA
HPDF_COUNTRY_KM ="KM" # COMOROS
HPDF_COUNTRY_CG ="CG" # CONGO
HPDF_COUNTRY_CK ="CK" # COOK ISLANDS
HPDF_COUNTRY_CR ="CR" # COSTA RICA
HPDF_COUNTRY_CI ="CI" # COTE D'IVOIRE
HPDF_COUNTRY_HR ="HR" # CROATIA (local name: Hrvatska)
HPDF_COUNTRY_CU ="CU" # CUBA
HPDF_COUNTRY_CY ="CY" # CYPRUS
HPDF_COUNTRY_CZ ="CZ" # CZECH REPUBLIC
HPDF_COUNTRY_DK ="DK" # DENMARK
HPDF_COUNTRY_DJ ="DJ" # DJIBOUTI
HPDF_COUNTRY_DM ="DM" # DOMINICA
HPDF_COUNTRY_DO ="DO" # DOMINICAN REPUBLIC
HPDF_COUNTRY_TP ="TP" # EAST TIMOR
HPDF_COUNTRY_EC ="EC" # ECUADOR
HPDF_COUNTRY_EG ="EG" # EGYPT
HPDF_COUNTRY_SV ="SV" # EL SALVADOR
HPDF_COUNTRY_GQ ="GQ" # EQUATORIAL GUINEA
HPDF_COUNTRY_ER ="ER" # ERITREA
HPDF_COUNTRY_EE ="EE" # ESTONIA
HPDF_COUNTRY_ET ="ET" # ETHIOPIA
HPDF_COUNTRY_FK ="FK" # FALKLAND ISLANDS (MALVINAS)
HPDF_COUNTRY_FO ="FO" # FAROE ISLANDS
HPDF_COUNTRY_FJ ="FJ" # FIJI
HPDF_COUNTRY_FI ="FI" # FINLAND
HPDF_COUNTRY_FR ="FR" # FRANCE
HPDF_COUNTRY_FX ="FX" # FRANCE, METROPOLITAN
HPDF_COUNTRY_GF ="GF" # FRENCH GUIANA
HPDF_COUNTRY_PF ="PF" # FRENCH POLYNESIA
HPDF_COUNTRY_TF ="TF" # FRENCH SOUTHERN TERRITORIES
HPDF_COUNTRY_GA ="GA" # GABON
HPDF_COUNTRY_GM ="GM" # GAMBIA
HPDF_COUNTRY_GE ="GE" # GEORGIA
HPDF_COUNTRY_DE ="DE" # GERMANY
HPDF_COUNTRY_GH ="GH" # GHANA
HPDF_COUNTRY_GI ="GI" # GIBRALTAR
HPDF_COUNTRY_GR ="GR" # GREECE
HPDF_COUNTRY_GL ="GL" # GREENLAND
HPDF_COUNTRY_GD ="GD" # GRENADA
HPDF_COUNTRY_GP ="GP" # GUADELOUPE
HPDF_COUNTRY_GU ="GU" # GUAM
HPDF_COUNTRY_GT ="GT" # GUATEMALA
HPDF_COUNTRY_GN ="GN" # GUINEA
HPDF_COUNTRY_GW ="GW" # GUINEA-BISSAU
HPDF_COUNTRY_GY ="GY" # GUYANA
HPDF_COUNTRY_HT ="HT" # HAITI
HPDF_COUNTRY_HM ="HM" # HEARD AND MC DONALD ISLANDS
HPDF_COUNTRY_HN ="HN" # HONDURAS
HPDF_COUNTRY_HK ="HK" # HONG KONG
HPDF_COUNTRY_HU ="HU" # HUNGARY
HPDF_COUNTRY_IS ="IS" # ICELAND
HPDF_COUNTRY_IN ="IN" # INDIA
HPDF_COUNTRY_ID ="ID" # INDONESIA
HPDF_COUNTRY_IR ="IR" # IRAN (ISLAMIC REPUBLIC OF)
HPDF_COUNTRY_IQ ="IQ" # IRAQ
HPDF_COUNTRY_IE ="IE" # IRELAND
HPDF_COUNTRY_IL ="IL" # ISRAEL
HPDF_COUNTRY_IT ="IT" # ITALY
HPDF_COUNTRY_JM ="JM" # JAMAICA
HPDF_COUNTRY_JP ="JP" # JAPAN
HPDF_COUNTRY_JO ="JO" # JORDAN
HPDF_COUNTRY_KZ ="KZ" # KAZAKHSTAN
HPDF_COUNTRY_KE ="KE" # KENYA
HPDF_COUNTRY_KI ="KI" # KIRIBATI
HPDF_COUNTRY_KP ="KP" # KOREA, DEMOCRATIC PEOPLE'S REPUBLIC OF
HPDF_COUNTRY_KR ="KR" # KOREA, REPUBLIC OF
HPDF_COUNTRY_KW ="KW" # KUWAIT
HPDF_COUNTRY_KG ="KG" # KYRGYZSTAN
HPDF_COUNTRY_LA ="LA" # LAO PEOPLE'S DEMOCRATIC REPUBLIC
HPDF_COUNTRY_LV ="LV" # LATVIA
HPDF_COUNTRY_LB ="LB" # LEBANON
HPDF_COUNTRY_LS ="LS" # LESOTHO
HPDF_COUNTRY_LR ="LR" # LIBERIA
HPDF_COUNTRY_LY ="LY" # LIBYAN ARAB JAMAHIRIYA
HPDF_COUNTRY_LI ="LI" # LIECHTENSTEIN
HPDF_COUNTRY_LT ="LT" # LITHUANIA
HPDF_COUNTRY_LU ="LU" # LUXEMBOURG
HPDF_COUNTRY_MO ="MO" # MACAU
HPDF_COUNTRY_MK ="MK" # MACEDONIA, THE FORMER YUGOSLAV REPUBLIC OF
HPDF_COUNTRY_MG ="MG" # MADAGASCAR
HPDF_COUNTRY_MW ="MW" # MALAWI
HPDF_COUNTRY_MY ="MY" # MALAYSIA
HPDF_COUNTRY_MV ="MV" # MALDIVES
HPDF_COUNTRY_ML ="ML" # MALI
HPDF_COUNTRY_MT ="MT" # MALTA
HPDF_COUNTRY_MH ="MH" # MARSHALL ISLANDS
HPDF_COUNTRY_MQ ="MQ" # MARTINIQUE
HPDF_COUNTRY_MR ="MR" # MAURITANIA
HPDF_COUNTRY_MU ="MU" # MAURITIUS
HPDF_COUNTRY_YT ="YT" # MAYOTTE
HPDF_COUNTRY_MX ="MX" # MEXICO
HPDF_COUNTRY_FM ="FM" # MICRONESIA, FEDERATED STATES OF
HPDF_COUNTRY_MD ="MD" # MOLDOVA, REPUBLIC OF
HPDF_COUNTRY_MC ="MC" # MONACO
HPDF_COUNTRY_MN ="MN" # MONGOLIA
HPDF_COUNTRY_MS ="MS" # MONTSERRAT
HPDF_COUNTRY_MA ="MA" # MOROCCO
HPDF_COUNTRY_MZ ="MZ" # MOZAMBIQUE
HPDF_COUNTRY_MM ="MM" # MYANMAR
HPDF_COUNTRY_NA ="NA" # NAMIBIA
HPDF_COUNTRY_NR ="NR" # NAURU
HPDF_COUNTRY_NP ="NP" # NEPAL
HPDF_COUNTRY_NL ="NL" # NETHERLANDS
HPDF_COUNTRY_AN ="AN" # NETHERLANDS ANTILLES
HPDF_COUNTRY_NC ="NC" # NEW CALEDONIA
HPDF_COUNTRY_NZ ="NZ" # NEW ZEALAND
HPDF_COUNTRY_NI ="NI" # NICARAGUA
HPDF_COUNTRY_NE ="NE" # NIGER
HPDF_COUNTRY_NG ="NG" # NIGERIA
HPDF_COUNTRY_NU ="NU" # NIUE
HPDF_COUNTRY_NF ="NF" # NORFOLK ISLAND
HPDF_COUNTRY_MP ="MP" # NORTHERN MARIANA ISLANDS
HPDF_COUNTRY_NO ="NO" # NORWAY
HPDF_COUNTRY_OM ="OM" # OMAN
HPDF_COUNTRY_PK ="PK" # PAKISTAN
HPDF_COUNTRY_PW ="PW" # PALAU
HPDF_COUNTRY_PA ="PA" # PANAMA
HPDF_COUNTRY_PG ="PG" # PAPUA NEW GUINEA
HPDF_COUNTRY_PY ="PY" # PARAGUAY
HPDF_COUNTRY_PE ="PE" # PERU
HPDF_COUNTRY_PH ="PH" # PHILIPPINES
HPDF_COUNTRY_PN ="PN" # PITCAIRN
HPDF_COUNTRY_PL ="PL" # POLAND
HPDF_COUNTRY_PT ="PT" # PORTUGAL
HPDF_COUNTRY_PR ="PR" # PUERTO RICO
HPDF_COUNTRY_QA ="QA" # QATAR
HPDF_COUNTRY_RE ="RE" # REUNION
HPDF_COUNTRY_RO ="RO" # ROMANIA
HPDF_COUNTRY_RU ="RU" # RUSSIAN FEDERATION
HPDF_COUNTRY_RW ="RW" # RWANDA
HPDF_COUNTRY_KN ="KN" # SAINT KITTS AND NEVIS
HPDF_COUNTRY_LC ="LC" # SAINT LUCIA
HPDF_COUNTRY_VC ="VC" # SAINT VINCENT AND THE GRENADINES
HPDF_COUNTRY_WS ="WS" # SAMOA
HPDF_COUNTRY_SM ="SM" # SAN MARINO
HPDF_COUNTRY_ST ="ST" # SAO TOME AND PRINCIPE
HPDF_COUNTRY_SA ="SA" # SAUDI ARABIA
HPDF_COUNTRY_SN ="SN" # SENEGAL
HPDF_COUNTRY_SC ="SC" # SEYCHELLES
HPDF_COUNTRY_SL ="SL" # SIERRA LEONE
HPDF_COUNTRY_SG ="SG" # SINGAPORE
HPDF_COUNTRY_SK ="SK" # SLOVAKIA (Slovak Republic)
HPDF_COUNTRY_SI ="SI" # SLOVENIA
HPDF_COUNTRY_SB ="SB" # SOLOMON ISLANDS
HPDF_COUNTRY_SO ="SO" # SOMALIA
HPDF_COUNTRY_ZA ="ZA" # SOUTH AFRICA
HPDF_COUNTRY_ES ="ES" # SPAIN
HPDF_COUNTRY_LK ="LK" # SRI LANKA
HPDF_COUNTRY_SH ="SH" # ST. HELENA
HPDF_COUNTRY_PM ="PM" # ST. PIERRE AND MIQUELON
HPDF_COUNTRY_SD ="SD" # SUDAN
HPDF_COUNTRY_SR ="SR" # SURINAME
HPDF_COUNTRY_SJ ="SJ" # SVALBARD AND JAN MAYEN ISLANDS
HPDF_COUNTRY_SZ ="SZ" # SWAZILAND
HPDF_COUNTRY_SE ="SE" # SWEDEN
HPDF_COUNTRY_CH ="CH" # SWITZERLAND
HPDF_COUNTRY_SY ="SY" # SYRIAN ARAB REPUBLIC
HPDF_COUNTRY_TW ="TW" # TAIWAN, PROVINCE OF CHINA
HPDF_COUNTRY_TJ ="TJ" # TAJIKISTAN
HPDF_COUNTRY_TZ ="TZ" # TANZANIA, UNITED REPUBLIC OF
HPDF_COUNTRY_TH ="TH" # THAILAND
HPDF_COUNTRY_TG ="TG" # TOGO
HPDF_COUNTRY_TK ="TK" # TOKELAU
HPDF_COUNTRY_TO ="TO" # TONGA
HPDF_COUNTRY_TT ="TT" # TRINIDAD AND TOBAGO
HPDF_COUNTRY_TN ="TN" # TUNISIA
HPDF_COUNTRY_TR ="TR" # TURKEY
HPDF_COUNTRY_TM ="TM" # TURKMENISTAN
HPDF_COUNTRY_TC ="TC" # TURKS AND CAICOS ISLANDS
HPDF_COUNTRY_TV ="TV" # TUVALU
HPDF_COUNTRY_UG ="UG" # UGANDA
HPDF_COUNTRY_UA ="UA" # UKRAINE
HPDF_COUNTRY_AE ="AE" # UNITED ARAB EMIRATES
HPDF_COUNTRY_GB ="GB" # UNITED KINGDOM
HPDF_COUNTRY_US ="US" # UNITED STATES
HPDF_COUNTRY_UM ="UM" # UNITED STATES MINOR OUTLYING ISLANDS
HPDF_COUNTRY_UY ="UY" # URUGUAY
HPDF_COUNTRY_UZ ="UZ" # UZBEKISTAN
HPDF_COUNTRY_VU ="VU" # VANUATU
HPDF_COUNTRY_VA ="VA" # VATICAN CITY STATE (HOLY SEE)
HPDF_COUNTRY_VE ="VE" # VENEZUELA
HPDF_COUNTRY_VN ="VN" # VIET NAM
HPDF_COUNTRY_VG ="VG" # VIRGIN ISLANDS (BRITISH)
HPDF_COUNTRY_VI ="VI" # VIRGIN ISLANDS (U.S.)
HPDF_COUNTRY_WF ="WF" # WALLIS AND FUTUNA ISLANDS
HPDF_COUNTRY_EH ="EH" # WESTERN SAHARA
HPDF_COUNTRY_YE ="YE" # YEMEN
HPDF_COUNTRY_YU ="YU" # YUGOSLAVIA
HPDF_COUNTRY_ZR ="ZR" # ZAIRE
HPDF_COUNTRY_ZM ="ZM" # ZAMBIA
HPDF_COUNTRY_ZW ="ZW" # ZIMBABWE
#----------------------------------------------------------------------------
#----- lang code definition -------------------------------------------------
HPDF_LANG_AA ="aa" # Afar
HPDF_LANG_AB ="ab" # Abkhazian
HPDF_LANG_AF ="af" # Afrikaans
HPDF_LANG_AM ="am" # Amharic
HPDF_LANG_AR ="ar" # Arabic
HPDF_LANG_AS ="as" # Assamese
HPDF_LANG_AY ="ay" # Aymara
HPDF_LANG_AZ ="az" # Azerbaijani
HPDF_LANG_BA ="ba" # Bashkir
HPDF_LANG_BE ="be" # Byelorussian
HPDF_LANG_BG ="bg" # Bulgarian
HPDF_LANG_BH ="bh" # Bihari
HPDF_LANG_BI ="bi" # Bislama
HPDF_LANG_BN ="bn" # Bengali Bangla
HPDF_LANG_BO ="bo" # Tibetan
HPDF_LANG_BR ="br" # Breton
HPDF_LANG_CA ="ca" # Catalan
HPDF_LANG_CO ="co" # Corsican
HPDF_LANG_CS ="cs" # Czech
HPDF_LANG_CY ="cy" # Welsh
HPDF_LANG_DA ="da" # Danish
HPDF_LANG_DE ="de" # German
HPDF_LANG_DZ ="dz" # Bhutani
HPDF_LANG_EL ="el" # Greek
HPDF_LANG_EN ="en" # English
HPDF_LANG_EO ="eo" # Esperanto
HPDF_LANG_ES ="es" # Spanish
HPDF_LANG_ET ="et" # Estonian
HPDF_LANG_EU ="eu" # Basque
HPDF_LANG_FA ="fa" # Persian
HPDF_LANG_FI ="fi" # Finnish
HPDF_LANG_FJ ="fj" # Fiji
HPDF_LANG_FO ="fo" # Faeroese
HPDF_LANG_FR ="fr" # French
HPDF_LANG_FY ="fy" # Frisian
HPDF_LANG_GA ="ga" # Irish
HPDF_LANG_GD ="gd" # Scots Gaelic
HPDF_LANG_GL ="gl" # Galician
HPDF_LANG_GN ="gn" # Guarani
HPDF_LANG_GU ="gu" # Gujarati
HPDF_LANG_HA ="ha" # Hausa
HPDF_LANG_HI ="hi" # Hindi
HPDF_LANG_HR ="hr" # Croatian
HPDF_LANG_HU ="hu" # Hungarian
HPDF_LANG_HY ="hy" # Armenian
HPDF_LANG_IA ="ia" # Interlingua
HPDF_LANG_IE ="ie" # Interlingue
HPDF_LANG_IK ="ik" # Inupiak
HPDF_LANG_IN ="in" # Indonesian
HPDF_LANG_IS ="is" # Icelandic
HPDF_LANG_IT ="it" # Italian
HPDF_LANG_IW ="iw" # Hebrew
HPDF_LANG_JA ="ja" # Japanese
HPDF_LANG_JI ="ji" # Yiddish
HPDF_LANG_JW ="jw" # Javanese
HPDF_LANG_KA ="ka" # Georgian
HPDF_LANG_KK ="kk" # Kazakh
HPDF_LANG_KL ="kl" # Greenlandic
HPDF_LANG_KM ="km" # Cambodian
HPDF_LANG_KN ="kn" # Kannada
HPDF_LANG_KO ="ko" # Korean
HPDF_LANG_KS ="ks" # Kashmiri
HPDF_LANG_KU ="ku" # Kurdish
HPDF_LANG_KY ="ky" # Kirghiz
HPDF_LANG_LA ="la" # Latin
HPDF_LANG_LN ="ln" # Lingala
HPDF_LANG_LO ="lo" # Laothian
HPDF_LANG_LT ="lt" # Lithuanian
HPDF_LANG_LV ="lv" # Latvian,Lettish
HPDF_LANG_MG ="mg" # Malagasy
HPDF_LANG_MI ="mi" # Maori
HPDF_LANG_MK ="mk" # Macedonian
HPDF_LANG_ML ="ml" # Malayalam
HPDF_LANG_MN ="mn" # Mongolian
HPDF_LANG_MO ="mo" # Moldavian
HPDF_LANG_MR ="mr" # Marathi
HPDF_LANG_MS ="ms" # Malay
HPDF_LANG_MT ="mt" # Maltese
HPDF_LANG_MY ="my" # Burmese
HPDF_LANG_NA ="na" # Nauru
HPDF_LANG_NE ="ne" # Nepali
HPDF_LANG_NL ="nl" # Dutch
HPDF_LANG_NO ="no" # Norwegian
HPDF_LANG_OC ="oc" # Occitan
HPDF_LANG_OM ="om" # (Afan)Oromo
HPDF_LANG_OR ="or" # Oriya
HPDF_LANG_PA ="pa" # Punjabi
HPDF_LANG_PL ="pl" # Polish
HPDF_LANG_PS ="ps" # Pashto,Pushto
HPDF_LANG_PT ="pt" # Portuguese
HPDF_LANG_QU ="qu" # Quechua
HPDF_LANG_RM ="rm" # Rhaeto-Romance
HPDF_LANG_RN ="rn" # Kirundi
HPDF_LANG_RO ="ro" # Romanian
HPDF_LANG_RU ="ru" # Russian
HPDF_LANG_RW ="rw" # Kinyarwanda
HPDF_LANG_SA ="sa" # Sanskrit
HPDF_LANG_SD ="sd" # Sindhi
HPDF_LANG_SG ="sg" # Sangro
HPDF_LANG_SH ="sh" # Serbo-Croatian
HPDF_LANG_SI ="si" # Singhalese
HPDF_LANG_SK ="sk" # Slovak
HPDF_LANG_SL ="sl" # Slovenian
HPDF_LANG_SM ="sm" # Samoan
HPDF_LANG_SN ="sn" # Shona
HPDF_LANG_SO ="so" # Somali
HPDF_LANG_SQ ="sq" # Albanian
HPDF_LANG_SR ="sr" # Serbian
HPDF_LANG_SS ="ss" # Siswati
HPDF_LANG_ST ="st" # Sesotho
HPDF_LANG_SU ="su" # Sundanese
HPDF_LANG_SV ="sv" # Swedish
HPDF_LANG_SW ="sw" # Swahili
HPDF_LANG_TA ="ta" # Tamil
HPDF_LANG_TE ="te" # Tegulu
HPDF_LANG_TG ="tg" # Tajik
HPDF_LANG_TH ="th" # Thai
HPDF_LANG_TI ="ti" # Tigrinya
HPDF_LANG_TK ="tk" # Turkmen
HPDF_LANG_TL ="tl" # Tagalog
HPDF_LANG_TN ="tn" # Setswanato Tonga
HPDF_LANG_TR ="tr" # Turkish
HPDF_LANG_TS ="ts" # Tsonga
HPDF_LANG_TT ="tt" # Tatar
HPDF_LANG_TW ="tw" # Twi
HPDF_LANG_UK ="uk" # Ukrainian
HPDF_LANG_UR ="ur" # Urdu
HPDF_LANG_UZ ="uz" # Uzbek
HPDF_LANG_VI ="vi" # Vietnamese
HPDF_LANG_VO ="vo" # Volapuk
HPDF_LANG_WO ="wo" # Wolof
HPDF_LANG_XH ="xh" # Xhosa
HPDF_LANG_YO ="yo" # Yoruba
HPDF_LANG_ZH ="zh" # Chinese
HPDF_LANG_ZU ="zu" # Zulu
#----------------------------------------------------------------------------
#----- Graphics mode --------------------------------------------------------
HPDF_GMODE_PAGE_DESCRIPTION =0x0001
HPDF_GMODE_PATH_OBJECT =0x0002
HPDF_GMODE_TEXT_OBJECT =0x0004
HPDF_GMODE_CLIPPING_PATH =0x0008
HPDF_GMODE_SHADING =0x0010
HPDF_GMODE_INLINE_IMAGE =0x0020
HPDF_GMODE_EXTERNAL_OBJECT =0x0040
| Python |
###
## * << Haru Free PDF Library 2.0.3 >> -- hpdf_types.h
## *
## * URL http://libharu.org/
## *
## * Copyright (c) 1999-2006 Takeshi Kanno
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
from ctypes import *
#----------------------------------------------------------------------------
#----- type definition ------------------------------------------------------
# native OS integer types
HPDF_INT=c_int
HPDF_UINT=c_uint
# 32bit integer types
HPDF_INT32=c_int
HPDF_UINT32=c_uint
# 16bit integer types
HPDF_INT16=c_short
HPDF_UINT16=c_ushort
# 8bit integer types
HPDF_INT8=c_char
HPDF_UINT8=c_ubyte
# 8bit binary types
HPDF_BYTE=c_ubyte
# float type (32bit IEEE754)
HPDF_REAL=c_float
# double type (64bit IEEE754)
HPDF_DOUBLE=c_double
# boolean type (0: False, !0: True)
HPDF_BOOL=c_int
# error-no type (32bit unsigned integer)
HPDF_STATUS=c_ulong
# character-code type (16bit)
HPDF_CID=HPDF_UINT16
HPDF_UNICODE=HPDF_UINT16
# HPDF_Point struct
class _HPDF_Point(Structure):
    """2-D point (x, y) in PDF user-space units; mirrors C struct HPDF_Point."""
    _fields_ = [('x', HPDF_REAL), ('y', HPDF_REAL)]

# Public alias matching the C typedef name.
HPDF_Point = _HPDF_Point
class _HPDF_Rect(Structure):
    """Axis-aligned rectangle (left, bottom, right, top); C struct HPDF_Rect."""
    _fields_ = [('left', HPDF_REAL), ('bottom', HPDF_REAL),
                ('right', HPDF_REAL), ('top', HPDF_REAL)]

# C typedef aliases; HPDF_Box shares the HPDF_Rect layout.
HPDF_Rect = _HPDF_Rect
HPDF_Box = _HPDF_Rect
# HPDF_Date struct
class _HPDF_Date(Structure):
_fields_=[
('year', HPDF_INT),
('month', HPDF_INT),
('day', HPDF_INT),
('hour', HPDF_INT),
('minutes', HPDF_INT),
('seconds', HPDF_INT),
('ind', c_char),
('off_hour', HPDF_INT),
('off_minutes', HPDF_INT),
]
HPDF_Date = _HPDF_Date
#enum starts
## date-time type parameters
HPDF_INFO_CREATION_DATE =0
HPDF_INFO_MOD_DATE =1
## string type parameters
HPDF_INFO_AUTHOR =2
HPDF_INFO_CREATOR =3
HPDF_INFO_PRODUCER =4
HPDF_INFO_TITLE =5
HPDF_INFO_SUBJECT =6
HPDF_INFO_KEYWORDS =7
HPDF_INFO_EOF =8
HPDF_InfoType =c_int
#enum ends
#enum starts
HPDF_VER_12 =0
HPDF_VER_13 =1
HPDF_VER_14 =2
HPDF_VER_15 =3
HPDF_VER_16 =4
HPDF_VER_EOF =5
HPDF_PDFVer =c_int
#enum ends
#enum starts
HPDF_ENCRYPT_R2 =2
HPDF_ENCRYPT_R3 =3
HPDF_EncryptMode =c_int
#enum ends
##typedef void
##(HPDF_STDCALL *HPDF_Error_Handler) (HPDF_STATUS error_no,
## HPDF_STATUS detail_no,
## void *user_data)
def HPDF_Error_Handler(restype, error_no, detail_no, user_data):
    """Build the ctypes callback type for libharu's error handler:

        void (HPDF_STDCALL *HPDF_Error_Handler)(HPDF_STATUS error_no,
                                                HPDF_STATUS detail_no,
                                                void *user_data)
    """
    return CFUNCTYPE(restype, error_no, detail_no, user_data)
#typedef void*
#(HPDF_STDCALL *HPDF_Alloc_Func) (HPDF_UINT size)
# Callback type for a custom memory allocator
HPDF_Alloc_Func=CFUNCTYPE(c_void_p,
    HPDF_UINT, #size
    )
#typedef void
#(HPDF_STDCALL *HPDF_Free_Func) (void *aptr)
# Callback type for the matching deallocator
HPDF_Free_Func=CFUNCTYPE(None,
    c_void_p, #aptr
    )
#---------------------------------------------------------------------------
#------ text width struct --------------------------------------------------
class _HPDF_TextWidth(Structure):
    """Result of a text-width measurement -- mirrors C struct HPDF_TextWidth."""
    _fields_=[
    ('numchars', HPDF_UINT),
    # don't use this value (it may be change in the feature).
    # use numspace as alternated.
    ('numwords', HPDF_UINT),
    ('width', HPDF_UINT),
    ('numspace', HPDF_UINT),
    ]
HPDF_TextWidth = _HPDF_TextWidth
#---------------------------------------------------------------------------
#------ dash mode ----------------------------------------------------------
class _HPDF_DashMode(Structure):
    """Dash pattern for stroked lines (up to 8 pattern entries)."""
    _fields_=[
    ('ptn', HPDF_UINT16*8),
    ('num_ptn', HPDF_UINT),
    ('phase', HPDF_UINT),
    ]
HPDF_DashMode = _HPDF_DashMode
#---------------------------------------------------------------------------
#----- HPDF_TransMatrix struct ---------------------------------------------
class _HPDF_TransMatrix(Structure):
    """2D affine transformation matrix [a b c d x y] -- PDF CTM layout."""
    _fields_=[
    ('a', HPDF_REAL),
    ('b', HPDF_REAL),
    ('c', HPDF_REAL),
    ('d', HPDF_REAL),
    ('x', HPDF_REAL),
    ('y', HPDF_REAL),
    ]
HPDF_TransMatrix = _HPDF_TransMatrix
#---------------------------------------------------------------------------
#enum starts
# HPDF_ColorSpace: supported PDF color spaces
HPDF_CS_DEVICE_GRAY =0
HPDF_CS_DEVICE_RGB =1
HPDF_CS_DEVICE_CMYK =2
HPDF_CS_CAL_GRAY =3
HPDF_CS_CAL_RGB =4
HPDF_CS_LAB =5
HPDF_CS_ICC_BASED =6
HPDF_CS_SEPARATION =7
HPDF_CS_DEVICE_N =8
HPDF_CS_INDEXED =9
HPDF_CS_PATTERN =10
HPDF_CS_EOF =11
HPDF_ColorSpace =c_int
#enum ends
#---------------------------------------------------------------------------
#----- HPDF_RGBColor struct ------------------------------------------------
class _HPDF_RGBColor(Structure):
    """RGB color triple -- mirrors C struct HPDF_RGBColor."""
    _fields_=[
    ('r', HPDF_REAL),
    ('g', HPDF_REAL),
    ('b', HPDF_REAL),
    ]
HPDF_RGBColor = _HPDF_RGBColor
#---------------------------------------------------------------------------
#----- HPDF_CMYKColor struct -----------------------------------------------
class _HPDF_CMYKColor(Structure):
    """CMYK color quadruple -- mirrors C struct HPDF_CMYKColor."""
    _fields_=[
    ('c', HPDF_REAL),
    ('m', HPDF_REAL),
    ('y', HPDF_REAL),
    ('k', HPDF_REAL),
    ]
HPDF_CMYKColor=_HPDF_CMYKColor
#---------------------------------------------------------------------------
#------ The line cap style -------------------------------------------------
#enum starts
HPDF_BUTT_END =0
HPDF_ROUND_END =1
HPDF_PROJECTING_SCUARE_END =2
HPDF_LINECAP_EOF =3
HPDF_LineCap =c_int
#enum ends
#----------------------------------------------------------------------------
#------ The line join style -------------------------------------------------
#enum starts
HPDF_MITER_JOIN =0
HPDF_ROUND_JOIN =1
HPDF_BEVEL_JOIN =2
HPDF_LINEJOIN_EOF =3
HPDF_LineJoin =c_int
#enum ends
#----------------------------------------------------------------------------
#------ The text rendering mode ---------------------------------------------
#enum starts
HPDF_FILL =0
HPDF_STROKE =1
HPDF_FILL_THEN_STROKE =2
HPDF_INVISIBLE =3
HPDF_FILL_CLIPPING =4
HPDF_STROKE_CLIPPING =5
HPDF_FILL_STROKE_CLIPPING =6
HPDF_CLIPPING =7
HPDF_RENDERING_MODE_EOF =8
HPDF_TextRenderingMode =c_int
#enum ends
#enum starts
# HPDF_WritingMode: horizontal or vertical text layout
HPDF_WMODE_HORIZONTAL =0
HPDF_WMODE_VERTICAL =1
HPDF_WMODE_EOF =2
HPDF_WritingMode =c_int
#enum ends
#enum starts
# HPDF_PageLayout: how a viewer lays out pages when the document opens
HPDF_PAGE_LAYOUT_SINGLE =0
HPDF_PAGE_LAYOUT_ONE_COLUMN =1
HPDF_PAGE_LAYOUT_TWO_COLUMN_LEFT =2
HPDF_PAGE_LAYOUT_TWO_COLUMN_RIGHT =3
HPDF_PAGE_LAYOUT_EOF =4
HPDF_PageLayout =c_int
#enum ends
#enum starts
# HPDF_PageMode: which viewer panel is shown when the document opens
HPDF_PAGE_MODE_USE_NONE =0
HPDF_PAGE_MODE_USE_OUTLINE =1
HPDF_PAGE_MODE_USE_THUMBS =2
HPDF_PAGE_MODE_FULL_SCREEN =3
HPDF_PAGE_MODE_USE_OC =4 #???
HPDF_PAGE_MODE_USE_ATTACHMENTS =5 #???
HPDF_PAGE_MODE_EOF =6
HPDF_PageMode =c_int
#enum ends
#enum starts
# HPDF_PageNumStyle: numbering style for page labels
HPDF_PAGE_NUM_STYLE_DECIMAL =0
HPDF_PAGE_NUM_STYLE_UPPER_ROMAN =1
HPDF_PAGE_NUM_STYLE_LOWER_ROMAN =2
HPDF_PAGE_NUM_STYLE_UPPER_LETTERS =3
HPDF_PAGE_NUM_STYLE_LOWER_LETTERS =4
HPDF_PAGE_NUM_STYLE_EOF =5
HPDF_PageNumStyle =c_int
#enum ends
#enum starts
# HPDF_DestinationType: fit mode of a link/outline destination
HPDF_XYZ =0
HPDF_FIT =1
HPDF_FIT_H =2
HPDF_FIT_V =3
HPDF_FIT_R =4
HPDF_FIT_B =5
HPDF_FIT_BH =6
HPDF_FIT_BV =7
HPDF_DST_EOF =8
HPDF_DestinationType =c_int
#enum ends
#enum starts
# HPDF_AnnotType: annotation subtypes
HPDF_ANNOT_TEXT_NOTES =0
HPDF_ANNOT_LINK =1
HPDF_ANNOT_SOUND =2
HPDF_ANNOT_FREE_TEXT =3
HPDF_ANNOT_STAMP =4
HPDF_ANNOT_SQUARE =5
HPDF_ANNOT_CIRCLE =6
HPDF_ANNOT_STRIKE_OUT =7
HPDF_ANNOT_HIGHTLIGHT =8
HPDF_ANNOT_UNDERLINE =9
HPDF_ANNOT_INK =10
HPDF_ANNOT_FILE_ATTACHMENT =11
HPDF_ANNOT_POPUP =12
HPDF_AnnotType =c_int
#enum ends
#enum starts
# HPDF_AnnotFlgs: annotation flag bit positions
HPDF_ANNOT_INVISIBLE =0
HPDF_ANNOT_HIDDEN =1
HPDF_ANNOT_PRINT =2
HPDF_ANNOT_NOZOOM =3
HPDF_ANNOT_NOROTATE =4
HPDF_ANNOT_NOVIEW =5
HPDF_ANNOT_READONLY =6
HPDF_AnnotFlgs =c_int
#enum ends
#enum starts
# HPDF_AnnotHighlightMode: visual feedback when a link annotation is clicked
HPDF_ANNOT_NO_HIGHTLIGHT =0
HPDF_ANNOT_INVERT_BOX =1
HPDF_ANNOT_INVERT_BORDER =2
HPDF_ANNOT_DOWN_APPEARANCE =3
HPDF_ANNOT_HIGHTLIGHT_MODE_EOF =4
HPDF_AnnotHighlightMode =c_int
#enum ends
#enum starts
# HPDF_AnnotIcon: icon shown for text annotations
HPDF_ANNOT_ICON_COMMENT =0
HPDF_ANNOT_ICON_KEY =1
HPDF_ANNOT_ICON_NOTE =2
HPDF_ANNOT_ICON_HELP =3
HPDF_ANNOT_ICON_NEW_PARAGRAPH =4
HPDF_ANNOT_ICON_PARAGRAPH =5
HPDF_ANNOT_ICON_INSERT =6
HPDF_ANNOT_ICON_EOF =7
HPDF_AnnotIcon =c_int
#enum ends
#----------------------------------------------------------------------------
#------ border style --------------------------------------------------------
#enum starts
HPDF_BS_SOLID =0
HPDF_BS_DASHED =1
HPDF_BS_BEVELED =2
HPDF_BS_INSET =3
HPDF_BS_UNDERLINED =4
HPDF_BSSubtype =c_int
#enum ends
#----- blend modes ----------------------------------------------------------
#enum starts
HPDF_BM_NORMAL =0
HPDF_BM_MULTIPLY =1
HPDF_BM_SCREEN =2
HPDF_BM_OVERLAY =3
HPDF_BM_DARKEN =4
HPDF_BM_LIGHTEN =5
HPDF_BM_COLOR_DODGE =6
HPDF_BM_COLOR_BUM =7
HPDF_BM_HARD_LIGHT =8
HPDF_BM_SOFT_LIGHT =9
HPDF_BM_DIFFERENCE =10
HPDF_BM_EXCLUSHON =11
HPDF_BM_EOF =12
HPDF_BlendMode =c_int
#enum ends
#----- slide show -----------------------------------------------------------
#enum starts
HPDF_TS_WIPE_RIGHT =0
HPDF_TS_WIPE_UP =1
HPDF_TS_WIPE_LEFT =2
HPDF_TS_WIPE_DOWN =3
HPDF_TS_BARN_DOORS_HORIZONTAL_OUT =4
HPDF_TS_BARN_DOORS_HORIZONTAL_IN =5
HPDF_TS_BARN_DOORS_VERTICAL_OUT =6
HPDF_TS_BARN_DOORS_VERTICAL_IN =7
HPDF_TS_BOX_OUT =8
HPDF_TS_BOX_IN =9
HPDF_TS_BLINDS_HORIZONTAL =10
HPDF_TS_BLINDS_VERTICAL =11
HPDF_TS_DISSOLVE =12
HPDF_TS_GLITTER_RIGHT =13
HPDF_TS_GLITTER_DOWN =14
HPDF_TS_GLITTER_TOP_LEFT_TO_BOTTOM_RIGHT =15
HPDF_TS_REPLACE =16
HPDF_TS_EOF =17
HPDF_TransitionStyle =c_int
#enum ends
#----------------------------------------------------------------------------
#enum starts
# HPDF_PageSizes: predefined page sizes
HPDF_PAGE_SIZE_LETTER =0
HPDF_PAGE_SIZE_LEGAL =1
HPDF_PAGE_SIZE_A3 =2
HPDF_PAGE_SIZE_A4 =3
HPDF_PAGE_SIZE_A5 =4
HPDF_PAGE_SIZE_B4 =5
HPDF_PAGE_SIZE_B5 =6
HPDF_PAGE_SIZE_EXECUTIVE =7
HPDF_PAGE_SIZE_US4x6 =8
HPDF_PAGE_SIZE_US4x8 =9
HPDF_PAGE_SIZE_US5x7 =10
HPDF_PAGE_SIZE_COMM10 =11
HPDF_PAGE_SIZE_EOF =12
HPDF_PageSizes =c_int
#enum ends
#enum starts
# HPDF_PageDirection: page orientation
HPDF_PAGE_PORTRAIT =0
HPDF_PAGE_LANDSCAPE =1
HPDF_PageDirection =c_int
#enum ends
#enum starts
# HPDF_EncoderType: single/double byte encoder classification
HPDF_ENCODER_TYPE_SINGLE_BYTE =0
HPDF_ENCODER_TYPE_DOUBLE_BYTE =1
HPDF_ENCODER_TYPE_UNINITIALIZED =2
HPDF_ENCODER_UNKNOWN =3
HPDF_EncoderType =c_int
#enum ends
#enum starts
# HPDF_ByteType: role of a byte inside multi-byte text
HPDF_BYTE_TYPE_SINGLE =0
HPDF_BYTE_TYPE_LEAD =1
HPDF_BYTE_TYPE_TRIAL =2
HPDF_BYTE_TYPE_UNKNOWN =3
HPDF_ByteType =c_int
#enum ends
#enum starts
# HPDF_TextAlignment: alignment for HPDF_Page_TextRect
HPDF_TALIGN_LEFT =0
HPDF_TALIGN_RIGHT =1
HPDF_TALIGN_CENTER =2
HPDF_TALIGN_JUSTIFY =3
HPDF_TextAlignment =c_int
#enum ends
| Python |
# Map from libharu HPDF_STATUS error codes to human-readable descriptions
# (taken from the libharu error-code table; values are runtime strings and
# must match the original byte-for-byte).
error_detail={
    0x1001: 'Internal error. The consistency of the data was lost.',
    0x1002: 'Internal error. The consistency of the data was lost.',
    0x1003: 'Internal error. The consistency of the data was lost.',
    0x1004: 'The length of the data exceeds HPDF_LIMIT_MAX_STRING_LEN.',
    0x1005: 'Cannot get a pallet data from PNG image.',
    0x1007: 'The count of elements of a dictionary exceeds HPDF_LIMIT_MAX_DICT_ELEMENT',
    0x1008: 'Internal error. The consistency of the data was lost.',
    0x1009: 'Internal error. The consistency of the data was lost.',
    0x100A: 'Internal error. The consistency of the data was lost.',
    0x100B: 'HPDF_SetPermission() OR HPDF_SetEncryptMode() was called before a password is set.',
    0x100C: 'Internal error. The consistency of the data was lost.',
    0x100E: 'Tried to register a font that has been registered.',
    0x100F: 'Cannot register a character to the japanese word wrap characters list.',
    0x1011: 'Tried to set the owner password to NULL.\nThe owner password and user password is the same.',
    0x1013: 'Internal error. The consistency of the data was lost.',
    0x1014: 'The depth of the stack exceeded HPDF_LIMIT_MAX_GSTATE.',
    0x1015: 'Memory allocation failed.',
    0x1016: 'File processing failed. (A detailed code is set.)',
    0x1017: 'Cannot open a file. (A detailed code is set.)',
    0x1019: 'Tried to load a font that has been registered.',
    0x101A: 'The format of a font-file is invalid .\nInternal error. The consistency of the data was lost.',
    0x101B: 'Cannot recognize a header of an afm file.',
    0x101C: 'The specified annotation handle is invalid.',
    0x101E: 'Bit-per-component of a image which was set as mask-image is invalid.',
    0x101F: 'Cannot recognize char-matrics-data of an afm file.',
    0x1020: '1. The color_space parameter of HPDF_LoadRawImage is invalid.\n2. Color-space of a image which was set as mask-image is invalid.\n3. The function which is invalid in the present color-space was invoked.',
    0x1021: 'Invalid value was set when invoking HPDF_SetCommpressionMode().',
    0x1022: 'An invalid date-time value was set.',
    0x1023: 'An invalid destination handle was set.',
    0x1025: 'An invalid document handle is set.',
    0x1026: 'The function which is invalid in the present state was invoked.',
    0x1027: 'An invalid encoder handle is set.',
    0x1028: 'A combination between font and encoder is wrong.',
    0x102B: 'An Invalid encoding name is specified.',
    0x102C: 'The lengh of the key of encryption is invalid.',
    0x102D: '1. An invalid font handle was set.\n2. Unsupported font format.',
    0x102E: 'Internal error. The consistency of the data was lost.',
    0x102F: 'A font which has the specified name is not found.',
    0x1030: 'Unsupported image format.',
    0x1031: 'Unsupported image format.',
    0x1032: 'Cannot read a postscript-name from an afm file.',
    0x1033: '1. An invalid object is set.\n2. Internal error. The consistency of the data was lost.',
    0x1034: 'Internal error. The consistency of the data was lost.',
    0x1035: '1. Invoked HPDF_Image_SetColorMask() against the image-object which was set a mask-image.',
    0x1036: 'An invalid outline-handle was specified.',
    0x1037: 'An invalid page-handle was specified.',
    0x1038: 'An invalid pages-handle was specified. (internel error)',
    0x1039: 'An invalid value is set.',
    0x103B: 'Invalid PNG image format.',
    0x103C: 'Internal error. The consistency of the data was lost.',
    0x103D: 'Internal error. The "_FILE_NAME" entry for delayed loading is missing.',
    0x103F: 'Invalid .TTC file format.',
    0x1040: 'The index parameter was exceed the number of included fonts',
    0x1041: 'Cannot read a width-data from an afm file.',
    0x1042: 'Internal error. The consistency of the data was lost.',
    0x1043: 'An error has returned from PNGLIB while loading an image.',
    0x1044: 'Internal error. The consistency of the data was lost.',
    0x1045: 'Internal error. The consistency of the data was lost.',
    0x1049: 'Internal error. The consistency of the data was lost.',
    0x104A: 'Internal error. The consistency of the data was lost.',
    0x104B: 'Internal error. The consistency of the data was lost.',
    0x104C: 'There are no graphics-states to be restored.',
    0x104D: 'Internal error. The consistency of the data was lost.',
    0x104E: 'The current font is not set.',
    0x104F: 'An invalid font-handle was spacified.',
    0x1050: 'An invalid font-size was set.',
    0x1051: 'See Graphics mode.',
    0x1052: 'Internal error. The consistency of the data was lost.',
    0x1053: 'The specified value is not a multiple of 90.',
    0x1054: 'An invalid page-size was set.',
    0x1055: 'An invalid image-handle was set.',
    0x1056: 'The specified value is out of range.',
    0x1057: 'The specified value is out of range.',
    0x1058: 'Unexpected EOF marker was detected.',
    0x1059: 'Internal error. The consistency of the data was lost.',
    0x105B: 'The length of the specified text is too long.',
    0x105C: 'The execution of a function was skipped because of other errors.',
    0x105D: 'This font cannot be embedded. (restricted by license)',
    0x105E: 'Unsupported ttf format. (cannot find unicode cmap.)',
    0x105F: 'Unsupported ttf format.',
    0x1060: 'Unsupported ttf format. (cannot find a necessary table) ',
    0x1061: 'Internal error. The consistency of the data was lost.',
    0x1062: '1. The library is not configured to use PNGLIB.\n2. Internal error. The consistency of the data was lost.',
    0x1063: 'Unsupported Jpeg format.',
    0x1064: 'Failed to parse .PFB file.',
    0x1065: 'Internal error. The consistency of the data was lost.',
    0x1066: 'An error has occurred while executing a function of Zlib.',
    0x1067: 'An error returned from Zlib.',
    0x1068: 'An invalid URI was set.',
    0x1069: 'An invalid page-layout was set.',
    0x1070: 'An invalid page-mode was set.',
    0x1071: 'An invalid page-num-style was set.',
    0x1072: 'An invalid icon was set.',
    0x1073: 'An invalid border-style was set.',
    0x1074: 'An invalid page-direction was set.',
    0x1075: 'An invalid font-handle was specified.',
    }
def printf(format, *optional):
    """C-style printf: write ``format % optional`` plus a newline to stdout.

    A single extra argument is unpacked first, so both ``printf("%s", x)``
    and ``printf("%s %s", a, b)`` work.  A trailing newline in *format* is
    stripped before formatting, matching the old Python 2 ``print``
    statement behaviour (exactly one newline is appended per call).

    Fix: the original used the Py2-only ``print`` statement; this version
    is portable to Python 3 while producing identical output.
    """
    import sys  # local import keeps this block self-contained
    if len(optional) == 1:
        optional = optional[0]
    if format.endswith('\n'):
        format = format[:-1]
    sys.stdout.write(format % optional + '\n')
| Python |
##
## * << Haru Free PDF Library 2.0.8 >> -- hpdf.h
## *
## * URL http://libharu.org/
## *
## * Copyright (c) 1999-2006 Takeshi Kanno
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os
import sys
import types
def setpath():
    """Prepend the bundled ``dll`` directory (next to this module) to the
    PATH environment variable so the dynamic loader can find libhpdf.

    Idempotent: the directory is only prepended if not already present.

    Fix: the original joined PATH entries with a hard-coded ';' which is
    correct only on Windows; ``os.pathsep`` is ';' there and ':' on POSIX,
    so behaviour on Windows is unchanged and PATH is no longer corrupted
    on other platforms.
    """
    dllpath = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'dll')
    if 'PATH' in os.environ:
        if dllpath not in os.environ['PATH']:
            os.environ['PATH'] = '%s%s%s' % (dllpath, os.pathsep, os.environ['PATH'])
    else:
        os.environ['PATH'] = dllpath
setpath()
from hpdf_consts import *
from hpdf_types import *
# Load the libharu shared library; name differs by platform.
if os.sys.platform=='win32':
    harudll='libhpdf.dll'
    #haru=WinDLL(harudll)
    haru=CDLL(harudll)
else:
    harudll='libhpdf.so'
    haru=CDLL(harudll)
# All libharu object handles are opaque pointers on the Python side.
HPDF_HANDLE=c_void_p
HPDF_Doc=HPDF_HANDLE
HPDF_Page=HPDF_HANDLE
HPDF_Pages=HPDF_HANDLE
HPDF_Stream=HPDF_HANDLE
HPDF_Image=HPDF_HANDLE
HPDF_Font=HPDF_HANDLE
HPDF_Outline=HPDF_HANDLE
HPDF_Encoder=HPDF_HANDLE
HPDF_Destination=HPDF_HANDLE
HPDF_XObject=HPDF_HANDLE
HPDF_Annotation=HPDF_HANDLE
HPDF_ExtGState=HPDF_HANDLE
# --- document lifecycle bindings (C prototype precedes each binding) ------
#const char * HPDF_GetVersion (void)
HPDF_GetVersion=haru.HPDF_GetVersion
HPDF_GetVersion.restype=c_char_p
#HPDF_Doc HPDF_NewEx (HPDF_Error_Handler user_error_fn, HPDF_Alloc_Func user_alloc_fn, HPDF_Free_Func user_free_fn, HPDF_UINT mem_pool_buf_size, void *user_data)
HPDF_NewEx=haru.HPDF_NewEx
HPDF_NewEx.restype=HPDF_Doc
#HPDF_Doc HPDF_New (HPDF_Error_Handler user_error_fn, void *user_data)
HPDF_New=haru.HPDF_New
HPDF_New.restype=HPDF_Doc
#HPDF_STATUS HPDF_SetErrorHandler (HPDF_Doc pdf, HPDF_Error_Handler user_error_fn)
HPDF_SetErrorHandler=haru.HPDF_SetErrorHandler
HPDF_SetErrorHandler.restype=HPDF_STATUS
#void HPDF_Free (HPDF_Doc pdf)
HPDF_Free=haru.HPDF_Free
HPDF_Free.restype=None
#HPDF_STATUS HPDF_NewDoc (HPDF_Doc pdf)
HPDF_NewDoc=haru.HPDF_NewDoc
HPDF_NewDoc.restype=HPDF_STATUS
#void HPDF_FreeDoc (HPDF_Doc pdf)
HPDF_FreeDoc=haru.HPDF_FreeDoc
HPDF_FreeDoc.restype=None
#HPDF_BOOL HPDF_HasDoc (HPDF_Doc pdf)
HPDF_HasDoc=haru.HPDF_HasDoc
HPDF_HasDoc.restype=HPDF_BOOL
#void HPDF_FreeDocAll (HPDF_Doc pdf)
HPDF_FreeDocAll=haru.HPDF_FreeDocAll
HPDF_FreeDocAll.restype=None
#HPDF_STATUS HPDF_SaveToStream (HPDF_Doc pdf)
HPDF_SaveToStream=haru.HPDF_SaveToStream
HPDF_SaveToStream.restype=HPDF_STATUS
#HPDF_UINT32 HPDF_GetStreamSize (HPDF_Doc pdf)
HPDF_GetStreamSize=haru.HPDF_GetStreamSize
HPDF_GetStreamSize.restype=HPDF_UINT32
#HPDF_STATUS HPDF_ReadFromStream (HPDF_Doc pdf, HPDF_BYTE *buf, HPDF_UINT32 *size)
_HPDF_ReadFromStream=haru.HPDF_ReadFromStream
_HPDF_ReadFromStream.restype=HPDF_STATUS
def HPDF_ReadFromStream(
    pdf, #HPDF_Doc
    buf, #POINTER(HPDF_BYTE), or a Python list/tuple of byte values
    size, #POINTER(HPDF_UINT32); recomputed when buf is a Python sequence
    ):
    """Read document stream data into buf (wraps HPDF_ReadFromStream).

    When buf is a plain Python list/tuple it is copied into a ctypes byte
    array and size is recomputed from its length.

    Fix: replaces the Py2-only ``types.ListType/TupleType`` membership
    check with ``isinstance`` so the wrapper also works on Python 3.
    """
    if isinstance(buf, (list, tuple)):
        size = len(buf)
        buf = pointer((HPDF_BYTE * size)(*buf))
        size = HPDF_UINT32(int(size))
    return _HPDF_ReadFromStream(
        pdf, #HPDF_Doc
        buf, #POINTER(HPDF_BYTE)
        size, #POINTER(HPDF_UINT32)
        )
# --- stream reset / save-to-file / error query bindings -------------------
#HPDF_STATUS HPDF_ResetStream (HPDF_Doc pdf)
HPDF_ResetStream=haru.HPDF_ResetStream
HPDF_ResetStream.restype=HPDF_STATUS
#HPDF_STATUS HPDF_SaveToFile (HPDF_Doc pdf, const char *file_name)
HPDF_SaveToFile=haru.HPDF_SaveToFile
HPDF_SaveToFile.restype=HPDF_STATUS
#HPDF_STATUS HPDF_GetError (HPDF_Doc pdf)
HPDF_GetError=haru.HPDF_GetError
HPDF_GetError.restype=HPDF_STATUS
#HPDF_STATUS HPDF_GetErrorDetail (HPDF_Doc pdf)
HPDF_GetErrorDetail=haru.HPDF_GetErrorDetail
HPDF_GetErrorDetail.restype=HPDF_STATUS
#void HPDF_ResetError (HPDF_Doc pdf)
HPDF_ResetError=haru.HPDF_ResetError
HPDF_ResetError.restype=None
#HPDF_STATUS HPDF_SetPagesConfiguration (HPDF_Doc pdf, HPDF_UINT page_per_pages)
_HPDF_SetPagesConfiguration = haru.HPDF_SetPagesConfiguration
_HPDF_SetPagesConfiguration.restype = HPDF_STATUS
def HPDF_SetPagesConfiguration(pdf, page_per_pages):
    """Set the number of pages per Pages object (wraps HPDF_SetPagesConfiguration)."""
    count = HPDF_UINT(int(page_per_pages))
    return _HPDF_SetPagesConfiguration(pdf, count)
#HPDF_Page HPDF_GetPageByIndex (HPDF_Doc pdf, HPDF_UINT index)
HPDF_GetPageByIndex=haru.HPDF_GetPageByIndex
HPDF_GetPageByIndex.restype=HPDF_Page
#---------------------------------------------------------------------------
#----- viewer preferences / page layout & mode -----------------------------
#HPDF_PageLayout HPDF_GetPageLayout (HPDF_Doc pdf)
HPDF_GetPageLayout=haru.HPDF_GetPageLayout
HPDF_GetPageLayout.restype=HPDF_PageLayout
#HPDF_STATUS HPDF_SetPageLayout (HPDF_Doc pdf, HPDF_PageLayout layout)
HPDF_SetPageLayout=haru.HPDF_SetPageLayout
HPDF_SetPageLayout.restype=HPDF_STATUS
#HPDF_PageMode HPDF_GetPageMode (HPDF_Doc pdf)
HPDF_GetPageMode=haru.HPDF_GetPageMode
HPDF_GetPageMode.restype=HPDF_PageMode
#HPDF_STATUS HPDF_SetPageMode (HPDF_Doc pdf, HPDF_PageMode mode)
HPDF_SetPageMode=haru.HPDF_SetPageMode
HPDF_SetPageMode.restype=HPDF_STATUS
#HPDF_UINT HPDF_GetViewerPreference (HPDF_Doc pdf)
HPDF_GetViewerPreference=haru.HPDF_GetViewerPreference
HPDF_GetViewerPreference.restype=HPDF_UINT
#HPDF_STATUS HPDF_SetViewerPreference (HPDF_Doc pdf, HPDF_UINT value)
HPDF_SetViewerPreference=haru.HPDF_SetViewerPreference
HPDF_SetViewerPreference.restype=HPDF_STATUS
#HPDF_STATUS HPDF_SetOpenAction (HPDF_Doc pdf, HPDF_Destination open_action)
HPDF_SetOpenAction=haru.HPDF_SetOpenAction
HPDF_SetOpenAction.restype=HPDF_STATUS
#---------------------------------------------------------------------------
#----- page handling -------------------------------------------------------
#HPDF_Page HPDF_GetCurrentPage (HPDF_Doc pdf)
HPDF_GetCurrentPage=haru.HPDF_GetCurrentPage
HPDF_GetCurrentPage.restype=HPDF_Page
#HPDF_Page HPDF_AddPage (HPDF_Doc pdf)
HPDF_AddPage=haru.HPDF_AddPage
HPDF_AddPage.restype=HPDF_Page
#HPDF_Page HPDF_InsertPage (HPDF_Doc pdf, HPDF_Page page)
HPDF_InsertPage=haru.HPDF_InsertPage
HPDF_InsertPage.restype=HPDF_Page
#HPDF_STATUS HPDF_Page_SetWidth (HPDF_Page page, HPDF_REAL value)
_HPDF_Page_SetWidth = haru.HPDF_Page_SetWidth
_HPDF_Page_SetWidth.restype = HPDF_STATUS
def HPDF_Page_SetWidth(page, value):
    """Set the page width; value is coerced to HPDF_REAL before the C call."""
    return _HPDF_Page_SetWidth(page, HPDF_REAL(value))
#HPDF_STATUS HPDF_Page_SetHeight (HPDF_Page page, HPDF_REAL value)
_HPDF_Page_SetHeight = haru.HPDF_Page_SetHeight
_HPDF_Page_SetHeight.restype = HPDF_STATUS
def HPDF_Page_SetHeight(page, value):
    """Set the page height; value is coerced to HPDF_REAL before the C call."""
    return _HPDF_Page_SetHeight(page, HPDF_REAL(value))
#HPDF_STATUS
#HPDF_Page_SetSize (HPDF_Page page,
#                   HPDF_PageSizes size,
#                   HPDF_PageDirection direction);
HPDF_Page_SetSize=haru.HPDF_Page_SetSize
HPDF_Page_SetSize.restype=HPDF_STATUS
#HPDF_STATUS HPDF_Page_SetRotate (HPDF_Page page, HPDF_UINT16 angle)
_HPDF_Page_SetRotate = haru.HPDF_Page_SetRotate
_HPDF_Page_SetRotate.restype = HPDF_STATUS
def HPDF_Page_SetRotate(page, angle):
    """Set page rotation; libharu requires angle to be a multiple of 90."""
    return _HPDF_Page_SetRotate(page, HPDF_UINT16(int(angle)))
#---------------------------------------------------------------------------
#----- font handling -------------------------------------------------------
#HPDF_Font HPDF_GetFont (HPDF_Doc pdf, const char *font_name, const char *encoding_name)
HPDF_GetFont=haru.HPDF_GetFont
HPDF_GetFont.restype=HPDF_Font
#const char* HPDF_LoadType1FontFromFile (HPDF_Doc pdf, const char *afm_file_name, const char *data_file_name)
HPDF_LoadType1FontFromFile=haru.HPDF_LoadType1FontFromFile
HPDF_LoadType1FontFromFile.restype=c_char_p
#const char* HPDF_LoadTTFontFromFile (HPDF_Doc pdf, const char *file_name, HPDF_BOOL embedding)
HPDF_LoadTTFontFromFile=haru.HPDF_LoadTTFontFromFile
HPDF_LoadTTFontFromFile.restype=c_char_p
#const char* HPDF_LoadTTFontFromFile2 (HPDF_Doc pdf, const char *file_name, HPDF_UINT index, HPDF_BOOL embedding)
HPDF_LoadTTFontFromFile2=haru.HPDF_LoadTTFontFromFile2
HPDF_LoadTTFontFromFile2.restype=c_char_p
#HPDF_STATUS HPDF_AddPageLabel (HPDF_Doc pdf, HPDF_UINT page_num, HPDF_PageNumStyle style, HPDF_UINT first_page, const char *prefix)
_HPDF_AddPageLabel = haru.HPDF_AddPageLabel
_HPDF_AddPageLabel.restype = HPDF_STATUS
def HPDF_AddPageLabel(pdf, page_num, style, first_page, prefix):
    """Attach a page-label numbering range (wraps HPDF_AddPageLabel).

    page_num and first_page are coerced to HPDF_UINT; style is an
    HPDF_PageNumStyle value and prefix a byte string (or None).
    """
    page_num = HPDF_UINT(int(page_num))
    first_page = HPDF_UINT(int(first_page))
    return _HPDF_AddPageLabel(pdf, page_num, style, first_page, prefix)
# --- built-in CJK font activation ----------------------------------------
#HPDF_STATUS HPDF_UseJPFonts (HPDF_Doc pdf)
HPDF_UseJPFonts=haru.HPDF_UseJPFonts
HPDF_UseJPFonts.restype=HPDF_STATUS
#HPDF_STATUS HPDF_UseKRFonts (HPDF_Doc pdf)
HPDF_UseKRFonts=haru.HPDF_UseKRFonts
HPDF_UseKRFonts.restype=HPDF_STATUS
#HPDF_STATUS HPDF_UseCNSFonts (HPDF_Doc pdf)
HPDF_UseCNSFonts=haru.HPDF_UseCNSFonts
HPDF_UseCNSFonts.restype=HPDF_STATUS
#HPDF_STATUS HPDF_UseCNTFonts (HPDF_Doc pdf)
HPDF_UseCNTFonts=haru.HPDF_UseCNTFonts
HPDF_UseCNTFonts.restype=HPDF_STATUS
#--------------------------------------------------------------------------
#----- outline ------------------------------------------------------------
#HPDF_Outline HPDF_CreateOutline (HPDF_Doc pdf, HPDF_Outline parent, const char *title, HPDF_Encoder encoder)
HPDF_CreateOutline=haru.HPDF_CreateOutline
HPDF_CreateOutline.restype=HPDF_Outline
#HPDF_STATUS HPDF_Outline_SetOpened (HPDF_Outline outline, HPDF_BOOL opened)
HPDF_Outline_SetOpened=haru.HPDF_Outline_SetOpened
HPDF_Outline_SetOpened.restype=HPDF_STATUS
#HPDF_STATUS HPDF_Outline_SetDestination (HPDF_Outline outline, HPDF_Destination dst)
HPDF_Outline_SetDestination=haru.HPDF_Outline_SetDestination
HPDF_Outline_SetDestination.restype=HPDF_STATUS
#--------------------------------------------------------------------------
#----- destination --------------------------------------------------------
#HPDF_Destination HPDF_Page_CreateDestination (HPDF_Page page)
HPDF_Page_CreateDestination=haru.HPDF_Page_CreateDestination
HPDF_Page_CreateDestination.restype=HPDF_Destination
#HPDF_STATUS HPDF_Destination_SetXYZ (HPDF_Destination dst, HPDF_REAL left, HPDF_REAL top, HPDF_REAL zoom)
_HPDF_Destination_SetXYZ = haru.HPDF_Destination_SetXYZ
_HPDF_Destination_SetXYZ.restype = HPDF_STATUS
def HPDF_Destination_SetXYZ(dst, left, top, zoom):
    """Set the destination to XYZ mode (explicit position and zoom)."""
    coords = [HPDF_REAL(v) for v in (left, top, zoom)]
    return _HPDF_Destination_SetXYZ(dst, *coords)
#HPDF_STATUS HPDF_Destination_SetFit (HPDF_Destination dst)
HPDF_Destination_SetFit=haru.HPDF_Destination_SetFit
HPDF_Destination_SetFit.restype=HPDF_STATUS
#HPDF_STATUS HPDF_Destination_SetFitH (HPDF_Destination dst, HPDF_REAL top)
_HPDF_Destination_SetFitH = haru.HPDF_Destination_SetFitH
_HPDF_Destination_SetFitH.restype = HPDF_STATUS
def HPDF_Destination_SetFitH(dst, top):
    """Set the destination to FitH mode with the given top coordinate."""
    return _HPDF_Destination_SetFitH(dst, HPDF_REAL(top))
#HPDF_STATUS HPDF_Destination_SetFitV (HPDF_Destination dst, HPDF_REAL left)
_HPDF_Destination_SetFitV = haru.HPDF_Destination_SetFitV
_HPDF_Destination_SetFitV.restype = HPDF_STATUS
def HPDF_Destination_SetFitV(dst, left):
    """Set the destination to FitV mode with the given left coordinate."""
    return _HPDF_Destination_SetFitV(dst, HPDF_REAL(left))
#HPDF_STATUS HPDF_Destination_SetFitR (HPDF_Destination dst, HPDF_REAL left, HPDF_REAL bottom, HPDF_REAL right, HPDF_REAL top)
_HPDF_Destination_SetFitR = haru.HPDF_Destination_SetFitR
_HPDF_Destination_SetFitR.restype = HPDF_STATUS
def HPDF_Destination_SetFitR(dst, left, bottom, right, top):
    """Set the destination to FitR mode (fit the given rectangle)."""
    rect = [HPDF_REAL(v) for v in (left, bottom, right, top)]
    return _HPDF_Destination_SetFitR(dst, *rect)
#HPDF_STATUS HPDF_Destination_SetFitB (HPDF_Destination dst)
HPDF_Destination_SetFitB=haru.HPDF_Destination_SetFitB
HPDF_Destination_SetFitB.restype=HPDF_STATUS
#HPDF_STATUS HPDF_Destination_SetFitBH (HPDF_Destination dst, HPDF_REAL top)
_HPDF_Destination_SetFitBH = haru.HPDF_Destination_SetFitBH
_HPDF_Destination_SetFitBH.restype = HPDF_STATUS
def HPDF_Destination_SetFitBH(dst, top):
    """Set the destination to FitBH mode with the given top coordinate."""
    return _HPDF_Destination_SetFitBH(dst, HPDF_REAL(top))
#HPDF_STATUS HPDF_Destination_SetFitBV (HPDF_Destination dst, HPDF_REAL left)
_HPDF_Destination_SetFitBV = haru.HPDF_Destination_SetFitBV
_HPDF_Destination_SetFitBV.restype = HPDF_STATUS
def HPDF_Destination_SetFitBV(dst, left):
    """Set the destination to FitBV mode with the given left coordinate."""
    return _HPDF_Destination_SetFitBV(dst, HPDF_REAL(left))
#--------------------------------------------------------------------------
#----- encoder ------------------------------------------------------------
#HPDF_Encoder HPDF_GetEncoder (HPDF_Doc pdf, const char *encoding_name)
HPDF_GetEncoder=haru.HPDF_GetEncoder
HPDF_GetEncoder.restype=HPDF_Encoder
#HPDF_Encoder HPDF_GetCurrentEncoder (HPDF_Doc pdf)
HPDF_GetCurrentEncoder=haru.HPDF_GetCurrentEncoder
HPDF_GetCurrentEncoder.restype=HPDF_Encoder
#HPDF_STATUS HPDF_SetCurrentEncoder (HPDF_Doc pdf, const char *encoding_name)
HPDF_SetCurrentEncoder=haru.HPDF_SetCurrentEncoder
HPDF_SetCurrentEncoder.restype=HPDF_STATUS
#HPDF_EncoderType HPDF_Encoder_GetType (HPDF_Encoder encoder)
HPDF_Encoder_GetType=haru.HPDF_Encoder_GetType
HPDF_Encoder_GetType.restype=HPDF_EncoderType
#HPDF_ByteType HPDF_Encoder_GetByteType (HPDF_Encoder encoder, const char *text, HPDF_UINT index)
_HPDF_Encoder_GetByteType=haru.HPDF_Encoder_GetByteType
_HPDF_Encoder_GetByteType.restype=HPDF_ByteType
def HPDF_Encoder_GetByteType(
    encoder, #HPDF_Encoder
    text, #const char *, or a Python list/tuple of chars or byte values
    index #HPDF_UINT
    ):
    """Classify the byte at *index* of *text* (wraps HPDF_Encoder_GetByteType).

    A list/tuple of characters (or integer byte values, which are converted
    with chr) is joined into a single string before the C call.

    Fix: replaces the Py2-only ``types.ListType/TupleType/StringType``
    checks with ``isinstance`` so the wrapper also works on Python 3.
    """
    if isinstance(text, (list, tuple)):
        # The last element's type decides whether conversion is needed,
        # matching the original heuristic.
        if not isinstance(text[-1], str):
            text = [chr(i) for i in text]
        text = ''.join(text)
    return _HPDF_Encoder_GetByteType(
        encoder, #HPDF_Encoder
        text, #const char *
        index #HPDF_UINT
        )
#HPDF_UNICODE HPDF_Encoder_GetUnicode (HPDF_Encoder encoder, HPDF_UINT16 code)
HPDF_Encoder_GetUnicode=haru.HPDF_Encoder_GetUnicode
HPDF_Encoder_GetUnicode.restype=HPDF_UNICODE
#HPDF_WritingMode HPDF_Encoder_GetWritingMode (HPDF_Encoder encoder)
HPDF_Encoder_GetWritingMode=haru.HPDF_Encoder_GetWritingMode
HPDF_Encoder_GetWritingMode.restype=HPDF_WritingMode
# --- built-in CJK encoding activation -------------------------------------
#HPDF_STATUS HPDF_UseJPEncodings (HPDF_Doc pdf)
HPDF_UseJPEncodings=haru.HPDF_UseJPEncodings
HPDF_UseJPEncodings.restype=HPDF_STATUS
#HPDF_STATUS HPDF_UseKREncodings (HPDF_Doc pdf)
HPDF_UseKREncodings=haru.HPDF_UseKREncodings
HPDF_UseKREncodings.restype=HPDF_STATUS
#HPDF_STATUS HPDF_UseCNSEncodings (HPDF_Doc pdf)
HPDF_UseCNSEncodings=haru.HPDF_UseCNSEncodings
HPDF_UseCNSEncodings.restype=HPDF_STATUS
#HPDF_STATUS HPDF_UseCNTEncodings (HPDF_Doc pdf)
HPDF_UseCNTEncodings=haru.HPDF_UseCNTEncodings
HPDF_UseCNTEncodings.restype=HPDF_STATUS
#--------------------------------------------------------------------------
#----- annotation ---------------------------------------------------------
#HPDF_Annotation HPDF_Page_CreateTextAnnot (HPDF_Page page, HPDF_Rect rect, const char *text, HPDF_Encoder encoder)
HPDF_Page_CreateTextAnnot=haru.HPDF_Page_CreateTextAnnot
HPDF_Page_CreateTextAnnot.restype=HPDF_Annotation
#HPDF_Annotation HPDF_Page_CreateLinkAnnot (HPDF_Page page, HPDF_Rect rect, HPDF_Destination dst)
HPDF_Page_CreateLinkAnnot=haru.HPDF_Page_CreateLinkAnnot
HPDF_Page_CreateLinkAnnot.restype=HPDF_Annotation
#HPDF_Annotation HPDF_Page_CreateURILinkAnnot (HPDF_Page page, HPDF_Rect rect, const char *uri)
HPDF_Page_CreateURILinkAnnot=haru.HPDF_Page_CreateURILinkAnnot
HPDF_Page_CreateURILinkAnnot.restype=HPDF_Annotation
#HPDF_STATUS HPDF_LinkAnnot_SetHighlightMode (HPDF_Annotation annot, HPDF_AnnotHighlightMode mode)
HPDF_LinkAnnot_SetHighlightMode=haru.HPDF_LinkAnnot_SetHighlightMode
HPDF_LinkAnnot_SetHighlightMode.restype=HPDF_STATUS
#HPDF_STATUS HPDF_LinkAnnot_SetBorderStyle (HPDF_Annotation annot, HPDF_REAL width, HPDF_UINT16 dash_on, HPDF_UINT16 dash_off)
_HPDF_LinkAnnot_SetBorderStyle = haru.HPDF_LinkAnnot_SetBorderStyle
_HPDF_LinkAnnot_SetBorderStyle.restype = HPDF_STATUS
def HPDF_LinkAnnot_SetBorderStyle(annot, width, dash_on, dash_off):
    """Set border width and dash pattern of a link annotation."""
    return _HPDF_LinkAnnot_SetBorderStyle(
        annot,
        HPDF_REAL(width),
        HPDF_UINT16(dash_on),
        HPDF_UINT16(dash_off),
        )
#HPDF_STATUS HPDF_TextAnnot_SetIcon (HPDF_Annotation annot, HPDF_AnnotIcon icon)
HPDF_TextAnnot_SetIcon=haru.HPDF_TextAnnot_SetIcon
HPDF_TextAnnot_SetIcon.restype=HPDF_STATUS
#HPDF_STATUS HPDF_TextAnnot_SetOpened (HPDF_Annotation annot, HPDF_BOOL opened)
HPDF_TextAnnot_SetOpened=haru.HPDF_TextAnnot_SetOpened
HPDF_TextAnnot_SetOpened.restype=HPDF_STATUS
#--------------------------------------------------------------------------
#----- image data ---------------------------------------------------------
#HPDF_Image HPDF_LoadPngImageFromFile (HPDF_Doc pdf, const char *filename)
HPDF_LoadPngImageFromFile=haru.HPDF_LoadPngImageFromFile
HPDF_LoadPngImageFromFile.restype=HPDF_Image
#HPDF_Image HPDF_LoadPngImageFromFile2 (HPDF_Doc pdf, const char *filename)
HPDF_LoadPngImageFromFile2=haru.HPDF_LoadPngImageFromFile2
HPDF_LoadPngImageFromFile2.restype=HPDF_Image
#HPDF_Image HPDF_LoadJpegImageFromFile (HPDF_Doc pdf, const char *filename)
HPDF_LoadJpegImageFromFile=haru.HPDF_LoadJpegImageFromFile
HPDF_LoadJpegImageFromFile.restype=HPDF_Image
#HPDF_Image HPDF_LoadRawImageFromFile (HPDF_Doc pdf, const char *filename, HPDF_UINT width, HPDF_UINT height, HPDF_ColorSpace color_space)
_HPDF_LoadRawImageFromFile = haru.HPDF_LoadRawImageFromFile
_HPDF_LoadRawImageFromFile.restype = HPDF_Image
def HPDF_LoadRawImageFromFile(pdf, filename, width, height, color_space):
    """Load a raw (headerless) image file; width/height are coerced to HPDF_UINT."""
    dims = [HPDF_UINT(v) for v in (width, height)]
    return _HPDF_LoadRawImageFromFile(pdf, filename, dims[0], dims[1], color_space)
#HPDF_Image HPDF_LoadRawImageFromMem (HPDF_Doc pdf, const HPDF_BYTE *buf, HPDF_UINT width, HPDF_UINT height, HPDF_ColorSpace color_space, HPDF_UINT bits_per_component)
_HPDF_LoadRawImageFromMem=haru.HPDF_LoadRawImageFromMem
_HPDF_LoadRawImageFromMem.restype=HPDF_Image
def HPDF_LoadRawImageFromMem(
    pdf, #HPDF_Doc
    buf, #POINTER(HPDF_BYTE), or a Python list/tuple of byte values
    width, #HPDF_UINT
    height, #HPDF_UINT; may be 0/None when buf is a sequence (derived as len(buf)//width)
    color_space, #HPDF_ColorSpace
    bits_per_component, #HPDF_UINT
    ):
    """Load a raw (uncompressed) image from memory (wraps HPDF_LoadRawImageFromMem).

    Accepts either a ctypes byte pointer or a plain Python sequence; the
    latter is copied into a ctypes array, and a missing height is derived
    from the buffer length.

    Fixes: Py2-only ``types.ListType/TupleType`` check replaced with
    ``isinstance``; the derived height now uses floor division ``//`` so
    it stays an integer under Python 3 true division.
    """
    if isinstance(buf, (list, tuple)):
        size = len(buf)
        buf = pointer((HPDF_BYTE * size)(*buf))
        if height in [0, None]:
            height = size // width
    width = HPDF_UINT(width)
    height = HPDF_UINT(height)
    bits_per_component = HPDF_UINT(bits_per_component)
    return _HPDF_LoadRawImageFromMem(
        pdf, #HPDF_Doc
        buf, #POINTER(HPDF_BYTE)
        width, #HPDF_UINT
        height, #HPDF_UINT
        color_space, #HPDF_ColorSpace
        bits_per_component, #HPDF_UINT
        )
# Returns the image size as an HPDF_Point structure (returned by value).
#HPDF_Point HPDF_Image_GetSize (HPDF_Image image)
HPDF_Image_GetSize=haru.HPDF_Image_GetSize
HPDF_Image_GetSize.restype=HPDF_Point
#HPDF_STATUS HPDF_Image_GetSize2 (HPDF_Image image, HPDF_Point *size)
_HPDF_Image_GetSize2=haru.HPDF_Image_GetSize2
_HPDF_Image_GetSize2.restype=HPDF_STATUS
def HPDF_Image_GetSize2(
    image, #HPDF_Image
    size=None, #ignored; kept only for backward compatibility
    ):
    """Return (status, width, height) of *image*.

    Bug fix: the previous code did ``size=HPDF_Point`` (the class itself,
    missing the ``()`` call), so the C function received the class object
    and ``size.x`` / ``size.y`` evaluated to ctypes field descriptors
    instead of numbers.  An HPDF_Point instance is now allocated, matching
    the working pattern used by HPDF_Page_GetCurrentPos2.
    """
    size=HPDF_Point()
    ret= _HPDF_Image_GetSize2(
        image, #HPDF_Image
        size, #POINTER(HPDF_Point)
        )
    return ret, size.x, size.y
# Image attribute getters: plain pass-through bindings, no Python wrapper
# needed because all arguments are opaque handles.
#HPDF_UINT HPDF_Image_GetWidth (HPDF_Image image)
HPDF_Image_GetWidth=haru.HPDF_Image_GetWidth
HPDF_Image_GetWidth.restype=HPDF_UINT
#HPDF_UINT HPDF_Image_GetHeight (HPDF_Image image)
HPDF_Image_GetHeight=haru.HPDF_Image_GetHeight
HPDF_Image_GetHeight.restype=HPDF_UINT
#HPDF_UINT HPDF_Image_GetBitsPerComponent (HPDF_Image image)
HPDF_Image_GetBitsPerComponent=haru.HPDF_Image_GetBitsPerComponent
HPDF_Image_GetBitsPerComponent.restype=HPDF_UINT
#const char* HPDF_Image_GetColorSpace (HPDF_Image image)
HPDF_Image_GetColorSpace=haru.HPDF_Image_GetColorSpace
HPDF_Image_GetColorSpace.restype=c_char_p
#HPDF_STATUS HPDF_Image_SetColorMask (HPDF_Image image, HPDF_UINT rmin, HPDF_UINT rmax, HPDF_UINT gmin, HPDF_UINT gmax, HPDF_UINT bmin, HPDF_UINT bmax)
_HPDF_Image_SetColorMask=haru.HPDF_Image_SetColorMask
_HPDF_Image_SetColorMask.restype=HPDF_STATUS
def HPDF_Image_SetColorMask(image, rmin, rmax, gmin, gmax, bmin, bmax):
    """Set a color-key mask on *image* from the given R/G/B ranges.

    All six range bounds are coerced to HPDF_UINT before the C call.
    """
    return _HPDF_Image_SetColorMask(image,
                                    HPDF_UINT(rmin), HPDF_UINT(rmax),
                                    HPDF_UINT(gmin), HPDF_UINT(gmax),
                                    HPDF_UINT(bmin), HPDF_UINT(bmax))
# Mask-image, document-info and password bindings (pass-through; handles
# and C strings need no Python-side coercion).
#HPDF_STATUS HPDF_Image_SetMaskImage (HPDF_Image image, HPDF_Image mask_image)
HPDF_Image_SetMaskImage=haru.HPDF_Image_SetMaskImage
HPDF_Image_SetMaskImage.restype=HPDF_STATUS
#--------------------------------------------------------------------------
#----- info dictionary ----------------------------------------------------
#HPDF_STATUS HPDF_SetInfoAttr (HPDF_Doc pdf, HPDF_InfoType type, const char *value)
HPDF_SetInfoAttr=haru.HPDF_SetInfoAttr
HPDF_SetInfoAttr.restype=HPDF_STATUS
#const char* HPDF_GetInfoAttr (HPDF_Doc pdf, HPDF_InfoType type)
HPDF_GetInfoAttr=haru.HPDF_GetInfoAttr
HPDF_GetInfoAttr.restype=c_char_p
#HPDF_STATUS HPDF_SetInfoDateAttr (HPDF_Doc pdf, HPDF_InfoType type, HPDF_Date value)
HPDF_SetInfoDateAttr=haru.HPDF_SetInfoDateAttr
HPDF_SetInfoDateAttr.restype=HPDF_STATUS
#--------------------------------------------------------------------------
#----- encryption ---------------------------------------------------------
#HPDF_STATUS HPDF_SetPassword (HPDF_Doc pdf, const char *owner_passwd, const char *user_passwd)
HPDF_SetPassword=haru.HPDF_SetPassword
HPDF_SetPassword.restype=HPDF_STATUS
#HPDF_STATUS HPDF_SetPermission (HPDF_Doc pdf, HPDF_UINT permission)
_HPDF_SetPermission=haru.HPDF_SetPermission
_HPDF_SetPermission.restype=HPDF_STATUS
def HPDF_SetPermission(pdf, permission):
    """Set the document permission flags; *permission* is coerced to HPDF_UINT."""
    return _HPDF_SetPermission(pdf, HPDF_UINT(int(permission)))
#HPDF_STATUS HPDF_SetEncryptionMode (HPDF_Doc pdf, HPDF_EncryptMode mode, HPDF_UINT key_len)
_HPDF_SetEncryptionMode=haru.HPDF_SetEncryptionMode
_HPDF_SetEncryptionMode.restype=HPDF_STATUS
def HPDF_SetEncryptionMode(pdf, mode, key_len):
    """Select the document encryption mode; *key_len* is coerced to HPDF_UINT."""
    return _HPDF_SetEncryptionMode(pdf, mode, HPDF_UINT(int(key_len)))
#--------------------------------------------------------------------------
#----- compression --------------------------------------------------------
# Compression mode plus font metric getters: all pass-through bindings.
#HPDF_STATUS HPDF_SetCompressionMode (HPDF_Doc pdf, HPDF_UINT mode)
HPDF_SetCompressionMode=haru.HPDF_SetCompressionMode
HPDF_SetCompressionMode.restype=HPDF_STATUS
#--------------------------------------------------------------------------
#----- font ---------------------------------------------------------------
#const char* HPDF_Font_GetFontName (HPDF_Font font)
HPDF_Font_GetFontName=haru.HPDF_Font_GetFontName
HPDF_Font_GetFontName.restype=c_char_p
#const char* HPDF_Font_GetEncodingName (HPDF_Font font)
HPDF_Font_GetEncodingName=haru.HPDF_Font_GetEncodingName
HPDF_Font_GetEncodingName.restype=c_char_p
#HPDF_INT HPDF_Font_GetUnicodeWidth (HPDF_Font font, HPDF_UNICODE code)
HPDF_Font_GetUnicodeWidth=haru.HPDF_Font_GetUnicodeWidth
HPDF_Font_GetUnicodeWidth.restype=HPDF_INT
#HPDF_Box HPDF_Font_GetBBox (HPDF_Font font)
HPDF_Font_GetBBox=haru.HPDF_Font_GetBBox
HPDF_Font_GetBBox.restype=HPDF_Box
#HPDF_INT HPDF_Font_GetAscent (HPDF_Font font)
HPDF_Font_GetAscent=haru.HPDF_Font_GetAscent
HPDF_Font_GetAscent.restype=HPDF_INT
#HPDF_INT HPDF_Font_GetDescent (HPDF_Font font)
HPDF_Font_GetDescent=haru.HPDF_Font_GetDescent
HPDF_Font_GetDescent.restype=HPDF_INT
#HPDF_UINT HPDF_Font_GetXHeight (HPDF_Font font)
HPDF_Font_GetXHeight=haru.HPDF_Font_GetXHeight
HPDF_Font_GetXHeight.restype=HPDF_UINT
#HPDF_UINT HPDF_Font_GetCapHeight (HPDF_Font font)
HPDF_Font_GetCapHeight=haru.HPDF_Font_GetCapHeight
HPDF_Font_GetCapHeight.restype=HPDF_UINT
#HPDF_TextWidth HPDF_Font_TextWidth (HPDF_Font font, const HPDF_BYTE *text, HPDF_UINT len)
HPDF_Font_TextWidth=haru.HPDF_Font_TextWidth
HPDF_Font_TextWidth.restype=HPDF_TextWidth
#HPDF_UINT HPDF_Font_MeasureText (HPDF_Font font, const HPDF_BYTE *text, HPDF_UINT len, HPDF_REAL width, HPDF_REAL font_size, HPDF_REAL char_space, HPDF_REAL word_space, HPDF_BOOL wordwrap, HPDF_REAL *real_width)
_HPDF_Font_MeasureText=haru.HPDF_Font_MeasureText
_HPDF_Font_MeasureText.restype=HPDF_UINT
def HPDF_Font_MeasureText(
    font, #HPDF_Font
    text, #POINTER(HPDF_BYTE), or a list/tuple of byte values
    length, #HPDF_UINT; recomputed from len(text) when text is a sequence
    width, #HPDF_REAL
    font_size, #HPDF_REAL
    char_space, #HPDF_REAL
    word_space, #HPDF_REAL
    wordwrap, #HPDF_BOOL
    real_width, #initial value for the HPDF_REAL output cell
    ):
    """Return how many bytes of *text* fit into *width*.

    Bug fix: the C prototype takes ``HPDF_REAL *real_width`` (an output
    parameter that receives the measured width), but the old code passed
    ``HPDF_REAL(real_width)`` by value, handing the library a bogus
    pointer.  A pointer to a real HPDF_REAL cell is now passed.  The
    measured width is still discarded, as before, so the return value
    stays backward compatible.
    """
    if type(text) in (types.TupleType, types.ListType):
        length=len(text)
        text=pointer((HPDF_BYTE*length)(*text))
    length=HPDF_UINT(int(length))
    width=HPDF_REAL(width)
    font_size=HPDF_REAL(font_size)
    char_space=HPDF_REAL(char_space)
    word_space=HPDF_REAL(word_space)
    real_width=pointer(HPDF_REAL(real_width)) #out-param must be a pointer
    return _HPDF_Font_MeasureText(
        font, #HPDF_Font
        text, #POINTER(HPDF_BYTE)
        length, #HPDF_UINT
        width, #HPDF_REAL
        font_size, #HPDF_REAL
        char_space, #HPDF_REAL
        word_space, #HPDF_REAL
        wordwrap, #HPDF_BOOL
        real_width, #POINTER(HPDF_REAL)
        )
#--------------------------------------------------------------------------
#----- extended graphics state --------------------------------------------
# Creates a new extended graphics state handle owned by *pdf*.
#HPDF_ExtGState HPDF_CreateExtGState (HPDF_Doc pdf)
HPDF_CreateExtGState=haru.HPDF_CreateExtGState
HPDF_CreateExtGState.restype=HPDF_ExtGState
#HPDF_STATUS HPDF_ExtGState_SetAlphaStroke (HPDF_ExtGState ext_gstate, HPDF_REAL value)
_HPDF_ExtGState_SetAlphaStroke=haru.HPDF_ExtGState_SetAlphaStroke
_HPDF_ExtGState_SetAlphaStroke.restype=HPDF_STATUS
def HPDF_ExtGState_SetAlphaStroke(ext_gstate, value):
    """Set the stroking alpha of *ext_gstate*; *value* is coerced to HPDF_REAL."""
    return _HPDF_ExtGState_SetAlphaStroke(ext_gstate, HPDF_REAL(value))
#HPDF_STATUS HPDF_ExtGState_SetAlphaFill (HPDF_ExtGState ext_gstate, HPDF_REAL value)
_HPDF_ExtGState_SetAlphaFill=haru.HPDF_ExtGState_SetAlphaFill
_HPDF_ExtGState_SetAlphaFill.restype=HPDF_STATUS
def HPDF_ExtGState_SetAlphaFill(ext_gstate, value):
    """Set the filling alpha of *ext_gstate*; *value* is coerced to HPDF_REAL."""
    return _HPDF_ExtGState_SetAlphaFill(ext_gstate, HPDF_REAL(value))
# Blend mode is an enum, passed through unchanged.
#HPDF_STATUS HPDF_ExtGState_SetBlendMode (HPDF_ExtGState ext_gstate, HPDF_BlendMode mode)
HPDF_ExtGState_SetBlendMode=haru.HPDF_ExtGState_SetBlendMode
HPDF_ExtGState_SetBlendMode.restype=HPDF_STATUS
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
#HPDF_REAL HPDF_Page_TextWidth (HPDF_Page page, const char *text)
_HPDF_Page_TextWidth=haru.HPDF_Page_TextWidth
_HPDF_Page_TextWidth.restype=HPDF_REAL
def HPDF_Page_TextWidth(page, text):
    """Return the width of *text* under the page's current font settings.

    A list/tuple argument is flattened to a string first: if its last
    element is not a string the items are treated as byte values and
    mapped through chr(); either way the items are joined into one string.
    """
    if type(text) in (list, tuple):
        if type(text[-1]) != str:
            text = [chr(code) for code in text]
        text = ''.join(text)
    return _HPDF_Page_TextWidth(page, text)
#HPDF_UINT HPDF_Page_MeasureText (HPDF_Page page, const char *text, HPDF_REAL width, HPDF_BOOL wordwrap, HPDF_REAL *real_width)
_HPDF_Page_MeasureText=haru.HPDF_Page_MeasureText
_HPDF_Page_MeasureText.restype=HPDF_UINT
def HPDF_Page_MeasureText(
    page, #HPDF_Page
    text, #c_char_p
    width, #HPDF_REAL
    wordwrap, #HPDF_BOOL
    real_width, #initial value for the HPDF_REAL output cell
    ):
    """Return how many bytes of *text* fit into *width* on *page*.

    Bug fix: the C prototype takes ``HPDF_REAL *real_width`` (an output
    parameter), but the old code passed ``HPDF_REAL(real_width)`` by
    value, handing the library a bogus pointer.  A pointer to a real
    HPDF_REAL cell is now passed; the measured width remains discarded so
    the return value stays backward compatible.
    """
    width=HPDF_REAL(width)
    real_width=pointer(HPDF_REAL(real_width)) #out-param must be a pointer
    return _HPDF_Page_MeasureText(
        page, #HPDF_Page
        text, #c_char_p
        width, #HPDF_REAL
        wordwrap, #HPDF_BOOL
        real_width, #POINTER(HPDF_REAL)
        )
# Page dimension and current-position getters (pass-through bindings).
#HPDF_REAL
#HPDF_Page_GetWidth (HPDF_Page page);
HPDF_Page_GetWidth=haru.HPDF_Page_GetWidth
HPDF_Page_GetWidth.restype=HPDF_REAL
#HPDF_REAL HPDF_Page_GetHeight (HPDF_Page page)
HPDF_Page_GetHeight=haru.HPDF_Page_GetHeight
HPDF_Page_GetHeight.restype=HPDF_REAL
#HPDF_UINT16 HPDF_Page_GetGMode (HPDF_Page page)
HPDF_Page_GetGMode=haru.HPDF_Page_GetGMode
HPDF_Page_GetGMode.restype=HPDF_UINT16
#HPDF_Point HPDF_Page_GetCurrentPos (HPDF_Page page)
HPDF_Page_GetCurrentPos=haru.HPDF_Page_GetCurrentPos
HPDF_Page_GetCurrentPos.restype=HPDF_Point
#HPDF_STATUS HPDF_Page_GetCurrentPos2 (HPDF_Page page, HPDF_Point *pos)
_HPDF_Page_GetCurrentPos2=haru.HPDF_Page_GetCurrentPos2
_HPDF_Page_GetCurrentPos2.restype=HPDF_STATUS
def HPDF_Page_GetCurrentPos2(page, pos=None):
    """Return (status, x, y) of the current drawing position.

    The *pos* argument is ignored and kept only for signature
    compatibility; a fresh HPDF_Point is always allocated for the call.
    """
    point = HPDF_Point()
    status = _HPDF_Page_GetCurrentPos2(page, point)
    return status, point.x, point.y
#HPDF_Point HPDF_Page_GetCurrentTextPos (HPDF_Page page)
HPDF_Page_GetCurrentTextPos=haru.HPDF_Page_GetCurrentTextPos
HPDF_Page_GetCurrentTextPos.restype=HPDF_Point
#HPDF_STATUS HPDF_Page_GetCurrentTextPos2 (HPDF_Page page, HPDF_Point *pos)
_HPDF_Page_GetCurrentTextPos2=haru.HPDF_Page_GetCurrentTextPos2
_HPDF_Page_GetCurrentTextPos2.restype=HPDF_STATUS
def HPDF_Page_GetCurrentTextPos2(page, pos=None):
    """Return (status, x, y) of the current text position.

    The *pos* argument is ignored and kept only for signature
    compatibility; a fresh HPDF_Point is always allocated for the call.
    """
    point = HPDF_Point()
    status = _HPDF_Page_GetCurrentTextPos2(page, point)
    return status, point.x, point.y
# Page graphics/text state getters: plain pass-through bindings (only a
# page handle argument), declared here with their C return types.
#HPDF_Font HPDF_Page_GetCurrentFont (HPDF_Page page)
HPDF_Page_GetCurrentFont=haru.HPDF_Page_GetCurrentFont
HPDF_Page_GetCurrentFont.restype=HPDF_Font
#HPDF_REAL HPDF_Page_GetCurrentFontSize (HPDF_Page page)
HPDF_Page_GetCurrentFontSize=haru.HPDF_Page_GetCurrentFontSize
HPDF_Page_GetCurrentFontSize.restype=HPDF_REAL
#HPDF_TransMatrix HPDF_Page_GetTransMatrix (HPDF_Page page)
HPDF_Page_GetTransMatrix=haru.HPDF_Page_GetTransMatrix
HPDF_Page_GetTransMatrix.restype=HPDF_TransMatrix
#HPDF_REAL HPDF_Page_GetLineWidth (HPDF_Page page)
HPDF_Page_GetLineWidth=haru.HPDF_Page_GetLineWidth
HPDF_Page_GetLineWidth.restype=HPDF_REAL
#HPDF_LineCap HPDF_Page_GetLineCap (HPDF_Page page)
HPDF_Page_GetLineCap=haru.HPDF_Page_GetLineCap
HPDF_Page_GetLineCap.restype=HPDF_LineCap
#HPDF_LineJoin HPDF_Page_GetLineJoin (HPDF_Page page)
HPDF_Page_GetLineJoin=haru.HPDF_Page_GetLineJoin
HPDF_Page_GetLineJoin.restype=HPDF_LineJoin
#HPDF_REAL HPDF_Page_GetMiterLimit (HPDF_Page page)
HPDF_Page_GetMiterLimit=haru.HPDF_Page_GetMiterLimit
HPDF_Page_GetMiterLimit.restype=HPDF_REAL
#HPDF_DashMode HPDF_Page_GetDash (HPDF_Page page)
HPDF_Page_GetDash=haru.HPDF_Page_GetDash
HPDF_Page_GetDash.restype=HPDF_DashMode
#HPDF_REAL HPDF_Page_GetFlat (HPDF_Page page)
HPDF_Page_GetFlat=haru.HPDF_Page_GetFlat
HPDF_Page_GetFlat.restype=HPDF_REAL
#HPDF_REAL HPDF_Page_GetCharSpace (HPDF_Page page)
HPDF_Page_GetCharSpace=haru.HPDF_Page_GetCharSpace
HPDF_Page_GetCharSpace.restype=HPDF_REAL
#HPDF_REAL HPDF_Page_GetWordSpace (HPDF_Page page)
HPDF_Page_GetWordSpace=haru.HPDF_Page_GetWordSpace
HPDF_Page_GetWordSpace.restype=HPDF_REAL
#HPDF_REAL HPDF_Page_GetHorizontalScalling (HPDF_Page page)
HPDF_Page_GetHorizontalScalling=haru.HPDF_Page_GetHorizontalScalling
HPDF_Page_GetHorizontalScalling.restype=HPDF_REAL
#HPDF_REAL HPDF_Page_GetTextLeading (HPDF_Page page)
HPDF_Page_GetTextLeading=haru.HPDF_Page_GetTextLeading
HPDF_Page_GetTextLeading.restype=HPDF_REAL
#HPDF_TextRenderingMode HPDF_Page_GetTextRenderingMode (HPDF_Page page)
HPDF_Page_GetTextRenderingMode=haru.HPDF_Page_GetTextRenderingMode
HPDF_Page_GetTextRenderingMode.restype=HPDF_TextRenderingMode
# This function is obsolete. Use HPDF_Page_GetTextRise.
#HPDF_REAL HPDF_Page_GetTextRaise (HPDF_Page page)
HPDF_Page_GetTextRaise=haru.HPDF_Page_GetTextRaise
HPDF_Page_GetTextRaise.restype=HPDF_REAL
#HPDF_REAL HPDF_Page_GetTextRise (HPDF_Page page)
HPDF_Page_GetTextRise=haru.HPDF_Page_GetTextRise
HPDF_Page_GetTextRise.restype=HPDF_REAL
#HPDF_RGBColor HPDF_Page_GetRGBFill (HPDF_Page page)
HPDF_Page_GetRGBFill=haru.HPDF_Page_GetRGBFill
HPDF_Page_GetRGBFill.restype=HPDF_RGBColor
#HPDF_RGBColor HPDF_Page_GetRGBStroke (HPDF_Page page)
HPDF_Page_GetRGBStroke=haru.HPDF_Page_GetRGBStroke
HPDF_Page_GetRGBStroke.restype=HPDF_RGBColor
#HPDF_CMYKColor HPDF_Page_GetCMYKFill (HPDF_Page page)
HPDF_Page_GetCMYKFill=haru.HPDF_Page_GetCMYKFill
HPDF_Page_GetCMYKFill.restype=HPDF_CMYKColor
#HPDF_CMYKColor HPDF_Page_GetCMYKStroke (HPDF_Page page)
HPDF_Page_GetCMYKStroke=haru.HPDF_Page_GetCMYKStroke
HPDF_Page_GetCMYKStroke.restype=HPDF_CMYKColor
#HPDF_REAL HPDF_Page_GetGrayFill (HPDF_Page page)
HPDF_Page_GetGrayFill=haru.HPDF_Page_GetGrayFill
HPDF_Page_GetGrayFill.restype=HPDF_REAL
#HPDF_REAL HPDF_Page_GetGrayStroke (HPDF_Page page)
HPDF_Page_GetGrayStroke=haru.HPDF_Page_GetGrayStroke
HPDF_Page_GetGrayStroke.restype=HPDF_REAL
#HPDF_ColorSpace HPDF_Page_GetStrokingColorSpace (HPDF_Page page)
HPDF_Page_GetStrokingColorSpace=haru.HPDF_Page_GetStrokingColorSpace
HPDF_Page_GetStrokingColorSpace.restype=HPDF_ColorSpace
#HPDF_ColorSpace HPDF_Page_GetFillingColorSpace (HPDF_Page page)
HPDF_Page_GetFillingColorSpace=haru.HPDF_Page_GetFillingColorSpace
HPDF_Page_GetFillingColorSpace.restype=HPDF_ColorSpace
#HPDF_TransMatrix HPDF_Page_GetTextMatrix (HPDF_Page page)
HPDF_Page_GetTextMatrix=haru.HPDF_Page_GetTextMatrix
HPDF_Page_GetTextMatrix.restype=HPDF_TransMatrix
#HPDF_UINT HPDF_Page_GetGStateDepth (HPDF_Page page)
HPDF_Page_GetGStateDepth=haru.HPDF_Page_GetGStateDepth
HPDF_Page_GetGStateDepth.restype=HPDF_UINT
#--------------------------------------------------------------------------
#----- GRAPHICS OPERATORS -------------------------------------------------
#--- General graphics state ---------------------------------------------
# w
#HPDF_STATUS HPDF_Page_SetLineWidth (HPDF_Page page, HPDF_REAL line_width)
_HPDF_Page_SetLineWidth=haru.HPDF_Page_SetLineWidth
_HPDF_Page_SetLineWidth.restype=HPDF_STATUS
def HPDF_Page_SetLineWidth(page, line_width):
    """Set the stroke line width (PDF 'w'); coerced to HPDF_REAL."""
    return _HPDF_Page_SetLineWidth(page, HPDF_REAL(line_width))
# Enum-argument setters: passed through without coercion.
# J
#HPDF_STATUS HPDF_Page_SetLineCap (HPDF_Page page, HPDF_LineCap line_cap)
HPDF_Page_SetLineCap=haru.HPDF_Page_SetLineCap
HPDF_Page_SetLineCap.restype=HPDF_STATUS
# j
#HPDF_STATUS HPDF_Page_SetLineJoin (HPDF_Page page, HPDF_LineJoin line_join)
HPDF_Page_SetLineJoin=haru.HPDF_Page_SetLineJoin
HPDF_Page_SetLineJoin.restype=HPDF_STATUS
# M
#HPDF_STATUS HPDF_Page_SetMiterLimit (HPDF_Page page, HPDF_REAL miter_limit)
_HPDF_Page_SetMiterLimit=haru.HPDF_Page_SetMiterLimit
_HPDF_Page_SetMiterLimit.restype=HPDF_STATUS
def HPDF_Page_SetMiterLimit(page, miter_limit):
    """Set the miter limit (PDF 'M'); coerced to HPDF_REAL."""
    return _HPDF_Page_SetMiterLimit(page, HPDF_REAL(miter_limit))
# d
#HPDF_STATUS HPDF_Page_SetDash (HPDF_Page page, const HPDF_UINT16 *dash_ptn, HPDF_UINT num_param, HPDF_UINT phase)
_HPDF_Page_SetDash=haru.HPDF_Page_SetDash
_HPDF_Page_SetDash.restype=HPDF_STATUS
def HPDF_Page_SetDash(page, dash_ptn, num_param, phase):
    """Set the dash pattern (PDF 'd').

    When *dash_ptn* is a list/tuple it is converted to a C HPDF_UINT16
    array and *num_param* is replaced by its length.
    """
    if type(dash_ptn) in (list, tuple):
        num_param = len(dash_ptn)
        dash_ptn = pointer((HPDF_UINT16 * num_param)(*dash_ptn))
    return _HPDF_Page_SetDash(page, dash_ptn, num_param, phase)
# ri --not implemented yet
# i
#HPDF_STATUS HPDF_Page_SetFlat (HPDF_Page page, HPDF_REAL flatness)
_HPDF_Page_SetFlat=haru.HPDF_Page_SetFlat
_HPDF_Page_SetFlat.restype=HPDF_STATUS
def HPDF_Page_SetFlat(page, flatness):
    """Set the curve flatness tolerance (PDF 'i'); coerced to HPDF_REAL."""
    return _HPDF_Page_SetFlat(page, HPDF_REAL(flatness))
# gs
#HPDF_STATUS HPDF_Page_SetExtGState (HPDF_Page page, HPDF_ExtGState ext_gstate)
HPDF_Page_SetExtGState=haru.HPDF_Page_SetExtGState
HPDF_Page_SetExtGState.restype=HPDF_STATUS
#--- Special graphic state operator --------------------------------------
# Graphics-state save/restore: handle-only pass-through bindings.
# q
#HPDF_STATUS HPDF_Page_GSave (HPDF_Page page)
HPDF_Page_GSave=haru.HPDF_Page_GSave
HPDF_Page_GSave.restype=HPDF_STATUS
# Q
#HPDF_STATUS HPDF_Page_GRestore (HPDF_Page page)
HPDF_Page_GRestore=haru.HPDF_Page_GRestore
HPDF_Page_GRestore.restype=HPDF_STATUS
# cm
#HPDF_STATUS HPDF_Page_Concat (HPDF_Page page, HPDF_REAL a, HPDF_REAL b, HPDF_REAL c, HPDF_REAL d, HPDF_REAL x, HPDF_REAL y)
_HPDF_Page_Concat=haru.HPDF_Page_Concat
_HPDF_Page_Concat.restype=HPDF_STATUS
def HPDF_Page_Concat(page, a, b, c, d, x, y):
    """Concatenate the matrix [a b c d x y] to the page's CTM (PDF 'cm')."""
    return _HPDF_Page_Concat(page,
                             HPDF_REAL(a), HPDF_REAL(b),
                             HPDF_REAL(c), HPDF_REAL(d),
                             HPDF_REAL(x), HPDF_REAL(y))
#--- Path construction operator ------------------------------------------
# m
#HPDF_STATUS HPDF_Page_MoveTo (HPDF_Page page, HPDF_REAL x, HPDF_REAL y)
_HPDF_Page_MoveTo=haru.HPDF_Page_MoveTo
_HPDF_Page_MoveTo.restype=HPDF_STATUS
def HPDF_Page_MoveTo(page, x, y):
    """Start a new subpath at (x, y) (PDF 'm')."""
    return _HPDF_Page_MoveTo(page, HPDF_REAL(x), HPDF_REAL(y))
# l
#HPDF_STATUS HPDF_Page_LineTo (HPDF_Page page, HPDF_REAL x, HPDF_REAL y)
_HPDF_Page_LineTo=haru.HPDF_Page_LineTo
_HPDF_Page_LineTo.restype=HPDF_STATUS
def HPDF_Page_LineTo(page, x, y):
    """Append a straight segment to (x, y) (PDF 'l')."""
    return _HPDF_Page_LineTo(page, HPDF_REAL(x), HPDF_REAL(y))
# c
#HPDF_STATUS HPDF_Page_CurveTo (HPDF_Page page, HPDF_REAL x1, HPDF_REAL y1, HPDF_REAL x2, HPDF_REAL y2, HPDF_REAL x3, HPDF_REAL y3)
_HPDF_Page_CurveTo=haru.HPDF_Page_CurveTo
_HPDF_Page_CurveTo.restype=HPDF_STATUS
def HPDF_Page_CurveTo(page, x1, y1, x2, y2, x3, y3):
    """Append a Bezier curve with control points (x1,y1), (x2,y2) and
    endpoint (x3,y3) (PDF 'c')."""
    return _HPDF_Page_CurveTo(page,
                              HPDF_REAL(x1), HPDF_REAL(y1),
                              HPDF_REAL(x2), HPDF_REAL(y2),
                              HPDF_REAL(x3), HPDF_REAL(y3))
# v
#HPDF_STATUS HPDF_Page_CurveTo2 (HPDF_Page page, HPDF_REAL x2, HPDF_REAL y2, HPDF_REAL x3, HPDF_REAL y3)
_HPDF_Page_CurveTo2=haru.HPDF_Page_CurveTo2
_HPDF_Page_CurveTo2.restype=HPDF_STATUS
def HPDF_Page_CurveTo2(page, x2, y2, x3, y3):
    """Append a Bezier curve using the current point as the first control
    point (PDF 'v')."""
    return _HPDF_Page_CurveTo2(page,
                               HPDF_REAL(x2), HPDF_REAL(y2),
                               HPDF_REAL(x3), HPDF_REAL(y3))
# y
#HPDF_STATUS HPDF_Page_CurveTo3 (HPDF_Page page, HPDF_REAL x1, HPDF_REAL y1, HPDF_REAL x3, HPDF_REAL y3)
_HPDF_Page_CurveTo3=haru.HPDF_Page_CurveTo3
_HPDF_Page_CurveTo3.restype=HPDF_STATUS
def HPDF_Page_CurveTo3(page, x1, y1, x3, y3):
    """Append a Bezier curve whose second control point coincides with the
    endpoint (x3,y3) (PDF 'y')."""
    return _HPDF_Page_CurveTo3(page,
                               HPDF_REAL(x1), HPDF_REAL(y1),
                               HPDF_REAL(x3), HPDF_REAL(y3))
# h
#HPDF_STATUS HPDF_Page_ClosePath (HPDF_Page page)
HPDF_Page_ClosePath=haru.HPDF_Page_ClosePath
HPDF_Page_ClosePath.restype=HPDF_STATUS
# re
#HPDF_STATUS HPDF_Page_Rectangle (HPDF_Page page, HPDF_REAL x, HPDF_REAL y, HPDF_REAL width, HPDF_REAL height)
_HPDF_Page_Rectangle=haru.HPDF_Page_Rectangle
_HPDF_Page_Rectangle.restype=HPDF_STATUS
def HPDF_Page_Rectangle(page, x, y, width, height):
    """Append a rectangle with lower-left corner (x, y) (PDF 're')."""
    return _HPDF_Page_Rectangle(page,
                                HPDF_REAL(x), HPDF_REAL(y),
                                HPDF_REAL(width), HPDF_REAL(height))
#--- Path painting operator ---------------------------------------------
# S
#HPDF_STATUS HPDF_Page_Stroke (HPDF_Page page)
_HPDF_Page_Stroke=haru.HPDF_Page_Stroke
_HPDF_Page_Stroke.restype=HPDF_STATUS
def HPDF_Page_Stroke(page):
    """Stroke the current path (PDF 'S')."""
    return _HPDF_Page_Stroke(page)
# Path painting / clipping / text-object operators: all take only the page
# handle, so they are plain pass-through bindings.
# s
#HPDF_STATUS HPDF_Page_ClosePathStroke (HPDF_Page page)
HPDF_Page_ClosePathStroke=haru.HPDF_Page_ClosePathStroke
HPDF_Page_ClosePathStroke.restype=HPDF_STATUS
# f
#HPDF_STATUS HPDF_Page_Fill (HPDF_Page page)
HPDF_Page_Fill=haru.HPDF_Page_Fill
HPDF_Page_Fill.restype=HPDF_STATUS
# f*
#HPDF_STATUS HPDF_Page_Eofill (HPDF_Page page)
HPDF_Page_Eofill=haru.HPDF_Page_Eofill
HPDF_Page_Eofill.restype=HPDF_STATUS
# B
#HPDF_STATUS HPDF_Page_FillStroke (HPDF_Page page)
HPDF_Page_FillStroke=haru.HPDF_Page_FillStroke
HPDF_Page_FillStroke.restype=HPDF_STATUS
# B*
#HPDF_STATUS HPDF_Page_EofillStroke (HPDF_Page page)
HPDF_Page_EofillStroke=haru.HPDF_Page_EofillStroke
HPDF_Page_EofillStroke.restype=HPDF_STATUS
# b
#HPDF_STATUS HPDF_Page_ClosePathFillStroke (HPDF_Page page)
HPDF_Page_ClosePathFillStroke=haru.HPDF_Page_ClosePathFillStroke
HPDF_Page_ClosePathFillStroke.restype=HPDF_STATUS
# b*
#HPDF_STATUS HPDF_Page_ClosePathEofillStroke (HPDF_Page page)
HPDF_Page_ClosePathEofillStroke=haru.HPDF_Page_ClosePathEofillStroke
HPDF_Page_ClosePathEofillStroke.restype=HPDF_STATUS
# n
#HPDF_STATUS HPDF_Page_EndPath (HPDF_Page page)
HPDF_Page_EndPath=haru.HPDF_Page_EndPath
HPDF_Page_EndPath.restype=HPDF_STATUS
#--- Clipping paths operator --------------------------------------------
# W
#HPDF_STATUS HPDF_Page_Clip (HPDF_Page page)
HPDF_Page_Clip=haru.HPDF_Page_Clip
HPDF_Page_Clip.restype=HPDF_STATUS
# W*
#HPDF_STATUS HPDF_Page_Eoclip (HPDF_Page page)
HPDF_Page_Eoclip=haru.HPDF_Page_Eoclip
HPDF_Page_Eoclip.restype=HPDF_STATUS
#--- Text object operator -----------------------------------------------
# BT
#HPDF_STATUS HPDF_Page_BeginText (HPDF_Page page)
HPDF_Page_BeginText=haru.HPDF_Page_BeginText
HPDF_Page_BeginText.restype=HPDF_STATUS
# ET
#HPDF_STATUS HPDF_Page_EndText (HPDF_Page page)
HPDF_Page_EndText=haru.HPDF_Page_EndText
HPDF_Page_EndText.restype=HPDF_STATUS
#--- Text state ---------------------------------------------------------
# Tc
#HPDF_STATUS HPDF_Page_SetCharSpace (HPDF_Page page, HPDF_REAL value)
_HPDF_Page_SetCharSpace=haru.HPDF_Page_SetCharSpace
_HPDF_Page_SetCharSpace.restype=HPDF_STATUS
def HPDF_Page_SetCharSpace(page, value):
    """Set character spacing (PDF 'Tc'); coerced to HPDF_REAL."""
    return _HPDF_Page_SetCharSpace(page, HPDF_REAL(value))
# Tw
#HPDF_STATUS HPDF_Page_SetWordSpace (HPDF_Page page, HPDF_REAL value)
_HPDF_Page_SetWordSpace=haru.HPDF_Page_SetWordSpace
_HPDF_Page_SetWordSpace.restype=HPDF_STATUS
def HPDF_Page_SetWordSpace(page, value):
    """Set word spacing (PDF 'Tw'); coerced to HPDF_REAL."""
    return _HPDF_Page_SetWordSpace(page, HPDF_REAL(value))
# Tz
#HPDF_STATUS HPDF_Page_SetHorizontalScalling (HPDF_Page page, HPDF_REAL value)
_HPDF_Page_SetHorizontalScalling=haru.HPDF_Page_SetHorizontalScalling
_HPDF_Page_SetHorizontalScalling.restype=HPDF_STATUS
def HPDF_Page_SetHorizontalScalling(page, value):
    """Set horizontal scaling (PDF 'Tz'); coerced to HPDF_REAL.

    NOTE: 'Scalling' (sic) matches the libharu C API spelling.
    """
    return _HPDF_Page_SetHorizontalScalling(page, HPDF_REAL(value))
# TL
#HPDF_STATUS HPDF_Page_SetTextLeading (HPDF_Page page, HPDF_REAL value)
_HPDF_Page_SetTextLeading=haru.HPDF_Page_SetTextLeading
_HPDF_Page_SetTextLeading.restype=HPDF_STATUS
def HPDF_Page_SetTextLeading(page, value):
    """Set text leading (PDF 'TL'); coerced to HPDF_REAL."""
    return _HPDF_Page_SetTextLeading(page, HPDF_REAL(value))
# Tf
#HPDF_STATUS HPDF_Page_SetFontAndSize (HPDF_Page page, HPDF_Font font, HPDF_REAL size)
_HPDF_Page_SetFontAndSize=haru.HPDF_Page_SetFontAndSize
_HPDF_Page_SetFontAndSize.restype=HPDF_STATUS
def HPDF_Page_SetFontAndSize(page, font, size):
    """Select *font* at *size* points (PDF 'Tf'); size coerced to HPDF_REAL."""
    return _HPDF_Page_SetFontAndSize(page, font, HPDF_REAL(size))
# Tr
#HPDF_STATUS HPDF_Page_SetTextRenderingMode (HPDF_Page page, HPDF_TextRenderingMode mode)
HPDF_Page_SetTextRenderingMode=haru.HPDF_Page_SetTextRenderingMode
HPDF_Page_SetTextRenderingMode.restype=HPDF_STATUS
# Ts
#HPDF_STATUS HPDF_Page_SetTextRise (HPDF_Page page, HPDF_REAL value)
_HPDF_Page_SetTextRise=haru.HPDF_Page_SetTextRise
_HPDF_Page_SetTextRise.restype=HPDF_STATUS
def HPDF_Page_SetTextRise(page, value):
    """Set text rise (PDF 'Ts'); coerced to HPDF_REAL."""
    return _HPDF_Page_SetTextRise(page, HPDF_REAL(value))
# This function is obsolete. Use HPDF_Page_SetTextRise.
#HPDF_STATUS HPDF_Page_SetTextRaise (HPDF_Page page, HPDF_REAL value)
_HPDF_Page_SetTextRaise=haru.HPDF_Page_SetTextRaise
_HPDF_Page_SetTextRaise.restype=HPDF_STATUS
def HPDF_Page_SetTextRaise(page, value):
    """Obsolete alias kept for compatibility; prefer HPDF_Page_SetTextRise."""
    return _HPDF_Page_SetTextRaise(page, HPDF_REAL(value))
#--- Text positioning ---------------------------------------------------
# Td
#HPDF_STATUS HPDF_Page_MoveTextPos (HPDF_Page page, HPDF_REAL x, HPDF_REAL y)
_HPDF_Page_MoveTextPos=haru.HPDF_Page_MoveTextPos
_HPDF_Page_MoveTextPos.restype=HPDF_STATUS
def HPDF_Page_MoveTextPos(page, x, y):
    """Offset the text position by (x, y) (PDF 'Td')."""
    return _HPDF_Page_MoveTextPos(page, HPDF_REAL(x), HPDF_REAL(y))
# TD
#HPDF_STATUS HPDF_Page_MoveTextPos2 (HPDF_Page page, HPDF_REAL x, HPDF_REAL y)
_HPDF_Page_MoveTextPos2=haru.HPDF_Page_MoveTextPos2
_HPDF_Page_MoveTextPos2.restype=HPDF_STATUS
def HPDF_Page_MoveTextPos2(page, x, y):
    """Offset the text position by (x, y) and set the leading (PDF 'TD')."""
    return _HPDF_Page_MoveTextPos2(page, HPDF_REAL(x), HPDF_REAL(y))
# Tm
#HPDF_STATUS HPDF_Page_SetTextMatrix (HPDF_Page page, HPDF_REAL a, HPDF_REAL b, HPDF_REAL c, HPDF_REAL d, HPDF_REAL x, HPDF_REAL y)
_HPDF_Page_SetTextMatrix=haru.HPDF_Page_SetTextMatrix
_HPDF_Page_SetTextMatrix.restype=HPDF_STATUS
def HPDF_Page_SetTextMatrix(page, a, b, c, d, x, y):
    """Set the text matrix to [a b c d x y] (PDF 'Tm')."""
    return _HPDF_Page_SetTextMatrix(page,
                                    HPDF_REAL(a), HPDF_REAL(b),
                                    HPDF_REAL(c), HPDF_REAL(d),
                                    HPDF_REAL(x), HPDF_REAL(y))
# T*
#HPDF_STATUS HPDF_Page_MoveToNextLine (HPDF_Page page)
HPDF_Page_MoveToNextLine=haru.HPDF_Page_MoveToNextLine
HPDF_Page_MoveToNextLine.restype=HPDF_STATUS
#--- Text showing -------------------------------------------------------
# Tj
#HPDF_STATUS HPDF_Page_ShowText (HPDF_Page page, const char *text)
_HPDF_Page_ShowText=haru.HPDF_Page_ShowText
_HPDF_Page_ShowText.restype=HPDF_STATUS
def HPDF_Page_ShowText(page, text):
    """Show *text* at the current text position (PDF 'Tj').

    A list/tuple argument is flattened first: if its last element is not
    a string, the items are treated as byte values and mapped through
    chr(); either way the items are joined into a single string.
    """
    if type(text) in (list, tuple):
        if type(text[-1]) != str:
            text = [chr(code) for code in text]
        text = ''.join(text)
    return _HPDF_Page_ShowText(page, text)
# TJ
# '
#HPDF_STATUS HPDF_Page_ShowTextNextLine (HPDF_Page page, const char *text)
HPDF_Page_ShowTextNextLine=haru.HPDF_Page_ShowTextNextLine
HPDF_Page_ShowTextNextLine.restype=HPDF_STATUS
# "
#HPDF_STATUS HPDF_Page_ShowTextNextLineEx (HPDF_Page page, HPDF_REAL word_space, HPDF_REAL char_space, const char *text)
_HPDF_Page_ShowTextNextLineEx=haru.HPDF_Page_ShowTextNextLineEx
_HPDF_Page_ShowTextNextLineEx.restype=HPDF_STATUS
def HPDF_Page_ShowTextNextLineEx(
page, #HPDF_Page
word_space, #HPDF_REAL
char_space, #HPDF_REAL
text, #c_char_p
):
word_space=HPDF_REAL(word_space)
char_space=HPDF_REAL(char_space)
return _HPDF_Page_ShowTextNextLineEx(
page, #HPDF_Page
word_space, #HPDF_REAL
char_space, #HPDF_REAL
text, #c_char_p
)
#--- Color showing ------------------------------------------------------
# cs --not implemented yet
# CS --not implemented yet
# sc --not implemented yet
# scn --not implemented yet
# SC --not implemented yet
# SCN --not implemented yet
# g
#HPDF_STATUS HPDF_Page_SetGrayFill (HPDF_Page page, HPDF_REAL gray)
_HPDF_Page_SetGrayFill=haru.HPDF_Page_SetGrayFill
_HPDF_Page_SetGrayFill.restype=HPDF_STATUS
def HPDF_Page_SetGrayFill(page, gray):
    """Set the filling gray level (PDF 'g'); coerced to HPDF_REAL."""
    return _HPDF_Page_SetGrayFill(page, HPDF_REAL(gray))
# G
#HPDF_STATUS HPDF_Page_SetGrayStroke (HPDF_Page page, HPDF_REAL gray)
_HPDF_Page_SetGrayStroke=haru.HPDF_Page_SetGrayStroke
_HPDF_Page_SetGrayStroke.restype=HPDF_STATUS
def HPDF_Page_SetGrayStroke(page, gray):
    """Set the stroking gray level (PDF 'G'); coerced to HPDF_REAL."""
    return _HPDF_Page_SetGrayStroke(page, HPDF_REAL(gray))
# rg
#HPDF_STATUS HPDF_Page_SetRGBFill (HPDF_Page page, HPDF_REAL r, HPDF_REAL g, HPDF_REAL b)
_HPDF_Page_SetRGBFill=haru.HPDF_Page_SetRGBFill
_HPDF_Page_SetRGBFill.restype=HPDF_STATUS
def HPDF_Page_SetRGBFill(page, r, g, b):
    """Set the filling RGB color (PDF 'rg'); components coerced to HPDF_REAL."""
    return _HPDF_Page_SetRGBFill(page,
                                 HPDF_REAL(r), HPDF_REAL(g), HPDF_REAL(b))
# RG
#HPDF_STATUS HPDF_Page_SetRGBStroke (HPDF_Page page, HPDF_REAL r, HPDF_REAL g, HPDF_REAL b)
_HPDF_Page_SetRGBStroke=haru.HPDF_Page_SetRGBStroke
_HPDF_Page_SetRGBStroke.restype=HPDF_STATUS
def HPDF_Page_SetRGBStroke(page, r, g, b):
    """Set the stroking RGB color (PDF 'RG'); components coerced to HPDF_REAL."""
    return _HPDF_Page_SetRGBStroke(page,
                                   HPDF_REAL(r), HPDF_REAL(g), HPDF_REAL(b))
# k
#HPDF_STATUS HPDF_Page_SetCMYKFill (HPDF_Page page, HPDF_REAL c, HPDF_REAL m, HPDF_REAL y, HPDF_REAL k)
_HPDF_Page_SetCMYKFill=haru.HPDF_Page_SetCMYKFill
_HPDF_Page_SetCMYKFill.restype=HPDF_STATUS
def HPDF_Page_SetCMYKFill(page, c, m, y, k):
    """Set the filling CMYK color (PDF 'k'); components coerced to HPDF_REAL."""
    return _HPDF_Page_SetCMYKFill(page,
                                  HPDF_REAL(c), HPDF_REAL(m),
                                  HPDF_REAL(y), HPDF_REAL(k))
# K
#HPDF_STATUS HPDF_Page_SetCMYKStroke (HPDF_Page page, HPDF_REAL c, HPDF_REAL m, HPDF_REAL y, HPDF_REAL k)
_HPDF_Page_SetCMYKStroke=haru.HPDF_Page_SetCMYKStroke
_HPDF_Page_SetCMYKStroke.restype=HPDF_STATUS
def HPDF_Page_SetCMYKStroke(page, c, m, y, k):
    """Set the stroking CMYK color (PDF 'K'); components coerced to HPDF_REAL."""
    return _HPDF_Page_SetCMYKStroke(page,
                                    HPDF_REAL(c), HPDF_REAL(m),
                                    HPDF_REAL(y), HPDF_REAL(k))
#--- Shading patterns ---------------------------------------------------
# sh --not implemented yet
#--- In-line images -----------------------------------------------------
# BI --not implemented yet
# ID --not implemented yet
# EI --not implemented yet
#--- XObjects -----------------------------------------------------------
# Do
# Executes a named XObject (e.g. an image) on the page content stream.
#HPDF_STATUS HPDF_Page_ExecuteXObject (HPDF_Page page, HPDF_XObject obj)
HPDF_Page_ExecuteXObject=haru.HPDF_Page_ExecuteXObject
HPDF_Page_ExecuteXObject.restype=HPDF_STATUS
#--- Marked content -----------------------------------------------------
# BMC --not implemented yet
# BDC --not implemented yet
# EMC --not implemented yet
# MP --not implemented yet
# DP --not implemented yet
#--- Compatibility ------------------------------------------------------
# BX --not implemented yet
# EX --not implemented yet
#HPDF_STATUS HPDF_Page_DrawImage (HPDF_Page page, HPDF_Image image, HPDF_REAL x, HPDF_REAL y, HPDF_REAL width, HPDF_REAL height)
_HPDF_Page_DrawImage=haru.HPDF_Page_DrawImage
_HPDF_Page_DrawImage.restype=HPDF_STATUS
def HPDF_Page_DrawImage(page, image, x, y, width, height):
    """ctypes wrapper for HPDF_Page_DrawImage.

    Draws *image* on *page* inside the rectangle given by (x, y) and
    (width, height); the numeric arguments are coerced to HPDF_REAL.
    Returns HPDF_STATUS.
    """
    return _HPDF_Page_DrawImage(page, image, HPDF_REAL(x), HPDF_REAL(y),
                                HPDF_REAL(width), HPDF_REAL(height))
#HPDF_STATUS HPDF_Page_Circle (HPDF_Page page, HPDF_REAL x, HPDF_REAL y, HPDF_REAL ray)
_HPDF_Page_Circle=haru.HPDF_Page_Circle
_HPDF_Page_Circle.restype=HPDF_STATUS
def HPDF_Page_Circle(page, x, y, ray):
    """ctypes wrapper for HPDF_Page_Circle.

    Adds a circle of radius *ray* centred on (x, y); the numeric
    arguments are coerced to HPDF_REAL.  Returns HPDF_STATUS.
    """
    return _HPDF_Page_Circle(page, HPDF_REAL(x), HPDF_REAL(y),
                             HPDF_REAL(ray))
#HPDF_STATUS HPDF_Page_Ellipse (HPDF_Page page, HPDF_REAL x, HPDF_REAL y, HPDF_REAL xray, HPDF_REAL yray)
_HPDF_Page_Ellipse=haru.HPDF_Page_Ellipse
_HPDF_Page_Ellipse.restype=HPDF_STATUS
def HPDF_Page_Ellipse(page, x, y, xray, yray):
    """ctypes wrapper for HPDF_Page_Ellipse.

    Adds an ellipse centred on (x, y) with horizontal/vertical radii
    *xray*/*yray*; numeric arguments are coerced to HPDF_REAL.
    Returns HPDF_STATUS.
    """
    return _HPDF_Page_Ellipse(page, HPDF_REAL(x), HPDF_REAL(y),
                              HPDF_REAL(xray), HPDF_REAL(yray))
#HPDF_STATUS HPDF_Page_Arc (HPDF_Page page, HPDF_REAL x, HPDF_REAL y, HPDF_REAL ray, HPDF_REAL ang1, HPDF_REAL ang2)
_HPDF_Page_Arc=haru.HPDF_Page_Arc
_HPDF_Page_Arc.restype=HPDF_STATUS
def HPDF_Page_Arc(page, x, y, ray, ang1, ang2):
    """ctypes wrapper for HPDF_Page_Arc.

    Adds an arc of radius *ray* centred on (x, y) spanning the angles
    *ang1* to *ang2*; numeric arguments are coerced to HPDF_REAL.
    Returns HPDF_STATUS.
    """
    return _HPDF_Page_Arc(page, HPDF_REAL(x), HPDF_REAL(y), HPDF_REAL(ray),
                          HPDF_REAL(ang1), HPDF_REAL(ang2))
#HPDF_STATUS HPDF_Page_TextOut (HPDF_Page page, HPDF_REAL xpos, HPDF_REAL ypos, const char *text)
_HPDF_Page_TextOut=haru.HPDF_Page_TextOut
_HPDF_Page_TextOut.restype=HPDF_STATUS
def HPDF_Page_TextOut(
    page, #HPDF_Page
    xpos, #HPDF_REAL
    ypos, #HPDF_REAL
    text, #c_char_p
    ):
    """ctypes wrapper for HPDF_Page_TextOut: print *text* at (xpos, ypos).

    *text* may be a plain string, or a list/tuple of single characters
    or of integer character codes; sequences are flattened into a single
    string before the native call.  Returns HPDF_STATUS.
    """
    xpos=HPDF_REAL(xpos)
    ypos=HPDF_REAL(ypos)
    if type(text) in (types.ListType, types.TupleType):
        # A sequence of ints is mapped through chr() first; the element
        # type is sniffed from the last item only.
        if type(text[-1]) != types.StringType:
            text=[chr(i) for i in text]
        text=''.join(text)
    return _HPDF_Page_TextOut(
        page, #HPDF_Page
        xpos, #HPDF_REAL
        ypos, #HPDF_REAL
        text, #c_char_p
        )
#HPDF_STATUS HPDF_Page_TextRect (HPDF_Page page, HPDF_REAL left, HPDF_REAL top, HPDF_REAL right, HPDF_REAL bottom, const char *text, HPDF_TextAlignment align, HPDF_UINT *len)
#???
_HPDF_Page_TextRect=haru.HPDF_Page_TextRect
_HPDF_Page_TextRect.restype=HPDF_STATUS
def HPDF_Page_TextRect(
    page, #HPDF_Page
    left, #HPDF_REAL
    top, #HPDF_REAL
    right, #HPDF_REAL
    bottom, #HPDF_REAL
    text, #c_char_p
    align, #HPDF_TextAlignment
    length, #POINTER(HPDF_UINT)
    ):
    """ctypes wrapper for HPDF_Page_TextRect: lay *text* out inside the
    rectangle (left, top, right, bottom) with alignment *align*.

    The rectangle edges are coerced to HPDF_REAL.  *length* may be
    passed as a ready-made POINTER(HPDF_UINT), or as a list/tuple of
    ints which is converted to a pointer to an HPDF_UINT array here.
    NOTE(review): in the C API *len* is an output parameter; when a
    list/tuple is supplied the caller cannot see the written value --
    confirm intended usage against the libharu documentation.
    """
    left=HPDF_REAL(left)
    top=HPDF_REAL(top)
    right=HPDF_REAL(right)
    bottom=HPDF_REAL(bottom)
    if type(length) in (types.ListType, types.TupleType):
        size=len(length)
        length=pointer((HPDF_UINT*size)(*length))
    return _HPDF_Page_TextRect(
        page, #HPDF_Page
        left, #HPDF_REAL
        top, #HPDF_REAL
        right, #HPDF_REAL
        bottom, #HPDF_REAL
        text, #c_char_p
        align, #HPDF_TextAlignment
        length, #POINTER(HPDF_UINT)
        )
#HPDF_STATUS HPDF_Page_SetSlideShow (HPDF_Page page, HPDF_TransitionStyle type, HPDF_REAL disp_time, HPDF_REAL trans_time)
_HPDF_Page_SetSlideShow=haru.HPDF_Page_SetSlideShow
_HPDF_Page_SetSlideShow.restype=HPDF_STATUS
def HPDF_Page_SetSlideShow(page, tType, disp_time, trans_time):
    """ctypes wrapper for HPDF_Page_SetSlideShow.

    *tType* is an HPDF_TransitionStyle value; the display and transition
    times are coerced to HPDF_REAL before the native call.
    Returns HPDF_STATUS.
    """
    return _HPDF_Page_SetSlideShow(page, tType, HPDF_REAL(disp_time),
                                   HPDF_REAL(trans_time))
# C-style NULL used when passing "no value" to the ctypes bindings.
NULL=0
# Flag consumed by the PNG demo below: when True the demo is skipped
# (libhpdf presumably built without libpng support).
HPDF_NOPNGLIB=False
| Python |
###
## * << Haru Free PDF Library 2.0.0 >> -- png_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the package root *up* directory levels above this file to sys.path.

    BUGFIX: the original built the parent path by appending a hard-coded
    backslash separator, which only works on Windows (and relied on an
    invalid string escape).  os.path.dirname is portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    for _ in range(up):
        base = os.path.dirname(base)
    path = os.path.normpath(base)
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: report the error, free the document, exit(1)."""
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def draw_image (pdf, filename, x, y, text):
    """Load pngsuite/<filename>, draw it at (x, y) on the current page,
    and print the file name and *text* caption at the same position."""
    page = HPDF_GetCurrentPage (pdf)
    filename1= "pngsuite/%s" % filename
    image = HPDF_LoadPngImageFromFile (pdf, filename1)
    # Draw image to the canvas (at its natural pixel size).
    HPDF_Page_DrawImage (page, image, x, y, HPDF_Image_GetWidth (image),
                HPDF_Image_GetHeight (image))
    # Print the text.
    HPDF_Page_BeginText (page)
    HPDF_Page_SetTextLeading (page, 16)
    HPDF_Page_MoveTextPos (page, x, y)
    HPDF_Page_ShowTextNextLine (page, filename)
    HPDF_Page_ShowTextNextLine (page, text)
    HPDF_Page_EndText (page)
def main():
    """Generate png_demo.pdf: a sheet of pngsuite sample images, one per
    PNG colour-type/bit-depth combination, each with a caption."""
    global pdf
    # Output file: this script's path with the extension swapped to .pdf.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    HPDF_SetCompressionMode (pdf, HPDF_COMP_ALL)
    # create default-font
    font = HPDF_GetFont (pdf, "Helvetica", NULL)
    # add a new page object.
    page = HPDF_AddPage (pdf)
    HPDF_Page_SetWidth (page, 550)
    HPDF_Page_SetHeight (page, 650)
    # Open the document scrolled to the top of this page.
    dst = HPDF_Page_CreateDestination (page)
    HPDF_Destination_SetXYZ (dst, 0, HPDF_Page_GetHeight (page), 1)
    HPDF_SetOpenAction(pdf, dst)
    HPDF_Page_BeginText (page)
    HPDF_Page_SetFontAndSize (page, font, 20)
    HPDF_Page_MoveTextPos (page, 220, HPDF_Page_GetHeight (page) - 70)
    HPDF_Page_ShowText (page, "PngDemo")
    HPDF_Page_EndText (page)
    HPDF_Page_SetFontAndSize (page, font, 12)
    draw_image (pdf, "basn0g01.png", 100, HPDF_Page_GetHeight (page) - 150,
                "1bit grayscale.")
    draw_image (pdf, "basn0g02.png", 200, HPDF_Page_GetHeight (page) - 150,
                "2bit grayscale.")
    draw_image (pdf, "basn0g04.png", 300, HPDF_Page_GetHeight (page) - 150,
                "4bit grayscale.")
    draw_image (pdf, "basn0g08.png", 400, HPDF_Page_GetHeight (page) - 150,
                "8bit grayscale.")
    draw_image (pdf, "basn2c08.png", 100, HPDF_Page_GetHeight (page) - 250,
                "8bit color.")
    draw_image (pdf, "basn2c16.png", 200, HPDF_Page_GetHeight (page) - 250,
                "16bit color.")
    draw_image (pdf, "basn3p01.png", 100, HPDF_Page_GetHeight (page) - 350,
                "1bit pallet.")
    draw_image (pdf, "basn3p02.png", 200, HPDF_Page_GetHeight (page) - 350,
                "2bit pallet.")
    draw_image (pdf, "basn3p04.png", 300, HPDF_Page_GetHeight (page) - 350,
                "4bit pallet.")
    draw_image (pdf, "basn3p08.png", 400, HPDF_Page_GetHeight (page) - 350,
                "8bit pallet.")
    draw_image (pdf, "basn4a08.png", 100, HPDF_Page_GetHeight (page) - 450,
                "8bit alpha.")
    draw_image (pdf, "basn4a16.png", 200, HPDF_Page_GetHeight (page) - 450,
                "16bit alpha.")
    draw_image (pdf, "basn6a08.png", 100, HPDF_Page_GetHeight (page) - 550,
                "8bit alpha.")
    draw_image (pdf, "basn6a16.png", 200, HPDF_Page_GetHeight (page) - 550,
                "16bit alpha.")
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
# Entry point: this demo is meaningless when libhpdf lacks PNG support.
if HPDF_NOPNGLIB:
    printf("WARNING: if you want to run this demo, \n"
        "make libhpdf with HPDF_USE_PNGLIB option.\n")
    sys.exit(1)
else:
    main()
###
## * << Haru Free PDF Library 2.0.0 >> -- permission.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the package root *up* directory levels above this file to sys.path.

    BUGFIX: the original climbed the tree with a hard-coded backslash
    separator ('\\..'), which only works on Windows; os.path.dirname is
    portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    for _ in range(up):
        base = os.path.dirname(base)
    path = os.path.normpath(base)
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
# Banner drawn on the page; it also describes the permission settings below.
text = "User cannot print and copy this document."
# Owner password grants full access; the empty user password lets
# anyone open (but only read) the document.
owner_passwd = "owner"
user_passwd = ""
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: report the error, free the document, exit(1)."""
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def main ():
    """Generate permission.pdf: an R3-encrypted document whose permission
    flags allow reading only (no printing or copying)."""
    global pdf
    # Output file: this script's path with the extension swapped to .pdf.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # create default-font
    font = HPDF_GetFont (pdf, "Helvetica", NULL)
    # add a new page object.
    page = HPDF_AddPage (pdf)
    HPDF_Page_SetSize (page, HPDF_PAGE_SIZE_B5, HPDF_PAGE_LANDSCAPE)
    HPDF_Page_BeginText (page)
    HPDF_Page_SetFontAndSize (page, font, 20)
    # Centre the banner text on the page.
    tw = HPDF_Page_TextWidth (page, text)
    HPDF_Page_MoveTextPos (page, (HPDF_Page_GetWidth (page) - tw) / 2,
        (HPDF_Page_GetHeight (page) - 20) / 2)
    HPDF_Page_ShowText (page, text)
    HPDF_Page_EndText (page)
    # Read-only permissions with 128-bit (16 byte) revision-3 encryption.
    HPDF_SetPassword (pdf, owner_passwd, user_passwd)
    HPDF_SetPermission (pdf, HPDF_ENABLE_READ)
    HPDF_SetEncryptionMode (pdf, HPDF_ENCRYPT_R3, 16)
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- grid_sheet.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the package root *up* directory levels above this file to sys.path.

    BUGFIX: the original climbed the tree with a hard-coded backslash
    separator, which only works on Windows; os.path.dirname is portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    for _ in range(up):
        base = os.path.dirname(base)
    path = os.path.normpath(base)
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: report the error, free the document, exit(1)."""
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def print_grid (pdf, page):
    """Rule *page* with a 5pt grid (heavier lines every 10pt), edge tick
    marks, and coordinate labels every 10pt vertically / 50pt horizontally.

    Fill/stroke gray levels are restored to black before returning.
    """
    height = HPDF_Page_GetHeight (page)
    width = HPDF_Page_GetWidth (page)
    font = HPDF_GetFont (pdf, "Helvetica", NULL)
    HPDF_Page_SetFontAndSize (page, font, 5)
    HPDF_Page_SetGrayFill (page, 0.5)
    HPDF_Page_SetGrayStroke (page, 0.8)
    # Draw horizontal lines
    y = 0
    while (y < height):
        if (y % 10 == 0):
            HPDF_Page_SetLineWidth (page, 0.5)
        else:
            if (HPDF_Page_GetLineWidth (page) != 0.25):
                HPDF_Page_SetLineWidth (page, 0.25)
        HPDF_Page_MoveTo (page, 0, y)
        HPDF_Page_LineTo (page, width, y)
        HPDF_Page_Stroke (page)
        if (y % 10 == 0 and y > 0):
            # darker tick mark at the left edge
            HPDF_Page_SetGrayStroke (page, 0.5)
            HPDF_Page_MoveTo (page, 0, y)
            HPDF_Page_LineTo (page, 5, y)
            HPDF_Page_Stroke (page)
            HPDF_Page_SetGrayStroke (page, 0.8)
        y += 5
    # Draw vertical lines
    x = 0
    while (x < width):
        if (x % 10 == 0):
            HPDF_Page_SetLineWidth (page, 0.5)
        else:
            if (HPDF_Page_GetLineWidth (page) != 0.25):
                HPDF_Page_SetLineWidth (page, 0.25)
        HPDF_Page_MoveTo (page, x, 0)
        HPDF_Page_LineTo (page, x, height)
        HPDF_Page_Stroke (page)
        if (x % 50 == 0 and x > 0):
            # darker tick marks at both the bottom and top edges
            HPDF_Page_SetGrayStroke (page, 0.5)
            HPDF_Page_MoveTo (page, x, 0)
            HPDF_Page_LineTo (page, x, 5)
            HPDF_Page_Stroke (page)
            HPDF_Page_MoveTo (page, x, height)
            HPDF_Page_LineTo (page, x, height - 5)
            HPDF_Page_Stroke (page)
            HPDF_Page_SetGrayStroke (page, 0.8)
        x += 5
    # Draw horizontal text (y-axis labels along the left edge)
    y = 0
    while (y < height):
        if (y % 10 == 0 and y > 0):
            HPDF_Page_BeginText (page)
            HPDF_Page_MoveTextPos (page, 5, y - 2)
            buf="%u" % y
            HPDF_Page_ShowText (page, buf)
            HPDF_Page_EndText (page)
        y += 5
    # Draw vertical text (x-axis labels along the bottom and top edges)
    x = 0
    while (x < width):
        if (x % 50 == 0 and x > 0):
            HPDF_Page_BeginText (page)
            HPDF_Page_MoveTextPos (page, x, 5)
            buf="%u" % x
            HPDF_Page_ShowText (page, buf)
            HPDF_Page_EndText (page)
            HPDF_Page_BeginText (page)
            HPDF_Page_MoveTextPos (page, x, height - 10)
            HPDF_Page_ShowText (page, buf)
            HPDF_Page_EndText (page)
        x += 5
    # Restore black fill/stroke for subsequent drawing.
    HPDF_Page_SetGrayFill (page, 0)
    HPDF_Page_SetGrayStroke (page, 0)
def main():
    """Generate grid_sheet.pdf: a single 400x600 page ruled by print_grid."""
    global pdf
    # Output file: this script's path with the extension swapped to .pdf.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # add a new page object.
    page = HPDF_AddPage (pdf)
    HPDF_Page_SetHeight (page, 600)
    HPDF_Page_SetWidth (page, 400)
    print_grid (pdf, page)
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
if __name__=='__main__':
    main()
# print_grid is importable as a layout aid for the other demos.
__all__=['print_grid']
###
## * << Haru Free PDF Library 2.0.0 >> -- ttfont_demo_jp.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the package root *up* directory levels above this file to sys.path.

    BUGFIX: the original climbed the tree with a hard-coded backslash
    separator, which only works on Windows; os.path.dirname is portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    for _ in range(up):
        base = os.path.dirname(base)
    path = os.path.normpath(base)
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: report the error, free the document, exit(1)."""
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def main ():
    """Generate a TrueType-font sample sheet with Shift-JIS Japanese text.

    Usage:
        ttfont_jp_demo <ttf-font-filename> [-E]
        ttfont_jp_demo <ttc-font-filename> <index> [-E]

    The optional -E flag requests embedding the font in the PDF.
    """
    global pdf
    if (len(sys.argv) < 2):
        printf ("ttfont_jp_demo <ttf-font-filename> [-E]\n")
        printf ("ttfont_jp_demo <ttc-font-filename> <index> [-E]\n")
        return 1
    try:
        f = open ("mbtext/sjis.txt", "rb")
    # BUGFIX: was a bare "except:" which also swallowed unrelated errors
    # (KeyboardInterrupt, NameError, ...); only I/O failure is expected here.
    except IOError:
        printf ("error: cannot open 'mbtext/sjis.txt'\n")
        return 1
    SAMP_TXT=f.read(2048)
    f.close()
    # Output file: this script's path with the extension swapped to .pdf.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # declaration for using Japanese encoding.
    HPDF_UseJPEncodings (pdf)
    HPDF_SetCompressionMode (pdf, HPDF_COMP_ALL)
    # Load the ttf/ttc file; HPDF_TRUE as last argument embeds the font.
    if len(sys.argv) == 4 and sys.argv[3]=="-E":
        detail_font_name = HPDF_LoadTTFontFromFile2 (pdf, sys.argv[1],
                int (sys.argv[2]), HPDF_TRUE)
    elif len(sys.argv) == 3 and sys.argv[2]=="-E":
        detail_font_name = HPDF_LoadTTFontFromFile (pdf, sys.argv[1], HPDF_TRUE)
    elif len(sys.argv) == 3:
        detail_font_name = HPDF_LoadTTFontFromFile2 (pdf, sys.argv[1],
                int (sys.argv[2]), HPDF_FALSE)
    else:
        detail_font_name = HPDF_LoadTTFontFromFile (pdf, sys.argv[1], HPDF_FALSE)
    # add a new page object.
    page = HPDF_AddPage (pdf)
    title_font = HPDF_GetFont (pdf, "Helvetica", NULL)
    detail_font = HPDF_GetFont (pdf, detail_font_name, "90msp-RKSJ-H")
    HPDF_Page_SetFontAndSize (page, title_font, 10)
    HPDF_Page_BeginText (page)
    # move the position of the text to top of the page.
    HPDF_Page_MoveTextPos(page, 10, 190)
    HPDF_Page_ShowText (page, detail_font_name)
    HPDF_Page_ShowText (page, " (")
    HPDF_Page_ShowText (page, HPDF_Font_GetEncodingName (detail_font))
    HPDF_Page_ShowText (page, ")")
    HPDF_Page_SetFontAndSize (page, detail_font, 15)
    HPDF_Page_MoveTextPos (page, 10, -20)
    HPDF_Page_ShowText (page, "abcdefghijklmnopqrstuvwxyz")
    HPDF_Page_MoveTextPos (page, 0, -20)
    HPDF_Page_ShowText (page, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    HPDF_Page_MoveTextPos (page, 0, -20)
    HPDF_Page_ShowText (page, "1234567890")
    HPDF_Page_MoveTextPos (page, 0, -20)
    # The Shift-JIS sample text at increasing point sizes.
    HPDF_Page_SetFontAndSize (page, detail_font, 10)
    HPDF_Page_ShowText (page, SAMP_TXT)
    HPDF_Page_MoveTextPos (page, 0, -18)
    HPDF_Page_SetFontAndSize (page, detail_font, 16)
    HPDF_Page_ShowText (page, SAMP_TXT)
    HPDF_Page_MoveTextPos (page, 0, -27)
    HPDF_Page_SetFontAndSize (page, detail_font, 23)
    HPDF_Page_ShowText (page, SAMP_TXT)
    HPDF_Page_MoveTextPos (page, 0, -36)
    HPDF_Page_SetFontAndSize (page, detail_font, 30)
    HPDF_Page_ShowText (page, SAMP_TXT)
    HPDF_Page_MoveTextPos (page, 0, -36)
    # Size the page to fit the widest sample line.
    pw = HPDF_Page_TextWidth (page, SAMP_TXT)
    page_height = 210
    page_width = pw + 40
    HPDF_Page_SetWidth (page, page_width)
    HPDF_Page_SetHeight (page, page_height)
    # finish to print text.
    HPDF_Page_EndText (page)
    HPDF_Page_SetLineWidth (page, 0.5)
    HPDF_Page_MoveTo (page, 10, page_height - 25)
    HPDF_Page_LineTo (page, page_width - 10, page_height - 25)
    HPDF_Page_Stroke (page)
    HPDF_Page_MoveTo (page, 10, page_height - 85)
    HPDF_Page_LineTo (page, page_width - 10, page_height - 85)
    HPDF_Page_Stroke (page)
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
main()
#coding=ISO8859-2
###
## * << Haru Free PDF Library 2.0.0 >> -- outline_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the package root *up* directory levels above this file to sys.path.

    BUGFIX: the original climbed the tree with a hard-coded backslash
    separator, which only works on Windows; os.path.dirname is portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    for _ in range(up):
        base = os.path.dirname(base)
    path = os.path.normpath(base)
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: report the error, free the document, exit(1)."""
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def print_page (page, page_num):
    """Make *page* 800x800 and stamp its page number near the top left."""
    HPDF_Page_SetWidth (page, 800)
    HPDF_Page_SetHeight (page, 800)
    label = "Page:%d" % page_num
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, 30, 740)
    HPDF_Page_ShowText (page, label)
    HPDF_Page_EndText (page)
def main():
    """Generate outline_demo.pdf: three pages reachable from an outline tree."""
    global pdf
    page=[None for i in range(4)]
    outline=[None for i in range(4)]
    # Output file: this script's path with the extension swapped to .pdf.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # create default-font
    font = HPDF_GetFont (pdf, "Helvetica", NULL)
    # Set page mode to use outlines.
    HPDF_SetPageMode(pdf, HPDF_PAGE_MODE_USE_OUTLINE)
    # Add 3 pages to the document.
    page[0] = HPDF_AddPage (pdf)
    HPDF_Page_SetFontAndSize (page[0], font, 30)
    print_page(page[0], 1)
    page[1] = HPDF_AddPage (pdf)
    HPDF_Page_SetFontAndSize (page[1], font, 30)
    print_page(page[1], 2)
    page[2] = HPDF_AddPage (pdf)
    HPDF_Page_SetFontAndSize (page[2], font, 30)
    print_page(page[2], 3)
    # create outline root.
    root = HPDF_CreateOutline (pdf, NULL, "OutlineRoot", NULL)
    HPDF_Outline_SetOpened (root, HPDF_TRUE)
    outline[0] = HPDF_CreateOutline (pdf, root, "page1", NULL)
    outline[1] = HPDF_CreateOutline (pdf, root, "page2", NULL)
    # BUGFIX: the creation of outline[2] had been commented out (it used an
    # ISO8859-2 encoded title), yet outline[2] is dereferenced below -- it
    # was passed as None.  Create it with a plain-ASCII title instead; the
    # encoded variant would pass HPDF_GetEncoder (pdf, "ISO8859-2") as the
    # last argument.
    outline[2] = HPDF_CreateOutline (pdf, root, "page3", NULL)
    # create destination objects on each page
    # and link them to the outline items.
    dst = HPDF_Page_CreateDestination (page[0])
    HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page[0]), 1)
    HPDF_Outline_SetDestination(outline[0], dst)
    dst = HPDF_Page_CreateDestination (page[1])
    HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page[1]), 1)
    HPDF_Outline_SetDestination(outline[1], dst)
    dst = HPDF_Page_CreateDestination (page[2])
    HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page[2]), 1)
    HPDF_Outline_SetDestination(outline[2], dst)
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- jpfont_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the package root *up* directory levels above this file to sys.path.

    BUGFIX: the original climbed the tree with a hard-coded backslash
    separator, which only works on Windows; os.path.dirname is portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    for _ in range(up):
        base = os.path.dirname(base)
    path = os.path.normpath(base)
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: report the error, free the document, exit(1)."""
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def main():
    """Generate a per-font sample sheet for the built-in CNS fonts.

    One page (with an outline entry) is produced for each SimSun/SimHei
    variant, showing ASCII samples and a GB-EUC encoded text block at
    several point sizes.
    """
    global pdf
    # Built-in simplified-Chinese font names understood by libharu.
    chinesefonts = [
        "SimSun",
        "SimSun,Bold",
        "SimSun,Italic",
        "SimSun,BoldItalic",
        "SimHei",
        "SimHei,Bold",
        "SimHei,Italic",
        "SimHei,BoldItalic",
    ]
    detail_font=[]
    PAGE_HEIGHT = 210
    try:
        f = open ("mbtext/cp936.txt", "rb")
    # BUGFIX: was a bare "except:" that swallowed unrelated errors.
    except IOError:
        # BUGFIX: the message used to name 'mbtext/sjis.txt', which is not
        # the file being opened.
        printf ("error: cannot open 'mbtext/cp936.txt'\n")
        return 1
    samp_text=f.read(2048)
    f.close ()
    # Output file: this script's path with the extension swapped to .pdf.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # configure pdf-document to be compressed.
    HPDF_SetCompressionMode (pdf, HPDF_COMP_ALL)
    # declaration for using simplified-Chinese fonts and encodings.
    HPDF_UseCNSEncodings (pdf)
    HPDF_UseCNSFonts (pdf)
    for i in chinesefonts:
        detail_font.append( HPDF_GetFont (pdf, i, "GB-EUC-H"))
    # Set page mode to use outlines.
    HPDF_SetPageMode(pdf, HPDF_PAGE_MODE_USE_OUTLINE)
    # create outline root.
    root = HPDF_CreateOutline (pdf, NULL, "JP font demo", NULL)
    HPDF_Outline_SetOpened (root, HPDF_TRUE)
    for i in detail_font:
        # add a new page object.
        page = HPDF_AddPage (pdf)
        # create outline entry
        outline = HPDF_CreateOutline (pdf, root,
                HPDF_Font_GetFontName (i), NULL)
        dst = HPDF_Page_CreateDestination (page)
        HPDF_Outline_SetDestination(outline, dst)
        title_font = HPDF_GetFont (pdf, "Helvetica", NULL)
        HPDF_Page_SetFontAndSize (page, title_font, 10)
        HPDF_Page_BeginText (page)
        # move the position of the text to top of the page.
        HPDF_Page_MoveTextPos(page, 10, 190)
        HPDF_Page_ShowText (page, HPDF_Font_GetFontName (i))
        HPDF_Page_SetFontAndSize (page, i, 15)
        HPDF_Page_MoveTextPos (page, 10, -20)
        HPDF_Page_ShowText (page, "abcdefghijklmnopqrstuvwxyz")
        HPDF_Page_MoveTextPos (page, 0, -20)
        HPDF_Page_ShowText (page, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
        HPDF_Page_MoveTextPos (page, 0, -20)
        HPDF_Page_ShowText (page, "1234567890")
        HPDF_Page_MoveTextPos (page, 0, -20)
        # The sample text at increasing point sizes.
        HPDF_Page_SetFontAndSize (page, i, 10)
        HPDF_Page_ShowText (page, samp_text)
        HPDF_Page_MoveTextPos (page, 0, -18)
        HPDF_Page_SetFontAndSize (page, i, 16)
        HPDF_Page_ShowText (page, samp_text)
        HPDF_Page_MoveTextPos (page, 0, -27)
        HPDF_Page_SetFontAndSize (page, i, 23)
        HPDF_Page_ShowText (page, samp_text)
        HPDF_Page_MoveTextPos (page, 0, -36)
        HPDF_Page_SetFontAndSize (page, i, 30)
        HPDF_Page_ShowText (page, samp_text)
        p = HPDF_Page_GetCurrentTextPos (page)
        # finish to print text.
        HPDF_Page_EndText (page)
        HPDF_Page_SetLineWidth (page, 0.5)
        # Tick marks under the largest sample line, one per two bytes
        # (each CJK glyph is a two-byte code in GB-EUC).
        x_pos = 20
        for j in range(len (samp_text) // 2):
            HPDF_Page_MoveTo (page, x_pos, p.y - 10)
            HPDF_Page_LineTo (page, x_pos, p.y - 12)
            HPDF_Page_Stroke (page)
            x_pos = x_pos + 30
        # Size the page to fit the text, then draw separator rules.
        HPDF_Page_SetWidth (page, p.x + 20)
        HPDF_Page_SetHeight (page, PAGE_HEIGHT)
        HPDF_Page_MoveTo (page, 10, PAGE_HEIGHT - 25)
        HPDF_Page_LineTo (page, p.x + 10, PAGE_HEIGHT - 25)
        HPDF_Page_Stroke (page)
        HPDF_Page_MoveTo (page, 10, PAGE_HEIGHT - 85)
        HPDF_Page_LineTo (page, p.x + 10, PAGE_HEIGHT - 85)
        HPDF_Page_Stroke (page)
        HPDF_Page_MoveTo (page, 10, p.y - 12)
        HPDF_Page_LineTo (page, p.x + 10, p.y - 12)
        HPDF_Page_Stroke (page)
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- encryption.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the package root *up* directory levels above this file to sys.path.

    BUGFIX: the original climbed the tree with a hard-coded backslash
    separator, which only works on Windows; os.path.dirname is portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    for _ in range(up):
        base = os.path.dirname(base)
    path = os.path.normpath(base)
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
# Banner drawn on the page.
text = "This is an encrypt document example."
# Passwords installed via HPDF_SetPassword in main() below.
owner_passwd = "owner"
user_passwd = "user"
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: report the error, free the document, exit(1)."""
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def main ():
    """Generate encryption.pdf: a password-protected single-page document."""
    global pdf
    # Output file: this script's path with the extension swapped to .pdf.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # create default-font
    font = HPDF_GetFont (pdf, "Helvetica", NULL)
    # add a new page object.
    page = HPDF_AddPage (pdf)
    HPDF_Page_SetSize (page, HPDF_PAGE_SIZE_B5, HPDF_PAGE_LANDSCAPE)
    HPDF_Page_BeginText (page)
    HPDF_Page_SetFontAndSize (page, font, 20)
    # Centre the banner text on the page.
    tw = HPDF_Page_TextWidth (page, text)
    HPDF_Page_MoveTextPos (page, (HPDF_Page_GetWidth (page) - tw) / 2,
        (HPDF_Page_GetHeight (page) - 20) / 2)
    HPDF_Page_ShowText (page, text)
    HPDF_Page_EndText (page)
    # Setting a password turns on encryption for the whole document.
    HPDF_SetPassword (pdf, owner_passwd, user_passwd)
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- ext_gstate_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the package root *up* directory levels above this file to sys.path.

    BUGFIX: the original climbed the tree with a hard-coded backslash
    separator, which only works on Windows; os.path.dirname is portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    for _ in range(up):
        base = os.path.dirname(base)
    path = os.path.normpath(base)
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: report the error, free the document, exit(1)."""
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def draw_circles (page, description, x, y):
    """Draw three overlapping red/green/blue circles with a caption,
    anchored at (x, y)."""
    HPDF_Page_SetLineWidth (page, 1.0)
    HPDF_Page_SetRGBStroke (page, 0.0, 0.0, 0.0)
    circles = (
        (1.0, 0.0, 0.0, 40, 40),        # red, lower left
        (0.0, 1.0, 0.0, 100, 40),       # green, lower right
        (0.0, 0.0, 1.0, 70, 74.64),     # blue, top centre
    )
    for red, green, blue, dx, dy in circles:
        HPDF_Page_SetRGBFill (page, red, green, blue)
        HPDF_Page_Circle (page, x + dx, y + dy, 40)
        HPDF_Page_ClosePathFillStroke (page)
    # Caption above the circles, in black.
    HPDF_Page_SetRGBFill (page, 0.0, 0.0, 0.0)
    HPDF_Page_BeginText (page)
    HPDF_Page_TextOut (page, x + 0.0, y + 130.0, description)
    HPDF_Page_EndText (page)
def main ():
    """Generate ext_gstate_demo.pdf: the draw_circles motif rendered under
    various extended graphics states (alpha levels and blend modes)."""
    global pdf
    PAGE_WIDTH = 600
    PAGE_HEIGHT = 900
    # Output file: this script's path with the extension swapped to .pdf.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    hfont = HPDF_GetFont (pdf, "Helvetica-Bold", NULL)
    # add a new page object.
    page = HPDF_AddPage (pdf)
    HPDF_Page_SetFontAndSize (page, hfont, 10)
    HPDF_Page_SetHeight (page, PAGE_HEIGHT)
    HPDF_Page_SetWidth (page, PAGE_WIDTH)
    # normal (no extended graphics state)
    HPDF_Page_GSave (page)
    draw_circles (page, "normal", 40.0, PAGE_HEIGHT - 170)
    HPDF_Page_GRestore (page)
    # transparency (0.8) -- fill and stroke alpha
    HPDF_Page_GSave (page)
    gstate = HPDF_CreateExtGState (pdf)
    HPDF_ExtGState_SetAlphaFill (gstate, 0.8)
    HPDF_ExtGState_SetAlphaStroke (gstate, 0.8)
    HPDF_Page_SetExtGState (page, gstate)
    draw_circles (page, "alpha fill = 0.8", 230.0, PAGE_HEIGHT - 170)
    HPDF_Page_GRestore (page)
    # transparency (0.4) -- fill alpha only
    HPDF_Page_GSave (page)
    gstate = HPDF_CreateExtGState (pdf)
    HPDF_ExtGState_SetAlphaFill (gstate, 0.4)
    HPDF_Page_SetExtGState (page, gstate)
    draw_circles (page, "alpha fill = 0.4", 420.0, PAGE_HEIGHT - 170)
    HPDF_Page_GRestore (page)
    # One sample per blend mode, laid out three per row below the alpha
    # row.  (This replaces eleven copy-pasted stanzas from the original.
    # HPDF_BM_COLOR_BUM and HPDF_BM_EXCLUSHON are the constant names as
    # actually exported by the bindings, misspellings included.)
    blend_modes = (
        HPDF_BM_MULTIPLY, HPDF_BM_SCREEN, HPDF_BM_OVERLAY,
        HPDF_BM_DARKEN, HPDF_BM_LIGHTEN, HPDF_BM_COLOR_DODGE,
        HPDF_BM_COLOR_BUM, HPDF_BM_HARD_LIGHT, HPDF_BM_SOFT_LIGHT,
        HPDF_BM_DIFFERENCE, HPDF_BM_EXCLUSHON,
    )
    labels = (
        "HPDF_BM_MULTIPLY", "HPDF_BM_SCREEN", "HPDF_BM_OVERLAY",
        "HPDF_BM_DARKEN", "HPDF_BM_LIGHTEN", "HPDF_BM_COLOR_DODGE",
        "HPDF_BM_COLOR_BUM", "HPDF_BM_HARD_LIGHT", "HPDF_BM_SOFT_LIGHT",
        "HPDF_BM_DIFFERENCE", "HPDF_BM_EXCLUSHON",
    )
    for i in range(len(blend_modes)):
        x = (40.0, 230.0, 420.0)[i % 3]
        y = PAGE_HEIGHT - 340 - 170 * (i // 3)
        HPDF_Page_GSave (page)
        gstate = HPDF_CreateExtGState (pdf)
        HPDF_ExtGState_SetBlendMode (gstate, blend_modes[i])
        HPDF_Page_SetExtGState (page, gstate)
        draw_circles (page, labels[i], x, y)
        HPDF_Page_GRestore (page)
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- raw_image_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original appended a literal '\\..' per level, which os.path.normpath
    only resolves as a parent-directory step on Windows; os.pardir makes the
    lookup portable while producing the same path on Windows.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    target = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if target not in sys.path:
        sys.path.append(target)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: print the error description and abort.

    HPDF_Error_Handler wraps this into the ctypes callback signature that
    HPDF_New expects; the document is unusable after a library error, so it
    is freed and the script exits.
    """
    global pdf
    # error_detail maps libharu status codes to readable descriptions.
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
# 32x32-pixel, 1-bit-deep raster (4 bytes per row, 128 bytes total) fed to
# HPDF_LoadRawImageFromMem in main() below with HPDF_CS_DEVICE_GRAY depth 1.
RAW_IMAGE_DATA=[
    0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xfc,
    0xff, 0xff, 0xff, 0xf8, 0xff, 0xff, 0xff, 0xf0,
    0xf3, 0xf3, 0xff, 0xe0, 0xf3, 0xf3, 0xff, 0xc0,
    0xf3, 0xf3, 0xff, 0x80, 0xf3, 0x33, 0xff, 0x00,
    0xf3, 0x33, 0xfe, 0x00, 0xf3, 0x33, 0xfc, 0x00,
    0xf8, 0x07, 0xf8, 0x00, 0xf8, 0x07, 0xf0, 0x00,
    0xfc, 0xcf, 0xe0, 0x00, 0xfc, 0xcf, 0xc0, 0x00,
    0xff, 0xff, 0x80, 0x00, 0xff, 0xff, 0x00, 0x00,
    0xff, 0xfe, 0x00, 0x00, 0xff, 0xfc, 0x00, 0x00,
    0xff, 0xf8, 0x0f, 0xe0, 0xff, 0xf0, 0x0f, 0xe0,
    0xff, 0xe0, 0x0c, 0x30, 0xff, 0xc0, 0x0c, 0x30,
    0xff, 0x80, 0x0f, 0xe0, 0xff, 0x00, 0x0f, 0xe0,
    0xfe, 0x00, 0x0c, 0x30, 0xfc, 0x00, 0x0c, 0x30,
    0xf8, 0x00, 0x0f, 0xe0, 0xf0, 0x00, 0x0f, 0xe0,
    0xe0, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00,
    0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
]
def main ():
    """Render <script-name>.pdf demonstrating raw-image loading: an RGB raw
    file, a grayscale raw file, and a 1-bit grayscale image from memory."""
    global pdf
    # output file: same path as the script, with a .pdf extension.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    HPDF_SetCompressionMode (pdf, HPDF_COMP_ALL)
    # create default-font
    font = HPDF_GetFont (pdf, "Helvetica", NULL)
    # add a new page object.
    page = HPDF_AddPage (pdf)
    HPDF_Page_SetWidth (page, 172)
    HPDF_Page_SetHeight (page, 80)
    HPDF_Page_BeginText (page)
    HPDF_Page_SetFontAndSize (page, font, 20)
    # NOTE(review): x=220 lies beyond the 172pt page width, so this title is
    # drawn off-page -- kept as-is from the original demo; confirm intent.
    HPDF_Page_MoveTextPos (page, 220, HPDF_Page_GetHeight (page) - 70)
    HPDF_Page_ShowText (page, "RawImageDemo")
    HPDF_Page_EndText (page)
    # load RGB raw-image file.
    image = HPDF_LoadRawImageFromFile (pdf, "rawimage/32_32_rgb.dat",
        32, 32, HPDF_CS_DEVICE_RGB)
    x = 20
    y = 20
    # Draw image to the canvas. (normal-mode with actual size.)
    HPDF_Page_DrawImage (page, image, x, y, 32, 32)
    # load GrayScale raw-image file.
    image = HPDF_LoadRawImageFromFile (pdf, "rawimage/32_32_gray.dat",
        32, 32, HPDF_CS_DEVICE_GRAY)
    x = 70
    y = 20
    # Draw image to the canvas. (normal-mode with actual size.)
    HPDF_Page_DrawImage (page, image, x, y, 32, 32)
    # load GrayScale raw-image (1bit) file from memory.
    image = HPDF_LoadRawImageFromMem (pdf, RAW_IMAGE_DATA, 32, 32,
        HPDF_CS_DEVICE_GRAY, 1)
    x = 120
    y = 20
    # Draw image to the canvas. (normal-mode with actual size.)
    HPDF_Page_DrawImage (page, image, x, y, 32, 32)
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- jpeg_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original appended a literal '\\..' per level, which os.path.normpath
    only resolves as a parent-directory step on Windows; os.pardir makes the
    lookup portable while producing the same path on Windows.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    target = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if target not in sys.path:
        sys.path.append(target)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: print the error description and abort.

    HPDF_Error_Handler wraps this into the ctypes callback signature that
    HPDF_New expects; the document is unusable after a library error, so it
    is freed and the script exits.
    """
    global pdf
    # error_detail maps libharu status codes to readable descriptions.
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def draw_image (pdf, filename, x, y, text):
    """Load images/<filename> as a JPEG, draw it at (x, y) at its natural
    size, then caption it with the file name and *text* below the anchor."""
    page = HPDF_GetCurrentPage (pdf)
    path = "images/%s" % filename
    image = HPDF_LoadJpegImageFromFile (pdf, path)
    # Paint the image at its intrinsic pixel dimensions.
    img_w = HPDF_Image_GetWidth (image)
    img_h = HPDF_Image_GetHeight (image)
    HPDF_Page_DrawImage (page, image, x, y, img_w, img_h)
    # Caption: file name on one line, description on the next (16pt leading).
    HPDF_Page_BeginText (page)
    HPDF_Page_SetTextLeading (page, 16)
    HPDF_Page_MoveTextPos (page, x, y)
    HPDF_Page_ShowTextNextLine (page, filename)
    HPDF_Page_ShowTextNextLine (page, text)
    HPDF_Page_EndText (page)
def main():
    """Render jpeg_demo.pdf: loads images/rgb.jpg and images/gray.jpg via
    draw_image and labels each, with an open action scrolling to the top."""
    global pdf
    # output file: same path as the script, with a .pdf extension.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    HPDF_SetCompressionMode (pdf, HPDF_COMP_ALL)
    # create default-font
    font = HPDF_GetFont (pdf, "Helvetica", NULL)
    # add a new page object.
    page = HPDF_AddPage (pdf)
    HPDF_Page_SetWidth (page, 650)
    HPDF_Page_SetHeight (page, 500)
    # open the document at the top of this page (zoom factor 1).
    dst = HPDF_Page_CreateDestination (page)
    HPDF_Destination_SetXYZ (dst, 0, HPDF_Page_GetHeight (page), 1)
    HPDF_SetOpenAction(pdf, dst)
    HPDF_Page_BeginText (page)
    HPDF_Page_SetFontAndSize (page, font, 20)
    HPDF_Page_MoveTextPos (page, 220, HPDF_Page_GetHeight (page) - 70)
    HPDF_Page_ShowText (page, "JpegDemo")
    HPDF_Page_EndText (page)
    HPDF_Page_SetFontAndSize (page, font, 12)
    draw_image (pdf, "rgb.jpg", 70, HPDF_Page_GetHeight (page) - 410,
        "24bit color image")
    draw_image (pdf, "gray.jpg", 340, HPDF_Page_GetHeight (page) - 410,
        "8bit grayscale image")
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- outline_demo_jp.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original appended a literal '\\..' per level, which os.path.normpath
    only resolves as a parent-directory step on Windows; os.pardir makes the
    lookup portable while producing the same path on Windows.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    target = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if target not in sys.path:
        sys.path.append(target)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
# Leftover debug aid from the port: report which CreateOutLine bindings the
# star-imports above actually provided.
for name in dir():
    if 'CreateOutLine' in name:
        print(name)
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: print the error description and abort.

    HPDF_Error_Handler wraps this into the ctypes callback signature that
    HPDF_New expects; the document is unusable after a library error, so it
    is freed and the script exits.
    """
    global pdf
    # error_detail maps libharu status codes to readable descriptions.
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def print_page (page, page_num):
    """Size *page* to 200x300pt and print 'Page:<page_num>' near the top."""
    HPDF_Page_SetWidth (page, 200)
    HPDF_Page_SetHeight (page, 300)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, 50, 250)
    HPDF_Page_ShowText (page, "Page:%d" % page_num)
    HPDF_Page_EndText (page)
def main():
    """Render outline_demo_jp.pdf: three pages plus a bookmark (outline)
    tree, including one outline entry built from Shift-JIS sample text."""
    global pdf
    page=[None for i in range(4)]
    outline=[None for i in range(4)]
    # output file: same path as the script, with a .pdf extension.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    try:
        f = open ("mbtext/sjis.txt", "rb")
    # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; only I/O failures should be handled here.
    except IOError:
        printf ("error: cannot open 'mbtext/sjis.txt'\n")
        return 1
    SAMP_TXT=f.read(2048)
    f.close ()
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # declaration for using Japanese encoding.
    HPDF_UseJPEncodings (pdf)
    # create default-font
    font = HPDF_GetFont (pdf, "Helvetica", NULL)
    # Set page mode to use outlines.
    HPDF_SetPageMode(pdf, HPDF_PAGE_MODE_USE_OUTLINE)
    # Add 3 pages to the document.
    page[0] = HPDF_AddPage (pdf)
    HPDF_Page_SetFontAndSize (page[0], font, 20)
    print_page(page[0], 1)
    page[1] = HPDF_AddPage (pdf)
    HPDF_Page_SetFontAndSize (page[1], font, 20)
    print_page(page[1], 2)
    page[2] = HPDF_AddPage (pdf)
    HPDF_Page_SetFontAndSize (page[2], font, 20)
    print_page(page[2], 3)
    # create outline root.
    root = HPDF_CreateOutLine (pdf, NULL, "OutlineRoot", NULL)
    HPDF_Outline_SetOpened (root, HPDF_TRUE)
    outline[0] = HPDF_CreateOutLine (pdf, root, "page1", NULL)
    outline[1] = HPDF_CreateOutLine (pdf, root, "page2", NULL)
    # create an outline whose title is the Shift-JIS sample text, using the
    # 90ms-RKSJ-H encoder to interpret the bytes.
    outline[2] = HPDF_CreateOutLine (pdf, root, SAMP_TXT,
        HPDF_GetEncoder (pdf, "90ms-RKSJ-H"))
    # create destination objects on each page and link them to the outline
    # items, so clicking a bookmark jumps to the page top.
    dst = HPDF_Page_CreateDestination (page[0])
    HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page[0]), 1)
    HPDF_Outline_SetDestination(outline[0], dst)
    # HPDF_Catalog_SetOpenAction(dst)
    dst = HPDF_Page_CreateDestination (page[1])
    HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page[1]), 1)
    HPDF_Outline_SetDestination(outline[1], dst)
    dst = HPDF_Page_CreateDestination (page[2])
    HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page[2]), 1)
    HPDF_Outline_SetDestination(outline[2], dst)
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- text_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original appended a literal '\\..' per level, which os.path.normpath
    only resolves as a parent-directory step on Windows; os.pardir makes the
    lookup portable while producing the same path on Windows.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    target = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if target not in sys.path:
        sys.path.append(target)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
from grid_sheet import *
from math import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: print the error description and abort.

    HPDF_Error_Handler wraps this into the ctypes callback signature that
    HPDF_New expects; the document is unusable after a library error, so it
    is freed and the script exits.
    """
    global pdf
    # error_detail maps libharu status codes to readable descriptions.
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def show_stripe_pattern (page, x, y):
    """Stroke horizontal blue stripes over the area of "ABCabc123" starting
    at (x, y); used to reveal the text-clipping rendering modes.

    Leaves the line width set to 2.5 on exit, as the caller expects.
    """
    for iy in range(0, 50, 3):
        HPDF_Page_SetRGBStroke (page, 0.0, 0.0, 0.5)
        HPDF_Page_SetLineWidth (page, 1)
        HPDF_Page_MoveTo (page, x, y + iy)
        HPDF_Page_LineTo (page, x + HPDF_Page_TextWidth (page, "ABCabc123"),
            y + iy)
        HPDF_Page_Stroke (page)
    HPDF_Page_SetLineWidth (page, 2.5)
def show_description (page, x, y, text):
    """Print *text* as a small black label just below (x, y), restoring the
    font size and fill colour that were active on entry."""
    saved_size = HPDF_Page_GetCurrentFontSize (page)
    saved_font = HPDF_Page_GetCurrentFont (page)
    saved_fill = HPDF_Page_GetRGBFill (page)
    HPDF_Page_BeginText (page)
    HPDF_Page_SetRGBFill (page, 0, 0, 0)
    HPDF_Page_SetTextRenderingMode (page, HPDF_FILL)
    HPDF_Page_SetFontAndSize (page, saved_font, 10)
    HPDF_Page_TextOut (page, x, y - 12, text)
    HPDF_Page_EndText (page)
    # Restore the caller's text state.
    HPDF_Page_SetFontAndSize (page, saved_font, saved_size)
    HPDF_Page_SetRGBFill (page, saved_fill.r, saved_fill.g, saved_fill.b)
def main ():
    """Render text_demo.pdf: font sizing, per-character colouring, the text
    rendering modes, text-matrix transforms (rotation, skew, scaling), and
    char/word spacing."""
    global pdf
    page_title = "Text Demo"
    samp_text = "abcdefgABCDEFG123!#$%&+-@?"
    samp_text2 = "The quick brown fox jumps over the lazy dog."
    # output file: same path as the script, with a .pdf extension.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # set compression mode
    HPDF_SetCompressionMode (pdf, HPDF_COMP_ALL)
    # create default-font
    font = HPDF_GetFont (pdf, "Helvetica", NULL)
    # add a new page object.
    page = HPDF_AddPage (pdf)
    # draw grid to the page
    print_grid (pdf, page)
    # print the border lines of the page.
    HPDF_Page_SetLineWidth (page, 1)
    HPDF_Page_Rectangle (page, 50, 50, HPDF_Page_GetWidth(page) - 100,
        HPDF_Page_GetHeight (page) - 110)
    HPDF_Page_Stroke (page)
    # print the title of the page (with positioning center).
    HPDF_Page_SetFontAndSize (page, font, 24)
    tw = HPDF_Page_TextWidth (page, page_title)
    HPDF_Page_BeginText (page)
    HPDF_Page_TextOut (page, (HPDF_Page_GetWidth(page) - tw) / 2,
        HPDF_Page_GetHeight (page) - 50, page_title)
    HPDF_Page_EndText (page)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, 60, HPDF_Page_GetHeight(page) - 60)
    # font-size ladder: 8pt growing by 1.5x while under 60pt.
    fsize = 8
    while (fsize < 60):
        HPDF_Page_SetFontAndSize(page, font, fsize)
        HPDF_Page_MoveTextPos (page, 0, -5 - fsize)
        # measure how many characters fit within the line width.
        length = HPDF_Page_MeasureText (page, samp_text,
            HPDF_Page_GetWidth(page) - 120, HPDF_FALSE, NULL)
        # BUGFIX: truncate by slicing.  The original `'%*s\0' % (length, buf)`
        # PADS to at least `length` characters and never truncates (and
        # appended a literal NUL); the C demo uses the truncating '%.*s'.
        buf = samp_text[:int(length)]
        HPDF_Page_ShowText (page, buf)
        # print the description.
        HPDF_Page_MoveTextPos (page, 0, -10)
        HPDF_Page_SetFontAndSize(page, font, 8)
        buf="Fontsize=%.0f" %fsize
        HPDF_Page_ShowText (page, buf)
        fsize *= 1.5
    # font color: fade the sample text through three colour ramps.
    HPDF_Page_SetFontAndSize(page, font, 8)
    HPDF_Page_MoveTextPos (page, 0, -30)
    HPDF_Page_ShowText (page, "Font color")
    HPDF_Page_SetFontAndSize (page, font, 18)
    HPDF_Page_MoveTextPos (page, 0, -20)
    length = len (samp_text)
    for i in range(length):
        # one [char, NUL] pair per glyph so each gets its own fill colour.
        buf=[None ,None]
        r = i / float(length)
        g = 1 - (i / float(length))
        buf[0] = samp_text[i]
        buf[1] = '\0'
        HPDF_Page_SetRGBFill (page, r, g, 0.0)
        HPDF_Page_ShowText (page, buf)
    HPDF_Page_MoveTextPos (page, 0, -25)
    for i in range(length):
        buf=[None ,None]
        r = i / float(length)
        b = 1 - (i / float(length))
        buf[0] = samp_text[i]
        buf[1] = '\0'
        HPDF_Page_SetRGBFill (page, r, 0.0, b)
        HPDF_Page_ShowText (page, buf)
    HPDF_Page_MoveTextPos (page, 0, -25)
    for i in range(length):
        buf=[None ,None]
        b = i / float(length)
        g = 1 - (i / float(length))
        buf[0] = samp_text[i]
        buf[1] = '\0'
        HPDF_Page_SetRGBFill (page, 0.0, g, b)
        HPDF_Page_ShowText (page, buf)
    HPDF_Page_EndText (page)
    ypos = 450
    #
    # Font rendering mode
    #
    HPDF_Page_SetFontAndSize(page, font, 32)
    HPDF_Page_SetRGBFill (page, 0.5, 0.5, 0.0)
    HPDF_Page_SetLineWidth (page, 1.5)
    # PDF_FILL
    show_description (page, 60, ypos,
        "RenderingMode=PDF_FILL")
    HPDF_Page_SetTextRenderingMode (page, HPDF_FILL)
    HPDF_Page_BeginText (page)
    HPDF_Page_TextOut (page, 60, ypos, "ABCabc123")
    HPDF_Page_EndText (page)
    # PDF_STROKE
    show_description (page, 60, ypos - 50,
        "RenderingMode=PDF_STROKE")
    HPDF_Page_SetTextRenderingMode (page, HPDF_STROKE)
    HPDF_Page_BeginText (page)
    HPDF_Page_TextOut (page, 60, ypos - 50, "ABCabc123")
    HPDF_Page_EndText (page)
    # PDF_FILL_THEN_STROKE
    show_description (page, 60, ypos - 100,
        "RenderingMode=PDF_FILL_THEN_STROKE")
    HPDF_Page_SetTextRenderingMode (page, HPDF_FILL_THEN_STROKE)
    HPDF_Page_BeginText (page)
    HPDF_Page_TextOut (page, 60, ypos - 100, "ABCabc123")
    HPDF_Page_EndText (page)
    # PDF_FILL_CLIPPING -- GSave/GRestore bound the clipping region.
    show_description (page, 60, ypos - 150,
        "RenderingMode=PDF_FILL_CLIPPING")
    HPDF_Page_GSave (page)
    HPDF_Page_SetTextRenderingMode (page, HPDF_FILL_CLIPPING)
    HPDF_Page_BeginText (page)
    HPDF_Page_TextOut (page, 60, ypos - 150, "ABCabc123")
    HPDF_Page_EndText (page)
    show_stripe_pattern (page, 60, ypos - 150)
    HPDF_Page_GRestore (page)
    # PDF_STROKE_CLIPPING
    show_description (page, 60, ypos - 200,
        "RenderingMode=PDF_STROKE_CLIPPING")
    HPDF_Page_GSave (page)
    HPDF_Page_SetTextRenderingMode (page, HPDF_STROKE_CLIPPING)
    HPDF_Page_BeginText (page)
    HPDF_Page_TextOut (page, 60, ypos - 200, "ABCabc123")
    HPDF_Page_EndText (page)
    show_stripe_pattern (page, 60, ypos - 200)
    HPDF_Page_GRestore (page)
    # PDF_FILL_STROKE_CLIPPING
    show_description (page, 60, ypos - 250,
        "RenderingMode=PDF_FILL_STROKE_CLIPPING")
    HPDF_Page_GSave (page)
    HPDF_Page_SetTextRenderingMode (page, HPDF_FILL_STROKE_CLIPPING)
    HPDF_Page_BeginText (page)
    HPDF_Page_TextOut (page, 60, ypos - 250, "ABCabc123")
    HPDF_Page_EndText (page)
    show_stripe_pattern (page, 60, ypos - 250)
    HPDF_Page_GRestore (page)
    # Reset text attributes
    HPDF_Page_SetTextRenderingMode (page, HPDF_FILL)
    HPDF_Page_SetRGBFill (page, 0, 0, 0)
    HPDF_Page_SetFontAndSize(page, font, 30)
    #
    # Rotating text
    #
    angle1 = 30   # A rotation of 30 degrees.
    # BUGFIX: divide by 180.0 -- under Python 2, `30 / 180` is integer
    # division (0), which collapsed the rotation/skew matrices to identity.
    rad1 = angle1 / 180.0 * 3.141592   # Calcurate the radian value.
    show_description (page, 320, ypos - 60, "Rotating text")
    HPDF_Page_BeginText (page)
    HPDF_Page_SetTextMatrix (page, cos(rad1), sin(rad1), -sin(rad1), cos(rad1),
        330, ypos - 60)
    HPDF_Page_ShowText (page, "ABCabc123")
    HPDF_Page_EndText (page)
    #
    # Skewing text.
    #
    show_description (page, 320, ypos - 120, "Skewing text")
    HPDF_Page_BeginText (page)
    angle1 = 10
    angle2 = 20
    rad1 = angle1 / 180.0 * 3.141592    # BUGFIX: float division, as above.
    rad2 = angle2 / 180.0 * 3.141592
    HPDF_Page_SetTextMatrix (page, 1, tan(rad1), tan(rad2), 1, 320, ypos - 120)
    HPDF_Page_ShowText (page, "ABCabc123")
    HPDF_Page_EndText (page)
    #
    # scaling text (X direction)
    #
    show_description (page, 320, ypos - 175, "Scaling text (X direction)")
    HPDF_Page_BeginText (page)
    HPDF_Page_SetTextMatrix (page, 1.5, 0, 0, 1, 320, ypos - 175)
    HPDF_Page_ShowText (page, "ABCabc12")
    HPDF_Page_EndText (page)
    #
    # scaling text (Y direction)
    #
    show_description (page, 320, ypos - 250, "Scaling text (Y direction)")
    HPDF_Page_BeginText (page)
    HPDF_Page_SetTextMatrix (page, 1, 0, 0, 2, 320, ypos - 250)
    HPDF_Page_ShowText (page, "ABCabc123")
    HPDF_Page_EndText (page)
    #
    # char spacing, word spacing
    #
    show_description (page, 60, 140, "char-spacing 0")
    show_description (page, 60, 100, "char-spacing 1.5")
    show_description (page, 60, 60, "char-spacing 1.5, word-spacing 2.5")
    HPDF_Page_SetFontAndSize (page, font, 20)
    HPDF_Page_SetRGBFill (page, 0.1, 0.3, 0.1)
    # char-spacing 0
    HPDF_Page_BeginText (page)
    HPDF_Page_TextOut (page, 60, 140, samp_text2)
    HPDF_Page_EndText (page)
    # char-spacing 1.5
    HPDF_Page_SetCharSpace (page, 1.5)
    HPDF_Page_BeginText (page)
    HPDF_Page_TextOut (page, 60, 100, samp_text2)
    HPDF_Page_EndText (page)
    # char-spacing 1.5, word-spacing 2.5
    HPDF_Page_SetWordSpace (page, 2.5)
    HPDF_Page_BeginText (page)
    HPDF_Page_TextOut (page, 60, 60, samp_text2)
    HPDF_Page_EndText (page)
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- font_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original appended a literal '\\..' per level, which os.path.normpath
    only resolves as a parent-directory step on Windows; os.pardir makes the
    lookup portable while producing the same path on Windows.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    target = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if target not in sys.path:
        sys.path.append(target)
setlibpath(up)
from haru import *
from haru.c_func import *
from math import *
global pdf
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: print the raw status code and abort.

    Unlike the other demos this one prints the numeric error_no directly
    (it does not import haru.hpdf_errorcode's error_detail table).
    """
    global pdf
    printf ("ERROR: error_no=%04X, detail_no=%u\n", error_no,
        detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
# The 14 standard (base) Type1 fonts; each is sampled on the demo page.
font_list=[
    "Courier",
    "Courier-Bold",
    "Courier-Oblique",
    "Courier-BoldOblique",
    "Helvetica",
    "Helvetica-Bold",
    "Helvetica-Oblique",
    "Helvetica-BoldOblique",
    "Times-Roman",
    "Times-Bold",
    "Times-Italic",
    "Times-BoldItalic",
    "Symbol",
    "ZapfDingbats",
]
def main ():
    """Render font_demo.pdf: a labelled sample line for each of the 14
    standard Type1 fonts in font_list."""
    global pdf
    page_title = "Font Demo"
    # output file: same path as the script, with a .pdf extension.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # Add a new page object.
    page = HPDF_AddPage (pdf)
    height = HPDF_Page_GetHeight (page)
    width = HPDF_Page_GetWidth (page)
    # Print the border lines of the page.
    HPDF_Page_SetLineWidth (page, 1)
    HPDF_Page_Rectangle (page, 50, 50, width - 100, height - 110)
    HPDF_Page_Stroke (page)
    # Print the title of the page (with positioning center).
    def_font = HPDF_GetFont (pdf, "Helvetica", NULL)
    HPDF_Page_SetFontAndSize (page, def_font, 24)
    tw = HPDF_Page_TextWidth (page, page_title)
    HPDF_Page_BeginText (page)
    HPDF_Page_TextOut (page, (width - tw) / 2, height - 50, page_title)
    HPDF_Page_EndText (page)
    # output subtitle.
    # NOTE(review): "Standerd" typo is in the original demo's output string;
    # it is page content, so it is deliberately left untouched here.
    HPDF_Page_BeginText (page)
    HPDF_Page_SetFontAndSize (page, def_font, 16)
    HPDF_Page_TextOut (page, 60, height - 80, "<Standerd Type1 fonts samples>")
    HPDF_Page_EndText (page)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, 60, height - 105)
    for i in font_list:
        samp_text = "abcdefgABCDEFG12345!#$%&+-@?"
        font = HPDF_GetFont (pdf, i, NULL)
        # print a label with the font name (9pt Helvetica).
        HPDF_Page_SetFontAndSize (page, def_font, 9)
        HPDF_Page_ShowText (page, i)
        HPDF_Page_MoveTextPos (page, 0, -18)
        # print a sample text in the font itself.
        HPDF_Page_SetFontAndSize (page, font, 20)
        HPDF_Page_ShowText (page, samp_text)
        HPDF_Page_MoveTextPos (page, 0, -20)
    HPDF_Page_EndText (page)
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- encoding_list.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original appended a literal '\\..' per level, which os.path.normpath
    only resolves as a parent-directory step on Windows; os.pardir makes the
    lookup portable while producing the same path on Windows.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    target = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if target not in sys.path:
        sys.path.append(target)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: print the error description and abort.

    HPDF_Error_Handler wraps this into the ctypes callback signature that
    HPDF_NewEx expects; the document is unusable after a library error, so
    it is freed and the script exits.
    """
    global pdf
    # error_detail maps libharu status codes to readable descriptions.
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
# Page geometry (points) for each encoding-table page.
PAGE_WIDTH = 420
PAGE_HEIGHT = 400
# Character-table cell dimensions used by draw_graph/draw_fonts.
CELL_WIDTH = 20
CELL_HEIGHT = 20
CELL_HEADER = 10  # NOTE(review): not referenced in this file -- confirm use.
def draw_graph (page):
    """Stroke the character-table grid and write hex row/column headers.

    Columns are labelled 0..F along the top edge, rows F downward along the
    left edge.  The current page font/size must already be set by the caller.
    """
    # Draw 16 X 15 cells
    # Draw vertical lines.
    HPDF_Page_SetLineWidth (page, 0.5)
    for i in range(18):
        x = i * CELL_WIDTH + 40
        HPDF_Page_MoveTo (page, x, PAGE_HEIGHT - 60)
        HPDF_Page_LineTo (page, x, 40)
        HPDF_Page_Stroke (page)
        # label the 16 interior columns 0..F.
        if (i > 0 and i <= 16):
            HPDF_Page_BeginText (page)
            HPDF_Page_MoveTextPos (page, x + 5, PAGE_HEIGHT - 75)
            buf="%X" %(i - 1)
            HPDF_Page_ShowText (page, buf)
            HPDF_Page_EndText (page)
    # Draw horizontal lines.
    for i in range(16):
        y = i * CELL_HEIGHT + 40
        HPDF_Page_MoveTo (page, 40, y)
        HPDF_Page_LineTo (page, PAGE_WIDTH - 40, y)
        HPDF_Page_Stroke (page)
        # NOTE(review): `i < 14` labels only 14 rows (F down to 2); the
        # 16-row grid suggests this may be an off-by-one from the C demo --
        # confirm against the original before changing.
        if (i < 14):
            HPDF_Page_BeginText (page)
            HPDF_Page_MoveTextPos (page, 45, y + 5)
            buf="%X" %( 15 - i)
            HPDF_Page_ShowText (page, buf)
            HPDF_Page_EndText (page)
def draw_fonts (page):
    """Draw the characters of the current font into the 16x16 grid cells.

    Codes below 0x20 (control characters) are skipped, per the comment.
    """
    HPDF_Page_BeginText (page)
    # Draw all character from 0x20 to 0xFF to the canvas.
    for i in range(1,17):
        for j in range(1,17):
            # buf is a [char-code, NUL] pair handed to the ctypes wrapper;
            # presumably it converts the pair to a 1-char C string -- confirm
            # against haru.c_func.
            buf=[None, None]
            y = PAGE_HEIGHT - 55 - ((i - 1) * CELL_HEIGHT)
            x = j * CELL_WIDTH + 50
            buf[1] = 0x00
            buf[0] = (i - 1) * 16 + (j - 1)
            if (buf[0] >= 32):
                # center the glyph horizontally in its cell.
                d = x - HPDF_Page_TextWidth (page, buf) / 2
                HPDF_Page_TextOut (page, d, y, buf)
    HPDF_Page_EndText (page)
def main ():
    """Render encoding_list.pdf: one page per encoding listed below, each
    drawing that encoding's character grid, with an outline entry per page."""
    # BUGFIX: declare `pdf` global.  error_handler() frees the module-global
    # `pdf`; without this declaration the assignment below stayed local, so
    # the handler would hit a NameError on any libharu error.
    global pdf
    encodings=[
        "StandardEncoding",
        "MacRomanEncoding",
        "WinAnsiEncoding",
        "ISO8859-2",
        "ISO8859-3",
        "ISO8859-4",
        "ISO8859-5",
        "ISO8859-9",
        "ISO8859-10",
        "ISO8859-13",
        "ISO8859-14",
        "ISO8859-15",
        "ISO8859-16",
        "CP1250",
        "CP1251",
        "CP1252",
        "CP1254",
        "CP1257",
        "KOI8-R",
        "Symbol-Set",
        "ZapfDingbats-Set",
        NULL  # sentinel terminating the while loop below
    ]
    pdf = HPDF_NewEx (error_handler, NULL, NULL, 0, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # output file: same path as the script, with a .pdf extension.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    # set compression mode
    HPDF_SetCompressionMode (pdf, HPDF_COMP_ALL)
    # Set page mode to use outlines.
    HPDF_SetPageMode(pdf, HPDF_PAGE_MODE_USE_OUTLINE)
    # get default font
    font = HPDF_GetFont (pdf, "Helvetica", NULL)
    # load font object
    font_name = HPDF_LoadType1FontFromFile (pdf, "type1/a010013l.afm",
        "type1/a010013l.pfb")
    # create outline root.
    root = HPDF_CreateOutline (pdf, NULL, "Encoding list", NULL)
    HPDF_Outline_SetOpened (root, HPDF_TRUE)
    i=0
    while (encodings[i]):
        page = HPDF_AddPage (pdf)
        HPDF_Page_SetWidth (page, PAGE_WIDTH)
        HPDF_Page_SetHeight (page, PAGE_HEIGHT)
        # bookmark jumping to the top of this page.
        outline = HPDF_CreateOutline (pdf, root, encodings[i], NULL)
        dst = HPDF_Page_CreateDestination (page)
        HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page), 1)
        # HPDF_Destination_SetFitB(dst);
        HPDF_Outline_SetDestination(outline, dst)
        HPDF_Page_SetFontAndSize (page, font, 15)
        draw_graph (page)
        HPDF_Page_BeginText (page)
        HPDF_Page_SetFontAndSize (page, font, 20)
        HPDF_Page_MoveTextPos (page, 40, PAGE_HEIGHT - 50)
        HPDF_Page_ShowText (page, encodings[i])
        HPDF_Page_ShowText (page, " Encoding")
        HPDF_Page_EndText (page)
        # the Symbol / ZapfDingbats "sets" use their fonts' own encodings.
        if encodings[i]=="Symbol-Set":
            font2 = HPDF_GetFont (pdf, "Symbol", NULL)
        elif encodings[i]=="ZapfDingbats-Set":
            font2 = HPDF_GetFont (pdf, "ZapfDingbats", NULL)
        else:
            font2 = HPDF_GetFont (pdf, font_name, encodings[i])
        HPDF_Page_SetFontAndSize (page, font2, 14)
        draw_fonts (page)
        i+=1
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- ttfont_demo_jp.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original appended a literal '\\..' per level, which os.path.normpath
    only resolves as a parent-directory step on Windows; os.pardir makes the
    lookup portable while producing the same path on Windows.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    target = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if target not in sys.path:
        sys.path.append(target)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: print the error description and abort.

    HPDF_Error_Handler wraps this into the ctypes callback signature that
    HPDF_New expects; the document is unusable after a library error, so it
    is freed and the script exits.
    """
    global pdf
    # error_detail maps libharu status codes to readable descriptions.
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def main ():
    """Render the TrueType CN demo: load a .ttf/.ttc font given on the
    command line, register it with the GB-EUC-H (simplified Chinese)
    encoding and print cp936 sample text at several sizes, then shrink the
    page to fit."""
    global pdf
    if (len(sys.argv) < 2):
        printf ("ttfont_demo_cn <ttf-font-filename> [-E]\n")
        printf ("ttfont_demo_cn <ttc-font-filename> <index> [-E]\n")
        # bare `print` emitted a blank line; print("") is equivalent output
        # and is also valid Python 3 syntax.
        print("")
        printf (r"for example, ttfont_demo_cn.py c:\winnt\fonts\simfang.ttf -E")
        return 1
    try:
        f = open ("mbtext/cp936.txt", "rb")
    # BUGFIX: narrowed from a bare `except:` so only I/O failures are
    # reported here (a bare except also swallowed SystemExit etc.).
    except IOError:
        printf ("error: cannot open 'mbtext/cp936.txt'\n")
        return 1
    SAMP_TXT=f.read(2048)
    f.close()
    # output file: same path as the script, with a .pdf extension.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # declaration for using Chinese-simplified encodings (the original
    # comment said "Japanese", but this demo registers the CNS encodings).
    HPDF_UseCNSEncodings (pdf)
    HPDF_SetCompressionMode (pdf, HPDF_COMP_ALL)
    # load the ttf/ttc file; "-E" selects the embedding flag (HPDF_TRUE).
    if len(sys.argv) == 4 and sys.argv[3]=="-E":
        detail_font_name = HPDF_LoadTTFontFromFile2 (pdf, sys.argv[1],
            int (sys.argv[2]), HPDF_TRUE)
    elif len(sys.argv) == 3 and sys.argv[2]=="-E":
        detail_font_name = HPDF_LoadTTFontFromFile (pdf, sys.argv[1], HPDF_TRUE)
    elif len(sys.argv) == 3:
        detail_font_name = HPDF_LoadTTFontFromFile2 (pdf, sys.argv[1],
            int (sys.argv[2]), HPDF_FALSE)
    else:
        detail_font_name = HPDF_LoadTTFontFromFile (pdf, sys.argv[1], HPDF_FALSE)
    # add a new page object.
    page = HPDF_AddPage (pdf)
    title_font = HPDF_GetFont (pdf, "Helvetica", NULL)
    detail_font = HPDF_GetFont (pdf, detail_font_name, "GB-EUC-H")
    HPDF_Page_SetFontAndSize (page, title_font, 10)
    HPDF_Page_BeginText (page)
    # move the position of the text to top of the page.
    HPDF_Page_MoveTextPos(page, 10, 190)
    HPDF_Page_ShowText (page, detail_font_name)
    HPDF_Page_ShowText (page, " (")
    HPDF_Page_ShowText (page, HPDF_Font_GetEncodingName (detail_font))
    HPDF_Page_ShowText (page, ")")
    HPDF_Page_SetFontAndSize (page, detail_font, 15)
    HPDF_Page_MoveTextPos (page, 10, -20)
    HPDF_Page_ShowText (page, "abcdefghijklmnopqrstuvwxyz")
    HPDF_Page_MoveTextPos (page, 0, -20)
    HPDF_Page_ShowText (page, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    HPDF_Page_MoveTextPos (page, 0, -20)
    HPDF_Page_ShowText (page, "1234567890")
    HPDF_Page_MoveTextPos (page, 0, -20)
    # the multibyte sample at increasing point sizes.
    HPDF_Page_SetFontAndSize (page, detail_font, 10)
    HPDF_Page_ShowText (page, SAMP_TXT)
    HPDF_Page_MoveTextPos (page, 0, -18)
    HPDF_Page_SetFontAndSize (page, detail_font, 16)
    HPDF_Page_ShowText (page, SAMP_TXT)
    HPDF_Page_MoveTextPos (page, 0, -27)
    HPDF_Page_SetFontAndSize (page, detail_font, 23)
    HPDF_Page_ShowText (page, SAMP_TXT)
    HPDF_Page_MoveTextPos (page, 0, -36)
    HPDF_Page_SetFontAndSize (page, detail_font, 30)
    HPDF_Page_ShowText (page, SAMP_TXT)
    HPDF_Page_MoveTextPos (page, 0, -36)
    # size the page to fit the widest (30pt) sample line.
    pw = HPDF_Page_TextWidth (page, SAMP_TXT)
    page_height = 210
    page_width = pw + 40
    HPDF_Page_SetWidth (page, page_width)
    HPDF_Page_SetHeight (page, page_height)
    # finish to print text.
    HPDF_Page_EndText (page)
    # rule lines above and below the Latin alphabet block.
    HPDF_Page_SetLineWidth (page, 0.5)
    HPDF_Page_MoveTo (page, 10, page_height - 25)
    HPDF_Page_LineTo (page, page_width - 10, page_height - 25)
    HPDF_Page_Stroke (page)
    HPDF_Page_MoveTo (page, 10, page_height - 85)
    HPDF_Page_LineTo (page, page_width - 10, page_height - 85)
    HPDF_Page_Stroke (page)
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- chfont_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original appended a literal '\\..' per level, which os.path.normpath
    only resolves as a parent-directory step on Windows; os.pardir makes the
    lookup portable while producing the same path on Windows.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    target = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if target not in sys.path:
        sys.path.append(target)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
from grid_sheet import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: print the error description and abort.

    HPDF_Error_Handler wraps this into the ctypes callback signature that
    HPDF_New expects; the document is unusable after a library error, so it
    is freed and the script exits.
    """
    global pdf
    # error_detail maps libharu status codes to readable descriptions.
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def main ():
    """Render chfont_demo.pdf: alternate lines of cp936 (GBK) and cp932
    (Shift-JIS) sample text using two TrueType-collection fonts passed on
    the command line."""
    global pdf
    # BUGFIX: argv[1]..argv[4] are consumed below, so five entries are
    # required; the original `< 4` check let an IndexError escape.
    if (len(sys.argv) < 5):
        printf ("chfont_demo <cp936-ttc-font-file-name> "
            "<cp936-index> <cp932-ttc-font-file-name> <cp932-index>\n")
        return 1
    # BUGFIX: open() raises IOError on failure, so the original
    # `if (not cp932)` checks were dead code; handle the exception instead.
    fname="mbtext/%s"% "cp932.txt"
    try:
        cp932 = open (fname, "rb")
    except IOError:
        printf ("error: cannot open cp932.txt\n")
        return 1
    fname= "mbtext/%s" % "cp936.txt"
    try:
        cp936 = open (fname, "rb")
    except IOError:
        printf ("error: cannot open cp936.txt\n")
        return 1
    # output file: same path as the script, with a .pdf extension.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    HPDF_SetCompressionMode (pdf, HPDF_COMP_ALL)
    HPDF_UseJPEncodings (pdf)
    HPDF_UseCNSEncodings (pdf)
    fcp936_name = HPDF_LoadTTFontFromFile2 (pdf, sys.argv[1], int(sys.argv[2]),
        HPDF_TRUE)
    fcp932_name = HPDF_LoadTTFontFromFile2 (pdf, sys.argv[3], int(sys.argv[4]),
        HPDF_TRUE)
    # add a new page object.
    page = HPDF_AddPage (pdf)
    HPDF_Page_SetHeight (page, 300)
    HPDF_Page_SetWidth (page, 550)
    fcp936 = HPDF_GetFont (pdf, fcp936_name, "GBK-EUC-H")
    fcp932 = HPDF_GetFont (pdf, fcp932_name, "90ms-RKSJ-H")
    print_grid (pdf, page)
    HPDF_Page_SetTextLeading (page, 20)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, 50, 250)
    HPDF_Page_SetTextLeading (page, 25)
    # BUGFIX: the original read BOTH chunks from cp936 (cp932 was opened but
    # never read) and re-showed the second chunk at the top of the next
    # iteration.  Mirroring the C demo: show one cp936 chunk, then one cp932
    # chunk, then advance to the next line.
    chunk = cp936.read(1024)
    while chunk:
        HPDF_Page_SetFontAndSize (page, fcp936, 18)
        HPDF_Page_ShowText (page, '%s\0' % chunk)
        chunk2 = cp932.read(1024)
        if chunk2:
            HPDF_Page_SetFontAndSize (page, fcp932, 18)
            HPDF_Page_ShowText (page, '%s\0' % chunk2)
        HPDF_Page_MoveToNextLine (page)
        chunk = cp936.read(1024)
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    cp936.close ()
    cp932.close ()
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- jpfont_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
# Number of directory levels between this demo script and the package root.
up = 2

def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original built the path with the literal '\\..' separator, which is
    Windows-only (and relies on '\\.' not being a string escape); joining
    os.pardir components keeps it portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    # Climb `up` levels using the platform's parent-directory token.
    path = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)

setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    # libharu error callback: report the failure, free the document, abort.
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no], detail_no)
    HPDF_Free (pdf)
    raise SystemExit(1)
def main():
    """Build jpfont_demo.pdf: one outlined page per Japanese base-14 font
    variant, showing the Latin alphabet, digits and a Shift-JIS sample
    paragraph at several point sizes.

    Returns 0 on success, 1 on failure.
    """
    global pdf
    PAGE_HEIGHT = 210
    try:
        f = open ("mbtext/sjis.txt", "rb")
    except IOError:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; only the open() failure should be reported here.
        printf ("error: cannot open 'mbtext/sjis.txt'\n")
        return 1
    try:
        samp_text = f.read(2048)
    finally:
        # Close even if the read raises (the original leaked the handle then).
        f.close ()
    # Output file: same path as this script, with a .pdf suffix.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # configure pdf-document to be compressed.
    HPDF_SetCompressionMode (pdf, HPDF_COMP_ALL)
    # declaration for using Japanese font, encoding.
    HPDF_UseJPEncodings (pdf)
    HPDF_UseJPFonts (pdf)
    # The 16 MS font variants paired with their RKSJ encodings.  Order
    # matters: it fixes the page order of the generated document.
    font_specs = [
        ("MS-Mincyo", "90ms-RKSJ-H"),
        ("MS-Mincyo,Bold", "90ms-RKSJ-H"),
        ("MS-Mincyo,Italic", "90ms-RKSJ-H"),
        ("MS-Mincyo,BoldItalic", "90ms-RKSJ-H"),
        ("MS-PMincyo", "90msp-RKSJ-H"),
        ("MS-PMincyo,Bold", "90msp-RKSJ-H"),
        ("MS-PMincyo,Italic", "90msp-RKSJ-H"),
        ("MS-PMincyo,BoldItalic", "90msp-RKSJ-H"),
        ("MS-Gothic", "90ms-RKSJ-H"),
        ("MS-Gothic,Bold", "90ms-RKSJ-H"),
        ("MS-Gothic,Italic", "90ms-RKSJ-H"),
        ("MS-Gothic,BoldItalic", "90ms-RKSJ-H"),
        ("MS-PGothic", "90msp-RKSJ-H"),
        ("MS-PGothic,Bold", "90msp-RKSJ-H"),
        ("MS-PGothic,Italic", "90msp-RKSJ-H"),
        ("MS-PGothic,BoldItalic", "90msp-RKSJ-H"),
    ]
    detail_font = [HPDF_GetFont (pdf, name, enc) for (name, enc) in font_specs]
    # Set page mode to use outlines.
    HPDF_SetPageMode(pdf, HPDF_PAGE_MODE_USE_OUTLINE)
    # create outline root.
    root = HPDF_CreateOutline (pdf, NULL, "JP font demo", NULL)
    HPDF_Outline_SetOpened (root, HPDF_TRUE)
    for i in range(16):
        # add a new page object.
        page = HPDF_AddPage (pdf)
        # create outline entry pointing at this page.
        outline = HPDF_CreateOutline (pdf, root,
                HPDF_Font_GetFontName (detail_font[i]), NULL)
        dst = HPDF_Page_CreateDestination (page)
        HPDF_Outline_SetDestination(outline, dst)
        title_font = HPDF_GetFont (pdf, "Helvetica", NULL)
        HPDF_Page_SetFontAndSize (page, title_font, 10)
        HPDF_Page_BeginText (page)
        # move the position of the text to top of the page.
        HPDF_Page_MoveTextPos(page, 10, 190)
        HPDF_Page_ShowText (page, HPDF_Font_GetFontName (detail_font[i]))
        # Alphabet and digits in the font under test.
        HPDF_Page_SetFontAndSize (page, detail_font[i], 15)
        HPDF_Page_MoveTextPos (page, 10, -20)
        HPDF_Page_ShowText (page, "abcdefghijklmnopqrstuvwxyz")
        HPDF_Page_MoveTextPos (page, 0, -20)
        HPDF_Page_ShowText (page, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
        HPDF_Page_MoveTextPos (page, 0, -20)
        HPDF_Page_ShowText (page, "1234567890")
        HPDF_Page_MoveTextPos (page, 0, -20)
        # Shift-JIS sample text at increasing point sizes.
        HPDF_Page_SetFontAndSize (page, detail_font[i], 10)
        HPDF_Page_ShowText (page, samp_text)
        HPDF_Page_MoveTextPos (page, 0, -18)
        HPDF_Page_SetFontAndSize (page, detail_font[i], 16)
        HPDF_Page_ShowText (page, samp_text)
        HPDF_Page_MoveTextPos (page, 0, -27)
        HPDF_Page_SetFontAndSize (page, detail_font[i], 23)
        HPDF_Page_ShowText (page, samp_text)
        HPDF_Page_MoveTextPos (page, 0, -36)
        HPDF_Page_SetFontAndSize (page, detail_font[i], 30)
        HPDF_Page_ShowText (page, samp_text)
        p = HPDF_Page_GetCurrentTextPos (page)
        # finish to print text.
        HPDF_Page_EndText (page)
        # Tick marks every 30 units under the last text line.
        HPDF_Page_SetLineWidth (page, 0.5)
        x_pos = 20
        for j in range(len (samp_text) // 2):
            HPDF_Page_MoveTo (page, x_pos, p.y - 10)
            HPDF_Page_LineTo (page, x_pos, p.y - 12)
            HPDF_Page_Stroke (page)
            x_pos = x_pos + 30
        # Shrink the page to fit the rendered text, then rule separators.
        HPDF_Page_SetWidth (page, p.x + 20)
        HPDF_Page_SetHeight (page, PAGE_HEIGHT)
        HPDF_Page_MoveTo (page, 10, PAGE_HEIGHT - 25)
        HPDF_Page_LineTo (page, p.x + 10, PAGE_HEIGHT - 25)
        HPDF_Page_Stroke (page)
        HPDF_Page_MoveTo (page, 10, PAGE_HEIGHT - 85)
        HPDF_Page_LineTo (page, p.x + 10, PAGE_HEIGHT - 85)
        HPDF_Page_Stroke (page)
        HPDF_Page_MoveTo (page, 10, p.y - 12)
        HPDF_Page_LineTo (page, p.x + 10, p.y - 12)
        HPDF_Page_Stroke (page)
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- image_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
# Number of directory levels between this demo script and the package root.
up = 2

def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original built the path with the literal '\\..' separator, which is
    Windows-only; joining os.pardir components keeps it portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    # Climb `up` levels using the platform's parent-directory token.
    path = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)

setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
from math import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    # libharu error callback: report the failure, free the document, abort.
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no], detail_no)
    HPDF_Free (pdf)
    raise SystemExit(1)
def show_description (page, x, y, text):
    """Mark (x, y) with a small cross, print its coordinates next to it,
    and write the caption `text` below the marker."""
    # Cross hair centred on the anchor point.
    HPDF_Page_MoveTo (page, x, y - 10)
    HPDF_Page_LineTo (page, x, y + 10)
    HPDF_Page_MoveTo (page, x - 10, y)
    HPDF_Page_LineTo (page, x + 10, y)
    HPDF_Page_Stroke (page)
    HPDF_Page_SetFontAndSize (page, HPDF_Page_GetCurrentFont (page), 8)
    HPDF_Page_SetRGBFill (page, 0, 0, 0)
    coords = "(x=%d,y=%d)" % (int(x), int(y))
    # Coordinate read-out, right-aligned so it ends just left of the cross.
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page,
            x - HPDF_Page_TextWidth (page, coords) - 5, y - 10)
    HPDF_Page_ShowText (page, coords)
    HPDF_Page_EndText (page)
    # Free-form caption below the marker.
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, x - 20, y - 25)
    HPDF_Page_ShowText (page, text)
    HPDF_Page_EndText (page)
def main ():
    """Build image_demo.pdf: draw a PNG at natural size, scaled, skewed and
    rotated, then demonstrate stencil masking and colour-key masking.

    Returns 0 on success, 1 on failure.
    """
    # error_handler() calls HPDF_Free on the module-level `pdf`; without this
    # declaration `pdf` would be local to main() and the handler would raise
    # NameError instead of cleaning up.
    global pdf
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    HPDF_SetCompressionMode (pdf, HPDF_COMP_ALL)
    # create default-font
    font = HPDF_GetFont (pdf, "Helvetica", NULL)
    # add a new page object.
    page = HPDF_AddPage (pdf)
    HPDF_Page_SetWidth (page, 550)
    HPDF_Page_SetHeight (page, 500)
    # Open the document scrolled to the top of this page.
    dst = HPDF_Page_CreateDestination (page)
    HPDF_Destination_SetXYZ (dst, 0, HPDF_Page_GetHeight (page), 1)
    HPDF_SetOpenAction(pdf, dst)
    HPDF_Page_BeginText (page)
    HPDF_Page_SetFontAndSize (page, font, 20)
    HPDF_Page_MoveTextPos (page, 220, HPDF_Page_GetHeight (page) - 70)
    HPDF_Page_ShowText (page, "ImageDemo")
    HPDF_Page_EndText (page)
    # load image file.
    image = HPDF_LoadPngImageFromFile (pdf, "pngsuite/basn3p02.png")
    # image1 is masked by image2.
    image1 = HPDF_LoadPngImageFromFile (pdf, "pngsuite/basn3p02.png")
    # image2 is a mask image.
    image2 = HPDF_LoadPngImageFromFile (pdf, "pngsuite/basn0g01.png")
    # image3 is a RGB-color image. we use this image for color-mask demo.
    image3 = HPDF_LoadPngImageFromFile (pdf, "pngsuite/maskimage.png")
    iw = HPDF_Image_GetWidth (image)
    ih = HPDF_Image_GetHeight (image)
    HPDF_Page_SetLineWidth (page, 0.5)
    x = 100
    y = HPDF_Page_GetHeight (page) - 150
    # Draw image to the canvas. (normal-mode with actual size.)
    HPDF_Page_DrawImage (page, image, x, y, iw, ih)
    show_description (page, x, y, "Actual Size")
    x += 150
    # Scalling image (X direction)
    HPDF_Page_DrawImage (page, image, x, y, iw * 1.5, ih)
    show_description (page, x, y, "Scalling image (X direction)")
    x += 150
    # Scalling image (Y direction).
    HPDF_Page_DrawImage (page, image, x, y, iw, ih * 1.5)
    show_description (page, x, y, "Scalling image (Y direction)")
    x = 100
    y -= 120
    # Skewing image via the transformation matrix.
    angle1 = 10
    angle2 = 20
    # NOTE: 180.0 forces float division; under Python 2 the original
    # `angle1 / 180` truncated to 0, producing no skew/rotation at all.
    rad1 = angle1 / 180.0 * 3.141592
    rad2 = angle2 / 180.0 * 3.141592
    HPDF_Page_GSave (page)
    HPDF_Page_Concat (page, iw, tan(rad1) * iw, tan(rad2) * ih, ih, x, y)
    HPDF_Page_ExecuteXObject (page, image)
    HPDF_Page_GRestore (page)
    show_description (page, x, y, "Skewing image")
    x += 150
    # Rotating image
    angle = 30; # rotation of 30 degrees.
    rad = angle / 180.0 * 3.141592; # Calcurate the radian value.
    HPDF_Page_GSave (page)
    HPDF_Page_Concat (page, iw * cos(rad),
                iw * sin(rad),
                ih * -sin(rad),
                ih * cos(rad),
                x, y)
    HPDF_Page_ExecuteXObject (page, image)
    HPDF_Page_GRestore (page)
    show_description (page, x, y, "Rotating image")
    x += 150
    # draw masked image.
    # Set image2 to the mask image of image1
    HPDF_Image_SetMaskImage (image1, image2)
    HPDF_Page_SetRGBFill (page, 0, 0, 0)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, x - 6, y + 14)
    HPDF_Page_ShowText (page, "MASKMASK")
    HPDF_Page_EndText (page)
    HPDF_Page_DrawImage (page, image1, x - 3, y - 3, iw + 6, ih + 6)
    show_description (page, x, y, "masked image")
    x = 100
    y -= 120
    # color mask.
    HPDF_Page_SetRGBFill (page, 0, 0, 0)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, x - 6, y + 14)
    HPDF_Page_ShowText (page, "MASKMASK")
    HPDF_Page_EndText (page)
    HPDF_Image_SetColorMask (image3, 0, 255, 0, 0, 0, 255)
    HPDF_Page_DrawImage (page, image3, x, y, iw, ih)
    show_description (page, x, y, "Color Mask")
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
# Guard: this demo needs PNG support compiled into libhpdf.
# (Also strips the " | Python |" extraction artifact that broke the last line.)
if HPDF_NOPNGLIB:
    printf("WARNING: if you want to run this demo, \n"
        "make libhpdf with HPDF_USE_PNGLIB option.\n")
    sys.exit(1)
else:
    main()
###
## * << Haru Free PDF Library 2.0.6 >> -- slideshow_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
# Number of directory levels between this demo script and the package root.
up = 2

def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original built the path with the literal '\\..' separator, which is
    Windows-only; joining os.pardir components keeps it portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    # Climb `up` levels using the platform's parent-directory token.
    path = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)

setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
import random
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    # libharu error callback: report the failure, free the document, abort.
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no], detail_no)
    HPDF_Free (pdf)
    raise SystemExit(1)
def print_page (page, caption, font, style, prev, next):
    """Fill one 800x600 slide with a random background colour, caption it,
    attach the given slide-show transition, and add link annotations to the
    neighbouring pages (`prev`/`next` may be NULL)."""
    r = random.random()
    g = random.random()
    b = random.random()
    rect = HPDF_Rect()

    def link_to (label, text_x, left, right, target):
        # Draw the navigation label, then cover it with a borderless link
        # annotation that jumps to `target`.
        HPDF_Page_BeginText (page)
        HPDF_Page_TextOut (page, text_x, 50, label)
        HPDF_Page_EndText (page)
        rect.left = left
        rect.right = right
        rect.top = 70
        rect.bottom = 50
        dst = HPDF_Page_CreateDestination (target)
        HPDF_Destination_SetFit(dst)
        annot = HPDF_Page_CreateLinkAnnot (page, rect, dst)
        HPDF_LinkAnnot_SetBorderStyle (annot, 0, 0, 0)
        HPDF_LinkAnnot_SetHighlightMode (annot, HPDF_ANNOT_INVERT_BOX)

    # Random background; text drawn in the complementary colour.
    HPDF_Page_SetWidth (page, 800)
    HPDF_Page_SetHeight (page, 600)
    HPDF_Page_SetRGBFill (page, r, g, b)
    HPDF_Page_Rectangle (page, 0, 0, 800, 600)
    HPDF_Page_Fill (page)
    HPDF_Page_SetRGBFill (page, 1.0 - r, 1.0 - g, 1.0 - b)
    HPDF_Page_SetFontAndSize (page, font, 30)
    HPDF_Page_BeginText (page)
    # Caption is horizontally compressed to 80%.
    HPDF_Page_SetTextMatrix (page, 0.8, 0.0, 0.0, 1.0, 0.0, 0.0)
    HPDF_Page_TextOut (page, 50, 530, caption)
    HPDF_Page_SetTextMatrix (page, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0)
    HPDF_Page_SetFontAndSize (page, font, 20)
    HPDF_Page_TextOut (page, 55, 300,
            "Type \"Ctrl+L\" in order to return from full screen mode.")
    HPDF_Page_EndText (page)
    # 5 s display time, 1 s transition.
    HPDF_Page_SetSlideShow (page, style, 5.0, 1.0)
    HPDF_Page_SetFontAndSize (page, font, 20)
    if (next):
        link_to ("Next=>", 680, 680, 750, next)
    if (prev):
        link_to ("<=Prev", 50, 50, 110, prev)
def main():
    """Build slideshow_demo.pdf: 17 full-screen pages, one per libharu
    slide-show transition style, chained with prev/next links.

    Returns 0 on success, 1 on failure.
    """
    global pdf
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # create default-font
    font = HPDF_GetFont (pdf, "Courier", NULL)
    # Add 17 pages to the document.
    page = [HPDF_AddPage (pdf) for _ in range(17)]
    # (caption, transition style) for each page, in document order.
    transitions = [
        ("HPDF_TS_WIPE_RIGHT", HPDF_TS_WIPE_RIGHT),
        ("HPDF_TS_WIPE_UP", HPDF_TS_WIPE_UP),
        ("HPDF_TS_WIPE_LEFT", HPDF_TS_WIPE_LEFT),
        ("HPDF_TS_WIPE_DOWN", HPDF_TS_WIPE_DOWN),
        ("HPDF_TS_BARN_DOORS_HORIZONTAL_OUT", HPDF_TS_BARN_DOORS_HORIZONTAL_OUT),
        ("HPDF_TS_BARN_DOORS_HORIZONTAL_IN", HPDF_TS_BARN_DOORS_HORIZONTAL_IN),
        ("HPDF_TS_BARN_DOORS_VERTICAL_OUT", HPDF_TS_BARN_DOORS_VERTICAL_OUT),
        ("HPDF_TS_BARN_DOORS_VERTICAL_IN", HPDF_TS_BARN_DOORS_VERTICAL_IN),
        ("HPDF_TS_BOX_OUT", HPDF_TS_BOX_OUT),
        ("HPDF_TS_BOX_IN", HPDF_TS_BOX_IN),
        ("HPDF_TS_BLINDS_HORIZONTAL", HPDF_TS_BLINDS_HORIZONTAL),
        ("HPDF_TS_BLINDS_VERTICAL", HPDF_TS_BLINDS_VERTICAL),
        ("HPDF_TS_DISSOLVE", HPDF_TS_DISSOLVE),
        ("HPDF_TS_GLITTER_RIGHT", HPDF_TS_GLITTER_RIGHT),
        ("HPDF_TS_GLITTER_DOWN", HPDF_TS_GLITTER_DOWN),
        ("HPDF_TS_GLITTER_TOP_LEFT_TO_BOTTOM_RIGHT",
            HPDF_TS_GLITTER_TOP_LEFT_TO_BOTTOM_RIGHT),
        ("HPDF_TS_REPLACE", HPDF_TS_REPLACE),
    ]
    # Render each page, linking to its neighbours (NULL at the ends).
    for i, (caption, style) in enumerate(transitions):
        prev = page[i - 1] if i > 0 else NULL
        succ = page[i + 1] if i < len(transitions) - 1 else NULL
        print_page (page[i], caption, font, style, prev, succ)
    HPDF_SetPageMode (pdf, HPDF_PAGE_MODE_FULL_SCREEN)
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
# Run the demo when the script is executed.
main()
#coding=utf-8
###
## * << Haru Free PDF Library 2.0.0 >> -- text_annotation.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
# Number of directory levels between this demo script and the package root.
up = 2

def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original built the path with the literal '\\..' separator, which is
    Windows-only; joining os.pardir components keeps it portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    # Climb `up` levels using the platform's parent-directory token.
    path = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)

setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    # libharu error callback: report the failure, free the document, abort.
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no], detail_no)
    HPDF_Free (pdf)
    raise SystemExit(1)
def main():
    """Build text_annotation.pdf: a 400x500 page with one text annotation per
    standard annotation icon, each captioned below its rectangle.

    Returns 0 on success, 1 on failure.
    """
    # One rectangle per annotation, laid out on a 2x4 grid.
    rect1 =HPDF_Rect(50, 350, 150, 400)
    rect2 =HPDF_Rect(210, 350, 350, 400)
    rect3 =HPDF_Rect(50, 250, 150, 300)
    rect4 =HPDF_Rect(210, 250, 350, 300)
    rect5 =HPDF_Rect(50, 150, 150, 200)
    rect6 =HPDF_Rect(210, 150, 350, 200)
    rect7 =HPDF_Rect(50, 50, 150, 100)
    rect8 =HPDF_Rect(210, 50, 350, 100)
    global pdf
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # use Times-Roman font.
    font = HPDF_GetFont (pdf, "Times-Roman", "WinAnsiEncoding")
    page = HPDF_AddPage (pdf)
    HPDF_Page_SetWidth (page, 400)
    HPDF_Page_SetHeight (page, 500)
    HPDF_Page_BeginText (page)
    HPDF_Page_SetFontAndSize (page, font, 16)
    HPDF_Page_MoveTextPos (page, 130, 450)
    HPDF_Page_ShowText (page, "Annotation Demo")
    HPDF_Page_EndText (page)
    annot = HPDF_Page_CreateTextAnnot (page, rect1, "Annotation with Comment "
            "Icon. \n This annotation set to be opened initially.",
            NULL)
    HPDF_TextAnnot_SetIcon (annot, HPDF_ANNOT_ICON_COMMENT)
    HPDF_TextAnnot_SetOpened (annot, HPDF_TRUE)
    annot = HPDF_Page_CreateTextAnnot (page, rect2,
            "Annotation with Key Icon", NULL)
    # Fixed: the original set HPDF_ANNOT_ICON_PARAGRAPH here (copy-paste from
    # rect6) although the annotation text and caption both say "Key Icon".
    HPDF_TextAnnot_SetIcon (annot, HPDF_ANNOT_ICON_KEY)
    annot = HPDF_Page_CreateTextAnnot (page, rect3,
            "Annotation with Note Icon", NULL)
    HPDF_TextAnnot_SetIcon (annot, HPDF_ANNOT_ICON_NOTE)
    annot = HPDF_Page_CreateTextAnnot (page, rect4,
            "Annotation with Help Icon", NULL)
    HPDF_TextAnnot_SetIcon (annot, HPDF_ANNOT_ICON_HELP)
    annot = HPDF_Page_CreateTextAnnot (page, rect5,
            "Annotation with NewParagraph Icon", NULL)
    HPDF_TextAnnot_SetIcon (annot, HPDF_ANNOT_ICON_NEW_PARAGRAPH)
    annot = HPDF_Page_CreateTextAnnot (page, rect6,
            "Annotation with Paragraph Icon", NULL)
    HPDF_TextAnnot_SetIcon (annot, HPDF_ANNOT_ICON_PARAGRAPH)
    annot = HPDF_Page_CreateTextAnnot (page, rect7,
            "Annotation with Insert Icon", NULL)
    HPDF_TextAnnot_SetIcon (annot, HPDF_ANNOT_ICON_INSERT)
    encoding = HPDF_GetEncoder (pdf, "ISO8859-2")
    # NOTE(review): the original demo created a text annotation for rect8 with
    # ISO8859-2 sample text here; the sample text was lost to encoding
    # corruption in this port, so the call stays disabled.
    #HPDF_Page_CreateTextAnnot (page, rect8, "...", encoding)
    # Caption each rectangle just below its top edge.
    HPDF_Page_SetFontAndSize (page, font, 11)
    captions = [
        (rect1, "Comment Icon."),
        (rect2, "Key Icon"),
        (rect3, "Note Icon."),
        (rect4, "Help Icon"),
        (rect5, "NewParagraph Icon"),
        (rect6, "Paragraph Icon"),
        (rect7, "Insert Icon"),
        (rect8, "Text Icon(ISO8859-2 text)"),
    ]
    for rect, label in captions:
        HPDF_Page_BeginText (page)
        HPDF_Page_MoveTextPos (page, rect.left + 35, rect.top - 20)
        HPDF_Page_ShowText (page, label)
        HPDF_Page_EndText (page)
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- character_map.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
## * usage character_map <encoding-name> <low-range-from> <low-range-to>
## * <high-range-from> <high-range-to>
## * ex. character_map 90ms-RKSJ-V 0x80 0x
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
# Number of directory levels between this demo script and the package root.
up = 2

def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original built the path with the literal '\\..' separator, which is
    Windows-only; joining os.pardir components keeps it portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    # Climb `up` levels using the platform's parent-directory token.
    path = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)

setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    # libharu error callback: report the failure, free the document, abort.
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no], detail_no)
    HPDF_Free (pdf)
    raise SystemExit(1)
def draw_page(pdf, page, title_font, font, h_byte, l_byte):
    """Draw one code-chart page: a 16-column grid of every double-byte code
    whose lead byte is `h_byte`, starting at trail byte `l_byte` (rounded
    down to a row boundary).  Row/column headers are hex digits.

    NOTE(review): relies on Python 2 integer division (`l_byte / 16`,
    `(20 - w) / 2`); under Python 3 these produce floats — confirm the
    intended interpreter before reuse.
    """
    PAGE_WIDTH = 420
    CELL_HEIGHT = 20
    CELL_WIDTH = 20
    # Snap the starting trail byte down to a multiple of 16 (one grid row),
    # then size the page to the number of remaining rows.
    l_byte = int(l_byte / 16) * 16
    h_count = 16 - (l_byte / 16)
    page_height = 40 + 40 + (h_count + 1) * CELL_HEIGHT
    HPDF_Page_SetHeight (page, page_height)
    HPDF_Page_SetWidth (page, PAGE_WIDTH)
    HPDF_Page_SetFontAndSize (page, title_font, 10)
    # Horizontal rules, plus a hex digit label for each body row.
    ypos = h_count + 1
    while True:
        y = (ypos) * CELL_HEIGHT + 40
        HPDF_Page_MoveTo (page, 40, y)
        HPDF_Page_LineTo (page, 380, y)
        HPDF_Page_Stroke (page)
        if (ypos < h_count):
            # Build a one-character NUL-terminated "string" (list of byte
            # values) holding the row's hex digit.
            buf=[None,None]
            buf[0] = 16 - ypos - 1
            if (buf[0] < 10):
                buf[0] += ord('0')
            else:
                buf[0] += (ord('A') - 10)
            buf[1] = 0
            buf=[i % 256 for i in buf] #because buf is unsigned char *
            w = HPDF_Page_TextWidth (page, buf)
            HPDF_Page_BeginText (page)
            # Centre the label inside its 20-unit cell.
            HPDF_Page_MoveTextPos (page, 40 + (20 - w) / 2, y + 5)
            HPDF_Page_ShowText (page, buf)
            HPDF_Page_EndText (page)
        if (ypos == 0):
            break
        ypos-=1
    # Vertical rules, plus a hex digit label for each of the 16 columns.
    for xpos in range(18):
        y = (h_count + 1) * CELL_HEIGHT + 40
        x = xpos * CELL_WIDTH + 40
        HPDF_Page_MoveTo (page, x, 40)
        HPDF_Page_LineTo (page, x, y)
        HPDF_Page_Stroke (page)
        if (xpos > 0 and xpos <= 16):
            buf=[None,None]
            buf[0] = xpos - 1
            if (buf[0] < 10):
                buf[0] += ord('0')
            else:
                buf[0] += (ord('A') - 10)
            buf[1] = 0
            buf=[i % 256 for i in buf] #because buf is unsigned char *
            w = HPDF_Page_TextWidth(page, buf)
            HPDF_Page_BeginText(page)
            HPDF_Page_MoveTextPos(page, x + (20 - w) / 2,
                    h_count * CELL_HEIGHT + 45)
            HPDF_Page_ShowText(page, buf)
            HPDF_Page_EndText(page)
    # Body: one double-byte character per cell, skipped when the encoder
    # reports zero width for the code.
    HPDF_Page_SetFontAndSize (page, font, 15)
    ypos = h_count
    while True:
        y = (ypos - 1) * CELL_HEIGHT + 45
        for xpos in range(16):
            # Two-byte code (lead, trail) plus NUL terminator.
            buf=[None for i in range(3)]
            x = xpos * CELL_WIDTH + 40 + CELL_WIDTH
            buf[0] = h_byte
            buf[1] = (16 - ypos) * 16 + xpos
            buf[2] = 0x00
            buf=[i % 256 for i in buf] #because buf is unsigned char *
            w = HPDF_Page_TextWidth(page, buf)
            if (w > 0):
                HPDF_Page_BeginText(page)
                HPDF_Page_MoveTextPos(page, x + (20 - w) / 2, y)
                HPDF_Page_ShowText(page, buf)
                HPDF_Page_EndText(page)
        if (ypos == 0):
            break
        ypos-=1
def main ():
    """Build a character-map PDF for the CMap encoding named in sys.argv[1]
    using the font named in sys.argv[2]: one outlined chart page per lead
    byte that maps at least one printable character.

    Returns 0 on success, 1 on failure.
    """
    global pdf
    # flg[i] is set when lead byte i has at least one mapped character.
    flg=[HPDF_UINT16(0) for i in range(256)]
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    if (len(sys.argv) < 3):
        printf ("usage: character_map <encoding-name> <font-name>\n")
        printf ('for example, character_map.py GBK-EUC-H SimHei,Bold')
        return 1
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # configure pdf-document (showing outline, compression enabled)
    HPDF_SetPageMode(pdf, HPDF_PAGE_MODE_USE_OUTLINE)
    HPDF_SetCompressionMode (pdf, HPDF_COMP_ALL)
    HPDF_SetPagesConfiguration (pdf, 10)
    # Register every CJK encoding/font family so any CMap name works.
    HPDF_UseJPEncodings (pdf)
    HPDF_UseJPFonts (pdf)
    HPDF_UseKREncodings (pdf)
    HPDF_UseKRFonts (pdf)
    HPDF_UseCNSEncodings (pdf)
    HPDF_UseCNSFonts (pdf)
    HPDF_UseCNTEncodings (pdf)
    HPDF_UseCNTFonts (pdf)
    encoder = HPDF_GetEncoder (pdf, sys.argv[1])
    if (HPDF_Encoder_GetType (encoder) != HPDF_ENCODER_TYPE_DOUBLE_BYTE):
        printf ("error: %s is not cmap-encoder\n", sys.argv[1])
        HPDF_Free (pdf)
        return 1
    font = HPDF_GetFont (pdf, sys.argv[2], sys.argv[1])
    # Scan the whole double-byte space to find the populated lead/trail
    # byte ranges (0x25A1 is the "white square" fallback glyph).
    min_l = 255
    min_h = 256
    max_l = 0
    max_h = 0
    for i in range(256):
        for j in range(20, 256):
            buf=[None, None ,None]
            code = i * 256 + j
            buf[0] = i
            buf[1] = j
            buf[2] = 0
            btype = HPDF_Encoder_GetByteType (encoder, buf, 0)
            unicode = HPDF_Encoder_GetUnicode (encoder, code)
            if (btype == HPDF_BYTE_TYPE_LEAD and
                unicode != 0x25A1):
                if (min_l > j):
                    min_l = j
                if (max_l < j):
                    max_l = j
                if (min_h > i):
                    min_h = i
                if (max_h < i):
                    max_h = i
                flg[i] = 1
    printf ("min_h=%04X max_h=%04X min_l=%04X max_l=%04X\n",
            min_h, max_h, min_l, max_l)
    # create outline root.
    root = HPDF_CreateOutline (pdf, NULL, sys.argv[1], NULL)
    HPDF_Outline_SetOpened (root, HPDF_TRUE)
    # One chart page (and outline entry) per populated lead byte.
    for i in range(256):
        if (flg[i]):
            page = HPDF_AddPage (pdf)
            title_font = HPDF_GetFont (pdf, "Helvetica", NULL)
            buf="0x%04X-0x%04X" %(
                (i * 256 + min_l),
                (i * 256 + max_l)
                )
            outline = HPDF_CreateOutline (pdf, root, buf, NULL)
            dst = HPDF_Page_CreateDestination (page)
            HPDF_Outline_SetDestination(outline, dst)
            draw_page (pdf, page, title_font, font, i, min_l)
            # Page title: encoding, font, and the code range on this page.
            buf="%s (%s) 0x%04X-0x%04X" %(
                sys.argv[1],
                sys.argv[2],
                (i * 256 + min_l),
                (i * 256 + max_l)
                )
            HPDF_Page_SetFontAndSize (page, title_font, 10)
            HPDF_Page_BeginText (page)
            HPDF_Page_MoveTextPos (page, 40, HPDF_Page_GetHeight (page) - 35)
            HPDF_Page_ShowText (page, buf)
            HPDF_Page_EndText (page)
    HPDF_SaveToFile (pdf, fname)
    HPDF_Free (pdf)
    return 0
# Run the demo when the script is executed.
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- line_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
# Number of directory levels between this demo script and the package root.
up = 2

def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    The original built the path with the literal '\\..' separator, which is
    Windows-only; joining os.pardir components keeps it portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    # Climb `up` levels using the platform's parent-directory token.
    path = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)

setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    # libharu error callback: report the failure, free the document, abort.
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no], detail_no)
    HPDF_Free (pdf)
    raise SystemExit(1)
def draw_line (page, x, y, label):
    """Write `label` at (x, y-10) with a horizontal 220-unit sample line
    just below it."""
    # Caption sits 10 units below the anchor point.
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, x, y - 10)
    HPDF_Page_ShowText (page, label)
    HPDF_Page_EndText (page)
    # The demonstrated line runs 5 units below the caption baseline.
    line_y = y - 15
    HPDF_Page_MoveTo (page, x, line_y)
    HPDF_Page_LineTo (page, x + 220, line_y)
    HPDF_Page_Stroke (page)
def draw_line2 (page, x, y, label):
    """Write `label` at (x, y) with a horizontal 130-unit sample line below
    it, indented 30 units (used for the line-cap demonstrations)."""
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, x, y)
    HPDF_Page_ShowText (page, label)
    HPDF_Page_EndText (page)
    # Sample line: 25 units below the caption, from x+30 to x+160.
    line_y = y - 25
    HPDF_Page_MoveTo (page, x + 30, line_y)
    HPDF_Page_LineTo (page, x + 160, line_y)
    HPDF_Page_Stroke (page)
def draw_rect (page, x, y, label):
    """Write `label` at (x, y-10) and add a 220x25 rectangle path below it.

    Only the path is constructed; the caller decides how to paint it
    (Stroke / Fill / FillStroke / Clip)."""
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, x, y - 10)
    HPDF_Page_ShowText (page, label)
    HPDF_Page_EndText (page)
    HPDF_Page_Rectangle(page, x, y - 40, 220, 25)
def main ():
    """Build line_demo.pdf: demonstrates line widths, dash patterns, cap and
    join styles, rectangle painting modes, clipping, and the three Bezier
    curve operators (CurveTo2, CurveTo3, CurveTo).

    Returns 0 on success, 1 on failure.
    """
    global pdf
    page_title = "Line Example"
    # Dash patterns passed to HPDF_Page_SetDash (on/off run lengths).
    DASH_MODE1= [3]
    DASH_MODE2= [3, 7]
    DASH_MODE3= [8, 7, 2, 7]
    # Output file: same path as this script, with a .pdf suffix.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # create default-font
    font = HPDF_GetFont (pdf, "Helvetica", NULL)
    # add a new page object.
    page = HPDF_AddPage (pdf)
    # print the lines of the page.
    HPDF_Page_SetLineWidth (page, 1)
    HPDF_Page_Rectangle (page, 50, 50, HPDF_Page_GetWidth(page) - 100,
                HPDF_Page_GetHeight (page) - 110)
    HPDF_Page_Stroke (page)
    # print the title of the page (with positioning center).
    HPDF_Page_SetFontAndSize (page, font, 24)
    tw = HPDF_Page_TextWidth (page, page_title)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, (HPDF_Page_GetWidth(page) - tw) / 2,
                HPDF_Page_GetHeight (page) - 50)
    HPDF_Page_ShowText (page, page_title)
    HPDF_Page_EndText (page)
    HPDF_Page_SetFontAndSize (page, font, 10)
    # Draw verious widths of lines.
    HPDF_Page_SetLineWidth (page, 0)
    draw_line (page, 60, 770, "line width = 0")
    HPDF_Page_SetLineWidth (page, 1.0)
    draw_line (page, 60, 740, "line width = 1.0")
    HPDF_Page_SetLineWidth (page, 2.0)
    draw_line (page, 60, 710, "line width = 2.0")
    # Line dash pattern
    HPDF_Page_SetLineWidth (page, 1.0)
    HPDF_Page_SetDash (page, DASH_MODE1, 1, 1)
    draw_line (page, 60, 680, "dash_ptn=[3], phase=1 -- "
                "2 on, 3 off, 3 on...")
    HPDF_Page_SetDash (page, DASH_MODE2, 2, 2)
    draw_line (page, 60, 650, "dash_ptn=[7, 3], phase=2 -- "
                "5 on 3 off, 7 on,...")
    HPDF_Page_SetDash (page, DASH_MODE3, 4, 0)
    draw_line (page, 60, 620, "dash_ptn=[8, 7, 2, 7], phase=0")
    # Reset to a solid line for the cap/join samples.
    HPDF_Page_SetDash (page, NULL, 0, 0)
    HPDF_Page_SetLineWidth (page, 30)
    HPDF_Page_SetRGBStroke (page, 0.0, 0.5, 0.0)
    # Line Cap Style
    HPDF_Page_SetLineCap (page, HPDF_BUTT_END)
    draw_line2 (page, 60, 570, "PDF_BUTT_END")
    HPDF_Page_SetLineCap (page, HPDF_ROUND_END)
    draw_line2 (page, 60, 505, "PDF_ROUND_END")
    HPDF_Page_SetLineCap (page, HPDF_PROJECTING_SCUARE_END)
    draw_line2 (page, 60, 440, "PDF_PROJECTING_SCUARE_END")
    # Line Join Style: each sample is a thick V-shaped polyline.
    HPDF_Page_SetLineWidth (page, 30)
    HPDF_Page_SetRGBStroke (page, 0.0, 0.0, 0.5)
    HPDF_Page_SetLineJoin (page, HPDF_MITER_JOIN)
    HPDF_Page_MoveTo (page, 120, 300)
    HPDF_Page_LineTo (page, 160, 340)
    HPDF_Page_LineTo (page, 200, 300)
    HPDF_Page_Stroke (page)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, 60, 360)
    HPDF_Page_ShowText (page, "PDF_MITER_JOIN")
    HPDF_Page_EndText (page)
    HPDF_Page_SetLineJoin (page, HPDF_ROUND_JOIN)
    HPDF_Page_MoveTo (page, 120, 195)
    HPDF_Page_LineTo (page, 160, 235)
    HPDF_Page_LineTo (page, 200, 195)
    HPDF_Page_Stroke (page)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, 60, 255)
    HPDF_Page_ShowText (page, "PDF_ROUND_JOIN")
    HPDF_Page_EndText (page)
    HPDF_Page_SetLineJoin (page, HPDF_BEVEL_JOIN)
    HPDF_Page_MoveTo (page, 120, 90)
    HPDF_Page_LineTo (page, 160, 130)
    HPDF_Page_LineTo (page, 200, 90)
    HPDF_Page_Stroke (page)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, 60, 150)
    HPDF_Page_ShowText (page, "PDF_BEVEL_JOIN")
    HPDF_Page_EndText (page)
    # Draw Rectangle: same path painted three different ways.
    HPDF_Page_SetLineWidth (page, 2)
    HPDF_Page_SetRGBStroke (page, 0, 0, 0)
    HPDF_Page_SetRGBFill (page, 0.75, 0.0, 0.0)
    draw_rect (page, 300, 770, "Stroke")
    HPDF_Page_Stroke (page)
    draw_rect (page, 300, 720, "Fill")
    HPDF_Page_Fill (page)
    draw_rect (page, 300, 670, "Fill then Stroke")
    HPDF_Page_FillStroke (page)
    # Clip Rect
    HPDF_Page_GSave (page); # Save the current graphic state
    draw_rect (page, 300, 620, "Clip Rectangle")
    HPDF_Page_Clip (page)
    HPDF_Page_Stroke (page)
    HPDF_Page_SetFontAndSize (page, font, 13)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, 290, 600)
    HPDF_Page_SetTextLeading (page, 12)
    HPDF_Page_ShowText (page,
                "Clip Clip Clip Clip Clip Clipi Clip Clip Clip")
    HPDF_Page_ShowTextNextLine (page,
                "Clip Clip Clip Clip Clip Clip Clip Clip Clip")
    HPDF_Page_ShowTextNextLine (page,
                "Clip Clip Clip Clip Clip Clip Clip Clip Clip")
    HPDF_Page_EndText (page)
    # GRestore drops the clipping path set above.
    HPDF_Page_GRestore (page)
    # Curve Example(CurveTo2)
    # (x, y) is the current point; (x1, y1)/(x2, y2) are control points and
    # (x3, y3) the end point used by the three curve operators below.
    x = 330
    y = 440
    x1 = 430
    y1 = 530
    x2 = 480
    y2 = 470
    x3 = 480
    y3 = 90
    HPDF_Page_SetRGBFill (page, 0, 0, 0)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, 300, 540)
    HPDF_Page_ShowText (page, "CurveTo2(x1, y1, x2. y2)")
    HPDF_Page_EndText (page)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, x + 5, y - 5)
    HPDF_Page_ShowText (page, "Current point")
    HPDF_Page_MoveTextPos (page, x1 - x, y1 - y)
    HPDF_Page_ShowText (page, "(x1, y1)")
    HPDF_Page_MoveTextPos (page, x2 - x1, y2 - y1)
    HPDF_Page_ShowText (page, "(x2, y2)")
    HPDF_Page_EndText (page)
    # Dashed guide line between the control points.
    HPDF_Page_SetDash (page, DASH_MODE1, 1, 0)
    HPDF_Page_SetLineWidth (page, 0.5)
    HPDF_Page_MoveTo (page, x1, y1)
    HPDF_Page_LineTo (page, x2, y2)
    HPDF_Page_Stroke (page)
    HPDF_Page_SetDash (page, NULL, 0, 0)
    HPDF_Page_SetLineWidth (page, 1.5)
    HPDF_Page_MoveTo (page, x, y)
    HPDF_Page_CurveTo2 (page, x1, y1, x2, y2)
    HPDF_Page_Stroke (page)
    # Curve Example(CurveTo3)
    y -= 150
    y1 -= 150
    y2 -= 150
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, 300, 390)
    HPDF_Page_ShowText (page, "CurveTo3(x1, y1, x2. y2)")
    HPDF_Page_EndText (page)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, x + 5, y - 5)
    HPDF_Page_ShowText (page, "Current point")
    HPDF_Page_MoveTextPos (page, x1 - x, y1 - y)
    HPDF_Page_ShowText (page, "(x1, y1)")
    HPDF_Page_MoveTextPos (page, x2 - x1, y2 - y1)
    HPDF_Page_ShowText (page, "(x2, y2)")
    HPDF_Page_EndText (page)
    HPDF_Page_SetDash (page, DASH_MODE1, 1, 0)
    HPDF_Page_SetLineWidth (page, 0.5)
    HPDF_Page_MoveTo (page, x, y)
    HPDF_Page_LineTo (page, x1, y1)
    HPDF_Page_Stroke (page)
    HPDF_Page_SetDash (page, NULL, 0, 0)
    HPDF_Page_SetLineWidth (page, 1.5)
    HPDF_Page_MoveTo (page, x, y)
    HPDF_Page_CurveTo3 (page, x1, y1, x2, y2)
    HPDF_Page_Stroke (page)
    # Curve Example(CurveTo)
    y -= 150
    y1 -= 160
    y2 -= 130
    x2 += 10
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, 300, 240)
    HPDF_Page_ShowText (page, "CurveTo(x1, y1, x2. y2, x3, y3)")
    HPDF_Page_EndText (page)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, x + 5, y - 5)
    HPDF_Page_ShowText (page, "Current point")
    HPDF_Page_MoveTextPos (page, x1 - x, y1 - y)
    HPDF_Page_ShowText (page, "(x1, y1)")
    HPDF_Page_MoveTextPos (page, x2 - x1, y2 - y1)
    HPDF_Page_ShowText (page, "(x2, y2)")
    HPDF_Page_MoveTextPos (page, x3 - x2, y3 - y2)
    HPDF_Page_ShowText (page, "(x3, y3)")
    HPDF_Page_EndText (page)
    # Dashed guides from the current point and end point to the controls.
    HPDF_Page_SetDash (page, DASH_MODE1, 1, 0)
    HPDF_Page_SetLineWidth (page, 0.5)
    HPDF_Page_MoveTo (page, x, y)
    HPDF_Page_LineTo (page, x1, y1)
    HPDF_Page_Stroke (page)
    HPDF_Page_MoveTo (page, x2, y2)
    HPDF_Page_LineTo (page, x3, y3)
    HPDF_Page_Stroke (page)
    HPDF_Page_SetDash (page, NULL, 0, 0)
    HPDF_Page_SetLineWidth (page, 1.5)
    HPDF_Page_MoveTo (page, x, y)
    HPDF_Page_CurveTo (page, x1, y1, x2, y2, x3, y3)
    HPDF_Page_Stroke (page)
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
# Run the demo when the script is executed.
main()
###
## * << Haru Free PDF Library 2.0.0 >> -- ttfont_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    Fix: the original concatenated a literal '\\..' per level, which only
    resolves on Windows (os.path.normpath treats the backslash as a plain
    character on POSIX).  os.path.join with os.pardir is portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: report the error, free the document, exit."""
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def main ():
    """ttfont_demo: render sample text with a TrueType font given on the
    command line.

    usage: ttfont_demo [path to font file] -E(embedding font)
    """
    global pdf
    SAMP_TXT = "The quick brown fox jumps over the lazy dog."
    if (len(sys.argv) < 2):
        printf("usage: ttfont_demo [path to font file] "
            "-E(embedding font).\n")
        return 1
    # output file: same base name as this script, with a .pdf extension
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # Add a new page object.
    page = HPDF_AddPage (pdf)
    title_font = HPDF_GetFont (pdf, "Helvetica", NULL)
    # "-E" as the second argument requests embedding of a font subset
    if (len(sys.argv) > 2 and sys.argv[2]=="-E"):
        embed = HPDF_TRUE
    else:
        embed = HPDF_FALSE
    detail_font_name = HPDF_LoadTTFontFromFile (pdf, sys.argv[1], embed)
    detail_font = HPDF_GetFont (pdf, detail_font_name, NULL)
    HPDF_Page_SetFontAndSize (page, title_font, 10)
    HPDF_Page_BeginText (page)
    # Move the position of the text to top of the page.
    HPDF_Page_MoveTextPos(page, 10, 190)
    HPDF_Page_ShowText (page, detail_font_name)
    if (embed):
        HPDF_Page_ShowText (page, "(Embedded Subset)")
    HPDF_Page_SetFontAndSize (page, detail_font, 15)
    HPDF_Page_MoveTextPos (page, 10, -20)
    HPDF_Page_ShowText (page, "abcdefghijklmnopqrstuvwxyz")
    HPDF_Page_MoveTextPos (page, 0, -20)
    HPDF_Page_ShowText (page, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    HPDF_Page_MoveTextPos (page, 0, -20)
    HPDF_Page_ShowText (page, "1234567890")
    HPDF_Page_MoveTextPos (page, 0, -20)
    # the sample sentence at increasing point sizes
    HPDF_Page_SetFontAndSize (page, detail_font, 10)
    HPDF_Page_ShowText (page, SAMP_TXT)
    HPDF_Page_MoveTextPos (page, 0, -18)
    HPDF_Page_SetFontAndSize (page, detail_font, 16)
    HPDF_Page_ShowText (page, SAMP_TXT)
    HPDF_Page_MoveTextPos (page, 0, -27)
    HPDF_Page_SetFontAndSize (page, detail_font, 23)
    HPDF_Page_ShowText (page, SAMP_TXT)
    HPDF_Page_MoveTextPos (page, 0, -36)
    HPDF_Page_SetFontAndSize (page, detail_font, 30)
    HPDF_Page_ShowText (page, SAMP_TXT)
    HPDF_Page_MoveTextPos (page, 0, -36)
    # size the page to the widest sample line plus a margin
    pw = HPDF_Page_TextWidth (page, SAMP_TXT)
    page_height = 210
    page_width = pw + 40
    HPDF_Page_SetWidth (page, page_width)
    HPDF_Page_SetHeight (page, page_height)
    # Finish to print text.
    HPDF_Page_EndText (page)
    # rule lines above and below the sample block
    HPDF_Page_SetLineWidth (page, 0.5)
    HPDF_Page_MoveTo (page, 10, page_height - 25)
    HPDF_Page_LineTo (page, page_width - 10, page_height - 25)
    HPDF_Page_Stroke (page)
    HPDF_Page_MoveTo (page, 10, page_height - 85)
    HPDF_Page_LineTo (page, page_width - 10, page_height - 85)
    HPDF_Page_Stroke (page)
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
main() | Python |
###
## * << Haru Free PDF Library 2.0.0 >> -- arc_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    Fix: the original concatenated a literal '\\..' per level, which only
    resolves on Windows; os.path.join with os.pardir is portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
from grid_sheet import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: report the error, free the document, exit."""
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def main ():
    """arc_demo: draw a four-slice pie chart with HPDF_Page_Arc."""
    global pdf
    # output file: same base name as this script, with a .pdf extension
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # add a new page object.
    page = HPDF_AddPage (pdf)
    HPDF_Page_SetHeight (page, 220)
    HPDF_Page_SetWidth (page, 200)
    # draw grid to the page
    print_grid (pdf, page)
    # draw pie chart
    # *
    # * A: 45% Red
    # * B: 25% Blue
    # * C: 15% green
    # * D: other yellow
    # Each slice is a wedge: two radii plus an arc, filled; the arc's end
    # point (pos) is carried over as the next slice's starting radius.
    # A
    HPDF_Page_SetRGBFill (page, 1.0, 0, 0)
    HPDF_Page_MoveTo (page, 100, 100)
    HPDF_Page_LineTo (page, 100, 180)
    HPDF_Page_Arc (page, 100, 100, 80, 0, 360 * 0.45)
    pos = HPDF_Page_GetCurrentPos (page)
    HPDF_Page_LineTo (page, 100, 100)
    HPDF_Page_Fill (page)
    # B
    HPDF_Page_SetRGBFill (page, 0, 0, 1.0)
    HPDF_Page_MoveTo (page, 100, 100)
    HPDF_Page_LineTo (page, pos.x, pos.y)
    HPDF_Page_Arc (page, 100, 100, 80, 360 * 0.45, 360 * 0.7)
    pos = HPDF_Page_GetCurrentPos (page)
    HPDF_Page_LineTo (page, 100, 100)
    HPDF_Page_Fill (page)
    # C
    HPDF_Page_SetRGBFill (page, 0, 1.0, 0)
    HPDF_Page_MoveTo (page, 100, 100)
    HPDF_Page_LineTo (page, pos.x, pos.y)
    HPDF_Page_Arc (page, 100, 100, 80, 360 * 0.7, 360 * 0.85)
    pos = HPDF_Page_GetCurrentPos (page)
    HPDF_Page_LineTo (page, 100, 100)
    HPDF_Page_Fill (page)
    # D
    HPDF_Page_SetRGBFill (page, 1.0, 1.0, 0)
    HPDF_Page_MoveTo (page, 100, 100)
    HPDF_Page_LineTo (page, pos.x, pos.y)
    HPDF_Page_Arc (page, 100, 100, 80, 360 * 0.85, 360)
    pos = HPDF_Page_GetCurrentPos (page)
    HPDF_Page_LineTo (page, 100, 100)
    HPDF_Page_Fill (page)
    # draw center circle (white disc turns the pie into a donut chart)
    HPDF_Page_SetGrayStroke (page, 0)
    HPDF_Page_SetGrayFill (page, 1)
    HPDF_Page_Circle (page, 100, 100, 30)
    HPDF_Page_Fill (page)
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
main() | Python |
###
## * << Haru Free PDF Library 2.0.0 >> -- make_rawimage.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    Fix: the original concatenated a literal '\\..' per level, which only
    resolves on Windows; os.path.join with os.pardir is portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: report the error, free the document, exit."""
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def main ():
    """make_rawimage: load a PNG with libharu and dump its raw image
    stream to a file.

    usage: make_rawimage <in-file-name> <out-file-name>
    """
    global pdf
    # Fix: two arguments are required (input and output); the original
    # checked len(sys.argv) < 2 and then crashed on sys.argv[2].
    if (len(sys.argv) < 3):
        printf ("usage: make_rawimage <in-file-name> <out-file-name>\n")
        return 1
    # NOTE(review): fname is computed here but never used in this demo.
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # load image file.
    image = HPDF_LoadPngImageFromFile (pdf, sys.argv[1])
    iw = HPDF_Image_GetWidth (image)
    ih = HPDF_Image_GetHeight (image)
    bits_per_comp = HPDF_Image_GetBitsPerComponent (image)
    cs = HPDF_Image_GetColorSpace (image)
    printf ("width=%u\n", iw)
    printf ("height=%u\n", ih)
    printf ("bits_per_comp=%u\n", bits_per_comp)
    printf ("color_space=%s\n", cs)
    # save raw-data to file
    stream = HPDF_FileWriter_New (pdf.mmgr, sys.argv[2])
    if (not stream):
        printf ("cannot open %s\n", sys.argv[2])
    else:
        HPDF_Stream_WriteToStream(image.stream, stream, 0, NULL)
        HPDF_Stream_Free (stream)
    # clean up
    HPDF_Free (pdf)
    return 0
main() | Python |
###
## * << Alternative PDF Library 1.0.0 >> -- text_demo2.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    Fix: the original concatenated a literal '\\..' per level, which only
    resolves on Windows; os.path.join with os.pardir is portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from grid_sheet import *
from math import *
global pdf
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: report the error and free the document.

    NOTE(review): unlike the other demos in this collection, this handler
    does not sys.exit(), so execution continues after an error -- confirm
    this is intended.
    """
    printf ("ERROR: error_no=%04X, detail_no=%u\n", error_no,
        detail_no)
    HPDF_Free (pdf)
global no
no = 0
def PrintText(page):
    """Draw a ".[n]x y" marker at the current text position of *page*,
    where n is a running counter and x/y the current coordinates.

    Fix: the original incremented the module-level counter ``no`` without
    a ``global`` declaration, so ``no += 1`` raised UnboundLocalError on
    the first call.
    """
    global no
    pos = HPDF_Page_GetCurrentTextPos (page)
    no += 1
    buf = ".[%d]%0.2f %0.2f" % (no, pos.x, pos.y)
    HPDF_Page_ShowText(page, buf)
def main ():
    """text_demo2: demonstrate HPDF_Page_TextRect alignment modes, text in
    skewed/rotated coordinate systems, and text laid out along a circle.

    Fix: angle-to-radian conversions now divide by 180.0.  With the
    original integer division, Python 2 truncated e.g. 5 / 180 to 0, so
    the skew/rotation transforms and the per-character placement on the
    circle were silently wrong.
    """
    global pdf
    rect=HPDF_Rect()
    SAMP_TXT = "The quick brown fox jumps over the lazy dog. "
    # output file: same base name as this script, with a .pdf extension
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # add a new page object.
    page = HPDF_AddPage (pdf)
    HPDF_Page_SetSize (page, HPDF_PAGE_SIZE_A5, HPDF_PAGE_PORTRAIT)
    print_grid (pdf, page)
    page_height = HPDF_Page_GetHeight (page)
    font = HPDF_GetFont (pdf, "Helvetica", NULL)
    HPDF_Page_SetTextLeading (page, 20)
    # text_rect method
    # HPDF_TALIGN_LEFT
    rect.left = 25
    rect.top = 545
    rect.right = 200
    rect.bottom = rect.top - 40
    HPDF_Page_Rectangle (page, rect.left, rect.bottom, rect.right - rect.left,
        rect.top - rect.bottom)
    HPDF_Page_Stroke (page)
    HPDF_Page_BeginText (page)
    HPDF_Page_SetFontAndSize (page, font, 10)
    HPDF_Page_TextOut (page, rect.left, rect.top + 3, "HPDF_TALIGN_LEFT")
    HPDF_Page_SetFontAndSize (page, font, 13)
    HPDF_Page_TextRect (page, rect.left, rect.top, rect.right, rect.bottom,
        SAMP_TXT, HPDF_TALIGN_LEFT, NULL)
    HPDF_Page_EndText (page)
    # HPDF_TALIGN_RIGTH
    rect.left = 220
    rect.right = 395
    HPDF_Page_Rectangle (page, rect.left, rect.bottom, rect.right - rect.left,
        rect.top - rect.bottom)
    HPDF_Page_Stroke (page)
    HPDF_Page_BeginText (page)
    HPDF_Page_SetFontAndSize (page, font, 10)
    HPDF_Page_TextOut (page, rect.left, rect.top + 3, "HPDF_TALIGN_RIGTH")
    HPDF_Page_SetFontAndSize (page, font, 13)
    HPDF_Page_TextRect (page, rect.left, rect.top, rect.right, rect.bottom,
        SAMP_TXT, HPDF_TALIGN_RIGHT, NULL)
    HPDF_Page_EndText (page)
    # HPDF_TALIGN_CENTER
    rect.left = 25
    rect.top = 475
    rect.right = 200
    rect.bottom = rect.top - 40
    HPDF_Page_Rectangle (page, rect.left, rect.bottom, rect.right - rect.left,
        rect.top - rect.bottom)
    HPDF_Page_Stroke (page)
    HPDF_Page_BeginText (page)
    HPDF_Page_SetFontAndSize (page, font, 10)
    HPDF_Page_TextOut (page, rect.left, rect.top + 3, "HPDF_TALIGN_CENTER")
    HPDF_Page_SetFontAndSize (page, font, 13)
    HPDF_Page_TextRect (page, rect.left, rect.top, rect.right, rect.bottom,
        SAMP_TXT, HPDF_TALIGN_CENTER, NULL)
    HPDF_Page_EndText (page)
    # HPDF_TALIGN_JUSTIFY
    rect.left = 220
    rect.right = 395
    HPDF_Page_Rectangle (page, rect.left, rect.bottom, rect.right - rect.left,
        rect.top - rect.bottom)
    HPDF_Page_Stroke (page)
    HPDF_Page_BeginText (page)
    HPDF_Page_SetFontAndSize (page, font, 10)
    HPDF_Page_TextOut (page, rect.left, rect.top + 3, "HPDF_TALIGN_JUSTIFY")
    HPDF_Page_SetFontAndSize (page, font, 13)
    HPDF_Page_TextRect (page, rect.left, rect.top, rect.right, rect.bottom,
        SAMP_TXT, HPDF_TALIGN_JUSTIFY, NULL)
    HPDF_Page_EndText (page)
    # Skewed coordinate system
    HPDF_Page_GSave (page)
    angle1 = 5
    angle2 = 10
    rad1 = angle1 / 180.0 * 3.141592    # was: / 180 (int division -> 0)
    rad2 = angle2 / 180.0 * 3.141592    # was: / 180 (int division -> 0)
    HPDF_Page_Concat (page, 1, tan(rad1), tan(rad2), 1, 25, 350)
    rect.left = 0
    rect.top = 40
    rect.right = 175
    rect.bottom = 0
    HPDF_Page_Rectangle (page, rect.left, rect.bottom, rect.right - rect.left,
        rect.top - rect.bottom)
    HPDF_Page_Stroke (page)
    HPDF_Page_BeginText (page)
    HPDF_Page_SetFontAndSize (page, font, 10)
    HPDF_Page_TextOut (page, rect.left, rect.top + 3, "Skewed coordinate system")
    HPDF_Page_SetFontAndSize (page, font, 13)
    HPDF_Page_TextRect (page, rect.left, rect.top, rect.right, rect.bottom,
        SAMP_TXT, HPDF_TALIGN_LEFT, NULL)
    HPDF_Page_EndText (page)
    HPDF_Page_GRestore (page)
    # Rotated coordinate system
    HPDF_Page_GSave (page)
    angle1 = 5
    rad1 = angle1 / 180.0 * 3.141592    # was: / 180 (int division -> 0)
    HPDF_Page_Concat (page, cos(rad1), sin(rad1), -sin(rad1), cos(rad1), 220, 350)
    rect.left = 0
    rect.top = 40
    rect.right = 175
    rect.bottom = 0
    HPDF_Page_Rectangle (page, rect.left, rect.bottom, rect.right - rect.left,
        rect.top - rect.bottom)
    HPDF_Page_Stroke (page)
    HPDF_Page_BeginText (page)
    HPDF_Page_SetFontAndSize (page, font, 10)
    HPDF_Page_TextOut (page, rect.left, rect.top + 3, "Rotated coordinate system")
    HPDF_Page_SetFontAndSize (page, font, 13)
    HPDF_Page_TextRect (page, rect.left, rect.top, rect.right, rect.bottom,
        SAMP_TXT, HPDF_TALIGN_LEFT, NULL)
    HPDF_Page_EndText (page)
    HPDF_Page_GRestore (page)
    # text along a circle
    HPDF_Page_SetGrayStroke (page, 0)
    HPDF_Page_Circle (page, 210, 190, 145)
    HPDF_Page_Circle (page, 210, 190, 113)
    HPDF_Page_Stroke (page)
    angle1 = 360.0 / (len (SAMP_TXT))
    angle2 = 180
    HPDF_Page_BeginText (page)
    font = HPDF_GetFont (pdf, "Courier-Bold", NULL)
    HPDF_Page_SetFontAndSize (page, font, 30)
    for i in range(len (SAMP_TXT)):
        buf=[None, None]
        rad1 = (angle2 - 90) / 180.0 * 3.141592    # was: / 180
        rad2 = angle2 / 180.0 * 3.141592           # was: / 180
        x = 210 + cos(rad2) * 122
        y = 190 + sin(rad2) * 122
        HPDF_Page_SetTextMatrix(page, cos(rad1), sin(rad1), -sin(rad1), cos(rad1), x, y)
        buf[0] = SAMP_TXT[i]
        buf[1] = chr(0)
        # NOTE(review): buf is a Python list, not a C char buffer; confirm
        # HPDF_Page_ShowText accepts it -- a one-character string may be
        # what was intended by the C original.
        HPDF_Page_ShowText (page, buf)
        angle2 -= angle1
    HPDF_Page_EndText (page)
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
main() | Python |
###
## * << Haru Free PDF Library 2.0.0 >> -- link_annotation.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <takeshi_kanno@est.hi-ho.ne.jp>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
    """Append the directory *up* levels above this file to sys.path.

    Fix: the original concatenated a literal '\\..' per level, which only
    resolves on Windows; os.path.join with os.pardir is portable.
    """
    import sys
    base = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.normpath(os.path.join(base, *([os.pardir] * up)))
    if path not in sys.path:
        sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
    """libharu error callback: report the error, free the document, exit."""
    global pdf
    printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
        detail_no)
    HPDF_Free (pdf)
    sys.exit(1)
def print_page (page, font, page_num):
    """Format one 200x200 target page showing only its page number."""
    HPDF_Page_SetWidth (page, 200)
    HPDF_Page_SetHeight (page, 200)
    HPDF_Page_SetFontAndSize (page, font, 20)
    HPDF_Page_BeginText (page)
    HPDF_Page_MoveTextPos (page, 50, 150)
    buf= "Page:%d" % page_num
    HPDF_Page_ShowText (page, buf)
    HPDF_Page_EndText (page)
def main():
    """link_annotation demo: build an index page whose entries jump to seven
    target pages, each link demonstrating a different highlight mode or
    border style, plus one external URI link."""
    global pdf
    fname=os.path.realpath(sys.argv[0])
    fname=fname[:fname.rfind('.')]+'.pdf'
    page=[None for i in range(9)]
    rect=HPDF_Rect()
    uri = "http://libharu.org"
    pdf = HPDF_New (error_handler, NULL)
    if (not pdf):
        printf ("error: cannot create PdfDoc object\n")
        return 1
    # create default-font
    font = HPDF_GetFont (pdf, "Helvetica", NULL)
    # create index page
    index_page = HPDF_AddPage (pdf)
    HPDF_Page_SetWidth (index_page, 300)
    HPDF_Page_SetHeight (index_page, 220)
    # Add 7 pages to the document.
    for i in range(7):
        page[i] = HPDF_AddPage (pdf)
        print_page(page[i], font, i + 1)
    HPDF_Page_BeginText (index_page)
    HPDF_Page_SetFontAndSize (index_page, font, 10)
    HPDF_Page_MoveTextPos (index_page, 15, 200)
    HPDF_Page_ShowText (index_page, "Link Annotation Demo")
    HPDF_Page_EndText (index_page)
    ##
    # * Create Link-Annotation object on index page.
    #
    # Each stanza below draws one line of link text, derives the link's
    # bounding rect from the text positions before/after drawing, and
    # attaches an annotation over that rect.
    HPDF_Page_BeginText(index_page)
    HPDF_Page_SetFontAndSize (index_page, font, 8)
    HPDF_Page_MoveTextPos (index_page, 20, 180)
    HPDF_Page_SetTextLeading (index_page, 23)
    # page1 (HPDF_ANNOT_NO_HIGHTLIGHT)
    tp = HPDF_Page_GetCurrentTextPos (index_page)
    HPDF_Page_ShowText (index_page, "Jump to Page1 (HilightMode=HPDF_ANNOT_NO_HIGHTLIGHT)")
    rect.left = tp.x - 4
    rect.bottom = tp.y - 4
    rect.right = HPDF_Page_GetCurrentTextPos (index_page).x + 4
    rect.top = tp.y + 10
    HPDF_Page_MoveToNextLine (index_page)
    dst = HPDF_Page_CreateDestination (page[0])
    annot = HPDF_Page_CreateLinkAnnot (index_page, rect, dst)
    HPDF_LinkAnnot_SetHighlightMode (annot, HPDF_ANNOT_NO_HIGHTLIGHT)
    # page2 (HPDF_ANNOT_INVERT_BOX)
    tp = HPDF_Page_GetCurrentTextPos (index_page)
    HPDF_Page_ShowText (index_page, "Jump to Page2 (HilightMode=HPDF_ANNOT_INVERT_BOX)")
    rect.left = tp.x - 4
    rect.bottom = tp.y - 4
    rect.right = HPDF_Page_GetCurrentTextPos (index_page).x + 4
    rect.top = tp.y + 10
    HPDF_Page_MoveToNextLine (index_page)
    dst = HPDF_Page_CreateDestination (page[1])
    annot = HPDF_Page_CreateLinkAnnot (index_page, rect, dst)
    HPDF_LinkAnnot_SetHighlightMode (annot, HPDF_ANNOT_INVERT_BOX)
    # page3 (HPDF_ANNOT_INVERT_BORDER)
    tp = HPDF_Page_GetCurrentTextPos (index_page)
    HPDF_Page_ShowText (index_page, "Jump to Page3 (HilightMode=HPDF_ANNOT_INVERT_BORDER)")
    rect.left = tp.x - 4
    rect.bottom = tp.y - 4
    rect.right = HPDF_Page_GetCurrentTextPos (index_page).x + 4
    rect.top = tp.y + 10
    HPDF_Page_MoveToNextLine (index_page)
    dst = HPDF_Page_CreateDestination (page[2])
    annot = HPDF_Page_CreateLinkAnnot (index_page, rect, dst)
    HPDF_LinkAnnot_SetHighlightMode (annot, HPDF_ANNOT_INVERT_BORDER)
    # page4 (HPDF_ANNOT_DOWN_APPEARANCE)
    tp = HPDF_Page_GetCurrentTextPos (index_page)
    HPDF_Page_ShowText (index_page, "Jump to Page4 (HilightMode=HPDF_ANNOT_DOWN_APPEARANCE)")
    rect.left = tp.x - 4
    rect.bottom = tp.y - 4
    rect.right = HPDF_Page_GetCurrentTextPos (index_page).x + 4
    rect.top = tp.y + 10
    HPDF_Page_MoveToNextLine (index_page)
    dst = HPDF_Page_CreateDestination (page[3])
    annot = HPDF_Page_CreateLinkAnnot (index_page, rect, dst)
    HPDF_LinkAnnot_SetHighlightMode (annot, HPDF_ANNOT_DOWN_APPEARANCE)
    # page5 (dash border)
    tp = HPDF_Page_GetCurrentTextPos (index_page)
    HPDF_Page_ShowText (index_page, "Jump to Page5 (dash border)")
    rect.left = tp.x - 4
    rect.bottom = tp.y - 4
    rect.right = HPDF_Page_GetCurrentTextPos (index_page).x + 4
    rect.top = tp.y + 10
    HPDF_Page_MoveToNextLine (index_page)
    dst = HPDF_Page_CreateDestination (page[4])
    annot = HPDF_Page_CreateLinkAnnot (index_page, rect, dst)
    HPDF_LinkAnnot_SetBorderStyle (annot, 1, 3, 2)
    # page6 (no border)
    tp = HPDF_Page_GetCurrentTextPos (index_page)
    HPDF_Page_ShowText (index_page, "Jump to Page6 (no border)")
    rect.left = tp.x - 4
    rect.bottom = tp.y - 4
    rect.right = HPDF_Page_GetCurrentTextPos (index_page).x + 4
    rect.top = tp.y + 10
    HPDF_Page_MoveToNextLine (index_page)
    dst = HPDF_Page_CreateDestination (page[5])
    annot = HPDF_Page_CreateLinkAnnot (index_page, rect, dst)
    HPDF_LinkAnnot_SetBorderStyle (annot, 0, 0, 0)
    # page7 (bold border)
    tp = HPDF_Page_GetCurrentTextPos (index_page)
    HPDF_Page_ShowText (index_page, "Jump to Page7 (bold border)")
    rect.left = tp.x - 4
    rect.bottom = tp.y - 4
    rect.right = HPDF_Page_GetCurrentTextPos (index_page).x + 4
    rect.top = tp.y + 10
    HPDF_Page_MoveToNextLine (index_page)
    dst = HPDF_Page_CreateDestination (page[6])
    annot = HPDF_Page_CreateLinkAnnot (index_page, rect, dst)
    HPDF_LinkAnnot_SetBorderStyle (annot, 2, 0, 0)
    # URI link
    tp = HPDF_Page_GetCurrentTextPos (index_page)
    HPDF_Page_ShowText (index_page, "URI (")
    HPDF_Page_ShowText (index_page, uri)
    HPDF_Page_ShowText (index_page, ")")
    rect.left = tp.x - 4
    rect.bottom = tp.y - 4
    rect.right = HPDF_Page_GetCurrentTextPos (index_page).x + 4
    rect.top = tp.y + 10
    HPDF_Page_CreateURILinkAnnot (index_page, rect, uri)
    HPDF_Page_EndText (index_page)
    # save the document to a file
    HPDF_SaveToFile (pdf, fname)
    # clean up
    HPDF_Free (pdf)
    return 0
main()
| Python |
# Django settings for Project project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Dawid Cech', 'dawid.cech@gmail.com'),
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        # NOTE(review): Django >= 1.2 expects the full backend path
        # 'django.db.backends.sqlite3' (as in EXAMPLE_DATABASES below);
        # the short form 'sqlite3' only works on older versions -- confirm
        # against the Django version in use.
        'ENGINE': 'sqlite3',
        'NAME': 'Project.db',
    }
}
# Reference copy of the stock Django database template, kept for guidance.
EXAMPLE_DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': '', # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Warsaw'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'pl'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = 'myapp/media'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/admin/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'i*gvp)l66f8c9htc4(awpzjy)4+_(s764#xhbu$l6drf!q!ac$'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
    # NOTE(review): '/teplates/' looks like a typo for '/templates/' --
    # confirm against the actual directory name before changing it.
    '/teplates/'
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.databrowse',
'django.contrib.flatpages',
'django.contrib.comments',
'myapp',
'mptt'
#'south'
)
GOOGLE_MAPS_API_KEY = 'ABQIAAAACl8T4VKrreBUc3WLqYqPsRRZmV3f8ltJLzIdbaEVtj2mjMrbhBTCIuX5TDJr-OmGuyERLwQHB1OLEg'
| Python |
from django.db import models
from mptt.models import MPTTModel
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
class ItemType(MPTTModel):
    """A node in the hierarchical tree of item types (e.g. ingredients)."""
    name = models.CharField(max_length=200)
    slug = models.SlugField(editable=False)  # derived from name on save
    parent = models.ForeignKey('self',null=True, blank=True, related_name='children')
    def __unicode__(self):
        return self.name
    def save(self, *args, **kwargs):
        # Fix: accept and forward Django's save() options (force_insert,
        # using, ...) instead of silently dropping them.
        self.slug = slugify(self.name)
        super(ItemType, self).save(*args, **kwargs)
class Item(models.Model):
    """A concrete amount of an ItemType, e.g. 2.50 of some quantity unit."""
    type = models.ForeignKey('ItemType')
    quantityType = models.ForeignKey('QuantityType')
    quantity = models.DecimalField(max_digits=5, decimal_places=2)
    def __unicode__(self):
        # display the underlying item type's name
        return self.type.name
class QuantityType(models.Model):
    """A unit of measure for item quantities (e.g. kg, litre)."""
    name = models.CharField(max_length=200)
    slug = models.SlugField(editable=False)  # derived from name on save
    def __unicode__(self):
        return self.name
    def save(self, *args, **kwargs):
        # Fix: accept and forward Django's save() options (force_insert,
        # using, ...) instead of silently dropping them.
        self.slug = slugify(self.name)
        super(QuantityType, self).save(*args, **kwargs)
class Activity(MPTTModel):
    """A node in the hierarchical tree of activities."""
    name = models.CharField(max_length=200)
    slug = models.SlugField(editable=False)  # derived from name on save
    parent = models.ForeignKey('self',null=True, blank=True, related_name='children')
    class Meta:
        # Fix: must be capitalized "Meta" -- the original lowercase
        # "class meta:" was ignored by Django, so verbose_name_plural
        # never took effect.
        verbose_name_plural = 'Activities'
    def __unicode__(self):
        return self.name
    def save(self, *args, **kwargs):
        # Fix: accept and forward Django's save() options (force_insert,
        # using, ...) instead of silently dropping them.
        self.slug = slugify(self.name)
        super(Activity, self).save(*args, **kwargs)
class Step(models.Model):
    """A single step of an Activity: one tool plus the items it uses."""
    name = models.CharField(max_length=200)
    slug = models.SlugField(editable=False)  # derived from name on save
    activity = models.ForeignKey('Activity')
    tool = models.ForeignKey('Tool')
    items = models.ManyToManyField(Item)
    def __unicode__(self):
        return self.name
    def save(self, *args, **kwargs):
        # Fix: accept and forward Django's save() options (force_insert,
        # using, ...) instead of silently dropping them.
        self.slug = slugify(self.name)
        super(Step, self).save(*args, **kwargs)
class Tool(MPTTModel):
    """A node in the hierarchical tree of tools."""
    name = models.CharField(max_length=200)
    slug = models.SlugField(editable=False)  # derived from name on save
    parent = models.ForeignKey('self',null=True, blank=True, related_name='children')
    def __unicode__(self):
        return self.name
    def save(self, *args, **kwargs):
        # Fix: accept and forward Django's save() options (force_insert,
        # using, ...) instead of silently dropping them.
        self.slug = slugify(self.name)
        super(Tool, self).save(*args, **kwargs)
class Recipe(MPTTModel):
    """A node in the hierarchical tree of recipes, composed of Steps."""
    name = models.CharField(max_length=200)
    slug = models.SlugField(editable=False)  # derived from name on save
    parent = models.ForeignKey('self',null=True, blank=True, related_name='children')
    steps = models.ManyToManyField(Step)
    def __unicode__(self):
        return self.name
    def save(self, *args, **kwargs):
        # Fix: accept and forward Django's save() options (force_insert,
        # using, ...) instead of silently dropping them.
        self.slug = slugify(self.name)
        super(Recipe, self).save(*args, **kwargs)
| Python |
from django.core.paginator import Paginator, InvalidPage, EmptyPage
def pagination_helper(request, objects):
    """Return the page of *objects* selected by request.GET['page'].

    Pages hold five objects each.  A missing or non-numeric parameter
    falls back to page 1; an out-of-range page yields the last page.
    """
    paginator = Paginator(objects, 5)  # five objects per page
    try:
        page_number = int(request.GET.get('page', '1'))
    except ValueError:
        page_number = 1
    try:
        return paginator.page(page_number)
    except (EmptyPage, InvalidPage):
        # requested page past the end: clamp to the final page
        return paginator.page(paginator.num_pages)
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    """Placeholder sanity test generated by startapp."""
    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        # Fix: failUnlessEqual is a deprecated unittest alias; assertEqual
        # is the canonical method.
        self.assertEqual(1 + 1, 2)
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
| Python |
import re, sys
from django import template
register = template.Library()
@register.filter(name='pluralize')
def pluralize(value):
    """Return the Polish plural suffix for *value*.

    Polish rules: 12-14 and values ending in 0, 1 (above 4), or 5-9 take
    'ów'; values ending in 2-4 take 'y'; exactly 1 (and anything
    unconvertible) takes no suffix.

    Fix: the original duplicated the whole rule set inside a nested
    try/except TypeError that re-ran the identical int() call (which can
    only raise again), so the branch always fell through to ''.  One
    conversion guarded by (ValueError, TypeError) is equivalent.
    """
    try:
        n = int(value)
    except (ValueError, TypeError):  # not a number (or None, etc.)
        return ''
    if n in (12, 13, 14):
        return 'ów'  # utf8
    if n % 10 in (2, 3, 4):
        return 'y'
    if n >= 5 or n == 0:
        return 'ów'  # utf8
    return ''
from django.shortcuts import render_to_response
from myapp.models import Announcement, Location, User, AnnouncementForm
from django.contrib.auth.decorators import login_required
from myapp.helpers import *
from django.shortcuts import redirect
from datetime import *
# Create your views here.
def index(request):
    """Front page: every announcement, paginated, plus the location tree."""
    locations = Location.tree.all()
    announcements = Announcement.objects.all()
    announcements = pagination_helper(request, announcements)
    return render_to_response('index.html', { 'announcements': announcements, 'locations': locations })
def search(request):
    """Case-insensitive substring search over announcement text (?q=...)."""
    locations = Location.tree.all()
    # NOTE(review): request.GET['q'] raises when the parameter is absent --
    # confirm the search form always submits q, or use request.GET.get('q', '').
    announcements = Announcement.objects.filter(text__icontains=request.GET['q'])
    announcements = pagination_helper(request, announcements)
    return render_to_response('index.html', { 'announcements': announcements, 'locations': locations})
def location(request, location_slug):
    """List the announcements attached to the location named by its slug."""
    locations = Location.tree.all()
    location = Location.objects.get(slug=location_slug)
    announcements = Announcement.objects.filter(location=location)
    announcements = pagination_helper(request, announcements)
    return render_to_response('location.html', { 'announcements': announcements, 'location': location , 'locations': locations})
@login_required
def user(request, user_name):
    """Profile page: all announcements posted by *user_name* (login required)."""
    locations = Location.tree.all()
    user = User.objects.get(username=user_name)
    announcements = Announcement.objects.filter(user=user)
    announcements = pagination_helper(request, announcements)
    return render_to_response('user.html', { 'user': user, 'announcements': announcements, 'locations': locations })
@login_required
def announcement_add(request):
    """Create a new announcement for the logged-in user.

    GET renders an empty form; a valid POST stamps the current user,
    publication date and a zero vote count, then redirects to the
    announcement's location page.  An invalid POST re-renders the bound
    form with its errors.
    """
    if request.method == 'POST': # If the form has been submitted...
        form = AnnouncementForm(request.POST) # A form bound to the POST data
        if form.is_valid(): # All validation rules pass
            announcement = form.save(commit=False)
            # server-side fields not present on the form
            announcement.user = request.user
            announcement.pub_date=datetime.now()
            announcement.votes=0
            announcement.save()
            location = form.cleaned_data['location']
            return redirect('myapp.views.location',location_slug = location.slug) # Redirect after POST
    else:
        form = AnnouncementForm() # An unbound form
    return render_to_response('announcement_add.html', {
        'form': form,
    })
def announcement_vote_add(request, announcement_id):
    """Increment an announcement's vote counter and render a confirmation."""
    announcement = Announcement.objects.get(pk=announcement_id)
    # NOTE(review): read-modify-write is racy under concurrent votes; an
    # update using F('votes') + 1 would be atomic.
    announcement.votes += 1
    announcement.save()
    return render_to_response('announcement_vote_add.html', {'announcement' : announcement})
| Python |
from myapp.models import ItemType, Item, QuantityType, Activity, Step, Tool, Recipe
from django.contrib import admin
# Expose every recipe-domain model through Django's default admin site.
admin.site.register(ItemType)
admin.site.register(Item)
admin.site.register(QuantityType)
admin.site.register(Activity)
admin.site.register(Step)
admin.site.register(Tool)
admin.site.register(Recipe)
#!/usr/bin/env python
from django.core.management import execute_manager
# Standard Django 1.x manage.py bootstrap: import the sibling settings
# module and delegate command handling to django.core.management.
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    execute_manager(settings)
| Python |
# URLconf for the project.  Most routes are commented out here; this copy only
# exposes the admin, admin docs and the favicon redirect.
from django.conf.urls.defaults import *
from django.contrib import databrowse
# from myapp.models import Announcement, Location
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
    # Example:
    # (r'^Project/', include('Project.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    (r'^admin/', include(admin.site.urls)),
    # (r'^databrowse/(.*)', databrowse.site.root),
    # (r'^$', 'myapp.views.index'),
    # (r'^search/$', 'myapp.views.search'),
    # (r'^location/(?P<location_slug>[A-Za-z0-9_]+)$', 'myapp.views.location'),
    # (r'^user/(?P<user_name>[A-Za-z0-9_]+)$', 'myapp.views.user'),
    # (r'^accounts/login/$', 'django.contrib.auth.views.login',{'template_name': 'login.html'}),
    # (r'^announcement-add/$', 'myapp.views.announcement_add'),
    # (r'^comment-add/$', 'django.contrib.comments.views.comments.post_comment'),
    # (r'^announcement-vote-add/(?P<announcement_id>[0-9]+)$', 'myapp.views.announcement_vote_add'),
    (r'^favicon\.ico$', 'django.views.generic.simple.redirect_to', {'url': '/media/favicon.ico'})
)
# databrowse.site.register(Announcement)
# databrowse.site.register(Location)
#hack for serving static media on dev server
from django.conf import settings
if settings.DEBUG:
    from django.views.static import serve
    _media_url = settings.MEDIA_URL
    # serve() needs the URL prefix without the leading slash.
    if _media_url.startswith('/'):
        _media_url = _media_url[1:]
    urlpatterns += patterns('',(r'^%s(?P<path>.*)$' % _media_url,serve,{'document_root': settings.MEDIA_ROOT}))
    # Drop the temporaries so they don't leak into the module namespace.
    del(_media_url, serve)
#!/usr/bin/env python
# Django pre-1.4 management entry point (duplicate of the project boilerplate).
# NOTE(review): execute_manager was deprecated in Django 1.4 and removed in
# 1.6; modern projects use django.core.management.execute_from_command_line.
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    # Settings module could not be imported -- report and abort cleanly.
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    execute_manager(settings)
| Python |
# Django settings for Project project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    ('Dawid Cech', 'dawid.cech@gmail.com'),
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        # Full backend path: the bare 'sqlite3' spelling is the deprecated
        # pre-1.2 form and is inconsistent with EXAMPLE_DATABASES below.
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'Project.db',
    }
}
EXAMPLE_DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': '',                      # Or path to database file if using sqlite3.
        'USER': '',                      # Not used with sqlite3.
        'PASSWORD': '',                  # Not used with sqlite3.
        'HOST': '',                      # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',                      # Set to empty string for default. Not used with sqlite3.
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Warsaw'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'pl'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = 'myapp/media'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/admin/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'i*gvp)l66f8c9htc4(awpzjy)4+_(s764#xhbu$l6drf!q!ac$'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    #'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
    # The trailing comma is required: without it the parentheses are just
    # grouping and TEMPLATE_DIRS would be a plain string, which Django would
    # iterate character by character.  Also fixes the 'teplates' typo.
    '/templates/',
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.admin',
    'django.contrib.admindocs',
    'django.contrib.databrowse',
    'django.contrib.flatpages',
    'django.contrib.comments',
    'myapp',
    'mptt'
    #'south'
)
GOOGLE_MAPS_API_KEY = 'ABQIAAAACl8T4VKrreBUc3WLqYqPsRRZmV3f8ltJLzIdbaEVtj2mjMrbhBTCIuX5TDJr-OmGuyERLwQHB1OLEg'
| Python |
from django.db import models
from mptt.models import MPTTModel
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
from django.forms import ModelForm, Textarea
from slughifi import *
class Location(MPTTModel):
    """Tree node for a place that announcements can be attached to.

    Uses django-mptt for the parent/child hierarchy; ``lat``/``lng`` position
    the location on a map and ``slug`` identifies it in URLs.
    """
    #LOCATION_CHOICES=(('U','Uczelnia'),('S','Szkola'),('P','Przedszkole'),('O','Osiedle'),('M','Miasto'))
    lat = models.FloatField()  # latitude
    lng = models.FloatField()  # longitude
    name = models.CharField(max_length=200)
    # URL identifier; the commented-out save() below suggests it was once
    # auto-populated from name -- presumably it is now set manually or via
    # slughifi (imported above).  TODO(review): confirm how slug gets filled.
    slug = models.SlugField()
    #type = models.CharField(max_length=1, choices=LOCATION_CHOICES)
    address = models.CharField(max_length=200)
    # Self-referential FK that mptt turns into a tree; NULL marks a root node.
    parent = models.ForeignKey('self',null=True, blank=True, related_name='children')
    def __unicode__(self):
        return self.name
#    def save(self):
#        self.slug = slugify(self.name)
#        super(Location, self).save()
class Announcement(models.Model):
    """A short user-posted message tied to a Location, with a vote counter."""
    text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')
    votes = models.IntegerField()
    location = models.ForeignKey(Location)
    user = models.ForeignKey(User)
    #logo = models.ImageField(upload_to='logos')
    class Meta:
        # Newest announcements first by default.
        ordering = ['-pub_date']
    def __unicode__(self):
        return self.text
class AnnouncementForm(ModelForm):
    """ModelForm for posting an Announcement.

    pub_date, votes and user are excluded because the view fills them in
    (see announcement_add in views.py).
    """
    class Meta:
        model = Announcement
        #fields = ('text', 'location')
        exclude = ('pub_date','votes','user')
        widgets = {
            'text': Textarea(attrs={'cols': 80, 'rows': 10}),
        }
| Python |
from django.core.paginator import Paginator, InvalidPage, EmptyPage
def pagination_helper(request, objects, per_page=5):
    """Return the requested page of *objects* for the current request.

    The page number is read from the ``page`` GET parameter; a missing or
    non-integer value falls back to page 1, and an out-of-range value
    (e.g. ?page=9999) yields the last page instead of raising.

    ``per_page`` generalizes the previously hard-coded page size (the old
    inline comment even claimed 25 while the code used 5); the default of 5
    preserves existing behavior for all current callers.
    """
    paginator = Paginator(objects, per_page)
    # Make sure the page request is an int; if not, deliver the first page.
    try:
        page = int(request.GET.get('page', '1'))
    except ValueError:
        page = 1
    try:
        return paginator.page(page)
    except (EmptyPage, InvalidPage):
        # Out of range: deliver the last page of results.
        return paginator.page(paginator.num_pages)
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    """Placeholder test case from the Django app template."""
    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        # failUnlessEqual is the old (pre-unittest2) spelling of assertEqual.
        self.failUnlessEqual(1 + 1, 2)
# Module-level doctest collection picked up by "manage.py test".
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
| Python |
import re, sys
from django import template
register = template.Library()
@register.filter(name='pluralize')
def pluralize(value):
    """Return the Polish plural suffix for *value*.

    Polish pluralization: 12-14 and anything ending in 0, 1 (except 1 itself),
    or 5-9 take the genitive suffix 'ów'; numbers ending in 2-4 (but not
    12-14) take 'y'; exactly 1 takes no suffix.  Non-numeric input yields ''.

    The original implementation duplicated this whole decision table inside
    an ``except TypeError`` handler that merely re-ran the same failing
    ``int(value)`` call; converting once and catching both ValueError and
    TypeError is behaviorally identical and removes the duplication.
    """
    try:
        n = int(value)
    except (ValueError, TypeError):
        # Not a number (or not convertible): no suffix.
        return ''
    if n in (12, 13, 14):
        return 'ów'  # utf8
    if n % 10 in (2, 3, 4):
        return 'y'
    if n >= 5 or n == 0:
        return 'ów'  # utf8
    return ''
from django.shortcuts import render_to_response
from myapp.models import Announcement, Location, User, AnnouncementForm
from django.contrib.auth.decorators import login_required
from myapp.helpers import *
from django.shortcuts import redirect
from datetime import *
from django.db.models import Count, Max
# Create your views here.
def index(request):
    """Landing page: the five most active locations and three newest posts."""
    schools = (Location.tree.all()
               .annotate(post_count=Count('announcement'))
               .annotate(last_post=Max('announcement__pub_date'))
               .order_by('-post_count')[:5])
    recent = Announcement.objects.all().order_by('-pub_date')[:3]
    context = {'latest_posts': recent, 'top_schools': schools}
    return render_to_response('index.html', context)
def search(request):
    """Case-insensitive substring search over announcement text (?q=...)."""
    query = request.GET['q']
    matches = pagination_helper(request, Announcement.objects.filter(text__icontains=query))
    context = {'announcements': matches, 'locations': Location.tree.all()}
    return render_to_response('index.html', context)
def location(request, location_slug):
    """Show one location (looked up by slug) with its paginated posts."""
    loc = Location.objects.get(slug=location_slug)
    posts = pagination_helper(request, Announcement.objects.filter(location=loc))
    context = {'posts': posts, 'location': loc, 'locations': Location.tree.all()}
    return render_to_response('location.html', context)
@login_required
def user(request, user_name):
    """Profile page: a user's announcements, paginated (login required)."""
    target = User.objects.get(username=user_name)
    posts = pagination_helper(request, Announcement.objects.filter(user=target))
    context = {'user': target, 'announcements': posts, 'locations': Location.tree.all()}
    return render_to_response('user.html', context)
@login_required
def announcement_add(request):
    """Post a new announcement (login required).

    GET renders an empty form; POST validates, stamps the announcement with
    the current user, the current time and zero votes, then redirects to the
    chosen location's page.  An invalid POST re-renders the bound form with
    its errors.
    """
    if request.method != 'POST':
        return render_to_response('announcement_add.html', {
            'form': AnnouncementForm(),
        })
    form = AnnouncementForm(request.POST)
    if form.is_valid():
        announcement = form.save(commit=False)
        # Server-side fields excluded from the form.
        announcement.user = request.user
        announcement.pub_date = datetime.now()
        announcement.votes = 0
        announcement.save()
        target = form.cleaned_data['location']
        return redirect('myapp.views.location', location_slug=target.slug)
    return render_to_response('announcement_add.html', {
        'form': form,
    })
def announcement_vote_add(request, announcement_id):
    # Increment the vote counter of one announcement and render confirmation.
    # NOTE(review): this read-modify-write is racy under concurrent requests
    # (two simultaneous votes can count as one) -- an F('votes') + 1 update
    # would be atomic.  Also .get() raises DoesNotExist (HTTP 500) for an
    # unknown id; confirm whether a 404 is wanted, and note there is no
    # throttling, so one user can vote repeatedly.
    announcement = Announcement.objects.get(pk=announcement_id)
    announcement.votes += 1
    announcement.save()
    return render_to_response('announcement_vote_add.html', {'announcement' : announcement})
| Python |
from myapp.models import Announcement, Location
from django.contrib import admin
# Expose the community models in the Django admin with default options.
admin.site.register(Announcement)
admin.site.register(Location)
#!/usr/bin/env python
# Django pre-1.4 management entry point (duplicate of the project boilerplate).
# NOTE(review): execute_manager was deprecated in Django 1.4 and removed in
# 1.6; modern projects use django.core.management.execute_from_command_line.
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    # Settings module could not be imported -- report and abort cleanly.
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    execute_manager(settings)
| Python |
# Active URLconf for the project: wires the admin, databrowse, and all myapp
# views, plus a DEBUG-only static-media hack at the bottom.
from django.conf.urls.defaults import *
from django.contrib import databrowse
from myapp.models import Announcement, Location
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
    # Example:
    # (r'^Project/', include('Project.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    (r'^admin/', include(admin.site.urls)),
    (r'^databrowse/(.*)', databrowse.site.root),
    (r'^$', 'myapp.views.index'),
    (r'^search/$', 'myapp.views.search'),
    # Slugs here allow hyphens (unlike the underscore variant in the template
    # copy of this file) to match django's slugify output.
    (r'^location/(?P<location_slug>[A-Za-z0-9-]+)$', 'myapp.views.location'),
    (r'^user/(?P<user_name>[A-Za-z0-9-]+)$', 'myapp.views.user'),
    (r'^accounts/login/$', 'django.contrib.auth.views.login',{'template_name': 'login.html'}),
    (r'^announcement-add/$', 'myapp.views.announcement_add'),
    (r'^comment-add/$', 'django.contrib.comments.views.comments.post_comment'),
    (r'^announcement-vote-add/(?P<announcement_id>[0-9]+)$', 'myapp.views.announcement_vote_add'),
    (r'^favicon\.ico$', 'django.views.generic.simple.redirect_to', {'url': '/media/favicon.ico'})
)
databrowse.site.register(Announcement)
databrowse.site.register(Location)
#hack for serving static media on dev server
from django.conf import settings
if settings.DEBUG:
    from django.views.static import serve
    _media_url = settings.MEDIA_URL
    # serve() needs the URL prefix without the leading slash.
    if _media_url.startswith('/'):
        _media_url = _media_url[1:]
    urlpatterns += patterns('',(r'^%s(?P<path>.*)$' % _media_url,serve,{'document_root': settings.MEDIA_ROOT}))
    # Drop the temporaries so they don't leak into the module namespace.
    del(_media_url, serve)
#!/usr/bin/env python
# Django pre-1.4 management entry point (duplicate of the project boilerplate).
# NOTE(review): execute_manager was deprecated in Django 1.4 and removed in
# 1.6; modern projects use django.core.management.execute_from_command_line.
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    # Settings module could not be imported -- report and abort cleanly.
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    execute_manager(settings)
| Python |
#====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ====================================================================
#
# This software consists of voluntary contributions made by many
# individuals on behalf of the Apache Software Foundation. For more
# information on the Apache Software Foundation, please see
# <http://www.apache.org/>.
#
import os
import re
import tempfile
import shutil
# Directory names skipped during the recursive walk (VCS metadata, build output).
ignore_pattern = re.compile('^(.svn|target|bin|classes)')
# Only Java sources are candidates for rewriting.
java_pattern = re.compile('^.*\.java')
# Import lines to translate from HttpCore annotations to JCIP annotations.
annot_pattern = re.compile('import org\.apache\.http\.annotation\.')
def process_dir(dir):
    """Recursively walk *dir*, rewriting every Java source file found.

    Subdirectories matching ignore_pattern (VCS/build dirs) are skipped;
    each *.java file is handed to process_source().
    """
    for entry in os.listdir(dir):
        path = os.path.join(dir, entry)
        if os.path.isdir(path):
            if not ignore_pattern.match(entry):
                process_dir(path)
        elif java_pattern.match(entry):
            process_source(path)
def process_source(filename):
    """Rewrite *filename* in place, translating org.apache.http.annotation
    imports to net.jcip.annotations.

    The rewritten content is built in a temp file; the original is replaced
    only if at least one line changed, otherwise the temp file is discarded.

    Fixes over the original: the bare ``except:`` used to swallow every
    error (including KeyboardInterrupt) after cleanup, silently hiding I/O
    failures from the caller -- the temp file is still cleaned up, but the
    exception is now re-raised.  The nested try/finally close dance is
    replaced by context managers (equivalent behavior, Python 2.6+).
    """
    tmpfd, tmpfile = tempfile.mkstemp()
    try:
        changed = False
        with os.fdopen(tmpfd, 'w') as dst:
            with open(filename) as src:
                for line in src:
                    if annot_pattern.match(line):
                        changed = True
                        line = line.replace('import org.apache.http.annotation.', 'import net.jcip.annotations.')
                    dst.write(line)
        if changed:
            shutil.move(tmpfile, filename)
        else:
            os.remove(tmpfile)
    except:
        # Clean up the temp file, but do not hide the failure.
        os.remove(tmpfile)
        raise
# Script entry point: start the rewrite from the current working directory.
process_dir('.')
| Python |
#!/usr/bin/python2.6
#
# Copyright 2011 Eric Gavaletz <gavaletz@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Eric Gavaletz <gavaletz@gmail.com>'
__credits__ = 'None'
__date__ = '29 April 2011'
__version__ = '0.1'
#TODO Replace equals with str regex
#TODO Store problem lists in hashed binary format
#TODO PyDoc
import sys
from PyQt4 import QtCore, QtGui
import gui
#from gui import Ui_MainWindow
# Plain-text file of problem definitions loaded by Problem_list.
PROBLEM_LIST = "prob.txt"
# Weekly hours threshold after which overtime pay rates kick in.
OVERTIME = 40
# Maximum solution attempts per problem.
MAX_ATTEMPTS = 3
# Starting capital ($) for a newly founded company.
SEED_CAPITAL = 1000
# Default working hours per week.
WORK_WEEK = 40
class GameText(object):
    """
    Text based interface for a game.  This should work with most games as long
    as the game exports methods for pulling status, feedback and the user
    prompt.  The game also needs to have a play method that accepts commands as
    strings for input.

    NOTE: Python 2 module (print statements, raw_input).  ``Game`` is expected
    to be defined elsewhere in this module.
    """
    def __init__(self, parent=None):
        """
        The constructor method.  Starts the console with a text based prompt
        that is provided as a static field from the game.  The user input based
        on this prompt is intended to provide the information that the Game
        object needs to get up and going.
        """
        # Game.welcome doubles as the prompt asking for the company name.
        name = raw_input(Game.welcome)
        self.game = Game(name)
        self.status(self.game.status)
        self.feedback("", self.game.feedback, self.game.prompt)
    def play(self):
        """
        This is the main loop that keeps the game going.
        """
        # Loop exits only via quit() (sys.exit) from exec_cmd.
        while 1:
            cmd = raw_input(">>>")
            self.exec_cmd(cmd)
    def quit(self):
        """
        Called by submitting the "quit" command.  This method ends the game and
        closes the application.
        """
        print "Goodbye!"
        sys.exit()
    def abort(self):
        """
        Stops the current action sequence in its tracks and returns the game to
        its home state with the standard root prompt.
        """
        self.game.abort()
        self.status(self.game.status)
        self.feedback("ABORT", self.game.feedback, self.game.prompt)
    def status(self, s):
        """
        Controls how the status returned from the Game is displayed.
        """
        print "--------------------------\n%s\n--------------------------" % s
    def feedback(self, cmd, fb, cp):
        """
        Controls how the feedback and optionaly the command prompt are
        displayed.
        """
        # The text UI ignores cmd; the GUI version echoes it.
        print "%s\n" % fb
        print "%s\n" % cp
    def exec_cmd(self, cmd):
        """
        Takes text as a command from the play method and passes it to the Game.
        As a feature this also allows the user to quit without having to go into
        the game control logic.
        """
        if cmd == "quit" or cmd == "q":
            self.quit()
        else:
            self.game.play(cmd)
            self.status(self.game.status)
            self.feedback(cmd, self.game.feedback, self.game.prompt)
# End of class GameText
class GameWindow(QtGui.QMainWindow):
    """
    GUI based interface for a game.  This should work with most games as long
    as the game exports methods for pulling status, feedback and the user
    prompt.  The game also needs to have a play method that accepts commands as
    strings for input.
    Depends on PyQt-x11-gpl-4.8.3.tar.gz
    http://www.riverbankcomputing.co.uk/software/pyqt/download
    """
    def __init__(self, parent=None):
        """
        The constructor method.  Starts the console with a text based prompt
        that is provided as a static field from the game.  The user input based
        on this prompt is intended to provide the information that the Game
        object needs to get up and going.
        There is also some state change that enables the various parts of the
        GUI to serve multiple funations.
        """
        QtGui.QWidget.__init__(self, parent)
        self.ui = gui.Ui_MainWindow()
        self.ui.setupUi(self)
        # Initially the command line / submit button feed setup() (asking for
        # the company name); setup() rewires them to exec_cmd() afterwards.
        QtCore.QObject.connect(self.ui.submit_button, QtCore.SIGNAL("clicked()"), self.setup)
        QtCore.QObject.connect(self.ui.command_line, QtCore.SIGNAL("returnPressed()"), self.setup)
        QtCore.QObject.connect(self.ui.actionRetire, QtCore.SIGNAL("triggered()"), self.quit)
        self.ui.feedback_box.append(Game.welcome)
    def setup(self):
        """
        Takes care of the housekeeping and initialization for the GUI window.
        """
        name = self.ui.command_line.text()
        self.ui.command_line.clear()
        self.game = Game(name)
        self.ui.feedback_box.clear()
        self.status(self.game.status)
        self.feedback("", self.game.feedback, self.game.prompt)
        # Rewire input widgets from setup() to the normal command handler.
        QtCore.QObject.disconnect(self.ui.submit_button, QtCore.SIGNAL("clicked()"), self.setup)
        QtCore.QObject.disconnect(self.ui.command_line, QtCore.SIGNAL("returnPressed()"), self.setup)
        QtCore.QObject.connect(self.ui.submit_button, QtCore.SIGNAL("clicked()"), self.exec_cmd)
        QtCore.QObject.connect(self.ui.command_line, QtCore.SIGNAL("returnPressed()"), self.exec_cmd)
        QtCore.QObject.connect(self.ui.abort_button, QtCore.SIGNAL("clicked()"), self.abort)
    def quit(self):
        """
        Method for binding the retire/quit menu option with sys.exit()
        """
        sys.exit()
    def abort(self):
        """
        Stops the current action sequence in its tracks and returns the game to
        its home state with the standard root prompt.
        """
        self.game.abort()
        self.status(self.game.status)
        self.feedback("ABORT", self.game.feedback, self.game.prompt)
    def status(self, s):
        """
        Controls how the status returned from the Game is displayed.
        """
        self.ui.status_box.clear()
        self.ui.status_box.append(s)
    def feedback(self, cmd, fb, cp):
        """
        Controls how the feedback and optionaly the command prompt are
        displayed.
        """
        if not self.ui.actionKeep_Feedback.isChecked():
            self.ui.feedback_box.clear()
        self.ui.feedback_box.append("%s\n" % cmd)
        self.ui.feedback_box.append("%s\n" % fb)
        if self.ui.actionShow_Commands.isChecked():
            self.ui.feedback_box.append("%s\n" % cp)
        else:
            self.ui.feedback_box.append("READY:\n")
    def exec_cmd(self):
        """
        Grabs commands as text from the GUI and passes it to the Game.
        As a feature this also allows the user to quit without having to go into
        the game control logic.
        """
        cmd = self.ui.command_line.text()
        if cmd == "quit" or cmd == "q":
            self.quit()
        else:
            self.game.play(cmd)
            self.ui.command_line.clear()
            self.status(self.game.status)
            self.feedback(cmd, self.game.feedback, self.game.prompt)
# End of class GameWindow
class Office(object):
    """
    The office object handles the calculations and record keeping to ensure that
    the company is paying the right amount for the number of employees that it
    has.
    """
    # Canonical office kinds, ordered smallest to largest.
    b = "basement"
    p = "park"
    d = "downtown"
    s = "scraper"
    c = "campus"
    types = [b, p, d, s, c]
    rent = {b:0, p:1000, d:10000, s:100000, c:1000000}   # monthly rent ($)
    max_emp = {b:3, p:10, d:30, s:100, c:1000}           # capacity (campus: per $1M rent step)
    desc = {b:"Parent's Basement", p:"Office Park", d:"Downtown Office",
            s:"High-rise Building", c:"Corporate Campus"}  # fixed "Corperate" typo
    def __init__(self, office_kind):
        """
        The constructor that takes a string to represnet the type of office that
        is being made.  It almost always starts out as a basement.  The number
        of employees is started at zero because the containing company will add
        the owner(s).
        Accepted kinds: [park, downtown, scraper, basement, campus]
        """
        self.num_emp = 0
        # NOTE(review): an unrecognized kind leaves self.kind unset, producing
        # an AttributeError later; raising ValueError here would fail faster,
        # but is left as-is to preserve the original contract.
        if office_kind in self.types:
            self.kind = office_kind
    def rent_bill(self):
        """
        Updates the internally heald rent amount and then returns that amount
        for monthly expense calculations.  The only calculation here that is
        uniqe is the calculation of the rent for a campus.  That calculation is
        1 million for every 1000 employees.
        """
        if self.kind == self.c:
            # Floor division is the intent (rent steps up by $1M per 1000
            # employees); '//' keeps this correct under Python 3, where '/'
            # would produce a fractional float rent.
            r = ((self.num_emp // self.max_emp[self.c]) + 1) * self.rent[self.c]
            if r < self.rent[self.c]:
                return self.rent[self.c]
            else:
                return r
        else:
            return self.rent[self.kind]
    def move(self, kind):
        """
        Moving changes the kind of office that is held and makes sure that all
        of the currently employeed workers will fit in the new building.  If
        they will not fit then the user is prompted to fire N employees before
        attempting the move again.
        Returns a human-readable result string in every case.
        """
        if kind == self.c:
            # A campus has unbounded capacity, so the move always succeeds.
            self.kind = self.c
        elif kind in self.types:
            if self.num_emp <= self.max_emp[kind]:
                self.kind = kind
            else:
                return "First fire %d employees!" % (self.num_emp - self.max_emp[kind])
        else:
            return "ERROR: not a recognized office type"
        return "Moved into a %s!" % self.kind
    def add_emp(self):
        """
        If kind == campus then you are always allowed to add more people
        otherwise a check is made to make sure you have enough room first.
        Returns an error string when full, None on success.
        """
        if self.full():
            return "ERROR: Move to a bigger office first."
        else:
            self.num_emp += 1
    def rm_emp(self):
        """
        If employees is not zero then an employee is removed.
        Returns an error string when already empty, None on success.
        """
        if self.num_emp > 0:
            self.num_emp -= 1
        else:
            return "ERROR: Trying to remove an employee that doesn't exist."
    def full(self):
        """
        Booloean ckeck to see if the number of employees is less than the
        maximum number of employees for a particular kind.
        """
        if self.kind == self.c:
            # Campus never fills up (rent scales with headcount instead).
            return False
        else:
            return self.num_emp >= self.max_emp[self.kind]
    def __str__(self):
        return self.desc[self.kind]
# End of class Office
class Company(object):
    """
    The company is the primary data structure and keeps track of the finances,
    temporal progress, and basically hold things together.
    There is contention between the Company and the Problem_list for the
    responsibility of dealing with calculating hours and adding them to the
    hours worked.  For now that has been placed in the Problem_list.
    """
    # NOTE(review): 'global' at class scope is a no-op -- OVERTIME,
    # SEED_CAPITAL and WORK_WEEK resolve as module globals regardless.
    global OVERTIME, SEED_CAPITAL, WORK_WEEK
    def __init__(self, name):
        """
        Constructor that takes the name of the company (arbitrary string) as an
        argument.  This also sets up the employee list and office.
        """
        self.feedback = ""
        self.name = name
        self.capital = SEED_CAPITAL
        self.employee_list = list()
        self.work_week = WORK_WEEK
        self.hours = 0   # hours elapsed in the current week
        self.weeks = 0
        self.months = 0
        self.years = 0   # tracked but never advanced in the visible code
        self.office = Office(Office.b)
        # Every company starts with its owner as the first employee.
        self.hire_employee(Employee(Employee.o))
    def move_office(self, kind):
        """
        Wrapper for the Office move method.
        """
        return self.office.move(kind)
    def hire_employee(self, emp):
        """
        Takes care of the capital expendatures and the paperwork of hiring an
        employee.  The company must have enough capital to pay the upfront
        recruiting fees but it does not enforce that the company will have
        capital to pay the hourly rate.  This method also checks to make sure
        that the office has space.
        In the future this should be refactored to an Employee_list class
        function.
        """
        if self.capital >= emp.hire_cost:
            if not self.office.full():
                self.capital -= emp.hire_cost
                self.employee_list.append(emp)
                self.office.add_emp()
                # The work week stretches to the longest employee schedule.
                if emp.hours > self.work_week:
                    self.work_week = emp.hours
            else:
                return "ERROR: You need to move to a bigger office first."
        else:
            return "ERROR: You need to raise some more capital first."
    def fire_employee(self, kind):
        """
        Takes care of the capital expendatures and the paperwork of firing an
        employee.
        Returns -1 if no employee of *kind* exists, otherwise the final payout.
        In the future this should be refactored to an Employee_list class
        function.
        """
        e = None
        # Fire the first employee matching the requested kind.
        for i in range(0, len(self.employee_list)):
            if self.employee_list[i].kind == kind:
                e = self.employee_list.pop(i)
                break
        if not e:
            return -1
        else:
            self.office.rm_emp()
            # Settle outstanding hours: regular rate up to OVERTIME, then the
            # employee's overtime multiplier beyond that.
            payout = 0
            if e.hours_worked <= OVERTIME:
                payout += (e.hours_worked * e.rate)
            else:
                payout += (OVERTIME * e.rate + ((e.hours_worked - OVERTIME) * (e.rate * e.overtime)))
            self.capital -= payout
            return payout
    def employees_with_avail_hours(self):
        """
        Returns a list of employee object that have avail hours to be worked.
        In the future this should be refactored to an Employee_list class
        function.
        """
        tmp = list()
        for e in self.employee_list:
            if e.hours_worked < e.hours:
                tmp.append(e)
        return tmp
    def avail_hours(self):
        """
        Returns a tuple with the total hours (reg, overtime) that the company
        has at its disposal.
        In the future this should be refactored to an Employee_list class
        function.
        """
        reg_hours = 0
        ot_hours = 0
        for e in self.employee_list:
            h = e.avail_hours()
            reg_hours += h[0]
            ot_hours += h[1]
        return (reg_hours, ot_hours)
    def subtract_hours(self, n):
        """
        Calculates the fair share of hours for each employee and deducts them
        appropriately.  As a side effect (out of necessity) it also returns the
        maximum number of hours that any one employee worked for this
        transaction.  This value is used to advance the time.
        In the future this should be refactored to an Employee_list class
        function.
        """
        hours = n
        total_max_share = 0
        # Loop until less than one whole hour remains; each pass splits the
        # remainder evenly across employees that still have capacity.
        while int(hours):
            max_share = 0
            tmp_emp_list = self.employees_with_avail_hours()
            fair_share = hours / float(len(tmp_emp_list))
            if max_share < fair_share:
                max_share = fair_share
            for e in tmp_emp_list:
                if fair_share > e.hours - e.hours_worked:
                    # Employee can't absorb the full share -- max them out.
                    hours -= (e.hours - e.hours_worked)
                    e.hours_worked = e.hours
                else:
                    hours -= fair_share
                    e.hours_worked += fair_share
            total_max_share += max_share
        return total_max_share
    def run_expenses(self):
        """
        Decides which expenses need to be ran and runs them.  This also advances
        the time.
        """
        self.feedback = ""
        tmp = self.weekly_expenses()
        self.weeks += 1
        # Paying the week resets the clock by the longest shift worked.
        self.hours -= tmp
        if self.weeks >= 4:
            self.monthly_expenses()
            self.months += 1
            self.weeks -= 4
    def weekly_expenses(self):
        """
        Calculates weekly expenses and deducts them from the capital.  The hours
        are returned to the worker's pool of avail hours.
        Returns the maximum hours any single employee worked this week.
        """
        weekly_payroll = 0
        max_hours_worked = 0
        for e in self.employee_list:
            if e.hours_worked <= OVERTIME:
                weekly_payroll += (e.hours_worked * e.rate)
            else:
                weekly_payroll += (OVERTIME * e.rate + ((e.hours_worked - OVERTIME) * (e.rate * e.overtime)))
            if max_hours_worked < e.hours_worked:
                max_hours_worked = e.hours_worked
            # Reset the employee's clock for the next week.
            e.hours_worked = 0
        self.capital -= weekly_payroll
        self.feedback += "\nWeekly Expenses Paid: payroll = $%d.00" % weekly_payroll
        return max_hours_worked
    def monthly_expenses(self):
        """
        Calculates the monthly expenses and deducts them from the capital.
        """
        rent = self.office.rent_bill()
        self.capital -= rent
        benifits = 0
        for e in self.employee_list:
            benifits += e.benifits
        self.capital -= benifits
        self.feedback += "\nMonthly Expenses Paid: rent = $%d.00 and monthly employee benifits = $%d.00" % (rent, benifits)
    def get_feedback(self):
        """
        Temporary workaround for incompatibility in feedback mechanisms.  This
        ensures that the feedback (which accumulates in this class) is reset
        when it is read.
        """
        # NOTE(review): resets to None while __init__/run_expenses use "" --
        # a subsequent += before run_expenses would raise; confirm intent.
        t = self.feedback
        self.feedback = None
        return t
    def __str__(self):
        """
        Returns a string representing the current snapshot of the company's
        resources.
        """
        ret = list()
        ret.append("%s has:" % self.name)
        ret.append("capital = $%d.00" % self.capital)
        ret.append("there are %d employees in your %s" % (len(self.employee_list), self.office.kind))
        ret.append("we are %d hours into week %d of month %d""" % (self.hours, self.weeks, self.months))
        ret.append("---------------------------------")
        ret.append("| Kind\t| Reg.\t| O.T.\t|")
        ret.append("---------------------------------")
        for e in self.employee_list:
            ret.append("| %s \t| %d\t| %s\t|" % (e.kind, e.avail_hours()[0], e.avail_hours()[1]))
        ret.append("---------------------------------")
        ret.append("| Total\t| %d\t| %s\t|" % (self.avail_hours()[0], self.avail_hours()[1]))
        ret.append("---------------------------------")
        return "\n".join(ret)
# End of class Company
class Employee(object):
    """
    This serves as the data structure for all of our employees.  Instead of
    having lots of subclasses for different types of employees this method
    instead takes a configuration kind that will determine the other values.
    """
    # NOTE(review): 'global' at class scope is a no-op -- OVERTIME resolves
    # as a module global in avail_hours regardless.
    global OVERTIME
    i = "intern"
    h = "hourly"
    s = "salary"
    c = "contract"
    o = "owner"
    types = [i, h, s, c, o]
    hc = {i:1000, h:500, s:1000, c:1000, o:0}      # upfront hiring cost ($)
    hrs = {i:60, h:50, s:80, c:80, o:50}           # weekly hours available
    rt = {i:35, h:40, s:50, c:80, o:35}            # hourly rate ($)
    ot = {i:1.0, h:1.5, s:0.0, c:1.0, o:1.0}       # overtime pay multiplier
    ben = {i:0, h:100, s:500, c:0, o:0}            # monthly benefits ($)
    def __init__(self, emp_kind):
        """
        The default constructor that checks to make sure that the employee type
        that is being requested is a vaild one and constructs that employee.
        NOTE(review): an unrecognized kind silently leaves all attributes
        unset (later AttributeError); kept as-is to preserve the contract.
        """
        if emp_kind in self.types:
            self.kind = emp_kind
            self.hours_worked = 0
            self.hire_cost = self.hc[self.kind]
            self.hours = self.hrs[self.kind]
            self.rate = self.rt[self.kind]
            self.overtime = self.ot[self.kind]
            # Attribute name is historically misspelled; Company reads
            # e.benifits, so it must NOT be renamed.
            self.benifits = self.ben[self.kind]
    def avail_hours(self):
        """
        Returns a tuple of the hours that the employee has left to work
        (reg, overtime).
        """
        if OVERTIME - self.hours_worked >= 0:
            reg_hours = OVERTIME - self.hours_worked
            ot_hours = self.hours - OVERTIME
        else:
            reg_hours = 0
            ot_hours = self.hours - self.hours_worked
        # Clamp at zero so callers never see negative availability.
        if reg_hours < 0:
            reg_hours = 0
        if ot_hours < 0:
            ot_hours = 0
        return (reg_hours, ot_hours)
    def __str__(self):
        """
        Returns a string representing a snapshot of the employee's status.

        Fixes over the original: the overtime multiplier is a float (e.g.
        1.5) and was formatted with %d, truncating it to "1"; it now uses %s.
        Also fixes the "beinfits" typo in the display text.
        """
        return "%s -- %d hours, $%d.00/hr, %sxOT, $%d.00 in benefits" % (self.kind, self.hours, self.rate, self.overtime, self.benifits)
# End of class Employee
class Problem_list(object):
    """
    Serves as the data structure that holds all of the problems and ensures
    that problems are taken out of circulation when completed and that the
    proper number of hours are deducted for each attempted problem.

    The problem list also maintains the active_problem which acts as an
    interface for the problem which is currently being solved.
    """
    # Declared for clarity only: MAX_ATTEMPTS is read from module scope.
    global MAX_ATTEMPTS
    def __init__(self, backing_file):
        """
        Create a list of problems by scanning a backing file.

        Problems live in one of two lists: p_list (not yet attempted) and
        a_list (completed). In between, a single problem may be held in
        the active_problem slot.
        """
        self.backing_file = backing_file
        self.p_list = list()
        self.a_list = list()
        self.active_problem = None  # problem currently being worked, if any
        self.company = None  # wired up by the owner after construction
        self.attempt_count = 0  # failed answers on the active problem
        # Note: this immediately replaces the empty list created above.
        self.p_list = scan(self.backing_file)
    def __quick_check(self, p):
        """
        Used by append() to make sure that added problems fail on load
        rather than present a time-bomb that crashes the system later.
        Essential because problems are read in from a text file.

        In the future this should move to the Problem class so failures
        are caught sooner and checks can vary for Problem subclasses.
        """
        is_good = True
        if type(p.title) != str:
            is_good = False
        if type(p.cost) != int:
            is_good = False
        if type(p.payoff) != int:
            is_good = False
        if type(p.desc) != str:
            is_good = False
        if type(p.solution_format) != str:
            is_good = False
        if p.solution is None:
            is_good = False
        else:
            # The solution value must match its declared format exactly.
            if p.solution_format == "int":
                if type(p.solution) != int:
                    is_good = False
            elif p.solution_format == "float":
                if type(p.solution) != float:
                    is_good = False
            elif p.solution_format == "string":
                if type(p.solution) != str:
                    is_good = False
            else:
                # Unknown solution formats are rejected outright.
                is_good = False
        if not is_good:
            print "ERROR: problem did not pass quick_check"
        return is_good
    def append(self, p):
        """
        Quick-check the problem and add it to p_list. Problems whose title
        duplicates an existing one are rejected with an "ERROR: ..." string;
        on success nothing (None) is returned.
        """
        if self.__quick_check(p):
            for op in self.p_list:
                if p.title == op.title:
                    return "ERROR: problem not added -- existing title = \"%s\"" % p.title
            self.p_list.append(p)
    def list_all(self):
        """
        Return the intro line for every problem in p_list, numbered from 1,
        or an error string when the list is empty.
        """
        ptl = list()
        if len(self.p_list):
            for i in range(0, len(self.p_list)):
                # Displayed numbering is 1-based.
                ptl.append("%d\t%s" % (i+1, self.p_list[i].intro()))
            return "\n".join(ptl)
        else:
            return "ERROR: empty problem list"
    def attempt(self, i):
        """
        Start working problem number *i* (1-based).

        Verifies that no problem is already active (FSM failure), that *i*
        is in range, and that the company has enough hours; on success the
        problem is popped from p_list into active_problem and its full text
        is returned. Otherwise an "ERROR: ..." string is returned.
        """
        h = self.company.avail_hours()
        if self.active_problem is not None:
            return "ERROR: already working on a problem"
        elif i-1 not in range(0, len(self.p_list)):
            return "ERROR: %d is out of the range of problems" % i
        elif self.p_list[i-1].cost > h[0] + h[1]:
            return "ERROR: Not enough hours to attempt this problem. Maybe it is time for a weekend?\n"
        else:
            self.active_problem = self.p_list.pop(i-1)
            return self.active_problem.display()
    def answer(self, a):
        """
        Check a submission against the active problem, keeping track of the
        number of failed attempts.

        On a correct answer (or when MAX_ATTEMPTS is exceeded) the hours
        are billed, the problem is retired to a_list, and the counters are
        reset. Returns either a message string or the raw result from the
        problem's own answer() check.
        """
        self.attempt_count += 1
        h = self.company.avail_hours()
        if self.active_problem.cost <= h[0] + h[1]:
            result = self.active_problem.answer(a)
            if type(result) != str and result:
                # Correct answer: bill the hours, pay out, retire problem.
                self.company.hours += self.company.subtract_hours(self.active_problem.cost)
                self.company.capital += self.active_problem.payoff
                self.a_list.append(self.active_problem)
                self.active_problem = None
                self.attempt_count = 0
            elif self.attempt_count >= MAX_ATTEMPTS:
                # Out of tries: hours are still billed, but no payoff.
                self.company.hours += self.company.subtract_hours(self.active_problem.cost)
                self.a_list.append(self.active_problem)
                self.active_problem = None
                self.attempt_count = 0
                if self.company.hours >= self.company.work_week:
                    return "MAX ATTEMPTS Exceeded\n%s" % self.company.run_expenses()
                else:
                    return "MAX ATTEMPTS Exceeded"
            else:
                # Wrong (or unparseable) answer with tries remaining.
                return result
        else:
            return "Not enough hours to attempt this problem. Maybe it is time for a weekend?\n"
        # Reached only after a correct answer (all other branches return
        # above); run end-of-week expenses when the week is used up.
        if self.company.hours >= self.company.work_week:
            self.company.run_expenses()  # NOTE(review): return value discarded
            return result
        else:
            return result
    def abort(self):
        """
        Because the Problem_list is state-full this is required to maintain
        harmony when the user calls an abort in the main part of the game.
        """
        self.attempt_count = 0
        if self.active_problem:
            # An aborted problem is retired, not returned to the pool.
            self.a_list.append(self.active_problem)
            self.active_problem = None
    def size(self):
        """
        Return the length of the p_list.
        """
        return len(self.p_list)
# End of class Problem_list
class Problem(object):
    """
    A data structure holding the details of a single problem.

    A problem knows how to introduce itself (one-line summary), display
    its full text, and check a candidate answer against its known
    solution. Keeping the answer check here makes it easy to add new
    types of problems with more complex answers by extending Problem.
    """
    def __init__(self, title, cost, payoff, desc, solution_format, solution):
        """
        Store the data that constitutes a problem.

        title           -- short name of the problem (str)
        cost            -- hours required to attempt it (int)
        payoff          -- dollars earned when solved (int)
        desc            -- full problem text (str)
        solution_format -- "int", "float", or "string"
        solution        -- the expected answer, typed per solution_format
        """
        self.title = title
        self.cost = cost
        self.payoff = payoff  # dollar value paid out on success
        self.desc = desc
        self.solution_format = solution_format
        self.solution = solution
    def intro(self):
        """
        Return a one-line summary string of the problem for list displays.
        """
        return "-%d hrs.\t+$%d.00\t| %s" % (self.cost, self.payoff,
                self.title)
    def display(self):
        """
        Return a multi-line string that provides the full problem text.
        """
        return "%s\nCOST: %d hrs.\nRETURN: $%d.00\n----------------------------------\n%s" % (self.title, self.cost, self.payoff, self.desc)
    def answer(self, a):
        """
        Check a potential answer *a* (usually a raw string from the user).

        Returns True/False when the input could be parsed, or an error
        message string when it could not -- callers must therefore check
        the type of the return value.
        """
        if self.solution_format == "int":
            try:
                ans = int(a)
            except (ValueError, TypeError):
                # Narrowed from a bare except: only conversion failures
                # should be reported as bad input.
                return "ERROR: please enter an integer"
        elif self.solution_format == "float":
            try:
                ans = float(a)
            except (ValueError, TypeError):
                return "ERROR: please enter a decimal number"
        elif self.solution_format == "string":
            ans = a
        else:
            # Unknown formats fall back to a raw comparison.
            ans = a
        return self.solution == ans
    def __str__(self):
        """
        Standard string interface; same as intro().
        """
        return self.intro()
# End of class Problem
class Game(object):
    """
    The highest level of the game that is specific to FirstHack: it
    controls the rules and the flow of the game through a small state
    machine (see play()). This is the most complex part of the code and
    the most likely to break -- change with care.

    Fixes in this revision:
      * the "move downtown" branch assigned self.beedback (typo), so its
        feedback message was silently lost;
      * user-facing typos "Comapany" and "Try agin" corrected;
      * bare except around int(cmd) narrowed to conversion errors.
    """
    global PROBLEM_LIST
    # Template employees used to map hire/fire commands onto kinds.
    ei = Employee(Employee.i)
    eh = Employee(Employee.h)
    es = Employee(Employee.s)
    ec = Employee(Employee.c)
    eo = Employee(Employee.o)
    welcome = "What is your company's name?\n"
    home_state = "action"
    root_prompt = "(a)ttempt problem, (h)ire employee, (f)ire employee, (m)ove office, (w)eekend, or (q)uit"
    def __init__(self, name):
        """
        Create a game for a company called *name*, with a problem list
        read from the file named by the module-level PROBLEM_LIST.
        """
        self.name = name
        self.feedback = "Your Company name is \"%s\".\nLet's get going!\nWe have a lot of work to do...\n" % name
        self.prompt = self.root_prompt
        self.company0 = Company(name)
        self.problem_list = Problem_list(PROBLEM_LIST)
        self.problem_list.company = self.company0
        self.state = self.home_state
        self.status = self.company0.__str__()
    def abort(self):
        """
        Return to the home state cleanly, resetting the problem list.
        """
        self.state = self.home_state
        self.feedback = "Aborting..."
        self.prompt = self.root_prompt
        self.problem_list.abort()
    def _employee_for(self, cmd):
        """Map a one-letter hire/fire command onto a template employee
        (None for unknown commands). Shared by the hire and fire states."""
        return {"i": self.ei, "h": self.eh, "s": self.es,
                "c": self.ec, "o": self.eo}.get(cmd)
    def play(self, cmd):
        """
        The primary state machine that drives the game. Each call consumes
        one command string and updates feedback/prompt/status accordingly.
        Handle with care.
        """
        if self.state == self.home_state:
            if cmd == "a":
                if self.problem_list.size():
                    self.state = "problem"
                    self.feedback = self.problem_list.list_all()
                    self.prompt = "Select a problem by number or \'a\' to abort:"
                else:
                    self.state = self.home_state
                    self.feedback = "Sorry there are no problems to be worked..."
            elif cmd == "h":
                self.state = "hire"
                self.feedback = "Let's bring in new talent!"
                self.prompt = "hire an (i)ntern, (h)ourly, (s)alary, or (c)ontract?"
            elif cmd == "f":
                self.state = "fire"
                self.feedback = "Time to take out the trash..."
                self.prompt = "fire an (i)ntern, (h)ourly, (s)alary, or (c)ontract?"
            elif cmd == "m":
                self.state = "move"
                self.feedback = "Renting a new pad"
                self.prompt = "Do you want a (p)ark, (d)owntown office, (s)ky scraper, (b)asement, or (c)ampus?"
            elif cmd == "w":
                reply = self.company0.run_expenses()
                if reply:
                    self.feedback = "Enjoy the weekend!\n%s" % reply
                else:
                    self.feedback = "Enjoy the weekend!"
                self.state = self.home_state
            else:
                self.feedback = "What? Try again..."
                return
        elif self.state == "problem":
            if cmd == "a":
                self.abort()
                return
            try:
                number = int(cmd)
            except (ValueError, TypeError):
                self.feedback = "%s\n\nWhat kind of number is \'%s\'? Try again..." % (self.problem_list.list_all(), cmd)
                return
            reply = self.problem_list.attempt(number)
            if reply.startswith("ERROR"):
                self.feedback = "%s\n\n%s" % (self.problem_list.list_all(), reply)
                return
            else:
                self.feedback = reply
                self.prompt = "Please enter your answer or \'a\' to abort:"
                self.state = "answer"
        elif self.state == "answer":
            if cmd == "a":
                self.abort()
                return
            reply = self.problem_list.answer(cmd)
            # answer() returns True/False on parsed input, or a string.
            if type(reply) != str and reply:
                self.feedback = "Correct!"
                self.state = self.home_state
            elif type(reply) == str and reply.startswith("MAX"):
                self.feedback = reply
                self.state = self.home_state
            else:
                self.feedback = "%s\n\n%s -- Try again..." % (self.problem_list.active_problem.display(), reply)
                return
        elif self.state == "hire":
            e = self._employee_for(cmd)
            if e is None:
                self.feedback = "What? Try again..."
                return
            # A fresh Employee is constructed so templates stay pristine.
            error = self.company0.hire_employee(Employee(e.kind))
            if error:
                self.feedback = error
                self.state = self.home_state
            else:
                self.feedback = "This guy will cost you $%d.00 and %d.00 per hour with a maximum of %d hours" % (e.hire_cost, e.rate, e.hours)
                self.state = self.home_state
        elif self.state == "fire":
            e = self._employee_for(cmd)
            if e is None:
                self.feedback = "What? Try again..."
                return
            payout = self.company0.fire_employee(e.kind)
            if payout >= 0:
                self.feedback = "Payout for the %s that was fired was $%d.00" % (e.kind, payout)
                self.state = self.home_state
            else:
                self.feedback = "Couldn't find an employee on record of type %s" % e.kind
                self.state = self.home_state
        elif self.state == "move":
            offices = {"p": Office.p, "d": Office.d, "s": Office.s,
                    "b": Office.b, "c": Office.c}
            if cmd in offices:
                # BUG FIX: the original "d" branch assigned self.beedback,
                # so moving downtown never displayed its feedback.
                self.feedback = self.company0.move_office(offices[cmd])
                self.state = self.home_state
            else:
                self.feedback = "What? Try again..."
                return
        # Common epilogue for every command that did not return early.
        if self.state == self.home_state:
            self.prompt = self.root_prompt
        self.status = self.company0.__str__()
        if self.company0.feedback is not None:
            self.feedback = "%s\n%s" % (self.feedback, self.company0.get_feedback())
# End of class Game
def scan(pfile):
    """
    Scan a text file containing problem definitions and return a list of
    Problem objects.

    The record layout is positional; lines starting with a # are comments:
        #title - str
        #cost - int
        #value - int
        #desc - str
        #solution_format - str
        #solution - ?
        ########################
        World's Hardest Easy Geometry Problem
        1000
        2000
        Using only elementary geometry, determine angle x.
        int
        30

    Note that there is very little markup in the file, so its structure is
    key to it being parsed correctly.
    """
    pl = list()
    lines = list()
    # Context manager guarantees the handle is closed even when a later
    # record fails to parse (the original leaked it on error paths).
    with open(pfile, "r") as f:
        for l in f:
            if not l.startswith("#"):
                lines.append(l.strip())
    while len(lines):
        title = lines.pop(0)
        cost = int(lines.pop(0))
        payoff = int(lines.pop(0))
        desc = lines.pop(0)
        solution_format = lines.pop(0)
        # The solution is typed according to its declared format.
        if solution_format == "int":
            solution = int(lines.pop(0))
        elif solution_format == "float":
            solution = float(lines.pop(0))
        else:
            solution = lines.pop(0)
        pl.append(Problem(title, cost, payoff, desc, solution_format,
                solution))
    return pl
if __name__ == "__main__":
    # With exactly one argument "-t", run the text-mode client; otherwise
    # start the Qt GUI. GameText/GameWindow come from elsewhere in the
    # project -- presumably the console and PyQt front ends; TODO confirm.
    if len(sys.argv) == 2:
        if sys.argv[1] == "-t":
            myapp = GameText()
            myapp.play()
    else:
        app = QtGui.QApplication(sys.argv)
        myapp = GameWindow()
        myapp.show()
        sys.exit(app.exec_())
| Python |
#!/usr/bin/python2.6
#
# Copyright 2011 Eric Gavaletz <gavaletz@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PyQt4 import QtCore, QtGui
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # No QString available (presumably PyQt API v2 / Python 3 builds);
    # fall back to passing strings through unchanged.
    _fromUtf8 = lambda s: s
class Ui_MainWindow(object):
    """
    Widget layout for the main game window.

    NOTE(review): this looks like pyuic4 output generated from a Qt
    Designer .ui file -- confirm before hand-editing, since regeneration
    would discard manual changes.
    """
    def setupUi(self, MainWindow):
        # Build the widget tree: a vertical splitter with the
        # feedback/status consoles on top and the command area below.
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.resize(800, 600)
        self.game_window = QtGui.QWidget(MainWindow)
        self.game_window.setObjectName(_fromUtf8("game_window"))
        self.gridLayout_5 = QtGui.QGridLayout(self.game_window)
        self.gridLayout_5.setObjectName(_fromUtf8("gridLayout_5"))
        self.game_split = QtGui.QSplitter(self.game_window)
        self.game_split.setOrientation(QtCore.Qt.Vertical)
        self.game_split.setObjectName(_fromUtf8("game_split"))
        self.console_split = QtGui.QSplitter(self.game_split)
        self.console_split.setOrientation(QtCore.Qt.Horizontal)
        self.console_split.setObjectName(_fromUtf8("console_split"))
        # Read-only feedback console (left).
        self.feedback_area = QtGui.QGroupBox(self.console_split)
        self.feedback_area.setObjectName(_fromUtf8("feedback_area"))
        self.horizontalLayout_4 = QtGui.QHBoxLayout(self.feedback_area)
        self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
        self.feedback_box = QtGui.QTextEdit(self.feedback_area)
        self.feedback_box.setReadOnly(True)
        self.feedback_box.setObjectName(_fromUtf8("feedback_box"))
        self.horizontalLayout_4.addWidget(self.feedback_box)
        # Read-only company status console (right).
        self.status_area = QtGui.QGroupBox(self.console_split)
        self.status_area.setObjectName(_fromUtf8("status_area"))
        self.horizontalLayout_3 = QtGui.QHBoxLayout(self.status_area)
        self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
        self.status_box = QtGui.QTextEdit(self.status_area)
        self.status_box.setReadOnly(True)
        self.status_box.setObjectName(_fromUtf8("status_box"))
        self.horizontalLayout_3.addWidget(self.status_box)
        # Command entry line plus Submit/Abort buttons (bottom).
        self.command_area = QtGui.QGroupBox(self.game_split)
        self.command_area.setObjectName(_fromUtf8("command_area"))
        self.horizontalLayout = QtGui.QHBoxLayout(self.command_area)
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.command_line = QtGui.QLineEdit(self.command_area)
        self.command_line.setObjectName(_fromUtf8("command_line"))
        self.horizontalLayout.addWidget(self.command_line)
        self.submit_button = QtGui.QPushButton(self.command_area)
        self.submit_button.setObjectName(_fromUtf8("submit_button"))
        self.horizontalLayout.addWidget(self.submit_button)
        self.abort_button = QtGui.QPushButton(self.command_area)
        self.abort_button.setObjectName(_fromUtf8("abort_button"))
        self.horizontalLayout.addWidget(self.abort_button)
        self.gridLayout_5.addWidget(self.game_split, 0, 0, 1, 1)
        MainWindow.setCentralWidget(self.game_window)
        # Menu bar, status bar, and menu actions.
        self.menubar = QtGui.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 640, 22))
        self.menubar.setObjectName(_fromUtf8("menubar"))
        self.menuFirst_Hack = QtGui.QMenu(self.menubar)
        self.menuFirst_Hack.setObjectName(_fromUtf8("menuFirst_Hack"))
        self.menuHelp = QtGui.QMenu(self.menubar)
        self.menuHelp.setObjectName(_fromUtf8("menuHelp"))
        self.menuView = QtGui.QMenu(self.menubar)
        self.menuView.setObjectName(_fromUtf8("menuView"))
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(MainWindow)
        self.statusbar.setObjectName(_fromUtf8("statusbar"))
        MainWindow.setStatusBar(self.statusbar)
        self.actionShow_Commands = QtGui.QAction(MainWindow)
        self.actionShow_Commands.setCheckable(True)
        self.actionShow_Commands.setChecked(True)
        self.actionShow_Commands.setObjectName(_fromUtf8("actionShow_Commands"))
        self.actionRetire = QtGui.QAction(MainWindow)
        self.actionRetire.setObjectName(_fromUtf8("actionRetire"))
        self.actionKeep_Feedback = QtGui.QAction(MainWindow)
        self.actionKeep_Feedback.setCheckable(True)
        self.actionKeep_Feedback.setObjectName(_fromUtf8("actionKeep_Feedback"))
        self.actionEcho_Commands = QtGui.QAction(MainWindow)
        self.actionEcho_Commands.setCheckable(True)
        self.actionEcho_Commands.setChecked(False)
        self.actionEcho_Commands.setObjectName(_fromUtf8("actionEcho_Commands"))
        self.menuFirst_Hack.addAction(self.actionRetire)
        self.menuView.addAction(self.actionShow_Commands)
        self.menuView.addAction(self.actionKeep_Feedback)
        self.menuView.addAction(self.actionEcho_Commands)
        self.menubar.addAction(self.menuFirst_Hack.menuAction())
        self.menubar.addAction(self.menuView.menuAction())
        self.menubar.addAction(self.menuHelp.menuAction())
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        # Install all user-visible strings (titles, tooltips, menu text).
        MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "MainWindow", None, QtGui.QApplication.UnicodeUTF8))
        self.feedback_area.setTitle(QtGui.QApplication.translate("MainWindow", "Feedback", None, QtGui.QApplication.UnicodeUTF8))
        self.feedback_box.setToolTip(QtGui.QApplication.translate("MainWindow", "Feedback from the system", None, QtGui.QApplication.UnicodeUTF8))
        self.status_area.setTitle(QtGui.QApplication.translate("MainWindow", "Status", None, QtGui.QApplication.UnicodeUTF8))
        self.status_box.setToolTip(QtGui.QApplication.translate("MainWindow", "Info about your company", None, QtGui.QApplication.UnicodeUTF8))
        self.command_area.setTitle(QtGui.QApplication.translate("MainWindow", "Commands", None, QtGui.QApplication.UnicodeUTF8))
        self.command_line.setToolTip(QtGui.QApplication.translate("MainWindow", "Enter commands here based on feedback received", None, QtGui.QApplication.UnicodeUTF8))
        self.submit_button.setToolTip(QtGui.QApplication.translate("MainWindow", "You know, you can just hit enter right?", None, QtGui.QApplication.UnicodeUTF8))
        self.submit_button.setText(QtGui.QApplication.translate("MainWindow", "Submit", None, QtGui.QApplication.UnicodeUTF8))
        self.abort_button.setToolTip(QtGui.QApplication.translate("MainWindow", "Stop what you are doing!", None, QtGui.QApplication.UnicodeUTF8))
        self.abort_button.setText(QtGui.QApplication.translate("MainWindow", "Abort", None, QtGui.QApplication.UnicodeUTF8))
        self.menuFirst_Hack.setTitle(QtGui.QApplication.translate("MainWindow", "First-Hack", None, QtGui.QApplication.UnicodeUTF8))
        self.menuHelp.setTitle(QtGui.QApplication.translate("MainWindow", "Help", None, QtGui.QApplication.UnicodeUTF8))
        self.menuView.setTitle(QtGui.QApplication.translate("MainWindow", "Settings", None, QtGui.QApplication.UnicodeUTF8))
        self.actionShow_Commands.setText(QtGui.QApplication.translate("MainWindow", "Show Commands", None, QtGui.QApplication.UnicodeUTF8))
        self.actionRetire.setText(QtGui.QApplication.translate("MainWindow", "Retire", None, QtGui.QApplication.UnicodeUTF8))
        self.actionKeep_Feedback.setText(QtGui.QApplication.translate("MainWindow", "Keep Feedback", None, QtGui.QApplication.UnicodeUTF8))
        self.actionEcho_Commands.setText(QtGui.QApplication.translate("MainWindow", "Echo Commands", None, QtGui.QApplication.UnicodeUTF8))
| Python |
#!/usr/bin/python2.6
#
# Copyright 2011 Eric Gavaletz <gavaletz@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
APP = ['FirstHack.py']
DATA_FILES = ['--iconfile',]
OPTIONS = {'argv_emulation': False, 'iconfile': 'fh.icns',
'includes': ['sip', 'PyQt4', 'PyQt4.QtCore', 'PyQt4.QtGui'],
'excludes': ['PyQt4.QtDesigner', 'PyQt4.QtNetwork', 'PyQt4.QtOpenGL', 'PyQt4.QtScript', 'PyQt4.QtSql', 'PyQt4.QtTest', 'PyQt4.QtWebKit', 'PyQt4.QtXml', 'PyQt4.phonon']}
setup(
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS},
setup_requires=['py2app'],
)
| Python |
#!/usr/bin/python2.6
#
# Copyright 2011 Eric Gavaletz <gavaletz@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Module metadata.
__author__ = 'Eric Gavaletz <gavaletz@gmail.com>'
__credits__ = 'None'
__date__ = '22 February 2011'
__version__ = '$Revision: 1 $'
import sys
import re
import shelve
import FirstHack
def scan(pfile, back_file):
    """
    Parse the problem-definition text file *pfile* into a Problem_list
    backed by *back_file* and return it.

    The record layout is positional; lines starting with # are comments:
        #title - str
        #cost - int
        #value - int
        #desc - str
        #solution_format - str
        #solution - ?
        ########################
        World's Hardest Easy Geometry Problem
        1000
        2000
        Using only elementary geometry, determine angle x.
        int
        30
    """
    pl = FirstHack.Problem_list(back_file)
    lines = list()
    # BUG FIX: the original never closed the input file; close it
    # deterministically even if reading fails.
    f = open(pfile, "r")
    try:
        for l in f:
            if not l.startswith("#"):
                lines.append(l.strip())
    finally:
        f.close()
    while len(lines):
        title = lines.pop(0)
        cost = int(lines.pop(0))
        payoff = int(lines.pop(0))
        desc = lines.pop(0)
        solution_format = lines.pop(0)
        # The solution is typed according to its declared format.
        if solution_format == "int":
            solution = int(lines.pop(0))
        elif solution_format == "float":
            solution = float(lines.pop(0))
        else:
            solution = lines.pop(0)
        p = FirstHack.Problem(title, cost, payoff, desc, solution_format, solution)
        pl.append(p)
    return pl
def verify_and_write(plist):
"""
Ask before we clobber someone's file...its called being polite.
"""
print plist.list_all()
print "\nThere are %d problems ready to be writen to %s\n" % (len(plist.p_list),
plist.backing_file)
ok = raw_input("Is this OK? y/n:")
if ok == "y":
plist.write()
else:
print "ABORTING"
def usage():
"""
Helping n00bs since day one...
"""
print "Use the source luke..."
print "python %s <problem_file> -o <backing_file.prob>" % sys.argv[0]
print "backing_file.prob must end with \".prob\""
sys.exit()
def main():
    """
    Entry point: validate the command line, scan the problem file, and
    confirm before writing the backing file.
    """
    # usage() calls sys.exit(), so nothing below runs on bad arguments.
    bad_args = (len(sys.argv) < 4
            or sys.argv[2] != "-o"
            or not sys.argv[3].endswith(".prob"))
    if bad_args:
        usage()
    verify_and_write(scan(sys.argv[1], sys.argv[3]))
if __name__ == "__main__":
    # Run the converter when invoked as a script.
    main()
| Python |
#!/usr/bin/python2.6
#
# Copyright 2011 Eric Gavaletz <gavaletz@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| Python |
#!/usr/bin/env python
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit http://feedparser.org/ for the latest version
Visit http://feedparser.org/docs/ for the latest documentation
Required: Python 2.1 or later
Recommended: Python 2.3 or later
Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "4.1"# + "$Revision: 1.92 $"[11:15] + "-cvs"
__license__ = """Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>"]
# Nonzero enables debug behavior (e.g. the chardet debug constants below).
_debug = 0
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0
# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
# ---------- required modules (should come with any Python distribution) ----------
import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2
try:
from cStringIO import StringIO as _StringIO
except:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except:
gzip = None
try:
import zlib
except:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
from xml.sax.saxutils import escape as _xmlescape
_XML_AVAILABLE = 1
except:
_XML_AVAILABLE = 0
def _xmlescape(data):
data = data.replace('&', '&')
data = data.replace('>', '>')
data = data.replace('<', '<')
return data
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except:
base64 = binascii = None
# cjkcodecs and iconv_codec provide support for more character encodings.
# Both are available from http://cjkpython.i18n.org/
try:
import cjkcodecs.aliases
except:
pass
try:
import iconv_codec
except:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
if _debug:
import chardet.constants
chardet.constants._debug = 1
except:
chardet = None
# ---------- don't touch these ----------
# Internal exceptions used to signal parsing conditions between layers.
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass
# Monkey-patch sgmllib's recognition patterns so feed markup (namespaced
# tags, <! declarations, hex charrefs) is tokenized the way we need.
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
sgmllib.special = re.compile('<!')
sgmllib.charref = re.compile('&#(x?[0-9A-Fa-f]+)[^0-9A-Fa-f]')
# Maps internal version codes to human-readable feed-format names; '' means
# the format could not be determined at all.
SUPPORTED_VERSIONS = {'': 'unknown',
                      'rss090': 'RSS 0.90',
                      'rss091n': 'RSS 0.91 (Netscape)',
                      'rss091u': 'RSS 0.91 (Userland)',
                      'rss092': 'RSS 0.92',
                      'rss093': 'RSS 0.93',
                      'rss094': 'RSS 0.94',
                      'rss20': 'RSS 2.0',
                      'rss10': 'RSS 1.0',
                      'rss': 'RSS (unknown version)',
                      'atom01': 'Atom 0.1',
                      'atom02': 'Atom 0.2',
                      'atom03': 'Atom 0.3',
                      'atom10': 'Atom 1.0',
                      'atom': 'Atom (unknown version)',
                      'cdf': 'CDF',
                      'hotrss': 'Hot RSS'
                      }
try:
    UserDict = dict
except NameError:
    # Python 2.1 does not have dict
    from UserDict import UserDict
    def dict(aList):
        # Minimal dict() replacement: build a mapping from (key, value)
        # pairs, which is the only way this module uses dict().
        rc = {}
        for k, v in aList:
            rc[k] = v
        return rc
class FeedParserDict(UserDict):
    """
    Dictionary with attribute-style access plus a compatibility keymap
    that lets legacy feedparser element names (e.g. 'channel', 'items',
    'guid') resolve to their modern equivalents.
    """
    # Legacy-name -> current-name mapping; a list value means "try these
    # keys in order" on lookup (only the first is used for writes).
    keymap = {'channel': 'feed',
              'items': 'entries',
              'guid': 'id',
              'date': 'updated',
              'date_parsed': 'updated_parsed',
              'description': ['subtitle', 'summary'],
              'url': ['href'],
              'modified': 'updated',
              'modified_parsed': 'updated_parsed',
              'issued': 'published',
              'issued_parsed': 'published_parsed',
              'copyright': 'rights',
              'copyright_detail': 'rights_detail',
              'tagline': 'subtitle',
              'tagline_detail': 'subtitle_detail'}
    def __getitem__(self, key):
        # 'category'/'categories' are synthesized from the 'tags' list.
        if key == 'category':
            return UserDict.__getitem__(self, 'tags')[0]['term']
        if key == 'categories':
            return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')]
        realkey = self.keymap.get(key, key)
        if type(realkey) == types.ListType:
            for k in realkey:
                if UserDict.has_key(self, k):
                    return UserDict.__getitem__(self, k)
        # A value stored under the requested key wins over the mapped one.
        if UserDict.has_key(self, key):
            return UserDict.__getitem__(self, key)
        return UserDict.__getitem__(self, realkey)
    def __setitem__(self, key, value):
        # Writes go through the keymap too, so d['modified'] stores under
        # 'updated'; list targets collapse to their first entry.
        for k in self.keymap.keys():
            if key == k:
                key = self.keymap[k]
                if type(key) == types.ListType:
                    key = key[0]
        return UserDict.__setitem__(self, key, value)
    def get(self, key, default=None):
        # Uses our has_key/ __getitem__ so the keymap applies here too.
        if self.has_key(key):
            return self[key]
        else:
            return default
    def setdefault(self, key, value):
        if not self.has_key(key):
            self[key] = value
        return self[key]
    def has_key(self, key):
        # True for real attributes as well as mapped dictionary keys.
        try:
            return hasattr(self, key) or UserDict.has_key(self, key)
        except AttributeError:
            return False
    def __getattr__(self, key):
        try:
            return self.__dict__[key]
        except KeyError:
            pass
        try:
            # Non-underscore attributes fall through to item lookup.
            assert not key.startswith('_')
            return self.__getitem__(key)
        except:
            raise AttributeError, "object has no attribute '%s'" % key
    def __setattr__(self, key, value):
        # Underscore names and UserDict's 'data' stay real attributes;
        # everything else is stored as a dictionary item.
        if key.startswith('_') or key == 'data':
            self.__dict__[key] = value
        else:
            return self.__setitem__(key, value)
    def __contains__(self, key):
        return self.has_key(key)
def zopeCompatibilityHack():
    """
    Replace the module-level FeedParserDict class with a factory that
    returns plain dicts, for environments that cannot handle dict
    subclasses. Calling this permanently disables attribute-style access.
    """
    global FeedParserDict
    del FeedParserDict
    # Rebinds the global name declared above.
    def FeedParserDict(aDict=None):
        rc = {}
        if aDict:
            rc.update(aDict)
        return rc
# Lazily-built 256-byte EBCDIC->ASCII translation table (see below).
_ebcdic_to_ascii_map = None
def _ebcdic_to_ascii(s):
    """
    Translate the EBCDIC-encoded byte string *s* to ASCII.

    The translation table is built on first use from the emap tuple and
    cached in the module-level _ebcdic_to_ascii_map.
    """
    global _ebcdic_to_ascii_map
    if not _ebcdic_to_ascii_map:
        # emap[i] is the ASCII code point for EBCDIC byte value i.
        emap = (
            0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
            16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
            128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
            144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
            32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
            38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
            45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
            186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
            195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
            202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
            209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
            216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
            123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
            125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
            92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
            48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
            )
        import string
        _ebcdic_to_ascii_map = string.maketrans( \
            ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
    return s.translate(_ebcdic_to_ascii_map)
# collapses redundant slashes immediately after the scheme, e.g. 'http:////' -> 'http://'
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
    """Join `uri` against `base` after normalizing stray slashes."""
    uri = _urifixer.sub(r'\1\3', uri)
    return urlparse.urljoin(base, uri)
class _FeedParserMixin:
    """Shared element-handling logic for the strict and loose feed parsers.

    Subclasses feed SAX-style start/end/data events into this mixin, which
    accumulates parsed results in `feeddata` (feed-level) and `entries`
    (item-level) as FeedParserDict instances.
    """
    # known namespace URI -> canonical prefix used for handler dispatch;
    # an empty prefix means "core feed vocabulary" (the many RSS/Atom variants)
    namespaces = {'': '',
                  'http://backend.userland.com/rss': '',
                  'http://blogs.law.harvard.edu/tech/rss': '',
                  'http://purl.org/rss/1.0/': '',
                  'http://my.netscape.com/rdf/simple/0.9/': '',
                  'http://example.com/newformat#': '',
                  'http://example.com/necho': '',
                  'http://purl.org/echo/': '',
                  'uri/of/echo/namespace#': '',
                  'http://purl.org/pie/': '',
                  'http://purl.org/atom/ns#': '',
                  'http://www.w3.org/2005/Atom': '',
                  'http://purl.org/rss/1.0/modules/rss091#': '',
                  'http://webns.net/mvcb/': 'admin',
                  'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
                  'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
                  'http://media.tangent.org/rss/1.0/': 'audio',
                  'http://backend.userland.com/blogChannelModule': 'blogChannel',
                  'http://web.resource.org/cc/': 'cc',
                  'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
                  'http://purl.org/rss/1.0/modules/company': 'co',
                  'http://purl.org/rss/1.0/modules/content/': 'content',
                  'http://my.theinfo.org/changed/1.0/rss/': 'cp',
                  'http://purl.org/dc/elements/1.1/': 'dc',
                  'http://purl.org/dc/terms/': 'dcterms',
                  'http://purl.org/rss/1.0/modules/email/': 'email',
                  'http://purl.org/rss/1.0/modules/event/': 'ev',
                  'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
                  'http://freshmeat.net/rss/fm/': 'fm',
                  'http://xmlns.com/foaf/0.1/': 'foaf',
                  'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
                  'http://postneo.com/icbm/': 'icbm',
                  'http://purl.org/rss/1.0/modules/image/': 'image',
                  'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
                  'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
                  'http://purl.org/rss/1.0/modules/link/': 'l',
                  'http://search.yahoo.com/mrss': 'media',
                  'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
                  'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
                  'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
                  'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
                  'http://purl.org/rss/1.0/modules/reference/': 'ref',
                  'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
                  'http://purl.org/rss/1.0/modules/search/': 'search',
                  'http://purl.org/rss/1.0/modules/slash/': 'slash',
                  'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
                  'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
                  'http://hacks.benhammersley.com/rss/streaming/': 'str',
                  'http://purl.org/rss/1.0/modules/subscription/': 'sub',
                  'http://purl.org/rss/1.0/modules/syndication/': 'sy',
                  'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
                  'http://purl.org/rss/1.0/modules/threading/': 'thr',
                  'http://purl.org/rss/1.0/modules/textinput/': 'ti',
                  'http://madskills.com/public/xml/rss/module/trackback/':'trackback',
                  'http://wellformedweb.org/commentAPI/': 'wfw',
                  'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
                  'http://www.w3.org/1999/xhtml': 'xhtml',
                  'http://www.w3.org/XML/1998/namespace': 'xml',
                  'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf'
                  }
    # lowercased URI -> prefix; populated lazily on first __init__
    _matchnamespaces = {}
    # elements whose text content may be a relative URI (resolved in pop())
    can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'license', 'icon', 'logo']
    # elements whose embedded markup may contain relative URIs to resolve
    can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
    # elements whose embedded markup must be sanitized before returning
    can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
    # MIME types treated as HTML for resolution/sanitization purposes
    html_types = ['text/html', 'application/xhtml+xml']
    def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
        """Initialize parser state.

        baseuri  -- base URI used to resolve relative links
        baselang -- default xml:lang inherited from the transport layer
        encoding -- character encoding used when decoding element text
        """
        if _debug: sys.stderr.write('initializing FeedParser\n')
        # build the lowercased namespace lookup table once (shared class attr)
        if not self._matchnamespaces:
            for k, v in self.namespaces.items():
                self._matchnamespaces[k.lower()] = v
        self.feeddata = FeedParserDict() # feed-level data
        self.encoding = encoding # character encoding
        self.entries = [] # list of entry-level data
        self.version = '' # feed type/version, see SUPPORTED_VERSIONS
        self.namespacesInUse = {} # dictionary of namespaces defined by the feed
        # the following are used internally to track state;
        # this is really out of control and should be refactored
        self.infeed = 0
        self.inentry = 0
        self.incontent = 0
        self.intextinput = 0
        self.inimage = 0
        self.inauthor = 0
        self.incontributor = 0
        self.inpublisher = 0
        self.insource = 0
        self.sourcedata = FeedParserDict()
        self.contentparams = FeedParserDict()
        self._summaryKey = None
        self.namespacemap = {}
        self.elementstack = []
        self.basestack = []
        self.langstack = []
        self.baseuri = baseuri or ''
        self.lang = baselang or None
        if baselang:
            self.feeddata['language'] = baselang
    def unknown_starttag(self, tag, attrs):
        """Dispatch a start tag to its _start_<prefix><name> handler.

        Also tracks xml:base / xml:lang scoping, namespace declarations,
        and inline XHTML content that must be re-serialized verbatim.
        """
        if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs))
        # normalize attrs
        attrs = [(k.lower(), v) for k, v in attrs]
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        # track xml:base and xml:lang
        attrsD = dict(attrs)
        baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
        self.baseuri = _urljoin(self.baseuri, baseuri)
        lang = attrsD.get('xml:lang', attrsD.get('lang'))
        if lang == '':
            # xml:lang could be explicitly set to '', we need to capture that
            lang = None
        elif lang is None:
            # if no xml:lang is specified, use parent lang
            lang = self.lang
        if lang:
            if tag in ('feed', 'rss', 'rdf:RDF'):
                self.feeddata['language'] = lang
        self.lang = lang
        # push scope so unknown_endtag can restore base/lang on exit
        self.basestack.append(self.baseuri)
        self.langstack.append(lang)
        # track namespaces
        for prefix, uri in attrs:
            if prefix.startswith('xmlns:'):
                self.trackNamespace(prefix[6:], uri)
            elif prefix == 'xmlns':
                self.trackNamespace(None, uri)
        # track inline content
        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            # Note: probably shouldn't simply recreate localname here, but
            # our namespace handling isn't actually 100% correct in cases where
            # the feed redefines the default namespace (which is actually
            # the usual case for inline content, thanks Sam), so here we
            # cheat and just reconstruct the element based on localname
            # because that compensates for the bugs in our namespace handling.
            # This will horribly munge inline content with non-empty qnames,
            # but nobody actually does that, so I'm not fixing it.
            tag = tag.split(':')[-1]
            return self.handle_data('<%s%s>' % (tag, ''.join([' %s="%s"' % t for t in attrs])), escape=0)
        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'
        # special hack for better tracking of empty textinput/image elements in illformed feeds
        if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
            self.intextinput = 0
        if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
            self.inimage = 0
        # call special handler (if defined) or default handler
        methodname = '_start_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            return method(attrsD)
        except AttributeError:
            return self.push(prefix + suffix, 1)
    def unknown_endtag(self, tag):
        """Dispatch an end tag to its _end_<prefix><name> handler and
        restore the xml:base / xml:lang scope pushed by the start tag."""
        if _debug: sys.stderr.write('end %s\n' % tag)
        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'
        # call special handler (if defined) or default handler
        methodname = '_end_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            method()
        except AttributeError:
            self.pop(prefix + suffix)
        # track inline content
        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            # inline XHTML: re-serialize the closing tag as literal data
            tag = tag.split(':')[-1]
            self.handle_data('</%s>' % tag, escape=0)
        # track xml:base and xml:lang going out of scope
        if self.basestack:
            self.basestack.pop()
            if self.basestack and self.basestack[-1]:
                self.baseuri = self.basestack[-1]
        if self.langstack:
            self.langstack.pop()
            if self.langstack: # and (self.langstack[-1] is not None):
                self.lang = self.langstack[-1]
    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        if not self.elementstack: return
        ref = ref.lower()
        # markup-significant characters stay as references; everything else
        # is decoded to its UTF-8 byte sequence
        if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
            text = '&#%s;' % ref
        else:
            if ref[0] == 'x':
                c = int(ref[1:], 16)
            else:
                c = int(ref)
            text = unichr(c).encode('utf-8')
        self.elementstack[-1][2].append(text)
    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        if not self.elementstack: return
        if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
        # XML's five predefined entities stay as references
        if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
            text = '&%s;' % ref
        else:
            # entity resolution graciously donated by Aaron Swartz
            def name2cp(k):
                import htmlentitydefs
                if hasattr(htmlentitydefs, 'name2codepoint'): # requires Python 2.3
                    return htmlentitydefs.name2codepoint[k]
                k = htmlentitydefs.entitydefs[k]
                if k.startswith('&#') and k.endswith(';'):
                    return int(k[2:-1]) # not in latin-1
                return ord(k)
            # unknown entities are passed through unresolved
            try: name2cp(ref)
            except KeyError: text = '&%s;' % ref
            else: text = unichr(name2cp(ref)).encode('utf-8')
        self.elementstack[-1][2].append(text)
    def handle_data(self, text, escape=1):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        if not self.elementstack: return
        if escape and self.contentparams.get('type') == 'application/xhtml+xml':
            text = _xmlescape(text)
        self.elementstack[-1][2].append(text)
    def handle_comment(self, text):
        # called for each comment, e.g. <!-- insert message here -->
        pass
    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        pass
    def handle_decl(self, text):
        # declarations (e.g. DOCTYPE) are ignored
        pass
    def parse_declaration(self, i):
        # override internal declaration handler to handle CDATA blocks
        if _debug: sys.stderr.write('entering parse_declaration\n')
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            # unterminated CDATA: consume to end of input
            if k == -1: k = len(self.rawdata)
            self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
            return k+3
        else:
            # any other declaration: skip to the closing '>'
            k = self.rawdata.find('>', i)
            return k+1
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text':
contentType = 'text/plain'
elif contentType == 'html':
contentType = 'text/html'
elif contentType == 'xhtml':
contentType = 'application/xhtml+xml'
return contentType
    def trackNamespace(self, prefix, uri):
        """Record a declared namespace and infer the feed version from it."""
        loweruri = uri.lower()
        if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
            self.version = 'rss090'
        if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
            self.version = 'rss10'
        if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
            self.version = 'atom10'
        if loweruri.find('backend.userland.com/rss') <> -1:
            # match any backend.userland.com namespace
            uri = 'http://backend.userland.com/rss'
            loweruri = uri
        if self._matchnamespaces.has_key(loweruri):
            # known namespace: remember the canonical prefix for dispatch
            self.namespacemap[prefix] = self._matchnamespaces[loweruri]
            self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
        else:
            self.namespacesInUse[prefix or ''] = uri
    def resolveURI(self, uri):
        # resolve a possibly-relative URI against the current xml:base
        return _urljoin(self.baseuri or '', uri)
    def decodeEntities(self, element, data):
        # no-op hook; subclasses override to decode escaped markup
        return data
    def push(self, element, expectingText):
        # open an element frame: [name, whether text is expected, text pieces]
        self.elementstack.append([element, expectingText, []])
    def pop(self, element, stripWhitespace=1):
        """Close the element frame for `element`, post-process its
        accumulated text (base64, relative URIs, entities, sanitization),
        and store the result on the current feed/entry context."""
        if not self.elementstack: return
        # ignore mismatched end tags in ill-formed feeds
        if self.elementstack[-1][0] != element: return
        element, expectingText, pieces = self.elementstack.pop()
        output = ''.join(pieces)
        if stripWhitespace:
            output = output.strip()
        if not expectingText: return output
        # decode base64 content
        if base64 and self.contentparams.get('base64', 0):
            try:
                output = base64.decodestring(output)
            except binascii.Error:
                pass
            except binascii.Incomplete:
                pass
        # resolve relative URIs
        if (element in self.can_be_relative_uri) and output:
            output = self.resolveURI(output)
        # decode entities within embedded markup
        if not self.contentparams.get('base64', 0):
            output = self.decodeEntities(element, output)
        # remove temporary cruft from contentparams
        try:
            del self.contentparams['mode']
        except KeyError:
            pass
        try:
            del self.contentparams['base64']
        except KeyError:
            pass
        # resolve relative URIs within embedded markup
        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
            if element in self.can_contain_relative_uris:
                output = _resolveRelativeURIs(output, self.baseuri, self.encoding)
        # sanitize embedded markup
        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
            if element in self.can_contain_dangerous_markup:
                output = _sanitizeHTML(output, self.encoding)
        if self.encoding and type(output) != type(u''):
            try:
                output = unicode(output, self.encoding)
            except:
                # NOTE(review): bare except keeps this a best-effort decode;
                # undecodable output is returned as a byte string
                pass
        # categories/tags/keywords/whatever are handled in _end_category
        if element == 'category':
            return output
        # store output in appropriate place(s)
        if self.inentry and not self.insource:
            if element == 'content':
                # entries may carry multiple content blocks
                self.entries[-1].setdefault(element, [])
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                self.entries[-1][element].append(contentparams)
            elif element == 'link':
                self.entries[-1][element] = output
                if output:
                    self.entries[-1]['links'][-1]['href'] = output
            else:
                if element == 'description':
                    element = 'summary'
                self.entries[-1][element] = output
                if self.incontent:
                    contentparams = copy.deepcopy(self.contentparams)
                    contentparams['value'] = output
                    self.entries[-1][element + '_detail'] = contentparams
        elif (self.infeed or self.insource) and (not self.intextinput) and (not self.inimage):
            context = self._getContext()
            if element == 'description':
                element = 'subtitle'
            context[element] = output
            if element == 'link':
                context['links'][-1]['href'] = output
            elif self.incontent:
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                context[element + '_detail'] = contentparams
        return output
    def pushContent(self, tag, attrsD, defaultContentType, expectingText):
        # enter a content-bearing element: record its type/language/base
        # so pop() knows how to post-process the accumulated text
        self.incontent += 1
        self.contentparams = FeedParserDict({
            'type': self.mapContentType(attrsD.get('type', defaultContentType)),
            'language': self.lang,
            'base': self.baseuri})
        self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
        self.push(tag, expectingText)
    def popContent(self, tag):
        # leave a content-bearing element and reset contentparams
        value = self.pop(tag)
        self.incontent -= 1
        self.contentparams.clear()
        return value
    def _mapToStandardPrefix(self, name):
        # rewrite 'declaredprefix:suffix' to the canonical prefix form
        colonpos = name.find(':')
        if colonpos <> -1:
            prefix = name[:colonpos]
            suffix = name[colonpos+1:]
            prefix = self.namespacemap.get(prefix, prefix)
            name = prefix + ':' + suffix
        return name
    def _getAttribute(self, attrsD, name):
        # attribute lookup that honors namespace prefix remapping
        return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith('text/'):
return 0
if self.contentparams['type'].endswith('+xml'):
return 0
if self.contentparams['type'].endswith('/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
def _save(self, key, value):
context = self._getContext()
context.setdefault(key, value)
    def _start_rss(self, attrsD):
        # infer the RSS flavor from the version attribute
        versionmap = {'0.91': 'rss091u',
                      '0.92': 'rss092',
                      '0.93': 'rss093',
                      '0.94': 'rss094'}
        if not self.version:
            attr_version = attrsD.get('version', '')
            version = versionmap.get(attr_version)
            if version:
                self.version = version
            elif attr_version.startswith('2.'):
                self.version = 'rss20'
            else:
                self.version = 'rss'
    def _start_dlhottitles(self, attrsD):
        self.version = 'hotrss'
    def _start_channel(self, attrsD):
        self.infeed = 1
        self._cdf_common(attrsD)
    _start_feedinfo = _start_channel
    def _cdf_common(self, attrsD):
        # CDF carries modification date and link as attributes; replay
        # them through the normal start/end handler machinery
        if attrsD.has_key('lastmod'):
            self._start_modified({})
            self.elementstack[-1][-1] = attrsD['lastmod']
            self._end_modified()
        if attrsD.has_key('href'):
            self._start_link({})
            self.elementstack[-1][-1] = attrsD['href']
            self._end_link()
    def _start_feed(self, attrsD):
        # Atom feed root; map the version attribute to a version string
        self.infeed = 1
        versionmap = {'0.1': 'atom01',
                      '0.2': 'atom02',
                      '0.3': 'atom03'}
        if not self.version:
            attr_version = attrsD.get('version')
            version = versionmap.get(attr_version)
            if version:
                self.version = version
            else:
                self.version = 'atom'
    def _end_channel(self):
        self.infeed = 0
    _end_feed = _end_channel
    def _start_image(self, attrsD):
        # <image> sub-elements are routed into context['image'] while inimage is set
        self.inimage = 1
        self.push('image', 0)
        context = self._getContext()
        context.setdefault('image', FeedParserDict())
    def _end_image(self):
        self.pop('image')
        self.inimage = 0
    def _start_textinput(self, attrsD):
        # <textinput> sub-elements are routed into context['textinput']
        self.intextinput = 1
        self.push('textinput', 0)
        context = self._getContext()
        context.setdefault('textinput', FeedParserDict())
    _start_textInput = _start_textinput
    def _end_textinput(self):
        self.pop('textinput')
        self.intextinput = 0
    _end_textInput = _end_textinput
    def _start_author(self, attrsD):
        self.inauthor = 1
        self.push('author', 1)
    _start_managingeditor = _start_author
    _start_dc_author = _start_author
    _start_dc_creator = _start_author
    _start_itunes_author = _start_author
    def _end_author(self):
        self.pop('author')
        self.inauthor = 0
        # reconcile the free-form author string with author_detail
        self._sync_author_detail()
    _end_managingeditor = _end_author
    _end_dc_author = _end_author
    _end_dc_creator = _end_author
    _end_itunes_author = _end_author
    def _start_itunes_owner(self, attrsD):
        # iTunes owner is stored under the 'publisher' key
        self.inpublisher = 1
        self.push('publisher', 0)
    def _end_itunes_owner(self):
        self.pop('publisher')
        self.inpublisher = 0
        self._sync_author_detail('publisher')
    def _start_contributor(self, attrsD):
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('contributor', 0)
    def _end_contributor(self):
        self.pop('contributor')
        self.incontributor = 0
    def _start_dc_contributor(self, attrsD):
        # dc:contributor carries only a name, so push 'name' directly
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('name', 0)
    def _end_dc_contributor(self):
        self._end_name()
        self.incontributor = 0
    def _start_name(self, attrsD):
        self.push('name', 0)
    _start_itunes_name = _start_name
    def _end_name(self):
        # route the name to whichever person construct is currently open
        value = self.pop('name')
        if self.inpublisher:
            self._save_author('name', value, 'publisher')
        elif self.inauthor:
            self._save_author('name', value)
        elif self.incontributor:
            self._save_contributor('name', value)
        elif self.intextinput:
            context = self._getContext()
            context['textinput']['name'] = value
    _end_itunes_name = _end_name
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['image']['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['image']['height'] = value
    def _start_url(self, attrsD):
        # URL-ish elements are all collected under 'href'
        self.push('href', 1)
    _start_homepage = _start_url
    _start_uri = _start_url
    def _end_url(self):
        # route the URL to whichever construct is currently open
        value = self.pop('href')
        if self.inauthor:
            self._save_author('href', value)
        elif self.incontributor:
            self._save_contributor('href', value)
        elif self.inimage:
            context = self._getContext()
            context['image']['href'] = value
        elif self.intextinput:
            context = self._getContext()
            context['textinput']['link'] = value
    _end_homepage = _end_url
    _end_uri = _end_url
    def _start_email(self, attrsD):
        self.push('email', 0)
    _start_itunes_email = _start_email
    def _end_email(self):
        # route the email to whichever person construct is currently open
        value = self.pop('email')
        if self.inpublisher:
            self._save_author('email', value, 'publisher')
        elif self.inauthor:
            self._save_author('email', value)
        elif self.incontributor:
            self._save_contributor('email', value)
    _end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
    def _save_author(self, key, value, prefix='author'):
        # store one field of the author/publisher detail dict
        context = self._getContext()
        context.setdefault(prefix + '_detail', FeedParserDict())
        context[prefix + '_detail'][key] = value
        self._sync_author_detail()
    def _save_contributor(self, key, value):
        # store one field on the most recently opened contributor
        context = self._getContext()
        context.setdefault('contributors', [FeedParserDict()])
        context['contributors'][-1][key] = value
    def _sync_author_detail(self, key='author'):
        """Keep the free-form '<key>' string and the structured
        '<key>_detail' dict consistent, deriving one from the other."""
        context = self._getContext()
        detail = context.get('%s_detail' % key)
        if detail:
            # detail dict exists: rebuild the display string from it
            name = detail.get('name')
            email = detail.get('email')
            if name and email:
                context[key] = '%s (%s)' % (name, email)
            elif name:
                context[key] = name
            elif email:
                context[key] = email
        else:
            # only the free-form string exists: try to split out an email
            # address and a name from it
            author = context.get(key)
            if not author: return
            emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))''', author)
            if not emailmatch: return
            email = emailmatch.group(0)
            # probably a better way to do the following, but it passes all the tests
            author = author.replace(email, '')
            author = author.replace('()', '')
            author = author.strip()
            if author and (author[0] == '('):
                author = author[1:]
            if author and (author[-1] == ')'):
                author = author[:-1]
            author = author.strip()
            context.setdefault('%s_detail' % key, FeedParserDict())
            context['%s_detail' % key]['name'] = author
            context['%s_detail' % key]['email'] = email
    def _start_subtitle(self, attrsD):
        self.pushContent('subtitle', attrsD, 'text/plain', 1)
    _start_tagline = _start_subtitle
    _start_itunes_subtitle = _start_subtitle
    def _end_subtitle(self):
        self.popContent('subtitle')
    _end_tagline = _end_subtitle
    _end_itunes_subtitle = _end_subtitle
    def _start_rights(self, attrsD):
        self.pushContent('rights', attrsD, 'text/plain', 1)
    _start_dc_rights = _start_rights
    _start_copyright = _start_rights
    def _end_rights(self):
        self.popContent('rights')
    _end_dc_rights = _end_rights
    _end_copyright = _end_rights
    def _start_item(self, attrsD):
        # open a new entry; RDF feeds may carry the id as rdf:about
        self.entries.append(FeedParserDict())
        self.push('item', 0)
        self.inentry = 1
        self.guidislink = 0
        id = self._getAttribute(attrsD, 'rdf:about')
        if id:
            context = self._getContext()
            context['id'] = id
        self._cdf_common(attrsD)
    _start_entry = _start_item
    _start_product = _start_item
    def _end_item(self):
        self.pop('item')
        self.inentry = 0
    _end_entry = _end_item
    def _start_dc_language(self, attrsD):
        self.push('language', 1)
    _start_language = _start_dc_language
    def _end_dc_language(self):
        # declared language also becomes the current xml:lang fallback
        self.lang = self.pop('language')
    _end_language = _end_dc_language
    def _start_dc_publisher(self, attrsD):
        self.push('publisher', 1)
    _start_webmaster = _start_dc_publisher
    def _end_dc_publisher(self):
        self.pop('publisher')
        self._sync_author_detail('publisher')
    _end_webmaster = _end_dc_publisher
    def _start_published(self, attrsD):
        self.push('published', 1)
    _start_dcterms_issued = _start_published
    _start_issued = _start_published
    def _end_published(self):
        # store both the raw string and the parsed time tuple
        value = self.pop('published')
        self._save('published_parsed', _parse_date(value))
    _end_dcterms_issued = _end_published
    _end_issued = _end_published
    def _start_updated(self, attrsD):
        self.push('updated', 1)
    _start_modified = _start_updated
    _start_dcterms_modified = _start_updated
    _start_pubdate = _start_updated
    _start_dc_date = _start_updated
    def _end_updated(self):
        value = self.pop('updated')
        parsed_value = _parse_date(value)
        self._save('updated_parsed', parsed_value)
    _end_modified = _end_updated
    _end_dcterms_modified = _end_updated
    _end_pubdate = _end_updated
    _end_dc_date = _end_updated
    def _start_created(self, attrsD):
        self.push('created', 1)
    _start_dcterms_created = _start_created
    def _end_created(self):
        value = self.pop('created')
        self._save('created_parsed', _parse_date(value))
    _end_dcterms_created = _end_created
    def _start_expirationdate(self, attrsD):
        self.push('expired', 1)
    def _end_expirationdate(self):
        self._save('expired_parsed', _parse_date(self.pop('expired')))
    def _start_cc_license(self, attrsD):
        # cc:license carries its value as rdf:resource, so the whole
        # push/append/pop cycle happens inside the start handler
        self.push('license', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('license')
    def _start_creativecommons_license(self, attrsD):
        self.push('license', 1)
    def _end_creativecommons_license(self):
        self.pop('license')
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label): return
value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
if value not in tags:
tags.append(FeedParserDict({'term': term, 'scheme': scheme, 'label': label}))
    def _start_category(self, attrsD):
        # Atom carries term/scheme/label as attributes; RSS uses 'domain'
        if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
        term = attrsD.get('term')
        scheme = attrsD.get('scheme', attrsD.get('domain'))
        label = attrsD.get('label')
        self._addTag(term, scheme, label)
        self.push('category', 1)
    _start_dc_subject = _start_category
    _start_keywords = _start_category
    def _end_itunes_keywords(self):
        # itunes:keywords is a whitespace-separated list of terms
        for term in self.pop('itunes_keywords').split():
            self._addTag(term, 'http://www.itunes.com/', None)
    def _start_itunes_category(self, attrsD):
        self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
        self.push('category', 1)
    def _end_category(self):
        value = self.pop('category')
        if not value: return
        context = self._getContext()
        tags = context['tags']
        # element text fills in the term of the tag opened by the start
        # handler if that tag had no 'term' attribute
        if value and len(tags) and not tags[-1]['term']:
            tags[-1]['term'] = value
        else:
            self._addTag(value, None, None)
    _end_dc_subject = _end_category
    _end_keywords = _end_category
    _end_itunes_category = _end_category
    def _start_cloud(self, attrsD):
        # rssCloud element: all information is in the attributes
        self._getContext()['cloud'] = FeedParserDict(attrsD)
    def _start_link(self, attrsD):
        """Handle <link>: Atom attribute form and RSS text-content form."""
        attrsD.setdefault('rel', 'alternate')
        attrsD.setdefault('type', 'text/html')
        attrsD = self._itsAnHrefDamnIt(attrsD)
        if attrsD.has_key('href'):
            attrsD['href'] = self.resolveURI(attrsD['href'])
        expectingText = self.infeed or self.inentry or self.insource
        context = self._getContext()
        context.setdefault('links', [])
        context['links'].append(FeedParserDict(attrsD))
        if attrsD['rel'] == 'enclosure':
            self._start_enclosure(attrsD)
        if attrsD.has_key('href'):
            # Atom-style link: no element text expected
            expectingText = 0
            if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
                context['link'] = attrsD['href']
        else:
            # RSS-style link: the URI comes as element text (handled in pop)
            self.push('link', expectingText)
    _start_producturl = _start_link
    def _end_link(self):
        value = self.pop('link')
        context = self._getContext()
        if self.intextinput:
            context['textinput']['link'] = value
        if self.inimage:
            context['image']['link'] = value
    _end_producturl = _end_link
    def _start_guid(self, attrsD):
        # guid doubles as the entry link unless isPermaLink="false"
        self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
        self.push('id', 1)
    def _end_guid(self):
        value = self.pop('id')
        self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
        if self.guidislink:
            # guid acts as link, but only if 'ispermalink' is not present or is 'true',
            # and only if the item doesn't already have a link element
            self._save('link', value)
    def _start_title(self, attrsD):
        self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    _start_dc_title = _start_title
    _start_media_title = _start_title
    def _end_title(self):
        value = self.popContent('title')
        context = self._getContext()
        # titles inside textinput/image belong to those sub-dicts
        if self.intextinput:
            context['textinput']['title'] = value
        elif self.inimage:
            context['image']['title'] = value
    _end_dc_title = _end_title
    _end_media_title = _end_title
    def _start_description(self, attrsD):
        context = self._getContext()
        if context.has_key('summary'):
            # a summary already exists, so treat this description as content
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
    def _start_abstract(self, attrsD):
        self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    def _end_description(self):
        if self._summaryKey == 'content':
            self._end_content()
        else:
            value = self.popContent('description')
            context = self._getContext()
            if self.intextinput:
                context['textinput']['description'] = value
            elif self.inimage:
                context['image']['description'] = value
        self._summaryKey = None
    _end_abstract = _end_description
    def _start_info(self, attrsD):
        self.pushContent('info', attrsD, 'text/plain', 1)
    _start_feedburner_browserfriendly = _start_info
    def _end_info(self):
        self.popContent('info')
    _end_feedburner_browserfriendly = _end_info
    def _start_generator(self, attrsD):
        # generator may carry a url/uri/href attribute pointing at the tool
        if attrsD:
            attrsD = self._itsAnHrefDamnIt(attrsD)
            if attrsD.has_key('href'):
                attrsD['href'] = self.resolveURI(attrsD['href'])
        self._getContext()['generator_detail'] = FeedParserDict(attrsD)
        self.push('generator', 1)
    def _end_generator(self):
        value = self.pop('generator')
        context = self._getContext()
        if context.has_key('generator_detail'):
            context['generator_detail']['name'] = value
    def _start_admin_generatoragent(self, attrsD):
        # admin:generatorAgent carries its value as rdf:resource
        self.push('generator', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('generator')
        self._getContext()['generator_detail'] = FeedParserDict({'href': value})
    def _start_admin_errorreportsto(self, attrsD):
        # admin:errorReportsTo carries its value as rdf:resource
        self.push('errorreportsto', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('errorreportsto')
def _start_summary(self, attrsD):
context = self._getContext()
if context.has_key('summary'):
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self._summaryKey = 'summary'
self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
_start_itunes_summary = _start_summary
def _end_summary(self):
if self._summaryKey == 'content':
self._end_content()
else:
self.popContent(self._summaryKey or 'summary')
self._summaryKey = None
_end_itunes_summary = _end_summary
def _start_enclosure(self, attrsD):
attrsD = self._itsAnHrefDamnIt(attrsD)
self._getContext().setdefault('enclosures', []).append(FeedParserDict(attrsD))
href = attrsD.get('href')
if href:
context = self._getContext()
if not context.get('id'):
context['id'] = href
def _start_source(self, attrsD):
self.insource = 1
def _end_source(self):
self.insource = 0
self._getContext()['source'] = copy.deepcopy(self.sourcedata)
self.sourcedata.clear()
def _start_content(self, attrsD):
self.pushContent('content', attrsD, 'text/plain', 1)
src = attrsD.get('src')
if src:
self.contentparams['src'] = src
self.push('content', 1)
def _start_prodlink(self, attrsD):
self.pushContent('content', attrsD, 'text/html', 1)
def _start_body(self, attrsD):
self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
_start_xhtml_body = _start_body
def _start_content_encoded(self, attrsD):
self.pushContent('content', attrsD, 'text/html', 1)
_start_fullitem = _start_content_encoded
def _end_content(self):
copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
value = self.popContent('content')
if copyToDescription:
self._save('description', value)
_end_body = _end_content
_end_xhtml_body = _end_content
_end_content_encoded = _end_content
_end_fullitem = _end_content
_end_prodlink = _end_content
    def _start_itunes_image(self, attrsD):
        # iTunes image/link elements keep their URL in the href attribute.
        self.push('itunes_image', 0)
        self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
    _start_itunes_link = _start_itunes_image
def _end_itunes_block(self):
value = self.pop('itunes_block', 0)
self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
def _end_itunes_explicit(self):
value = self.pop('itunes_explicit', 0)
self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0
if _XML_AVAILABLE:
    class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
        """SAX handler for well-formed XML feeds.

        All real parsing logic lives in _FeedParserMixin; this class only
        adapts the namespace-aware SAX callbacks onto the mixin's
        unknown_starttag/unknown_endtag/handle_data interface.
        """
        def __init__(self, baseuri, baselang, encoding):
            if _debug: sys.stderr.write('trying StrictFeedParser\n')
            xml.sax.handler.ContentHandler.__init__(self)
            _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
            self.bozo = 0    # flipped to 1 when the parser reports an error
            self.exc = None  # the exception that triggered bozo
        def startPrefixMapping(self, prefix, uri):
            self.trackNamespace(prefix, uri)
        def startElementNS(self, name, qname, attrs):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if lowernamespace.find('backend.userland.com/rss') != -1:
                # match any backend.userland.com namespace
                namespace = 'http://backend.userland.com/rss'
                lowernamespace = namespace
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = None
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if givenprefix and (prefix is None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
                raise UndeclaredNamespace("'%s' is not associated with a namespace" % givenprefix)
            if prefix:
                localname = prefix + ':' + localname
            localname = str(localname).lower()
            if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))
            # qname implementation is horribly broken in Python 2.1 (it
            # doesn't report any), and slightly broken in Python 2.2 (it
            # doesn't report the xml: namespace). So we match up namespaces
            # with a known list first, and then possibly override them with
            # the qnames the SAX parser gives us (if indeed it gives us any
            # at all). Thanks to MatejC for helping me test this and
            # tirelessly telling me that it didn't work yet.
            attrsD = {}
            for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
                lowernamespace = (namespace or '').lower()
                prefix = self._matchnamespaces.get(lowernamespace, '')
                if prefix:
                    attrlocalname = prefix + ':' + attrlocalname
                attrsD[str(attrlocalname).lower()] = attrvalue
            for qname in attrs.getQNames():
                attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
            self.unknown_starttag(localname, attrsD.items())
        def characters(self, text):
            self.handle_data(text)
        def endElementNS(self, name, qname):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = ''
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if prefix:
                localname = prefix + ':' + localname
            localname = str(localname).lower()
            self.unknown_endtag(localname)
        def error(self, exc):
            # non-fatal parse error: mark the feed as bozo but keep going
            self.bozo = 1
            self.exc = exc
        def fatalError(self, exc):
            self.error(exc)
            raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
    """SGML-based HTML pass-through processor.

    Parses HTML and reconstructs it verbatim into self.pieces; subclasses
    override the handle_*/unknown_* callbacks to filter or rewrite markup.
    """
    # void elements that must be emitted as <tag /> with no closing tag
    elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
      'img', 'input', 'isindex', 'link', 'meta', 'param']
    def __init__(self, encoding):
        self.encoding = encoding
        if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
        sgmllib.SGMLParser.__init__(self)
    def reset(self):
        self.pieces = []
        sgmllib.SGMLParser.reset(self)
    def _shorttag_replace(self, match):
        # expand XML-style <tag/>: void elements stay self-closed,
        # everything else becomes an explicit open/close pair
        tag = match.group(1)
        if tag in self.elements_no_end_tag:
            return '<' + tag + ' />'
        else:
            return '<' + tag + '></' + tag + '>'
    def feed(self, data):
        # Escape markup declarations that are not DOCTYPE/comment/CDATA so
        # sgmllib does not choke on them.  (The replacement string must be
        # the *entity* form '&lt;!', not a literal '<!'.)
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
        #data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
        data = re.sub(r'<([^<\s]+?)\s*/>', self._shorttag_replace, data)
        # decode numeric quote entities sgmllib would otherwise mangle
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
        if self.encoding and type(data) == type(u''):
            data = data.encode(self.encoding)
        sgmllib.SGMLParser.feed(self, data)
    def normalize_attrs(self, attrs):
        # utility method to be called by descendants: lowercase attribute
        # names, and lowercase the values of rel/type attributes
        attrs = [(k.lower(), v) for k, v in attrs]
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        return attrs
    def unknown_starttag(self, tag, attrs):
        # called for each start tag
        # attrs is a list of (attr, value) tuples
        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
        uattrs = []
        # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
        for key, value in attrs:
            if type(value) != type(u''):
                value = unicode(value, self.encoding)
            uattrs.append((unicode(key, self.encoding), value))
        strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs]).encode(self.encoding)
        if tag in self.elements_no_end_tag:
            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
        else:
            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
    def unknown_endtag(self, tag):
        # called for each end tag, e.g. for </pre>, tag will be 'pre'
        # Reconstruct the original end tag.
        if tag not in self.elements_no_end_tag:
            self.pieces.append("</%(tag)s>" % locals())
    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        # Reconstruct the original character reference.
        self.pieces.append('&#%(ref)s;' % locals())
    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        # Reconstruct the original entity reference.
        self.pieces.append('&%(ref)s;' % locals())
    def handle_data(self, text):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        # Store the original text verbatim.
        if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
        self.pieces.append(text)
    def handle_comment(self, text):
        # called for each HTML comment, e.g. <!-- insert Javascript code here -->
        # Reconstruct the original comment.
        self.pieces.append('<!--%(text)s-->' % locals())
    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # Reconstruct original processing instruction.
        self.pieces.append('<?%(text)s>' % locals())
    def handle_decl(self, text):
        # called for the DOCTYPE, if present, e.g.
        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
        #     "http://www.w3.org/TR/html4/loose.dtd">
        # Reconstruct original DOCTYPE
        self.pieces.append('<!%(text)s>' % locals())
    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
    def _scan_name(self, i, declstartpos):
        # override of sgmllib's name scanner: accepts a wider set of
        # declaration-name characters (including '.' and ':')
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = self._new_declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1 # end of buffer
            return name.lower(), m.end()
        else:
            self.handle_data(rawdata)
#            self.updatepos(declstartpos, i)
            return None, -1
    def output(self):
        '''Return processed HTML as a single string'''
        return ''.join([str(p) for p in self.pieces])
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
    """SGML-based fallback parser used when the feed is not well-formed XML."""
    def __init__(self, baseuri, baselang, encoding):
        sgmllib.SGMLParser.__init__(self)
        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
    def decodeEntities(self, element, data):
        # Normalize numeric character references for the XML special
        # characters to their named-entity forms so that re-escaped feed
        # content round-trips consistently.
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        # For non-XML content types, fully decode the named entities into
        # their literal characters.
        if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
        return data
class _RelativeURIResolver(_BaseHTMLProcessor):
    # (tag, attribute) pairs whose values are URIs that may be relative
    # and therefore need resolving against the base URI
    relative_uris = [('a', 'href'),
                     ('applet', 'codebase'),
                     ('area', 'href'),
                     ('blockquote', 'cite'),
                     ('body', 'background'),
                     ('del', 'cite'),
                     ('form', 'action'),
                     ('frame', 'longdesc'),
                     ('frame', 'src'),
                     ('iframe', 'longdesc'),
                     ('iframe', 'src'),
                     ('head', 'profile'),
                     ('img', 'longdesc'),
                     ('img', 'src'),
                     ('img', 'usemap'),
                     ('input', 'src'),
                     ('input', 'usemap'),
                     ('ins', 'cite'),
                     ('link', 'href'),
                     ('object', 'classid'),
                     ('object', 'codebase'),
                     ('object', 'data'),
                     ('object', 'usemap'),
                     ('q', 'cite'),
                     ('script', 'src')]
    def __init__(self, baseuri, encoding):
        _BaseHTMLProcessor.__init__(self, encoding)
        self.baseuri = baseuri
    def resolveURI(self, uri):
        # join a possibly-relative URI with the stored base URI
        return _urljoin(self.baseuri, uri)
    def unknown_starttag(self, tag, attrs):
        # rewrite only the attributes listed in relative_uris; all other
        # attributes pass through unchanged
        attrs = self.normalize_attrs(attrs)
        attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
        _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding):
    '''Return htmlSource with relative URI attributes resolved against baseURI.'''
    if _debug: sys.stderr.write('entering _resolveRelativeURIs\n')
    resolver = _RelativeURIResolver(baseURI, encoding)
    resolver.feed(htmlSource)
    return resolver.output()
class _HTMLSanitizer(_BaseHTMLProcessor):
    # Whitelist-based sanitizer: any element or attribute not listed below
    # is stripped from the output.
    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big',
      'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col',
      'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset',
      'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input',
      'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup',
      'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike',
      'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th',
      'thead', 'tr', 'tt', 'u', 'ul', 'var']
    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
      'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing',
      'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols',
      'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled',
      'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace',
      'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',
      'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',
      'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
      'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type',
      'usemap', 'valign', 'value', 'vspace', 'width']
    # elements whose entire *content* (not just the tags) must be dropped
    unacceptable_elements_with_end_tag = ['script', 'applet']
    def reset(self):
        _BaseHTMLProcessor.reset(self)
        # depth counter: non-zero while inside a script/applet element, so
        # that handle_data can suppress their contents
        self.unacceptablestack = 0
    def unknown_starttag(self, tag, attrs):
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack += 1
            return
        # keep only whitelisted attributes
        attrs = self.normalize_attrs(attrs)
        attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes]
        _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
    def unknown_endtag(self, tag):
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack -= 1
            return
        _BaseHTMLProcessor.unknown_endtag(self, tag)
    def handle_pi(self, text):
        # processing instructions are dropped entirely
        pass
    def handle_decl(self, text):
        # declarations (DOCTYPE etc.) are dropped entirely
        pass
    def handle_data(self, text):
        # text inside script/applet is swallowed; everything else passes
        if not self.unacceptablestack:
            _BaseHTMLProcessor.handle_data(self, text)
def _sanitizeHTML(htmlSource, encoding):
    '''Strip unsafe markup from htmlSource and return the sanitized result.

    If a Tidy interface is available (and TIDY_MARKUP is enabled), the
    sanitized output is additionally cleaned up into XHTML.
    '''
    p = _HTMLSanitizer(encoding)
    p.feed(htmlSource)
    data = p.output()
    if TIDY_MARKUP:
        # loop through list of preferred Tidy interfaces looking for one that's installed,
        # then set up a common _tidy function to wrap the interface-specific API.
        _tidy = None
        for tidy_interface in PREFERRED_TIDY_INTERFACES:
            try:
                if tidy_interface == "uTidy":
                    from tidy import parseString as _utidy
                    def _tidy(data, **kwargs):
                        return str(_utidy(data, **kwargs))
                    break
                elif tidy_interface == "mxTidy":
                    from mx.Tidy import Tidy as _mxtidy
                    def _tidy(data, **kwargs):
                        nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
                        return data
                    break
            except:
                # this tidy interface is not installed; try the next one
                pass
        if _tidy:
            # tidy works on utf-8 byte strings, so round-trip unicode input
            utf8 = type(data) == type(u'')
            if utf8:
                data = data.encode('utf-8')
            data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
            if utf8:
                data = unicode(data, 'utf-8')
            # keep only the contents between <body> and </body>
            if data.count('<body'):
                data = data.split('<body', 1)[1]
                if data.count('>'):
                    data = data.split('>', 1)[1]
            if data.count('</body'):
                data = data.split('</body', 1)[0]
    data = data.strip().replace('\r\n', '\n')
    return data
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
    """urllib2 handler chain customized for feed fetching.

    Converts HTTP errors into file-like results carrying a .status code,
    follows redirects, and upgrades basic auth to digest auth on 401.
    """
    def http_error_default(self, req, fp, code, msg, headers):
        # treat any 3xx other than 304 as a redirect; otherwise wrap the
        # response so the caller can inspect .status
        if ((code / 100) == 3) and (code != 304):
            return self.http_error_302(req, fp, code, msg, headers)
        infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        infourl.status = code
        return infourl
    def http_error_302(self, req, fp, code, msg, headers):
        # follow the redirect only when the server actually sent a Location
        # header; otherwise hand back the response as-is with a status
        if 'location' in headers.dict:
            infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl
    def http_error_301(self, req, fp, code, msg, headers):
        if 'location' in headers.dict:
            infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl
    http_error_300 = http_error_302
    http_error_303 = http_error_302
    http_error_307 = http_error_302
    def http_error_401(self, req, fp, code, msg, headers):
        # Check if
        # - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth, AND
        # - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
        # If all conditions hold, parse authentication information
        # out of the Authorization header we sent the first time
        # (for the username and password) and the WWW-Authenticate
        # header the server sent back (for the realm) and retry
        # the request with the appropriate digest auth headers instead.
        # This evil genius hack has been brought to you by Aaron Swartz.
        host = urlparse.urlparse(req.get_full_url())[1]
        try:
            # NOTE: lexicographic version compare; adequate for the 2.x range
            assert sys.version.split()[0] >= '2.3.3'
            assert base64 is not None
            user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':')
            realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
            self.add_password(realm, host, user, passw)
            retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
            self.reset_retry_count()
            return retry
        except:
            # anything going wrong above means we cannot retry with digest
            # auth; fall back to the default error handling (best-effort)
            return self.http_error_default(req, fp, code, msg, headers)
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers):
    """URL, filename, or string --> stream

    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner. Returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.

    If the etag argument is supplied, it will be used as the value of an
    If-None-Match request header.

    If the modified argument is supplied, it must be a tuple of 9 integers
    as returned by gmtime() in the standard Python time module. This MUST
    be in GMT (Greenwich Mean Time). The formatted date/time will be used
    as the value of an If-Modified-Since request header.

    If the agent argument is supplied, it will be used as the value of a
    User-Agent request header.

    If the referrer argument is supplied, it will be used as the value of a
    Referer[sic] request header.

    If handlers is supplied, it is a list of handlers used to build a
    urllib2 opener.
    """
    # already a file-like object: hand it straight back
    if hasattr(url_file_stream_or_string, 'read'):
        return url_file_stream_or_string
    if url_file_stream_or_string == '-':
        return sys.stdin
    if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'):
        if not agent:
            agent = USER_AGENT
        # test for inline user:password for basic auth
        auth = None
        if base64:
            urltype, rest = urllib.splittype(url_file_stream_or_string)
            realhost, rest = urllib.splithost(rest)
            if realhost:
                user_passwd, realhost = urllib.splituser(realhost)
                if user_passwd:
                    # strip the credentials from the URL and carry them in
                    # an Authorization header instead
                    url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
                    auth = base64.encodestring(user_passwd).strip()
        # try to open with urllib2 (to use optional headers)
        request = urllib2.Request(url_file_stream_or_string)
        request.add_header('User-Agent', agent)
        if etag:
            request.add_header('If-None-Match', etag)
        if modified:
            # format into an RFC 1123-compliant timestamp. We can't use
            # time.strftime() since the %a and %b directives can be affected
            # by the current locale, but RFC 2616 states that dates must be
            # in English.
            short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
            months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
            request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
        if referrer:
            request.add_header('Referer', referrer)
        if gzip and zlib:
            request.add_header('Accept-encoding', 'gzip, deflate')
        elif gzip:
            request.add_header('Accept-encoding', 'gzip')
        elif zlib:
            request.add_header('Accept-encoding', 'deflate')
        else:
            request.add_header('Accept-encoding', '')
        if auth:
            request.add_header('Authorization', 'Basic %s' % auth)
        if ACCEPT_HEADER:
            request.add_header('Accept', ACCEPT_HEADER)
        request.add_header('A-IM', 'feed') # RFC 3229 support
        # apply() is deprecated since Python 2.3; call build_opener directly
        # with argument unpacking instead
        opener = urllib2.build_opener(*([_FeedURLHandler()] + handlers))
        opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
        try:
            return opener.open(request)
        finally:
            opener.close() # JohnD
    # try to open with native open function (if url_file_stream_or_string is a filename)
    try:
        return open(url_file_stream_or_string)
    except:
        pass
    # treat url_file_stream_or_string as string
    return _StringIO(str(url_file_stream_or_string))
_date_handlers = []  # registered date parsers, tried newest-first by _parse_date
def registerDateHandler(func):
    '''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
    # insert at the front so later registrations take precedence
    _date_handlers.insert(0, func)
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-MM', 'YYYY-?OOO',
                'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
                '-YY-?MM', '-OOO', '-YY',
                '--MM-?DD', '--MM',
                '---DD',
                'CC', '']
# Each template is expanded into a named-group regex, with an optional
# time-of-day and timezone suffix appended.
_iso8601_re = [
    tmpl.replace(
    'YYYY', r'(?P<year>\d{4})').replace(
    'YY', r'(?P<year>\d\d)').replace(
    'MM', r'(?P<month>[01]\d)').replace(
    'DD', r'(?P<day>[0123]\d)').replace(
    'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
    'CC', r'(?P<century>\d\d$)')
    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
    + r'(:(?P<second>\d{2}))?'
    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
    for tmpl in _iso8601_tmpl]
# NOTE: 'tmpl' and 'regex' leak out of the list comprehensions above only
# under Python 2 scoping rules; these dels clean up the module namespace.
del tmpl
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
del regex
def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105'''
    # try each precompiled template regex in order; first match wins
    m = None
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m: break
    if not m: return
    if m.span() == (0, 0): return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    # missing pieces of the date default to "today" in GMT
    year = params.get('year', '--')
    if not year or year == '--':
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are not normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
            params.get('year', 0) or params.get('month', 0):
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params.keys():
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    second = int(params.get('second', 0))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    # daylight savings is complex, but not needed for feedparser's purposes
    # as time zones, if specified, include mention of whether it is active
    # (e.g. PST vs. PDT, CET). Using -1 is implementation-dependent and
    # and most implementations have DST bugs
    daylight_savings_flag = 0
    tm = [year, month, day, hour, minute, second, weekday,
        ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments: shift hours/minutes toward GMT and
    # let mktime normalize any overflow
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    return time.localtime(time.mktime(tm))
registerDateHandler(_parse_date_iso8601)
# 8-bit date handling routines written by ytrewq1.
# Hangul tokens used by the Korean date formats below.
_korean_year = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day = u'\uc77c' # c0cf in euc-kr
_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr

_korean_onblog_date_re = \
    re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
    (_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
    re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
    (_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
    '''Parse a string according to the OnBlog 8-bit date format'''
    m = _korean_onblog_date_re.match(dateString)
    if not m: return
    # rewrite into W3DTF with a fixed +09:00 (KST) offset and delegate
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
                 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
                 'zonediff': '+09:00'}
    if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
    '''Parse a string according to the Nate 8-bit date format'''
    m = _korean_nate_date_re.match(dateString)
    if not m: return
    # Nate uses 12-hour time with a Hangul AM/PM marker; fold into 24-hour
    hour24 = int(m.group(5))
    if m.group(4) == _korean_pm:
        hour24 += 12
    hour = '%02d' % hour24
    # rewrite into W3DTF with a fixed +09:00 (KST) offset and delegate
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),
                 'hour': hour, 'minute': m.group(6), 'second': m.group(7),
                 'zonediff': '+09:00'}
    if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
_mssql_date_re = \
    re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
    '''Parse a string according to the MS SQL date format'''
    m = _mssql_date_re.match(dateString)
    if not m: return
    # NOTE(review): the +09:00 offset is hard-coded here just like in the
    # Korean handlers above -- presumably intentional for the feeds this
    # targets, but worth confirming; fractional seconds (group 7) are dropped
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
                 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
                 'zonediff': '+09:00'}
    if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)
# Unicode strings for Greek date strings
_greek_months = \
  { \
   u'\u0399\u03b1\u03bd': u'Jan',       # c9e1ed in iso-8859-7
   u'\u03a6\u03b5\u03b2': u'Feb',       # d6e5e2 in iso-8859-7
   u'\u039c\u03ac\u03ce': u'Mar',       # ccdcfe in iso-8859-7
   u'\u039c\u03b1\u03ce': u'Mar',       # cce1fe in iso-8859-7
   u'\u0391\u03c0\u03c1': u'Apr',       # c1f0f1 in iso-8859-7
   u'\u039c\u03ac\u03b9': u'May',       # ccdce9 in iso-8859-7
   u'\u039c\u03b1\u03ca': u'May',       # cce1fa in iso-8859-7
   u'\u039c\u03b1\u03b9': u'May',       # cce1e9 in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
   u'\u0399\u03bf\u03bd': u'Jun',       # c9efed in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
   u'\u0399\u03bf\u03bb': u'Jul',       # c9f9eb in iso-8859-7
   u'\u0391\u03cd\u03b3': u'Aug',       # c1fde3 in iso-8859-7
   u'\u0391\u03c5\u03b3': u'Aug',       # c1f5e3 in iso-8859-7
   u'\u03a3\u03b5\u03c0': u'Sep',       # d3e5f0 in iso-8859-7
   u'\u039f\u03ba\u03c4': u'Oct',       # cfeaf4 in iso-8859-7
   u'\u039d\u03bf\u03ad': u'Nov',       # cdefdd in iso-8859-7
   u'\u039d\u03bf\u03b5': u'Nov',       # cdefe5 in iso-8859-7
   u'\u0394\u03b5\u03ba': u'Dec',       # c4e5ea in iso-8859-7
  }

_greek_wdays = \
  { \
   u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
   u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
   u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
   u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
   u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
   u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
   u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
  }

_greek_date_format_re = \
    re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')

def _parse_date_greek(dateString):
    '''Parse a string according to a Greek 8-bit date format.'''
    m = _greek_date_format_re.match(dateString)
    if not m: return
    try:
        # only these two table lookups can fail
        wday = _greek_wdays[m.group(1)]
        month = _greek_months[m.group(3)]
    except KeyError:
        # unrecognized weekday or month name: not a Greek date after all
        return
    # translate into an English RFC 822 date and delegate
    rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
                 {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
                  'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
                  'zonediff': m.group(8)}
    if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
    return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
_hungarian_months = \
  { \
    u'janu\u00e1r':   u'01',  # e1 in iso-8859-2
    u'febru\u00e1ri': u'02',  # e1 in iso-8859-2
    u'm\u00e1rcius':  u'03',  # e1 in iso-8859-2
    u'\u00e1prilis':  u'04',  # e1 in iso-8859-2
    u'm\u00e1ujus':   u'05',  # e1 in iso-8859-2
    u'j\u00fanius':   u'06',  # fa in iso-8859-2
    u'j\u00falius':   u'07',  # fa in iso-8859-2
    u'augusztus':     u'08',
    u'szeptember':    u'09',
    u'okt\u00f3ber':  u'10',  # f3 in iso-8859-2
    u'november':      u'11',
    u'december':      u'12',
  }

_hungarian_date_format_re = \
  re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')

def _parse_date_hungarian(dateString):
    '''Parse a string according to a Hungarian 8-bit date format.'''
    m = _hungarian_date_format_re.match(dateString)
    if not m: return
    try:
        # only the month-name lookup can fail; the zero-padding below cannot
        month = _hungarian_months[m.group(2)]
        day = m.group(3)
        if len(day) == 1:
            day = '0' + day
        hour = m.group(4)
        if len(hour) == 1:
            hour = '0' + hour
    except KeyError:
        # unrecognized month name: not a Hungarian date after all
        return
    # rewrite into W3DTF (no seconds in this format) and delegate
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
                {'year': m.group(1), 'month': month, 'day': day,\
                 'hour': hour, 'minute': m.group(5),\
                 'zonediff': m.group(6)}
    if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def _parse_date_w3dtf(dateString):
    '''Parse a W3C Date-Time Format (profile of ISO 8601) string into a
    9-tuple in GMT.'''
    def __extract_date(m):
        # returns (year, month, day); (0, 0, 0) signals an unusable date
        year = int(m.group('year'))
        if year < 100:
            year = 100 * int(time.gmtime()[0] / 100) + int(year)
        if year < 1000:
            return 0, 0, 0
        julian = m.group('julian')
        if julian:
            # ordinal (day-of-year) date: start from an estimate and nudge
            # month/day until mktime agrees on the day-of-year
            julian = int(julian)
            month = julian / 30 + 1
            day = julian % 30 + 1
            jday = None
            while jday != julian:
                t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
                jday = time.gmtime(t)[-2]
                diff = abs(jday - julian)
                if jday > julian:
                    if diff < day:
                        day = day - diff
                    else:
                        month = month - 1
                        day = 31
                elif jday < julian:
                    if day + diff < 28:
                        day = day + diff
                    else:
                        month = month + 1
            return year, month, day
        month = m.group('month')
        day = 1
        if month is None:
            month = 1
        else:
            month = int(month)
            day = m.group('day')
            if day:
                day = int(day)
            else:
                day = 1
        return year, month, day
    def __extract_time(m):
        # returns (hours, minutes, seconds), defaulting to midnight
        if not m:
            return 0, 0, 0
        hours = m.group('hours')
        if not hours:
            return 0, 0, 0
        hours = int(hours)
        minutes = int(m.group('minutes'))
        seconds = m.group('seconds')
        if seconds:
            seconds = int(seconds)
        else:
            seconds = 0
        return hours, minutes, seconds
    def __extract_tzd(m):
        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
        if not m:
            return 0
        tzd = m.group('tzd')
        if not tzd:
            return 0
        if tzd == 'Z':
            return 0
        hours = int(m.group('tzdhours'))
        minutes = m.group('tzdminutes')
        if minutes:
            minutes = int(minutes)
        else:
            minutes = 0
        offset = (hours*60 + minutes) * 60
        # sign is inverted: '+hh:mm' means local is ahead of UTC, so the
        # correction back to UTC is negative
        if tzd[0] == '+':
            return -offset
        return offset

    __date_re = ('(?P<year>\d\d\d\d)'
                 '(?:(?P<dsep>-|)'
                 '(?:(?P<julian>\d\d\d)'
                 '|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
    __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
    __tzd_rx = re.compile(__tzd_re)
    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
                 '(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
                 + __tzd_re)
    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
    __datetime_rx = re.compile(__datetime_re)
    m = __datetime_rx.match(dateString)
    # the whole string must be consumed, not just a prefix
    if (m is None) or (m.group() != dateString): return
    gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
    if gmt[0] == 0: return
    return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
def _parse_date_rfc822(dateString):
    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
    data = dateString.split()
    # drop a leading day-of-week token (with or without trailing comma/dot)
    if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
        del data[0]
    if len(data) == 4:
        # 'day month year time[+zone]': split a '+zone' suffix off the time
        s = data[3]
        i = s.find('+')
        if i > 0:
            data[3:] = [s[:i], s[i+1:]]
        else:
            data.append('')
        dateString = " ".join(data)
    if len(data) < 5:
        # date only: assume midnight GMT
        dateString += ' 00:00:00 GMT'
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        return time.gmtime(rfc822.mktime_tz(tm))
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
rfc822._timezones.update(_additional_timezones)
registerDateHandler(_parse_date_rfc822)
def _parse_date(dateString):
    '''Parses a variety of date formats into a 9-tuple in GMT'''
    # try each registered handler (newest registrations first); the first
    # one that returns a valid 9-tuple of integers wins
    for handler in _date_handlers:
        try:
            date9tuple = handler(dateString)
            if not date9tuple: continue
            if len(date9tuple) != 9:
                if _debug: sys.stderr.write('date handler function must return 9-tuple\n')
                raise ValueError
            # sanity check: every element must be convertible to int
            map(int, date9tuple)
            return date9tuple
        except Exception, e:
            # a broken handler must not prevent the others from trying
            if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e)))
            pass
    return None
def _getCharacterEncoding(http_headers, xml_data):
    '''Get the character encoding of the XML document
    http_headers is a dictionary
    xml_data is a raw string (not Unicode)
    This is so much trickier than it sounds, it's not even funny.
    According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
    is application/xml, application/*+xml,
    application/xml-external-parsed-entity, or application/xml-dtd,
    the encoding given in the charset parameter of the HTTP Content-Type
    takes precedence over the encoding given in the XML prefix within the
    document, and defaults to 'utf-8' if neither are specified. But, if
    the HTTP Content-Type is text/xml, text/*+xml, or
    text/xml-external-parsed-entity, the encoding given in the XML prefix
    within the document is ALWAYS IGNORED and only the encoding given in
    the charset parameter of the HTTP Content-Type header should be
    respected, and it defaults to 'us-ascii' if not specified.
    Furthermore, discussion on the atom-syntax mailing list with the
    author of RFC 3023 leads me to the conclusion that any document
    served with a Content-Type of text/* and no charset parameter
    must be treated as us-ascii. (We now do this.) And also that it
    must always be flagged as non-well-formed. (We now do this too.)
    If Content-Type is unspecified (input was local file or non-HTTP source)
    or unrecognized (server just got it totally wrong), then go by the
    encoding given in the XML prefix of the document and default to
    'iso-8859-1' as per the HTTP specification (RFC 2616).
    Then, assuming we didn't find a character encoding in the HTTP headers
    (and the HTTP Content-type allowed us to look in the body), we need
    to sniff the first few bytes of the XML data and try to determine
    whether the encoding is ASCII-compatible. Section F of the XML
    specification shows the way here:
    http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    If the sniffed encoding is not ASCII-compatible, we need to make it
    ASCII compatible so that we can sniff further into the XML declaration
    to find the encoding attribute, which will tell us the true encoding.
    Of course, none of this guarantees that we will be able to parse the
    feed in the declared character encoding (assuming it was declared
    correctly, which many are not). CJKCodecs and iconv_codec help a lot;
    you should definitely install them if you can.
    http://cjkpython.i18n.org/

    Returns a 5-tuple:
    (true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding,
     acceptable_content_type)
    '''
    def _parseHTTPContentType(content_type):
        '''takes HTTP Content-Type header and returns (content type, charset)
        If no charset is specified, returns (content type, '')
        If no content type is specified, returns ('', '')
        Both return parameters are guaranteed to be lowercase strings
        '''
        content_type = content_type or ''
        content_type, params = cgi.parse_header(content_type)
        return content_type, params.get('charset', '').replace("'", '')
    sniffed_xml_encoding = ''
    xml_encoding = ''
    true_encoding = ''
    http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type'))
    # Must sniff for non-ASCII-compatible character encodings before
    # searching for XML declaration. This heuristic is defined in
    # section F of the XML specification:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    # Each non-ASCII-compatible branch below transcodes xml_data to utf-8
    # so the encoding="..." attribute can be regex-matched afterwards.
    try:
        if xml_data[:4] == '\x4c\x6f\xa7\x94':
            # EBCDIC
            xml_data = _ebcdic_to_ascii(xml_data)
        elif xml_data[:4] == '\x00\x3c\x00\x3f':
            # UTF-16BE
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'):
            # UTF-16BE with BOM
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x3f\x00':
            # UTF-16LE
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'):
            # UTF-16LE with BOM
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\x00\x3c':
            # UTF-32BE
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x00\x00':
            # UTF-32LE
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\xfe\xff':
            # UTF-32BE with BOM
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\xff\xfe\x00\x00':
            # UTF-32LE with BOM
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
        elif xml_data[:3] == '\xef\xbb\xbf':
            # UTF-8 with BOM
            sniffed_xml_encoding = 'utf-8'
            xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
        else:
            # ASCII-compatible
            pass
        xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
    except:
        # Undecodable data: proceed as if no XML declaration was found.
        xml_encoding_match = None
    if xml_encoding_match:
        xml_encoding = xml_encoding_match.groups()[0].lower()
        # A declared "generic" UTF-16/32 name is ambiguous about endianness;
        # prefer the concrete byte-order we actually sniffed.
        if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')):
            xml_encoding = sniffed_xml_encoding
    acceptable_content_type = 0
    application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity')
    text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
    # RFC 3023 precedence rules (see docstring): application/* honors both
    # the HTTP charset and the XML declaration; text/* honors only HTTP.
    if (http_content_type in application_content_types) or \
        (http_content_type.startswith('application/') and http_content_type.endswith('+xml')):
        acceptable_content_type = 1
        true_encoding = http_encoding or xml_encoding or 'utf-8'
    elif (http_content_type in text_content_types) or \
        (http_content_type.startswith('text/')) and http_content_type.endswith('+xml'):
        # NOTE: 'and' binds tighter than 'or', so this groups the same way as
        # the application branch above despite the different parenthesization.
        acceptable_content_type = 1
        true_encoding = http_encoding or 'us-ascii'
    elif http_content_type.startswith('text/'):
        true_encoding = http_encoding or 'us-ascii'
    elif http_headers and (not http_headers.has_key('content-type')):
        # Non-HTTP source (local file etc.): fall back per RFC 2616.
        true_encoding = xml_encoding or 'iso-8859-1'
    else:
        true_encoding = xml_encoding or 'utf-8'
    return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
def _toUTF8(data, encoding):
    '''Changes an XML data stream on the fly to specify a new encoding
    data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
    encoding is a string recognized by encodings.aliases

    Returns the document re-encoded as UTF-8 with a canonical utf-8 XML
    declaration; raises (e.g. UnicodeDecodeError) if data cannot actually
    be decoded as the chosen encoding -- callers catch this and try others.
    '''
    if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
    # strip Byte Order Mark (if present)
    # A BOM, when found, is authoritative: each branch below overrides the
    # caller-supplied encoding to match the BOM before decoding.
    if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'):
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16be':
                sys.stderr.write('trying utf-16be instead\n')
        encoding = 'utf-16be'
        data = data[2:]
    elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'):
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16le':
                sys.stderr.write('trying utf-16le instead\n')
        encoding = 'utf-16le'
        data = data[2:]
    elif data[:3] == '\xef\xbb\xbf':
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-8':
                sys.stderr.write('trying utf-8 instead\n')
        encoding = 'utf-8'
        data = data[3:]
    elif data[:4] == '\x00\x00\xfe\xff':
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32be':
                sys.stderr.write('trying utf-32be instead\n')
        encoding = 'utf-32be'
        data = data[4:]
    elif data[:4] == '\xff\xfe\x00\x00':
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32le':
                sys.stderr.write('trying utf-32le instead\n')
        encoding = 'utf-32le'
        data = data[4:]
    # Decode (Python 2 unicode() builtin); may raise, caught by callers.
    newdata = unicode(data, encoding)
    if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
    # Replace any existing XML declaration with a canonical utf-8 one,
    # or prepend one if the document had none.
    declmatch = re.compile('^<\?xml[^>]*?>')
    newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
    if declmatch.search(newdata):
        newdata = declmatch.sub(newdecl, newdata)
    else:
        newdata = newdecl + u'\n' + newdata
    return newdata.encode('utf-8')
def _stripDoctype(data):
'''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)
rss_version may be 'rss091n' or None
stripped_data is the same XML document, minus the DOCTYPE
'''
entity_pattern = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE)
data = entity_pattern.sub('', data)
doctype_pattern = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE)
doctype_results = doctype_pattern.findall(data)
doctype = doctype_results and doctype_results[0] or ''
if doctype.lower().count('netscape'):
version = 'rss091n'
else:
version = None
data = doctype_pattern.sub('', data)
return version, data
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]):
    '''Parse a feed from a URL, file, stream, or string

    Returns a FeedParserDict containing (at least) 'feed', 'entries',
    'bozo', 'encoding', 'version' and 'namespaces'.  'bozo' is set (with
    'bozo_exception') whenever the feed could not be fetched, decoded,
    or parsed cleanly.

    NOTE(review): handlers=[] is a mutable default argument; it appears to
    be only read here, but confirm _open_resource does not mutate it.
    '''
    result = FeedParserDict()
    result['feed'] = FeedParserDict()
    result['entries'] = []
    if _XML_AVAILABLE:
        result['bozo'] = 0
    # Allow a single handler instance instead of a list of handlers.
    if type(handlers) == types.InstanceType:
        handlers = [handlers]
    try:
        f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers)
        data = f.read()
    except Exception, e:
        # Download failure: flag bozo and continue with empty data so the
        # caller still gets a well-formed result dict.
        result['bozo'] = 1
        result['bozo_exception'] = e
        data = ''
        f = None
    # if feed is gzip-compressed, decompress it
    if f and data and hasattr(f, 'headers'):
        if gzip and f.headers.get('content-encoding', '') == 'gzip':
            try:
                data = gzip.GzipFile(fileobj=_StringIO(data)).read()
            except Exception, e:
                # Some feeds claim to be gzipped but they're not, so
                # we get garbage. Ideally, we should re-request the
                # feed without the 'Accept-encoding: gzip' header,
                # but we don't.
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''
        elif zlib and f.headers.get('content-encoding', '') == 'deflate':
            try:
                data = zlib.decompress(data, -zlib.MAX_WBITS)
            except Exception, e:
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''
    # save HTTP headers
    if hasattr(f, 'info'):
        info = f.info()
        result['etag'] = info.getheader('ETag')
        last_modified = info.getheader('Last-Modified')
        if last_modified:
            result['modified'] = _parse_date(last_modified)
    if hasattr(f, 'url'):
        result['href'] = f.url
        result['status'] = 200
    if hasattr(f, 'status'):
        result['status'] = f.status
    if hasattr(f, 'headers'):
        result['headers'] = f.headers.dict
    if hasattr(f, 'close'):
        f.close()
    # there are four encodings to keep track of:
    # - http_encoding is the encoding declared in the Content-Type HTTP header
    # - xml_encoding is the encoding declared in the <?xml declaration
    # - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
    # - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
    http_headers = result.get('headers', {})
    result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
        _getCharacterEncoding(http_headers, data)
    if http_headers and (not acceptable_content_type):
        if http_headers.has_key('content-type'):
            bozo_message = '%s is not an XML media type' % http_headers['content-type']
        else:
            bozo_message = 'no Content-type specified'
        result['bozo'] = 1
        result['bozo_exception'] = NonXMLContentType(bozo_message)
    result['version'], data = _stripDoctype(data)
    baseuri = http_headers.get('content-location', result.get('href'))
    baselang = http_headers.get('content-language', None)
    # if server sent 304, we're done
    if result.get('status', 0) == 304:
        result['version'] = ''
        result['debug_message'] = 'The feed has not changed since you last checked, ' + \
            'so the server sent no data. This is a feature, not a bug!'
        return result
    # if there was a problem downloading, we're done
    if not data:
        return result
    # determine character encoding
    use_strict_parser = 0
    known_encoding = 0
    tried_encodings = []
    # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
    # Bare except is deliberate here: any decode failure just means "try
    # the next candidate encoding".
    for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding):
        if not proposed_encoding: continue
        if proposed_encoding in tried_encodings: continue
        tried_encodings.append(proposed_encoding)
        try:
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
            break
        except:
            pass
    # if no luck and we have auto-detection library, try that
    if (not known_encoding) and chardet:
        try:
            proposed_encoding = chardet.detect(data)['encoding']
            if proposed_encoding and (proposed_encoding not in tried_encodings):
                tried_encodings.append(proposed_encoding)
                data = _toUTF8(data, proposed_encoding)
                known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried utf-8 yet, try that
    if (not known_encoding) and ('utf-8' not in tried_encodings):
        try:
            proposed_encoding = 'utf-8'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried windows-1252 yet, try that
    if (not known_encoding) and ('windows-1252' not in tried_encodings):
        try:
            proposed_encoding = 'windows-1252'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck, give up
    if not known_encoding:
        result['bozo'] = 1
        result['bozo_exception'] = CharacterEncodingUnknown( \
            'document encoding unknown, I tried ' + \
            '%s, %s, utf-8, and windows-1252 but nothing worked' % \
            (result['encoding'], xml_encoding))
        result['encoding'] = ''
    elif proposed_encoding != result['encoding']:
        result['bozo'] = 1
        # NOTE(review): message reads 'documented declared' -- likely meant
        # 'document declared'; left unchanged since it is runtime output.
        result['bozo_exception'] = CharacterEncodingOverride( \
            'documented declared as %s, but parsed as %s' % \
            (result['encoding'], proposed_encoding))
        result['encoding'] = proposed_encoding
    if not _XML_AVAILABLE:
        use_strict_parser = 0
    if use_strict_parser:
        # initialize the SAX parser
        feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
        saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
        saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
        saxparser.setContentHandler(feedparser)
        saxparser.setErrorHandler(feedparser)
        source = xml.sax.xmlreader.InputSource()
        source.setByteStream(_StringIO(data))
        if hasattr(saxparser, '_ns_stack'):
            # work around bug in built-in SAX parser (doesn't recognize xml: namespace)
            # PyXML doesn't have this problem, and it doesn't have _ns_stack either
            saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
        try:
            saxparser.parse(source)
        except Exception, e:
            if _debug:
                import traceback
                traceback.print_stack()
                traceback.print_exc()
                sys.stderr.write('xml parsing failed\n')
            # Strict parsing failed; fall back to the loose parser below.
            result['bozo'] = 1
            result['bozo_exception'] = feedparser.exc or e
            use_strict_parser = 0
    if not use_strict_parser:
        feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '')
        feedparser.feed(data)
    result['feed'] = feedparser.feeddata
    result['entries'] = feedparser.entries
    result['version'] = result['version'] or feedparser.version
    result['namespaces'] = feedparser.namespacesInUse
    return result
if __name__ == '__main__':
    # Command-line mode: parse each URL given as an argument and
    # pretty-print the resulting dictionary.
    if not sys.argv[1:]:
        print __doc__
        sys.exit(0)
    else:
        urls = sys.argv[1:]
    # Turn FeedParserDict results into plain dicts so pprint formats them.
    zopeCompatibilityHack()
    from pprint import pprint
    for url in urls:
        print url
        print
        result = parse(url)
        pprint(result)
        print
#REVISION HISTORY
#1.0 - 9/27/2002 - MAP - fixed namespace processing on prefixed RSS 2.0 elements,
# added Simon Fell's test suite
#1.1 - 9/29/2002 - MAP - fixed infinite loop on incomplete CDATA sections
#2.0 - 10/19/2002
# JD - use inchannel to watch out for image and textinput elements which can
# also contain title, link, and description elements
# JD - check for isPermaLink='false' attribute on guid elements
# JD - replaced openAnything with open_resource supporting ETag and
# If-Modified-Since request headers
# JD - parse now accepts etag, modified, agent, and referrer optional
# arguments
# JD - modified parse to return a dictionary instead of a tuple so that any
# etag or modified information can be returned and cached by the caller
#2.0.1 - 10/21/2002 - MAP - changed parse() so that if we don't get anything
# because of etag/modified, return the old etag/modified to the caller to
# indicate why nothing is being returned
#2.0.2 - 10/21/2002 - JB - added the inchannel to the if statement, otherwise its
# useless. Fixes the problem JD was addressing by adding it.
#2.1 - 11/14/2002 - MAP - added gzip support
#2.2 - 1/27/2003 - MAP - added attribute support, admin:generatorAgent.
# start_admingeneratoragent is an example of how to handle elements with
# only attributes, no content.
#2.3 - 6/11/2003 - MAP - added USER_AGENT for default (if caller doesn't specify);
# also, make sure we send the User-Agent even if urllib2 isn't available.
# Match any variation of backend.userland.com/rss namespace.
#2.3.1 - 6/12/2003 - MAP - if item has both link and guid, return both as-is.
#2.4 - 7/9/2003 - MAP - added preliminary Pie/Atom/Echo support based on Sam Ruby's
# snapshot of July 1 <http://www.intertwingly.net/blog/1506.html>; changed
# project name
#2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree);
# removed unnecessary urllib code -- urllib2 should always be available anyway;
# return actual url, status, and full HTTP headers (as result['url'],
# result['status'], and result['headers']) if parsing a remote feed over HTTP --
# this should pass all the HTTP tests at <http://diveintomark.org/tests/client/http/>;
# added the latest namespace-of-the-week for RSS 2.0
#2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom
# User-Agent (otherwise urllib2 sends two, which confuses some servers)
#2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for
# inline <xhtml:body> and <xhtml:div> as used in some RSS 2.0 feeds
#2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or
# textInput, and also to return the character encoding (if specified)
#2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking
# nested divs within content (JohnD); fixed missing sys import (JohanS);
# fixed regular expression to capture XML character encoding (Andrei);
# added support for Atom 0.3-style links; fixed bug with textInput tracking;
# added support for cloud (MartijnP); added support for multiple
# category/dc:subject (MartijnP); normalize content model: 'description' gets
# description (which can come from description, summary, or full content if no
# description), 'content' gets dict of base/language/type/value (which can come
# from content:encoded, xhtml:body, content, or fullitem);
# fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang
# tracking; fixed bug tracking unknown tags; fixed bug tracking content when
# <content> element is not in default namespace (like Pocketsoap feed);
# resolve relative URLs in link, guid, docs, url, comments, wfw:comment,
# wfw:commentRSS; resolve relative URLs within embedded HTML markup in
# description, xhtml:body, content, content:encoded, title, subtitle,
# summary, info, tagline, and copyright; added support for pingback and
# trackback namespaces
#2.7 - 1/5/2004 - MAP - really added support for trackback and pingback
# namespaces, as opposed to 2.6 when I said I did but didn't really;
# sanitize HTML markup within some elements; added mxTidy support (if
# installed) to tidy HTML markup within some elements; fixed indentation
# bug in _parse_date (FazalM); use socket.setdefaulttimeout if available
# (FazalM); universal date parsing and normalization (FazalM): 'created', 'modified',
# 'issued' are parsed into 9-tuple date format and stored in 'created_parsed',
# 'modified_parsed', and 'issued_parsed'; 'date' is duplicated in 'modified'
# and vice-versa; 'date_parsed' is duplicated in 'modified_parsed' and vice-versa
#2.7.1 - 1/9/2004 - MAP - fixed bug handling " and '. fixed memory
# leak not closing url opener (JohnD); added dc:publisher support (MarekK);
# added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK)
#2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed <br/> tags in
# encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL);
# fixed relative URI processing for guid (skadz); added ICBM support; added
# base64 support
#2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many
# blogspot.com sites); added _debug variable
#2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing
#3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available);
# added several new supported namespaces; fixed bug tracking naked markup in
# description; added support for enclosure; added support for source; re-added
# support for cloud which got dropped somehow; added support for expirationDate
#3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking
# xml:base URI, one for documents that don't define one explicitly and one for
# documents that define an outer and an inner xml:base that goes out of scope
# before the end of the document
#3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level
#3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result['version']
# will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized;
# added support for creativeCommons:license and cc:license; added support for
# full Atom content model in title, tagline, info, copyright, summary; fixed bug
# with gzip encoding (not always telling server we support it when we do)
#3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail
# (dictionary of 'name', 'url', 'email'); map author to author_detail if author
# contains name + email address
#3.0b8 - 1/28/2004 - MAP - added support for contributor
#3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added
# support for summary
#3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from
# xml.util.iso8601
#3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain
# dangerous markup; fiddled with decodeEntities (not right); liberalized
# date parsing even further
#3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right);
# added support to Atom 0.2 subtitle; added support for Atom content model
# in copyright; better sanitizing of dangerous HTML elements with end tags
# (script, frameset)
#3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img,
# etc.) in embedded markup, in either HTML or XHTML form (<br>, <br/>, <br />)
#3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under
# Python 2.1
#3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS;
# fixed bug capturing author and contributor URL; fixed bug resolving relative
# links in author and contributor URL; fixed bug resolving relative links in
# generator URL; added support for recognizing RSS 1.0; passed Simon Fell's
# namespace tests, and included them permanently in the test suite with his
# permission; fixed namespace handling under Python 2.1
#3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15)
#3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023
#3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei);
# use libxml2 (if available)
#3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author
# name was in parentheses; removed ultra-problematic mxTidy support; patch to
# workaround crash in PyXML/expat when encountering invalid entities
# (MarkMoraes); support for textinput/textInput
#3.0b20 - 4/7/2004 - MAP - added CDF support
#3.0b21 - 4/14/2004 - MAP - added Hot RSS support
#3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in
# results dict; changed results dict to allow getting values with results.key
# as well as results[key]; work around embedded illformed HTML with half
# a DOCTYPE; work around malformed Content-Type header; if character encoding
# is wrong, try several common ones before falling back to regexes (if this
# works, bozo_exception is set to CharacterEncodingOverride); fixed character
# encoding issues in BaseHTMLProcessor by tracking encoding and converting
# from Unicode to raw strings before feeding data to sgmllib.SGMLParser;
# convert each value in results to Unicode (if possible), even if using
# regex-based parsing
#3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain
# high-bit characters in attributes in embedded HTML in description (thanks
# Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in
# FeedParserDict; tweaked FeedParserDict.has_key to return True if asking
# about a mapped key
#3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and
# results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could
# cause the same encoding to be tried twice (even if it failed the first time);
# fixed DOCTYPE stripping when DOCTYPE contained entity declarations;
# better textinput and image tracking in illformed RSS 1.0 feeds
#3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed
# my blink tag tests
#3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that
# failed to parse utf-16 encoded feeds; made source into a FeedParserDict;
# duplicate admin:generatorAgent/@rdf:resource in generator_detail.url;
# added support for image; refactored parse() fallback logic to try other
# encodings if SAX parsing fails (previously it would only try other encodings
# if re-encoding failed); remove unichr madness in normalize_attrs now that
# we're properly tracking encoding in and out of BaseHTMLProcessor; set
# feed.language from root-level xml:lang; set entry.id from rdf:about;
# send Accept header
#3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between
# iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are
# windows-1252); fixed regression that could cause the same encoding to be
# tried twice (even if it failed the first time)
#3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types;
# recover from malformed content-type header parameter with no equals sign
# ('text/xml; charset:iso-8859-1')
#3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities
# to Unicode equivalents in illformed feeds (aaronsw); added and
# passed tests for converting character entities to Unicode equivalents
# in illformed feeds (aaronsw); test for valid parsers when setting
# XML_AVAILABLE; make version and encoding available when server returns
# a 304; add handlers parameter to pass arbitrary urllib2 handlers (like
# digest auth or proxy support); add code to parse username/password
# out of url and send as basic authentication; expose downloading-related
# exceptions in bozo_exception (aaronsw); added __contains__ method to
# FeedParserDict (aaronsw); added publisher_detail (aaronsw)
#3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always
# convert feed to UTF-8 before passing to XML parser; completely revamped
# logic for determining character encoding and attempting XML parsing
# (much faster); increased default timeout to 20 seconds; test for presence
# of Location header on redirects; added tests for many alternate character
# encodings; support various EBCDIC encodings; support UTF-16BE and
# UTF16-LE with or without a BOM; support UTF-8 with a BOM; support
# UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no
# XML parsers are available; added support for 'Content-encoding: deflate';
# send blank 'Accept-encoding: ' header if neither gzip nor zlib modules
# are available
#3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure
# problem tracking xml:base and xml:lang if element declares it, child
# doesn't, first grandchild redeclares it, and second grandchild doesn't;
# refactored date parsing; defined public registerDateHandler so callers
# can add support for additional date formats at runtime; added support
# for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added
# zopeCompatibilityHack() which turns FeedParserDict into a regular
# dictionary, required for Zope compatibility, and also makes command-
# line debugging easier because pprint module formats real dictionaries
# better than dictionary-like objects; added NonXMLContentType exception,
# which is stored in bozo_exception when a feed is served with a non-XML
# media type such as 'text/plain'; respect Content-Language as default
# language if not xml:lang is present; cloud dict is now FeedParserDict;
# generator dict is now FeedParserDict; better tracking of xml:lang,
# including support for xml:lang='' to unset the current language;
# recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default
# namespace; don't overwrite final status on redirects (scenarios:
# redirecting to a URL that returns 304, redirecting to a URL that
# redirects to another URL with a different type of redirect); add
# support for HTTP 303 redirects
#4.0 - MAP - support for relative URIs in xml:base attribute; fixed
# encoding issue with mxTidy (phopkins); preliminary support for RFC 3229;
# support for Atom 1.0; support for iTunes extensions; new 'tags' for
# categories/keywords/etc. as array of dict
# {'term': term, 'scheme': scheme, 'label': label} to match Atom 1.0
# terminology; parse RFC 822-style dates with no time; lots of other
# bug fixes
#4.1 - MAP - removed socket timeout; added support for chardet library
| Python |
import sys
import os
import xbmc
import xbmcgui
import urllib
import feedparser
import socket
# Objects exported by the addon entry-point module (__main__).
_ = sys.modules[ "__main__" ].__language__
__scriptname__ = sys.modules[ "__main__" ].__scriptname__
__version__ = sys.modules[ "__main__" ].__version__
__addon__ = sys.modules[ "__main__" ].__addon__
# Google's authenticated Atom feed of unread inbox messages.
_URL = "https://mail.google.com/gmail/feed/atom"
# Control ids defined in the window's skin XML.
STATUS_LABEL = 100
EMAIL_LIST = 120
# Action ids that dismiss the dialog (back/escape/etc. per XBMC keymap).
CANCEL_DIALOG = ( 9, 10, 92, 216, 247, 257, 275, 61467, 61448, )
class GUI( xbmcgui.WindowXMLDialog ):
    '''Dialog that fetches the Gmail Atom feed and lists unread messages.'''

    def __init__( self, *args, **kwargs ):
        pass

    def onInit( self ):
        # Called by XBMC once the window XML has loaded.
        self.setup_all()

    def _get_urlopener(self):
        '''Return a URL opener preloaded with the user's Gmail credentials.'''
        urlopener = _FancyURLopener(__addon__.getSetting( "user" ), __addon__.getSetting( "pass" ))
        self.getControl( STATUS_LABEL ).setLabel( _(610) + "...")
        urlopener.addheaders = [('User-agent', "urllib/1.0 (urllib)")]
        return urlopener

    def setup_all( self ):
        '''Fetch the feed and populate the status label and email list.'''
        self.getControl( STATUS_LABEL ).setLabel( _(610) )
        self.getControl( EMAIL_LIST ).reset()
        # Require minimally plausible credentials before hitting the network.
        if len(__addon__.getSetting( "user" )) > 3 and len(__addon__.getSetting( "pass" )) > 4:
            try:
                atom_feed = self._get_urlopener().open(_URL)
                atom = feedparser.parse(atom_feed)
                self.atom = atom
                self.entries = []
                self.getControl( STATUS_LABEL ).setLabel( atom.feed.title)
                entries = len(atom.entries)
                if entries > 0:
                    self.getControl( 101 ).setLabel( _(611) % (entries, "" if entries == 1 else "s",))
                    for i in xrange(len(atom.entries)):
                        # Sender name is everything before the "(email)" part.
                        sender = atom.entries[i].author.split('(', 1)[0]
                        # BUG FIX: label2 previously used "(title, 50)[0]", a
                        # no-op tuple index (leftover truncation attempt);
                        # pass the title directly.
                        listitem = xbmcgui.ListItem( label2=atom.entries[i].title, label=sender )
                        listitem.setProperty( "summary", atom.entries[i].summary )
                        listitem.setProperty( "updated", _(612) % atom.entries[i].updated.replace("T", " At ").replace("Z","") )
                        self.getControl( EMAIL_LIST ).addItem( listitem )
                    self.setFocus( self.getControl( EMAIL_LIST ) )
                    self.getControl( EMAIL_LIST ).selectItem( 0 )
            # BUG FIX: narrowed the bare "except:" so SystemExit and
            # KeyboardInterrupt are no longer swallowed; any fetch/parse
            # failure still shows the error label.
            except Exception:
                self.getControl( STATUS_LABEL ).setLabel( _(613) )
        else:
            # Missing/implausible credentials: show the configuration hint.
            self.getControl( STATUS_LABEL ).setLabel( _(614) )
        # Restore the default (blocking) socket timeout.
        socket.setdefaulttimeout(None)

    def onClick( self, controlId ):
        pass

    def onFocus( self, controlId ):
        self.controlId = controlId

    def onAction( self, action ):
        # Close the dialog on any cancel/back action.
        if ( action.getId() in CANCEL_DIALOG):
            self.close()
class _FancyURLopener(urllib.FancyURLopener):
    '''URL opener that answers basic-auth prompts with fixed credentials.'''

    def __init__(self, usr, pwd, prx=None):
        # BUG FIX: the original default prx={} was a mutable default
        # argument shared between every instance constructed without an
        # explicit proxy dict; build a fresh dict per call instead.
        urllib.FancyURLopener.__init__(self, {} if prx is None else prx)
        self.usr = usr
        self.pwd = pwd

    def prompt_user_passwd(self, host, realm):
        # Invoked by FancyURLopener when the server requests authentication.
        return (self.usr, self.pwd)
| Python |
import os
import sys

import xbmc
import xbmcaddon
# Addon identity metadata.
__author__ = "amet"
__scriptid__ = "script.gmail.checker"
__scriptname__ = "GmailChecker"
__addon__ = xbmcaddon.Addon(id=__scriptid__)
__cwd__ = __addon__.getAddonInfo('path')
__version__ = __addon__.getAddonInfo('version')
__language__ = __addon__.getLocalizedString
# NOTE(review): xbmc is used below but does not appear in this module's
# import block -- confirm it is in scope (NameError otherwise).
__profile__ = xbmc.translatePath( __addon__.getAddonInfo('profile') )
__resource__ = xbmc.translatePath( os.path.join( __cwd__, 'resources', 'lib' ) )
# Make the bundled resources/lib directory importable (provides gui).
sys.path.append (__resource__)
if __name__ == "__main__":
    import gui
    # Show the main dialog modally; the skin XML lives under __cwd__.
    ui = gui.GUI( "script-GmailChecker-main.xml" , __cwd__, "Default")
    ui.doModal()
    del ui
# NOTE(review): clearing sys.modules here is drastic -- presumably a
# workaround for XBMC script reloading; verify it is intentional.
sys.modules.clear()
print "Finish"
| Python |
'''
Module which brings history information about files from Mercurial.
@author: Rodrigo Damazio
'''
import re
import subprocess
REVISION_REGEX = re.compile(r'(?P<hash>[0-9a-f]{12}):.*')
def _GetOutputLines(args):
'''
Runs an external process and returns its output as a list of lines.
@param args: the arguments to run
'''
process = subprocess.Popen(args,
stdout=subprocess.PIPE,
universal_newlines = True,
shell = False)
output = process.communicate()[0]
return output.splitlines()
def FillMercurialRevisions(filename, parsed_file):
    '''
    Fills the revs attribute of all strings in the given parsed file with
    a frozenset of revisions that touched the lines corresponding to that
    string.

    @param filename: the name of the file to get history for
    @param parsed_file: the parsed file to modify (dict of string name ->
        attributes with 'startLine'/'endLine' and optional 'revs')
    @raise ValueError: if 'hg annotate' produces an unparseable line
    '''
    # Take output of hg annotate to get revision of each line
    output_lines = _GetOutputLines(['hg', 'annotate', '-c', filename])
    # Create a map of line -> revision (key is list index, line 0 doesn't exist)
    line_revs = ['dummy']
    for line in output_lines:
        rev_match = REVISION_REGEX.match(line)
        if not rev_match:
            # BUG FIX: raising a plain string is a TypeError in modern Python;
            # raise a real exception type instead.
            raise ValueError('Unexpected line of output from hg: %s' % line)
        line_revs.append(rev_match.group('hash'))
    # .values() (rather than py2-only itervalues) works on both Python 2 and 3;
    # 'string' avoids shadowing the builtin 'str'.
    for string in parsed_file.values():
        # Get the lines that correspond to each string
        start_line = string['startLine']
        end_line = string['endLine']
        # Get the revisions that touched those lines
        revs = [line_revs[line_number]
                for line_number in range(start_line, end_line + 1)]
        # Merge with any revisions that were already there
        # (for explicit revision specification)
        if 'revs' in string:
            revs += string['revs']
        # Assign the revisions to the string
        string['revs'] = frozenset(revs)
def DoesRevisionSuperceed(filename, rev1, rev2):
    '''
    Tells whether a revision superceeds another.

    This essentially means that the older revision is an ancestor of the newer
    one.  Identical revisions are considered to superceed each other.

    @param rev1: the revision that may be superceeding the other
    @param rev2: the revision that may be superceeded
    @return: True if rev1 superceeds rev2 or they're the same
    '''
    if rev1 == rev2:
        return True
    # Ask Mercurial for every ancestor of rev1 (short node hashes, one per
    # line) and check whether rev2 appears among them.
    # TODO: Add filename
    ancestor_hashes = _GetOutputLines(
        ['hg', 'log', '-r', 'ancestors(%s)' % rev1,
         '--template', '{node|short}\n', filename])
    return rev2 in ancestor_hashes
def NewestRevision(filename, rev1, rev2):
    '''
    Returns which of two revisions is closest to the head of the repository.

    If none of them is the ancestor of the other, then we return either one.

    @param rev1: the first revision
    @param rev2: the second revision
    '''
    return rev1 if DoesRevisionSuperceed(filename, rev1, rev2) else rev2
#!/usr/bin/python
'''
Entry point for My Tracks i18n tool.
@author: Rodrigo Damazio
'''
import mytracks.files
import mytracks.translate
import mytracks.validate
import sys
def Usage():
    '''
    Prints command-line usage information and exits with status 1.
    '''
    print 'Usage: %s <command> [<language> ...]\n' % sys.argv[0]
    print 'Commands are:'
    print ' cleanup'
    print ' translate'
    print ' validate'
    sys.exit(1)
def Translate(languages):
    '''
    Asks the user to interactively translate any missing or oudated strings from
    the files for the given languages.

    @param languages: the languages to translate (mapping language -> dir)
    '''
    validator = mytracks.validate.Validator(languages)
    validator.Validate()
    missing = validator.missing_in_lang()
    outdated = validator.outdated_in_lang()
    for lang in languages:
        # BUG FIX: the validator only records entries for languages that
        # actually have problems (and never for 'en'), so indexing with
        # missing[lang] raised KeyError for clean languages. Also, the
        # missing values are frozensets, which cannot be concatenated to a
        # list with '+'; coerce both sides to lists first.
        untranslated = list(missing.get(lang, [])) + list(outdated.get(lang, []))
        if len(untranslated) == 0:
            continue
        translator = mytracks.translate.Translator(lang)
        translator.Translate(untranslated)
def Validate(languages):
    '''
    Computes and displays errors in the string files for the given languages.

    @param languages: the languages to compute for
    @return: the total number of errors found (0 when all files are OK)
    '''
    validator = mytracks.validate.Validator(languages)
    validator.Validate()
    error_count = 0
    if (validator.valid()):
        print 'All files OK'
    else:
        # Report each category of problem and tally the totals.
        for lang, missing in validator.missing_in_master().iteritems():
            print 'Missing in master, present in %s: %s:' % (lang, str(missing))
            error_count = error_count + len(missing)
        for lang, missing in validator.missing_in_lang().iteritems():
            print 'Missing in %s, present in master: %s:' % (lang, str(missing))
            error_count = error_count + len(missing)
        for lang, outdated in validator.outdated_in_lang().iteritems():
            print 'Outdated in %s: %s:' % (lang, str(outdated))
            error_count = error_count + len(outdated)
    return error_count
if __name__ == '__main__':
argv = sys.argv
argc = len(argv)
if argc < 2:
Usage()
languages = mytracks.files.GetAllLanguageFiles()
if argc == 3:
langs = set(argv[2:])
if not langs.issubset(languages):
raise 'Language(s) not found'
# Filter just to the languages specified
languages = dict((lang, lang_file)
for lang, lang_file in languages.iteritems()
if lang in langs or lang == 'en' )
cmd = argv[1]
if cmd == 'translate':
Translate(languages)
elif cmd == 'validate':
error_count = Validate(languages)
else:
Usage()
error_count = 0
print '%d errors found.' % error_count
| Python |
'''
Module which prompts the user for translations and saves them.
TODO: implement
@author: Rodrigo Damazio
'''
class Translator(object):
    '''
    Interactive translator for a single language.

    TODO: implement — currently only prints the names of the strings that
    need translation.
    '''

    def __init__(self, language):
        '''
        Constructor

        @param language: the language code this translator targets
        '''
        self._language = language

    def Translate(self, string_names):
        # Placeholder implementation: show which strings need work.
        print string_names
'''
Module which compares languague files to the master file and detects
issues.
@author: Rodrigo Damazio
'''
import os
from mytracks.parser import StringsParser
import mytracks.history
class Validator(object):
    '''
    Compares language string files against the master ('en') file and records
    missing and outdated keys.
    '''

    def __init__(self, languages):
        '''
        Builds a strings file validator.

        Parses strings.xml for every language (including the master) and
        annotates each string with the Mercurial revisions that touched it.

        Params:
        @param languages: a dictionary mapping each language to its corresponding directory
        '''
        self._langs = {}
        self._master = None
        self._language_paths = languages
        parser = StringsParser()
        for lang, lang_dir in languages.iteritems():
            filename = os.path.join(lang_dir, 'strings.xml')
            parsed_file = parser.Parse(filename)
            mytracks.history.FillMercurialRevisions(filename, parsed_file)
            if lang == 'en':
                # 'en' is the master against which all others are compared.
                self._master = parsed_file
            else:
                self._langs[lang] = parsed_file
        self._Reset()

    def Validate(self):
        '''
        Computes whether all the data in the files for the given languages is valid.

        Results are exposed through valid(), missing_in_master(),
        missing_in_lang() and outdated_in_lang().
        '''
        self._Reset()
        self._ValidateMissingKeys()
        self._ValidateOutdatedKeys()

    def valid(self):
        # True only when Validate() recorded no issues of any kind.
        return (len(self._missing_in_master) == 0 and
                len(self._missing_in_lang) == 0 and
                len(self._outdated_in_lang) == 0)

    def missing_in_master(self):
        # Map of language -> keys present in that language but not in master.
        return self._missing_in_master

    def missing_in_lang(self):
        # Map of language -> keys present in master but not in that language.
        return self._missing_in_lang

    def outdated_in_lang(self):
        # Map of language -> keys whose master entry is newer than the
        # language's entry.
        return self._outdated_in_lang

    def _Reset(self):
        # These are maps from language to string name list
        self._missing_in_master = {}
        self._missing_in_lang = {}
        self._outdated_in_lang = {}

    def _ValidateMissingKeys(self):
        '''
        Computes whether there are missing keys on either side.
        '''
        master_keys = frozenset(self._master.iterkeys())
        for lang, file in self._langs.iteritems():
            keys = frozenset(file.iterkeys())
            missing_in_master = keys - master_keys
            missing_in_lang = master_keys - keys
            if len(missing_in_master) > 0:
                self._missing_in_master[lang] = missing_in_master
            if len(missing_in_lang) > 0:
                self._missing_in_lang[lang] = missing_in_lang

    def _ValidateOutdatedKeys(self):
        '''
        Computes whether any of the language keys are outdated with relation to the
        master keys.
        '''
        for lang, file in self._langs.iteritems():
            outdated = []
            for key, str in file.iteritems():
                # Get all revisions that touched master and language files for this
                # string.
                # NOTE(review): a key present only in the language file raises
                # KeyError here — _ValidateMissingKeys records such keys but
                # does not skip them; confirm inputs never hit this.
                master_str = self._master[key]
                master_revs = master_str['revs']
                lang_revs = str['revs']
                if not master_revs or not lang_revs:
                    print 'WARNING: No revision for %s in %s' % (key, lang)
                    continue
                master_file = os.path.join(self._language_paths['en'], 'strings.xml')
                lang_file = os.path.join(self._language_paths[lang], 'strings.xml')
                # Assume that the repository has a single head (TODO: check that),
                # and as such there is always one revision which superceeds all others.
                master_rev = reduce(
                    lambda r1, r2: mytracks.history.NewestRevision(master_file, r1, r2),
                    master_revs)
                lang_rev = reduce(
                    lambda r1, r2: mytracks.history.NewestRevision(lang_file, r1, r2),
                    lang_revs)
                # If the master version is newer than the lang version
                # NOTE(review): DoesRevisionSuperceed returns True for equal
                # revisions too, so a string touched by the same revision in
                # both files is flagged as outdated — confirm this is intended.
                if mytracks.history.DoesRevisionSuperceed(lang_file, master_rev, lang_rev):
                    outdated.append(key)
            if len(outdated) > 0:
                self._outdated_in_lang[lang] = outdated
| Python |
'''
Module for dealing with resource files (but not their contents).
@author: Rodrigo Damazio
'''
import os.path
from glob import glob
import re
# Android resource layout constants.
MYTRACKS_RES_DIR = 'MyTracks/res'      # res dir relative to the project root
ANDROID_MASTER_VALUES = 'values'       # master (English) strings directory
ANDROID_VALUES_MASK = 'values-*'       # glob mask for translated directories


def GetMyTracksDir():
    '''
    Returns the directory in which the MyTracks directory is located.

    Walks up from the current working directory until a directory containing
    MYTRACKS_RES_DIR is found.

    @raise Exception: if the filesystem root is reached without finding it
    '''
    path = os.getcwd()
    while not os.path.isdir(os.path.join(path, MYTRACKS_RES_DIR)):
        parent = os.path.split(path)[0]
        if parent == path:
            # BUG FIX: detect the root portably (the old literal '/' check
            # looped forever on Windows drives) and raise a real exception
            # (raising a plain string is a TypeError in modern Python).
            raise Exception('Not in My Tracks project')
        # Go up one level
        path = parent
    return path
def GetAllLanguageFiles():
    '''
    Returns a mapping from all found languages to their respective directories.

    The master language ('en') maps to the plain 'values' directory; every
    other language maps to its 'values-<lang>' directory.

    @raise Exception: if no language directories or no master directory exist
    '''
    mytracks_path = GetMyTracksDir()
    res_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_VALUES_MASK)
    language_dirs = glob(res_dir)
    master_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_MASTER_VALUES)
    # BUG FIX: raising plain strings is a TypeError in modern Python; use a
    # real exception type.
    if len(language_dirs) == 0:
        raise Exception('No languages found!')
    if not os.path.isdir(master_dir):
        raise Exception('Couldn\'t find master file')
    # Extract the language code from each 'values-<lang>' directory name
    # ('lang_dir' avoids shadowing the builtin 'dir').
    language_tuples = [(re.findall(r'.*values-([A-Za-z-]+)', lang_dir)[0], lang_dir)
                       for lang_dir in language_dirs]
    language_tuples.append(('en', master_dir))
    return dict(language_tuples)
| Python |
'''
Module which parses a string XML file.
@author: Rodrigo Damazio
'''
from xml.parsers.expat import ParserCreate
import re
#import xml.etree.ElementTree as ET
class StringsParser(object):
    '''
    Parser for string XML files.

    This object is not thread-safe and should be used for parsing a single file at
    a time, only.
    '''

    def Parse(self, file):
        '''
        Parses the given file and returns a dictionary mapping keys to an object
        with attributes for that key, such as the value, start/end line and explicit
        revisions.

        In addition to the standard XML format of the strings file, this parser
        supports an annotation inside comments, in one of these formats:
          <!-- KEEP_PARENT name="bla" -->
          <!-- KEEP_PARENT name="bla" rev="123456789012" -->
        Such an annotation indicates that we're explicitly inheriting from the
        master file (and the optional revision says that this decision is compatible
        with the master file up to that revision).

        @param file: the name of the file to parse
        '''
        self._Reset()
        # Unfortunately expat is the only parser that will give us line numbers
        self._xml_parser = ParserCreate()
        self._xml_parser.StartElementHandler = self._StartElementHandler
        self._xml_parser.EndElementHandler = self._EndElementHandler
        self._xml_parser.CharacterDataHandler = self._CharacterDataHandler
        self._xml_parser.CommentHandler = self._CommentHandler
        # BUG FIX: open in binary mode (expat consumes raw bytes) and use a
        # context manager so the file handle is closed even if parsing raises.
        with open(file, 'rb') as file_obj:
            self._xml_parser.ParseFile(file_obj)
        return self._all_strings

    def _Reset(self):
        # Per-parse state: the <string> element currently being processed (if
        # any) and the accumulated results.
        self._currentString = None
        self._currentStringName = None
        self._currentStringValue = None
        self._all_strings = {}

    def _StartElementHandler(self, name, attrs):
        '''Starts collecting data for a <string name="..."> element.'''
        if name != 'string':
            return
        if 'name' not in attrs:
            return
        # Nested <string> elements are not expected in a valid strings file.
        assert not self._currentString
        assert not self._currentStringName
        self._currentString = {
            'startLine': self._xml_parser.CurrentLineNumber,
        }
        if 'rev' in attrs:
            # An explicit revision attribute seeds the revision list.
            self._currentString['revs'] = [attrs['rev']]
        self._currentStringName = attrs['name']
        self._currentStringValue = ''

    def _EndElementHandler(self, name):
        '''Finishes the current <string> element and records it.'''
        if name != 'string':
            return
        assert self._currentString
        assert self._currentStringName
        self._currentString['value'] = self._currentStringValue
        self._currentString['endLine'] = self._xml_parser.CurrentLineNumber
        self._all_strings[self._currentStringName] = self._currentString
        self._currentString = None
        self._currentStringName = None
        self._currentStringValue = None

    def _CharacterDataHandler(self, data):
        # Expat may deliver the text of one element in several chunks, so
        # accumulate rather than assign.
        if not self._currentString:
            return
        self._currentStringValue += data

    # Matches the KEEP_PARENT annotation described in Parse's docstring.
    _KEEP_PARENT_REGEX = re.compile(r'\s*KEEP_PARENT\s+'
                                    r'name\s*=\s*[\'"]?(?P<name>[a-z0-9_]+)[\'"]?'
                                    r'(?:\s+rev=[\'"]?(?P<rev>[0-9a-f]{12})[\'"]?)?\s*',
                                    re.MULTILINE | re.DOTALL)

    def _CommentHandler(self, data):
        '''Handles XML comments, looking for KEEP_PARENT annotations.'''
        keep_parent_match = self._KEEP_PARENT_REGEX.match(data)
        if not keep_parent_match:
            return
        name = keep_parent_match.group('name')
        self._all_strings[name] = {
            'keepParent': True,
            'startLine': self._xml_parser.CurrentLineNumber,
            'endLine': self._xml_parser.CurrentLineNumber
        }
        rev = keep_parent_match.group('rev')
        if rev:
            self._all_strings[name]['revs'] = [rev]
#!/usr/bin/python
#
# Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'afshar@google.com (Ali Afshar)'
import os
import httplib2
import sessions
from google.appengine.ext import db
from google.appengine.ext.webapp import template
from apiclient.discovery import build_from_document
from apiclient.http import MediaUpload
from oauth2client import client
from oauth2client.appengine import CredentialsProperty
from oauth2client.appengine import StorageByKeyName
from oauth2client.appengine import simplejson as json
# Base URL for Google API endpoints.
APIS_BASE = 'https://www.googleapis.com'
# OAuth 2.0 scopes: per-file Drive access plus the user's email and profile.
ALL_SCOPES = ('https://www.googleapis.com/auth/drive.file '
              'https://www.googleapis.com/auth/userinfo.email '
              'https://www.googleapis.com/auth/userinfo.profile')
# Names of the query parameters used by the OAuth / Drive UI flows.
CODE_PARAMETER = 'code'
STATE_PARAMETER = 'state'
# Session-cookie signing secret and cached API discovery documents, all read
# once at module import time.
SESSION_SECRET = open('session.secret').read()
DRIVE_DISCOVERY_DOC = open('drive.json').read()
USERS_DISCOVERY_DOC = open('users.json').read()
class Credentials(db.Model):
    """Datastore entity for storing OAuth2.0 credentials."""
    # CredentialsProperty (from the API client library) serializes the OAuth
    # credentials object into the datastore.
    credentials = CredentialsProperty()
def CreateOAuthFlow(request):
    """Create OAuth2.0 flow controller

    Args:
      request: HTTP request to create OAuth2.0 flow for

    Returns:
      OAuth2.0 Flow instance suitable for performing OAuth2.0.
    """
    # NOTE(review): scope is left empty here; callers (e.g. RedirectAuth) set
    # it explicitly, or it is implied by the Drive-UI-initiated flow — confirm.
    flow = client.flow_from_clientsecrets('client-debug.json', scope='')
    # Derive the redirect URI from the incoming request URL: drop the query
    # string and any trailing slash.
    flow.redirect_uri = request.url.split('?', 1)[0].rstrip('/')
    return flow
def GetCodeCredentials(request):
    """Create OAuth2.0 credentials by extracting a code and performing OAuth2.0.

    Args:
      request: HTTP request used for extracting an authorization code.

    Returns:
      OAuth2.0 credentials suitable for authorizing clients, or None when the
      request carries no authorization code.
    """
    code = request.get(CODE_PARAMETER)
    if code:
        # Exchange the authorization code for credentials (OAuth 2.0 step 2).
        oauth_flow = CreateOAuthFlow(request)
        creds = oauth_flow.step2_exchange(code)
        # Look up the user's id via the userinfo service and remember it in a
        # signed session cookie.
        users_service = CreateService(USERS_DISCOVERY_DOC, creds)
        userid = users_service.userinfo().get().execute().get('id')
        request.session.set_secure_cookie(name='userid', value=userid)
        # Persist the credentials in the datastore keyed by user id.
        StorageByKeyName(Credentials, userid, 'credentials').put(creds)
        return creds
    # Falls through (returns None) when no code parameter is present.
def GetSessionCredentials(request):
    """Get OAuth2.0 credentials for an HTTP session.

    Args:
      request: HTTP request to use session from.

    Returns:
      OAuth2.0 credentials suitable for authorizing clients, or None when the
      session has no user id or the stored credentials are missing/invalid.
    """
    # The user id was stored in a signed session cookie by GetCodeCredentials.
    userid = request.session.get_secure_cookie(name='userid')
    if userid:
        creds = StorageByKeyName(Credentials, userid, 'credentials').get()
        if creds and not creds.invalid:
            return creds
    # Falls through (returns None) on any lookup failure.
def CreateService(discovery_doc, creds):
    """Create a Google API service.

    Args:
      discovery_doc: Discovery doc used to configure service.
      creds: Credentials used to authorize service.

    Returns:
      Authorized Google API service.
    """
    http = httplib2.Http()
    # Attach the OAuth credentials to the transport so every request made
    # through this service is authorized.
    creds.authorize(http)
    return build_from_document(discovery_doc, APIS_BASE, http=http)
def RedirectAuth(handler):
    """Redirect a handler to an authorization page.

    Args:
      handler: webapp.RequestHandler to redirect.
    """
    flow = CreateOAuthFlow(handler.request)
    # Set the scopes explicitly: this redirect does not originate from the
    # Drive UI, which would otherwise supply them.
    flow.scope = ALL_SCOPES
    # Step 1 of the OAuth 2.0 web server flow: build the approval page URL.
    uri = flow.step1_get_authorize_url(flow.redirect_uri)
    handler.redirect(uri)
def CreateDrive(handler):
    """Create a fully authorized drive service for this handler.

    Args:
      handler: RequestHandler from which drive service is generated.

    Returns:
      Authorized drive service, generated from the handler request, or None
      after issuing an authorization redirect when no credentials exist.
    """
    request = handler.request
    # Attach a cookie-based session to the request for credential lookup.
    request.session = sessions.LilCookies(handler, SESSION_SECRET)
    # Prefer fresh credentials from an auth code; fall back to the session.
    creds = GetCodeCredentials(request) or GetSessionCredentials(request)
    if creds:
        return CreateService(DRIVE_DISCOVERY_DOC, creds)
    else:
        RedirectAuth(handler)
def ServiceEnabled(view):
    """Decorator to inject an authorized service into an HTTP handler.

    Args:
      view: HTTP request handler method.

    Returns:
      Decorated handler which accepts the service as a parameter.
    """
    def ServiceDecoratedView(handler, view=view):
        # NOTE(review): when authorization fails, CreateDrive redirects and
        # returns None but the view still runs with service=None — confirm
        # all wrapped views tolerate that.
        service = CreateDrive(handler)
        response_data = view(handler, service)
        handler.response.headers['Content-Type'] = 'text/html'
        handler.response.out.write(response_data)
    return ServiceDecoratedView
def ServiceEnabledJson(view):
    """Decorator to inject an authorized service into a JSON HTTP handler.

    Args:
      view: HTTP request handler method.

    Returns:
      Decorated handler which accepts the service as a parameter.
    """
    def ServiceDecoratedView(handler, view=view):
        service = CreateDrive(handler)
        # Deserialize the request body as JSON when one is present.
        if handler.request.body:
            data = json.loads(handler.request.body)
        else:
            data = None
        # The view's return value is serialized back to the client as JSON.
        response_data = json.dumps(view(handler, service, data))
        handler.response.headers['Content-Type'] = 'application/json'
        handler.response.out.write(response_data)
    return ServiceDecoratedView
class DriveState(object):
    """Holds the action and file ids supplied by the Drive UI state param."""

    def __init__(self, state):
        self.ParseState(state)

    @classmethod
    def FromRequest(cls, request):
        """Create a Drive State instance from an HTTP request.

        Args:
          cls: Type this class method is called against.
          request: HTTP request.
        """
        return DriveState(request.get(STATE_PARAMETER))

    def ParseState(self, state):
        """Parse a state parameter and set internal values.

        Args:
          state: State parameter to parse.
        """
        # A JSON payload always begins with '{'; anything else is treated as
        # a plain resource id (possibly empty).
        parse = self.ParseJsonState if state.startswith('{') else self.ParsePlainState
        parse(state)

    def ParseJsonState(self, state):
        """Parse a state parameter that is JSON.

        Args:
          state: State parameter to parse
        """
        decoded = json.loads(state)
        self.action = decoded['action']
        self.ids = map(str, decoded.get('ids', []))

    def ParsePlainState(self, state):
        """Parse a state parameter that is a plain resource id or missing.

        Args:
          state: State parameter to parse
        """
        if state:
            self.action, self.ids = 'open', [state]
        else:
            self.action, self.ids = 'create', []
class MediaInMemoryUpload(MediaUpload):
    """MediaUpload for a chunk of bytes.

    Construct a MediaInMemoryUpload and pass as the media_body parameter of the
    method. For example, if we had a service that allowed plain text.
    """

    def __init__(self, body, mimetype='application/octet-stream',
                 chunksize=256*1024, resumable=False):
        """Create a new MediaBytesUpload.

        Args:
          body: string, Bytes of body content.
          mimetype: string, Mime-type of the file or default of
            'application/octet-stream'.
          chunksize: int, File will be uploaded in chunks of this many bytes. Only
            used if resumable=True.
          resumable: bool, True if this is a resumable upload. False means upload
            in a single request.
        """
        self._body = body
        self._mimetype = mimetype
        self._resumable = resumable
        self._chunksize = chunksize

    def chunksize(self):
        """Chunk size for resumable uploads.

        Returns:
          Chunk size in bytes.
        """
        return self._chunksize

    def mimetype(self):
        """Mime type of the body.

        Returns:
          Mime type.
        """
        return self._mimetype

    def size(self):
        """Size of upload.

        Returns:
          Size of the body.
        """
        return len(self._body)

    def resumable(self):
        """Whether this upload is resumable.

        Returns:
          True if resumable upload or False.
        """
        return self._resumable

    def getbytes(self, begin, length):
        """Get bytes from the media.

        Args:
          begin: int, offset from beginning of file.
          length: int, number of bytes to read, starting at begin.

        Returns:
          A string of bytes read. May be shorter than length if EOF was reached
          first.
        """
        return self._body[begin:begin + length]
def RenderTemplate(name, **context):
    """Render a named template in a context.

    Args:
      name: Template name.
      context: Keyword arguments to render as template variables.
    """
    # Returns the rendered template as a string.
    return template.render(name, context)
| Python |
#!/usr/bin/python
#
# Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'afshar@google.com (Ali Afshar)'
# Add the library location to the path
import sys
sys.path.insert(0, 'lib')
import os
import httplib2
import sessions
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from google.appengine.ext.webapp import template
from apiclient.discovery import build
from apiclient.http import MediaUpload
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
from oauth2client.client import AccessTokenRefreshError
from oauth2client.appengine import CredentialsProperty
from oauth2client.appengine import StorageByKeyName
from oauth2client.appengine import simplejson as json
ALL_SCOPES = ('https://www.googleapis.com/auth/drive.file '
'https://www.googleapis.com/auth/userinfo.email '
'https://www.googleapis.com/auth/userinfo.profile')
def SibPath(name):
    """Generate a path that is a sibling of this file.

    Args:
      name: Name of sibling file.

    Returns:
      Path to sibling file.
    """
    here = os.path.dirname(__file__)
    return os.path.join(here, name)
# Load the secret that is used for client side sessions
# Create one of these for yourself with, for example:
# python -c "import os; print os.urandom(64)" > session-secret
SESSION_SECRET = open(SibPath('session.secret')).read()
# The single-page UI HTML served for every page request; read once at import.
INDEX_HTML = open(SibPath('index.html')).read()
class Credentials(db.Model):
    """Datastore entity for storing OAuth2.0 credentials.

    The CredentialsProperty is provided by the Google API Python Client, and
    is used by the Storage classes to store OAuth 2.0 credentials in the data
    store.
    """
    credentials = CredentialsProperty()
def CreateService(service, version, creds):
    """Create a Google API service.

    Load an API service from a discovery document and authorize it with the
    provided credentials.

    Args:
      service: Service name (e.g 'drive', 'oauth2').
      version: Service version (e.g 'v1').
      creds: Credentials used to authorize service.

    Returns:
      Authorized Google API service.
    """
    # Instantiate an Http instance
    http = httplib2.Http()
    # Authorize the Http instance with the passed credentials
    creds.authorize(http)
    # Build a service from the passed discovery document path
    return build(service, version, http=http)
class DriveState(object):
    """State passed by the Drive UI: 'create new' or 'open with' plus ids."""

    def __init__(self, state):
        """Create a new instance of drive state.

        Parse and load the JSON state parameter.

        Args:
          state: State query parameter as a string (may be empty).
        """
        if not state:
            # No state at all means the user is creating a brand-new file.
            self.action = 'create'
            self.ids = []
            return
        parsed = json.loads(state)
        self.action = parsed['action']
        self.ids = map(str, parsed.get('ids', []))

    @classmethod
    def FromRequest(cls, request):
        """Create a Drive State instance from an HTTP request.

        Args:
          cls: Type this class method is called against.
          request: HTTP request.
        """
        return DriveState(request.get('state'))
class BaseDriveHandler(webapp.RequestHandler):
    """Base request handler for drive applications.

    Adds Authorization support for Drive.
    """

    def CreateOAuthFlow(self):
        """Create OAuth2.0 flow controller

        This controller can be used to perform all parts of the OAuth 2.0 dance
        including exchanging an Authorization code.

        Returns:
          OAuth2.0 Flow instance suitable for performing OAuth2.0.
        """
        # NOTE(review): scope is left empty here; it is set by RedirectAuth or
        # implied by the Drive-UI-initiated flow — confirm.
        flow = flow_from_clientsecrets('client_secrets.json', scope='')
        # Dynamically set the redirect_uri based on the request URL. This is
        # extremely convenient for debugging to an alternative host without
        # manually setting the redirect URI.
        flow.redirect_uri = self.request.url.split('?', 1)[0].rsplit('/', 1)[0]
        return flow

    def GetCodeCredentials(self):
        """Create OAuth 2.0 credentials by extracting a code and performing OAuth2.0.

        The authorization code is extracted from the URI parameters. If it is
        absent, None is returned immediately. Otherwise, if it is present, it
        is used to perform step 2 of the OAuth 2.0 web server flow.

        Once a token is received, the user information is fetched from the
        userinfo service and stored in the session. The token is saved in the
        datastore against the user ID received from the userinfo service.

        Returns:
          OAuth2.0 credentials suitable for authorizing clients or None if
          Authorization could not take place.
        """
        # Other frameworks use different API to get a query parameter.
        code = self.request.get('code')
        if not code:
            # returns None to indicate that no code was passed from Google Drive.
            return None
        # Auth flow is a controller that is loaded with the client information,
        # including client_id, client_secret, redirect_uri etc
        oauth_flow = self.CreateOAuthFlow()
        # Perform the exchange of the code. If there is a failure with
        # exchanging the code, return None.
        try:
            creds = oauth_flow.step2_exchange(code)
        except FlowExchangeError:
            return None
        # Create an API service that can use the userinfo API. Authorize it
        # with our credentials that we gained from the code exchange.
        users_service = CreateService('oauth2', 'v2', creds)
        # Make a call against the userinfo service to retrieve the user's
        # information. In this case we are interested in the user's "id" field.
        userid = users_service.userinfo().get().execute().get('id')
        # Store the user id in the user's cookie-based session.
        session = sessions.LilCookies(self, SESSION_SECRET)
        session.set_secure_cookie(name='userid', value=userid)
        # Store the credentials in the data store using the userid as the key.
        StorageByKeyName(Credentials, userid, 'credentials').put(creds)
        return creds

    def GetSessionCredentials(self):
        """Get OAuth 2.0 credentials for an HTTP session.

        If the user has a user id stored in their cookie session, extract that
        value and use it to load that user's credentials from the data store.

        Returns:
          OAuth2.0 credentials suitable for authorizing clients, or None.
        """
        # Try to load the user id from the session
        session = sessions.LilCookies(self, SESSION_SECRET)
        userid = session.get_secure_cookie(name='userid')
        if not userid:
            # return None to indicate that no credentials could be loaded from
            # the session.
            return None
        # Load the credentials from the data store, using the userid as a key.
        creds = StorageByKeyName(Credentials, userid, 'credentials').get()
        # if the credentials are invalid, return None to indicate that the
        # credentials cannot be used.
        if creds and creds.invalid:
            return None
        return creds

    def RedirectAuth(self):
        """Redirect a handler to an authorization page.

        Used when a handler fails to fetch credentials suitable for making
        Drive API requests. The request is redirected to an OAuth 2.0
        authorization approval page and on approval, are returned to
        application.
        """
        flow = self.CreateOAuthFlow()
        # Manually add the required scopes. Since this redirect does not
        # originate from the Google Drive UI, which authomatically sets the
        # scopes that are listed in the API Console.
        flow.scope = ALL_SCOPES
        # Create the redirect URI by performing step 1 of the OAuth 2.0 web
        # server flow.
        uri = flow.step1_get_authorize_url(flow.redirect_uri)
        # Perform the redirect.
        self.redirect(uri)

    def RespondJSON(self, data):
        """Generate a JSON response and return it to the client.

        Args:
          data: The data that will be converted to JSON to return.
        """
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(data))

    def CreateAuthorizedService(self, service, version):
        """Create an authorize service instance.

        The service can only ever retrieve the credentials from the session.

        Args:
          service: Service name (e.g 'drive', 'oauth2').
          version: Service version (e.g 'v1').

        Returns:
          Authorized service or redirect to authorization flow if no
          credentials.
        """
        # For the service, the session holds the credentials
        creds = self.GetSessionCredentials()
        if creds:
            # If the session contains credentials, use them to create a Drive
            # service instance.
            return CreateService(service, version, creds)
        else:
            # If no credentials could be loaded from the session, redirect the
            # user to the authorization page.
            self.RedirectAuth()

    def CreateDrive(self):
        """Create a drive client instance."""
        return self.CreateAuthorizedService('drive', 'v2')

    def CreateUserInfo(self):
        """Create a user info client instance."""
        return self.CreateAuthorizedService('oauth2', 'v2')
class MainPage(BaseDriveHandler):
    """Web handler for the main page.

    Handles requests and returns the user interface for Open With and Create
    cases. Responsible for parsing the state provided from the Drive UI and
    acting appropriately.
    """

    def get(self):
        """Handle GET for Create New and Open With.

        This creates an authorized client, and checks whether a resource id
        has been passed or not. If a resource ID has been passed, this is the
        Open With use-case, otherwise it is the Create New use-case.
        """
        # Generate a state instance for the request, this includes the action,
        # and the file id(s) that have been sent from the Drive user interface.
        drive_state = DriveState.FromRequest(self.request)
        if drive_state.action == 'open' and len(drive_state.ids) > 0:
            code = self.request.get('code')
            if code:
                code = '?code=%s' % code
            # Send the browser to the client-side editor route for this file.
            self.redirect('/#edit/%s%s' % (drive_state.ids[0], code))
            return
        # Fetch the credentials by extracting an OAuth 2.0 authorization code
        # from the request URL. If the code is not present, redirect to the
        # OAuth 2.0 authorization URL.
        creds = self.GetCodeCredentials()
        if not creds:
            return self.RedirectAuth()
        # Extract the numerical portion of the client_id from the stored value
        # in the OAuth flow. You could also store this value as a separate
        # variable somewhere.
        # NOTE(review): client_id is computed but never used below — verify
        # whether RenderTemplate was meant to receive it.
        client_id = self.CreateOAuthFlow().client_id.split('.')[0].split('-')[0]
        self.RenderTemplate()

    def RenderTemplate(self):
        """Render a named template in a context."""
        # Serve the static single-page app HTML loaded at module import time.
        self.response.headers['Content-Type'] = 'text/html'
        self.response.out.write(INDEX_HTML)
class ServiceHandler(BaseDriveHandler):
    """Web handler for the service to read and write to Drive."""

    def post(self):
        """Called when HTTP POST requests are received by the web application.

        The POST body is JSON which is deserialized and used as values to create a
        new file in Drive. The authorization access token for this action is
        retrieved from the data store.
        """
        # Create a Drive service
        service = self.CreateDrive()
        if service is None:
            return
        # Load the data that has been posted as JSON.
        # NOTE(review): RequestJSON() returns None when the request has no
        # body; the subscripts below would then raise TypeError -- confirm
        # callers always POST a JSON body.
        data = self.RequestJSON()
        # Create a new file data structure.
        resource = {
            'title': data['title'],
            'description': data['description'],
            'mimeType': data['mimeType'],
        }
        try:
            # Make an insert request to create a new file. A MediaInMemoryUpload
            # instance is used to upload the file body.
            resource = service.files().insert(
                body=resource,
                media_body=MediaInMemoryUpload(
                    data.get('content', ''),
                    data['mimeType'],
                    resumable=True)
            ).execute()
            # Respond with the new file id as JSON.
            self.RespondJSON(resource['id'])
        except AccessTokenRefreshError:
            # In cases where the access token has expired and cannot be refreshed
            # (e.g. manual token revoking) redirect the user to the authorization page
            # to authorize.
            self.RedirectAuth()

    def get(self):
        """Called when HTTP GET requests are received by the web application.

        Use the query parameter file_id to fetch the required file's metadata then
        content and return it as a JSON object.
        Since DrEdit deals with text files, it is safe to dump the content directly
        into JSON, but this is not the case with binary files, where something like
        Base64 encoding is more appropriate.
        """
        # Create a Drive service
        service = self.CreateDrive()
        if service is None:
            return
        try:
            # Requests are expected to pass the file_id query parameter.
            file_id = self.request.get('file_id')
            if file_id:
                # Fetch the file metadata by making the service.files().get method of
                # the Drive API.
                f = service.files().get(fileId=file_id).execute()
                downloadUrl = f.get('downloadUrl')
                # If a download URL is provided in the file metadata, use it to make an
                # authorized request to fetch the file content. Set this content in the
                # data to return as the 'content' field. If there is no downloadUrl,
                # just set empty content.
                if downloadUrl:
                    # Reuse the service's authorized HTTP object for the download.
                    resp, f['content'] = service._http.request(downloadUrl)
                else:
                    f['content'] = ''
            else:
                # No file_id given: respond with a JSON null.
                f = None
            # Generate a JSON response with the file data and return to the client.
            self.RespondJSON(f)
        except AccessTokenRefreshError:
            # Catch AccessTokenRefreshError which occurs when the API client library
            # fails to refresh a token. This occurs, for example, when a refresh token
            # is revoked. When this happens the user is redirected to the
            # Authorization URL.
            self.RedirectAuth()

    def put(self):
        """Called when HTTP PUT requests are received by the web application.

        The PUT body is JSON which is deserialized and used as values to update
        a file in Drive. The authorization access token for this action is
        retrieved from the data store.
        """
        # Create a Drive service
        service = self.CreateDrive()
        if service is None:
            return
        # Load the data that has been posted as JSON
        data = self.RequestJSON()
        try:
            # Pull the content out of the metadata dict before sending it as the
            # request body.
            content = data.get('content')
            if 'content' in data:
                data.pop('content')
            if content is not None:
                # Make an update request to update the file. A MediaInMemoryUpload
                # instance is used to upload the file body. Because of a limitation, this
                # request must be made in two parts, the first to update the metadata, and
                # the second to update the body.
                resource = service.files().update(
                    fileId=data['resource_id'],
                    newRevision=self.request.get('newRevision', False),
                    body=data,
                    media_body=MediaInMemoryUpload(
                        content, data['mimeType'], resumable=True)
                ).execute()
            else:
                # Only update the metadata, a patch request is prefered but not yet
                # supported on Google App Engine; see
                # http://code.google.com/p/googleappengine/issues/detail?id=6316.
                resource = service.files().update(
                    fileId=data['resource_id'],
                    newRevision=self.request.get('newRevision', False),
                    body=data).execute()
            # Respond with the new file id as JSON.
            self.RespondJSON(resource['id'])
        except AccessTokenRefreshError:
            # In cases where the access token has expired and cannot be refreshed
            # (e.g. manual token revoking) redirect the user to the authorization page
            # to authorize.
            self.RedirectAuth()

    def RequestJSON(self):
        """Load the request body as JSON.

        Returns:
            Request body loaded as JSON or None if there is no request body.
        """
        if self.request.body:
            return json.loads(self.request.body)
class UserHandler(BaseDriveHandler):
    """Web handler for the service to read user information."""

    def get(self):
        """Handle HTTP GET by returning the current user's profile as JSON."""
        # Create a userinfo service; bail out if authorization is missing.
        service = self.CreateUserInfo()
        if service is None:
            return
        try:
            result = service.userinfo().get().execute()
        except AccessTokenRefreshError:
            # The stored token could not be refreshed (e.g. it was revoked),
            # so send the user back through the authorization flow.
            self.RedirectAuth()
        else:
            # Return the profile document to the client as JSON.
            self.RespondJSON(result)
class AboutHandler(BaseDriveHandler):
    """Web handler for the service to read user information."""

    def get(self):
        """Handle HTTP GET by returning the Drive 'about' document as JSON."""
        # Create a Drive service; bail out if authorization is missing.
        service = self.CreateDrive()
        if service is None:
            return
        try:
            result = service.about().get().execute()
        except AccessTokenRefreshError:
            # The stored token could not be refreshed (e.g. it was revoked),
            # so send the user back through the authorization flow.
            self.RedirectAuth()
        else:
            # Return the 'about' metadata to the client as JSON.
            self.RespondJSON(result)
class MediaInMemoryUpload(MediaUpload):
    """MediaUpload for a chunk of bytes.

    Construct a MediaFileUpload and pass as the media_body parameter of the
    method. For example, if we had a service that allowed plain text:
    """

    def __init__(self, body, mimetype='application/octet-stream',
                 chunksize=256*1024, resumable=False):
        """Create a new MediaBytesUpload.

        Args:
            body: string, Bytes of body content.
            mimetype: string, Mime-type of the file or default of
                'application/octet-stream'.
            chunksize: int, File will be uploaded in chunks of this many bytes. Only
                used if resumable=True.
            resumable: bool, True if this is a resumable upload. False means upload
                in a single request.
        """
        # Attribute names follow the MediaUpload base-class convention; the
        # accessor methods below expose them to the API client machinery.
        self._body = body
        self._mimetype = mimetype
        self._resumable = resumable
        self._chunksize = chunksize

    def chunksize(self):
        """Chunk size for resumable uploads.

        Returns:
            Chunk size in bytes.
        """
        return self._chunksize

    def mimetype(self):
        """Mime type of the body.

        Returns:
            Mime type.
        """
        return self._mimetype

    def size(self):
        """Size of upload.

        Returns:
            Size of the body.
        """
        return len(self._body)

    def resumable(self):
        """Whether this upload is resumable.

        Returns:
            True if resumable upload or False.
        """
        return self._resumable

    def getbytes(self, begin, length):
        """Get bytes from the media.

        Args:
            begin: int, offset from beginning of file.
            length: int, number of bytes to read, starting at begin.

        Returns:
            A string of bytes read. May be shorter than length if EOF was reached
            first.
        """
        return self._body[begin:begin + length]
# Create an WSGI application suitable for running on App Engine
application = webapp.WSGIApplication(
    # Route table: each URL path maps to its handler class.
    [('/', MainPage), ('/svc', ServiceHandler), ('/about', AboutHandler),
     ('/user', UserHandler)],
    # XXX Set to False in production.
    debug=True
)
def main():
    """Entry point: dispatch the current request through the WSGI app."""
    run_wsgi_app(application)


if __name__ == "__main__":
    main()
| Python |
"""SocksiPy - Python SOCKS module.
Version 1.00
Copyright 2006 Dan-Haim. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of Dan Haim nor the names of his contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This module provides a standard socket-like interface for Python
for tunneling connections through SOCKS proxies.
"""
"""
Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
for use in PyLoris (http://pyloris.sourceforge.net/)
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
mainly to merge bug fixes found in Sourceforge
"""
import base64
import socket
import struct
import sys
if getattr(socket, 'socket', None) is None:
raise ImportError('socket.socket missing, proxy support unusable')
# Supported proxy protocol identifiers (first element of the proxy tuple).
PROXY_TYPE_SOCKS4 = 1
PROXY_TYPE_SOCKS5 = 2
PROXY_TYPE_HTTP = 3
# HTTP proxy without CONNECT support; request headers get rewritten instead.
PROXY_TYPE_HTTP_NO_TUNNEL = 4

# Module-wide default proxy settings applied to new socksocket instances.
_defaultproxy = None
# Reference to the real socket class, kept so it still works after wrapmodule().
_orgsocket = socket.socket
# Exception hierarchy: ProxyError is the common base; subclasses distinguish
# generic failures from protocol-specific ones.
class ProxyError(Exception): pass
# Input/state errors not specific to one proxy protocol.
class GeneralProxyError(ProxyError): pass
# SOCKS5 authentication with the proxy failed.
class Socks5AuthError(ProxyError): pass
# The SOCKS5 proxy rejected the connection request.
class Socks5Error(ProxyError): pass
# The SOCKS4 proxy rejected the connection request.
class Socks4Error(ProxyError): pass
# An HTTP proxy returned a non-200 response to CONNECT.
class HTTPError(ProxyError): pass
# Human-readable messages indexed by the numeric codes carried in the
# exception tuples raised throughout this module.
_generalerrors = ("success",
    "invalid data",
    "not connected",
    "not available",
    "bad proxy type",
    "bad input")

# SOCKS5 reply messages; index corresponds to the server's reply code
# (cf. RFC 1928 section 6).
_socks5errors = ("succeeded",
    "general SOCKS server failure",
    "connection not allowed by ruleset",
    "Network unreachable",
    "Host unreachable",
    "Connection refused",
    "TTL expired",
    "Command not supported",
    "Address type not supported",
    "Unknown error")

# Outcomes of the SOCKS5 username/password sub-negotiation.
_socks5autherrors = ("succeeded",
    "authentication is required",
    "all offered authentication methods were rejected",
    "unknown username or invalid password",
    "unknown error")

# SOCKS4 reply messages; index corresponds to (server reply code - 90).
_socks4errors = ("request granted",
    "request rejected or failed",
    "request rejected because SOCKS server cannot connect to identd on the client",
    "request rejected because the client program and identd report different user-ids",
    "unknown error")
def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
    """setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])

    Sets a default proxy which all further socksocket objects will use,
    unless explicitly changed.
    """
    global _defaultproxy
    # Store the settings as a single tuple mirroring setproxy()'s layout.
    settings = (proxytype, addr, port, rdns, username, password)
    _defaultproxy = settings
def wrapmodule(module):
    """wrapmodule(module)

    Attempts to replace a module's socket library with a SOCKS socket. Must set
    a default proxy using setdefaultproxy(...) first.
    This will only work on modules that import socket directly into the namespace;
    most of the Python Standard Library falls into this category.

    Raises:
        GeneralProxyError: if no default proxy has been configured.
    """
    # Identity comparison is the correct test against the None singleton
    # (PEP 8); '!= None' invokes __ne__ and can misbehave for exotic types.
    if _defaultproxy is not None:
        module.socket.socket = socksocket
    else:
        raise GeneralProxyError((4, "no proxy specified"))
class socksocket(socket.socket):
    """socksocket([family[, type[, proto]]]) -> socket object

    Open a SOCKS enabled socket. The parameters are the same as
    those of the standard socket init. In order for SOCKS to work,
    you must specify family=AF_INET, type=SOCK_STREAM and proto=0.
    """

    def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None):
        _orgsocket.__init__(self, family, type, proto, _sock)
        # Fall back to the module-wide default proxy (set via setdefaultproxy)
        # unless a per-socket proxy is configured later with setproxy().
        if _defaultproxy != None:
            self.__proxy = _defaultproxy
        else:
            # Tuple layout: (proxytype, addr, port, rdns, username, password).
            self.__proxy = (None, None, None, None, None, None)
        self.__proxysockname = None
        self.__proxypeername = None
        # True while requests are tunneled through the proxy; set to False only
        # for PROXY_TYPE_HTTP_NO_TUNNEL plain-HTTP connections, in which case
        # sendall() rewrites outgoing request headers.
        self.__httptunnel = True

    def __recvall(self, count):
        """__recvall(count) -> data
        Receive EXACTLY the number of bytes requested from the socket.
        Blocks until the required number of bytes have been received.
        """
        data = self.recv(count)
        while len(data) < count:
            d = self.recv(count-len(data))
            # An empty read means the peer closed the connection mid-message.
            if not d: raise GeneralProxyError((0, "connection closed unexpectedly"))
            data = data + d
        return data

    def sendall(self, content, *args):
        """ override socket.socket.sendall method to rewrite the header
        for non-tunneling proxies if needed
        """
        if not self.__httptunnel:
            content = self.__rewriteproxy(content)
        return super(socksocket, self).sendall(content, *args)

    def __rewriteproxy(self, header):
        """ rewrite HTTP request headers to support non-tunneling proxies
        (i.e. those which do not support the CONNECT method).
        This only works for HTTP (not HTTPS) since HTTPS requires tunneling.
        """
        host, endpt = None, None
        hdrs = header.split("\r\n")
        # Locate the Host: header and the request line (GET/POST only).
        for hdr in hdrs:
            if hdr.lower().startswith("host:"):
                host = hdr
            elif hdr.lower().startswith("get") or hdr.lower().startswith("post"):
                endpt = hdr
        if host and endpt:
            hdrs.remove(host)
            hdrs.remove(endpt)
            host = host.split(" ")[1]
            endpt = endpt.split(" ")
            if (self.__proxy[4] != None and self.__proxy[5] != None):
                hdrs.insert(0, self.__getauthheader())
            hdrs.insert(0, "Host: %s" % host)
            # Non-tunneling proxies require an absolute URI in the request line.
            hdrs.insert(0, "%s http://%s%s %s" % (endpt[0], host, endpt[1], endpt[2]))
        return "\r\n".join(hdrs)

    def __getauthheader(self):
        # Build a Proxy-Authorization header from the stored username/password.
        auth = self.__proxy[4] + ":" + self.__proxy[5]
        return "Proxy-Authorization: Basic " + base64.b64encode(auth)

    def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
        """setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
        Sets the proxy to be used.
        proxytype - The type of the proxy to be used. Three types
            are supported: PROXY_TYPE_SOCKS4 (including socks4a),
            PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
        addr - The address of the server (IP or DNS).
        port - The port of the server. Defaults to 1080 for SOCKS
            servers and 8080 for HTTP proxy servers.
        rdns - Should DNS queries be performed on the remote side
            (rather than the local side). The default is True.
            Note: This has no effect with SOCKS4 servers.
        username - Username to authenticate with to the server.
            The default is no authentication.
        password - Password to authenticate with to the server.
            Only relevant when username is also provided.
        """
        self.__proxy = (proxytype, addr, port, rdns, username, password)

    def __negotiatesocks5(self, destaddr, destport):
        """__negotiatesocks5(self,destaddr,destport)
        Negotiates a connection through a SOCKS5 server.
        """
        # First we'll send the authentication packages we support.
        if (self.__proxy[4]!=None) and (self.__proxy[5]!=None):
            # The username/password details were supplied to the
            # setproxy method so we support the USERNAME/PASSWORD
            # authentication (in addition to the standard none).
            self.sendall(struct.pack('BBBB', 0x05, 0x02, 0x00, 0x02))
        else:
            # No username/password were entered, therefore we
            # only support connections with no authentication.
            self.sendall(struct.pack('BBB', 0x05, 0x01, 0x00))
        # We'll receive the server's response to determine which
        # method was selected
        chosenauth = self.__recvall(2)
        if chosenauth[0:1] != chr(0x05).encode():
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        # Check the chosen authentication method
        if chosenauth[1:2] == chr(0x00).encode():
            # No authentication is required
            pass
        elif chosenauth[1:2] == chr(0x02).encode():
            # Okay, we need to perform a basic username/password
            # authentication.
            self.sendall(chr(0x01).encode() + chr(len(self.__proxy[4])) + self.__proxy[4] + chr(len(self.__proxy[5])) + self.__proxy[5])
            authstat = self.__recvall(2)
            if authstat[0:1] != chr(0x01).encode():
                # Bad response
                self.close()
                raise GeneralProxyError((1, _generalerrors[1]))
            if authstat[1:2] != chr(0x00).encode():
                # Authentication failed
                self.close()
                raise Socks5AuthError((3, _socks5autherrors[3]))
            # Authentication succeeded
        else:
            # Reaching here is always bad
            self.close()
            # NOTE(review): chosenauth[1] is compared against a one-byte
            # string; under Python 3 chosenauth[1] would be an int, so this
            # branch could never match there -- assumes Python 2, confirm.
            if chosenauth[1] == chr(0xFF).encode():
                raise Socks5AuthError((2, _socks5autherrors[2]))
            else:
                raise GeneralProxyError((1, _generalerrors[1]))
        # Now we can request the actual connection
        req = struct.pack('BBB', 0x05, 0x01, 0x00)
        # If the given destination address is an IP address, we'll
        # use the IPv4 address request even if remote resolving was specified.
        try:
            ipaddr = socket.inet_aton(destaddr)
            req = req + chr(0x01).encode() + ipaddr
        except socket.error:
            # Well it's not an IP number, so it's probably a DNS name.
            if self.__proxy[3]:
                # Resolve remotely
                ipaddr = None
                req = req + chr(0x03).encode() + chr(len(destaddr)).encode() + destaddr
            else:
                # Resolve locally
                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
                req = req + chr(0x01).encode() + ipaddr
        req = req + struct.pack(">H", destport)
        self.sendall(req)
        # Get the response
        resp = self.__recvall(4)
        if resp[0:1] != chr(0x05).encode():
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        elif resp[1:2] != chr(0x00).encode():
            # Connection failed
            self.close()
            if ord(resp[1:2])<=8:
                raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])]))
            else:
                raise Socks5Error((9, _socks5errors[9]))
        # Get the bound address/port
        elif resp[3:4] == chr(0x01).encode():
            # IPv4 address: fixed four bytes.
            boundaddr = self.__recvall(4)
        elif resp[3:4] == chr(0x03).encode():
            # Domain name: one length byte followed by that many bytes.
            resp = resp + self.recv(1)
            boundaddr = self.__recvall(ord(resp[4:5]))
        else:
            self.close()
            raise GeneralProxyError((1,_generalerrors[1]))
        boundport = struct.unpack(">H", self.__recvall(2))[0]
        self.__proxysockname = (boundaddr, boundport)
        if ipaddr != None:
            self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
        else:
            self.__proxypeername = (destaddr, destport)

    def getproxysockname(self):
        """getsockname() -> address info
        Returns the bound IP address and port number at the proxy.
        """
        return self.__proxysockname

    def getproxypeername(self):
        """getproxypeername() -> address info
        Returns the IP and port number of the proxy.
        """
        return _orgsocket.getpeername(self)

    def getpeername(self):
        """getpeername() -> address info
        Returns the IP address and port number of the destination
        machine (note: getproxypeername returns the proxy)
        """
        return self.__proxypeername

    def __negotiatesocks4(self,destaddr,destport):
        """__negotiatesocks4(self,destaddr,destport)
        Negotiates a connection through a SOCKS4 server.
        """
        # Check if the destination address provided is an IP address
        rmtrslv = False
        try:
            ipaddr = socket.inet_aton(destaddr)
        except socket.error:
            # It's a DNS name. Check where it should be resolved.
            if self.__proxy[3]:
                # Remote resolving (SOCKS4A): send the 0.0.0.1 placeholder.
                ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)
                rmtrslv = True
            else:
                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
        # Construct the request packet
        req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr
        # The username parameter is considered userid for SOCKS4
        if self.__proxy[4] != None:
            req = req + self.__proxy[4]
        req = req + chr(0x00).encode()
        # DNS name if remote resolving is required
        # NOTE: This is actually an extension to the SOCKS4 protocol
        # called SOCKS4A and may not be supported in all cases.
        if rmtrslv:
            req = req + destaddr + chr(0x00).encode()
        self.sendall(req)
        # Get the response from the server
        resp = self.__recvall(8)
        if resp[0:1] != chr(0x00).encode():
            # Bad data
            self.close()
            raise GeneralProxyError((1,_generalerrors[1]))
        if resp[1:2] != chr(0x5A).encode():
            # Server returned an error
            self.close()
            if ord(resp[1:2]) in (91, 92, 93):
                self.close()
                raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90]))
            else:
                raise Socks4Error((94, _socks4errors[4]))
        # Get the bound address/port
        self.__proxysockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
        # NOTE(review): rmtrslv is a bool here, so 'rmtrslv != None' is always
        # True and the destaddr branch below is unreachable; this looks like
        # it was meant to be 'if rmtrslv:' -- confirm against upstream SocksiPy.
        if rmtrslv != None:
            self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
        else:
            self.__proxypeername = (destaddr, destport)

    def __negotiatehttp(self, destaddr, destport):
        """__negotiatehttp(self,destaddr,destport)
        Negotiates a connection through an HTTP server.
        """
        # If we need to resolve locally, we do this now
        if not self.__proxy[3]:
            addr = socket.gethostbyname(destaddr)
        else:
            addr = destaddr
        # Issue a CONNECT request for the destination host:port.
        headers = ["CONNECT ", addr, ":", str(destport), " HTTP/1.1\r\n"]
        headers += ["Host: ", destaddr, "\r\n"]
        if (self.__proxy[4] != None and self.__proxy[5] != None):
            headers += [self.__getauthheader(), "\r\n"]
        headers.append("\r\n")
        self.sendall("".join(headers).encode())
        # We read the response until we get the string "\r\n\r\n"
        resp = self.recv(1)
        while resp.find("\r\n\r\n".encode()) == -1:
            resp = resp + self.recv(1)
        # We just need the first line to check if the connection
        # was successful
        statusline = resp.splitlines()[0].split(" ".encode(), 2)
        if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()):
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        try:
            statuscode = int(statusline[1])
        except ValueError:
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        if statuscode != 200:
            self.close()
            raise HTTPError((statuscode, statusline[2]))
        # The bound address at the proxy is not reported for HTTP proxies.
        self.__proxysockname = ("0.0.0.0", 0)
        self.__proxypeername = (addr, destport)

    def connect(self, destpair):
        """connect(self, destpair)
        Connects to the specified destination through a proxy.
        destpair - A tuple of the IP/DNS address and the port number.
        (identical to socket's connect).
        To select the proxy server use setproxy().
        """
        # Do a minimal input check first
        if (not type(destpair) in (list,tuple)) or (len(destpair) < 2) or (type(destpair[0]) != type('')) or (type(destpair[1]) != int):
            raise GeneralProxyError((5, _generalerrors[5]))
        if self.__proxy[0] == PROXY_TYPE_SOCKS5:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                # Default SOCKS port.
                portnum = 1080
            _orgsocket.connect(self, (self.__proxy[1], portnum))
            self.__negotiatesocks5(destpair[0], destpair[1])
        elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 1080
            _orgsocket.connect(self,(self.__proxy[1], portnum))
            self.__negotiatesocks4(destpair[0], destpair[1])
        elif self.__proxy[0] == PROXY_TYPE_HTTP:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                # Default HTTP proxy port.
                portnum = 8080
            _orgsocket.connect(self,(self.__proxy[1], portnum))
            self.__negotiatehttp(destpair[0], destpair[1])
        elif self.__proxy[0] == PROXY_TYPE_HTTP_NO_TUNNEL:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 8080
            _orgsocket.connect(self,(self.__proxy[1],portnum))
            if destpair[1] == 443:
                # HTTPS still has to be tunneled with CONNECT.
                self.__negotiatehttp(destpair[0],destpair[1])
            else:
                # Plain HTTP: skip CONNECT; sendall() will rewrite headers.
                self.__httptunnel = False
        elif self.__proxy[0] == None:
            # No proxy configured: connect directly.
            _orgsocket.connect(self, (destpair[0], destpair[1]))
        else:
            raise GeneralProxyError((4, _generalerrors[4]))
| Python |
"""
iri2uri
Converts an IRI to a URI.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = []
__version__ = "1.0.0"
__license__ = "MIT"
__history__ = """
"""
import urlparse
# Convert an IRI to a URI following the rules in RFC 3987
#
# The characters we need to encode and escape are defined in the spec:
#
# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
# / %xD0000-DFFFD / %xE1000-EFFFD
# Codepoint ranges ('ucschar' and 'iprivate' from RFC 3987) whose characters
# must be percent-encoded when converting an IRI to a URI.
escape_range = [
    (0xA0, 0xD7FF),
    (0xE000, 0xF8FF),
    (0xF900, 0xFDCF),
    (0xFDF0, 0xFFEF),
    (0x10000, 0x1FFFD),
    (0x20000, 0x2FFFD),
    (0x30000, 0x3FFFD),
    (0x40000, 0x4FFFD),
    (0x50000, 0x5FFFD),
    (0x60000, 0x6FFFD),
    (0x70000, 0x7FFFD),
    (0x80000, 0x8FFFD),
    (0x90000, 0x9FFFD),
    (0xA0000, 0xAFFFD),
    (0xB0000, 0xBFFFD),
    (0xC0000, 0xCFFFD),
    (0xD0000, 0xDFFFD),
    (0xE1000, 0xEFFFD),
    (0xF0000, 0xFFFFD),
    (0x100000, 0x10FFFD),
]


def encode(c):
    """Return *c* percent-encoded (as its UTF-8 octets) when its codepoint
    falls in one of the escape ranges; otherwise return *c* unchanged."""
    codepoint = ord(c)
    for low, high in escape_range:
        # The ranges are sorted, so once we pass the codepoint we can stop.
        if codepoint < low:
            break
        if low <= codepoint <= high:
            return "".join(["%%%2X" % ord(octet) for octet in c.encode('utf-8')])
    return c
def iri2uri(uri):
    """Convert an IRI to a URI. Note that IRIs must be
    passed in a unicode strings. That is, do not utf-8 encode
    the IRI before passing it into the function."""
    if isinstance(uri, unicode):
        scheme, authority, path, query, fragment = urlparse.urlsplit(uri)
        # Only the authority is IDNA-encoded; the other components are
        # percent-encoded character by character below.
        authority = authority.encode('idna')
        uri = urlparse.urlunsplit((scheme, authority, path, query, fragment))
        # For each character in 'ucschar' or 'iprivate':
        # 1. encode as utf-8; 2. then %-encode each octet of that utf-8.
        uri = "".join([encode(ch) for ch in uri])
    return uri
# Self-test: run this module directly to exercise iri2uri with unittest.
if __name__ == "__main__":
    import unittest

    class Test(unittest.TestCase):

        def test_uris(self):
            """Test that URIs are invariant under the transformation."""
            # Pure-ASCII absolute URIs must pass through unchanged.
            invariant = [
                u"ftp://ftp.is.co.za/rfc/rfc1808.txt",
                u"http://www.ietf.org/rfc/rfc2396.txt",
                u"ldap://[2001:db8::7]/c=GB?objectClass?one",
                u"mailto:John.Doe@example.com",
                u"news:comp.infosystems.www.servers.unix",
                u"tel:+1-816-555-1212",
                u"telnet://192.0.2.16:80/",
                u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ]
            for uri in invariant:
                self.assertEqual(uri, iri2uri(uri))

        def test_iri(self):
            """ Test that the right type of escaping is done for each part of the URI."""
            # Host is IDNA-encoded; path/query/fragment are percent-encoded.
            self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}"))
            self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}"))
            self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}"))
            self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}"))
            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))
            # iri2uri must be idempotent.
            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")))
            # A pre-utf-8-encoded (byte) string must NOT be double-converted.
            self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8')))

    unittest.main()
| Python |
from __future__ import generators
"""
httplib2
A caching http interface that supports ETags and gzip
to conserve bandwidth.
Requires Python 2.3 or later
Changelog:
2007-08-18, Rick: Modified so it's able to use a socks proxy if needed.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = ["Thomas Broyer (t.broyer@ltgt.net)",
"James Antill",
"Xavier Verges Farrero",
"Jonathan Feinberg",
"Blair Zajac",
"Sam Ruby",
"Louis Nyffenegger"]
__license__ = "MIT"
__version__ = "0.7.2"
import re
import sys
import email
import email.Utils
import email.Message
import email.FeedParser
import StringIO
import gzip
import zlib
import httplib
import urlparse
import base64
import os
import copy
import calendar
import time
import random
import errno
# Remove deprecated warning in Python 2.6.
try:
from hashlib import sha1 as _sha, md5 as _md5
except ImportError:
import sha
import md5
_sha = sha.new
_md5 = md5.new
import hmac
from gettext import gettext as _
import socket
try:
from httplib2 import socks
except ImportError:
socks = None
# Build the appropriate socket wrapper for ssl
try:
    import ssl # python 2.6
    ssl_SSLError = ssl.SSLError
    def _ssl_wrap_socket(sock, key_file, cert_file,
                         disable_validation, ca_certs):
        # Map the boolean flag onto the ssl module's certificate requirement.
        if disable_validation:
            cert_reqs = ssl.CERT_NONE
        else:
            cert_reqs = ssl.CERT_REQUIRED
        # We should be specifying SSL version 3 or TLS v1, but the ssl module
        # doesn't expose the necessary knobs. So we need to go with the default
        # of SSLv23.
        return ssl.wrap_socket(sock, keyfile=key_file, certfile=cert_file,
                               cert_reqs=cert_reqs, ca_certs=ca_certs)
except (AttributeError, ImportError):
    # Fallback for Pythons without the ssl module: certificate validation is
    # unsupported, so refuse unless validation was explicitly disabled.
    ssl_SSLError = None
    def _ssl_wrap_socket(sock, key_file, cert_file,
                         disable_validation, ca_certs):
        if not disable_validation:
            raise CertificateValidationUnsupported(
                "SSL certificate validation is not supported without "
                "the ssl module installed. To avoid this error, install "
                "the ssl module, or explicity disable validation.")
        ssl_sock = socket.ssl(sock, key_file, cert_file)
        return httplib.FakeSocket(sock, ssl_sock)
# Use the real IRI-to-URI converter where available; on very old Pythons
# fall back to the identity function.
if sys.version_info >= (2,3):
    from iri2uri import iri2uri
else:
    def iri2uri(uri):
        return uri
def has_timeout(timeout): # python 2.6
    """Return True when *timeout* is a real per-call timeout value.

    Both None and the socket module's _GLOBAL_DEFAULT_TIMEOUT sentinel
    (present on Python 2.6+) mean "no explicit timeout".
    """
    if hasattr(socket, '_GLOBAL_DEFAULT_TIMEOUT'):
        return timeout is not None and timeout is not socket._GLOBAL_DEFAULT_TIMEOUT
    return timeout is not None
# Public API of the module.
__all__ = ['Http', 'Response', 'ProxyInfo', 'HttpLib2Error',
    'RedirectMissingLocation', 'RedirectLimit', 'FailedToDecompressContent',
    'UnimplementedDigestAuthOptionError', 'UnimplementedHmacDigestAuthOptionError',
    'debuglevel', 'ProxiesUnavailableError']

# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0
# Python 2.3 support
if sys.version_info < (2,4):
    # Minimal backport of the sorted() builtin (added in Python 2.4).
    # NOTE: unlike the builtin, this sorts *in place* and returns the same
    # list object rather than a copy.
    def sorted(seq):
        seq.sort()
        return seq
# Python 2.3 support
def HTTPResponse__getheaders(self):
    """Return the response headers as a list of (header, value) tuples."""
    msg = self.msg
    if msg is None:
        raise httplib.ResponseNotReady()
    return msg.items()
# Monkey-patch getheaders onto HTTPResponse for Python versions whose
# httplib does not provide it (< 2.4).
if not hasattr(httplib.HTTPResponse, 'getheaders'):
    httplib.HTTPResponse.getheaders = HTTPResponse__getheaders
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception): pass

# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
    def __init__(self, desc, response, content):
        # Keep the response and body so callers can inspect or reuse them
        # after catching the exception.
        self.response = response
        self.content = content
        HttpLib2Error.__init__(self, desc)

# Response-carrying errors (names describe the failure condition).
class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass
class RedirectLimit(HttpLib2ErrorWithResponse): pass
class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass
class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass

# Plain errors without an attached response.
class MalformedHeader(HttpLib2Error): pass
class RelativeURIError(HttpLib2Error): pass
class ServerNotFoundError(HttpLib2Error): pass
class ProxiesUnavailableError(HttpLib2Error): pass
class CertificateValidationUnsupported(HttpLib2Error): pass
class SSLHandshakeError(HttpLib2Error): pass
class NotSupportedOnThisPlatform(HttpLib2Error): pass

class CertificateHostnameMismatch(SSLHandshakeError):
    def __init__(self, desc, host, cert):
        HttpLib2Error.__init__(self, desc)
        # The hostname that was contacted and the certificate that did not
        # match it, kept for diagnostics.
        self.host = host
        self.cert = cert
# Open Items:
# -----------
# Proxy support
# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?)
# Pluggable cache storage (supports storing the cache in
# flat files by default. We need a plug-in architecture
# that can support Berkeley DB and Squid)
# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.
# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
DEFAULT_MAX_REDIRECTS = 5

# Default CA certificates file bundled with httplib2.
# Resolved relative to this module's own location on disk.
CA_CERTS = os.path.join(
    os.path.dirname(os.path.abspath(__file__ )), "cacerts.txt")
# Which headers are hop-by-hop headers by default
HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']
def _get_end2end_headers(response):
hopbyhop = list(HOP_BY_HOP)
hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')])
return [header for header in response.keys() if header not in hopbyhop]
# Appendix B of RFC 3986: the canonical URI-splitting regular expression.
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")

def parse_uri(uri):
    """Parses a URI using the regex given in Appendix B of RFC 3986.

    (scheme, authority, path, query, fragment) = parse_uri(uri)

    Missing components come back as None.
    """
    groups = URI.match(uri).groups()
    return groups[1], groups[3], groups[4], groups[6], groups[8]
def urlnorm(uri):
    """Normalize *uri* for use as a cache key.

    Lower-cases the scheme and authority, defaults an empty path to "/", and
    returns (scheme, authority, request_uri, defrag_uri), where defrag_uri is
    the absolute URI without its fragment.

    Raises:
        RelativeURIError: if the URI lacks a scheme or an authority.
    """
    (scheme, authority, path, query, fragment) = parse_uri(uri)
    if not scheme or not authority:
        raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
    # Scheme and authority are case-insensitive (RFC 3986); the original
    # lower-cased the scheme twice -- the redundant second call is removed.
    authority = authority.lower()
    scheme = scheme.lower()
    if not path:
        path = "/"
    # Could do syntax based normalization of the URI before
    # computing the digest. See Section 6.2.2 of Std 66.
    request_uri = query and "?".join([path, query]) or path
    defrag_uri = scheme + "://" + authority + request_uri
    return scheme, authority, request_uri, defrag_uri
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(r'^\w+://')
re_slash = re.compile(r'[?/:|]+')

def safename(filename):
    """Return a filename suitable for the cache.

    Strips dangerous and common characters to create a filename we
    can use to store the cache in.
    """
    try:
        # IDNA-encode URL-looking names so non-ASCII hostnames become ASCII.
        # NOTE(review): this is Python 2 str/unicode handling (str.decode,
        # the unicode builtin below) -- not portable to Python 3 as written.
        if re_url_scheme.match(filename):
            if isinstance(filename,str):
                filename = filename.decode('utf-8')
                filename = filename.encode('idna')
            else:
                filename = filename.encode('idna')
    except UnicodeError:
        # Leave names that cannot be IDNA-encoded unchanged.
        pass
    if isinstance(filename,unicode):
        filename=filename.encode('utf-8')
    # Hash the *original* name so distinct URLs never collide even after the
    # lossy character stripping below.
    filemd5 = _md5(filename).hexdigest()
    filename = re_url_scheme.sub("", filename)
    filename = re_slash.sub(",", filename)
    # limit length of filename
    if len(filename)>200:
        filename=filename[:200]
    return ",".join((filename, filemd5))
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
return dict([ (key.lower(), NORMALIZE_SPACE.sub(value, ' ').strip()) for (key, value) in headers.iteritems()])
def _parse_cache_control(headers):
retval = {}
if headers.has_key('cache-control'):
parts = headers['cache-control'].split(',')
parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")]
parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
retval = dict(parts_with_args + parts_wo_args)
return retval
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
# Set to true to turn on, usefull for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
# In regex below:
# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$")
WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$")
UNQUOTE_PAIRS = re.compile(r'\\(.)')
def _parse_www_authenticate(headers, headername='www-authenticate'):
"""Returns a dictionary of dictionaries, one dict
per auth_scheme."""
retval = {}
if headers.has_key(headername):
try:
authenticate = headers[headername].strip()
www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
while authenticate:
# Break off the scheme at the beginning of the line
if headername == 'authentication-info':
(auth_scheme, the_rest) = ('digest', authenticate)
else:
(auth_scheme, the_rest) = authenticate.split(" ", 1)
# Now loop over all the key value pairs that come after the scheme,
# being careful not to roll into the next scheme
match = www_auth.search(the_rest)
auth_params = {}
while match:
if match and len(match.groups()) == 3:
(key, value, the_rest) = match.groups()
auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')])
match = www_auth.search(the_rest)
retval[auth_scheme.lower()] = auth_params
authenticate = the_rest.strip()
except ValueError:
raise MalformedHeader("WWW-Authenticate")
return retval
def _entry_disposition(response_headers, request_headers):
    """Determine freshness from the Date, Expires and Cache-Control headers.

    Returns one of "FRESH", "STALE" or "TRANSPARENT".

    We don't handle the following:

    1. Cache-Control: max-stale
    2. Age: headers are not used in the calculations.

    Not that this algorithm is simpler than you might think
    because we are operating as a private (non-shared) cache.
    This lets us ignore 's-maxage'. We can also ignore
    'proxy-invalidate' since we aren't a proxy.
    We will never return a stale document as
    fresh as a design decision, and thus the non-implementation
    of 'max-stale'. This also lets us safely ignore 'must-revalidate'
    since we operate as if every server has sent 'must-revalidate'.
    Since we are private we get to ignore both 'public' and
    'private' parameters. We also ignore 'no-transform' since
    we don't do any transformations.
    The 'no-store' parameter is handled at a higher level.
    So the only Cache-Control parameters we look at are:

    no-cache
    only-if-cached
    max-age
    min-fresh

    NOTE: mutates *request_headers* -- a "Pragma: no-cache" is promoted
    to "Cache-Control: no-cache" so downstream caches see it too.
    """
    retval = "STALE"
    cc = _parse_cache_control(request_headers)
    cc_response = _parse_cache_control(response_headers)

    # has_key() is deprecated throughout; 'in' is equivalent.
    if 'pragma' in request_headers and request_headers['pragma'].lower().find('no-cache') != -1:
        retval = "TRANSPARENT"
        if 'cache-control' not in request_headers:
            request_headers['cache-control'] = 'no-cache'
    elif 'no-cache' in cc:
        retval = "TRANSPARENT"
    elif 'no-cache' in cc_response:
        retval = "STALE"
    elif 'only-if-cached' in cc:
        retval = "FRESH"
    elif 'date' in response_headers:
        date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date']))
        now = time.time()
        current_age = max(0, now - date)
        if 'max-age' in cc_response:
            try:
                freshness_lifetime = int(cc_response['max-age'])
            except ValueError:
                freshness_lifetime = 0
        elif 'expires' in response_headers:
            expires = email.Utils.parsedate_tz(response_headers['expires'])
            if None == expires:
                freshness_lifetime = 0
            else:
                freshness_lifetime = max(0, calendar.timegm(expires) - date)
        else:
            freshness_lifetime = 0
        # The request's own max-age overrides the response's.
        if 'max-age' in cc:
            try:
                freshness_lifetime = int(cc['max-age'])
            except ValueError:
                freshness_lifetime = 0
        if 'min-fresh' in cc:
            try:
                min_fresh = int(cc['min-fresh'])
            except ValueError:
                min_fresh = 0
            current_age += min_fresh
        if freshness_lifetime > current_age:
            retval = "FRESH"
    return retval
def _decompressContent(response, new_content):
content = new_content
try:
encoding = response.get('content-encoding', None)
if encoding in ['gzip', 'deflate']:
if encoding == 'gzip':
content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
if encoding == 'deflate':
content = zlib.decompress(content)
response['content-length'] = str(len(content))
# Record the historical presence of the encoding in a way the won't interfere.
response['-content-encoding'] = response['content-encoding']
del response['content-encoding']
except IOError:
content = ""
raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content)
return content
def _updateCache(request_headers, response_headers, content, cache, cachekey):
    """Store a response body plus headers in *cache* under *cachekey*.

    Honors 'no-store' in either direction's Cache-Control (deleting any
    existing entry), copies the response headers into an RFC 822 style
    block with '-varied-*' annotations recording the request header
    values named by Vary, and rewrites a 304 status as 200 since the
    cached entity is the complete representation.
    """
    if cachekey:
        cc = _parse_cache_control(request_headers)
        cc_response = _parse_cache_control(response_headers)
        # has_key()/iteritems() are deprecated; 'in'/items() are equivalent.
        if 'no-store' in cc or 'no-store' in cc_response:
            cache.delete(cachekey)
        else:
            info = email.Message.Message()
            for key, value in response_headers.items():
                if key not in ['status','content-encoding','transfer-encoding']:
                    info[key] = value

            # Add annotations to the cache to indicate what headers
            # are variant for this request.
            vary = response_headers.get('vary', None)
            if vary:
                vary_headers = vary.lower().replace(' ', '').split(',')
                for header in vary_headers:
                    key = '-varied-%s' % header
                    try:
                        info[key] = request_headers[header]
                    except KeyError:
                        pass

            status = response_headers.status
            if status == 304:
                # 304 means the cached body is still valid; store as a
                # normal 200 so later reads see a complete response.
                status = 200

            status_header = 'status: %d\r\n' % status

            header_str = info.as_string()

            # Normalize bare CR/LF to CRLF line endings.
            header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
            text = "".join([status_header, header_str, content])

            cache.set(cachekey, text)
def _cnonce():
    """Generate a 16-character hex client nonce for Digest authentication.

    Hashes the current time plus 20 random digits with MD5 -- not
    cryptographically strong, but sufficient for a Digest cnonce.
    """
    # randrange's stop bound is exclusive: the original randrange(0, 9)
    # could never select the digit '9', biasing the nonce alphabet.
    dig = _md5("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 10)] for i in range(20)])).hexdigest()
    return dig[:16]
def _wsse_username_token(cnonce, iso_now, password):
    """Compute the WSSE PasswordDigest: Base64(SHA1(cnonce + created + password))."""
    digest = _sha("%s%s%s" % (cnonce, iso_now, password)).digest()
    return base64.b64encode(digest).strip()
# For credentials we need two things, first
# a pool of credentials to try (not necessarily tied to Basic, Digest, etc.)
# Then we also need a list of URIs that have already demanded authentication
# That list is tricky since sub-URIs can take the same auth, or the
# auth scheme may change as you descend the tree.
# So we also need each Auth instance to be able to tell us
# how close to the 'top' it is.
class Authentication(object):
    """Base class for per-scheme authorization handlers.

    Remembers the credentials and the (host, path) scope they were
    issued for, and decorates outgoing requests with the appropriate
    Authorization header.
    """
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
        self.path = path
        self.host = host
        self.credentials = credentials
        self.http = http

    def depth(self, request_uri):
        """Return how many path segments *request_uri* lies below our scope."""
        # Fix: the original parsed the URI here but never used the result;
        # only the raw URI relative to self.path matters.
        return request_uri[len(self.path):].count("/")

    def inscope(self, host, request_uri):
        """Return True if this handler's scope covers (host, request_uri)."""
        # XXX Should we normalize the request_uri?
        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
        return (host == self.host) and path.startswith(self.path)

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header. Override this in sub-classes."""
        pass

    def response(self, response, content):
        """Gives us a chance to update with new nonces
        or such returned from the last authorized response.
        Override this in sub-classes if necessary.

        Return TRUE if the request is to be retried, for
        example Digest may return stale=true.
        """
        return False
class BasicAuthentication(Authentication):
    """Handler for HTTP Basic authentication (RFC 2617)."""
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header."""
        token = base64.b64encode("%s:%s" % self.credentials).strip()
        headers['authorization'] = 'Basic ' + token
class DigestAuthentication(Authentication):
    """Only do qop='auth' and MD5, since that
    is all Apache currently implements"""
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        self.challenge = challenge['digest']
        qop = self.challenge.get('qop', 'auth')
        self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None
        if self.challenge['qop'] is None:
            raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop))
        self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper()
        if self.challenge['algorithm'] != 'MD5':
            raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
        # A1 per RFC 2617: username:realm:password
        self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]])
        self.challenge['nc'] = 1

    def request(self, method, request_uri, headers, content, cnonce = None):
        """Modify the request headers to add a Digest Authorization
        header, incrementing the nonce count afterwards."""
        H = lambda x: _md5(x).hexdigest()
        KD = lambda s, d: H("%s:%s" % (s, d))
        A2 = "".join([method, ":", request_uri])
        self.challenge['cnonce'] = cnonce or _cnonce()
        request_digest  = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (self.challenge['nonce'],
                    '%08x' % self.challenge['nc'],
                    self.challenge['cnonce'],
                    self.challenge['qop'], H(A2)
                    ))
        headers['authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % (
                self.credentials[0],
                self.challenge['realm'],
                self.challenge['nonce'],
                request_uri,
                self.challenge['algorithm'],
                request_digest,
                self.challenge['qop'],
                self.challenge['nc'],
                self.challenge['cnonce'],
                )
        if self.challenge.get('opaque'):
            headers['authorization'] += ', opaque="%s"' % self.challenge['opaque']
        self.challenge['nc'] += 1

    def response(self, response, content):
        """Pick up stale=true retries and nextnonce updates.

        Returns True when the request should be retried with a fresh
        nonce. has_key() is deprecated; 'in' is used instead.
        """
        if 'authentication-info' not in response:
            challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {})
            if 'true' == challenge.get('stale'):
                self.challenge['nonce'] = challenge['nonce']
                self.challenge['nc'] = 1
                return True
        else:
            updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {})

            if 'nextnonce' in updated_challenge:
                self.challenge['nonce'] = updated_challenge['nextnonce']
                self.challenge['nc'] = 1
        return False
class HmacDigestAuthentication(Authentication):
    """Adapted from Robert Sayre's code and DigestAuthentication above."""
    __author__ = "Thomas Broyer (t.broyer@ltgt.net)"

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        self.challenge = challenge['hmacdigest']
        # TODO: self.challenge['domain']
        self.challenge['reason'] = self.challenge.get('reason', 'unauthorized')
        if self.challenge['reason'] not in ['unauthorized', 'integrity']:
            self.challenge['reason'] = 'unauthorized'
        self.challenge['salt'] = self.challenge.get('salt', '')
        if not self.challenge.get('snonce'):
            raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty."))
        self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1')
        if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']:
            raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
        self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1')
        if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']:
            raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." % self.challenge['pw-algorithm']))
        # hashmod signs the request; pwhashmod derives the shared key.
        # NOTE(review): pwhashmod.new(...) uses the old md5/sha module
        # interface -- assumes _md5/_sha are module-like objects with
        # .new(); verify against the aliases defined earlier in this file.
        if self.challenge['algorithm'] == 'HMAC-MD5':
            self.hashmod = _md5
        else:
            self.hashmod = _sha
        if self.challenge['pw-algorithm'] == 'MD5':
            self.pwhashmod = _md5
        else:
            self.pwhashmod = _sha
        # Key derivation: hash(username ":" hash(password + salt) ":" realm).
        self.key = "".join([self.credentials[0], ":",
                    self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(),
                    ":", self.challenge['realm']
                    ])
        self.key = self.pwhashmod.new(self.key).hexdigest().lower()

    def request(self, method, request_uri, headers, content):
        """Modify the request headers"""
        # The signature covers the end-to-end header values plus the method,
        # URI, client nonce and server nonce.
        keys = _get_end2end_headers(headers)
        keylist = "".join(["%s " % k for k in keys])
        headers_val = "".join([headers[k] for k in keys])
        created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime())
        cnonce = _cnonce()
        request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val)
        request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower()
        headers['authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % (
                self.credentials[0],
                self.challenge['realm'],
                self.challenge['snonce'],
                cnonce,
                request_uri,
                created,
                request_digest,
                keylist,
                )

    def response(self, response, content):
        # Retry when the server reports an integrity failure or stale nonce.
        challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {})
        if challenge.get('reason') in ['integrity', 'stale']:
            return True
        return False
class WsseAuthentication(Authentication):
    """This is thinly tested and should not be relied upon.
    At this time there isn't any third party server to test against.
    Blogger and TypePad implemented this algorithm at one point
    but Blogger has since switched to Basic over HTTPS and
    TypePad has implemented it wrong, by never issuing a 401
    challenge but instead requiring your client to telepathically know that
    their endpoint is expecting WSSE profile="UsernameToken"."""
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header."""
        # Build the UsernameToken digest from a fresh nonce and timestamp.
        iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
        cnonce = _cnonce()
        password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1])
        headers['authorization'] = 'WSSE profile="UsernameToken"'
        headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % (
                self.credentials[0],
                password_digest,
                cnonce,
                iso_now)
class GoogleLoginAuthentication(Authentication):
    """ClientLogin ("GoogleLogin") authentication for Google services.

    Performs the ClientLogin POST during construction and stores the
    resulting Auth token for use on subsequent requests.
    """
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        from urllib import urlencode
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        service = challenge['googlelogin'].get('service', 'xapi')
        # Bloggger actually returns the service in the challenge
        # For the rest we guess based on the URI
        if service == 'xapi' and request_uri.find("calendar") > 0:
            service = "cl"
        # No point in guessing Base or Spreadsheet
        #elif request_uri.find("spreadsheets") > 0:
        #    service = "wise"

        form = dict(Email=credentials[0], Passwd=credentials[1],
                    service=service, source=headers['user-agent'])
        resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(form), headers={'Content-Type': 'application/x-www-form-urlencoded'})
        fields = dict([tuple(line.split("=", 1)) for line in content.split('\n') if line])
        if resp.status == 403:
            self.Auth = ""
        else:
            self.Auth = fields['Auth']

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header."""
        headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
# Registry mapping a challenge scheme name to its handler class.
AUTH_SCHEME_CLASSES = {
    "basic": BasicAuthentication,
    "wsse": WsseAuthentication,
    "digest": DigestAuthentication,
    "hmacdigest": HmacDigestAuthentication,
    "googlelogin": GoogleLoginAuthentication
}

# Preference order when a server offers several schemes: tried first to last.
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
class FileCache(object):
    """Uses a local directory as a store for cached files.
    Not really safe to use if multiple threads or processes are going to
    be running on the same cache.
    """
    def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
        """cache: directory name; safe: maps a key to a safe filename."""
        self.cache = cache
        self.safe = safe
        if not os.path.exists(cache):
            os.makedirs(self.cache)

    def get(self, key):
        """Return the cached bytes for *key*, or None if absent/unreadable."""
        retval = None
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        try:
            # open() replaces the deprecated file() builtin, and the
            # with-statement closes the handle even if read() raises.
            with open(cacheFullPath, "rb") as f:
                retval = f.read()
        except IOError:
            # A missing or unreadable entry is simply a cache miss.
            pass
        return retval

    def set(self, key, value):
        """Store *value* under *key*, overwriting any existing entry."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        with open(cacheFullPath, "wb") as f:
            f.write(value)

    def delete(self, key):
        """Remove the entry for *key*, if present."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        if os.path.exists(cacheFullPath):
            os.remove(cacheFullPath)
class Credentials(object):
    """A store of (domain, name, password) triples.

    An empty domain acts as a wildcard that matches every host.
    """
    def __init__(self):
        self.credentials = []

    def add(self, name, password, domain=""):
        """Register a name/password pair, optionally scoped to *domain*."""
        self.credentials.append((domain.lower(), name, password))

    def clear(self):
        """Forget every stored credential."""
        self.credentials = []

    def iter(self, domain):
        """Yield (name, password) pairs applicable to *domain*."""
        for entry in self.credentials:
            cdomain, name, password = entry
            if cdomain in ("", domain):
                yield (name, password)
class KeyCerts(Credentials):
    """Identical to Credentials except that
    name/password are mapped to key/cert."""
    # Inherits add/clear/iter unchanged; only the meaning of the stored
    # pair differs (client key and certificate instead of name/password).
    pass
class ProxyInfo(object):
    """Collect information required to use a proxy."""
    def __init__(self, proxy_type, proxy_host, proxy_port, proxy_rdns=None, proxy_user=None, proxy_pass=None):
        """The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX
        constants. For example:

        p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', proxy_port=8000)
        """
        self.proxy_type = proxy_type
        self.proxy_host = proxy_host
        self.proxy_port = proxy_port
        self.proxy_rdns = proxy_rdns
        self.proxy_user = proxy_user
        self.proxy_pass = proxy_pass

    def astuple(self):
        """Return every setting as a 6-tuple, in constructor order."""
        return (self.proxy_type, self.proxy_host, self.proxy_port,
                self.proxy_rdns, self.proxy_user, self.proxy_pass)

    def isgood(self):
        """True when both a proxy host and a proxy port were supplied."""
        return (self.proxy_host != None) and (self.proxy_port != None)
class HTTPConnectionWithTimeout(httplib.HTTPConnection):
    """
    HTTPConnection subclass that supports timeouts

    All timeouts are in seconds. If None is passed for timeout then
    Python's default timeout for sockets will be used. See for example
    the docs of socket.setdefaulttimeout():
    http://docs.python.org/library/socket.html#socket.setdefaulttimeout
    """
    def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
        # timeout: per-socket timeout in seconds (None = interpreter default).
        # proxy_info: optional ProxyInfo routed through the socks module.
        httplib.HTTPConnection.__init__(self, host, port, strict)
        self.timeout = timeout
        self.proxy_info = proxy_info

    def connect(self):
        """Connect to the host and port specified in __init__."""
        # Mostly verbatim from httplib.py.
        if self.proxy_info and socks is None:
            raise ProxiesUnavailableError(
                'Proxy support missing but proxy use was requested!')
        msg = "getaddrinfo returns an empty list"
        # Try each address returned by getaddrinfo until one connects.
        for res in socket.getaddrinfo(self.host, self.port, 0,
                socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                if self.proxy_info and self.proxy_info.isgood():
                    # Route through the configured proxy via socksocket.
                    self.sock = socks.socksocket(af, socktype, proto)
                    self.sock.setproxy(*self.proxy_info.astuple())
                else:
                    self.sock = socket.socket(af, socktype, proto)
                    self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                # Different from httplib: support timeouts.
                if has_timeout(self.timeout):
                    self.sock.settimeout(self.timeout)
                # End of difference from httplib.
                if self.debuglevel > 0:
                    print "connect: (%s, %s)" % (self.host, self.port)

                self.sock.connect(sa)
            except socket.error, msg:
                # This address failed; close and try the next candidate.
                if self.debuglevel > 0:
                    print 'connect fail:', (self.host, self.port)
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            # Every candidate address failed; re-raise the last error.
            raise socket.error, msg
class HTTPSConnectionWithTimeout(httplib.HTTPSConnection):
"""
This class allows communication via SSL.
All timeouts are in seconds. If None is passed for timeout then
Python's default timeout for sockets will be used. See for example
the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
"""
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None,
ca_certs=None, disable_ssl_certificate_validation=False):
httplib.HTTPSConnection.__init__(self, host, port=port, key_file=key_file,
cert_file=cert_file, strict=strict)
self.timeout = timeout
self.proxy_info = proxy_info
if ca_certs is None:
ca_certs = CA_CERTS
self.ca_certs = ca_certs
self.disable_ssl_certificate_validation = \
disable_ssl_certificate_validation
# The following two methods were adapted from https_wrapper.py, released
# with the Google Appengine SDK at
# http://googleappengine.googlecode.com/svn-history/r136/trunk/python/google/appengine/tools/https_wrapper.py
# under the following license:
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def _GetValidHostsForCert(self, cert):
"""Returns a list of valid host globs for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
Returns:
list: A list of valid host globs.
"""
if 'subjectAltName' in cert:
return [x[1] for x in cert['subjectAltName']
if x[0].lower() == 'dns']
else:
return [x[0][1] for x in cert['subject']
if x[0][0].lower() == 'commonname']
def _ValidateCertificateHostname(self, cert, hostname):
"""Validates that a given hostname is valid for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
hostname: The hostname to test.
Returns:
bool: Whether or not the hostname is valid for this certificate.
"""
hosts = self._GetValidHostsForCert(cert)
for host in hosts:
host_re = host.replace('.', '\.').replace('*', '[^.]*')
if re.search('^%s$' % (host_re,), hostname, re.I):
return True
return False
def connect(self):
"Connect to a host on a given (SSL) port."
msg = "getaddrinfo returns an empty list"
for family, socktype, proto, canonname, sockaddr in socket.getaddrinfo(
self.host, self.port, 0, socket.SOCK_STREAM):
try:
if self.proxy_info and self.proxy_info.isgood():
sock = socks.socksocket(family, socktype, proto)
sock.setproxy(*self.proxy_info.astuple())
else:
sock = socket.socket(family, socktype, proto)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if has_timeout(self.timeout):
sock.settimeout(self.timeout)
sock.connect((self.host, self.port))
self.sock =_ssl_wrap_socket(
sock, self.key_file, self.cert_file,
self.disable_ssl_certificate_validation, self.ca_certs)
if self.debuglevel > 0:
print "connect: (%s, %s)" % (self.host, self.port)
if not self.disable_ssl_certificate_validation:
cert = self.sock.getpeercert()
hostname = self.host.split(':', 0)[0]
if not self._ValidateCertificateHostname(cert, hostname):
raise CertificateHostnameMismatch(
'Server presented certificate that does not match '
'host %s: %s' % (hostname, cert), hostname, cert)
except ssl_SSLError, e:
if sock:
sock.close()
if self.sock:
self.sock.close()
self.sock = None
# Unfortunately the ssl module doesn't seem to provide any way
# to get at more detailed error information, in particular
# whether the error is due to certificate validation or
# something else (such as SSL protocol mismatch).
if e.errno == ssl.SSL_ERROR_SSL:
raise SSLHandshakeError(e)
else:
raise
except (socket.timeout, socket.gaierror):
raise
except socket.error, msg:
if self.debuglevel > 0:
print 'connect fail:', (self.host, self.port)
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
# Maps a URL scheme to the connection class used to fetch it.
SCHEME_TO_CONNECTION = {
    'http': HTTPConnectionWithTimeout,
    'https': HTTPSConnectionWithTimeout
}
# Use a different connection object for Google App Engine
try:
    from google.appengine.api import apiproxy_stub_map
    if apiproxy_stub_map.apiproxy.GetStub('urlfetch') is None:
        raise ImportError  # Bail out; we're not actually running on App Engine.
    from google.appengine.api.urlfetch import fetch
    from google.appengine.api.urlfetch import InvalidURLError
    from google.appengine.api.urlfetch import DownloadError
    from google.appengine.api.urlfetch import ResponseTooLargeError
    from google.appengine.api.urlfetch import SSLCertificateError

    class ResponseDict(dict):
        """Is a dictionary that also has a read() method, so
        that it can pass itself off as an httlib.HTTPResponse()."""
        def read(self):
            pass

    class AppEngineHttpConnection(object):
        """Emulates an httplib.HTTPConnection object, but actually uses the Google
        App Engine urlfetch library. This allows the timeout to be properly used on
        Google App Engine, and avoids using httplib, which on Google App Engine is
        just another wrapper around urlfetch.
        """
        def __init__(self, host, port=None, key_file=None, cert_file=None,
                     strict=None, timeout=None, proxy_info=None, ca_certs=None,
                     disable_certificate_validation=False):
            self.host = host
            self.port = port
            self.timeout = timeout
            # urlfetch offers no support for client certificates or proxies.
            if key_file or cert_file or proxy_info or ca_certs:
                raise NotSupportedOnThisPlatform()
            self.response = None
            self.scheme = 'http'
            self.validate_certificate = not disable_certificate_validation
            # Pretend to be connected; urlfetch has no persistent socket.
            self.sock = True

        def request(self, method, url, body, headers):
            # Calculate the absolute URI, which fetch requires
            netloc = self.host
            if self.port:
                netloc = '%s:%s' % (self.host, self.port)
            absolute_uri = '%s://%s%s' % (self.scheme, netloc, url)
            try:
                response = fetch(absolute_uri, payload=body, method=method,
                    headers=headers, allow_truncated=False, follow_redirects=False,
                    deadline=self.timeout,
                    validate_certificate=self.validate_certificate)
                self.response = ResponseDict(response.headers)
                self.response['status'] = str(response.status_code)
                self.response.status = response.status_code
                setattr(self.response, 'read', lambda : response.content)

            # Make sure the exceptions raised match the exceptions expected.
            except InvalidURLError:
                raise socket.gaierror('')
            except (DownloadError, ResponseTooLargeError, SSLCertificateError):
                raise httplib.HTTPException()

        def getresponse(self):
            if self.response:
                return self.response
            else:
                raise httplib.HTTPException()

        def set_debuglevel(self, level):
            pass

        def connect(self):
            pass

        def close(self):
            pass

    class AppEngineHttpsConnection(AppEngineHttpConnection):
        """Same as AppEngineHttpConnection, but for HTTPS URIs."""
        def __init__(self, host, port=None, key_file=None, cert_file=None,
                     strict=None, timeout=None, proxy_info=None):
            AppEngineHttpConnection.__init__(self, host, port, key_file, cert_file,
                strict, timeout, proxy_info)
            self.scheme = 'https'

    # Update the connection classes to use the Googel App Engine specific ones.
    SCHEME_TO_CONNECTION = {
        'http': AppEngineHttpConnection,
        'https': AppEngineHttpsConnection
    }
except ImportError:
    # Not running on App Engine: keep the standard connection classes.
    pass
class Http(object):
"""An HTTP client that handles:
- all methods
- caching
- ETags
- compression,
- HTTPS
- Basic
- Digest
- WSSE
and more.
"""
    def __init__(self, cache=None, timeout=None, proxy_info=None,
                 ca_certs=None, disable_ssl_certificate_validation=False):
        """
        The value of proxy_info is a ProxyInfo instance.

        If 'cache' is a string then it is used as a directory name for
        a disk cache. Otherwise it must be an object that supports the
        same interface as FileCache.

        All timeouts are in seconds. If None is passed for timeout
        then Python's default timeout for sockets will be used. See
        for example the docs of socket.setdefaulttimeout():
        http://docs.python.org/library/socket.html#socket.setdefaulttimeout

        ca_certs is the path of a file containing root CA certificates for SSL
        server certificate validation.  By default, a CA cert file bundled with
        httplib2 is used.

        If disable_ssl_certificate_validation is true, SSL cert validation will
        not be performed.
        """
        self.proxy_info = proxy_info
        self.ca_certs = ca_certs
        self.disable_ssl_certificate_validation = \
                disable_ssl_certificate_validation
        # Map domain name to an httplib connection
        self.connections = {}
        # The location of the cache, for now a directory
        # where cached responses are held.
        if cache and isinstance(cache, basestring):
            self.cache = FileCache(cache)
        else:
            self.cache = cache
        # Name/password
        self.credentials = Credentials()
        # Key/cert
        self.certificates = KeyCerts()
        # authorization objects
        self.authorizations = []
        # If set to False then no redirects are followed, even safe ones.
        self.follow_redirects = True
        # Which HTTP methods do we apply optimistic concurrency to, i.e.
        # which methods get an "if-match:" etag header added to them.
        self.optimistic_concurrency_methods = ["PUT", "PATCH"]
        # If 'follow_redirects' is True, and this is set to True then
        # all redirecs are followed, including unsafe ones.
        self.follow_all_redirects = False
        # If True, cached responses are used without ETag revalidation.
        self.ignore_etag = False
        # If True, transport errors become synthetic responses rather than
        # raised exceptions.
        self.force_exception_to_status_code = False
        # Socket timeout in seconds applied to every new connection.
        self.timeout = timeout
def _auth_from_challenge(self, host, request_uri, headers, response, content):
"""A generator that creates Authorization objects
that can be applied to requests.
"""
challenges = _parse_www_authenticate(response, 'www-authenticate')
for cred in self.credentials.iter(host):
for scheme in AUTH_SCHEME_ORDER:
if challenges.has_key(scheme):
yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)
def add_credentials(self, name, password, domain=""):
"""Add a name and password that will be used
any time a request requires authentication."""
self.credentials.add(name, password, domain)
def add_certificate(self, key, cert, domain):
"""Add a key and cert that will be used
any time a request requires authentication."""
self.certificates.add(key, cert, domain)
def clear_credentials(self):
"""Remove all the names and passwords
that are used for authentication"""
self.credentials.clear()
self.authorizations = []
    def _conn_request(self, conn, request_uri, method, body, headers):
        """Send one request on *conn* and return (Response, content).

        The two-iteration loop exists because a kept-alive connection may
        have been closed by the server; the first failure triggers a
        reconnect and exactly one retry.
        """
        for i in range(2):
            try:
                if conn.sock is None:
                    conn.connect()
                conn.request(method, request_uri, body, headers)
            except socket.timeout:
                raise
            except socket.gaierror:
                # DNS resolution failed; retrying the same host is pointless.
                conn.close()
                raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
            except ssl_SSLError:
                conn.close()
                raise
            except socket.error, e:
                err = 0
                if hasattr(e, 'args'):
                    err = getattr(e, 'args')[0]
                else:
                    err = e.errno
                if err == errno.ECONNREFUSED: # Connection refused
                    raise
            except httplib.HTTPException:
                # Just because the server closed the connection doesn't apparently mean
                # that the server didn't send a response.
                if conn.sock is None:
                    if i == 0:
                        conn.close()
                        conn.connect()
                        continue
                    else:
                        conn.close()
                        raise
                if i == 0:
                    conn.close()
                    conn.connect()
                    continue
            try:
                response = conn.getresponse()
            except (socket.error, httplib.HTTPException):
                # Reading the response failed; reconnect and retry once.
                if i == 0:
                    conn.close()
                    conn.connect()
                    continue
                else:
                    raise
            else:
                content = ""
                if method == "HEAD":
                    # HEAD responses carry no body; just release the socket.
                    response.close()
                else:
                    content = response.read()
                response = Response(response)
                if method != "HEAD":
                    content = _decompressContent(response, content)
                break
        return (response, content)
def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
    """Do the actual request using the connection object
    and also follow one level of redirects if necessary.

    Applies any in-scope Authorization (retrying once on a stale
    digest), answers 401 challenges, follows 3xx redirects recursively
    via self.request, and updates the cache for cacheable responses.
    """
    # Pick the most specific (deepest-path) authorization in scope.
    auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
    auth = auths and sorted(auths)[0][1] or None
    if auth:
        auth.request(method, request_uri, headers, body)
    (response, content) = self._conn_request(conn, request_uri, method, body, headers)
    if auth:
        # A truthy auth.response() means "retry once" (e.g. stale digest).
        if auth.response(response, body):
            auth.request(method, request_uri, headers, body)
            (response, content) = self._conn_request(conn, request_uri, method, body, headers )
            response._stale_digest = 1
    if response.status == 401:
        # Try each credential matching the server's challenge until one works.
        for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
            authorization.request(method, request_uri, headers, body)
            (response, content) = self._conn_request(conn, request_uri, method, body, headers, )
            if response.status != 401:
                self.authorizations.append(authorization)
                authorization.response(response, body)
                break
    if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
        if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
            # Pick out the location header and basically start from the beginning
            # remembering first to strip the ETag header and decrement our 'depth'
            if redirections:
                if not response.has_key('location') and response.status != 300:
                    raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content)
                # Fix-up relative redirects (which violate an RFC 2616 MUST)
                if response.has_key('location'):
                    location = response['location']
                    (scheme, authority, path, query, fragment) = parse_uri(location)
                    if authority == None:
                        response['location'] = urlparse.urljoin(absolute_uri, location)
                if response.status == 301 and method in ["GET", "HEAD"]:
                    # Permanent redirects are cached so later requests can skip the hop.
                    response['-x-permanent-redirect-url'] = response['location']
                    if not response.has_key('content-location'):
                        response['content-location'] = absolute_uri
                    _updateCache(headers, response, content, self.cache, cachekey)
                # Conditional-request headers must not leak into the redirected request.
                if headers.has_key('if-none-match'):
                    del headers['if-none-match']
                if headers.has_key('if-modified-since'):
                    del headers['if-modified-since']
                if response.has_key('location'):
                    location = response['location']
                    old_response = copy.deepcopy(response)
                    if not old_response.has_key('content-location'):
                        old_response['content-location'] = absolute_uri
                    redirect_method = method
                    if response.status in [302, 303]:
                        # Per common practice, 302/303 responses are refetched with GET.
                        redirect_method = "GET"
                        body = None
                    (response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1)
                    response.previous = old_response
            else:
                raise RedirectLimit("Redirected more times than rediection_limit allows.", response, content)
    elif response.status in [200, 203] and method in ["GET", "HEAD"]:
        # Don't cache 206's since we aren't going to handle byte range requests
        if not response.has_key('content-location'):
            response['content-location'] = absolute_uri
        _updateCache(headers, response, content, self.cache, cachekey)
    return (response, content)
def _normalize_headers(self, headers):
    # Delegate to the module-level _normalize_headers helper defined
    # elsewhere in this file (the method merely shadows its name).
    return _normalize_headers(headers)
# Need to catch and rebrand some exceptions
# Then need to optionally turn all exceptions into status codes
# including all socket.* and httplib.* exceptions.
def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
    """ Performs a single HTTP request.
    The 'uri' is the URI of the HTTP resource and can begin
    with either 'http' or 'https'. The value of 'uri' must be an absolute URI.
    The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc.
    There is no restriction on the methods allowed.
    The 'body' is the entity body to be sent with the request. It is a string
    object.
    Any extra headers that are to be sent with the request should be provided in the
    'headers' dictionary.
    The maximum number of redirects to follow before raising an
    exception is 'redirections'. The default is 5.
    The return value is a tuple of (response, content), the first
    being an instance of the 'Response' class, the second being
    a string that contains the response entity body.
    """
    try:
        if headers is None:
            headers = {}
        else:
            headers = self._normalize_headers(headers)
        if not headers.has_key('user-agent'):
            headers['user-agent'] = "Python-httplib2/%s (gzip)" % __version__
        uri = iri2uri(uri)
        (scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
        # http://host:443 is really an https request.
        domain_port = authority.split(":")[0:2]
        if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http':
            scheme = 'https'
            authority = domain_port[0]
        # Reuse a pooled connection per scheme+authority when available.
        conn_key = scheme+":"+authority
        if conn_key in self.connections:
            conn = self.connections[conn_key]
        else:
            if not connection_type:
                connection_type = SCHEME_TO_CONNECTION[scheme]
            certs = list(self.certificates.iter(authority))
            if issubclass(connection_type, HTTPSConnectionWithTimeout):
                if certs:
                    conn = self.connections[conn_key] = connection_type(
                            authority, key_file=certs[0][0],
                            cert_file=certs[0][1], timeout=self.timeout,
                            proxy_info=self.proxy_info,
                            ca_certs=self.ca_certs,
                            disable_ssl_certificate_validation=
                                    self.disable_ssl_certificate_validation)
                else:
                    conn = self.connections[conn_key] = connection_type(
                            authority, timeout=self.timeout,
                            proxy_info=self.proxy_info,
                            ca_certs=self.ca_certs,
                            disable_ssl_certificate_validation=
                                    self.disable_ssl_certificate_validation)
            else:
                conn = self.connections[conn_key] = connection_type(
                        authority, timeout=self.timeout,
                        proxy_info=self.proxy_info)
            conn.set_debuglevel(debuglevel)
        if 'range' not in headers and 'accept-encoding' not in headers:
            headers['accept-encoding'] = 'gzip, deflate'
        info = email.Message.Message()
        cached_value = None
        if self.cache:
            cachekey = defrag_uri
            cached_value = self.cache.get(cachekey)
            if cached_value:
                # info = email.message_from_string(cached_value)
                #
                # Need to replace the line above with the kludge below
                # to fix the non-existent bug not fixed in this
                # bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
                try:
                    # Cache entries are "<headers>\r\n\r\n<body>".
                    info, content = cached_value.split('\r\n\r\n', 1)
                    feedparser = email.FeedParser.FeedParser()
                    feedparser.feed(info)
                    info = feedparser.close()
                    feedparser._parse = None
                except IndexError:
                    # Corrupt cache entry: discard and fetch fresh.
                    self.cache.delete(cachekey)
                    cachekey = None
                    cached_value = None
        else:
            cachekey = None
        if method in self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers:
            # http://www.w3.org/1999/04/Editing/
            headers['if-match'] = info['etag']
        if method not in ["GET", "HEAD"] and self.cache and cachekey:
            # RFC 2616 Section 13.10
            self.cache.delete(cachekey)
        # Check the vary header in the cache to see if this request
        # matches what varies in the cache.
        if method in ['GET', 'HEAD'] and 'vary' in info:
            vary = info['vary']
            vary_headers = vary.lower().replace(' ', '').split(',')
            for header in vary_headers:
                key = '-varied-%s' % header
                value = info[key]
                if headers.get(header, None) != value:
                    cached_value = None
                    break
        if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
            if info.has_key('-x-permanent-redirect-url'):
                # Should cached permanent redirects be counted in our redirection count? For now, yes.
                if redirections <= 0:
                    raise RedirectLimit("Redirected more times than rediection_limit allows.", {}, "")
                (response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1)
                response.previous = Response(info)
                response.previous.fromcache = True
            else:
                # Determine our course of action:
                #   Is the cached entry fresh or stale?
                #   Has the client requested a non-cached response?
                #
                # There seems to be three possible answers:
                # 1. [FRESH] Return the cache entry w/o doing a GET
                # 2. [STALE] Do the GET (but add in cache validators if available)
                # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
                entry_disposition = _entry_disposition(info, headers)
                if entry_disposition == "FRESH":
                    if not cached_value:
                        info['status'] = '504'
                        content = ""
                    response = Response(info)
                    if cached_value:
                        response.fromcache = True
                    return (response, content)
                if entry_disposition == "STALE":
                    # Revalidate with whatever validators the cache recorded.
                    if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers:
                        headers['if-none-match'] = info['etag']
                    if info.has_key('last-modified') and not 'last-modified' in headers:
                        headers['if-modified-since'] = info['last-modified']
                elif entry_disposition == "TRANSPARENT":
                    pass
                (response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
            if response.status == 304 and method == "GET":
                # Rewrite the cache entry with the new end-to-end headers
                # Take all headers that are in response
                # and overwrite their values in info.
                # unless they are hop-by-hop, or are listed in the connection header.
                for key in _get_end2end_headers(response):
                    info[key] = response[key]
                merged_response = Response(info)
                if hasattr(response, "_stale_digest"):
                    merged_response._stale_digest = response._stale_digest
                _updateCache(headers, merged_response, content, self.cache, cachekey)
                response = merged_response
                response.status = 200
                response.fromcache = True
            elif response.status == 200:
                content = new_content
            else:
                self.cache.delete(cachekey)
                content = new_content
        else:
            cc = _parse_cache_control(headers)
            if cc.has_key('only-if-cached'):
                # RFC 2616 14.9.4: no network use allowed and nothing cached -> 504.
                info['status'] = '504'
                response = Response(info)
                content = ""
            else:
                (response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
    except Exception, e:
        # Optionally convert any failure into a synthetic error Response
        # instead of raising, depending on force_exception_to_status_code.
        if self.force_exception_to_status_code:
            if isinstance(e, HttpLib2ErrorWithResponse):
                response = e.response
                content = e.content
                response.status = 500
                response.reason = str(e)
            elif isinstance(e, socket.timeout):
                content = "Request Timeout"
                response = Response( {
                        "content-type": "text/plain",
                        "status": "408",
                        "content-length": len(content)
                        })
                response.reason = "Request Timeout"
            else:
                content = str(e)
                response = Response( {
                        "content-type": "text/plain",
                        "status": "400",
                        "content-length": len(content)
                        })
                response.reason = "Bad Request"
        else:
            raise
    return (response, content)
class Response(dict):
    """A dict-like HTTP response, closer in spirit to email.Message than
    to httplib.HTTPResponse: lower-cased header names map to header
    values, with a few status attributes on the side."""

    # Was this response served from our local cache?
    fromcache = False
    # HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1.
    version = 11
    # Status code returned by server.
    status = 200
    # Reason phrase returned by server.
    reason = "Ok"
    # Response for the previous hop when redirects were followed.
    previous = None

    def __init__(self, info):
        # *info* is an httplib.HTTPResponse, an email.Message, or a plain
        # mapping; in every case its headers are copied into this dict.
        if isinstance(info, httplib.HTTPResponse):
            for name, value in info.getheaders():
                self[name.lower()] = value
            self.status = info.status
            self['status'] = str(self.status)
            self.reason = info.reason
            self.version = info.version
        elif isinstance(info, email.Message.Message):
            for name, value in info.items():
                self[name] = value
            self.status = int(self['status'])
        else:
            for name, value in info.iteritems():
                self[name] = value
            self.status = int(self.get('status', self.status))

    def __getattr__(self, name):
        # Backwards-compatible alias: ``resp.dict`` is the response itself.
        if name == 'dict':
            return self
        raise AttributeError(name)
| Python |
"""
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import oauth2
import imaplib
class IMAP4_SSL(imaplib.IMAP4_SSL):
    """imaplib.IMAP4_SSL subclass that adds OAuth (XOAUTH) authentication."""

    def authenticate(self, url, consumer, token):
        """Authenticate against *url* via the XOAUTH mechanism.

        Raises ValueError when *consumer* or *token* has the wrong type.
        """
        if not (consumer is None or isinstance(consumer, oauth2.Consumer)):
            raise ValueError("Invalid consumer.")
        if not (token is None or isinstance(token, oauth2.Token)):
            raise ValueError("Invalid token.")
        xoauth = lambda challenge: oauth2.build_xoauth_string(url, consumer, token)
        imaplib.IMAP4_SSL.authenticate(self, 'XOAUTH', xoauth)
| Python |
"""
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import oauth2
import smtplib
import base64
class SMTP(smtplib.SMTP):
    """smtplib.SMTP subclass that adds OAuth (XOAUTH) authentication."""

    def authenticate(self, url, consumer, token):
        """Issue an ``AUTH XOAUTH`` command for *url*.

        Raises ValueError when *consumer* or *token* has the wrong type.
        """
        if not (consumer is None or isinstance(consumer, oauth2.Consumer)):
            raise ValueError("Invalid consumer.")
        if not (token is None or isinstance(token, oauth2.Token)):
            raise ValueError("Invalid token.")
        encoded = base64.b64encode(oauth2.build_xoauth_string(url, consumer, token))
        self.docmd('AUTH', 'XOAUTH %s' % encoded)
| Python |
# This is the version of this source code.
manual_verstr = "1.5"
auto_build_num = "211"
verstr = manual_verstr + "." + auto_build_num
try:
from pyutil.version_class import Version as pyutil_Version
__version__ = pyutil_Version(verstr)
except (ImportError, ValueError):
# Maybe there is no pyutil installed.
from distutils.version import LooseVersion as distutils_Version
__version__ = distutils_Version(verstr)
| Python |
"""
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import base64
import urllib
import time
import random
import urlparse
import hmac
import binascii
import httplib2
try:
from urlparse import parse_qs
parse_qs # placate pyflakes
except ImportError:
# fall back for Python 2.5
from cgi import parse_qs
try:
from hashlib import sha1
sha = sha1
except ImportError:
# hashlib was added in Python 2.5
import sha
import _version
__version__ = _version.__version__
# Library-wide protocol constants and defaults.
OAUTH_VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'  # default HTTP method for new Request objects
SIGNATURE_METHOD = 'PLAINTEXT'
class Error(RuntimeError):
    """Base exception class for this OAuth library."""

    def __init__(self, message='OAuth error occurred.'):
        self._message = message

    @property
    def message(self):
        """A hack to get around the deprecation errors in 2.6."""
        return self._message

    def __str__(self):
        return self.message
class MissingSignature(Error):
    """Error subclass raised when an OAuth signature is missing."""
    pass
def build_authenticate_header(realm=''):
    """Return the optional WWW-Authenticate header for a 401 response."""
    challenge = 'OAuth realm="%s"' % realm
    return {'WWW-Authenticate': challenge}
def build_xoauth_string(url, consumer, token=None):
    """Build an XOAUTH string for use in SMTP/IMAP authentication."""
    request = Request.from_consumer_and_token(consumer, token,
        "GET", url)
    request.sign_request(SignatureMethod_HMAC_SHA1(), consumer, token)
    pairs = ['%s="%s"' % (k, escape(v))
             for k, v in sorted(request.iteritems())
             if v is not None]
    return "%s %s %s" % ("GET", url, ','.join(pairs))
def to_unicode(s):
    """ Convert to unicode, raise exception with instructive error
    message if s is not unicode, ascii, or utf-8. """
    # unicode objects pass through untouched; a str is decoded as UTF-8;
    # anything else (or a non-UTF-8 str) raises TypeError.
    if not isinstance(s, unicode):
        if not isinstance(s, str):
            raise TypeError('You are required to pass either unicode or string here, not: %r (%s)' % (type(s), s))
        try:
            s = s.decode('utf-8')
        except UnicodeDecodeError, le:
            raise TypeError('You are required to pass either a unicode object or a utf-8 string here. You passed a Python string object which contained non-utf-8: %r. The UnicodeDecodeError that resulted from attempting to interpret it as utf-8 was: %s' % (s, le,))
    return s
def to_utf8(s):
    """Return *s* as a UTF-8 byte string (validating via to_unicode)."""
    decoded = to_unicode(s)
    return decoded.encode('utf-8')
def to_unicode_if_string(s):
    """Convert *s* to unicode when it is a string; pass through otherwise."""
    if not isinstance(s, basestring):
        return s
    return to_unicode(s)
def to_utf8_if_string(s):
    """UTF-8 encode *s* when it is a string; pass through otherwise."""
    if not isinstance(s, basestring):
        return s
    return to_utf8(s)
def to_unicode_optional_iterator(x):
    """
    Raise TypeError if x is a str containing non-utf8 bytes or if x is
    an iterable which contains such a str.
    """
    if isinstance(x, basestring):
        return to_unicode(x)
    try:
        l = list(x)
    except TypeError, e:
        # Non-iterable, non-string values pass through untouched.
        assert 'is not iterable' in str(e)
        return x
    else:
        return [ to_unicode(e) for e in l ]
def to_utf8_optional_iterator(x):
    """
    UTF-8 encode x when it is a string, or each string element when x
    is an iterable; non-iterable, non-string values pass through.
    Raises TypeError when a str contains non-UTF-8 bytes.
    """
    if isinstance(x, basestring):
        return to_utf8(x)
    try:
        l = list(x)
    except TypeError, e:
        # Non-iterable, non-string values pass through untouched.
        assert 'is not iterable' in str(e)
        return x
    else:
        return [ to_utf8_if_string(e) for e in l ]
def escape(s):
    """Percent-encode *s* for OAuth, escaping '/' too; only '~' stays literal."""
    encoded = s.encode('utf-8')
    return urllib.quote(encoded, safe='~')
def generate_timestamp():
    """Get seconds since epoch (UTC) as an integer."""
    now = time.time()
    return int(now)
def generate_nonce(length=8):
    """Return a pseudorandom string of *length* decimal digits."""
    digits = [str(random.randint(0, 9)) for _ in range(length)]
    return ''.join(digits)
def generate_verifier(length=8):
    """Return a pseudorandom verifier string of *length* decimal digits."""
    digits = [str(random.randint(0, 9)) for _ in range(length)]
    return ''.join(digits)
class Consumer(object):
    """A consumer of OAuth-protected services — the OAuth "client".

    A consumer is a third-party service that accesses protected
    resources from an OAuth service provider on behalf of an end user.
    The service provider issues the consumer a *key* (sent with every
    request as identification) and a *secret* (used only when signing
    requests, proving the request came from this registered consumer).
    With those credentials the consumer can request a request token and
    kick off the OAuth authorization dance.
    """

    key = None
    secret = None

    def __init__(self, key, secret):
        self.key = key
        self.secret = secret
        # Both credentials are mandatory.
        if self.key is None or self.secret is None:
            raise ValueError("Key and secret must be set.")

    def __str__(self):
        return urllib.urlencode({'oauth_consumer_key': self.key,
                                 'oauth_consumer_secret': self.secret})
class Token(object):
    """An OAuth credential used to request authorization or a protected
    resource.
    Tokens in OAuth comprise a *key* and a *secret*. The key is included in
    requests to identify the token being used, but the secret is used only in
    the signature, to prove that the requester is who the server gave the
    token to.
    When first negotiating the authorization, the consumer asks for a *request
    token* that the live user authorizes with the service provider. The
    consumer then exchanges the request token for an *access token* that can
    be used to access protected resources.
    """
    key = None
    secret = None
    callback = None
    callback_confirmed = None
    verifier = None

    def __init__(self, key, secret):
        self.key = key
        self.secret = secret
        # Both parts of the credential are mandatory.
        if self.key is None or self.secret is None:
            raise ValueError("Key and secret must be set.")

    def set_callback(self, callback):
        # Setting a callback marks it confirmed, per OAuth 1.0a.
        self.callback = callback
        self.callback_confirmed = 'true'

    def set_verifier(self, verifier=None):
        # Without an explicit verifier a pseudorandom one is generated.
        if verifier is not None:
            self.verifier = verifier
        else:
            self.verifier = generate_verifier()

    def get_callback_url(self):
        # Returns the callback URL, with oauth_verifier appended to its
        # query string when both callback and verifier are set.
        if self.callback and self.verifier:
            # Append the oauth_verifier.
            parts = urlparse.urlparse(self.callback)
            scheme, netloc, path, params, query, fragment = parts[:6]
            if query:
                query = '%s&oauth_verifier=%s' % (query, self.verifier)
            else:
                query = 'oauth_verifier=%s' % self.verifier
            return urlparse.urlunparse((scheme, netloc, path, params,
                query, fragment))
        return self.callback

    def to_string(self):
        """Returns this token as a plain string, suitable for storage.
        The resulting string includes the token's secret, so you should never
        send or store this string where a third party can read it.
        """
        data = {
            'oauth_token': self.key,
            'oauth_token_secret': self.secret,
        }
        if self.callback_confirmed is not None:
            data['oauth_callback_confirmed'] = self.callback_confirmed
        return urllib.urlencode(data)

    @staticmethod
    def from_string(s):
        """Deserializes a token from a string like one returned by
        `to_string()`.

        Raises ValueError when the string is empty or lacks the
        oauth_token / oauth_token_secret parameters.
        """
        if not len(s):
            raise ValueError("Invalid parameter string.")
        params = parse_qs(s, keep_blank_values=False)
        if not len(params):
            raise ValueError("Invalid parameter string.")
        try:
            key = params['oauth_token'][0]
        except Exception:
            raise ValueError("'oauth_token' not found in OAuth request.")
        try:
            secret = params['oauth_token_secret'][0]
        except Exception:
            raise ValueError("'oauth_token_secret' not found in "
                "OAuth request.")
        token = Token(key, secret)
        try:
            token.callback_confirmed = params['oauth_callback_confirmed'][0]
        except KeyError:
            pass # 1.0, no callback confirmed.
        return token

    def __str__(self):
        return self.to_string()
def setter(attr):
    """Decorator that turns a single setter function into a full property.

    The decorated function becomes the property's setter; a generic
    getter/deleter pair backed by ``self.__dict__[name]`` is generated,
    where *name* is the decorated function's name.
    """
    name = attr.__name__
    def getter(self):
        # Read the value the setter stashed in the instance dict; a
        # missing entry surfaces as the usual AttributeError.
        try:
            return self.__dict__[name]
        except KeyError:
            raise AttributeError(name)
    def deleter(self):
        del self.__dict__[name]
    return property(getter, attr, deleter)
class Request(dict):
    """The parameters and information for an HTTP request, suitable for
    authorizing with OAuth credentials.
    When a consumer wants to access a service's protected resources, it does
    so using a signed HTTP request identifying itself (the consumer) with its
    key, and providing an access token authorized by the end user to access
    those resources.

    The dict contents are the request parameters (OAuth and otherwise);
    ``url``, ``method``, ``body`` and ``is_form_encoded`` are attributes.
    """
    version = OAUTH_VERSION

    def __init__(self, method=HTTP_METHOD, url=None, parameters=None,
                 body='', is_form_encoded=False):
        # Parameters are normalized to unicode on the way in.
        if url is not None:
            self.url = to_unicode(url)
        self.method = method
        if parameters is not None:
            for k, v in parameters.iteritems():
                k = to_unicode(k)
                v = to_unicode_optional_iterator(v)
                self[k] = v
        self.body = body
        self.is_form_encoded = is_form_encoded

    @setter
    def url(self, value):
        # Setting url also computes self.normalized_url (scheme://host/path,
        # default ports and query/fragment stripped) for signing.
        self.__dict__['url'] = value
        if value is not None:
            scheme, netloc, path, params, query, fragment = urlparse.urlparse(value)
            # Exclude default port numbers.
            if scheme == 'http' and netloc[-3:] == ':80':
                netloc = netloc[:-3]
            elif scheme == 'https' and netloc[-4:] == ':443':
                netloc = netloc[:-4]
            if scheme not in ('http', 'https'):
                raise ValueError("Unsupported URL %s (%s)." % (value, scheme))
            # Normalized URL excludes params, query, and fragment.
            self.normalized_url = urlparse.urlunparse((scheme, netloc, path, None, None, None))
        else:
            self.normalized_url = None
            self.__dict__['url'] = None

    @setter
    def method(self, value):
        # HTTP methods are stored upper-cased.
        self.__dict__['method'] = value.upper()

    def _get_timestamp_nonce(self):
        return self['oauth_timestamp'], self['oauth_nonce']

    def get_nonoauth_parameters(self):
        """Get any non-OAuth parameters."""
        return dict([(k, v) for k, v in self.iteritems()
                    if not k.startswith('oauth_')])

    def to_header(self, realm=''):
        """Serialize as a header for an HTTPAuth request."""
        # Only oauth_* parameters belong in the Authorization header.
        oauth_params = ((k, v) for k, v in self.items()
                            if k.startswith('oauth_'))
        stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
        header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
        params_header = ', '.join(header_params)
        auth_header = 'OAuth realm="%s"' % realm
        if params_header:
            auth_header = "%s, %s" % (auth_header, params_header)
        return {'Authorization': auth_header}

    def to_postdata(self):
        """Serialize as post data for a POST request."""
        d = {}
        for k, v in self.iteritems():
            d[k.encode('utf-8')] = to_utf8_optional_iterator(v)
        # tell urlencode to deal with sequence values and map them correctly
        # to resulting querystring. for example self["k"] = ["v1", "v2"] will
        # result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
        return urllib.urlencode(d, True).replace('+', '%20')

    def to_url(self):
        """Serialize as a URL for a GET request."""
        base_url = urlparse.urlparse(self.url)
        try:
            query = base_url.query
        except AttributeError:
            # must be python <2.5
            query = base_url[4]
        query = parse_qs(query)
        # Merge this request's parameters into the URL's existing query.
        for k, v in self.items():
            query.setdefault(k, []).append(v)
        try:
            scheme = base_url.scheme
            netloc = base_url.netloc
            path = base_url.path
            params = base_url.params
            fragment = base_url.fragment
        except AttributeError:
            # must be python <2.5
            scheme = base_url[0]
            netloc = base_url[1]
            path = base_url[2]
            params = base_url[3]
            fragment = base_url[5]
        url = (scheme, netloc, path, params,
               urllib.urlencode(query, True), fragment)
        return urlparse.urlunparse(url)

    def get_parameter(self, parameter):
        # Like self[parameter] but raises the library's Error on absence.
        ret = self.get(parameter)
        if ret is None:
            raise Error('Parameter not found: %s' % parameter)
        return ret

    def get_normalized_parameters(self):
        """Return a string that contains the parameters that must be signed."""
        items = []
        for key, value in self.iteritems():
            if key == 'oauth_signature':
                continue
            # 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
            # so we unpack sequence values into multiple items for sorting.
            if isinstance(value, basestring):
                items.append((to_utf8_if_string(key), to_utf8(value)))
            else:
                try:
                    value = list(value)
                except TypeError, e:
                    assert 'is not iterable' in str(e)
                    items.append((to_utf8_if_string(key), to_utf8_if_string(value)))
                else:
                    items.extend((to_utf8_if_string(key), to_utf8_if_string(item)) for item in value)
        # Include any query string parameters from the provided URL
        query = urlparse.urlparse(self.url)[4]
        url_items = self._split_url_string(query).items()
        url_items = [(to_utf8(k), to_utf8(v)) for k, v in url_items if k != 'oauth_signature' ]
        items.extend(url_items)
        items.sort()
        encoded_str = urllib.urlencode(items)
        # Encode signature parameters per Oauth Core 1.0 protocol
        # spec draft 7, section 3.6
        # (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
        # Spaces must be encoded with "%20" instead of "+"
        return encoded_str.replace('+', '%20').replace('%7E', '~')

    def sign_request(self, signature_method, consumer, token):
        """Set the signature parameter to the result of sign."""
        if not self.is_form_encoded:
            # according to
            # http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
            # section 4.1.1 "OAuth Consumers MUST NOT include an
            # oauth_body_hash parameter on requests with form-encoded
            # request bodies."
            self['oauth_body_hash'] = base64.b64encode(sha(self.body).digest())
        if 'oauth_consumer_key' not in self:
            self['oauth_consumer_key'] = consumer.key
        if token and 'oauth_token' not in self:
            self['oauth_token'] = token.key
        self['oauth_signature_method'] = signature_method.name
        self['oauth_signature'] = signature_method.sign(self, consumer, token)

    @classmethod
    def make_timestamp(cls):
        """Get seconds since epoch (UTC)."""
        return str(int(time.time()))

    @classmethod
    def make_nonce(cls):
        """Generate pseudorandom number."""
        return str(random.randint(0, 100000000))

    @classmethod
    def from_request(cls, http_method, http_url, headers=None, parameters=None,
            query_string=None):
        """Combines multiple parameter sources.

        Parameters are merged, in order, from the Authorization header,
        the supplied query string, and the URL's own query string; returns
        None when no parameters are found at all.
        """
        if parameters is None:
            parameters = {}
        # Headers
        if headers and 'Authorization' in headers:
            auth_header = headers['Authorization']
            # Check that the authorization header is OAuth.
            if auth_header[:6] == 'OAuth ':
                auth_header = auth_header[6:]
                try:
                    # Get the parameters from the header.
                    header_params = cls._split_header(auth_header)
                    parameters.update(header_params)
                except:
                    raise Error('Unable to parse OAuth parameters from '
                        'Authorization header.')
        # GET or POST query string.
        if query_string:
            query_params = cls._split_url_string(query_string)
            parameters.update(query_params)
        # URL parameters.
        param_str = urlparse.urlparse(http_url)[4] # query
        url_params = cls._split_url_string(param_str)
        parameters.update(url_params)
        if parameters:
            return cls(http_method, http_url, parameters)
        return None

    @classmethod
    def from_consumer_and_token(cls, consumer, token=None,
            http_method=HTTP_METHOD, http_url=None, parameters=None,
            body='', is_form_encoded=False):
        # Fills in the standard oauth_* defaults; explicit parameters win.
        if not parameters:
            parameters = {}
        defaults = {
            'oauth_consumer_key': consumer.key,
            'oauth_timestamp': cls.make_timestamp(),
            'oauth_nonce': cls.make_nonce(),
            'oauth_version': cls.version,
        }
        defaults.update(parameters)
        parameters = defaults
        if token:
            parameters['oauth_token'] = token.key
            if token.verifier:
                parameters['oauth_verifier'] = token.verifier
        return Request(http_method, http_url, parameters, body=body,
                       is_form_encoded=is_form_encoded)

    @classmethod
    def from_token_and_callback(cls, token, callback=None,
            http_method=HTTP_METHOD, http_url=None, parameters=None):
        if not parameters:
            parameters = {}
        parameters['oauth_token'] = token.key
        if callback:
            parameters['oauth_callback'] = callback
        return cls(http_method, http_url, parameters)

    @staticmethod
    def _split_header(header):
        """Turn Authorization: header into parameters."""
        params = {}
        parts = header.split(',')
        for param in parts:
            # Ignore realm parameter.
            if param.find('realm') > -1:
                continue
            # Remove whitespace.
            param = param.strip()
            # Split key-value.
            param_parts = param.split('=', 1)
            # Remove quotes and unescape the value.
            params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
        return params

    @staticmethod
    def _split_url_string(param_str):
        """Turn URL string into parameters."""
        parameters = parse_qs(param_str.encode('utf-8'), keep_blank_values=True)
        # Keep only the first value for each parameter, unescaped.
        for k, v in parameters.iteritems():
            parameters[k] = urllib.unquote(v[0])
        return parameters
class Client(httplib2.Http):
    """OAuthClient is a worker to attempt to execute a request.

    An httplib2.Http subclass that OAuth-signs every request before
    sending it.
    """

    def __init__(self, consumer, token=None, cache=None, timeout=None,
        proxy_info=None):
        if consumer is not None and not isinstance(consumer, Consumer):
            raise ValueError("Invalid consumer.")
        if token is not None and not isinstance(token, Token):
            raise ValueError("Invalid token.")
        self.consumer = consumer
        self.token = token
        # HMAC-SHA1 by default; replace via set_signature_method().
        self.method = SignatureMethod_HMAC_SHA1()
        httplib2.Http.__init__(self, cache=cache, timeout=timeout, proxy_info=proxy_info)

    def set_signature_method(self, method):
        if not isinstance(method, SignatureMethod):
            raise ValueError("Invalid signature method.")
        self.method = method

    def request(self, uri, method="GET", body='', headers=None,
        redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
        """Sign the request with OAuth parameters, then delegate the
        actual HTTP exchange to httplib2.Http.request."""
        DEFAULT_POST_CONTENT_TYPE = 'application/x-www-form-urlencoded'
        if not isinstance(headers, dict):
            headers = {}
        if method == "POST":
            headers['Content-Type'] = headers.get('Content-Type',
                DEFAULT_POST_CONTENT_TYPE)
        is_form_encoded = \
            headers.get('Content-Type') == 'application/x-www-form-urlencoded'
        # Form-encoded bodies contribute their fields to the signature.
        if is_form_encoded and body:
            parameters = parse_qs(body)
        else:
            parameters = None
        req = Request.from_consumer_and_token(self.consumer,
            token=self.token, http_method=method, http_url=uri,
            parameters=parameters, body=body, is_form_encoded=is_form_encoded)
        req.sign_request(self.method, self.consumer, self.token)
        # Rebuild scheme://host as the realm for the Authorization header.
        schema, rest = urllib.splittype(uri)
        if rest.startswith('//'):
            hierpart = '//'
        else:
            hierpart = ''
        host, rest = urllib.splithost(rest)
        realm = schema + ':' + hierpart + host
        # Where the OAuth parameters travel depends on the request shape:
        # form-encoded POST body, GET query string, else Authorization header.
        if is_form_encoded:
            body = req.to_postdata()
        elif method == "GET":
            uri = req.to_url()
        else:
            headers.update(req.to_header(realm=realm))
        return httplib2.Http.request(self, uri, method=method, body=body,
            headers=headers, redirections=redirections,
            connection_type=connection_type)
class Server(object):
    """A skeletal implementation of a service provider, providing protected
    resources to requests from authorized consumers.

    This class implements the logic to check requests for authorization. You
    can use it with your web server or web framework to protect certain
    resources with OAuth.
    """

    timestamp_threshold = 300 # In seconds, five minutes.
    version = OAUTH_VERSION
    # Mapping of signature method name -> SignatureMethod instance,
    # populated per-instance in __init__ / add_signature_method.
    signature_methods = None

    def __init__(self, signature_methods=None):
        self.signature_methods = signature_methods or {}

    def add_signature_method(self, signature_method):
        """Register a SignatureMethod under its protocol name."""
        self.signature_methods[signature_method.name] = signature_method
        return self.signature_methods

    def verify_request(self, request, consumer, token):
        """Verifies an api call and checks all the parameters."""
        self._check_version(request)
        self._check_signature(request, consumer, token)
        # Hand back only the application-level (non oauth_*) parameters.
        parameters = request.get_nonoauth_parameters()
        return parameters

    def build_authenticate_header(self, realm=''):
        """Optional support for the authenticate header."""
        return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}

    def _check_version(self, request):
        """Verify the correct version of the request for this server."""
        version = self._get_version(request)
        if version and version != self.version:
            raise Error('OAuth version %s not supported.' % str(version))

    def _get_version(self, request):
        """Return the version of the request for this server."""
        try:
            version = request.get_parameter('oauth_version')
        except:
            # NOTE(review): bare except used as "parameter missing, use the
            # default" — it also hides any unrelated error.
            version = OAUTH_VERSION
        return version

    def _get_signature_method(self, request):
        """Figure out the signature with some defaults."""
        try:
            signature_method = request.get_parameter('oauth_signature_method')
        except:
            # Missing parameter: fall back to the module default.
            signature_method = SIGNATURE_METHOD
        try:
            # Get the signature method object.
            signature_method = self.signature_methods[signature_method]
        except:
            signature_method_names = ', '.join(self.signature_methods.keys())
            raise Error('Signature method %s not supported try one of the following: %s' % (signature_method, signature_method_names))

        return signature_method

    def _get_verifier(self, request):
        return request.get_parameter('oauth_verifier')

    def _check_signature(self, request, consumer, token):
        # Timestamp is validated first; an expired request is rejected
        # before any signature work is done.
        timestamp, nonce = request._get_timestamp_nonce()
        self._check_timestamp(timestamp)
        signature_method = self._get_signature_method(request)
        try:
            signature = request.get_parameter('oauth_signature')
        except:
            raise MissingSignature('Missing oauth_signature.')

        # Validate the signature.
        valid = signature_method.check(request, consumer, token, signature)
        if not valid:
            key, base = signature_method.signing_base(request, consumer, token)
            # Expose the base string to help clients debug their signing.
            raise Error('Invalid signature. Expected signature base '
                'string: %s' % base)

    def _check_timestamp(self, timestamp):
        """Verify that timestamp is recentish."""
        timestamp = int(timestamp)
        now = int(time.time())
        lapsed = now - timestamp
        if lapsed > self.timestamp_threshold:
            raise Error('Expired timestamp: given %d and now %s has a '
                'greater difference than threshold %d' % (timestamp, now,
                self.timestamp_threshold))
class SignatureMethod(object):
    """Abstract base class for OAuth request-signing strategies.

    The OAuth protocol lets consumers and service providers pick how a
    request is signed. Subclasses supply `signing_base()` and `sign()`;
    `check()` is a shared helper that verifies a signature by recomputing
    it and comparing.
    """

    def signing_base(self, request, consumer, token):
        """Return a (key, message) pair used to produce the signature.

        The message half may be surfaced in error output so clients can
        debug their signing code.
        """
        raise NotImplementedError

    def sign(self, request, consumer, token):
        """Return the signature for `request` under `consumer`/`token`.

        Implementations should build the message via `signing_base()` so
        it stays useful for debugging.
        """
        raise NotImplementedError

    def check(self, request, consumer, token, signature):
        """Return True when `signature` matches the freshly computed one."""
        expected = self.sign(request, consumer, token)
        return expected == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
    # Protocol name sent as the oauth_signature_method parameter.
    name = 'HMAC-SHA1'

    def signing_base(self, request, consumer, token):
        """Build the HMAC key and signature base string for the request.

        Returns (key, raw): key is "<consumer secret>&[<token secret>]",
        raw is "METHOD&URL&PARAMS" with each piece OAuth-escaped.
        """
        if not hasattr(request, 'normalized_url') or request.normalized_url is None:
            raise ValueError("Base URL for request is not set.")

        sig = (
            escape(request.method),
            escape(request.normalized_url),
            escape(request.get_normalized_parameters()),
        )

        key = '%s&' % escape(consumer.secret)
        if token:
            key += escape(token.secret)
        raw = '&'.join(sig)
        return key, raw

    def sign(self, request, consumer, token):
        """Builds the base signature string."""
        key, raw = self.signing_base(request, consumer, token)

        hashed = hmac.new(key, raw, sha)

        # Calculate the digest base 64, dropping the trailing newline
        # that b2a_base64 appends.
        return binascii.b2a_base64(hashed.digest())[:-1]
class SignatureMethod_PLAINTEXT(SignatureMethod):
    """Signature method that transmits the shared secrets in the clear."""

    name = 'PLAINTEXT'

    def signing_base(self, request, consumer, token):
        """Return the consumer secret joined to the token secret by '&'."""
        parts = [escape(consumer.secret), '']
        if token:
            parts[1] = escape(token.secret)
        base = '&'.join(parts)
        return base, base

    def sign(self, request, consumer, token):
        """For PLAINTEXT the signature is the signing base itself."""
        return self.signing_base(request, consumer, token)[1]
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OAuth 2.0 utilities for Django.
Utilities for using OAuth 2.0 in conjunction with
the Django datastore.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import oauth2client
import base64
import pickle
from django.db import models
from oauth2client.client import Storage as BaseStorage
class CredentialsField(models.Field):
    """Django model field persisting an oauth2client Credentials object.

    The credential is pickled and base64-encoded into a text column.
    """

    __metaclass__ = models.SubfieldBase

    def get_internal_type(self):
        return "TextField"

    def to_python(self, value):
        """Decode a stored value back into a Credentials object."""
        if value is None or isinstance(value, oauth2client.client.Credentials):
            return value
        return pickle.loads(base64.b64decode(value))

    def get_db_prep_value(self, value, connection, prepared=False):
        """Serialize a Credentials object for storage in the database."""
        if value is None:
            return None
        return base64.b64encode(pickle.dumps(value))
class FlowField(models.Field):
    """Django model field persisting an OAuth 2.0 Flow object.

    The flow is pickled and base64-encoded into a text column.
    """

    __metaclass__ = models.SubfieldBase

    def get_internal_type(self):
        return "TextField"

    def to_python(self, value):
        """Decode a stored value back into a Flow object."""
        if value is None:
            return None
        if isinstance(value, oauth2client.client.Flow):
            return value
        return pickle.loads(base64.b64decode(value))

    def get_db_prep_value(self, value, connection, prepared=False):
        """Pickle and base64-encode the flow for the text column."""
        if value is not None:
            return base64.b64encode(pickle.dumps(value))
        return None
class Storage(BaseStorage):
    """Store and retrieve a single credential to and from the datastore.

    This Storage helper presumes the Credentials have been stored as a
    CredentialsField on a db model class.
    """

    def __init__(self, model_class, key_name, key_value, property_name):
        """Constructor for Storage.

        Args:
          model_class: db.Model, model class that holds the credentials
          key_name: string, key name for the entity that has the credentials
          key_value: string, key value for the entity that has the credentials
          property_name: string, name of the property that is a CredentialsField
        """
        self.model_class = model_class
        self.key_name = key_name
        self.key_value = key_value
        self.property_name = property_name

    def locked_get(self):
        """Retrieve Credential from datastore.

        Returns:
          oauth2client.Credentials, or None if no matching row exists.
        """
        credential = None
        query = {self.key_name: self.key_value}
        entities = self.model_class.objects.filter(**query)
        if len(entities) > 0:
            credential = getattr(entities[0], self.property_name)
            # Give the credential a back-reference so refreshed tokens
            # are persisted through this Storage automatically.
            if credential and hasattr(credential, 'set_store'):
                credential.set_store(self)
        return credential

    def locked_put(self, credentials):
        """Write a Credentials to the datastore.

        Args:
          credentials: Credentials, the credentials to store.
        """
        args = {self.key_name: self.key_value}
        entity = self.model_class(**args)
        setattr(entity, self.property_name, credentials)
        entity.save()

    def locked_delete(self):
        """Delete Credentials from the datastore."""
        query = {self.key_name: self.key_value}
        # delete() removes all matching rows; its return value is not
        # meaningful here, so do not bind it to an unused variable.
        self.model_class.objects.filter(**query).delete()
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hashlib
import logging
import time
from OpenSSL import crypto
from anyjson import simplejson
# Tolerances used when validating JWT iat/exp timestamps.
CLOCK_SKEW_SECS = 300  # 5 minutes in seconds
AUTH_TOKEN_LIFETIME_SECS = 300  # 5 minutes in seconds
MAX_TOKEN_LIFETIME_SECS = 86400  # 1 day in seconds
class AppIdentityError(Exception):
    """Raised when a JWT fails one of the verification checks."""
class Verifier(object):
    """Verifies the signature on a message."""

    def __init__(self, pubkey):
        """Constructor.

        Args:
          pubkey: OpenSSL.crypto.PKey, The public key to verify with.
        """
        self._pubkey = pubkey

    def verify(self, message, signature):
        """Verifies a message against a signature.

        Args:
          message: string, The message to verify.
          signature: string, The signature on the message.

        Returns:
          True if message was signed by the private key associated with the
          public key that this object was constructed with.
        """
        try:
            crypto.verify(self._pubkey, signature, message, 'sha256')
            return True
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit. Any verification failure means "invalid".
            return False

    @staticmethod
    def from_string(key_pem, is_x509_cert):
        """Construct a Verifier instance from a string.

        Args:
          key_pem: string, public key in PEM format.
          is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it is
            expected to be an RSA key in PEM format.

        Returns:
          Verifier instance.

        Raises:
          OpenSSL.crypto.Error if the key_pem can't be parsed.
        """
        if is_x509_cert:
            pubkey = crypto.load_certificate(crypto.FILETYPE_PEM, key_pem)
        else:
            pubkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key_pem)
        return Verifier(pubkey)
class Signer(object):
    """Signs messages with a private key."""

    def __init__(self, pkey):
        """Constructor.

        Args:
          pkey: OpenSSL.crypto.PKey, The private key to sign with.
        """
        self._key = pkey

    def sign(self, message):
        """Return the SHA-256 signature of `message` under the private key.

        Args:
          message: string, Message to be signed.
        """
        return crypto.sign(self._key, message, 'sha256')

    @staticmethod
    def from_string(key, password='notasecret'):
        """Build a Signer from a PKCS#12 key blob.

        Args:
          key: string, private key in P12 format.
          password: string, password for the private key file.

        Returns:
          Signer instance.

        Raises:
          OpenSSL.crypto.Error if the key can't be parsed.
        """
        private_key = crypto.load_pkcs12(key, password).get_privatekey()
        return Signer(private_key)
def _urlsafe_b64encode(raw_bytes):
    """URL-safe base64 encode with the trailing '=' padding removed."""
    encoded = base64.urlsafe_b64encode(raw_bytes)
    return encoded.rstrip('=')
def _urlsafe_b64decode(b64string):
    """URL-safe base64 decode, restoring any stripped '=' padding first."""
    # Guard against unicode strings, which base64 can't handle.
    b64string = b64string.encode('ascii')
    padding = '=' * (4 - len(b64string) % 4)
    return base64.urlsafe_b64decode(b64string + padding)
def _json_encode(data):
return simplejson.dumps(data, separators = (',', ':'))
def make_signed_jwt(signer, payload):
    """Make a signed JWT.

    See http://self-issued.info/docs/draft-jones-json-web-token.html.

    Args:
      signer: crypt.Signer, Cryptographic signer.
      payload: dict, Dictionary of data to convert to JSON and then sign.

    Returns:
      string, The JWT for the payload.
    """
    header_segment = _urlsafe_b64encode(
        _json_encode({'typ': 'JWT', 'alg': 'RS256'}))
    payload_segment = _urlsafe_b64encode(_json_encode(payload))
    # The signature covers only "<header>.<payload>".
    signing_input = header_segment + '.' + payload_segment
    signature_segment = _urlsafe_b64encode(signer.sign(signing_input))
    segments = [header_segment, payload_segment, signature_segment]
    logging.debug(str(segments))
    return '.'.join(segments)
def verify_signed_jwt_with_certs(jwt, certs, audience):
    """Verify a JWT against public certs.

    See http://self-issued.info/docs/draft-jones-json-web-token.html.

    Args:
      jwt: string, A JWT.
      certs: dict, Dictionary where values of public keys in PEM format.
      audience: string, The audience, 'aud', that this JWT should contain. If
        None then the JWT's 'aud' parameter is not verified.

    Returns:
      dict, The deserialized JSON payload in the JWT.

    Raises:
      AppIdentityError if any checks are failed.
    """
    segments = jwt.split('.')

    if (len(segments) != 3):
        raise AppIdentityError(
            'Wrong number of segments in token: %s' % jwt)
    # The signature covers "<header>.<payload>".
    signed = '%s.%s' % (segments[0], segments[1])

    signature = _urlsafe_b64decode(segments[2])

    # Parse token.
    json_body = _urlsafe_b64decode(segments[1])
    try:
        parsed = simplejson.loads(json_body)
    except:
        raise AppIdentityError('Can\'t parse token: %s' % json_body)

    # Check signature. The token is accepted if ANY supplied cert verifies
    # it, since signing keys are rotated and several may be current.
    verified = False
    for (keyname, pem) in certs.items():
        verifier = Verifier.from_string(pem, True)
        if (verifier.verify(signed, signature)):
            verified = True
            break
    if not verified:
        raise AppIdentityError('Invalid token signature: %s' % jwt)

    # Check creation timestamp, allowing CLOCK_SKEW_SECS of clock drift.
    iat = parsed.get('iat')
    if iat is None:
        raise AppIdentityError('No iat field in token: %s' % json_body)
    earliest = iat - CLOCK_SKEW_SECS

    # Check expiration timestamp.
    now = long(time.time())
    exp = parsed.get('exp')
    if exp is None:
        raise AppIdentityError('No exp field in token: %s' % json_body)
    if exp >= now + MAX_TOKEN_LIFETIME_SECS:
        raise AppIdentityError(
            'exp field too far in future: %s' % json_body)
    latest = exp + CLOCK_SKEW_SECS

    if now < earliest:
        raise AppIdentityError('Token used too early, %d < %d: %s' %
            (now, earliest, json_body))
    if now > latest:
        raise AppIdentityError('Token used too late, %d > %d: %s' %
            (now, latest, json_body))

    # Check audience.
    if audience is not None:
        aud = parsed.get('aud')
        if aud is None:
            raise AppIdentityError('No aud field in token: %s' % json_body)
        if aud != audience:
            raise AppIdentityError('Wrong recipient, %s != %s: %s' %
                (aud, audience, json_body))

    return parsed
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line tools for authenticating via OAuth 2.0
Do the OAuth 2.0 Web Server dance for a command line application. Stores the
generated credentials in a common file that is used by other example apps in
the same directory.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
__all__ = ['run']
import BaseHTTPServer
import gflags
import socket
import sys
import webbrowser
from client import FlowExchangeError
from client import OOB_CALLBACK_URN
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
# Command-line flags controlling how the OAuth authorization dance runs.
FLAGS = gflags.FLAGS

gflags.DEFINE_boolean('auth_local_webserver', True,
                      ('Run a local web server to handle redirects during '
                       'OAuth authorization.'))

gflags.DEFINE_string('auth_host_name', 'localhost',
                     ('Host name to use when running a local web server to '
                      'handle redirects during OAuth authorization.'))

# Candidate ports, tried in order until one can be bound.
gflags.DEFINE_multi_int('auth_host_port', [8080, 8090],
                        ('Port to use when running a local web server to '
                         'handle redirects during OAuth authorization.'))
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
    """A server to handle OAuth 2.0 redirects back to localhost.

    Waits for a single request and parses the query parameters
    into query_params and then stops serving.
    """
    # Overwritten by ClientRedirectHandler.do_GET with the parsed query
    # parameters of the single redirect request.
    query_params = {}
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """A handler for OAuth 2.0 redirects back to localhost.

    Waits for a single request and parses the query parameters
    into the servers query_params and then stops serving.
    """

    # NOTE: the instance parameter is named `s` rather than the
    # conventional `self`.
    def do_GET(s):
        """Handle a GET request.

        Parses the query parameters and prints a message
        if the flow has completed. Note that we can't detect
        if an error occurred.
        """
        s.send_response(200)
        s.send_header("Content-type", "text/html")
        s.end_headers()
        # Everything after the first '?' is the OAuth redirect query string.
        query = s.path.split('?', 1)[-1]
        query = dict(parse_qsl(query))
        # Stash the parsed parameters where run() can read them.
        s.server.query_params = query
        s.wfile.write("<html><head><title>Authentication Status</title></head>")
        s.wfile.write("<body><p>The authentication flow has completed.</p>")
        s.wfile.write("</body></html>")

    def log_message(self, format, *args):
        """Do not log messages to stdout while running as command line program."""
        pass
def run(flow, storage, http=None):
"""Core code for a command-line application.
Args:
flow: Flow, an OAuth 2.0 Flow to step through.
storage: Storage, a Storage to store the credential in.
http: An instance of httplib2.Http.request
or something that acts like it.
Returns:
Credentials, the obtained credential.
"""
if FLAGS.auth_local_webserver:
success = False
port_number = 0
for port in FLAGS.auth_host_port:
port_number = port
try:
httpd = ClientRedirectServer((FLAGS.auth_host_name, port),
ClientRedirectHandler)
except socket.error, e:
pass
else:
success = True
break
FLAGS.auth_local_webserver = success
if not success:
print 'Failed to start a local webserver listening on either port 8080'
print 'or port 9090. Please check your firewall settings and locally'
print 'running programs that may be blocking or using those ports.'
print
print 'Falling back to --noauth_local_webserver and continuing with',
print 'authorization.'
print
if FLAGS.auth_local_webserver:
oauth_callback = 'http://%s:%s/' % (FLAGS.auth_host_name, port_number)
else:
oauth_callback = OOB_CALLBACK_URN
authorize_url = flow.step1_get_authorize_url(oauth_callback)
if FLAGS.auth_local_webserver:
webbrowser.open(authorize_url, new=1, autoraise=True)
print 'Your browser has been opened to visit:'
print
print ' ' + authorize_url
print
print 'If your browser is on a different machine then exit and re-run this'
print 'application with the command-line parameter '
print
print ' --noauth_local_webserver'
print
else:
print 'Go to the following link in your browser:'
print
print ' ' + authorize_url
print
code = None
if FLAGS.auth_local_webserver:
httpd.handle_request()
if 'error' in httpd.query_params:
sys.exit('Authentication request was rejected.')
if 'code' in httpd.query_params:
code = httpd.query_params['code']
else:
print 'Failed to find "code" in the query parameters of the redirect.'
sys.exit('Try running with --noauth_local_webserver.')
else:
code = raw_input('Enter verification code: ').strip()
try:
credential = flow.step2_exchange(code, http)
except FlowExchangeError, e:
sys.exit('Authentication has failed: %s' % e)
storage.put(credential)
credential.set_store(storage)
print 'Authentication successful.'
return credential
| Python |
# Copyright (C) 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for reading OAuth 2.0 client secret files.
A client_secrets.json file contains all the information needed to interact with
an OAuth 2.0 protected service.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from anyjson import simplejson
# Properties that make a client_secrets.json file valid.
# Client types found in a client_secrets.json file.
TYPE_WEB = 'web'
TYPE_INSTALLED = 'installed'

# Schema per client type: 'required' lists properties that must be present;
# 'string' lists properties that must be filled in (not left as a '[[...]]'
# template placeholder).
VALID_CLIENT = {
    TYPE_WEB: {
        'required': [
            'client_id',
            'client_secret',
            'redirect_uris',
            'auth_uri',
            'token_uri'],
        'string': [
            'client_id',
            'client_secret'
        ]
    },
    TYPE_INSTALLED: {
        'required': [
            'client_id',
            'client_secret',
            'redirect_uris',
            'auth_uri',
            'token_uri'],
        'string': [
            'client_id',
            'client_secret'
        ]
    }
}
class Error(Exception):
    """Base error for this module."""
class InvalidClientSecretsError(Error):
    """The ClientSecrets file is malformed or incomplete."""
def _validate_clientsecrets(obj):
    """Validate a deserialized client_secrets structure.

    Args:
      obj: dict, parsed JSON with exactly one client-type key.

    Returns:
      (client_type, client_info) tuple.

    Raises:
      InvalidClientSecretsError: if the structure is malformed, of an unknown
        client type, or has missing/unconfigured properties.
    """
    if obj is None or len(obj) != 1:
        raise InvalidClientSecretsError('Invalid file format.')
    # The single top-level key names the client type. (list(obj) avoids the
    # py3-hostile obj.keys()[0] and works on both Python 2 and 3.)
    client_type = list(obj)[0]
    if client_type not in VALID_CLIENT:
        raise InvalidClientSecretsError('Unknown client type: %s.' % client_type)
    client_info = obj[client_type]
    for prop_name in VALID_CLIENT[client_type]['required']:
        if prop_name not in client_info:
            raise InvalidClientSecretsError(
                'Missing property "%s" in a client type of "%s".' % (prop_name,
                client_type))
    for prop_name in VALID_CLIENT[client_type]['string']:
        # '[[' marks an unfilled template placeholder.
        if client_info[prop_name].startswith('[['):
            raise InvalidClientSecretsError(
                'Property "%s" is not configured.' % prop_name)
    return client_type, client_info
def load(fp):
    """Read and validate client secrets from an open file object."""
    return _validate_clientsecrets(simplejson.load(fp))
def loads(s):
    """Parse and validate client secrets from a JSON string."""
    return _validate_clientsecrets(simplejson.loads(s))
def loadfile(filename):
    """Read and validate client secrets from the named file.

    Args:
      filename: string, path to a client_secrets.json file.

    Returns:
      (client_type, client_info) tuple.

    Raises:
      InvalidClientSecretsError: if the file cannot be opened, or its
        contents are not a valid client secrets structure.
    """
    try:
        # open() instead of the deprecated file() builtin (removed in py3).
        fp = open(filename, 'r')
        try:
            obj = simplejson.load(fp)
        finally:
            fp.close()
    except IOError:
        raise InvalidClientSecretsError('File not found: "%s"' % filename)
    return _validate_clientsecrets(obj)
| Python |
# Copyright 2011 Google Inc. All Rights Reserved.
"""Multi-credential file store with lock support.
This module implements a JSON credential store where multiple
credentials can be stored in one file. That file supports locking
both in a single process and across processes.
The credentials themselves are keyed off of:
* client_id
* user_agent
* scope
The format of the stored data is like so:
{
'file_version': 1,
'data': [
{
'key': {
'clientId': '<client id>',
'userAgent': '<user agent>',
'scope': '<scope>'
},
'credential': {
# JSON serialized Credentials.
}
}
]
}
"""
__author__ = 'jbeda@google.com (Joe Beda)'
import base64
import errno
import logging
import os
import threading
from anyjson import simplejson
from client import Storage as BaseStorage
from client import Credentials
from locked_file import LockedFile
logger = logging.getLogger(__name__)

# A dict from 'filename'->_MultiStore instances
_multistores = {}
# Guards creation/lookup of entries in _multistores across threads.
_multistores_lock = threading.Lock()
class Error(Exception):
    """Base error for this module."""
class NewerCredentialStoreError(Error):
    """The credential store was written by a newer version than is supported."""
def get_credential_storage(filename, client_id, user_agent, scope,
                           warn_on_readonly=True):
    """Get a Storage instance for a credential.

    Args:
      filename: The JSON file storing a set of credentials
      client_id: The client_id for the credential
      user_agent: The user agent for the credential
      scope: string or list of strings, Scope(s) being requested
      warn_on_readonly: if True, log a warning if the store is readonly

    Returns:
      An object derived from client.Storage for getting/setting the
      credential.
    """
    filename = os.path.realpath(os.path.expanduser(filename))
    _multistores_lock.acquire()
    try:
        # One shared _MultiStore per file, created lazily on first use.
        multistore = _multistores.setdefault(
            filename, _MultiStore(filename, warn_on_readonly))
    finally:
        _multistores_lock.release()
    # isinstance rather than `type(scope) is list` so list subclasses work.
    if isinstance(scope, list):
        # Scopes are keyed canonically as one space-delimited string.
        scope = ' '.join(scope)
    return multistore._get_storage(client_id, user_agent, scope)
class _MultiStore(object):
"""A file backed store for multiple credentials."""
def __init__(self, filename, warn_on_readonly=True):
"""Initialize the class.
This will create the file if necessary.
"""
self._file = LockedFile(filename, 'r+b', 'rb')
self._thread_lock = threading.Lock()
self._read_only = False
self._warn_on_readonly = warn_on_readonly
self._create_file_if_needed()
# Cache of deserialized store. This is only valid after the
# _MultiStore is locked or _refresh_data_cache is called. This is
# of the form of:
#
# (client_id, user_agent, scope) -> OAuth2Credential
#
# If this is None, then the store hasn't been read yet.
self._data = None
class _Storage(BaseStorage):
"""A Storage object that knows how to read/write a single credential."""
def __init__(self, multistore, client_id, user_agent, scope):
self._multistore = multistore
self._client_id = client_id
self._user_agent = user_agent
self._scope = scope
def acquire_lock(self):
"""Acquires any lock necessary to access this Storage.
This lock is not reentrant.
"""
self._multistore._lock()
def release_lock(self):
"""Release the Storage lock.
Trying to release a lock that isn't held will result in a
RuntimeError.
"""
self._multistore._unlock()
def locked_get(self):
"""Retrieve credential.
The Storage lock must be held when this is called.
Returns:
oauth2client.client.Credentials
"""
credential = self._multistore._get_credential(
self._client_id, self._user_agent, self._scope)
if credential:
credential.set_store(self)
return credential
def locked_put(self, credentials):
"""Write a credential.
The Storage lock must be held when this is called.
Args:
credentials: Credentials, the credentials to store.
"""
self._multistore._update_credential(credentials, self._scope)
def locked_delete(self):
"""Delete a credential.
The Storage lock must be held when this is called.
Args:
credentials: Credentials, the credentials to store.
"""
self._multistore._delete_credential(self._client_id, self._user_agent,
self._scope)
def _create_file_if_needed(self):
"""Create an empty file if necessary.
This method will not initialize the file. Instead it implements a
simple version of "touch" to ensure the file has been created.
"""
if not os.path.exists(self._file.filename()):
old_umask = os.umask(0177)
try:
open(self._file.filename(), 'a+b').close()
finally:
os.umask(old_umask)
def _lock(self):
"""Lock the entire multistore."""
self._thread_lock.acquire()
self._file.open_and_lock()
if not self._file.is_locked():
self._read_only = True
if self._warn_on_readonly:
logger.warn('The credentials file (%s) is not writable. Opening in '
'read-only mode. Any refreshed credentials will only be '
'valid for this run.' % self._file.filename())
if os.path.getsize(self._file.filename()) == 0:
logger.debug('Initializing empty multistore file')
# The multistore is empty so write out an empty file.
self._data = {}
self._write()
elif not self._read_only or self._data is None:
# Only refresh the data if we are read/write or we haven't
# cached the data yet. If we are readonly, we assume is isn't
# changing out from under us and that we only have to read it
# once. This prevents us from whacking any new access keys that
# we have cached in memory but were unable to write out.
self._refresh_data_cache()
def _unlock(self):
"""Release the lock on the multistore."""
self._file.unlock_and_close()
self._thread_lock.release()
def _locked_json_read(self):
"""Get the raw content of the multistore file.
The multistore must be locked when this is called.
Returns:
The contents of the multistore decoded as JSON.
"""
assert self._thread_lock.locked()
self._file.file_handle().seek(0)
return simplejson.load(self._file.file_handle())
def _locked_json_write(self, data):
"""Write a JSON serializable data structure to the multistore.
The multistore must be locked when this is called.
Args:
data: The data to be serialized and written.
"""
assert self._thread_lock.locked()
if self._read_only:
return
self._file.file_handle().seek(0)
simplejson.dump(data, self._file.file_handle(), sort_keys=True, indent=2)
self._file.file_handle().truncate()
def _refresh_data_cache(self):
"""Refresh the contents of the multistore.
The multistore must be locked when this is called.
Raises:
NewerCredentialStoreError: Raised when a newer client has written the
store.
"""
self._data = {}
try:
raw_data = self._locked_json_read()
except Exception:
logger.warn('Credential data store could not be loaded. '
'Will ignore and overwrite.')
return
version = 0
try:
version = raw_data['file_version']
except Exception:
logger.warn('Missing version for credential data store. It may be '
'corrupt or an old version. Overwriting.')
if version > 1:
raise NewerCredentialStoreError(
'Credential file has file_version of %d. '
'Only file_version of 1 is supported.' % version)
credentials = []
try:
credentials = raw_data['data']
except (TypeError, KeyError):
pass
for cred_entry in credentials:
try:
(key, credential) = self._decode_credential_from_json(cred_entry)
self._data[key] = credential
except:
# If something goes wrong loading a credential, just ignore it
logger.info('Error decoding credential, skipping', exc_info=True)
def _decode_credential_from_json(self, cred_entry):
"""Load a credential from our JSON serialization.
Args:
cred_entry: A dict entry from the data member of our format
Returns:
(key, cred) where the key is the key tuple and the cred is the
OAuth2Credential object.
"""
raw_key = cred_entry['key']
client_id = raw_key['clientId']
user_agent = raw_key['userAgent']
scope = raw_key['scope']
key = (client_id, user_agent, scope)
credential = None
credential = Credentials.new_from_json(simplejson.dumps(cred_entry['credential']))
return (key, credential)
def _write(self):
"""Write the cached data back out.
The multistore must be locked.
"""
raw_data = {'file_version': 1}
raw_creds = []
raw_data['data'] = raw_creds
for (cred_key, cred) in self._data.items():
raw_key = {
'clientId': cred_key[0],
'userAgent': cred_key[1],
'scope': cred_key[2]
}
raw_cred = simplejson.loads(cred.to_json())
raw_creds.append({'key': raw_key, 'credential': raw_cred})
self._locked_json_write(raw_data)
def _get_credential(self, client_id, user_agent, scope):
"""Get a credential from the multistore.
The multistore must be locked.
Args:
client_id: The client_id for the credential
user_agent: The user agent for the credential
scope: A string for the scope(s) being requested
Returns:
The credential specified or None if not present
"""
key = (client_id, user_agent, scope)
return self._data.get(key, None)
def _update_credential(self, cred, scope):
"""Update a credential and write the multistore.
This must be called when the multistore is locked.
Args:
cred: The OAuth2Credential to update/set
scope: The scope(s) that this credential covers
"""
key = (cred.client_id, cred.user_agent, scope)
self._data[key] = cred
self._write()
def _delete_credential(self, client_id, user_agent, scope):
"""Delete a credential and write the multistore.
This must be called when the multistore is locked.
Args:
client_id: The client_id for the credential
user_agent: The user agent for the credential
scope: The scope(s) that this credential covers
"""
key = (client_id, user_agent, scope)
try:
del self._data[key]
except KeyError:
pass
self._write()
def _get_storage(self, client_id, user_agent, scope):
  """Get a Storage object to get/set a credential.

  This Storage is a 'view' into the multistore.

  Args:
    client_id: The client_id for the credential
    user_agent: The user agent for the credential
    scope: A string for the scope(s) being requested

  Returns:
    A Storage object that can be used to get/set this cred
  """
  # _Storage is the multistore's private per-key Storage adapter; operations
  # on it route back through this multistore's locking.
  return self._Storage(self, client_id, user_agent, scope)
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility module to import a JSON module
Hides all the messy details of exactly where
we get a simplejson module from.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
try: # pragma: no cover
# Should work for Python2.6 and higher.
import json as simplejson
except ImportError: # pragma: no cover
try:
import simplejson
except ImportError:
# Try to import from django, should work on App Engine
from django.utils import simplejson
| Python |
__version__ = "1.0c2"  # oauth2client package version string.
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Google App Engine
Utilities for making it easier to use OAuth 2.0 on Google App Engine.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import base64
import httplib2
import logging
import pickle
import time
import clientsecrets
from anyjson import simplejson
from client import AccessTokenRefreshError
from client import AssertionCredentials
from client import Credentials
from client import Flow
from client import OAuth2WebServerFlow
from client import Storage
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.api import app_identity
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import login_required
from google.appengine.ext.webapp.util import run_wsgi_app
# Memcache namespace used to stash pickled Flow objects between the start of
# the OAuth dance (authorize_url) and the /oauth2callback redirect handler.
OAUTH2CLIENT_NAMESPACE = 'oauth2client#ns'
class InvalidClientSecretsError(Exception):
  """Raised when the client_secrets.json file is malformed or incomplete."""
  pass
class AppAssertionCredentials(AssertionCredentials):
  """Credentials object for App Engine Assertion Grants

  This object will allow an App Engine application to identify itself to Google
  and other OAuth 2.0 servers that can verify assertions. It can be used for
  the purpose of accessing data stored under an account assigned to the App
  Engine application itself.

  This credential does not require a flow to instantiate because it represents
  a two legged flow, and therefore has all of the required information to
  generate and refresh its own access tokens.
  """

  def __init__(self, scope, **kwargs):
    """Constructor for AppAssertionCredentials

    Args:
      scope: string or list of strings, scope(s) of the credentials being
        requested.
    """
    # Normalize a list of scopes into the single space-delimited string that
    # app_identity.get_access_token accepts.
    if type(scope) is list:
      scope = ' '.join(scope)
    self.scope = scope

    # assertion_type/assertion/user_agent are unused: app_identity hands us
    # tokens directly, so the base-class fields stay None.
    super(AppAssertionCredentials, self).__init__(
        None,
        None,
        None)

  @classmethod
  def from_json(cls, json):
    # Only 'scope' is needed to reconstitute this credential type.
    data = simplejson.loads(json)
    return AppAssertionCredentials(data['scope'])

  def _refresh(self, http_request):
    """Refreshes the access_token.

    Since the underlying App Engine app_identity implementation does its own
    caching we can skip all the storage hoops and just do a refresh using the
    API.

    Args:
      http_request: callable, a callable that matches the method signature of
        httplib2.Http.request, used to make the refresh request.

    Raises:
      AccessTokenRefreshError: When the refresh fails.
    """
    try:
      (token, _) = app_identity.get_access_token(self.scope)
    except app_identity.Error, e:
      raise AccessTokenRefreshError(str(e))
    self.access_token = token
class FlowProperty(db.Property):
  """App Engine datastore Property for Flow.

  Utility property that allows easy storage and retrieval of an
  oauth2client.Flow"""

  # Tell what the user type is.
  data_type = Flow

  # For writing to datastore.
  def get_value_for_datastore(self, model_instance):
    flow = super(FlowProperty,
                 self).get_value_for_datastore(model_instance)
    # Flows are stored pickled inside an opaque datastore Blob.
    return db.Blob(pickle.dumps(flow))

  # For reading from datastore.
  def make_value_from_datastore(self, value):
    if value is None:
      return None
    return pickle.loads(value)

  def validate(self, value):
    if value is not None and not isinstance(value, Flow):
      raise db.BadValueError('Property %s must be convertible '
                             'to a FlowThreeLegged instance (%s)' %
                             (self.name, value))
    return super(FlowProperty, self).validate(value)

  def empty(self, value):
    return not value
class CredentialsProperty(db.Property):
  """App Engine datastore Property for Credentials.

  Utility property that allows easy storage and retrieval of
  oauth2client.Credentials
  """

  # Tell what the user type is.
  data_type = Credentials

  # For writing to datastore.
  def get_value_for_datastore(self, model_instance):
    """Serialize the Credentials into a JSON blob for storage."""
    # Lazy %-args avoid eager str() work when INFO logging is disabled.
    logging.info("get: Got type %s", type(model_instance))
    cred = super(CredentialsProperty,
                 self).get_value_for_datastore(model_instance)
    # Store missing credentials as an empty blob (not None) so the round-trip
    # stays symmetric with make_value_from_datastore.
    if cred is None:
      cred = ''
    else:
      cred = cred.to_json()
    return db.Blob(cred)

  # For reading from datastore.
  def make_value_from_datastore(self, value):
    """Deserialize a JSON blob back into a Credentials object.

    Returns None for missing, empty, or corrupt stored values rather than
    raising, so a bad datastore entry never breaks entity loads.
    """
    logging.info("make: Got type %s", type(value))
    # Collapses the original's separate `is None` and `len() == 0` checks.
    if not value:
      return None
    try:
      return Credentials.new_from_json(value)
    except ValueError:
      # Corrupt/non-JSON data: treat as no stored credential.
      return None

  def validate(self, value):
    value = super(CredentialsProperty, self).validate(value)
    logging.info("validate: Got type %s", type(value))
    if value is not None and not isinstance(value, Credentials):
      raise db.BadValueError('Property %s must be convertible '
                             'to a Credentials instance (%s)' %
                             (self.name, value))
    return value
class StorageByKeyName(Storage):
  """Store and retrieve a single credential to and from
  the App Engine datastore.

  This Storage helper presumes the Credentials
  have been stored as a CredentialsProperty
  on a datastore model class, and that entities
  are stored by key_name.
  """

  def __init__(self, model, key_name, property_name, cache=None):
    """Constructor for Storage.

    Args:
      model: db.Model, model class
      key_name: string, key name for the entity that has the credentials
      property_name: string, name of the property that is a CredentialsProperty
      cache: memcache, a write-through cache to put in front of the datastore
    """
    self._model = model
    self._key_name = key_name
    self._property_name = property_name
    self._cache = cache

  def locked_get(self):
    """Retrieve Credential from datastore.

    Returns:
      oauth2client.Credentials
    """
    # Fast path: serve the JSON form straight from the cache when present.
    if self._cache:
      json = self._cache.get(self._key_name)
      if json:
        return Credentials.new_from_json(json)
    credential = None
    entity = self._model.get_by_key_name(self._key_name)
    if entity is not None:
      credential = getattr(entity, self._property_name)
      if credential and hasattr(credential, 'set_store'):
        # Let the credential write refreshed tokens back through this store.
        credential.set_store(self)
        if self._cache:
          # Repopulate the cache on a datastore hit (write-through).
          self._cache.set(self._key_name, credential.to_json())
    return credential

  def locked_put(self, credentials):
    """Write a Credentials to the datastore.

    Args:
      credentials: Credentials, the credentials to store.
    """
    entity = self._model.get_or_insert(self._key_name)
    setattr(entity, self._property_name, credentials)
    entity.put()
    # Keep the cache coherent with what was just persisted.
    if self._cache:
      self._cache.set(self._key_name, credentials.to_json())

  def locked_delete(self):
    """Delete Credential from datastore."""
    # Evict the cached copy first so readers cannot observe a deleted
    # credential via the cache.
    if self._cache:
      self._cache.delete(self._key_name)
    entity = self._model.get_by_key_name(self._key_name)
    if entity is not None:
      entity.delete()
class CredentialsModel(db.Model):
  """Storage for OAuth 2.0 Credentials

  Storage of the model is keyed by the user.user_id().
  """

  # Stored as a JSON blob via CredentialsProperty.
  credentials = CredentialsProperty()
class OAuth2Decorator(object):
  """Utility for making OAuth 2.0 easier.

  Instantiate and then use with oauth_required or oauth_aware
  as decorators on webapp.RequestHandler methods.

  Example:

    decorator = OAuth2Decorator(
        client_id='837...ent.com',
        client_secret='Qh...wwI',
        scope='https://www.googleapis.com/auth/plus')

    class MainHandler(webapp.RequestHandler):

      @decorator.oauth_required
      def get(self):
        http = decorator.http()
        # http is authorized with the user's Credentials and can be used
        # in API calls
  """

  def __init__(self, client_id, client_secret, scope,
               auth_uri='https://accounts.google.com/o/oauth2/auth',
               token_uri='https://accounts.google.com/o/oauth2/token',
               user_agent=None,
               message=None, **kwargs):
    """Constructor for OAuth2Decorator

    Args:
      client_id: string, client identifier.
      client_secret: string client secret.
      scope: string or list of strings, scope(s) of the credentials being
        requested.
      auth_uri: string, URI for authorization endpoint. For convenience
        defaults to Google's endpoints but any OAuth 2.0 provider can be used.
      token_uri: string, URI for token endpoint. For convenience
        defaults to Google's endpoints but any OAuth 2.0 provider can be used.
      user_agent: string, User agent of your application, default to None.
      message: Message to display if there are problems with the OAuth 2.0
        configuration. The message may contain HTML and will be presented on
        the web interface for any method that uses the decorator.
      **kwargs: dict, Keyword arguments are be passed along as kwargs to the
        OAuth2WebServerFlow constructor.
    """
    self.flow = OAuth2WebServerFlow(client_id, client_secret, scope, user_agent,
                                    auth_uri, token_uri, **kwargs)
    # NOTE(review): credentials/_request_handler are mutated per request below;
    # this assumes one request at a time per decorator instance — confirm.
    self.credentials = None
    self._request_handler = None
    self._message = message
    self._in_error = False

  def _display_error_message(self, request_handler):
    # Minimal HTML page shown when the decorator was constructed in an error
    # state (e.g. bad client secrets).
    request_handler.response.out.write('<html><body>')
    request_handler.response.out.write(self._message)
    request_handler.response.out.write('</body></html>')

  def oauth_required(self, method):
    """Decorator that starts the OAuth 2.0 dance.

    Starts the OAuth dance for the logged in user if they haven't already
    granted access for this application.

    Args:
      method: callable, to be decorated method of a webapp.RequestHandler
        instance.
    """

    def check_oauth(request_handler, *args, **kwargs):
      if self._in_error:
        self._display_error_message(request_handler)
        return

      user = users.get_current_user()
      # Don't use @login_decorator as this could be used in a POST request.
      if not user:
        request_handler.redirect(users.create_login_url(
            request_handler.request.uri))
        return

      # Store the request URI in 'state' so we can use it later
      self.flow.params['state'] = request_handler.request.url
      self._request_handler = request_handler
      self.credentials = StorageByKeyName(
          CredentialsModel, user.user_id(), 'credentials').get()

      if not self.has_credentials():
        return request_handler.redirect(self.authorize_url())
      try:
        method(request_handler, *args, **kwargs)
      except AccessTokenRefreshError:
        # Refresh token was revoked/expired mid-request: restart the dance.
        return request_handler.redirect(self.authorize_url())

    return check_oauth

  def oauth_aware(self, method):
    """Decorator that sets up for OAuth 2.0 dance, but doesn't do it.

    Does all the setup for the OAuth dance, but doesn't initiate it.
    This decorator is useful if you want to create a page that knows
    whether or not the user has granted access to this application.
    From within a method decorated with @oauth_aware the has_credentials()
    and authorize_url() methods can be called.

    Args:
      method: callable, to be decorated method of a webapp.RequestHandler
        instance.
    """

    def setup_oauth(request_handler, *args, **kwargs):
      if self._in_error:
        self._display_error_message(request_handler)
        return

      user = users.get_current_user()
      # Don't use @login_decorator as this could be used in a POST request.
      if not user:
        request_handler.redirect(users.create_login_url(
            request_handler.request.uri))
        return

      self.flow.params['state'] = request_handler.request.url
      self._request_handler = request_handler
      self.credentials = StorageByKeyName(
          CredentialsModel, user.user_id(), 'credentials').get()
      # Unlike oauth_required, the wrapped method always runs; it decides
      # what to do based on has_credentials().
      method(request_handler, *args, **kwargs)
    return setup_oauth

  def has_credentials(self):
    """True if for the logged in user there are valid access Credentials.

    Must only be called from with a webapp.RequestHandler subclassed method
    that had been decorated with either @oauth_required or @oauth_aware.
    """
    return self.credentials is not None and not self.credentials.invalid

  def authorize_url(self):
    """Returns the URL to start the OAuth dance.

    Must only be called from with a webapp.RequestHandler subclassed method
    that had been decorated with either @oauth_required or @oauth_aware.
    """
    callback = self._request_handler.request.relative_url('/oauth2callback')
    url = self.flow.step1_get_authorize_url(callback)
    user = users.get_current_user()
    # Stash the flow in memcache so OAuth2Handler can complete the exchange.
    memcache.set(user.user_id(), pickle.dumps(self.flow),
                 namespace=OAUTH2CLIENT_NAMESPACE)
    return str(url)

  def http(self):
    """Returns an authorized http instance.

    Must only be called from within an @oauth_required decorated method, or
    from within an @oauth_aware decorated method where has_credentials()
    returns True.
    """
    return self.credentials.authorize(httplib2.Http())
class OAuth2DecoratorFromClientSecrets(OAuth2Decorator):
  """An OAuth2Decorator that builds from a clientsecrets file.

  Uses a clientsecrets file as the source for all the information when
  constructing an OAuth2Decorator.

  Example:

    decorator = OAuth2DecoratorFromClientSecrets(
      os.path.join(os.path.dirname(__file__), 'client_secrets.json')
      scope='https://www.googleapis.com/auth/plus')

    class MainHandler(webapp.RequestHandler):

      @decorator.oauth_required
      def get(self):
        http = decorator.http()
        # http is authorized with the user's Credentials and can be used
        # in API calls
  """

  def __init__(self, filename, scope, message=None):
    """Constructor

    Args:
      filename: string, File name of client secrets.
      scope: string or list of strings, scope(s) of the credentials being
        requested.
      message: string, A friendly string to display to the user if the
        clientsecrets file is missing or invalid. The message may contain HTML
        and will be presented on the web interface for any method that uses the
        decorator.
    """
    try:
      client_type, client_info = clientsecrets.loadfile(filename)
      if client_type not in [
          clientsecrets.TYPE_WEB, clientsecrets.TYPE_INSTALLED]:
        raise InvalidClientSecretsError(
            'OAuth2Decorator doesn\'t support this OAuth 2.0 flow.')
      # BUG FIX: pass the optional arguments by keyword. The original passed
      # auth_uri, token_uri and message positionally, which bound `message`
      # to the base class's `user_agent` parameter, so the message was sent
      # as the User-Agent and never used as the error message.
      super(OAuth2DecoratorFromClientSecrets, self).__init__(
          client_info['client_id'],
          client_info['client_secret'],
          scope,
          auth_uri=client_info['auth_uri'],
          token_uri=client_info['token_uri'],
          message=message)
    except clientsecrets.InvalidClientSecretsError:
      # Defer reporting to request time: handlers render self._message.
      self._in_error = True
    if message is not None:
      self._message = message
    else:
      self._message = "Please configure your application for OAuth 2.0"
def oauth2decorator_from_clientsecrets(filename, scope, message=None):
  """Creates an OAuth2Decorator populated from a clientsecrets file.

  Args:
    filename: string, File name of client secrets.
    scope: string or list of strings, scope(s) of the credentials being
      requested.
    message: string, A friendly string to display to the user if the
      clientsecrets file is missing or invalid. The message may contain HTML
      and will be presented on the web interface for any method that uses the
      decorator.

  Returns: An OAuth2Decorator
  """
  # Thin convenience wrapper around the class constructor.
  return OAuth2DecoratorFromClientSecrets(filename, scope, message=message)
class OAuth2Handler(webapp.RequestHandler):
  """Handler for the redirect_uri of the OAuth 2.0 dance."""

  @login_required
  def get(self):
    error = self.request.get('error')
    if error:
      errormsg = self.request.get('error_description', error)
      self.response.out.write(
          'The authorization request failed: %s' % errormsg)
    else:
      user = users.get_current_user()
      # The Flow was stashed in memcache by OAuth2Decorator.authorize_url().
      flow = pickle.loads(memcache.get(user.user_id(),
                                       namespace=OAUTH2CLIENT_NAMESPACE))
      # This code should be amended with application specific error
      # handling. The following cases should be considered:
      # 1. What if the flow doesn't exist in memcache? Or is corrupt?
      # 2. What if the step2_exchange fails?
      if flow:
        credentials = flow.step2_exchange(self.request.params)
        StorageByKeyName(
            CredentialsModel, user.user_id(), 'credentials').put(credentials)
        # 'state' carries the URL the user originally requested.
        self.redirect(str(self.request.get('state')))
      else:
        # TODO Add error handling here.
        pass
# WSGI app that routes the OAuth 2.0 redirect URI to the handler above.
application = webapp.WSGIApplication([('/oauth2callback', OAuth2Handler)])


def main():
  # Entry point when this module is configured as an App Engine script handler.
  run_wsgi_app(application)
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An OAuth 2.0 client.
Tools for interacting with OAuth 2.0 protected resources.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import base64
import clientsecrets
import copy
import datetime
import httplib2
import logging
import os
import sys
import time
import urllib
import urlparse
from anyjson import simplejson
HAS_OPENSSL = False
try:
from oauth2client.crypt import Signer
from oauth2client.crypt import make_signed_jwt
from oauth2client.crypt import verify_signed_jwt_with_certs
HAS_OPENSSL = True
except ImportError:
pass
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
logger = logging.getLogger(__name__)

# Expiry is stored in RFC3339 UTC format
EXPIRY_FORMAT = '%Y-%m-%dT%H:%M:%SZ'

# Which certs to use to validate id_tokens received.
# NOTE: the 'VERIFICATON' misspelling is part of the public name; do not
# rename without a deprecation alias.
ID_TOKEN_VERIFICATON_CERTS = 'https://www.googleapis.com/oauth2/v1/certs'

# Constant to use for the out of band OAuth 2.0 flow.
OOB_CALLBACK_URN = 'urn:ietf:wg:oauth:2.0:oob'
class Error(Exception):
  """Root of the exception hierarchy for this module."""
  pass
class FlowExchangeError(Error):
  """Raised when exchanging an authorization grant for an access token fails."""
  pass
class AccessTokenRefreshError(Error):
  """Raised when refreshing an expired access token fails."""
  pass
class UnknownClientSecretsFlowError(Error):
  """Raised when the client secrets file names an unsupported OAuth 2.0 flow."""
  pass
class AccessTokenCredentialsError(Error):
  """Raised because holding only an access_token means no refresh is possible."""
  pass
class VerifyJwtTokenError(Error):
  """Could not retrieve certificates for validation."""
  pass
def _abstract():
raise NotImplementedError('You need to override this function')
class MemoryCache(object):
  """In-process dict-backed cache satisfying the httplib2 cache interface."""

  def __init__(self):
    # Plain dict; keys and values are whatever httplib2 hands us.
    self.cache = {}

  def get(self, key):
    # Missing keys yield None, which is what httplib2 expects.
    return self.cache.get(key, None)

  def set(self, key, value):
    self.cache[key] = value

  def delete(self, key):
    # Deleting an absent key is a no-op, like dict.pop(key, None).
    if key in self.cache:
      del self.cache[key]
class Credentials(object):
  """Base class for all Credentials objects.

  Subclasses must define an authorize() method that applies the credentials to
  an HTTP transport.

  Subclasses must also specify a classmethod named 'from_json' that takes a
  JSON string as input and returns an instantiated Credentials object.
  """

  # Instance members that must never be serialized by to_json(); 'store' is a
  # live Storage reference, not credential state.
  NON_SERIALIZED_MEMBERS = ['store']

  def authorize(self, http):
    """Take an httplib2.Http instance (or equivalent) and
    authorizes it for the set of credentials, usually by
    replacing http.request() with a method that adds in
    the appropriate headers and then delegates to the original
    Http.request() method.
    """
    _abstract()

  def refresh(self, http):
    """Forces a refresh of the access_token.

    Args:
      http: httplib2.Http, an http object to be used to make the refresh
        request.
    """
    _abstract()

  def apply(self, headers):
    """Add the authorization to the headers.

    Args:
      headers: dict, the headers to add the Authorization header to.
    """
    _abstract()

  def _to_json(self, strip):
    """Utility function for creating a JSON representation of an instance of
    Credentials.

    Args:
      strip: array, An array of names of members to not include in the JSON.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
      from_json().
    """
    t = type(self)
    d = copy.copy(self.__dict__)
    for member in strip:
      if member in d:
        del d[member]
    if 'token_expiry' in d and isinstance(d['token_expiry'], datetime.datetime):
      # datetime is not JSON-serializable; store RFC3339 text instead.
      d['token_expiry'] = d['token_expiry'].strftime(EXPIRY_FORMAT)
    # Add in information we will need later to reconstitute this instance.
    d['_class'] = t.__name__
    d['_module'] = t.__module__
    return simplejson.dumps(d)

  def to_json(self):
    """Creating a JSON representation of an instance of Credentials.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
      from_json().
    """
    return self._to_json(Credentials.NON_SERIALIZED_MEMBERS)

  @classmethod
  def new_from_json(cls, s):
    """Utility class method to instantiate a Credentials subclass from a JSON
    representation produced by to_json().

    Args:
      s: string, JSON from to_json().

    Returns:
      An instance of the subclass of Credentials that was serialized with
      to_json().
    """
    data = simplejson.loads(s)
    # Find and call the right classmethod from_json() to restore the object.
    module = data['_module']
    try:
      m = __import__(module)
    except ImportError:
      # In case there's an object from the old package structure, update it
      module = module.replace('.apiclient', '')
      m = __import__(module)

    # Re-import with fromlist so we bind the leaf module rather than the
    # package root returned by the plain __import__ above.
    m = __import__(module, fromlist=module.split('.')[:-1])
    kls = getattr(m, data['_class'])
    from_json = getattr(kls, 'from_json')
    return from_json(s)

  @classmethod
  def from_json(cls, s):
    """Instantiate a Credentials object from a JSON description of it.

    The JSON should have been produced by calling .to_json() on the object.

    Args:
      s: string, JSON from to_json().

    Returns:
      An instance of a Credentials subclass.
    """
    # Base implementation ignores the payload; subclasses override this.
    return Credentials()
class Flow(object):
  """Base type from which all OAuth 2.0 flow implementations derive."""
  pass
class Storage(object):
  """Base class for all Storage objects.

  Store and retrieve a single credential. This class supports locking
  such that multiple processes and threads can operate on a single
  store.
  """

  def acquire_lock(self):
    """Acquires any lock necessary to access this Storage.

    This lock is not reentrant.
    """
    pass

  def release_lock(self):
    """Release the Storage lock.

    Trying to release a lock that isn't held will result in a
    RuntimeError.
    """
    pass

  def locked_get(self):
    """Retrieve credential.

    The Storage lock must be held when this is called.

    Returns:
      oauth2client.client.Credentials
    """
    _abstract()

  def locked_put(self, credentials):
    """Write a credential.

    The Storage lock must be held when this is called.

    Args:
      credentials: Credentials, the credentials to store.
    """
    _abstract()

  def locked_delete(self):
    """Delete a credential.

    The Storage lock must be held when this is called.
    """
    _abstract()

  def get(self):
    """Retrieve credential.

    The Storage lock must *not* be held when this is called.

    Returns:
      oauth2client.client.Credentials
    """
    self.acquire_lock()
    try:
      credentials = self.locked_get()
    finally:
      # Release even when locked_get raises.
      self.release_lock()
    return credentials

  def put(self, credentials):
    """Write a credential.

    The Storage lock must *not* be held when this is called; it is taken
    and released around the underlying locked_put().

    Args:
      credentials: Credentials, the credentials to store.
    """
    self.acquire_lock()
    try:
      self.locked_put(credentials)
    finally:
      self.release_lock()

  def delete(self):
    """Delete credential.

    Frees any resources associated with storing the credential.
    The Storage lock must *not* be held when this is called.

    Returns:
      None
    """
    self.acquire_lock()
    try:
      result = self.locked_delete()
    finally:
      self.release_lock()
    return result
class OAuth2Credentials(Credentials):
"""Credentials object for OAuth 2.0.
Credentials can be applied to an httplib2.Http object using the authorize()
method, which then adds the OAuth 2.0 access token to each request.
OAuth2Credentials objects may be safely pickled and unpickled.
"""
def __init__(self, access_token, client_id, client_secret, refresh_token,
token_expiry, token_uri, user_agent, id_token=None):
"""Create an instance of OAuth2Credentials.
This constructor is not usually called by the user, instead
OAuth2Credentials objects are instantiated by the OAuth2WebServerFlow.
Args:
access_token: string, access token.
client_id: string, client identifier.
client_secret: string, client secret.
refresh_token: string, refresh token.
token_expiry: datetime, when the access_token expires.
token_uri: string, URI of token endpoint.
user_agent: string, The HTTP User-Agent to provide for this application.
id_token: object, The identity of the resource owner.
Notes:
store: callable, A callable that when passed a Credential
will store the credential back to where it came from.
This is needed to store the latest access_token if it
has expired and been refreshed.
"""
self.access_token = access_token
self.client_id = client_id
self.client_secret = client_secret
self.refresh_token = refresh_token
self.store = None
self.token_expiry = token_expiry
self.token_uri = token_uri
self.user_agent = user_agent
self.id_token = id_token
# True if the credentials have been revoked or expired and can't be
# refreshed.
self.invalid = False
def authorize(self, http):
"""Authorize an httplib2.Http instance with these credentials.
The modified http.request method will add authentication headers to each
request and will refresh access_tokens when a 401 is received on a
request. In addition the http.request method has a credentials property,
http.request.credentials, which is the Credentials object that authorized
it.
Args:
http: An instance of httplib2.Http
or something that acts like it.
Returns:
A modified instance of http that was passed in.
Example:
h = httplib2.Http()
h = credentials.authorize(h)
You can't create a new OAuth subclass of httplib2.Authenication
because it never gets passed the absolute URI, which is needed for
signing. So instead we have to overload 'request' with a closure
that adds in the Authorization header and then calls the original
version of 'request()'.
"""
request_orig = http.request
# The closure that will replace 'httplib2.Http.request'.
def new_request(uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
if not self.access_token:
logger.info('Attempting refresh to obtain initial access_token')
self._refresh(request_orig)
# Modify the request headers to add the appropriate
# Authorization header.
if headers is None:
headers = {}
self.apply(headers)
if self.user_agent is not None:
if 'user-agent' in headers:
headers['user-agent'] = self.user_agent + ' ' + headers['user-agent']
else:
headers['user-agent'] = self.user_agent
resp, content = request_orig(uri, method, body, headers,
redirections, connection_type)
if resp.status == 401:
logger.info('Refreshing due to a 401')
self._refresh(request_orig)
self.apply(headers)
return request_orig(uri, method, body, headers,
redirections, connection_type)
else:
return (resp, content)
# Replace the request method with our own closure.
http.request = new_request
# Set credentials as a property of the request method.
setattr(http.request, 'credentials', self)
return http
def refresh(self, http):
"""Forces a refresh of the access_token.
Args:
http: httplib2.Http, an http object to be used to make the refresh
request.
"""
self._refresh(http.request)
def apply(self, headers):
"""Add the authorization to the headers.
Args:
headers: dict, the headers to add the Authorization header to.
"""
headers['Authorization'] = 'Bearer ' + self.access_token
def to_json(self):
return self._to_json(Credentials.NON_SERIALIZED_MEMBERS)
@classmethod
def from_json(cls, s):
"""Instantiate a Credentials object from a JSON description of it. The JSON
should have been produced by calling .to_json() on the object.
Args:
data: dict, A deserialized JSON object.
Returns:
An instance of a Credentials subclass.
"""
data = simplejson.loads(s)
if 'token_expiry' in data and not isinstance(data['token_expiry'],
datetime.datetime):
try:
data['token_expiry'] = datetime.datetime.strptime(
data['token_expiry'], EXPIRY_FORMAT)
except:
data['token_expiry'] = None
retval = OAuth2Credentials(
data['access_token'],
data['client_id'],
data['client_secret'],
data['refresh_token'],
data['token_expiry'],
data['token_uri'],
data['user_agent'],
data.get('id_token', None))
retval.invalid = data['invalid']
return retval
@property
def access_token_expired(self):
"""True if the credential is expired or invalid.
If the token_expiry isn't set, we assume the token doesn't expire.
"""
if self.invalid:
return True
if not self.token_expiry:
return False
now = datetime.datetime.utcnow()
if now >= self.token_expiry:
logger.info('access_token is expired. Now: %s, token_expiry: %s',
now, self.token_expiry)
return True
return False
def set_store(self, store):
"""Set the Storage for the credential.
Args:
store: Storage, an implementation of Stroage object.
This is needed to store the latest access_token if it
has expired and been refreshed. This implementation uses
locking to check for updates before updating the
access_token.
"""
self.store = store
def _updateFromCredential(self, other):
"""Update this Credential from another instance."""
self.__dict__.update(other.__getstate__())
def __getstate__(self):
"""Trim the state down to something that can be pickled."""
d = copy.copy(self.__dict__)
del d['store']
return d
def __setstate__(self, state):
"""Reconstitute the state of the object from being pickled."""
self.__dict__.update(state)
self.store = None
def _generate_refresh_request_body(self):
"""Generate the body that will be used in the refresh request."""
body = urllib.urlencode({
'grant_type': 'refresh_token',
'client_id': self.client_id,
'client_secret': self.client_secret,
'refresh_token': self.refresh_token,
})
return body
def _generate_refresh_request_headers(self):
"""Generate the headers that will be used in the refresh request."""
headers = {
'content-type': 'application/x-www-form-urlencoded',
}
if self.user_agent is not None:
headers['user-agent'] = self.user_agent
return headers
def _refresh(self, http_request):
"""Refreshes the access_token.
This method first checks by reading the Storage object if available.
If a refresh is still needed, it holds the Storage lock until the
refresh is completed.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the refresh request.
Raises:
AccessTokenRefreshError: When the refresh fails.
"""
if not self.store:
self._do_refresh_request(http_request)
else:
self.store.acquire_lock()
try:
new_cred = self.store.locked_get()
if (new_cred and not new_cred.invalid and
new_cred.access_token != self.access_token):
logger.info('Updated access_token read from Storage')
self._updateFromCredential(new_cred)
else:
self._do_refresh_request(http_request)
finally:
self.store.release_lock()
def _do_refresh_request(self, http_request):
    """Refresh the access_token using the refresh_token.

    On success, updates access_token/refresh_token/token_expiry and writes
    the credential back to the store (if any). On failure, marks the
    credential invalid when the server reports an OAuth error.

    Args:
      http_request: callable, a callable that matches the method signature of
        httplib2.Http.request, used to make the refresh request.

    Raises:
      AccessTokenRefreshError: When the refresh fails.
    """
    body = self._generate_refresh_request_body()
    headers = self._generate_refresh_request_headers()

    logger.info('Refreshing access_token')
    resp, content = http_request(
        self.token_uri, method='POST', body=body, headers=headers)
    if resp.status == 200:
        # TODO(jcgregorio) Raise an error if loads fails?
        d = simplejson.loads(content)
        self.access_token = d['access_token']
        # The server may not return a new refresh_token; keep the old one.
        self.refresh_token = d.get('refresh_token', self.refresh_token)
        if 'expires_in' in d:
            self.token_expiry = datetime.timedelta(
                seconds=int(d['expires_in'])) + datetime.datetime.utcnow()
        else:
            self.token_expiry = None
        if self.store:
            self.store.locked_put(self)
    else:
        # An {'error':...} response body means the token is expired or revoked,
        # so we flag the credentials as such.
        logger.info('Failed to retrieve access token: %s', content)
        error_msg = 'Invalid response %s.' % resp['status']
        try:
            d = simplejson.loads(content)
            if 'error' in d:
                error_msg = d['error']
                self.invalid = True
                if self.store:
                    self.store.locked_put(self)
        except Exception:
            # Bug fix: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt. Non-JSON bodies simply fall
            # back to the generic HTTP-status message built above.
            pass
        raise AccessTokenRefreshError(error_msg)
class AccessTokenCredentials(OAuth2Credentials):
    """Credentials object for OAuth 2.0.

    Credentials can be applied to an httplib2.Http object using the
    authorize() method, which then signs each request from that object
    with the OAuth 2.0 access token. This set of credentials is for the
    use case where you have acquired an OAuth 2.0 access_token from
    another place such as a JavaScript client or another web
    application, and wish to use it from Python. Because only the
    access_token is present it can not be refreshed and will in time
    expire.

    AccessTokenCredentials objects may be safely pickled and unpickled.

    Usage:
      credentials = AccessTokenCredentials('<an access token>',
        'my-user-agent/1.0')
      http = httplib2.Http()
      http = credentials.authorize(http)

    Exceptions:
      AccessTokenCredentialsError: raised when a refresh is attempted, since
        an externally acquired access_token cannot be refreshed here.
    """

    def __init__(self, access_token, user_agent):
        """Create an instance of AccessTokenCredentials.

        This is one of the few types of Credentials that you should construct
        directly; Credentials objects are usually instantiated by a Flow.

        Args:
          access_token: string, access token.
          user_agent: string, The HTTP User-Agent to provide for this
            application.
        """
        # Every refresh-related field (client id/secret, refresh token,
        # expiry, token_uri) is None: there is nothing to refresh with.
        super(AccessTokenCredentials, self).__init__(
            access_token,
            None,
            None,
            None,
            None,
            None,
            user_agent)

    @classmethod
    def from_json(cls, s):
        """Reconstruct an AccessTokenCredentials from its JSON serialization."""
        data = simplejson.loads(s)
        retval = AccessTokenCredentials(
            data['access_token'],
            data['user_agent'])
        return retval

    def _refresh(self, http_request):
        # Refreshing is impossible by design: no refresh_token exists.
        raise AccessTokenCredentialsError(
            "The access_token is expired or invalid and can't be refreshed.")
class AssertionCredentials(OAuth2Credentials):
    """Abstract Credentials object used for OAuth 2.0 assertion grants.

    This credential does not require a flow to instantiate because it
    represents a two legged flow, and therefore has all of the required
    information to generate and refresh its own access tokens. It must
    be subclassed to generate the appropriate assertion string.

    AssertionCredentials objects may be safely pickled and unpickled.
    """

    def __init__(self, assertion_type, user_agent,
                 token_uri='https://accounts.google.com/o/oauth2/token',
                 **unused_kwargs):
        """Constructor for AssertionFlowCredentials.

        Args:
          assertion_type: string, assertion type that will be declared to the
            auth server.
          user_agent: string, The HTTP User-Agent to provide for this
            application.
          token_uri: string, URI for token endpoint. For convenience defaults
            to Google's endpoints but any OAuth 2.0 provider can be used.
        """
        # No access/refresh token, client id or secret: assertion grants
        # mint their own access tokens on demand.
        super(AssertionCredentials, self).__init__(
            None, None, None, None, None, token_uri, user_agent)
        self.assertion_type = assertion_type

    def _generate_refresh_request_body(self):
        """Build the urlencoded POST body for an assertion token request."""
        params = {
            'assertion_type': self.assertion_type,
            'assertion': self._generate_assertion(),
            'grant_type': 'assertion',
        }
        return urllib.urlencode(params)

    def _generate_assertion(self):
        """Generate the assertion string that will be used in the access token
        request. Subclasses must override this.
        """
        _abstract()
if HAS_OPENSSL:
    # PyOpenSSL is not a prerequisite for oauth2client, so if it is missing then
    # don't create the SignedJwtAssertionCredentials or the verify_id_token()
    # method.

    class SignedJwtAssertionCredentials(AssertionCredentials):
        """Credentials object used for OAuth 2.0 Signed JWT assertion grants.

        This credential does not require a flow to instantiate because it
        represents a two legged flow, and therefore has all of the required
        information to generate and refresh its own access tokens.
        """

        MAX_TOKEN_LIFETIME_SECS = 3600  # 1 hour in seconds

        def __init__(self,
                     service_account_name,
                     private_key,
                     scope,
                     private_key_password='notasecret',
                     user_agent=None,
                     token_uri='https://accounts.google.com/o/oauth2/token',
                     **kwargs):
            """Constructor for SignedJwtAssertionCredentials.

            Args:
              service_account_name: string, id for account, usually an email
                address.
              private_key: string, private key in P12 format.
              scope: string or list of strings, scope(s) of the credentials
                being requested.
              private_key_password: string, password for private_key.
              user_agent: string, HTTP User-Agent to provide for this
                application.
              token_uri: string, URI for token endpoint. For convenience
                defaults to Google's endpoints but any OAuth 2.0 provider can
                be used.
              kwargs: kwargs, Additional parameters to add to the JWT token,
                for example prn=joe@xample.org.
            """
            super(SignedJwtAssertionCredentials, self).__init__(
                'http://oauth.net/grant_type/jwt/1.0/bearer',
                user_agent,
                token_uri=token_uri,
                )

            if type(scope) is list:
                scope = ' '.join(scope)
            self.scope = scope

            self.private_key = private_key
            self.private_key_password = private_key_password
            self.service_account_name = service_account_name
            self.kwargs = kwargs

        @classmethod
        def from_json(cls, s):
            """Reconstruct a SignedJwtAssertionCredentials from JSON.

            Bug fix: the previous version passed scope and
            private_key_password transposed and handed the kwargs dict as a
            seventh positional argument (a TypeError). Keywords are used so
            each value reaches the matching constructor parameter.
            """
            data = simplejson.loads(s)
            retval = SignedJwtAssertionCredentials(
                data['service_account_name'],
                data['private_key'],
                data['scope'],
                private_key_password=data['private_key_password'],
                user_agent=data['user_agent'],
                token_uri=data['token_uri'],
                **data['kwargs'])
            retval.invalid = data['invalid']
            return retval

        def _generate_assertion(self):
            """Generate the assertion that will be used in the request."""
            now = long(time.time())
            payload = {
                'aud': self.token_uri,
                'scope': self.scope,
                'iat': now,
                # Cap the token lifetime at the provider maximum.
                'exp': now + SignedJwtAssertionCredentials.MAX_TOKEN_LIFETIME_SECS,
                'iss': self.service_account_name
            }
            # Caller-supplied claims (e.g. prn=...) override nothing above
            # unless deliberately given the same key.
            payload.update(self.kwargs)
            logger.debug(str(payload))

            return make_signed_jwt(
                Signer.from_string(self.private_key, self.private_key_password),
                payload)

    # Only used in verify_id_token(), which is always calling to the same URI
    # for the certs.
    _cached_http = httplib2.Http(MemoryCache())
def verify_id_token(id_token, audience, http=None,
                    cert_uri=ID_TOKEN_VERIFICATON_CERTS):
    """Verifies a signed JWT id_token.

    Args:
      id_token: string, A Signed JWT.
      audience: string, The audience 'aud' that the token should be for.
      http: httplib2.Http, instance to use to make the HTTP request. Callers
        should supply an instance that has caching enabled.
      cert_uri: string, URI of the certificates in JSON format to
        verify the JWT against.

    Returns:
      The deserialized JSON in the JWT.

    Raises:
      VerifyJwtTokenError: if the certificates cannot be fetched.
      oauth2client.crypt.AppIdentityError: if the JWT fails to verify.
    """
    if http is None:
        http = _cached_http

    resp, content = http.request(cert_uri)
    if resp.status != 200:
        raise VerifyJwtTokenError('Status code: %d' % resp.status)

    certs = simplejson.loads(content)
    return verify_signed_jwt_with_certs(id_token, certs, audience)
def _urlsafe_b64decode(b64string):
# Guard against unicode strings, which base64 can't handle.
b64string = b64string.encode('ascii')
padded = b64string + '=' * (4 - len(b64string) % 4)
return base64.urlsafe_b64decode(padded)
def _extract_id_token(id_token):
    """Extract the JSON payload from a JWT.

    Does the extraction w/o checking the signature.

    Args:
      id_token: string, OAuth 2.0 id_token.

    Returns:
      object, The deserialized JSON payload.

    Raises:
      VerifyJwtTokenError: if the token does not have exactly three segments.
    """
    segments = id_token.split('.')

    if len(segments) != 3:
        raise VerifyJwtTokenError(
            'Wrong number of segments in token: %s' % id_token)

    # A JWT is header.payload.signature; the claims live in the middle part.
    payload = segments[1]
    return simplejson.loads(_urlsafe_b64decode(payload))
def credentials_from_code(client_id, client_secret, scope, code,
                          redirect_uri='postmessage',
                          http=None, user_agent=None,
                          token_uri='https://accounts.google.com/o/oauth2/token'):
    """Exchanges an authorization code for an OAuth2Credentials object.

    Args:
      client_id: string, client identifier.
      client_secret: string, client secret.
      scope: string or list of strings, scope(s) to request.
      code: string, An authorization code, most likely passed down from
        the client
      redirect_uri: string, this is generally set to 'postmessage' to match the
        redirect_uri that the client specified
      http: httplib2.Http, optional http instance to use to do the fetch
      user_agent: string, optional HTTP User-Agent to provide for this
        application.
      token_uri: string, URI for token endpoint. For convenience
        defaults to Google's endpoints but any OAuth 2.0 provider can be used.

    Returns:
      An OAuth2Credentials object.

    Raises:
      FlowExchangeError if the authorization code cannot be exchanged for an
      access token
    """
    flow = OAuth2WebServerFlow(client_id, client_secret, scope, user_agent,
                               'https://accounts.google.com/o/oauth2/auth',
                               token_uri)

    # The returned URL is unused; the call is made purely for its side effect
    # of setting flow.redirect_uri, which step2_exchange requires.
    flow.step1_get_authorize_url(redirect_uri)
    credentials = flow.step2_exchange(code, http)
    return credentials
def credentials_from_clientsecrets_and_code(filename, scope, code,
                                            message=None,
                                            redirect_uri='postmessage',
                                            http=None):
    """Returns OAuth2Credentials from a clientsecrets file and an auth code.

    Will create the right kind of Flow based on the contents of the
    clientsecrets file or will raise InvalidClientSecretsError for unknown
    types of Flows.

    Args:
      filename: string, File name of clientsecrets.
      scope: string or list of strings, scope(s) to request.
      code: string, An authorization code, most likely passed down from
        the client
      message: string, A friendly string to display to the user if the
        clientsecrets file is missing or invalid. If message is provided then
        sys.exit will be called in the case of an error. If message in not
        provided then clientsecrets.InvalidClientSecretsError will be raised.
      redirect_uri: string, this is generally set to 'postmessage' to match the
        redirect_uri that the client specified
      http: httplib2.Http, optional http instance to use to do the fetch

    Returns:
      An OAuth2Credentials object.

    Raises:
      FlowExchangeError if the authorization code cannot be exchanged for an
      access token
      UnknownClientSecretsFlowError if the file describes an unknown kind of
      Flow.
      clientsecrets.InvalidClientSecretsError if the clientsecrets file is
      invalid.
    """
    flow = flow_from_clientsecrets(filename, scope, message)

    # The returned URL is unused; the call is made purely for its side effect
    # of setting flow.redirect_uri, which step2_exchange requires.
    flow.step1_get_authorize_url(redirect_uri)
    credentials = flow.step2_exchange(code, http)
    return credentials
class OAuth2WebServerFlow(Flow):
    """Does the Web Server Flow for OAuth 2.0.

    OAuth2Credentials objects may be safely pickled and unpickled.
    """

    def __init__(self, client_id, client_secret, scope, user_agent=None,
                 auth_uri='https://accounts.google.com/o/oauth2/auth',
                 token_uri='https://accounts.google.com/o/oauth2/token',
                 **kwargs):
        """Constructor for OAuth2WebServerFlow.

        Args:
          client_id: string, client identifier.
          client_secret: string client secret.
          scope: string, list or tuple of strings, scope(s) of the credentials
            being requested.
          user_agent: string, HTTP User-Agent to provide for this application.
          auth_uri: string, URI for authorization endpoint. For convenience
            defaults to Google's endpoints but any OAuth 2.0 provider can be
            used.
          token_uri: string, URI for token endpoint. For convenience
            defaults to Google's endpoints but any OAuth 2.0 provider can be
            used.
          **kwargs: dict, The keyword arguments are all optional and required
            parameters for the OAuth calls.
        """
        self.client_id = client_id
        self.client_secret = client_secret
        # Generalized from `type(scope) is list`: tuples and list subclasses
        # are joined too; a plain string passes through untouched.
        if isinstance(scope, (list, tuple)):
            scope = ' '.join(scope)
        self.scope = scope
        self.user_agent = user_agent
        self.auth_uri = auth_uri
        self.token_uri = token_uri
        # 'offline' requests a refresh_token in addition to an access_token.
        self.params = {
            'access_type': 'offline',
        }
        self.params.update(kwargs)
        self.redirect_uri = None

    def step1_get_authorize_url(self, redirect_uri=OOB_CALLBACK_URN):
        """Returns a URI to redirect to the provider.

        Args:
          redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob'
            for a non-web-based application, or a URI that handles the
            callback from the authorization server.

        If redirect_uri is 'urn:ietf:wg:oauth:2.0:oob' then pass in the
        generated verification code to step2_exchange,
        otherwise pass in the query parameters received
        at the callback uri to step2_exchange.
        """
        self.redirect_uri = redirect_uri
        query = {
            'response_type': 'code',
            'client_id': self.client_id,
            'redirect_uri': redirect_uri,
            'scope': self.scope,
        }
        query.update(self.params)
        parts = list(urlparse.urlparse(self.auth_uri))
        # Merge any query parameters already present on auth_uri; they win
        # over ours on key collision.
        query.update(dict(parse_qsl(parts[4])))  # 4 is the index of the query part
        parts[4] = urllib.urlencode(query)
        return urlparse.urlunparse(parts)

    def step2_exchange(self, code, http=None):
        """Exchanges a code for OAuth2Credentials.

        Args:
          code: string or dict, either the code as a string, or a dictionary
            of the query parameters to the redirect_uri, which contains
            the code.
          http: httplib2.Http, optional http instance to use to do the fetch

        Returns:
          An OAuth2Credentials object that can be used to authorize requests.

        Raises:
          FlowExchangeError if a problem occurred exchanging the code for a
          refresh_token.
        """
        # basestring covers both str and unicode on Python 2.
        if not isinstance(code, basestring):
            if 'code' not in code:
                if 'error' in code:
                    error_msg = code['error']
                else:
                    error_msg = 'No code was supplied in the query parameters.'
                raise FlowExchangeError(error_msg)
            else:
                code = code['code']

        body = urllib.urlencode({
            'grant_type': 'authorization_code',
            'client_id': self.client_id,
            'client_secret': self.client_secret,
            'code': code,
            'redirect_uri': self.redirect_uri,
            'scope': self.scope,
        })
        headers = {
            'content-type': 'application/x-www-form-urlencoded',
        }

        if self.user_agent is not None:
            headers['user-agent'] = self.user_agent

        if http is None:
            http = httplib2.Http()

        resp, content = http.request(self.token_uri, method='POST', body=body,
                                     headers=headers)
        if resp.status == 200:
            # TODO(jcgregorio) Raise an error if simplejson.loads fails?
            d = simplejson.loads(content)
            access_token = d['access_token']
            refresh_token = d.get('refresh_token', None)
            token_expiry = None
            if 'expires_in' in d:
                token_expiry = datetime.datetime.utcnow() + datetime.timedelta(
                    seconds=int(d['expires_in']))

            if 'id_token' in d:
                d['id_token'] = _extract_id_token(d['id_token'])

            logger.info('Successfully retrieved access token: %s', content)
            return OAuth2Credentials(access_token, self.client_id,
                                     self.client_secret, refresh_token,
                                     token_expiry, self.token_uri,
                                     self.user_agent,
                                     id_token=d.get('id_token', None))
        else:
            logger.info('Failed to retrieve access token: %s', content)
            error_msg = 'Invalid response %s.' % resp['status']
            try:
                d = simplejson.loads(content)
                if 'error' in d:
                    error_msg = d['error']
            except Exception:
                # Bug fix: was a bare `except:`. A non-JSON error body simply
                # keeps the generic HTTP-status message.
                pass
            raise FlowExchangeError(error_msg)
def flow_from_clientsecrets(filename, scope, message=None):
    """Create a Flow from a clientsecrets file.

    Will create the right kind of Flow based on the contents of the
    clientsecrets file or will raise InvalidClientSecretsError for unknown
    types of Flows.

    Args:
      filename: string, File name of client secrets.
      scope: string or list of strings, scope(s) to request.
      message: string, A friendly string to display to the user if the
        clientsecrets file is missing or invalid. If message is provided then
        sys.exit will be called in the case of an error. If message in not
        provided then clientsecrets.InvalidClientSecretsError will be raised.

    Returns:
      A Flow object.

    Raises:
      UnknownClientSecretsFlowError if the file describes an unknown kind of
      Flow.
      clientsecrets.InvalidClientSecretsError if the clientsecrets file is
      invalid.
    """
    try:
        client_type, client_info = clientsecrets.loadfile(filename)
        if client_type in [clientsecrets.TYPE_WEB, clientsecrets.TYPE_INSTALLED]:
            return OAuth2WebServerFlow(
                client_info['client_id'],
                client_info['client_secret'],
                scope,
                None,  # user_agent
                client_info['auth_uri'],
                client_info['token_uri'])
    except clientsecrets.InvalidClientSecretsError:
        if message:
            sys.exit(message)
        else:
            raise
    else:
        # Reached when loadfile succeeded but the type was not handled above.
        # Bug fix: the format string was combined with `*` instead of `%`,
        # which raised a TypeError instead of the intended error message.
        raise UnknownClientSecretsFlowError(
            'This OAuth 2.0 flow is unsupported: "%s"' % client_type)
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for OAuth.
Utilities for making it easier to work with OAuth 2.0
credentials.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import os
import stat
import threading
from anyjson import simplejson
from client import Storage as BaseStorage
from client import Credentials
class Storage(BaseStorage):
"""Store and retrieve a single credential to and from a file."""
def __init__(self, filename):
self._filename = filename
self._lock = threading.Lock()
def acquire_lock(self):
"""Acquires any lock necessary to access this Storage.
This lock is not reentrant."""
self._lock.acquire()
def release_lock(self):
"""Release the Storage lock.
Trying to release a lock that isn't held will result in a
RuntimeError.
"""
self._lock.release()
def locked_get(self):
"""Retrieve Credential from file.
Returns:
oauth2client.client.Credentials
"""
credentials = None
try:
f = open(self._filename, 'rb')
content = f.read()
f.close()
except IOError:
return credentials
try:
credentials = Credentials.new_from_json(content)
credentials.set_store(self)
except ValueError:
pass
return credentials
def _create_file_if_needed(self):
"""Create an empty file if necessary.
This method will not initialize the file. Instead it implements a
simple version of "touch" to ensure the file has been created.
"""
if not os.path.exists(self._filename):
old_umask = os.umask(0177)
try:
open(self._filename, 'a+b').close()
finally:
os.umask(old_umask)
def locked_put(self, credentials):
"""Write Credentials to file.
Args:
credentials: Credentials, the credentials to store.
"""
self._create_file_if_needed()
f = open(self._filename, 'wb')
f.write(credentials.to_json())
f.close()
def locked_delete(self):
"""Delete Credentials file.
Args:
credentials: Credentials, the credentials to store.
"""
os.unlink(self._filename)
| Python |
# Copyright 2011 Google Inc. All Rights Reserved.
"""Locked file interface that should work on Unix and Windows pythons.
This module first tries to use fcntl locking to ensure serialized access
to a file, then falls back on a lock file if that is unavailable.
Usage:
f = LockedFile('filename', 'r+b', 'rb')
f.open_and_lock()
if f.is_locked():
print 'Acquired filename with r+b mode'
f.file_handle().write('locked data')
else:
print 'Acquired filename with rb mode'
f.unlock_and_close()
"""
__author__ = 'cache@google.com (David T McWherter)'
import errno
import logging
import os
import time
logger = logging.getLogger(__name__)
class AlreadyLockedException(Exception):
    """Trying to lock a file that has already been locked by the LockedFile.

    Raised by an opener's open_and_lock() when its lock is already held,
    i.e. a second open_and_lock() without an intervening unlock_and_close().
    """
    pass
class _Opener(object):
"""Base class for different locking primitives."""
def __init__(self, filename, mode, fallback_mode):
"""Create an Opener.
Args:
filename: string, The pathname of the file.
mode: string, The preferred mode to access the file with.
fallback_mode: string, The mode to use if locking fails.
"""
self._locked = False
self._filename = filename
self._mode = mode
self._fallback_mode = fallback_mode
self._fh = None
def is_locked(self):
"""Was the file locked."""
return self._locked
def file_handle(self):
"""The file handle to the file. Valid only after opened."""
return self._fh
def filename(self):
"""The filename that is being locked."""
return self._filename
def open_and_lock(self, timeout, delay):
"""Open the file and lock it.
Args:
timeout: float, How long to try to lock for.
delay: float, How long to wait between retries.
"""
pass
def unlock_and_close(self):
"""Unlock and close the file."""
pass
class _PosixOpener(_Opener):
    """Lock files using Posix advisory lock files."""

    def open_and_lock(self, timeout, delay):
        """Open the file and lock it.

        Tries to create a .lock file next to the file we're trying to open.

        Args:
          timeout: float, How long to try to lock for.
          delay: float, How long to wait between retries.

        Raises:
          AlreadyLockedException: if the lock is already acquired.
          IOError: if the open fails.
        """
        if self._locked:
            raise AlreadyLockedException('File %s is already locked' %
                                         self._filename)
        self._locked = False

        try:
            self._fh = open(self._filename, self._mode)
        except IOError as e:
            # If we can't access with _mode, try _fallback_mode and don't lock.
            if e.errno == errno.EACCES:
                self._fh = open(self._filename, self._fallback_mode)
                return
            # Bug fix: any other IOError (e.g. ENOENT) used to be silently
            # swallowed, so the code went on to create the lock file while
            # self._fh was still None. Propagate it instead.
            raise

        lock_filename = self._posix_lockfile(self._filename)
        start_time = time.time()
        while True:
            try:
                # O_CREAT|O_EXCL makes lock-file creation atomic: exactly one
                # process can succeed while the file does not already exist.
                self._lock_fd = os.open(lock_filename,
                                        os.O_CREAT | os.O_EXCL | os.O_RDWR)
                self._locked = True
                break
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
                if (time.time() - start_time) >= timeout:
                    logger.warn('Could not acquire lock %s in %s seconds' % (
                        lock_filename, timeout))
                    # Close the file and open in fallback_mode.
                    if self._fh:
                        self._fh.close()
                    self._fh = open(self._filename, self._fallback_mode)
                    return
                time.sleep(delay)

    def unlock_and_close(self):
        """Unlock a file by removing the .lock file, and close the handle."""
        if self._locked:
            lock_filename = self._posix_lockfile(self._filename)
            os.unlink(lock_filename)
            os.close(self._lock_fd)
            self._locked = False
            self._lock_fd = None
        if self._fh:
            self._fh.close()

    def _posix_lockfile(self, filename):
        """The name of the lock file to use for posix locking."""
        return '%s.lock' % filename
try:
    import fcntl

    class _FcntlOpener(_Opener):
        """Open, lock, and unlock a file using fcntl.lockf."""

        def open_and_lock(self, timeout, delay):
            """Open the file and lock it.

            Args:
              timeout: float, How long to try to lock for.
              delay: float, How long to wait between retries

            Raises:
              AlreadyLockedException: if the lock is already acquired.
              IOError: if the open fails.
            """
            if self._locked:
                raise AlreadyLockedException('File %s is already locked' %
                                             self._filename)
            start_time = time.time()

            try:
                self._fh = open(self._filename, self._mode)
            except IOError as e:
                # If we can't access with _mode, try _fallback_mode and don't
                # lock.
                if e.errno == errno.EACCES:
                    self._fh = open(self._filename, self._fallback_mode)
                    return
                # Bug fix: any other IOError used to be silently swallowed,
                # after which the code tried to lock an unopened file.
                raise

            # We opened in _mode, try to lock the file.
            while True:
                try:
                    fcntl.lockf(self._fh.fileno(), fcntl.LOCK_EX)
                    self._locked = True
                    return
                except IOError as e:
                    # If not retrying, then just pass on the error.
                    if timeout == 0:
                        raise
                    # POSIX allows a held lock to surface as either EACCES or
                    # EAGAIN; retry on both, re-raise anything else.
                    if e.errno not in (errno.EACCES, errno.EAGAIN):
                        raise
                    # We could not acquire the lock. Try again.
                    if (time.time() - start_time) >= timeout:
                        logger.warn('Could not lock %s in %s seconds' % (
                            self._filename, timeout))
                        if self._fh:
                            self._fh.close()
                        self._fh = open(self._filename, self._fallback_mode)
                        return
                    time.sleep(delay)

        def unlock_and_close(self):
            """Close and unlock the file using the fcntl.lockf primitive."""
            if self._locked:
                fcntl.lockf(self._fh.fileno(), fcntl.LOCK_UN)
                self._locked = False
            if self._fh:
                self._fh.close()
except ImportError:
    _FcntlOpener = None
try:
    import pywintypes
    import win32con
    import win32file

    class _Win32Opener(_Opener):
        """Open, lock, and unlock a file using windows primitives."""

        # Error #33:
        #  'The process cannot access the file because another process'
        FILE_IN_USE_ERROR = 33

        # Error #158:
        #  'The segment is already unlocked.'
        FILE_ALREADY_UNLOCKED_ERROR = 158

        def open_and_lock(self, timeout, delay):
            """Open the file and lock it.

            Args:
              timeout: float, How long to try to lock for.
              delay: float, How long to wait between retries

            Raises:
              AlreadyLockedException: if the lock is already acquired.
              IOError: if the open fails.
            """
            if self._locked:
                raise AlreadyLockedException('File %s is already locked' %
                                             self._filename)
            start_time = time.time()
            try:
                self._fh = open(self._filename, self._mode)
            except IOError, e:
                # If we can't access with _mode, try _fallback_mode
                # and don't lock.
                # NOTE(review): IOErrors other than EACCES are silently
                # dropped here, leaving self._fh as None before the lock
                # loop below -- confirm whether they should be re-raised.
                if e.errno == errno.EACCES:
                    self._fh = open(self._filename, self._fallback_mode)
                    return
            # We opened in _mode, try to lock the file.
            while True:
                try:
                    hfile = win32file._get_osfhandle(self._fh.fileno())
                    # Exclusive, fail-immediately lock over the maximal byte
                    # range expressed by the negative high-order offset.
                    win32file.LockFileEx(
                        hfile,
                        (win32con.LOCKFILE_FAIL_IMMEDIATELY|
                         win32con.LOCKFILE_EXCLUSIVE_LOCK), 0, -0x10000,
                        pywintypes.OVERLAPPED())
                    self._locked = True
                    return
                except pywintypes.error, e:
                    if timeout == 0:
                        raise e
                    # If the error is not that the file is already
                    # in use, raise.
                    if e[0] != _Win32Opener.FILE_IN_USE_ERROR:
                        raise
                    # We could not acquire the lock. Try again.
                    if (time.time() - start_time) >= timeout:
                        logger.warn('Could not lock %s in %s seconds' % (
                            self._filename, timeout))
                        if self._fh:
                            self._fh.close()
                        self._fh = open(self._filename, self._fallback_mode)
                        return
                    time.sleep(delay)

        def unlock_and_close(self):
            """Close and unlock the file using the win32 primitive."""
            if self._locked:
                try:
                    hfile = win32file._get_osfhandle(self._fh.fileno())
                    win32file.UnlockFileEx(hfile, 0, -0x10000,
                                           pywintypes.OVERLAPPED())
                except pywintypes.error, e:
                    # An already-unlocked segment is benign; anything else
                    # is a real failure.
                    if e[0] != _Win32Opener.FILE_ALREADY_UNLOCKED_ERROR:
                        raise
            self._locked = False
            if self._fh:
                self._fh.close()
except ImportError:
    _Win32Opener = None
class LockedFile(object):
    """Represent a file that has exclusive access."""

    def __init__(self, filename, mode, fallback_mode, use_native_locking=True):
        """Construct a LockedFile.

        Args:
          filename: string, The path of the file to open.
          mode: string, The mode to try to open the file with.
          fallback_mode: string, The mode to use if locking fails.
          use_native_locking: bool, Whether or not fcntl/win32 locking is used.
        """
        chosen = None
        if use_native_locking:
            # Prefer a platform-native opener when its module imported
            # successfully; fcntl takes precedence if both exist.
            if _Win32Opener:
                chosen = _Win32Opener(filename, mode, fallback_mode)
            if _FcntlOpener:
                chosen = _FcntlOpener(filename, mode, fallback_mode)
        if chosen is None:
            # Portable fallback based on '.lock' marker files.
            chosen = _PosixOpener(filename, mode, fallback_mode)
        self._opener = chosen

    def filename(self):
        """Return the filename we were constructed with."""
        return self._opener._filename

    def file_handle(self):
        """Return the file_handle to the opened file."""
        return self._opener.file_handle()

    def is_locked(self):
        """Return whether we successfully locked the file."""
        return self._opener.is_locked()

    def open_and_lock(self, timeout=0, delay=0.05):
        """Open the file, trying to lock it.

        Args:
          timeout: float, The number of seconds to try to acquire the lock.
          delay: float, The number of seconds to wait between retry attempts.

        Raises:
          AlreadyLockedException: if the lock is already acquired.
          IOError: if the open fails.
        """
        self._opener.open_and_lock(timeout, delay)

    def unlock_and_close(self):
        """Unlock and close a file."""
        self._opener.unlock_and_close()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Schema processing for discovery based APIs
Schemas holds an APIs discovery schemas. It can return those schema as
deserialized JSON objects, or pretty print them as prototype objects that
conform to the schema.
For example, given the schema:
schema = \"\"\"{
"Foo": {
"type": "object",
"properties": {
"etag": {
"type": "string",
"description": "ETag of the collection."
},
"kind": {
"type": "string",
"description": "Type of the collection ('calendar#acl').",
"default": "calendar#acl"
},
"nextPageToken": {
"type": "string",
"description": "Token used to access the next
page of this result. Omitted if no further results are available."
}
}
}
}\"\"\"
s = Schemas(schema)
print s.prettyPrintByName('Foo')
Produces the following output:
{
"nextPageToken": "A String", # Token used to access the
# next page of this result. Omitted if no further results are available.
"kind": "A String", # Type of the collection ('calendar#acl').
"etag": "A String", # ETag of the collection.
},
The constructor takes a discovery document in which to look up named schema.
"""
# TODO(jcgregorio) support format, enum, minimum, maximum
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import copy
from oauth2client.anyjson import simplejson
class Schemas(object):
    """Schemas for an API."""

    def __init__(self, discovery):
        """Constructor.

        Args:
          discovery: object, Deserialized discovery document from which we pull
            out the named schema.
        """
        self.schemas = discovery.get('schemas', {})

        # Cache of pretty printed schemas, keyed by schema name.
        # NOTE(review): the indentation ('dent') of the first call is baked
        # into the cached string; public entry points always pass dent=1,
        # but confirm recursive callers agree on depth.
        self.pretty = {}

    def _prettyPrintByName(self, name, seen=None, dent=0):
        """Get pretty printed object prototype from the schema name.

        Args:
          name: string, Name of schema in the discovery document.
          seen: list of string, Names of schema already seen. Used to handle
            recursive definitions.
          dent: int, Indentation level for the generated text.

        Returns:
          string, A string that contains a prototype object with
            comments that conforms to the given schema.
        """
        if seen is None:
            seen = []

        if name in seen:
            # Do not fall into an infinite loop over recursive definitions.
            return '# Object with schema name: %s' % name
        seen.append(name)

        if name not in self.pretty:
            self.pretty[name] = _SchemaToStruct(self.schemas[name],
                seen, dent).to_str(self._prettyPrintByName)

        # Pop so sibling references to the same schema still get expanded.
        seen.pop()

        return self.pretty[name]

    def prettyPrintByName(self, name):
        """Get pretty printed object prototype from the schema name.

        Args:
          name: string, Name of schema in the discovery document.

        Returns:
          string, A string that contains a prototype object with
            comments that conforms to the given schema.
        """
        # Return with trailing comma and newline removed.
        return self._prettyPrintByName(name, seen=[], dent=1)[:-2]

    def _prettyPrintSchema(self, schema, seen=None, dent=0):
        """Get pretty printed object prototype of schema.

        Args:
          schema: object, Parsed JSON schema.
          seen: list of string, Names of schema already seen. Used to handle
            recursive definitions.
          dent: int, Indentation level for the generated text.

        Returns:
          string, A string that contains a prototype object with
            comments that conforms to the given schema.
        """
        if seen is None:
            seen = []

        return _SchemaToStruct(schema, seen, dent).to_str(self._prettyPrintByName)

    def prettyPrintSchema(self, schema):
        """Get pretty printed object prototype of schema.

        Args:
          schema: object, Parsed JSON schema.

        Returns:
          string, A string that contains a prototype object with
            comments that conforms to the given schema.
        """
        # Return with trailing comma and newline removed.
        return self._prettyPrintSchema(schema, dent=1)[:-2]

    def get(self, name):
        """Get deserialized JSON schema from the schema name.

        Args:
          name: string, Schema name.

        Raises:
          KeyError: if no schema with that name exists.
        """
        return self.schemas[name]
class _SchemaToStruct(object):
"""Convert schema to a prototype object."""
def __init__(self, schema, seen, dent=0):
"""Constructor.
Args:
schema: object, Parsed JSON schema.
seen: list, List of names of schema already seen while parsing. Used to
handle recursive definitions.
dent: int, Initial indentation depth.
"""
# The result of this parsing kept as list of strings.
self.value = []
# The final value of the parsing.
self.string = None
# The parsed JSON schema.
self.schema = schema
# Indentation level.
self.dent = dent
# Method that when called returns a prototype object for the schema with
# the given name.
self.from_cache = None
# List of names of schema already seen while parsing.
self.seen = seen
def emit(self, text):
"""Add text as a line to the output.
Args:
text: string, Text to output.
"""
self.value.extend([" " * self.dent, text, '\n'])
def emitBegin(self, text):
"""Add text to the output, but with no line terminator.
Args:
text: string, Text to output.
"""
self.value.extend([" " * self.dent, text])
def emitEnd(self, text, comment):
"""Add text and comment to the output with line terminator.
Args:
text: string, Text to output.
comment: string, Python comment.
"""
if comment:
divider = '\n' + ' ' * (self.dent + 2) + '# '
lines = comment.splitlines()
lines = [x.rstrip() for x in lines]
comment = divider.join(lines)
self.value.extend([text, ' # ', comment, '\n'])
else:
self.value.extend([text, '\n'])
def indent(self):
"""Increase indentation level."""
self.dent += 1
def undent(self):
"""Decrease indentation level."""
self.dent -= 1
def _to_str_impl(self, schema):
"""Prototype object based on the schema, in Python code with comments.
Args:
schema: object, Parsed JSON schema file.
Returns:
Prototype object based on the schema, in Python code with comments.
"""
stype = schema.get('type')
if stype == 'object':
self.emitEnd('{', schema.get('description', ''))
self.indent()
for pname, pschema in schema.get('properties', {}).iteritems():
self.emitBegin('"%s": ' % pname)
self._to_str_impl(pschema)
self.undent()
self.emit('},')
elif '$ref' in schema:
schemaName = schema['$ref']
description = schema.get('description', '')
s = self.from_cache(schemaName, self.seen)
parts = s.splitlines()
self.emitEnd(parts[0], description)
for line in parts[1:]:
self.emit(line.rstrip())
elif stype == 'boolean':
value = schema.get('default', 'True or False')
self.emitEnd('%s,' % str(value), schema.get('description', ''))
elif stype == 'string':
value = schema.get('default', 'A String')
self.emitEnd('"%s",' % str(value), schema.get('description', ''))
elif stype == 'integer':
value = schema.get('default', '42')
self.emitEnd('%s,' % str(value), schema.get('description', ''))
elif stype == 'number':
value = schema.get('default', '3.14')
self.emitEnd('%s,' % str(value), schema.get('description', ''))
elif stype == 'null':
self.emitEnd('None,', schema.get('description', ''))
elif stype == 'any':
self.emitEnd('"",', schema.get('description', ''))
elif stype == 'array':
self.emitEnd('[', schema.get('description'))
self.indent()
self.emitBegin('')
self._to_str_impl(schema['items'])
self.undent()
self.emit('],')
else:
self.emit('Unknown type! %s' % stype)
self.emitEnd('', '')
self.string = ''.join(self.value)
return self.string
def to_str(self, from_cache):
"""Prototype object based on the schema, in Python code with comments.
Args:
from_cache: callable(name, seen), Callable that retrieves an object
prototype for a schema with the given name. Seen is a list of schema
names already seen as we recursively descend the schema definition.
Returns:
Prototype object based on the schema, in Python code with comments.
The lines of the code will all be properly indented.
"""
self.from_cache = from_cache
return self._to_str_impl(self.schema)
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for OAuth.
Utilities for making it easier to work with OAuth.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import copy
import httplib2
import logging
import oauth2 as oauth
import urllib
import urlparse
from anyjson import simplejson
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
class Error(Exception):
    """Base error for this module.

    All other exceptions defined in this module derive from it, so callers
    can catch Error to handle any failure raised here.
    """
    pass
class RequestError(Error):
    """Error occurred during request.

    Raised when an OAuth endpoint returns a non-200 HTTP status.
    """
    pass
class MissingParameter(Error):
    """Raised when a parameter required for an OAuth call is not supplied."""
    pass
class CredentialsInvalidError(Error):
    """Raised when the server reports the credentials as no longer valid."""
    pass
def _abstract():
    """Placeholder body for abstract methods; always raises.

    Raises:
        NotImplementedError: always, to signal the method must be overridden.
    """
    raise NotImplementedError('You need to override this function')
def _oauth_uri(name, discovery, params):
"""Look up the OAuth URI from the discovery
document and add query parameters based on
params.
name - The name of the OAuth URI to lookup, one
of 'request', 'access', or 'authorize'.
discovery - Portion of discovery document the describes
the OAuth endpoints.
params - Dictionary that is used to form the query parameters
for the specified URI.
"""
if name not in ['request', 'access', 'authorize']:
raise KeyError(name)
keys = discovery[name]['parameters'].keys()
query = {}
for key in keys:
if key in params:
query[key] = params[key]
return discovery[name]['url'] + '?' + urllib.urlencode(query)
class Credentials(object):
    """Base class for all Credentials objects.

    Subclasses must define an authorize() method
    that applies the credentials to an HTTP transport.
    """

    def authorize(self, http):
        """Take an httplib2.Http instance (or equivalent) and
        authorizes it for the set of credentials, usually by
        replacing http.request() with a method that adds in
        the appropriate headers and then delegates to the original
        Http.request() method.

        Abstract: subclasses must override.
        """
        _abstract()
class Flow(object):
    """Base class for all Flow objects.

    Marker base class for multi-step OAuth authorization flows (see
    FlowThreeLegged); defines no behavior of its own.
    """
    pass
class Storage(object):
    """Base class for all Storage objects.

    Store and retrieve a single credential. Subclasses must override
    both get() and put().
    """

    def get(self):
        """Retrieve credential.

        Returns:
            apiclient.oauth.Credentials
        """
        _abstract()

    def put(self, credentials):
        """Write a credential.

        Args:
            credentials: Credentials, the credentials to store.
        """
        _abstract()
class OAuthCredentials(Credentials):
    """Credentials object for OAuth 1.0a."""

    def __init__(self, consumer, token, user_agent):
        """Constructor.

        Args:
            consumer: An instance of oauth.Consumer.
            token: An instance of oauth.Token constructed with
                the access token and secret.
            user_agent: The HTTP User-Agent to provide for this application.
        """
        self.consumer = consumer
        self.token = token
        self.user_agent = user_agent
        # Optional callable used to persist this credential when it changes;
        # see set_store().
        self.store = None

        # True if the credentials have been revoked
        self._invalid = False

    @property
    def invalid(self):
        """True if the credentials are invalid, such as being revoked."""
        # getattr() guards against instances unpickled from a state saved
        # before the _invalid attribute existed.
        return getattr(self, "_invalid", False)

    def set_store(self, store):
        """Set the storage for the credential.

        Args:
            store: callable, a callable that when passed a Credential
                will store the credential back to where it came from.
                This is needed to store the latest access_token if it
                has been revoked.
        """
        self.store = store

    def __getstate__(self):
        """Trim the state down to something that can be pickled."""
        # The store callable is not picklable; drop it from the state.
        d = copy.copy(self.__dict__)
        del d['store']
        return d

    def __setstate__(self, state):
        """Reconstitute the state of the object from being pickled."""
        self.__dict__.update(state)
        # store was stripped by __getstate__; re-attach via set_store().
        self.store = None

    def authorize(self, http):
        """Authorize an httplib2.Http instance with these Credentials.

        Args:
            http: An instance of httplib2.Http or something that acts like it.

        Returns:
            A modified instance of http that was passed in.

        Example:
            h = httplib2.Http()
            h = credentials.authorize(h)

        You can't create a new OAuth subclass of httplib2.Authentication
        because it never gets passed the absolute URI, which is needed for
        signing. So instead we have to overload 'request' with a closure
        that adds in the Authorization header and then calls the original
        version of 'request()'.
        """
        request_orig = http.request
        signer = oauth.SignatureMethod_HMAC_SHA1()

        # The closure that will replace 'httplib2.Http.request'.
        def new_request(uri, method='GET', body=None, headers=None,
                        redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                        connection_type=None):
            """Modify the request headers to add the appropriate
            Authorization header."""
            # Redirects are followed manually so each hop can be re-signed
            # with its new URI (the OAuth signature covers the absolute URI).
            response_code = 302
            http.follow_redirects = False
            while response_code in [301, 302]:
                req = oauth.Request.from_consumer_and_token(
                    self.consumer, self.token, http_method=method, http_url=uri)
                req.sign_request(signer, self.consumer, self.token)
                if headers is None:
                    headers = {}
                headers.update(req.to_header())
                if 'user-agent' in headers:
                    # NOTE(review): on every redirect iteration this prepends
                    # self.user_agent again, and it mutates the caller's
                    # headers dict in place — confirm whether that is intended.
                    headers['user-agent'] = self.user_agent + ' ' + headers['user-agent']
                else:
                    headers['user-agent'] = self.user_agent
                resp, content = request_orig(uri, method, body, headers,
                                             redirections, connection_type)
                response_code = resp.status
                if response_code in [301, 302]:
                    uri = resp['location']

            # Update the stored credential if it becomes invalid.
            if response_code == 401:
                logging.info('Access token no longer valid: %s' % content)
                self._invalid = True
                if self.store is not None:
                    self.store(self)
                raise CredentialsInvalidError("Credentials are no longer valid.")

            return resp, content

        http.request = new_request
        return http
class TwoLeggedOAuthCredentials(Credentials):
    """Two Legged Credentials object for OAuth 1.0a.

    The Two Legged object is created directly, not from a flow. Once you
    authorize an httplib2.Http instance you can change the requestor and that
    change will propagate to the authorized httplib2.Http instance. For
    example:

        http = httplib2.Http()
        http = credentials.authorize(http)

        credentials.requestor = 'foo@example.info'
        http.request(...)
        credentials.requestor = 'bar@example.info'
        http.request(...)
    """

    def __init__(self, consumer_key, consumer_secret, user_agent):
        """Constructor.

        Args:
            consumer_key: string, An OAuth 1.0 consumer key
            consumer_secret: string, An OAuth 1.0 consumer secret
            user_agent: string, The HTTP User-Agent to provide for this
                application.
        """
        self.consumer = oauth.Consumer(consumer_key, consumer_secret)
        self.user_agent = user_agent
        # Kept for interface parity with OAuthCredentials; see set_store().
        self.store = None

        # email address of the user to act on the behalf of.
        self._requestor = None

    @property
    def invalid(self):
        """True if the credentials are invalid, such as being revoked.

        Always returns False for Two Legged Credentials.
        """
        return False

    def getrequestor(self):
        """Accessor backing the 'requestor' property below."""
        return self._requestor

    def setrequestor(self, email):
        """Mutator backing the 'requestor' property below."""
        self._requestor = email

    requestor = property(getrequestor, setrequestor, None,
                         'The email address of the user to act on behalf of')

    def set_store(self, store):
        """Set the storage for the credential.

        Args:
            store: callable, a callable that when passed a Credential
                will store the credential back to where it came from.
                This is needed to store the latest access_token if it
                has been revoked.
        """
        self.store = store

    def __getstate__(self):
        """Trim the state down to something that can be pickled."""
        # The store callable is not picklable; drop it from the state.
        d = copy.copy(self.__dict__)
        del d['store']
        return d

    def __setstate__(self, state):
        """Reconstitute the state of the object from being pickled."""
        self.__dict__.update(state)
        self.store = None

    def authorize(self, http):
        """Authorize an httplib2.Http instance with these Credentials.

        Args:
            http: An instance of httplib2.Http or something that acts like it.

        Returns:
            A modified instance of http that was passed in.

        Example:
            h = httplib2.Http()
            h = credentials.authorize(h)

        You can't create a new OAuth subclass of httplib2.Authentication
        because it never gets passed the absolute URI, which is needed for
        signing. So instead we have to overload 'request' with a closure
        that adds in the Authorization header and then calls the original
        version of 'request()'.
        """
        request_orig = http.request
        signer = oauth.SignatureMethod_HMAC_SHA1()

        # The closure that will replace 'httplib2.Http.request'.
        def new_request(uri, method='GET', body=None, headers=None,
                        redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                        connection_type=None):
            """Modify the request headers to add the appropriate
            Authorization header."""
            # Redirects are followed manually so each hop is re-signed
            # against its new URI.
            response_code = 302
            http.follow_redirects = False
            while response_code in [301, 302]:
                # add in xoauth_requestor_id=self._requestor to the uri
                if self._requestor is None:
                    raise MissingParameter(
                        'Requestor must be set before using TwoLeggedOAuthCredentials')
                # NOTE(review): on each redirect iteration another
                # xoauth_requestor_id pair is appended to the query string,
                # and the caller's headers dict is mutated in place with the
                # user-agent prefixed again — confirm whether that is intended.
                parsed = list(urlparse.urlparse(uri))
                q = parse_qsl(parsed[4])
                q.append(('xoauth_requestor_id', self._requestor))
                parsed[4] = urllib.urlencode(q)
                uri = urlparse.urlunparse(parsed)

                req = oauth.Request.from_consumer_and_token(
                    self.consumer, None, http_method=method, http_url=uri)
                req.sign_request(signer, self.consumer, None)
                if headers is None:
                    headers = {}
                headers.update(req.to_header())
                if 'user-agent' in headers:
                    headers['user-agent'] = self.user_agent + ' ' + headers['user-agent']
                else:
                    headers['user-agent'] = self.user_agent
                resp, content = request_orig(uri, method, body, headers,
                                             redirections, connection_type)
                response_code = resp.status
                if response_code in [301, 302]:
                    uri = resp['location']

            if response_code == 401:
                logging.info('Access token no longer valid: %s' % content)
                # Do not store the invalid state of the Credentials because
                # being 2LO they could be reinstated in the future.
                raise CredentialsInvalidError("Credentials are invalid.")

            return resp, content

        http.request = new_request
        return http
class FlowThreeLegged(Flow):
    """Does the Three Legged Dance for OAuth 1.0a."""

    def __init__(self, discovery, consumer_key, consumer_secret, user_agent,
                 **kwargs):
        """Constructor.

        Args:
            discovery: Section of the API discovery document that describes
                the OAuth endpoints.
            consumer_key: OAuth consumer key
            consumer_secret: OAuth consumer secret
            user_agent: The HTTP User-Agent that identifies the application.
            **kwargs: The keyword arguments are all optional and required
                parameters for the OAuth calls.

        Raises:
            MissingParameter: if any endpoint's required non-oauth_
                parameter is absent from kwargs.
        """
        self.discovery = discovery
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        self.user_agent = user_agent
        self.params = kwargs
        self.request_token = {}
        # Collect every required non-OAuth parameter across all endpoints;
        # 'required' is used as a set (the value 1 is a placeholder).
        required = {}
        for uriinfo in discovery.itervalues():
            for name, value in uriinfo['parameters'].iteritems():
                if value['required'] and not name.startswith('oauth_'):
                    required[name] = 1
        for key in required.iterkeys():
            if key not in self.params:
                raise MissingParameter('Required parameter %s not supplied' % key)

    def step1_get_authorize_url(self, oauth_callback='oob'):
        """Returns a URI to redirect to the provider.

        Args:
            oauth_callback: Either the string 'oob' for a non-web-based
                application, or a URI that handles the callback from the
                authorization server.

        If oauth_callback is 'oob' then pass in the
        generated verification code to step2_exchange,
        otherwise pass in the query parameters received
        at the callback uri to step2_exchange.

        Raises:
            RequestError: if the request-token endpoint does not answer 200.
        """
        consumer = oauth.Consumer(self.consumer_key, self.consumer_secret)
        client = oauth.Client(consumer)
        headers = {
            'user-agent': self.user_agent,
            'content-type': 'application/x-www-form-urlencoded'
        }
        body = urllib.urlencode({'oauth_callback': oauth_callback})
        uri = _oauth_uri('request', self.discovery, self.params)

        resp, content = client.request(uri, 'POST', headers=headers,
                                       body=body)
        if resp['status'] != '200':
            logging.error('Failed to retrieve temporary authorization: %s', content)
            raise RequestError('Invalid response %s.' % resp['status'])

        # Remember the temporary token so step2_exchange can redeem it.
        self.request_token = dict(parse_qsl(content))

        auth_params = copy.copy(self.params)
        auth_params['oauth_token'] = self.request_token['oauth_token']

        return _oauth_uri('authorize', self.discovery, auth_params)

    def step2_exchange(self, verifier):
        """Exchanges an authorized request token for OAuthCredentials.

        Args:
            verifier: string, dict - either the verifier token, or a dictionary
                of the query parameters to the callback, which contains
                the oauth_verifier.

        Returns:
            The Credentials object.

        Raises:
            RequestError: if the access-token endpoint does not answer 200.
        """
        if not (isinstance(verifier, str) or isinstance(verifier, unicode)):
            verifier = verifier['oauth_verifier']

        token = oauth.Token(
            self.request_token['oauth_token'],
            self.request_token['oauth_token_secret'])
        token.set_verifier(verifier)
        consumer = oauth.Consumer(self.consumer_key, self.consumer_secret)
        client = oauth.Client(consumer, token)

        headers = {
            'user-agent': self.user_agent,
            'content-type': 'application/x-www-form-urlencoded'
        }

        uri = _oauth_uri('access', self.discovery, self.params)
        resp, content = client.request(uri, 'POST', headers=headers)
        if resp['status'] != '200':
            logging.error('Failed to retrieve access token: %s', content)
            raise RequestError('Invalid response %s.' % resp['status'])

        oauth_params = dict(parse_qsl(content))
        token = oauth.Token(
            oauth_params['oauth_token'],
            oauth_params['oauth_token_secret'])

        return OAuthCredentials(consumer, token, self.user_agent)
| Python |
# Copyright (C) 2007 Joe Gregorio
#
# Licensed under the MIT License
"""MIME-Type Parser
This module provides basic functions for handling mime-types. It can handle
matching mime-types against a list of media-ranges. See section 14.1 of the
HTTP specification [RFC 2616] for a complete explanation.
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
Contents:
- parse_mime_type(): Parses a mime-type into its component parts.
- parse_media_range(): Media-ranges are mime-types with wild-cards and a 'q'
quality parameter.
- quality(): Determines the quality ('q') of a mime-type when
compared against a list of media-ranges.
- quality_parsed(): Just like quality() except the second parameter must be
pre-parsed.
- best_match(): Choose the mime-type with the highest quality ('q')
from a list of candidates.
"""
__version__ = '0.1.3'
__author__ = 'Joe Gregorio'
__email__ = 'joe@bitworking.org'
__license__ = 'MIT License'
__credits__ = ''
def parse_mime_type(mime_type):
    """Parses a mime-type into its component parts.

    Carves up a mime-type and returns a tuple of the (type, subtype, params)
    where 'params' is a dictionary of all the parameters for the media range.
    For example, the media range 'application/xhtml;q=0.5' would get parsed
    into:

        ('application', 'xhtml', {'q': '0.5'})
    """
    pieces = mime_type.split(';')
    params = {}
    for raw_param in pieces[1:]:
        # Each parameter is 'name=value'; split on the first '=' only.
        name, value = [part.strip() for part in raw_param.split('=', 1)]
        params[name] = value
    full_type = pieces[0].strip()
    # Java URLConnection class sends an Accept header that includes a
    # single '*'. Turn it into a legal wildcard.
    if full_type == '*':
        full_type = '*/*'
    main_type, sub_type = full_type.split('/')
    return (main_type.strip(), sub_type.strip(), params)
def parse_media_range(range):
    """Parse a media-range into its component parts.

    Carves up a media range and returns a tuple of the (type, subtype,
    params) where 'params' is a dictionary of all the parameters for the media
    range. For example, the media range 'application/*;q=0.5' would get parsed
    into:

        ('application', '*', {'q': '0.5'})

    In addition this function also guarantees that there is a value for 'q'
    in the params dictionary, filling it in with a proper default if
    necessary.
    """
    (type, subtype, params) = parse_mime_type(range)
    # 'q' must be present and parse as a quality in (0, 1]; anything else —
    # missing, empty, zero, out of range, or (new) non-numeric — falls back
    # to the default of 1. Previously a non-numeric q raised ValueError;
    # 'q' in params replaces the Python-3-removed dict.has_key().
    try:
        q = float(params['q']) if 'q' in params and params['q'] else None
    except ValueError:
        q = None
    if q is None or not q or q > 1 or q < 0:
        params['q'] = '1'
    return (type, subtype, params)
def fitness_and_quality_parsed(mime_type, parsed_ranges):
    """Find the best match for a mime-type amongst parsed media-ranges.

    Find the best match for a given mime-type against a list of media_ranges
    that have already been parsed by parse_media_range(). Returns a tuple of
    the fitness value and the value of the 'q' quality parameter of the best
    match, or (-1, 0) if no match was found. Just as for quality_parsed(),
    'parsed_ranges' must be a list of parsed media ranges.
    """
    best_fitness = -1
    best_fit_q = 0
    (target_type, target_subtype, target_params) = \
        parse_media_range(mime_type)
    for (mtype, msubtype, mparams) in parsed_ranges:
        type_match = (mtype == target_type or
                      mtype == '*' or
                      target_type == '*')
        subtype_match = (msubtype == target_subtype or
                         msubtype == '*' or
                         target_subtype == '*')
        if type_match and subtype_match:
            # Count the target's non-'q' parameters that the range matches
            # exactly. sum() over a generator replaces the bare reduce()
            # and dict.has_key(), both removed in Python 3.
            param_matches = sum(1 for (key, value) in target_params.items()
                                if key != 'q' and
                                key in mparams and value == mparams[key])
            # Exact type is worth 100, exact subtype 10, each parameter 1.
            fitness = 100 if mtype == target_type else 0
            fitness += 10 if msubtype == target_subtype else 0
            fitness += param_matches
            if fitness > best_fitness:
                best_fitness = fitness
                best_fit_q = mparams['q']
    return best_fitness, float(best_fit_q)
def quality_parsed(mime_type, parsed_ranges):
    """Find the best match for a mime-type amongst parsed media-ranges.

    Behaves the same as quality() except that 'parsed_ranges' must be a
    list of media ranges already parsed by parse_media_range(). Returns the
    'q' quality parameter of the best match, or 0 if no match was found.
    """
    (_fitness, best_q) = fitness_and_quality_parsed(mime_type, parsed_ranges)
    return best_q
def quality(mime_type, ranges):
    """Return the quality ('q') of a mime-type against a list of media-ranges.

    Returns the quality 'q' of a mime-type when compared against the
    media-ranges in ranges. For example:

        quality('text/html', 'text/*;q=0.3, text/html;q=0.7,
                text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5')
        => 0.7
    """
    parsed_ranges = []
    for media_range in ranges.split(','):
        parsed_ranges.append(parse_media_range(media_range))
    return quality_parsed(mime_type, parsed_ranges)
def best_match(supported, header):
    """Return mime-type with the highest quality ('q') from list of candidates.

    Takes a list of supported mime-types and finds the best match for all the
    media-ranges listed in header. The value of header must be a string that
    conforms to the format of the HTTP Accept: header. The value of 'supported'
    is a list of mime-types. The list of supported mime-types should be sorted
    in order of increasing desirability, in case of a situation where there is
    a tie. Returns '' if nothing matches (or if 'supported' is empty).

    Example:
        best_match(['application/xbel+xml', 'text/xml'],
                   'text/*;q=0.5,*/*; q=0.1')
        => 'text/xml'
    """
    split_header = _filter_blank(header.split(','))
    parsed_header = [parse_media_range(r) for r in split_header]
    weighted_matches = []
    # enumerate() replaces the manual 'pos' counter; the position breaks
    # fitness/quality ties in favor of later (more desirable) entries.
    for pos, mime_type in enumerate(supported):
        weighted_matches.append((fitness_and_quality_parsed(mime_type,
                                 parsed_header), pos, mime_type))
    if not weighted_matches:
        # Previously an empty 'supported' list raised IndexError.
        return ''
    weighted_matches.sort()

    best = weighted_matches[-1]
    # best[0][1] is the matched quality; 0 means nothing acceptable.
    return best[2] if best[0][1] else ''
def _filter_blank(i):
for s in i:
if s.strip():
yield s
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.