#!/usr/bin/python
from wxctb import *
import sys
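# A SWIG-wrapped C int pointer serves as the exit flag: the timer thread
# sets it after 2000 ms, which ends the polling loop below.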
quit=new_intp()
intp_assign(quit, 0)
t=timer(2000,quit)
t.start()
while intp_value(quit)==0:
print "."
sleepms(100)
delete_intp(quit)
sys.exit(0)
| Python |
from time import sleep
from ctb import *
import sys
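# Serial demo: query an SCPI instrument on com4 and exercise the
# modem control lines (assumes a suitable device is attached).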
s=wxSerialPort()
s.Open("com4",115200)
s.Reset()
cmd="*idn?;*opc?\n"
print "send %s" % cmd,
s.Write(cmd)
print s.ReadUntilEOS()
cmd = ":syst:err?\n"
s.Write(cmd)
print "send %s" % cmd,
sleep(1)
print s.Read(5)
print s.GetAvailableBytes()
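# drain whatever is still pending in the input queue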
s.Read(1000)
# test line state switching
s.SetLineState(wxSERIAL_LINESTATE_DTR)
s.ClrLineState(wxSERIAL_LINESTATE_RTS)
print "test line state switching (toggle RTS and DTR)..."
lines = wxSERIAL_LINESTATE_DTR | wxSERIAL_LINESTATE_RTS
for i in range(10):
s.ChangeLineState(lines)
sleep(0.2)
# test line state reading
print "test line state reading..."
while 1:
lines = s.GetLineState()
    if lines & wxSERIAL_LINESTATE_CTS:
print "CTS is on"
else:
print "CTS is off"
sleep(0.1)
| Python |
# This file was created automatically by SWIG.
# Don't modify this file, modify the SWIG interface instead.
import _wxctb
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "this"):
if isinstance(value, class_type):
self.__dict__[name] = value.this
if hasattr(value,"thisown"): self.__dict__["thisown"] = value.thisown
del value.thisown
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static) or hasattr(self,name) or (name == "thisown"):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError,name
import types
try:
_object = types.ObjectType
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
del types
def _swig_setattr_nondynamic_method(set):
def set_attr(self,name,value):
if hasattr(self,name) or (name in ("this", "thisown")):
set(self,name,value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
new_intp = _wxctb.new_intp
copy_intp = _wxctb.copy_intp
delete_intp = _wxctb.delete_intp
intp_assign = _wxctb.intp_assign
intp_value = _wxctb.intp_value
class timer_control(object):
def __init__(self): raise RuntimeError, "No constructor defined"
def __repr__(self):
return "<%s.%s; proxy of C++ timer_control instance at %s>" % (self.__class__.__module__, self.__class__.__name__, self.this,)
msecs = property(_wxctb.timer_control_msecs_get, _wxctb.timer_control_msecs_set)
exitflag = property(_wxctb.timer_control_exitflag_get, _wxctb.timer_control_exitflag_set)
stop = property(_wxctb.timer_control_stop_get, _wxctb.timer_control_stop_set)
exitfnc = property(_wxctb.timer_control_exitfnc_get, _wxctb.timer_control_exitfnc_set)
class timer_controlPtr(timer_control):
def __init__(self, this):
self.this = this
if not hasattr(self,"thisown"): self.thisown = 0
self.__class__ = timer_control
_wxctb.timer_control_swigregister(timer_controlPtr)
class timer(object):
def __repr__(self):
return "<%s.%s; proxy of C++ timer instance at %s>" % (self.__class__.__module__, self.__class__.__name__, self.this,)
def __init__(self, *args, **kwargs):
newobj = _wxctb.new_timer(*args, **kwargs)
self.this = newobj.this
self.thisown = 1
del newobj.thisown
def __del__(self, destroy=_wxctb.delete_timer):
try:
if self.thisown: destroy(self)
except: pass
def start(*args, **kwargs): return _wxctb.timer_start(*args, **kwargs)
def stop(*args, **kwargs): return _wxctb.timer_stop(*args, **kwargs)
class timerPtr(timer):
def __init__(self, this):
self.this = this
if not hasattr(self,"thisown"): self.thisown = 0
self.__class__ = timer
_wxctb.timer_swigregister(timerPtr)
sleepms = _wxctb.sleepms
CTB_RESET = _wxctb.CTB_RESET
class wxIOBase(object):
def __init__(self): raise RuntimeError, "No constructor defined"
def __repr__(self):
return "<%s.%s; proxy of C++ wxIOBase instance at %s>" % (self.__class__.__module__, self.__class__.__name__, self.this,)
def __del__(self, destroy=_wxctb.delete_wxIOBase):
try:
if self.thisown: destroy(self)
except: pass
def ClassName(*args, **kwargs): return _wxctb.wxIOBase_ClassName(*args, **kwargs)
def Close(*args, **kwargs): return _wxctb.wxIOBase_Close(*args, **kwargs)
def Ioctl(*args, **kwargs): return _wxctb.wxIOBase_Ioctl(*args, **kwargs)
def IsOpen(*args, **kwargs): return _wxctb.wxIOBase_IsOpen(*args, **kwargs)
def Open(*args, **kwargs): return _wxctb.wxIOBase_Open(*args, **kwargs)
def PutBack(*args, **kwargs): return _wxctb.wxIOBase_PutBack(*args, **kwargs)
def Read(*args, **kwargs): return _wxctb.wxIOBase_Read(*args, **kwargs)
def ReadUntilEOS(*args, **kwargs): return _wxctb.wxIOBase_ReadUntilEOS(*args, **kwargs)
def Readv(*args, **kwargs): return _wxctb.wxIOBase_Readv(*args, **kwargs)
def Write(*args, **kwargs): return _wxctb.wxIOBase_Write(*args, **kwargs)
def Writev(*args, **kwargs): return _wxctb.wxIOBase_Writev(*args, **kwargs)
class wxIOBasePtr(wxIOBase):
def __init__(self, this):
self.this = this
if not hasattr(self,"thisown"): self.thisown = 0
self.__class__ = wxIOBase
_wxctb.wxIOBase_swigregister(wxIOBasePtr)
wxBAUD_150 = _wxctb.wxBAUD_150
wxBAUD_300 = _wxctb.wxBAUD_300
wxBAUD_600 = _wxctb.wxBAUD_600
wxBAUD_1200 = _wxctb.wxBAUD_1200
wxBAUD_2400 = _wxctb.wxBAUD_2400
wxBAUD_4800 = _wxctb.wxBAUD_4800
wxBAUD_9600 = _wxctb.wxBAUD_9600
wxBAUD_19200 = _wxctb.wxBAUD_19200
wxBAUD_38400 = _wxctb.wxBAUD_38400
wxBAUD_57600 = _wxctb.wxBAUD_57600
wxBAUD_115200 = _wxctb.wxBAUD_115200
wxBAUD_230400 = _wxctb.wxBAUD_230400
wxBAUD_460800 = _wxctb.wxBAUD_460800
wxBAUD_921600 = _wxctb.wxBAUD_921600
wxPARITY_NONE = _wxctb.wxPARITY_NONE
wxPARITY_ODD = _wxctb.wxPARITY_ODD
wxPARITY_EVEN = _wxctb.wxPARITY_EVEN
wxPARITY_MARK = _wxctb.wxPARITY_MARK
wxPARITY_SPACE = _wxctb.wxPARITY_SPACE
wxSERIAL_LINESTATE_DCD = _wxctb.wxSERIAL_LINESTATE_DCD
wxSERIAL_LINESTATE_CTS = _wxctb.wxSERIAL_LINESTATE_CTS
wxSERIAL_LINESTATE_DSR = _wxctb.wxSERIAL_LINESTATE_DSR
wxSERIAL_LINESTATE_DTR = _wxctb.wxSERIAL_LINESTATE_DTR
wxSERIAL_LINESTATE_RING = _wxctb.wxSERIAL_LINESTATE_RING
wxSERIAL_LINESTATE_RTS = _wxctb.wxSERIAL_LINESTATE_RTS
wxSERIAL_LINESTATE_NULL = _wxctb.wxSERIAL_LINESTATE_NULL
class wxSerialPort_DCS(object):
def __repr__(self):
return "<%s.%s; proxy of C++ wxSerialPort_DCS instance at %s>" % (self.__class__.__module__, self.__class__.__name__, self.this,)
baud = property(_wxctb.wxSerialPort_DCS_baud_get, _wxctb.wxSerialPort_DCS_baud_set)
parity = property(_wxctb.wxSerialPort_DCS_parity_get, _wxctb.wxSerialPort_DCS_parity_set)
wordlen = property(_wxctb.wxSerialPort_DCS_wordlen_get, _wxctb.wxSerialPort_DCS_wordlen_set)
stopbits = property(_wxctb.wxSerialPort_DCS_stopbits_get, _wxctb.wxSerialPort_DCS_stopbits_set)
rtscts = property(_wxctb.wxSerialPort_DCS_rtscts_get, _wxctb.wxSerialPort_DCS_rtscts_set)
xonxoff = property(_wxctb.wxSerialPort_DCS_xonxoff_get, _wxctb.wxSerialPort_DCS_xonxoff_set)
def __init__(self, *args, **kwargs):
newobj = _wxctb.new_wxSerialPort_DCS(*args, **kwargs)
self.this = newobj.this
self.thisown = 1
del newobj.thisown
def __del__(self, destroy=_wxctb.delete_wxSerialPort_DCS):
try:
if self.thisown: destroy(self)
except: pass
def GetSettings(*args, **kwargs): return _wxctb.wxSerialPort_DCS_GetSettings(*args, **kwargs)
class wxSerialPort_DCSPtr(wxSerialPort_DCS):
def __init__(self, this):
self.this = this
if not hasattr(self,"thisown"): self.thisown = 0
self.__class__ = wxSerialPort_DCS
_wxctb.wxSerialPort_DCS_swigregister(wxSerialPort_DCSPtr)
class wxSerialPort_EINFO(object):
def __repr__(self):
return "<%s.%s; proxy of C++ wxSerialPort_EINFO instance at %s>" % (self.__class__.__module__, self.__class__.__name__, self.this,)
brk = property(_wxctb.wxSerialPort_EINFO_brk_get, _wxctb.wxSerialPort_EINFO_brk_set)
frame = property(_wxctb.wxSerialPort_EINFO_frame_get, _wxctb.wxSerialPort_EINFO_frame_set)
overrun = property(_wxctb.wxSerialPort_EINFO_overrun_get, _wxctb.wxSerialPort_EINFO_overrun_set)
parity = property(_wxctb.wxSerialPort_EINFO_parity_get, _wxctb.wxSerialPort_EINFO_parity_set)
def __init__(self, *args, **kwargs):
newobj = _wxctb.new_wxSerialPort_EINFO(*args, **kwargs)
self.this = newobj.this
self.thisown = 1
del newobj.thisown
class wxSerialPort_EINFOPtr(wxSerialPort_EINFO):
def __init__(self, this):
self.this = this
if not hasattr(self,"thisown"): self.thisown = 0
self.__class__ = wxSerialPort_EINFO
_wxctb.wxSerialPort_EINFO_swigregister(wxSerialPort_EINFOPtr)
CTB_SER_GETEINFO = _wxctb.CTB_SER_GETEINFO
CTB_SER_GETBRK = _wxctb.CTB_SER_GETBRK
CTB_SER_GETFRM = _wxctb.CTB_SER_GETFRM
CTB_SER_GETOVR = _wxctb.CTB_SER_GETOVR
CTB_SER_GETPAR = _wxctb.CTB_SER_GETPAR
CTB_SER_GETINQUE = _wxctb.CTB_SER_GETINQUE
class wxSerialPort_x(wxIOBase):
def __init__(self): raise RuntimeError, "No constructor defined"
def __repr__(self):
return "<%s.%s; proxy of C++ wxSerialPort_x instance at %s>" % (self.__class__.__module__, self.__class__.__name__, self.this,)
def __del__(self, destroy=_wxctb.delete_wxSerialPort_x):
try:
if self.thisown: destroy(self)
except: pass
def ClassName(*args, **kwargs): return _wxctb.wxSerialPort_x_ClassName(*args, **kwargs)
def ChangeLineState(*args, **kwargs): return _wxctb.wxSerialPort_x_ChangeLineState(*args, **kwargs)
def ClrLineState(*args, **kwargs): return _wxctb.wxSerialPort_x_ClrLineState(*args, **kwargs)
def GetLineState(*args, **kwargs): return _wxctb.wxSerialPort_x_GetLineState(*args, **kwargs)
def GetSettingsAsString(*args, **kwargs): return _wxctb.wxSerialPort_x_GetSettingsAsString(*args, **kwargs)
def Ioctl(*args, **kwargs): return _wxctb.wxSerialPort_x_Ioctl(*args, **kwargs)
def SendBreak(*args, **kwargs): return _wxctb.wxSerialPort_x_SendBreak(*args, **kwargs)
def SetBaudRate(*args, **kwargs): return _wxctb.wxSerialPort_x_SetBaudRate(*args, **kwargs)
def SetLineState(*args, **kwargs): return _wxctb.wxSerialPort_x_SetLineState(*args, **kwargs)
class wxSerialPort_xPtr(wxSerialPort_x):
def __init__(self, this):
self.this = this
if not hasattr(self,"thisown"): self.thisown = 0
self.__class__ = wxSerialPort_x
_wxctb.wxSerialPort_x_swigregister(wxSerialPort_xPtr)
wxCOM1 = "com1"
wxCOM2 = "com2"
wxCOM3 = "com3"
wxCOM4 = "com4"
wxCOM5 = "com5"
wxCOM6 = "com6"
wxCOM7 = "com7"
wxCOM8 = "com8"
wxCOM9 = "com9"
wxCOM10 = "\\\\.\\com10"
wxCOM11 = "\\\\.\\com11"
wxCOM12 = "\\\\.\\com12"
wxCOM13 = "\\\\.\\com13"
wxCOM14 = "\\\\.\\com14"
wxCOM15 = "\\\\.\\com15"
wxCOM16 = "\\\\.\\com16"
class wxSerialPort(wxSerialPort_x):
def __repr__(self):
return "<%s.%s; proxy of C++ wxSerialPort instance at %s>" % (self.__class__.__module__, self.__class__.__name__, self.this,)
def __init__(self, *args, **kwargs):
newobj = _wxctb.new_wxSerialPort(*args, **kwargs)
self.this = newobj.this
self.thisown = 1
del newobj.thisown
def __del__(self, destroy=_wxctb.delete_wxSerialPort):
try:
if self.thisown: destroy(self)
except: pass
def ChangeLineState(*args, **kwargs): return _wxctb.wxSerialPort_ChangeLineState(*args, **kwargs)
def ClrLineState(*args, **kwargs): return _wxctb.wxSerialPort_ClrLineState(*args, **kwargs)
def GetLineState(*args, **kwargs): return _wxctb.wxSerialPort_GetLineState(*args, **kwargs)
def GetSettingsAsString(*args, **kwargs): return _wxctb.wxSerialPort_GetSettingsAsString(*args, **kwargs)
def Ioctl(*args, **kwargs): return _wxctb.wxSerialPort_Ioctl(*args, **kwargs)
def IsOpen(*args, **kwargs): return _wxctb.wxSerialPort_IsOpen(*args, **kwargs)
def Read(*args, **kwargs): return _wxctb.wxSerialPort_Read(*args, **kwargs)
def SendBreak(*args, **kwargs): return _wxctb.wxSerialPort_SendBreak(*args, **kwargs)
def SetBaudRate(*args, **kwargs): return _wxctb.wxSerialPort_SetBaudRate(*args, **kwargs)
def SetLineState(*args, **kwargs): return _wxctb.wxSerialPort_SetLineState(*args, **kwargs)
def Write(*args, **kwargs): return _wxctb.wxSerialPort_Write(*args, **kwargs)
class wxSerialPortPtr(wxSerialPort):
def __init__(self, this):
self.this = this
if not hasattr(self,"thisown"): self.thisown = 0
self.__class__ = wxSerialPort
_wxctb.wxSerialPort_swigregister(wxSerialPortPtr)
GetKey = _wxctb.GetKey
wxGPIB_TO_NONE = _wxctb.wxGPIB_TO_NONE
wxGPIB_TO_10us = _wxctb.wxGPIB_TO_10us
wxGPIB_TO_30us = _wxctb.wxGPIB_TO_30us
wxGPIB_TO_100us = _wxctb.wxGPIB_TO_100us
wxGPIB_TO_300us = _wxctb.wxGPIB_TO_300us
wxGPIB_TO_1ms = _wxctb.wxGPIB_TO_1ms
wxGPIB_TO_3ms = _wxctb.wxGPIB_TO_3ms
wxGPIB_TO_10ms = _wxctb.wxGPIB_TO_10ms
wxGPIB_TO_30ms = _wxctb.wxGPIB_TO_30ms
wxGPIB_TO_100ms = _wxctb.wxGPIB_TO_100ms
wxGPIB_TO_300ms = _wxctb.wxGPIB_TO_300ms
wxGPIB_TO_1s = _wxctb.wxGPIB_TO_1s
wxGPIB_TO_3s = _wxctb.wxGPIB_TO_3s
wxGPIB_TO_10s = _wxctb.wxGPIB_TO_10s
wxGPIB_TO_30s = _wxctb.wxGPIB_TO_30s
wxGPIB_TO_100s = _wxctb.wxGPIB_TO_100s
wxGPIB_TO_300s = _wxctb.wxGPIB_TO_300s
wxGPIB_TO_1000s = _wxctb.wxGPIB_TO_1000s
class wxGPIB_DCS(object):
def __repr__(self):
return "<%s.%s; proxy of C++ wxGPIB_DCS instance at %s>" % (self.__class__.__module__, self.__class__.__name__, self.this,)
m_address1 = property(_wxctb.wxGPIB_DCS_m_address1_get, _wxctb.wxGPIB_DCS_m_address1_set)
m_address2 = property(_wxctb.wxGPIB_DCS_m_address2_get, _wxctb.wxGPIB_DCS_m_address2_set)
m_timeout = property(_wxctb.wxGPIB_DCS_m_timeout_get, _wxctb.wxGPIB_DCS_m_timeout_set)
m_eot = property(_wxctb.wxGPIB_DCS_m_eot_get, _wxctb.wxGPIB_DCS_m_eot_set)
m_eosChar = property(_wxctb.wxGPIB_DCS_m_eosChar_get, _wxctb.wxGPIB_DCS_m_eosChar_set)
m_eosMode = property(_wxctb.wxGPIB_DCS_m_eosMode_get, _wxctb.wxGPIB_DCS_m_eosMode_set)
def __init__(self, *args, **kwargs):
newobj = _wxctb.new_wxGPIB_DCS(*args, **kwargs)
self.this = newobj.this
self.thisown = 1
del newobj.thisown
def GetSettings(*args, **kwargs): return _wxctb.wxGPIB_DCS_GetSettings(*args, **kwargs)
class wxGPIB_DCSPtr(wxGPIB_DCS):
def __init__(self, this):
self.this = this
if not hasattr(self,"thisown"): self.thisown = 0
self.__class__ = wxGPIB_DCS
_wxctb.wxGPIB_DCS_swigregister(wxGPIB_DCSPtr)
CTB_GPIB_SETADR = _wxctb.CTB_GPIB_SETADR
CTB_GPIB_GETRSP = _wxctb.CTB_GPIB_GETRSP
CTB_GPIB_GETSTA = _wxctb.CTB_GPIB_GETSTA
CTB_GPIB_GETERR = _wxctb.CTB_GPIB_GETERR
CTB_GPIB_GETLINES = _wxctb.CTB_GPIB_GETLINES
CTB_GPIB_SETTIMEOUT = _wxctb.CTB_GPIB_SETTIMEOUT
CTB_GPIB_GTL = _wxctb.CTB_GPIB_GTL
CTB_GPIB_REN = _wxctb.CTB_GPIB_REN
CTB_GPIB_RESET_BUS = _wxctb.CTB_GPIB_RESET_BUS
CTB_GPIB_FIND_LISTENERS = _wxctb.CTB_GPIB_FIND_LISTENERS
class wxGPIB_x(wxIOBase):
def __init__(self): raise RuntimeError, "No constructor defined"
def __repr__(self):
return "<%s.%s; proxy of C++ wxGPIB_x instance at %s>" % (self.__class__.__module__, self.__class__.__name__, self.this,)
def __del__(self, destroy=_wxctb.delete_wxGPIB_x):
try:
if self.thisown: destroy(self)
except: pass
def ClassName(*args, **kwargs): return _wxctb.wxGPIB_x_ClassName(*args, **kwargs)
def GetError(*args, **kwargs): return _wxctb.wxGPIB_x_GetError(*args, **kwargs)
def Ibrd(*args, **kwargs): return _wxctb.wxGPIB_x_Ibrd(*args, **kwargs)
def Ibwrt(*args, **kwargs): return _wxctb.wxGPIB_x_Ibwrt(*args, **kwargs)
def Ioctl(*args, **kwargs): return _wxctb.wxGPIB_x_Ioctl(*args, **kwargs)
def IsOpen(*args, **kwargs): return _wxctb.wxGPIB_x_IsOpen(*args, **kwargs)
def Read(*args, **kwargs): return _wxctb.wxGPIB_x_Read(*args, **kwargs)
def Write(*args, **kwargs): return _wxctb.wxGPIB_x_Write(*args, **kwargs)
class wxGPIB_xPtr(wxGPIB_x):
def __init__(self, this):
self.this = this
if not hasattr(self,"thisown"): self.thisown = 0
self.__class__ = wxGPIB_x
_wxctb.wxGPIB_x_swigregister(wxGPIB_xPtr)
class wxGPIB(wxGPIB_x):
def __repr__(self):
return "<%s.%s; proxy of C++ wxGPIB instance at %s>" % (self.__class__.__module__, self.__class__.__name__, self.this,)
def __init__(self, *args, **kwargs):
newobj = _wxctb.new_wxGPIB(*args, **kwargs)
self.this = newobj.this
self.thisown = 1
del newobj.thisown
def __del__(self, destroy=_wxctb.delete_wxGPIB):
try:
if self.thisown: destroy(self)
except: pass
class wxGPIBPtr(wxGPIB):
def __init__(self, this):
self.this = this
if not hasattr(self,"thisown"): self.thisown = 0
self.__class__ = wxGPIB
_wxctb.wxGPIB_swigregister(wxGPIBPtr)
| Python |
import wxctb, sys, re
wxSERIAL_LINESTATE_DCD = wxctb.wxSERIAL_LINESTATE_DCD
wxSERIAL_LINESTATE_CTS = wxctb.wxSERIAL_LINESTATE_CTS
wxSERIAL_LINESTATE_DSR = wxctb.wxSERIAL_LINESTATE_DSR
wxSERIAL_LINESTATE_DTR = wxctb.wxSERIAL_LINESTATE_DTR
wxSERIAL_LINESTATE_RING = wxctb.wxSERIAL_LINESTATE_RING
wxSERIAL_LINESTATE_RTS = wxctb.wxSERIAL_LINESTATE_RTS
wxSERIAL_LINESTATE_NULL = wxctb.wxSERIAL_LINESTATE_NULL
def abstract():
import inspect
caller = inspect.getouterframes(inspect.currentframe())[1][3]
raise NotImplementedError(caller + ' must be implemented in subclass')
class wxIOBase:
def __init__(self):
self.device = None
# set timeout to 1000ms (the default)
self.timeout = 1000
def __del__(self):
pass
def Close(self):
if self.device:
self.device.Close()
def GetTimeout(self):
"""
Returns the internal timeout value in milliseconds
"""
return self.timeout
def Ioctl(self,cmd,arg):
if self.device:
self.device.Ioctl(cmd,arg)
def Open(self):
abstract()
def PutBack(self,char):
return self.device.PutBack(char)
def Read(self,length):
"""
        Try to read up to the given number of bytes (length) and return
        the data actually read. The function never blocks.
        For example:
        data = dev.Read(100)
"""
buf = "\x00"*(length+1)
self.device.Read(buf,length)
return buf
def ReadBinary(self,eos="\n"):
"""
        Special SCPI command. Read the next data block coded in the
        SCPI binary format.
        A binary data transfer starts with '#'. The next byte gives
        the number of digits in the length field, followed by the
        length digits themselves. After these the data begins.
        For example:
        #500004xxxx
        The length field covers 5 bytes ('00004'), so the binary
        data is 4 bytes long (x stands for a binary data byte)
"""
try:
eoslen = len(eos)
b=self.Readv(2)
if len(b) == 2:
hl = int(b[1])
b = self.Readv(hl)
if len(b) == hl:
dl = int(b)
                    # don't leave the eos string or character behind
                    # in the device input buffer
data = self.Readv(dl+eoslen)
                    # check if the binary data block is complete
if data[dl] == '#':
# not complete, another block is following
for c in data[dl:dl+eoslen]:
self.PutBack(c)
data = data[:dl] + self.ReadBinary()
return data
except:
pass
return ''
def ReadUntilEOS(self,eos="\n",quota=0):
"""
        ReadUntilEOS(eos=\"\\n\",quota=0)
        Reads data until the given eos string is received (default is
        the linefeed character, 0x0a) or the internal timeout
        (default 1000 ms) is reached.
        ReadUntilEOS returns the result as the following tuple:
        ['received string',state,readBytes]
        If a timeout occurred, state is 0, otherwise 1
"""
return self.device.ReadUntilEOS("",0,eos,self.timeout,quota)
def Readv(self,length):
"""
        Try to read the given number of bytes. Readv blocks until all
        data has been read successfully or the internal timeout, set
        with the class member function SetTimeout(timeout), is reached.
        Returns the data read.
"""
buf = "\x00"*length
self.device.Readv(buf,length,self.timeout)
return buf
def ResetBus(self):
"""
        If the underlying interface needs some special reset operations
        (for instance GPIB distinguishes between a normal device reset
        and a special bus reset), you can put some code here.
"""
pass
def SetTimeout(self,timeout):
"""
Set the internal timeout value in milliseconds for all blocked
operations like ReadUntilEOS, Readv and Writev.
"""
self.timeout = timeout
def Write(self,string):
"""
        Writes the given string to the device and returns immediately.
        Write returns the number of bytes successfully written or a
        negative number if an error occurred. Under some circumstances
        not the complete string is written, so verify the return value
        to check for this.
"""
return self.device.Write(string,len(string))
def Writev(self,string):
"""
        Writes the given string to the device. The function blocks until
        the complete string has been written or the internal timeout,
        set with SetTimeout(timeout), is reached.
        Writev returns the number of bytes successfully written or a
        negative value if an error occurred.
"""
return self.device.Writev(string,len(string),self.timeout)
class wxSerialPort(wxIOBase):
def __init__(self):
wxIOBase.__init__(self)
def __del__(self):
self.Close()
def ChangeLineState(self,lineState):
"""
        Change (toggle) the state of each of the lines given in the
        lineState parameter. Possible values are wxSERIAL_LINESTATE_DTR
        (the DTR signal) and/or wxSERIAL_LINESTATE_RTS (the RTS signal).
For example to toggle the RTS line only:
dev.ChangeLineState(wxSERIAL_LINESTATE_RTS)
"""
self.device.ChangeLineState(lineState)
def ClrLineState(self,lineState):
"""
Clear the lines given in the linestate parameter. Possible
values are wxSERIAL_LINESTATE_DTR (means the DTR signal) and/or
wxSERIAL_LINESTATE_RTS (RTS signal). For example to clear only
the RTS line:
dev.ClrLineState(wxSERIAL_LINESTATE_RTS)
"""
self.device.ClrLineState(lineState)
def GetAvailableBytes(self):
"""
        Returns the number of bytes waiting in the input queue of
        the serial driver.
"""
n = wxctb.new_intp()
wxctb.intp_assign(n, 0)
self.device.Ioctl(wxctb.CTB_SER_GETINQUE,n)
return wxctb.intp_value(n)
def GetCommErrors(self):
"""
Get the internal communication errors like breaks, framing,
parity or overrun errors.
Returns the count of each error as a tuple like this:
(b,f,o,p) = dev.GetCommErrors()
b: breaks, f: framing errors, o: overruns, p: parity errors
"""
einfo = wxctb.wxSerialPort_EINFO()
self.device.Ioctl(wxctb.CTB_SER_GETEINFO,einfo)
return einfo.brk,einfo.frame,einfo.overrun,einfo.parity
def GetLineState(self):
"""
        Returns the current line states of the CTS, DCD, DSR and RING
        signal lines as an integer value with the appropriate bits set,
        or -1 on error.
For example:
lines = dev.GetLineState()
if lines & wxSERIAL_LINESTATE_CTS:
print \"CTS is on\"
"""
return self.device.GetLineState()
def Open(self,devname,baudrate,protocol='8N1',handshake='no_handshake'):
"""
        Open the device devname with the given baudrate, a protocol
        description like '8N1' (default) and the handshake to use
        [no_handshake (default), rtscts or xonxoff].
        For example:
        On Linux:
        dev = wxSerialPort()
        dev.Open(\"/dev/ttyS0\",115200)
        or with a word length of 7 bits, even parity, 2 stopbits and
        rts/cts handshake:
        dev.Open(\"/dev/ttyS0\",115200,'7E2','rtscts')
        On Windows:
        dev = wxSerialPort()
        dev.Open(\"COM1\",115200)
        dev.Open(\"COM1\",115200,'7E2','rtscts')
        Returns the handle on success or a negative value on failure.
"""
# the following parity values are valid:
# N:None, O:Odd, E:Even, M:Mark, S:Space
parity = {'N':0,'O':1,'E':2,'M':3,'S':4}
# the regular expression ensures a valid value for the datalen
# (5...8 bit) and the count of stopbits (1,2)
reg=re.compile(r"(?P<w>[8765])"r"(?P<p>[NOEMS])"r"(?P<s>[12])")
self.device = wxctb.wxSerialPort()
dcs = wxctb.wxSerialPort_DCS()
dcs.baud = baudrate
res = reg.search(protocol)
# handle the given protocol
if res:
dcs.wordlen = int(res.group('w'))
dcs.stopbits = int(res.group('s'))
dcs.parity = parity[res.group('p')]
# valid handshake are no one, rts/cts or xon/xoff
if handshake == 'rtscts':
dcs.rtscts = True
elif handshake == 'xonxoff':
dcs.xonxoff = True
return self.device.Open(devname,dcs)
def Reset(self):
"""
Send a break for 0.25s.
"""
self.device.SendBreak(0)
def SetBaudRate(self,baudrate):
"""
Set the baudrate for the device.
"""
self.device.SetBaudRate(baudrate)
def SetLineState(self,lineState):
"""
Set the lines given in the linestate parameter. Possible
values are wxSERIAL_LINESTATE_DTR (means the DTR signal) and/or
wxSERIAL_LINESTATE_RTS (RTS signal). For example to set both:
dev.SetLineState(wxSERIAL_LINESTATE_DTR | wxSERIAL_LINESTATE_RTS)
"""
self.device.SetLineState(lineState)
class wxGPIB(wxIOBase):
"""
wxGPIB class
"""
def __init__(self):
wxIOBase.__init__(self)
def __del__(self):
self.Close()
def FindListeners(self,board = 0):
"""
        Returns the addresses of the connected devices as a list.
        If no device is listening, the list is empty. If an error
        occurs, an IOError exception is raised. For example:
g = wxGPIB()
listeners = g.FindListeners()
"""
listeners = wxctb.wxGPIB_x_FindListeners(board)
if listeners < 0:
raise IOError("GPIB board error")
result = []
for i in range(1,31):
if listeners & (1 << i):
result.append(i)
return result
def GetError(self):
errorString = " "*256
self.device.GetError(errorString,256)
return errorString
def GetSTB(self):
"""
Returns the value of the internal GPIB status byte register.
"""
stb = wxctb.new_intp()
wxctb.intp_assign(stb, 0)
self.device.Ioctl(wxctb.CTB_GPIB_GETRSP,stb)
return wxctb.intp_value(stb)
# This is only for internal usage!!!
def Ibrd(self,length):
buf = "\x00"*length
state = self.device.Ibrd(buf,length)
return state,buf
# This is only for internal usage!!!
def Ibwrt(self,string):
return self.device.Ibwrt(string,len(string))
def Open(self,devname,adr):
"""
Open(gpibdevice,address)
        Opens a connected device on the GPIB bus. gpibdevice names the
        controller (mostly \"gpib1\"), address is the address of the
        desired device in the range 1...31. For example:
        dev = wxGPIB()
        dev.Open(\"gpib1\",17)
        opens the device with the address 17.
        Open returns >= 0 or a negative value if something went wrong.
"""
self.device = wxctb.wxGPIB()
dcs = wxctb.wxGPIB_DCS()
dcs.m_address1 = adr
result = self.device.Open(devname,dcs)
return result
def Reset(self):
"""
        Resets the connected device. In the GPIB definition, the device
        should be reset to its initial state, so you can restart a
        formerly lost communication.
"""
self.device.Ioctl(wxctb.CTB_RESET,None)
def ResetBus(self):
"""
        The command asserts the GPIB interface clear (IFC) line for
        at least 100us if the GPIB board is the system controller.
This initializes the GPIB and makes the interface CIC and
active controller with ATN asserted.
Note! The IFC signal resets only the GPIB interface functions
of the bus devices and not the internal device functions.
For a device reset you should use the Reset() command above.
"""
self.device.Ioctl(wxctb.CTB_GPIB_RESET_BUS,None)
def GetKey():
"""
    Returns the currently pressed key or '\0' if no key is pressed.
    You can simply create a polling loop with:
    while GetKey() == '\0':
        ... do some work ...
"""
return wxctb.GetKey()
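# A minimal usage sketch for this wrapper (not executed), assuming an
# SCPI instrument answers on "/dev/ttyS0"; device name and command are
# placeholders:
#
#   dev = wxSerialPort()
#   if dev.Open("/dev/ttyS0", 115200) >= 0:
#       dev.Write("*idn?\n")
#       answer, state, count = dev.ReadUntilEOS()
#       print answer
#       dev.Close()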
| Python |
#!/usr/bin/env python
"""
Probes the solution along a line across the pipe and writes u.dat,
a plain ascii file containing y, the computed axial velocity u and
the exact parabolic profile ue
"""
import sys
import math
#Check if a vtk file argument is given
if len(sys.argv) == 1:
print "Please specify a vtk file name."
print "Example:"
print " ", sys.argv[0], " vol_200.vtk"
sys.exit(1)
#Set the vtk input file name
vtkfile = sys.argv[1]
import vtk
#Read the unstructured grid data
reader = vtk.vtkUnstructuredGridReader()
reader.ReadAllScalarsOn()
reader.ReadAllVectorsOn()
reader.SetFileName(vtkfile)
reader.Update()
# Create the line source to use for the probe line.
line = vtk.vtkLineSource()
line.SetPoint1(0.0,-1.0,0.0)
line.SetPoint2(0.0, 1.0,0.0)
line.SetResolution(100)
# Move the line into place and create the probe filter. For
# vtkProbeFilter, the probe line is the input, and the underlying data
# set is the source.
probe = vtk.vtkProbeFilter()
probe.SetInputConnection(line.GetOutputPort())
probe.SetSource(reader.GetOutput())
probe.Update()
data=probe.GetOutput()
#Extract velocity from point data
ptdata = data.GetPointData()
arrayid = ptdata.SetActiveVectors("velocity")
velocity = ptdata.GetArray(arrayid)
f = open('u.dat','w')
for i in range(velocity.GetNumberOfTuples()):
p = data.GetPoint(i)
a = velocity.GetTuple3(i)
y = p[1]
u = a[2] # z is along axis of pipe
ue = 111.61*(1 - y*y) # parabolic velocity
s = str(y) + " " + str(u) + " " + str(ue) + "\n"
    f.write(s)
f.close()
| Python |
#!/usr/bin/env python
"""
Generates cut?.vtp and section?.dat files
cut?.vtp is a VTK XMLPolyData file -> Use paraview to visualize this
section?.dat is a plain ascii file which contains x, y, z, -Cp
"""
import sys
import math
#Check if a vtk file argument is given
if len(sys.argv) == 1:
print "Please specify a vtk file name."
print "Example:"
print " ", sys.argv[0], " vol_200.vtk"
sys.exit(1)
#Set the vtk input file name
vtkfile = sys.argv[1]
import vtk
#Free stream values, q_inf = 0.5 * rho * (velocity)**2
GAMMA = 1.4
Mach = 0.9
pre_inf = 29765
q_inf = 16650
#Bounding box containing the geometry
#should not contain outer boundary
xmin, xmax = -0.1, 2.00
ymin, ymax = -0.08, 0.08
zmin, zmax = -0.5, 0.5
deg = 15.0 * math.pi / 180.0
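#The second cut plane is tilted by 15 degrees (see Ny[1] and Nz[1] below)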
#Normal to the cut plane
Nx = [0, 0, 0, 0, 0, 0, 0, 0]
Ny = [0, -math.cos(deg), 0, 0, 0, 0, 0, 0]
Nz = [1, math.sin(deg), 1, 1, 1, 1, 1, 1]
#Origin, some point on the cut plane
Ox = 0
Oy = 0
#List of cut planes to extract
#these are the sections for which experimental data is available
span = 0.457
zcuts = [0, 0, 0.25*span, 0.4*span, 0.6*span, 0.75*span, 0.85*span, 0.925*span]
#Find the cell/line which has p as its first point
def NextCell(p,d):
ncells = d.GetNumberOfCells()
for i in range(0,ncells):
cell = d.GetCell(i)
p1 = cell.GetPointId(0)
p2 = cell.GetPointId(1)
if p == p1:
return i
#Read the unstructured grid data
reader = vtk.vtkUnstructuredGridReader()
reader.ReadAllScalarsOn()
reader.ReadAllVectorsOn()
reader.SetFileName(vtkfile)
reader.Update()
data = reader.GetOutput()
#Loop over cut-planes
ncut = 0
for Oz in zcuts:
print "Extracting data for z =", Oz
#Define cut plane normal to z-axis
p = vtk.vtkPlane()
p.SetNormal(Nx[ncut], Ny[ncut], Nz[ncut])
p.SetOrigin(Ox, Oy, Oz)
#Make cutter and apply it to data
cutter=vtk.vtkCutter()
cutter.SetCutFunction(p)
cutter.SetInput(data)
cutter.Update()
out = cutter.GetOutput()
#Save cut data to file
    #Comment this out if you don't want the cut-plane data
cfile = 'cut' + str(int(ncut)) + '.vtp'
w = vtk.vtkXMLPolyDataWriter()
w.SetFileName(cfile)
w.SetInput(out)
w.Write()
#Extract boundary, includes outer boundary
bd = vtk.vtkFeatureEdges()
bd.BoundaryEdgesOn()
bd.ColoringOff()
bd.FeatureEdgesOff()
bd.ManifoldEdgesOff()
bd.AddInput(out)
bd.Update()
shape = bd.GetOutput()
if shape.GetNumberOfPoints() == 0:
print "No points found, something wrong !!!"
# Now extract only the shape not the outer boundary
g = vtk.vtkGeometryFilter()
g.ExtentClippingOn()
g.SetExtent(xmin,xmax,ymin,ymax,zmin,zmax)
g.SetInput(shape)
g.Update()
geom = g.GetOutput()
#Clean geom to remove unused points
#Not necessary, can be removed I think
cleaner = vtk.vtkCleanPolyData()
cleaner.SetInput(geom)
cleaner.Update()
geom = cleaner.GetOutput()
if geom.GetNumberOfPoints() == 0:
print "No points found, something wrong !!!"
#Extract pressure from point data
ptdata = geom.GetPointData()
arrayid = ptdata.SetActiveScalars("pressure")
pressure = ptdata.GetArray(arrayid)
#Write shape points into file
sfile = 'section' + str(int(ncut)) + '.dat'
numCells = geom.GetNumberOfCells()
f = open(sfile, 'w')
cellid = 0
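    #walk the section curve cell by cell: NextCell returns the cell whose
    #first point is the previous cell's end point, so points come out in
    #order (the extra iteration presumably re-writes the first point to
    #close the curve)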
for i in range(0,numCells+1):
cell = geom.GetCell(cellid)
p1 = cell.GetPointId(0)
p2 = cell.GetPointId(1)
pt = geom.GetPoint(p1)
pre = pressure.GetValue(p1)
Cp = -(pre - pre_inf)/q_inf
spt = str(pt[0]) + " " + str(pt[1]) + " " + str(pt[2])
spt = spt + " " + str(Cp) + "\n"
f.write(spt)
cellid = NextCell(p2,geom)
f.close()
ncut = ncut + 1
| Python |
#!/usr/bin/env python
"""
Generates cut.vtp and section.dat files
cut.vtp is a VTK XMLPolyData file -> Use paraview to visualize this
section.dat is a plain ascii file which contains x, y, z, -Cp
"""
import sys
#Check if a vtk file argument is given
if len(sys.argv) == 1:
print "Please specify a vtk file name."
print "Example:"
print " ", sys.argv[0], " vol_200.vtk"
sys.exit(1)
#Set the vtk input file name
vtkfile = sys.argv[1]
import vtk
#Free stream values, q_inf = 0.5 * rho * (velocity)**2
GAMMA = 1.4
Mach = 0.3
pre_inf = 1.0/(GAMMA*Mach**2)
q_inf = 0.5
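#With density and velocity scaled to 1, q_inf = 0.5 and the free stream
#pressure follows from the Mach number as p = 1/(GAMMA*Mach**2)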
#Bounding box containing the geometry
#should not contain outer boundary
xmin, xmax = -2, 2
ymin, ymax = -2, 2
zmin, zmax = -2, 2
#Normal to the cut plane
Nx = 0
Ny = 0
Nz = 1
#Origin, some point on the cut plane
Ox = 0
Oy = 0
Oz = 0
#Find the cell/line which has p as its first point
def NextCell(p,d):
ncells = d.GetNumberOfCells()
for i in range(0,ncells):
cell = d.GetCell(i)
p1 = cell.GetPointId(0)
p2 = cell.GetPointId(1)
if p == p1:
return i
#Read the unstructured grid data
reader = vtk.vtkUnstructuredGridReader()
reader.ReadAllScalarsOn()
reader.ReadAllVectorsOn()
reader.SetFileName(vtkfile)
reader.Update()
data = reader.GetOutput()
#Define cut plane normal to z-axis
p = vtk.vtkPlane()
p.SetNormal(Nx, Ny, Nz)
p.SetOrigin(Ox, Oy, Oz)
#Make cutter and apply it to data
cutter=vtk.vtkCutter()
cutter.SetCutFunction(p)
cutter.SetInput(data)
cutter.Update()
out = cutter.GetOutput()
#Save cut data to file
#Comment this out if you don't want the cut-plane data
cfile = 'cut.vtp'
w = vtk.vtkXMLPolyDataWriter()
w.SetFileName(cfile)
w.SetInput(out)
w.Write()
#Extract boundary, includes outer boundary
bd = vtk.vtkFeatureEdges()
bd.BoundaryEdgesOn()
bd.ColoringOff()
bd.FeatureEdgesOff()
bd.ManifoldEdgesOff()
bd.AddInput(out)
bd.Update()
shape = bd.GetOutput()
if shape.GetNumberOfPoints() == 0:
print "No points found, something wrong !!!"
# Now extract only the shape not the outer boundary
g = vtk.vtkGeometryFilter()
g.ExtentClippingOn()
g.SetExtent(xmin,xmax,ymin,ymax,zmin,zmax)
g.SetInput(shape)
g.Update()
geom = g.GetOutput()
#Clean geom to remove unused points
#Not necessary, can be removed I think
cleaner = vtk.vtkCleanPolyData()
cleaner.SetInput(geom)
cleaner.Update()
geom = cleaner.GetOutput()
if geom.GetNumberOfPoints() == 0:
print "No points found, something wrong !!!"
#Extract pressure from point data
ptdata = geom.GetPointData()
arrayid = ptdata.SetActiveScalars("pressure")
pressure = ptdata.GetArray(arrayid)
#Write shape points into file
sfile = 'section.dat'
numCells = geom.GetNumberOfCells()
f = open(sfile, 'w')
cellid = 0
for i in range(0,numCells+1):
cell = geom.GetCell(cellid)
p1 = cell.GetPointId(0)
p2 = cell.GetPointId(1)
pt = geom.GetPoint(p1)
pre = pressure.GetValue(p1)
Cp = -(pre - pre_inf)/q_inf
spt = str(pt[0]) + " " + str(pt[1]) + " " + str(pt[2])
spt = spt + " " + str(Cp) + "\n"
f.write(spt)
cellid = NextCell(p2,geom)
f.close()
| Python |
#!/usr/bin/env python
"""
Probes the flat plate boundary layer at x = 0.8 and writes x.dat,
a plain ascii file containing the Blasius similarity variable eta
and the scaled velocity components ux and uy
"""
import sys
import math
#Check if a vtk file argument is given
if len(sys.argv) == 1:
print "Please specify a vtk file name."
print "Example:"
print " ", sys.argv[0], " vol_200.vtk"
sys.exit(1)
#Set the vtk input file name
vtkfile = sys.argv[1]
import vtk
#Free stream values; here q_inf is the free stream speed
mu = 8.0e-4
rho_inf = 0.1
q_inf = 86.797
nu = mu/rho_inf
#Read the unstructured grid data
reader = vtk.vtkUnstructuredGridReader()
reader.ReadAllScalarsOn()
reader.ReadAllVectorsOn()
reader.SetFileName(vtkfile)
reader.Update()
x = 0.8
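#Local Reynolds number based on free stream speed and distance x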
Rex = q_inf * x / nu
# Create the line source to use for the probe line.
line = vtk.vtkLineSource()
line.SetPoint1(x,0.0,0.0)
line.SetPoint2(x,0.5,0.0)
line.SetResolution(500)
# Move the line into place and create the probe filter. For
# vtkProbeFilter, the probe line is the input, and the underlying data
# set is the source.
probe = vtk.vtkProbeFilter()
probe.SetInputConnection(line.GetOutputPort())
probe.SetSource(reader.GetOutput())
probe.Update()
data=probe.GetOutput()
#Extract velocity from point data
ptdata = data.GetPointData()
arrayid = ptdata.SetActiveVectors("velocity")
velocity = ptdata.GetArray(arrayid)
f = open('x.dat','w')
for i in range(velocity.GetNumberOfTuples()):
p = data.GetPoint(i)
a = velocity.GetTuple3(i)
y = p[1]
ux = a[0] / q_inf
uy = a[1] * math.sqrt(2*Rex) / q_inf
eta = y*math.sqrt(0.5*Rex)/x
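    #eta is the Blasius similarity variable; ux and uy are scaled for
    #comparison with the Blasius solution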
s = str(eta) + " " + str(ux) + " " + str(uy) + "\n"
    f.write(s)
f.close()
| Python |
#!/usr/bin/env python
"""
Generates cut.vtp and line.dat files
cut.vtp is a VTK XMLPolyData file -> Use paraview to visualize this
line.dat is a plain ascii file -> use gnuplot
"""
import sys
#Check if a vtk file argument is given
if len(sys.argv) == 1:
print "Please specify a vtk file name."
print "Example:"
print " ", sys.argv[0], " vol_200.vtk"
sys.exit(1)
#Set the vtk input file name
vtkfile = sys.argv[1]
import vtk
import math
# Flow properties
gamma = 1.4
mach = 3.0
#Normal to the cut plane
Nx = 1
Ny = 0
Nz = 0
#Origin, some point on the cut plane
Ox = 0
Oy = 0
Oz = 0
#Read the unstructured grid data
reader = vtk.vtkUnstructuredGridReader()
reader.ReadAllScalarsOn()
reader.ReadAllVectorsOn()
reader.SetFileName(vtkfile)
reader.Update()
data = reader.GetOutput()
#Define cut plane normal to z-axis
p = vtk.vtkPlane()
p.SetNormal(Nx, Ny, Nz)
p.SetOrigin(Ox, Oy, Oz)
#Make cutter and apply it to data
cutter=vtk.vtkCutter()
cutter.SetCutFunction(p)
cutter.SetInput(data)
cutter.Update()
out = cutter.GetOutput()
#Save cut data to file
#Comment this out if you don't want the cut-plane data
cfile = 'cut.vtp'
w = vtk.vtkXMLPolyDataWriter()
w.SetFileName(cfile)
w.SetInput(out)
w.Write()
# Extract solution along stagnation streamline
# Create the line source to use for the probe line.
line = vtk.vtkLineSource()
line.SetPoint1(0.0,0.0,2.0)
line.SetPoint2(0.0,0.0,1.0)
line.SetResolution(100)
# Move the line into place and create the probe filter. For
# vtkProbeFilter, the probe line is the input, and the underlying data
# set is the source.
probe = vtk.vtkProbeFilter()
probe.SetInputConnection(line.GetOutputPort())
probe.SetSource(reader.GetOutput())
probe.Update()
data=probe.GetOutput()
#Extract velocity from point data
ptdata = data.GetPointData()
arrayid = ptdata.SetActiveVectors("velocity")
velocity = ptdata.GetArray(arrayid)
arrayid = ptdata.SetActiveScalars("density")
density = ptdata.GetArray(arrayid)
arrayid = ptdata.SetActiveScalars("pressure")
pressure = ptdata.GetArray(arrayid)
# Writing following variables
# z, density, velocity, pressure, entropy
f = open('line.dat','w')
for i in range(velocity.GetNumberOfTuples()):
pt = data.GetPoint(i)
z = pt[2]
u = velocity.GetTuple3(i)
r = density.GetTuple1(i)
p = pressure.GetTuple1(i)
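    # entropy measure s = ln(p/rho**gamma); constant where the flow is isentropic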
ent= math.log(p/r**gamma)
s = str(z) + " " + str(r) + " " + str(u[2]) + " " + str(p)
s += " " + str(ent) + "\n"
    f.write(s)
f.close()
| Python |
#!/usr/bin/env python
"""
Probes the solution along the x-axis and writes line.dat,
a plain ascii file containing x, density, velocity u and pressure
"""
import sys
import math
#Check if a vtk file argument is given
if len(sys.argv) == 1:
print "Please specify a vtk file name."
print "Example:"
print " ", sys.argv[0], " vol_200.vtk"
sys.exit(1)
#Set the vtk input file name
vtkfile = sys.argv[1]
import vtk
#Read the unstructured grid data
reader = vtk.vtkUnstructuredGridReader()
reader.ReadAllScalarsOn()
reader.ReadAllVectorsOn()
reader.SetFileName(vtkfile)
reader.Update()
# Create the line source to use for the probe line.
line = vtk.vtkLineSource()
line.SetPoint1(0,0.0,0.0)
line.SetPoint2(1,0.0,0.0)
line.SetResolution(100)
# Move the line into place and create the probe filter. For
# vtkProbeFilter, the probe line is the input, and the underlying data
# set is the source.
probe = vtk.vtkProbeFilter()
probe.SetInputConnection(line.GetOutputPort())
probe.SetSource(reader.GetOutput())
probe.Update()
data=probe.GetOutput()
#Extract velocity from point data
ptdata = data.GetPointData()
arrayid = ptdata.SetActiveVectors("velocity")
velocity = ptdata.GetArray(arrayid)
arrayid = ptdata.SetActiveScalars("pressure")
pressure = ptdata.GetArray(arrayid)
arrayid = ptdata.SetActiveScalars("density")
density = ptdata.GetArray(arrayid)
f = open('line.dat','w')
for i in range(velocity.GetNumberOfTuples()):
pt = data.GetPoint(i)
x = pt[0]
a = velocity.GetTuple3(i)
u = a[0]
p = pressure.GetTuple1(i)
r = density.GetTuple1(i)
s = str(x) + " " + str(r) + " " + str(u) + " " + str(p) + "\n"
    f.write(s)
f.close()
| Python |
#!/usr/bin/env python
"""
Generates cut?.vtp and section?.dat files
cut?.vtp is a VTK XMLPolyData file -> Use paraview to visualize this
section?.dat is a plain ascii file which contains x, y, z, -Cp
"""
import sys
#Check if a vtk file argument is given
if len(sys.argv) == 1:
print "Please specify a vtk file name."
print "Example:"
print " ", sys.argv[0], " vol_200.vtk"
sys.exit(1)
#Set the vtk input file name
vtkfile = sys.argv[1]
import vtk
#Free stream values, q_inf = 0.5 * rho * (velocity)**2
GAMMA = 1.4
Mach = 0.8395
pre_inf = 1.0/(GAMMA*Mach**2)
q_inf = 0.5
#Bounding box containing the geometry
#should not contain outer boundary
xmin, xmax = 4, 7
ymin, ymax = 4, 6
zmin, zmax = 0.01, 1.5
#Normal to the cut plane
Nx = 0
Ny = 0
Nz = 1
#Origin, some point on the cut plane
Ox = 0
Oy = 0
#List of cut planes to extract
#these are the sections for which experimental data is available
span = 1.1963
zcuts = [0.2*span, 0.44*span, 0.65*span, 0.8*span, 0.9*span, 0.95*span, 0.99*span]
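#These stations match the classic ONERA M6 wing experiment (presumably
#the case computed here, given span 1.1963 and Mach 0.8395)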
#Find the cell/line which has p as its first point
def NextCell(p,d):
ncells = d.GetNumberOfCells()
for i in range(0,ncells):
cell = d.GetCell(i)
p1 = cell.GetPointId(0)
p2 = cell.GetPointId(1)
if p == p1:
return i
#Read the unstructured grid data
reader = vtk.vtkUnstructuredGridReader()
reader.ReadAllScalarsOn()
reader.ReadAllVectorsOn()
reader.SetFileName(vtkfile)
reader.Update()
data = reader.GetOutput()
#Loop over cut-planes
ncut = 0
for Oz in zcuts:
print "Extracting data for z =", Oz
#Define cut plane normal to z-axis
p = vtk.vtkPlane()
p.SetNormal(Nx, Ny, Nz)
p.SetOrigin(Ox, Oy, Oz)
#Make cutter and apply it to data
cutter=vtk.vtkCutter()
cutter.SetCutFunction(p)
cutter.SetInput(data)
cutter.Update()
out = cutter.GetOutput()
#Save cut data to file
    #Comment this out if you don't want the cut-plane data
cfile = 'cut' + str(int(ncut)) + '.vtp'
w = vtk.vtkXMLPolyDataWriter()
w.SetFileName(cfile)
w.SetInput(out)
w.Write()
#Extract boundary, includes outer boundary
bd = vtk.vtkFeatureEdges()
bd.BoundaryEdgesOn()
bd.ColoringOff()
bd.FeatureEdgesOff()
bd.ManifoldEdgesOff()
bd.AddInput(out)
bd.Update()
shape = bd.GetOutput()
if shape.GetNumberOfPoints() == 0:
print "No points found, something wrong !!!"
# Now extract only the shape not the outer boundary
g = vtk.vtkGeometryFilter()
g.ExtentClippingOn()
g.SetExtent(xmin,xmax,ymin,ymax,zmin,zmax)
g.SetInput(shape)
g.Update()
geom = g.GetOutput()
#Clean geom to remove unused points
#Not necessary, can be removed I think
cleaner = vtk.vtkCleanPolyData()
cleaner.SetInput(geom)
cleaner.Update()
geom = cleaner.GetOutput()
if geom.GetNumberOfPoints() == 0:
print "No points found, something wrong !!!"
#Extract pressure from point data
ptdata = geom.GetPointData()
arrayid = ptdata.SetActiveScalars("pressure")
pressure = ptdata.GetArray(arrayid)
#Write shape points into file
sfile = 'section' + str(int(ncut)) + '.dat'
numCells = geom.GetNumberOfCells()
f = open(sfile, 'w')
cellid = 0
for i in range(0,numCells+1):
cell = geom.GetCell(cellid)
p1 = cell.GetPointId(0)
p2 = cell.GetPointId(1)
pt = geom.GetPoint(p1)
pre = pressure.GetValue(p1)
Cp = -(pre - pre_inf)/q_inf
spt = str(pt[0]) + " " + str(pt[1]) + " " + str(pt[2])
spt = spt + " " + str(Cp) + "\n"
f.write(spt)
cellid = NextCell(p2,geom)
f.close()
ncut = ncut + 1
| Python |
# -*- coding: utf-8 -*-
import os
import os.path
import sys
import time
def removeFile(targetDir, targetPrefix):
    timestring = time.strftime("%Y%m%d", time.localtime(time.time()-3*86400))
deletetime = int(timestring)*100
for filename in os.listdir(targetDir):
targetFile = os.path.join(targetDir, filename)
if os.path.isfile(targetFile):
            split_filename = filename.split(targetPrefix)
            if len(split_filename) == 2 and int(split_filename[1]) < deletetime:
os.remove(targetFile)
print "delete %s" % (targetFile,)
def renameFile(targetDir, targetName):
    dstString = targetName + "." + time.strftime("%Y%m%d%H")
for filename in os.listdir(targetDir):
if targetName == filename:
srcFile = os.path.join(targetDir, filename)
dstFile = os.path.join(targetDir, dstString)
if os.path.isfile(srcFile):
print "'%s' rename to '%s'" % (srcFile, dstFile,)
os.rename(srcFile, dstFile)
break
def printHelp():
print "--- remove ---"
print "python logmanager.py remove dir fileprefix"
print "只保留4天之内的日志,比如log/下有很多日志文件,如ciqueue.log.2011081521"
print "执行 python logmanager.py remove log 'ciqueue.log.' 即可删除4天前的日志,保留今天在内的4天日志"
print "--- rename ---"
print "python logmanager.py rename dir filename"
print "用于日志切分,比如log/下有日志文件sdk.log,需要切分为sdk.log.YYYYMMDDHH,如sdk.log.2011081521"
print "执行 python logmanager.py rename log 'sdk.log' 即可切割为当前的sdk.log.YYYYMMDDHH"
if __name__ == "__main__":
if len(sys.argv) != 4:
printHelp()
sys.exit(1)
if sys.argv[1] != "rename" and sys.argv[1] != "remove":
printHelp()
sys.exit(1)
if sys.argv[1] == "remove":
removeFile(sys.argv[2], sys.argv[3])
else:
renameFile(sys.argv[2], sys.argv[3])
| Python |
import re
from SCons.Script import * # the usual scons stuff you get in a SConscript
def generate(env):
"""
Add builders and construction variables for the
SubstInFile tool.
Adds SubstInFile builder, which substitutes the keys->values of SUBST_DICT
from the source to the target.
The values of SUBST_DICT first have any construction variables expanded
(its keys are not expanded).
If a value of SUBST_DICT is a python callable function, it is called and
the result is expanded as the value.
If there's more than one source and more than one target, each target gets
substituted from the corresponding source.
"""
def do_subst_in_file(targetfile, sourcefile, dict):
"""Replace all instances of the keys of dict with their values.
For example, if dict is {'%VERSION%': '1.2345', '%BASE%': 'MyProg'},
then all instances of %VERSION% in the file will be replaced with 1.2345 etc.
"""
try:
f = open(sourcefile, 'rb')
contents = f.read()
f.close()
except:
raise SCons.Errors.UserError, "Can't read source file %s"%sourcefile
for (k,v) in dict.items():
contents = re.sub(k, v, contents)
try:
f = open(targetfile, 'wb')
f.write(contents)
f.close()
except:
raise SCons.Errors.UserError, "Can't write target file %s"%targetfile
return 0 # success
def subst_in_file(target, source, env):
if not env.has_key('SUBST_DICT'):
raise SCons.Errors.UserError, "SubstInFile requires SUBST_DICT to be set."
d = dict(env['SUBST_DICT']) # copy it
for (k,v) in d.items():
if callable(v):
d[k] = env.subst(v()).replace('\\','\\\\')
elif SCons.Util.is_String(v):
d[k] = env.subst(v).replace('\\','\\\\')
else:
raise SCons.Errors.UserError, "SubstInFile: key %s: %s must be a string or callable"%(k, repr(v))
        for (t,s) in zip(target, source):
            do_subst_in_file(str(t), str(s), d)
        return 0 # success; every target substituted from its corresponding source
def subst_in_file_string(target, source, env):
"""This is what gets printed on the console."""
return '\n'.join(['Substituting vars from %s into %s'%(str(s), str(t))
for (t,s) in zip(target, source)])
def subst_emitter(target, source, env):
"""Add dependency from substituted SUBST_DICT to target.
Returns original target, source tuple unchanged.
"""
d = env['SUBST_DICT'].copy() # copy it
for (k,v) in d.items():
if callable(v):
d[k] = env.subst(v())
elif SCons.Util.is_String(v):
d[k]=env.subst(v)
Depends(target, SCons.Node.Python.Value(d))
return target, source
    ## env.Append(TOOLS = 'substinfile') # this should be done automatically by SCons ?!?
subst_action = SCons.Action.Action( subst_in_file, subst_in_file_string )
env['BUILDERS']['SubstInFile'] = Builder(action=subst_action, emitter=subst_emitter)
def exists(env):
"""
Make sure tool exists.
"""
return True
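## Illustrative usage sketch from a SConscript; the file names and keys are
## hypothetical, only SUBST_DICT and the builder name come from this tool:
##   env.Tool('substinfile', toolpath=['scons-tools'])
##   env.SubstInFile('config.h', 'config.h.in',
##                   SUBST_DICT={'%VERSION%': '1.2345', '%BASE%': 'MyProg'})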
| Python |
import fnmatch
import os
def generate( env ):
def Glob( env, includes = None, excludes = None, dir = '.' ):
"""Adds Glob( includes = Split( '*' ), excludes = None, dir = '.')
helper function to environment.
        Globs the file-system for matching files.
        includes: list of file name patterns included in the return list when matched.
        excludes: list of file name patterns excluded from the return list.
Example:
sources = env.Glob( ("*.cpp", '*.h'), "~*.cpp", "#src" )
"""
def filterFilename(path):
abs_path = os.path.join( dir, path )
if not os.path.isfile(abs_path):
return 0
fn = os.path.basename(path)
match = 0
for include in includes:
if fnmatch.fnmatchcase( fn, include ):
match = 1
break
if match == 1 and not excludes is None:
for exclude in excludes:
if fnmatch.fnmatchcase( fn, exclude ):
match = 0
break
return match
if includes is None:
includes = ('*',)
elif type(includes) in ( type(''), type(u'') ):
includes = (includes,)
if type(excludes) in ( type(''), type(u'') ):
excludes = (excludes,)
dir = env.Dir(dir).abspath
paths = os.listdir( dir )
def makeAbsFileNode( path ):
return env.File( os.path.join( dir, path ) )
nodes = filter( filterFilename, paths )
return map( makeAbsFileNode, nodes )
from SCons.Script import Environment
Environment.Glob = Glob
def exists(env):
"""
Tool always exists.
"""
return True
| Python |
"""tarball
Tool-specific initialization for tarball.
"""
## Commands to tackle a command based implementation:
##to unpack on the fly...
##gunzip < FILE.tar.gz | tar xvf -
##to pack on the fly...
##tar cvf - FILE-LIST | gzip -c > FILE.tar.gz
import os.path
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Node.FS
import SCons.Util
try:
import gzip
import tarfile
internal_targz = 1
except ImportError:
internal_targz = 0
TARGZ_DEFAULT_COMPRESSION_LEVEL = 9
if internal_targz:
def targz(target, source, env):
def archive_name( path ):
path = os.path.normpath( os.path.abspath( path ) )
common_path = os.path.commonprefix( (base_dir, path) )
archive_name = path[len(common_path):]
return archive_name
def visit(tar, dirname, names):
for name in names:
path = os.path.join(dirname, name)
if os.path.isfile(path):
tar.add(path, archive_name(path) )
compression = env.get('TARGZ_COMPRESSION_LEVEL',TARGZ_DEFAULT_COMPRESSION_LEVEL)
base_dir = os.path.normpath( env.get('TARGZ_BASEDIR', env.Dir('.')).abspath )
target_path = str(target[0])
fileobj = gzip.GzipFile( target_path, 'wb', compression )
tar = tarfile.TarFile(os.path.splitext(target_path)[0], 'w', fileobj)
        for src in source: # renamed loop variable: 'source' was shadowed by its own elements
            source_path = str(src)
            if src.isdir():
os.path.walk(source_path, visit, tar)
else:
tar.add(source_path, archive_name(source_path) ) # filename, arcname
tar.close()
targzAction = SCons.Action.Action(targz, varlist=['TARGZ_COMPRESSION_LEVEL','TARGZ_BASEDIR'])
    def makeBuilder( emitter = None ):
        return SCons.Builder.Builder(action = SCons.Action.Action('$TARGZ_COM', '$TARGZ_COMSTR'),
                                     emitter = emitter, # previously accepted but silently dropped
                                     source_factory = SCons.Node.FS.Entry,
                                     source_scanner = SCons.Defaults.DirScanner,
                                     suffix = '$TARGZ_SUFFIX',
                                     multi = 1)
TarGzBuilder = makeBuilder()
def generate(env):
"""Add Builders and construction variables for zip to an Environment.
The following environnement variables may be set:
TARGZ_COMPRESSION_LEVEL: integer, [0-9]. 0: no compression, 9: best compression (same as gzip compression level).
TARGZ_BASEDIR: base-directory used to determine archive name (this allow archive name to be relative
to something other than top-dir).
"""
env['BUILDERS']['TarGz'] = TarGzBuilder
env['TARGZ_COM'] = targzAction
env['TARGZ_COMPRESSION_LEVEL'] = TARGZ_DEFAULT_COMPRESSION_LEVEL # range 0-9
env['TARGZ_SUFFIX'] = '.tar.gz'
        env['TARGZ_BASEDIR'] = env.Dir('.') # Source archive names are made relative to that directory.
else:
def generate(env):
pass
def exists(env):
return internal_targz
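## Illustrative usage sketch from a SConscript (target and source names are
## hypothetical):
##   env.Tool('targz', toolpath=['scons-tools'])
##   env.TarGz('dist/sources.tar.gz', [env.Dir('src'), 'README.txt'],
##             TARGZ_BASEDIR=env.Dir('.'))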
| Python |
import os
import os.path
from fnmatch import fnmatch
import targz
##def DoxyfileParse(file_contents):
## """
## Parse a Doxygen source file and return a dictionary of all the values.
## Values will be strings and lists of strings.
## """
## data = {}
##
## import shlex
## lex = shlex.shlex(instream = file_contents, posix = True)
## lex.wordchars += "*+./-:"
## lex.whitespace = lex.whitespace.replace("\n", "")
## lex.escape = ""
##
## lineno = lex.lineno
## last_backslash_lineno = lineno
## token = lex.get_token()
## key = token # the first token should be a key
## last_token = ""
## key_token = False
## next_key = False
## new_data = True
##
## def append_data(data, key, new_data, token):
## if new_data or len(data[key]) == 0:
## data[key].append(token)
## else:
## data[key][-1] += token
##
## while token:
## if token in ['\n']:
## if last_token not in ['\\']:
## key_token = True
## elif token in ['\\']:
## pass
## elif key_token:
## key = token
## key_token = False
## else:
## if token == "+=":
## if not data.has_key(key):
## data[key] = list()
## elif token == "=":
## data[key] = list()
## else:
## append_data( data, key, new_data, token )
## new_data = True
##
## last_token = token
## token = lex.get_token()
##
## if last_token == '\\' and token != '\n':
## new_data = False
## append_data( data, key, new_data, '\\' )
##
## # compress lists of len 1 into single strings
## for (k, v) in data.items():
## if len(v) == 0:
## data.pop(k)
##
## # items in the following list will be kept as lists and not converted to strings
## if k in ["INPUT", "FILE_PATTERNS", "EXCLUDE_PATTERNS"]:
## continue
##
## if len(v) == 1:
## data[k] = v[0]
##
## return data
##
##def DoxySourceScan(node, env, path):
## """
## Doxygen Doxyfile source scanner. This should scan the Doxygen file and add
## any files used to generate docs to the list of source files.
## """
## default_file_patterns = [
## '*.c', '*.cc', '*.cxx', '*.cpp', '*.c++', '*.java', '*.ii', '*.ixx',
## '*.ipp', '*.i++', '*.inl', '*.h', '*.hh ', '*.hxx', '*.hpp', '*.h++',
## '*.idl', '*.odl', '*.cs', '*.php', '*.php3', '*.inc', '*.m', '*.mm',
## '*.py',
## ]
##
## default_exclude_patterns = [
## '*~',
## ]
##
## sources = []
##
## data = DoxyfileParse(node.get_contents())
##
## if data.get("RECURSIVE", "NO") == "YES":
## recursive = True
## else:
## recursive = False
##
## file_patterns = data.get("FILE_PATTERNS", default_file_patterns)
## exclude_patterns = data.get("EXCLUDE_PATTERNS", default_exclude_patterns)
##
## for node in data.get("INPUT", []):
## if os.path.isfile(node):
## sources.add(node)
## elif os.path.isdir(node):
## if recursive:
## for root, dirs, files in os.walk(node):
## for f in files:
## filename = os.path.join(root, f)
##
## pattern_check = reduce(lambda x, y: x or bool(fnmatch(filename, y)), file_patterns, False)
## exclude_check = reduce(lambda x, y: x and fnmatch(filename, y), exclude_patterns, True)
##
## if pattern_check and not exclude_check:
## sources.append(filename)
## else:
## for pattern in file_patterns:
## sources.extend(glob.glob("/".join([node, pattern])))
## sources = map( lambda path: env.File(path), sources )
## return sources
##
##
##def DoxySourceScanCheck(node, env):
## """Check if we should scan this file"""
## return os.path.isfile(node.path)
def srcDistEmitter(source, target, env):
## """Doxygen Doxyfile emitter"""
## # possible output formats and their default values and output locations
## output_formats = {
## "HTML": ("YES", "html"),
## "LATEX": ("YES", "latex"),
## "RTF": ("NO", "rtf"),
## "MAN": ("YES", "man"),
## "XML": ("NO", "xml"),
## }
##
## data = DoxyfileParse(source[0].get_contents())
##
## targets = []
## out_dir = data.get("OUTPUT_DIRECTORY", ".")
##
## # add our output locations
## for (k, v) in output_formats.items():
## if data.get("GENERATE_" + k, v[0]) == "YES":
## targets.append(env.Dir( os.path.join(out_dir, data.get(k + "_OUTPUT", v[1]))) )
##
## # don't clobber targets
## for node in targets:
## env.Precious(node)
##
## # set up cleaning stuff
## for node in targets:
## env.Clean(node, node)
##
## return (targets, source)
return (target,source)
def generate(env):
"""
Add builders and construction variables for the
SrcDist tool.
"""
## doxyfile_scanner = env.Scanner(
## DoxySourceScan,
## "DoxySourceScan",
## scan_check = DoxySourceScanCheck,
## )
if targz.exists(env):
srcdist_builder = targz.makeBuilder( srcDistEmitter )
env['BUILDERS']['SrcDist'] = srcdist_builder
def exists(env):
"""
Make sure srcdist exists.
"""
return targz.exists(env)
| Python |
"""
Notes:
- shared library support is buggy: it assumes that a static and dynamic library can be built from the same object files. This is not true on many platforms. For this reason it is only enabled on linux-gcc at the current time.
To add a platform:
- add its name in options allowed_values below
- add tool initialization for this platform. Search for "if platform == 'suncc'" as an example.
"""
import os
import os.path
import sys
JSONCPP_VERSION = open(File('#version').abspath,'rt').read().strip()
DIST_DIR = '#dist'
options = Variables()
options.Add( EnumVariable('platform',
'Platform (compiler/stl) used to build the project',
'msvc71',
                          allowed_values='suncc vacpp mingw msvc6 msvc70 msvc71 msvc80 linux-gcc'.split(),
ignorecase=2) )
try:
platform = ARGUMENTS['platform']
if platform == 'linux-gcc':
CXX = 'g++' # not quite right, but env is not yet available.
import commands
version = commands.getoutput('%s -dumpversion' %CXX)
platform = 'linux-gcc-%s' %version
print "Using platform '%s'" %platform
LD_LIBRARY_PATH = os.environ.get('LD_LIBRARY_PATH', '')
LD_LIBRARY_PATH = "%s:libs/%s" %(LD_LIBRARY_PATH, platform)
os.environ['LD_LIBRARY_PATH'] = LD_LIBRARY_PATH
print "LD_LIBRARY_PATH =", LD_LIBRARY_PATH
except KeyError:
print 'You must specify a "platform"'
sys.exit(2)
print "Building using PLATFORM =", platform
rootbuild_dir = Dir('#buildscons')
build_dir = os.path.join( '#buildscons', platform )
bin_dir = os.path.join( '#bin', platform )
lib_dir = os.path.join( '#libs', platform )
sconsign_dir_path = Dir(build_dir).abspath
sconsign_path = os.path.join( sconsign_dir_path, '.sconsign.dbm' )
# Ensure the build directory exists (SConsignFile fails otherwise!)
if not os.path.exists( sconsign_dir_path ):
os.makedirs( sconsign_dir_path )
# Store all dependencies signature in a database
SConsignFile( sconsign_path )
def make_environ_vars():
"""Returns a dictionnary with environment variable to use when compiling."""
# PATH is required to find the compiler
# TEMP is required for at least mingw
vars = {}
for name in ('PATH', 'TEMP', 'TMP'):
if name in os.environ:
vars[name] = os.environ[name]
return vars
env = Environment( ENV = make_environ_vars(),
toolpath = ['scons-tools'],
tools=[] ) #, tools=['default'] )
if platform == 'suncc':
env.Tool( 'sunc++' )
env.Tool( 'sunlink' )
env.Tool( 'sunar' )
env.Append( CCFLAGS = ['-mt'] )
elif platform == 'vacpp':
env.Tool( 'default' )
env.Tool( 'aixcc' )
env['CXX'] = 'xlC_r' #scons does not pick-up the correct one !
# using xlC_r ensure multi-threading is enabled:
# http://publib.boulder.ibm.com/infocenter/pseries/index.jsp?topic=/com.ibm.vacpp7a.doc/compiler/ref/cuselect.htm
env.Append( CCFLAGS = '-qrtti=all',
LINKFLAGS='-bh:5' ) # -bh:5 remove duplicate symbol warning
elif platform == 'msvc6':
env['MSVS_VERSION']='6.0'
for tool in ['msvc', 'msvs', 'mslink', 'masm', 'mslib']:
env.Tool( tool )
env['CXXFLAGS']='-GR -GX /nologo /MT'
elif platform == 'msvc70':
env['MSVS_VERSION']='7.0'
for tool in ['msvc', 'msvs', 'mslink', 'masm', 'mslib']:
env.Tool( tool )
env['CXXFLAGS']='-GR -GX /nologo /MT'
elif platform == 'msvc71':
env['MSVS_VERSION']='7.1'
for tool in ['msvc', 'msvs', 'mslink', 'masm', 'mslib']:
env.Tool( tool )
env['CXXFLAGS']='-GR -GX /nologo /MT'
elif platform == 'msvc80':
env['MSVS_VERSION']='8.0'
for tool in ['msvc', 'msvs', 'mslink', 'masm', 'mslib']:
env.Tool( tool )
env['CXXFLAGS']='-GR -EHsc /nologo /MT'
elif platform == 'mingw':
env.Tool( 'mingw' )
env.Append( CPPDEFINES=[ "WIN32", "NDEBUG", "_MT" ] )
elif platform.startswith('linux-gcc'):
env.Tool( 'default' )
env.Append( LIBS = ['pthread'], CCFLAGS = "-Wall" )
env['SHARED_LIB_ENABLED'] = True
else:
print "UNSUPPORTED PLATFORM."
env.Exit(1)
env.Tool('targz')
env.Tool('srcdist')
env.Tool('globtool')
env.Append( CPPPATH = ['#include'],
LIBPATH = lib_dir )
short_platform = platform
if short_platform.startswith('msvc'):
short_platform = short_platform[2:]
# Notes: on Windows you need to rebuild the source for each variant
# Build script does not support that yet so we only build static libraries.
# This also fails on AIX because both dynamic and static libraries end with
# extension .a.
env['SHARED_LIB_ENABLED'] = env.get('SHARED_LIB_ENABLED', False)
env['LIB_PLATFORM'] = short_platform
env['LIB_LINK_TYPE'] = 'lib' # static
env['LIB_CRUNTIME'] = 'mt'
env['LIB_NAME_SUFFIX'] = '${LIB_PLATFORM}_${LIB_LINK_TYPE}${LIB_CRUNTIME}' # must match autolink naming convention
env['JSONCPP_VERSION'] = JSONCPP_VERSION
env['BUILD_DIR'] = env.Dir(build_dir)
env['ROOTBUILD_DIR'] = env.Dir(rootbuild_dir)
env['DIST_DIR'] = DIST_DIR
if 'TarGz' in env['BUILDERS']:
class SrcDistAdder:
def __init__( self, env ):
self.env = env
def __call__( self, *args, **kw ):
apply( self.env.SrcDist, (self.env['SRCDIST_TARGET'],) + args, kw )
env['SRCDIST_BUILDER'] = env.TarGz
else: # If tarfile module is missing
class SrcDistAdder:
def __init__( self, env ):
pass
def __call__( self, *args, **kw ):
pass
env['SRCDIST_ADD'] = SrcDistAdder( env )
env['SRCDIST_TARGET'] = os.path.join( DIST_DIR, 'jsoncpp-src-%s.tar.gz' % env['JSONCPP_VERSION'] )
env_testing = env.Clone( )
env_testing.Append( LIBS = ['json_${LIB_NAME_SUFFIX}'] )
def buildJSONExample( env, target_sources, target_name ):
env = env.Clone()
env.Append( CPPPATH = ['#'] )
exe = env.Program( target=target_name,
source=target_sources )
env['SRCDIST_ADD']( source=[target_sources] )
global bin_dir
return env.Install( bin_dir, exe )
def buildJSONTests( env, target_sources, target_name ):
jsontests_node = buildJSONExample( env, target_sources, target_name )
check_alias_target = env.Alias( 'check', jsontests_node, RunJSONTests( jsontests_node, jsontests_node ) )
env.AlwaysBuild( check_alias_target )
def buildUnitTests( env, target_sources, target_name ):
jsontests_node = buildJSONExample( env, target_sources, target_name )
check_alias_target = env.Alias( 'check', jsontests_node,
RunUnitTests( jsontests_node, jsontests_node ) )
env.AlwaysBuild( check_alias_target )
def buildLibrary( env, target_sources, target_name ):
static_lib = env.StaticLibrary( target=target_name + '_${LIB_NAME_SUFFIX}',
source=target_sources )
global lib_dir
env.Install( lib_dir, static_lib )
if env['SHARED_LIB_ENABLED']:
shared_lib = env.SharedLibrary( target=target_name + '_${LIB_NAME_SUFFIX}',
source=target_sources )
env.Install( lib_dir, shared_lib )
env['SRCDIST_ADD']( source=[target_sources] )
Export( 'env env_testing buildJSONExample buildLibrary buildJSONTests buildUnitTests' )
def buildProjectInDirectory( target_directory ):
global build_dir
target_build_dir = os.path.join( build_dir, target_directory )
target = os.path.join( target_directory, 'sconscript' )
SConscript( target, build_dir=target_build_dir, duplicate=0 )
env['SRCDIST_ADD']( source=[target] )
def runJSONTests_action( target, source = None, env = None ):
# Add test scripts to python path
jsontest_path = Dir( '#test' ).abspath
sys.path.insert( 0, jsontest_path )
data_path = os.path.join( jsontest_path, 'data' )
import runjsontests
return runjsontests.runAllTests( os.path.abspath(source[0].path), data_path )
def runJSONTests_string( target, source = None, env = None ):
return 'RunJSONTests("%s")' % source[0]
import SCons.Action
ActionFactory = SCons.Action.ActionFactory
RunJSONTests = ActionFactory(runJSONTests_action, runJSONTests_string )
def runUnitTests_action( target, source = None, env = None ):
# Add test scripts to python path
jsontest_path = Dir( '#test' ).abspath
sys.path.insert( 0, jsontest_path )
import rununittests
return rununittests.runAllTests( os.path.abspath(source[0].path) )
def runUnitTests_string( target, source = None, env = None ):
return 'RunUnitTests("%s")' % source[0]
RunUnitTests = ActionFactory(runUnitTests_action, runUnitTests_string )
env.Alias( 'check' )
srcdist_cmd = env['SRCDIST_ADD']( source = """
AUTHORS README.txt SConstruct
""".split() )
env.Alias( 'src-dist', srcdist_cmd )
buildProjectInDirectory( 'src/jsontestrunner' )
buildProjectInDirectory( 'src/lib_json' )
buildProjectInDirectory( 'src/test_lib_json' )
#print env.Dump()
| Python |
import os.path
import sys
def fix_source_eol( path, is_dry_run = True, verbose = True, eol = '\n' ):
"""Makes sure that all sources have the specified eol sequence (default: unix)."""
if not os.path.isfile( path ):
raise ValueError( 'Path "%s" is not a file' % path )
try:
f = open(path, 'rb')
except IOError, msg:
        print >> sys.stderr, "%s: I/O Error: %s" % (path, str(msg))
return False
try:
raw_lines = f.readlines()
finally:
f.close()
fixed_lines = [line.rstrip('\r\n') + eol for line in raw_lines]
if raw_lines != fixed_lines:
print '%s =>' % path,
if not is_dry_run:
f = open(path, "wb")
try:
f.writelines(fixed_lines)
finally:
f.close()
if verbose:
print is_dry_run and ' NEED FIX' or ' FIXED'
return True
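## Illustrative usage (file names are hypothetical): report files that need
## fixing without touching them, then force a solution file to Windows EOL:
##   fix_source_eol('src/lib_json/json_reader.cpp', is_dry_run=True)
##   fix_source_eol('jsoncpp.sln', is_dry_run=False, eol='\r\n')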
##
##
##
##def _do_fix( is_dry_run = True ):
## from waftools import antglob
## python_sources = antglob.glob( '.',
## includes = '**/*.py **/wscript **/wscript_build',
## excludes = antglob.default_excludes + './waf.py',
## prune_dirs = antglob.prune_dirs + 'waf-* ./build' )
## for path in python_sources:
## _fix_python_source( path, is_dry_run )
##
## cpp_sources = antglob.glob( '.',
## includes = '**/*.cpp **/*.h **/*.inl',
## prune_dirs = antglob.prune_dirs + 'waf-* ./build' )
## for path in cpp_sources:
## _fix_source_eol( path, is_dry_run )
##
##
##def dry_fix(context):
## _do_fix( is_dry_run = True )
##
##def fix(context):
## _do_fix( is_dry_run = False )
##
##def shutdown():
## pass
##
##def check(context):
## # Unit tests are run when "check" target is used
## ut = UnitTest.unit_test()
## ut.change_to_testfile_dir = True
## ut.want_to_see_test_output = True
## ut.want_to_see_test_error = True
## ut.run()
## ut.print_results()
| Python |
#!/usr/bin/env python
# encoding: utf-8
# Baptiste Lepilleur, 2009
from dircache import listdir
import re
import fnmatch
import os.path
# These fnmatch expressions are used by default to prune the directory tree
# while doing the recursive traversal in the glob_impl method of glob function.
prune_dirs = '.git .bzr .hg .svn _MTN _darcs CVS SCCS '
# These fnmatch expressions are used by default to exclude files and dirs
# while doing the recursive traversal in the glob_impl method of glob function.
##exclude_pats = prune_pats + '*~ #*# .#* %*% ._* .gitignore .cvsignore vssver.scc .DS_Store'.split()
# These ant_glob expressions are used by default to exclude files and dirs and also prune the directory tree
# while doing the recursive traversal in the glob_impl method of glob function.
default_excludes = '''
**/*~
**/#*#
**/.#*
**/%*%
**/._*
**/CVS
**/CVS/**
**/.cvsignore
**/SCCS
**/SCCS/**
**/vssver.scc
**/.svn
**/.svn/**
**/.git
**/.git/**
**/.gitignore
**/.bzr
**/.bzr/**
**/.hg
**/.hg/**
**/_MTN
**/_MTN/**
**/_darcs
**/_darcs/**
**/.DS_Store '''
DIR = 1
FILE = 2
DIR_LINK = 4
FILE_LINK = 8
LINKS = DIR_LINK | FILE_LINK
ALL_NO_LINK = DIR | FILE
ALL = DIR | FILE | LINKS
_ANT_RE = re.compile( r'(/\*\*/)|(\*\*/)|(/\*\*)|(\*)|(/)|([^\*/]*)' )
def ant_pattern_to_re( ant_pattern ):
"""Generates a regular expression from the ant pattern.
Matching convention:
**/a: match 'a', 'dir/a', 'dir1/dir2/a'
a/**/b: match 'a/b', 'a/c/b', 'a/d/c/b'
*.py: match 'script.py' but not 'a/script.py'
"""
rex = ['^']
next_pos = 0
sep_rex = r'(?:/|%s)' % re.escape( os.path.sep )
## print 'Converting', ant_pattern
for match in _ANT_RE.finditer( ant_pattern ):
## print 'Matched', match.group()
## print match.start(0), next_pos
if match.start(0) != next_pos:
raise ValueError( "Invalid ant pattern" )
if match.group(1): # /**/
rex.append( sep_rex + '(?:.*%s)?' % sep_rex )
elif match.group(2): # **/
rex.append( '(?:.*%s)?' % sep_rex )
elif match.group(3): # /**
rex.append( sep_rex + '.*' )
elif match.group(4): # *
rex.append( '[^/%s]*' % re.escape(os.path.sep) )
elif match.group(5): # /
rex.append( sep_rex )
else: # somepath
rex.append( re.escape(match.group(6)) )
next_pos = match.end()
rex.append('$')
return re.compile( ''.join( rex ) )
def _as_list( l ):
if isinstance(l, basestring):
return l.split()
return l
def glob(dir_path,
includes = '**/*',
excludes = default_excludes,
entry_type = FILE,
prune_dirs = prune_dirs,
         max_depth = 25): # note: max_depth is currently not enforced during traversal
include_filter = [ant_pattern_to_re(p) for p in _as_list(includes)]
exclude_filter = [ant_pattern_to_re(p) for p in _as_list(excludes)]
prune_dirs = [p.replace('/',os.path.sep) for p in _as_list(prune_dirs)]
dir_path = dir_path.replace('/',os.path.sep)
entry_type_filter = entry_type
def is_pruned_dir( dir_name ):
for pattern in prune_dirs:
if fnmatch.fnmatch( dir_name, pattern ):
return True
return False
def apply_filter( full_path, filter_rexs ):
"""Return True if at least one of the filter regular expression match full_path."""
for rex in filter_rexs:
if rex.match( full_path ):
return True
return False
def glob_impl( root_dir_path ):
child_dirs = [root_dir_path]
while child_dirs:
dir_path = child_dirs.pop()
for entry in listdir( dir_path ):
full_path = os.path.join( dir_path, entry )
## print 'Testing:', full_path,
is_dir = os.path.isdir( full_path )
if is_dir and not is_pruned_dir( entry ): # explore child directory ?
## print '===> marked for recursion',
child_dirs.append( full_path )
included = apply_filter( full_path, include_filter )
rejected = apply_filter( full_path, exclude_filter )
if not included or rejected: # do not include entry ?
## print '=> not included or rejected'
continue
link = os.path.islink( full_path )
is_file = os.path.isfile( full_path )
if not is_file and not is_dir:
## print '=> unknown entry type'
continue
if link:
entry_type = is_file and FILE_LINK or DIR_LINK
else:
entry_type = is_file and FILE or DIR
## print '=> type: %d' % entry_type,
if (entry_type & entry_type_filter) != 0:
## print ' => KEEP'
yield os.path.join( dir_path, entry )
## else:
## print ' => TYPE REJECTED'
return list( glob_impl( dir_path ) )
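## Illustrative usage (directory names are hypothetical): collect all Python
## sources below src/ honoring the default excludes, or match directories as
## well by widening entry_type:
##   py_sources = glob('src', includes='**/*.py')
##   dirs_and_files = glob('.', includes='**/doc*', entry_type=ALL)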
if __name__ == "__main__":
import unittest
class AntPatternToRETest(unittest.TestCase):
## def test_conversion( self ):
## self.assertEqual( '^somepath$', ant_pattern_to_re( 'somepath' ).pattern )
def test_matching( self ):
test_cases = [ ( 'path',
['path'],
['somepath', 'pathsuffix', '/path', '/path'] ),
( '*.py',
['source.py', 'source.ext.py', '.py'],
['path/source.py', '/.py', 'dir.py/z', 'z.pyc', 'z.c'] ),
( '**/path',
['path', '/path', '/a/path', 'c:/a/path', '/a/b/path', '//a/path', '/a/path/b/path'],
['path/', 'a/path/b', 'dir.py/z', 'somepath', 'pathsuffix', 'a/somepath'] ),
( 'path/**',
['path/a', 'path/path/a', 'path//'],
['path', 'somepath/a', 'a/path', 'a/path/a', 'pathsuffix/a'] ),
( '/**/path',
['/path', '/a/path', '/a/b/path/path', '/path/path'],
['path', 'path/', 'a/path', '/pathsuffix', '/somepath'] ),
( 'a/b',
['a/b'],
['somea/b', 'a/bsuffix', 'a/b/c'] ),
( '**/*.py',
['script.py', 'src/script.py', 'a/b/script.py', '/a/b/script.py'],
['script.pyc', 'script.pyo', 'a.py/b'] ),
( 'src/**/*.py',
['src/a.py', 'src/dir/a.py'],
['a/src/a.py', '/src/a.py'] ),
]
for ant_pattern, accepted_matches, rejected_matches in list(test_cases):
def local_path( paths ):
return [ p.replace('/',os.path.sep) for p in paths ]
test_cases.append( (ant_pattern, local_path(accepted_matches), local_path( rejected_matches )) )
for ant_pattern, accepted_matches, rejected_matches in test_cases:
rex = ant_pattern_to_re( ant_pattern )
print 'ant_pattern:', ant_pattern, ' => ', rex.pattern
for accepted_match in accepted_matches:
print 'Accepted?:', accepted_match
self.assert_( rex.match( accepted_match ) is not None )
for rejected_match in rejected_matches:
print 'Rejected?:', rejected_match
self.assert_( rex.match( rejected_match ) is None )
unittest.main()
| Python |
# module
| Python |
import os.path
import gzip
import tarfile
TARGZ_DEFAULT_COMPRESSION_LEVEL = 9
def make_tarball(tarball_path, sources, base_dir, prefix_dir=''):
"""Parameters:
tarball_path: output path of the .tar.gz file
sources: list of sources to include in the tarball, relative to the current directory
base_dir: if a source file is in a sub-directory of base_dir, then base_dir is stripped
from path in the tarball.
    prefix_dir: all files stored in the tarball will be placed under the sub-directory
        prefix_dir. Set to '' to make them children of the archive root.
"""
base_dir = os.path.normpath( os.path.abspath( base_dir ) )
def archive_name( path ):
"""Makes path relative to base_dir."""
path = os.path.normpath( os.path.abspath( path ) )
common_path = os.path.commonprefix( (base_dir, path) )
archive_name = path[len(common_path):]
if os.path.isabs( archive_name ):
archive_name = archive_name[1:]
return os.path.join( prefix_dir, archive_name )
def visit(tar, dirname, names):
for name in names:
path = os.path.join(dirname, name)
if os.path.isfile(path):
path_in_tar = archive_name(path)
tar.add(path, path_in_tar )
compression = TARGZ_DEFAULT_COMPRESSION_LEVEL
tar = tarfile.TarFile.gzopen( tarball_path, 'w', compresslevel=compression )
try:
for source in sources:
source_path = source
if os.path.isdir( source ):
os.path.walk(source_path, visit, tar)
else:
path_in_tar = archive_name(source_path)
tar.add(source_path, path_in_tar ) # filename, arcname
finally:
tar.close()
def decompress( tarball_path, base_dir ):
"""Decompress the gzipped tarball into directory base_dir.
"""
# !!! This class method is not documented in the online doc
# nor is bz2open!
tar = tarfile.TarFile.gzopen(tarball_path, mode='r')
try:
tar.extractall( base_dir )
finally:
tar.close()
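# Illustrative round-trip (the version number is hypothetical), mirroring how
# the release script packages an svn export and unpacks it for a dist check:
#   make_tarball('dist/jsoncpp-src-0.5.0.tar.gz', ['dist/export'],
#                'dist/export', prefix_dir='jsoncpp-src-0.5.0')
#   decompress('dist/jsoncpp-src-0.5.0.tar.gz', 'dist/distcheck')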
| Python |
"""Tag the sandbox for release, make source and doc tarballs.
Requires Python 2.6
Example of invocation (use to test the script):
python makerelease.py --force --retag --platform=msvc6,msvc71,msvc80,mingw -ublep 0.5.0 0.6.0-dev
Example of invocation when doing a release:
python makerelease.py 0.5.0 0.6.0-dev
"""
import os.path
import subprocess
import sys
import doxybuild
import xml.etree.ElementTree as ElementTree
import shutil
import urllib2
import tempfile
import os
import time
from devtools import antglob, fixeol, tarball
SVN_ROOT = 'https://jsoncpp.svn.sourceforge.net/svnroot/jsoncpp/'
SVN_TAG_ROOT = SVN_ROOT + 'tags/jsoncpp'
SCONS_LOCAL_URL = 'http://sourceforge.net/projects/scons/files/scons-local/1.2.0/scons-local-1.2.0.tar.gz/download'
SOURCEFORGE_PROJECT = 'jsoncpp'
def set_version( version ):
with open('version','wb') as f:
f.write( version.strip() )
def rmdir_if_exist( dir_path ):
if os.path.isdir( dir_path ):
shutil.rmtree( dir_path )
class SVNError(Exception):
pass
def svn_command( command, *args ):
cmd = ['svn', '--non-interactive', command] + list(args)
print 'Running:', ' '.join( cmd )
process = subprocess.Popen( cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT )
stdout = process.communicate()[0]
if process.returncode:
error = SVNError( 'SVN command failed:\n' + stdout )
error.returncode = process.returncode
raise error
return stdout
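# Example (illustrative): svn_command('list', SVN_TAG_ROOT) runs
# "svn --non-interactive list <url>" and returns the captured stdout;
# on a non-zero exit code it raises SVNError with .returncode set.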
def check_no_pending_commit():
"""Checks that there is no pending commit in the sandbox."""
stdout = svn_command( 'status', '--xml' )
etree = ElementTree.fromstring( stdout )
msg = []
for entry in etree.getiterator( 'entry' ):
path = entry.get('path')
status = entry.find('wc-status').get('item')
if status != 'unversioned' and path != 'version':
msg.append( 'File "%s" has pending change (status="%s")' % (path, status) )
if msg:
msg.insert(0, 'Pending change to commit found in sandbox. Commit them first!' )
return '\n'.join( msg )
def svn_join_url( base_url, suffix ):
if not base_url.endswith('/'):
base_url += '/'
if suffix.startswith('/'):
suffix = suffix[1:]
return base_url + suffix
def svn_check_if_tag_exist( tag_url ):
"""Checks if a tag exist.
Returns: True if the tag exist, False otherwise.
"""
try:
list_stdout = svn_command( 'list', tag_url )
except SVNError, e:
        if e.returncode != 1 or tag_url not in str(e):
            raise e
        # otherwise ignore the error: it indicates the tag does not exist
return False
return True
def svn_commit( message ):
"""Commit the sandbox, providing the specified comment.
"""
svn_command( 'ci', '-m', message )
def svn_tag_sandbox( tag_url, message ):
"""Makes a tag based on the sandbox revisions.
"""
svn_command( 'copy', '-m', message, '.', tag_url )
def svn_remove_tag( tag_url, message ):
"""Removes an existing tag.
"""
svn_command( 'delete', '-m', message, tag_url )
def svn_export( tag_url, export_dir ):
"""Exports the tag_url revision to export_dir.
    Target directory, including its parents, is created if it does not exist.
    If the directory export_dir exists, it is deleted before the export proceeds.
"""
rmdir_if_exist( export_dir )
svn_command( 'export', tag_url, export_dir )
def fix_sources_eol( dist_dir ):
"""Set file EOL for tarball distribution.
"""
print 'Preparing exported source file EOL for distribution...'
prune_dirs = antglob.prune_dirs + 'scons-local* ./build* ./libs ./dist'
win_sources = antglob.glob( dist_dir,
includes = '**/*.sln **/*.vcproj',
prune_dirs = prune_dirs )
unix_sources = antglob.glob( dist_dir,
includes = '''**/*.h **/*.cpp **/*.inl **/*.txt **/*.dox **/*.py **/*.html **/*.in
sconscript *.json *.expected AUTHORS LICENSE''',
excludes = antglob.default_excludes + 'scons.py sconsign.py scons-*',
prune_dirs = prune_dirs )
for path in win_sources:
fixeol.fix_source_eol( path, is_dry_run = False, verbose = True, eol = '\r\n' )
for path in unix_sources:
fixeol.fix_source_eol( path, is_dry_run = False, verbose = True, eol = '\n' )
def download( url, target_path ):
"""Download file represented by url to target_path.
"""
f = urllib2.urlopen( url )
try:
data = f.read()
finally:
f.close()
fout = open( target_path, 'wb' )
try:
fout.write( data )
finally:
fout.close()
def check_compile( distcheck_top_dir, platform ):
cmd = [sys.executable, 'scons.py', 'platform=%s' % platform, 'check']
print 'Running:', ' '.join( cmd )
log_path = os.path.join( distcheck_top_dir, 'build-%s.log' % platform )
flog = open( log_path, 'wb' )
try:
process = subprocess.Popen( cmd,
stdout=flog,
stderr=subprocess.STDOUT,
cwd=distcheck_top_dir )
stdout = process.communicate()[0]
status = (process.returncode == 0)
finally:
flog.close()
return (status, log_path)
def write_tempfile( content, **kwargs ):
fd, path = tempfile.mkstemp( **kwargs )
f = os.fdopen( fd, 'wt' )
try:
f.write( content )
finally:
f.close()
return path
class SFTPError(Exception):
pass
def run_sftp_batch( userhost, sftp, batch, retry=0 ):
path = write_tempfile( batch, suffix='.sftp', text=True )
# psftp -agent -C blep,jsoncpp@web.sourceforge.net -batch -b batch.sftp -bc
cmd = [sftp, '-agent', '-C', '-batch', '-b', path, '-bc', userhost]
error = None
for retry_index in xrange(0, max(1,retry)):
heading = retry_index == 0 and 'Running:' or 'Retrying:'
print heading, ' '.join( cmd )
process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
stdout = process.communicate()[0]
if process.returncode != 0:
error = SFTPError( 'SFTP batch failed:\n' + stdout )
else:
break
if error:
raise error
return stdout
def sourceforge_web_synchro( sourceforge_project, doc_dir,
user=None, sftp='sftp' ):
"""Notes: does not synchronize sub-directory of doc-dir.
"""
userhost = '%s,%s@web.sourceforge.net' % (user, sourceforge_project)
stdout = run_sftp_batch( userhost, sftp, """
cd htdocs
dir
exit
""" )
existing_paths = set()
collect = 0
for line in stdout.split('\n'):
line = line.strip()
if not collect and line.endswith('> dir'):
collect = True
elif collect and line.endswith('> exit'):
break
elif collect == 1:
collect = 2
elif collect == 2:
path = line.strip().split()[-1:]
if path and path[0] not in ('.', '..'):
existing_paths.add( path[0] )
upload_paths = set( [os.path.basename(p) for p in antglob.glob( doc_dir )] )
paths_to_remove = existing_paths - upload_paths
if paths_to_remove:
        print 'Removing the following files from the web site:'
print '\n'.join( paths_to_remove )
stdout = run_sftp_batch( userhost, sftp, """cd htdocs
rm %s
exit""" % ' '.join(paths_to_remove) )
print 'Uploading %d files:' % len(upload_paths)
batch_size = 10
upload_paths = list(upload_paths)
start_time = time.time()
for index in xrange(0,len(upload_paths),batch_size):
paths = upload_paths[index:index+batch_size]
        sec_per_file = (time.time() - start_time) / (index+1)
        remaining_files = len(upload_paths) - index
        remaining_sec = sec_per_file * remaining_files
print '%d/%d, ETA=%.1fs' % (index+1, len(upload_paths), remaining_sec)
run_sftp_batch( userhost, sftp, """cd htdocs
lcd %s
mput %s
exit""" % (doc_dir, ' '.join(paths) ), retry=3 )
def sourceforge_release_tarball( sourceforge_project, paths, user=None, sftp='sftp' ):
userhost = '%s,%s@frs.sourceforge.net' % (user, sourceforge_project)
run_sftp_batch( userhost, sftp, """
mput %s
exit
""" % (' '.join(paths),) )
def main():
usage = """%prog release_version next_dev_version
Update 'version' file to release_version and commit.
Generates the document tarball.
Tags the sandbox revision with release_version.
Update 'version' file to next_dev_version and commit.
Performs an svn export of the release tag, and builds a source tarball.
Must be started in the project top directory.
Warning: --force should only be used when developing/testing the release script.
"""
from optparse import OptionParser
parser = OptionParser(usage=usage)
parser.allow_interspersed_args = False
parser.add_option('--dot', dest="dot_path", action='store', default=doxybuild.find_program('dot'),
help="""Path to GraphViz dot tool. Must be full qualified path. [Default: %default]""")
parser.add_option('--doxygen', dest="doxygen_path", action='store', default=doxybuild.find_program('doxygen'),
help="""Path to Doxygen tool. [Default: %default]""")
parser.add_option('--force', dest="ignore_pending_commit", action='store_true', default=False,
help="""Ignore pending commit. [Default: %default]""")
parser.add_option('--retag', dest="retag_release", action='store_true', default=False,
help="""Overwrite release existing tag if it exist. [Default: %default]""")
parser.add_option('-p', '--platforms', dest="platforms", action='store', default='',
help="""Comma separated list of platform passed to scons for build check.""")
parser.add_option('--no-test', dest="no_test", action='store_true', default=False,
help="""Skips build check.""")
parser.add_option('--no-web', dest="no_web", action='store_true', default=False,
help="""Do not update web site.""")
parser.add_option('-u', '--upload-user', dest="user", action='store',
help="""Sourceforge user for SFTP documentation upload.""")
parser.add_option('--sftp', dest='sftp', action='store', default=doxybuild.find_program('psftp', 'sftp'),
help="""Path of the SFTP compatible binary used to upload the documentation.""")
parser.enable_interspersed_args()
options, args = parser.parse_args()
if len(args) != 2:
        parser.error( 'Must provide release_version and next_dev_version on the command-line.' )
release_version = args[0]
next_version = args[1]
if not options.platforms and not options.no_test:
        parser.error( 'You must specify either the --platforms or the --no-test option.' )
if options.ignore_pending_commit:
msg = ''
else:
msg = check_no_pending_commit()
if not msg:
print 'Setting version to', release_version
set_version( release_version )
svn_commit( 'Release ' + release_version )
tag_url = svn_join_url( SVN_TAG_ROOT, release_version )
if svn_check_if_tag_exist( tag_url ):
if options.retag_release:
svn_remove_tag( tag_url, 'Overwriting previous tag' )
else:
                print 'Aborting, tag %s already exists. Use --retag to overwrite it!' % tag_url
sys.exit( 1 )
svn_tag_sandbox( tag_url, 'Release ' + release_version )
        print 'Generating doxygen documentation...'
## doc_dirname = r'jsoncpp-api-html-0.5.0'
## doc_tarball_path = r'e:\prg\vc\Lib\jsoncpp-trunk\dist\jsoncpp-api-html-0.5.0.tar.gz'
doc_tarball_path, doc_dirname = doxybuild.build_doc( options, make_release=True )
doc_distcheck_dir = 'dist/doccheck'
tarball.decompress( doc_tarball_path, doc_distcheck_dir )
doc_distcheck_top_dir = os.path.join( doc_distcheck_dir, doc_dirname )
export_dir = 'dist/export'
svn_export( tag_url, export_dir )
fix_sources_eol( export_dir )
source_dir = 'jsoncpp-src-' + release_version
source_tarball_path = 'dist/%s.tar.gz' % source_dir
print 'Generating source tarball to', source_tarball_path
tarball.make_tarball( source_tarball_path, [export_dir], export_dir, prefix_dir=source_dir )
# Decompress source tarball, download and install scons-local
distcheck_dir = 'dist/distcheck'
distcheck_top_dir = distcheck_dir + '/' + source_dir
print 'Decompressing source tarball to', distcheck_dir
rmdir_if_exist( distcheck_dir )
tarball.decompress( source_tarball_path, distcheck_dir )
scons_local_path = 'dist/scons-local.tar.gz'
print 'Downloading scons-local to', scons_local_path
download( SCONS_LOCAL_URL, scons_local_path )
print 'Decompressing scons-local to', distcheck_top_dir
tarball.decompress( scons_local_path, distcheck_top_dir )
# Run compilation
print 'Compiling decompressed tarball'
all_build_status = True
for platform in options.platforms.split(','):
print 'Testing platform:', platform
build_status, log_path = check_compile( distcheck_top_dir, platform )
print 'see build log:', log_path
print build_status and '=> ok' or '=> FAILED'
all_build_status = all_build_status and build_status
if not build_status:
print 'Testing failed on at least one platform, aborting...'
svn_remove_tag( tag_url, 'Removing tag due to failed testing' )
sys.exit(1)
if options.user:
if not options.no_web:
print 'Uploading documentation using user', options.user
sourceforge_web_synchro( SOURCEFORGE_PROJECT, doc_distcheck_top_dir, user=options.user, sftp=options.sftp )
print 'Completed documentation upload'
print 'Uploading source and documentation tarballs for release using user', options.user
sourceforge_release_tarball( SOURCEFORGE_PROJECT,
[source_tarball_path, doc_tarball_path],
user=options.user, sftp=options.sftp )
print 'Source and doc release tarballs uploaded'
else:
            print 'No upload user specified. Web site and download tarballs were not uploaded.'
print 'Tarball can be found at:', doc_tarball_path
# Set next version number and commit
set_version( next_version )
svn_commit( 'Released ' + release_version )
else:
sys.stderr.write( msg + '\n' )
if __name__ == '__main__':
main()
| Python |
"""Script to generate doxygen documentation.
"""
import re
import os
import os.path
import sys
import shutil
from devtools import tarball
def find_program(*filenames):
"""find a program in folders path_lst, and sets env[var]
@param filenames: a list of possible names of the program to search for
@return: the full path of the filename if found, or '' if filename could not be found
"""
paths = os.environ.get('PATH', '').split(os.pathsep)
    suffixes = ('win32' in sys.platform) and '.exe .com .bat .cmd' or ''
    for filename in filenames:
        # also try the bare name, otherwise nothing is ever found on platforms without suffixes
        for name in [filename+ext for ext in suffixes.split() + ['']]:
for directory in paths:
full_path = os.path.join(directory, name)
if os.path.isfile(full_path):
return full_path
return ''
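# Example (illustrative), mirroring the option defaults in main() below:
#   dot_path = find_program('dot')         # '' if GraphViz is not on PATH
#   doxygen_path = find_program('doxygen')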
def do_subst_in_file(targetfile, sourcefile, dict):
"""Replace all instances of the keys of dict with their values.
For example, if dict is {'%VERSION%': '1.2345', '%BASE%': 'MyProg'},
then all instances of %VERSION% in the file will be replaced with 1.2345 etc.
"""
try:
f = open(sourcefile, 'rb')
contents = f.read()
f.close()
except:
print "Can't read source file %s"%sourcefile
raise
for (k,v) in dict.items():
v = v.replace('\\','\\\\')
contents = re.sub(k, v, contents)
try:
f = open(targetfile, 'wb')
f.write(contents)
f.close()
except:
print "Can't write target file %s"%targetfile
raise
def run_doxygen(doxygen_path, config_file, working_dir, is_silent):
config_file = os.path.abspath( config_file )
old_cwd = os.getcwd()
try:
os.chdir( working_dir )
cmd = [doxygen_path, config_file]
print 'Running:', ' '.join( cmd )
try:
import subprocess
except:
if os.system( ' '.join( cmd ) ) != 0:
print 'Documentation generation failed'
return False
else:
if is_silent:
process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
else:
process = subprocess.Popen( cmd )
stdout, _ = process.communicate()
if process.returncode:
print 'Documentation generation failed:'
print stdout
return False
return True
finally:
os.chdir( old_cwd )
def build_doc( options, make_release=False ):
if make_release:
options.make_tarball = True
options.with_dot = True
options.with_html_help = True
options.with_uml_look = True
options.open = False
options.silent = True
version = open('version','rt').read().strip()
output_dir = 'dist/doxygen' # relative to doc/doxyfile location.
if not os.path.isdir( output_dir ):
os.makedirs( output_dir )
top_dir = os.path.abspath( '.' )
html_output_dirname = 'jsoncpp-api-html-' + version
tarball_path = os.path.join( 'dist', html_output_dirname + '.tar.gz' )
warning_log_path = os.path.join( output_dir, '../jsoncpp-doxygen-warning.log' )
html_output_path = os.path.join( output_dir, html_output_dirname )
    def yesno( flag ): # renamed from 'bool' to avoid shadowing the builtin
        return flag and 'YES' or 'NO'
subst_keys = {
'%JSONCPP_VERSION%': version,
'%DOC_TOPDIR%': '',
'%TOPDIR%': top_dir,
'%HTML_OUTPUT%': os.path.join( '..', output_dir, html_output_dirname ),
'%HAVE_DOT%': yesno(options.with_dot),
'%DOT_PATH%': os.path.split(options.dot_path)[0],
'%HTML_HELP%': yesno(options.with_html_help),
'%UML_LOOK%': yesno(options.with_uml_look),
'%WARNING_LOG_PATH%': os.path.join( '..', warning_log_path )
}
if os.path.isdir( output_dir ):
print 'Deleting directory:', output_dir
shutil.rmtree( output_dir )
if not os.path.isdir( output_dir ):
os.makedirs( output_dir )
do_subst_in_file( 'doc/doxyfile', 'doc/doxyfile.in', subst_keys )
ok = run_doxygen( options.doxygen_path, 'doc/doxyfile', 'doc', is_silent=options.silent )
if not options.silent:
print open(warning_log_path, 'rb').read()
index_path = os.path.abspath(os.path.join(subst_keys['%HTML_OUTPUT%'], 'index.html'))
print 'Generated documentation can be found in:'
print index_path
if options.open:
import webbrowser
webbrowser.open( 'file://' + index_path )
if options.make_tarball:
print 'Generating doc tarball to', tarball_path
tarball_sources = [
output_dir,
'README.txt',
'version'
]
tarball_basedir = os.path.join( output_dir, html_output_dirname )
tarball.make_tarball( tarball_path, tarball_sources, tarball_basedir, html_output_dirname )
return tarball_path, html_output_dirname
def main():
usage = """%prog
Generates doxygen documentation in build/doxygen.
Optionally makes a tarball of the documentation to dist/.
Must be started in the project top directory.
"""
from optparse import OptionParser
parser = OptionParser(usage=usage)
parser.allow_interspersed_args = False
parser.add_option('--with-dot', dest="with_dot", action='store_true', default=False,
help="""Enable usage of DOT to generate collaboration diagram""")
parser.add_option('--dot', dest="dot_path", action='store', default=find_program('dot'),
help="""Path to GraphViz dot tool. Must be full qualified path. [Default: %default]""")
parser.add_option('--doxygen', dest="doxygen_path", action='store', default=find_program('doxygen'),
help="""Path to Doxygen tool. [Default: %default]""")
parser.add_option('--with-html-help', dest="with_html_help", action='store_true', default=False,
help="""Enable generation of Microsoft HTML HELP""")
parser.add_option('--no-uml-look', dest="with_uml_look", action='store_false', default=True,
help="""Generates DOT graph without UML look [Default: False]""")
parser.add_option('--open', dest="open", action='store_true', default=False,
help="""Open the HTML index in the web browser after generation""")
parser.add_option('--tarball', dest="make_tarball", action='store_true', default=False,
help="""Generates a tarball of the documentation in dist/ directory""")
parser.add_option('-s', '--silent', dest="silent", action='store_true', default=False,
help="""Hides doxygen output""")
parser.enable_interspersed_args()
options, args = parser.parse_args()
build_doc( options )
if __name__ == '__main__':
main()
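# Note (illustrative, inferred from the call site above rather than from
# do_subst_in_file's definition): do_subst_in_file('doc/doxyfile',
# 'doc/doxyfile.in', subst_keys) is assumed to read the doxyfile.in template,
# replace each %KEY% placeholder listed in subst_keys, and write the result to
# doc/doxyfile, e.g. HTML_OUTPUT = %HTML_OUTPUT% becomes
# HTML_OUTPUT = ../dist/doxygen/jsoncpp-api-html-<version>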
| Python |
import sys
import os
import os.path
import subprocess
from glob import glob
import optparse
VALGRIND_CMD = 'valgrind --tool=memcheck --leak-check=yes --undef-value-errors=yes'
class TestProxy(object):
def __init__( self, test_exe_path, use_valgrind=False ):
self.test_exe_path = os.path.normpath( os.path.abspath( test_exe_path ) )
self.use_valgrind = use_valgrind
def run( self, options ):
if self.use_valgrind:
cmd = VALGRIND_CMD.split()
else:
cmd = []
cmd.extend( [self.test_exe_path, '--test-auto'] + options )
process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
stdout = process.communicate()[0]
if process.returncode:
return False, stdout
return True, stdout
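# Contract assumed by runAllTests below (inferred from the calls, not from any
# external spec): the test executable prints one test name per line when run
# with "--list-tests", and exits non-zero when "--test <name>" fails.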
def runAllTests( exe_path, use_valgrind=False ):
test_proxy = TestProxy( exe_path, use_valgrind=use_valgrind )
status, test_names = test_proxy.run( ['--list-tests'] )
if not status:
print >> sys.stderr, "Failed to obtain unit tests list:\n" + test_names
return 1
test_names = [name.strip() for name in test_names.strip().split('\n')]
failures = []
for name in test_names:
print 'TESTING %s:' % name,
succeed, result = test_proxy.run( ['--test', name] )
if succeed:
print 'OK'
else:
failures.append( (name, result) )
print 'FAILED'
failed_count = len(failures)
pass_count = len(test_names) - failed_count
if failed_count:
print
for name, result in failures:
print result
print '%d/%d tests passed (%d failure(s))' % (
pass_count, len(test_names), failed_count)
return 1
else:
print 'All %d tests passed' % len(test_names)
return 0
def main():
from optparse import OptionParser
parser = OptionParser( usage="%prog [options] <path to test_lib_json.exe>" )
parser.add_option("--valgrind",
action="store_true", dest="valgrind", default=False,
help="run all the tests using valgrind to detect memory leaks")
parser.enable_interspersed_args()
options, args = parser.parse_args()
if len(args) != 1:
        parser.error( 'Must provide the path to the test_lib_json executable.' )
sys.exit( 1 )
exit_code = runAllTests( args[0], use_valgrind=options.valgrind )
sys.exit( exit_code )
if __name__ == '__main__':
main()
| Python |
# removes all files created during testing
import glob
import os
paths = []
for pattern in [ '*.actual', '*.actual-rewrite', '*.rewrite', '*.process-output' ]:
paths += glob.glob( 'data/' + pattern )
for path in paths:
os.unlink( path )
| Python |
import glob
import os.path
for path in glob.glob( '*.json' ):
text = file(path,'rt').read()
target = os.path.splitext(path)[0] + '.expected'
if os.path.exists( target ):
print 'skipping:', target
else:
print 'creating:', target
file(target,'wt').write(text)
| Python |
# Simple implementation of a json test runner to run the test against json-py.
import sys
import os.path
import json
import types
if len(sys.argv) != 2:
print "Usage: %s input-json-file", sys.argv[0]
sys.exit(3)
input_path = sys.argv[1]
base_path = os.path.splitext(input_path)[0]
actual_path = base_path + '.actual'
rewrite_path = base_path + '.rewrite'
rewrite_actual_path = base_path + '.actual-rewrite'
def valueTreeToString( fout, value, path = '.' ):
ty = type(value)
if ty is types.DictType:
fout.write( '%s={}\n' % path )
suffix = path[-1] != '.' and '.' or ''
names = value.keys()
names.sort()
for name in names:
valueTreeToString( fout, value[name], path + suffix + name )
elif ty is types.ListType:
fout.write( '%s=[]\n' % path )
for index, childValue in zip( xrange(0,len(value)), value ):
valueTreeToString( fout, childValue, path + '[%d]' % index )
    elif ty is types.StringType or ty is types.UnicodeType:  # stdlib json yields unicode strings
        fout.write( '%s="%s"\n' % (path,value) )
    elif ty is types.IntType or ty is types.LongType:
        fout.write( '%s=%d\n' % (path,value) )
elif ty is types.FloatType:
fout.write( '%s=%.16g\n' % (path,value) )
elif value is True:
fout.write( '%s=true\n' % path )
elif value is False:
fout.write( '%s=false\n' % path )
elif value is None:
fout.write( '%s=null\n' % path )
else:
        assert False, "Unexpected value type"
def parseAndSaveValueTree( input, actual_path ):
root = json.loads( input )
fout = file( actual_path, 'wt' )
valueTreeToString( fout, root )
fout.close()
return root
def rewriteValueTree( value, rewrite_path ):
rewrite = json.dumps( value )
#rewrite = rewrite[1:-1] # Somehow the string is quoted ! jsonpy bug ?
file( rewrite_path, 'wt').write( rewrite + '\n' )
return rewrite
input = file( input_path, 'rt' ).read()
root = parseAndSaveValueTree( input, actual_path )
rewrite = rewriteValueTree( root, rewrite_path )  # rewriteValueTree() applies json.dumps() itself; json.write() does not exist in the stdlib json module
rewrite_root = parseAndSaveValueTree( rewrite, rewrite_actual_path )
sys.exit( 0 )
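# Illustrative note (not part of the original runner): for an input document
# {"a": [1, "x"]} the flattened .actual file written by valueTreeToString is
#   .={}
#   .a=[]
#   .a[0]=1
#   .a[1]="x"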
| Python |
import sys
import os
import os.path
from glob import glob
import optparse
VALGRIND_CMD = 'valgrind --tool=memcheck --leak-check=yes --undef-value-errors=yes '
def compareOutputs( expected, actual, message ):
expected = expected.strip().replace('\r','').split('\n')
actual = actual.strip().replace('\r','').split('\n')
diff_line = 0
max_line_to_compare = min( len(expected), len(actual) )
for index in xrange(0,max_line_to_compare):
if expected[index].strip() != actual[index].strip():
diff_line = index + 1
break
if diff_line == 0 and len(expected) != len(actual):
diff_line = max_line_to_compare+1
if diff_line == 0:
return None
def safeGetLine( lines, index ):
        index -= 1  # diff_line is 1-based; convert to a 0-based list index
if index >= len(lines):
return ''
return lines[index].strip()
return """ Difference in %s at line %d:
Expected: '%s'
Actual: '%s'
""" % (message, diff_line,
safeGetLine(expected,diff_line),
safeGetLine(actual,diff_line) )
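# Behaviour sketch (summarising compareOutputs above): it returns None when
# both outputs match line by line (ignoring '\r' and surrounding whitespace),
# otherwise a report naming the first differing line of the given message kind.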
def safeReadFile( path ):
try:
return file( path, 'rt' ).read()
except IOError, e:
return '<File "%s" is missing: %s>' % (path,e)
def runAllTests( jsontest_executable_path, input_dir = None,
use_valgrind=False, with_json_checker=False ):
if not input_dir:
input_dir = os.path.join( os.getcwd(), 'data' )
tests = glob( os.path.join( input_dir, '*.json' ) )
if with_json_checker:
test_jsonchecker = glob( os.path.join( input_dir, '../jsonchecker', '*.json' ) )
else:
test_jsonchecker = []
failed_tests = []
valgrind_path = use_valgrind and VALGRIND_CMD or ''
for input_path in tests + test_jsonchecker:
is_json_checker_test = input_path in test_jsonchecker
print 'TESTING:', input_path,
options = is_json_checker_test and '--json-checker' or ''
pipe = os.popen( "%s%s %s %s" % (
valgrind_path, jsontest_executable_path, options,
input_path) )
process_output = pipe.read()
status = pipe.close()
if is_json_checker_test:
expect_failure = os.path.basename( input_path ).startswith( 'fail' )
if expect_failure:
if status is None:
print 'FAILED'
failed_tests.append( (input_path, 'Parsing should have failed:\n%s' %
safeReadFile(input_path)) )
else:
print 'OK'
else:
if status is not None:
print 'FAILED'
failed_tests.append( (input_path, 'Parsing failed:\n' + process_output) )
else:
print 'OK'
else:
base_path = os.path.splitext(input_path)[0]
actual_output = safeReadFile( base_path + '.actual' )
actual_rewrite_output = safeReadFile( base_path + '.actual-rewrite' )
file(base_path + '.process-output','wt').write( process_output )
if status:
print 'parsing failed'
failed_tests.append( (input_path, 'Parsing failed:\n' + process_output) )
else:
expected_output_path = os.path.splitext(input_path)[0] + '.expected'
expected_output = file( expected_output_path, 'rt' ).read()
detail = ( compareOutputs( expected_output, actual_output, 'input' )
or compareOutputs( expected_output, actual_rewrite_output, 'rewrite' ) )
if detail:
print 'FAILED'
failed_tests.append( (input_path, detail) )
else:
print 'OK'
if failed_tests:
print
print 'Failure details:'
for failed_test in failed_tests:
print '* Test', failed_test[0]
print failed_test[1]
print
print 'Test results: %d passed, %d failed.' % (len(tests)-len(failed_tests),
len(failed_tests) )
return 1
else:
print 'All %d tests passed.' % len(tests)
return 0
def main():
from optparse import OptionParser
parser = OptionParser( usage="%prog [options] <path to jsontestrunner.exe> [test case directory]" )
parser.add_option("--valgrind",
action="store_true", dest="valgrind", default=False,
help="run all the tests using valgrind to detect memory leaks")
parser.add_option("-c", "--with-json-checker",
action="store_true", dest="with_json_checker", default=False,
help="run all the tests from the official JSONChecker test suite of json.org")
parser.enable_interspersed_args()
options, args = parser.parse_args()
if len(args) < 1 or len(args) > 2:
        parser.error( 'Must provide the path to the jsontestrunner executable (and optionally a test case directory).' )
sys.exit( 1 )
jsontest_executable_path = os.path.normpath( os.path.abspath( args[0] ) )
if len(args) > 1:
input_path = os.path.normpath( os.path.abspath( args[1] ) )
else:
input_path = None
status = runAllTests( jsontest_executable_path, input_path,
use_valgrind=options.valgrind, with_json_checker=options.with_json_checker )
sys.exit( status )
if __name__ == '__main__':
main()
| Python |
# ----------------------------------------------------------------------
# idl_lex.py
#
# A lexer for IDL.
# ----------------------------------------------------------------------
import sys
sys.path.insert(0,"../..")
import ply.lex as lex
# Reserved words
reserved = (
'CLASS', 'METHOD',
'UINT32_T', 'INT32_T', 'UINT64_T', 'INT64_T', 'STRING', 'BINARY', 'DOUBLE',
'BALANCE', 'DEFAULT',
)
tokens = reserved + (
# Literals (identifier, integer constant, float constant, string constant, char const)
'ID', 'ICONST', 'FCONST', 'SCONST', 'CCONST',
    # Delimiters ( ) [ ] { } ; = == ,
'LPAREN', 'RPAREN',
'LBRACKET', 'RBRACKET',
'LBRACE', 'RBRACE',
'SEMI', 'EQUALS', 'EEQUALS', 'COMMA',
)
# Completely ignored characters
t_ignore = ' \t\x0c'
# Newlines
def t_NEWLINE(t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
# Delimiters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_SEMI = r';'
t_COMMA = r','
# Assignment operators
t_EQUALS = r'='
t_EEQUALS = r'=='
# Identifiers and reserved words
reserved_map = { }
for r in reserved:
reserved_map[r.lower()] = r
def t_ID(t):
r'[A-Za-z_][\w_]*'
t.type = reserved_map.get(t.value,"ID")
return t
# Integer literal
t_ICONST = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'
# Floating literal
t_FCONST = r'((\d+)(\.\d+)(e(\+|-)?(\d+))?|(\d+)e(\+|-)?(\d+))([lL]|[fF])?'  # no spaces around '|': in a non-verbose regex they would match literally
# String literal
t_SCONST = r'\"([^\\\n]|(\\.))*?\"'
# Character constant 'c' or L'c'
t_CCONST = r'(L)?\'([^\\\n]|(\\.))*?\''
# Comments
def t_comment(t):
r'/\*(.|\n)*?\*/|//.*|\#.*'
t.lexer.lineno += t.value.count('\n')
def t_error(t):
print("Illegal character %s" % repr(t.value[0]))
t.lexer.skip(1)
def t_preprocessor(t):
r'\#(.)*?\n'
t.lexer.lineno += 1
lexer = lex.lex(optimize=1)
if __name__ == "__main__":
lex.runmain(lexer)
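# Illustrative tokenisation (an assumed input, based on the rules above):
#     method query(string keyword,);
# is tokenised as METHOD ID LPAREN STRING ID COMMA RPAREN SEMI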
| Python |
# -----------------------------------------------------------------------------
# cparse.py
#
# Simple parser for the class/method IDL (scaffolding adapted from PLY's ANSI C example).
# -----------------------------------------------------------------------------
import sys, copy
import api_lex
import ply.yacc as yacc
import ply.lex as lex
# Get the token map
tokens = api_lex.tokens
classs = {}
classs['methods'] = []
curparamlist=[]
curclassname = ""
curclasslist = []
curmethodname = ""
curmethodlist = []
curparamname = "__init__"
trace = 0
curtype = ""
curbalance = ""
curdefault = ""
mytrace = 0
debug = 0
# class
def p_all_class_specifier(t):
'all_class_specifier : class_specifier_list'
pass
def p_class_specifier(t):
'class_specifier : CLASS ID LBRACE method_specifier_list RBRACE SEMI'
global trace
global curclassname, curclasslist, curmethodlist
curclassname = str(t[2])
trace += 1
if mytrace:
print trace, "classname: ", curclassname, "p_class_specifier"
for x in curclasslist:
if x['name'] == curclassname:
print "repeat class name in one unit", "name:", curclassname
sys.exit(1)
classdict = {}
classdict['name'] = curclassname
classdict['methodlist'] = copy.copy(curmethodlist)
curclasslist.append(copy.copy(classdict))
curmethodlist = []
curclassname = ""
pass
def p_class_specifier_list_1(t):
'class_specifier_list : class_specifier'
global trace
trace += 1
if mytrace:
print trace, "p_class_specifier_list_1"
pass
def p_class_specifier_list_2(t):
'class_specifier_list : class_specifier_list class_specifier'
global trace
trace += 1
if mytrace:
print trace, "p_class_specifier_list_2"
pass
def p_method_specifier(t):
'method_specifier : METHOD ID LPAREN param_specifier_list RPAREN SEMI'
global trace, curmethodname, curparamlist
curmethodname = str(t[2])
trace += 1
if mytrace:
print trace, "methodname: ", curmethodname, "p_method_specifier"
for x in curmethodlist:
if x['name'] == curmethodname:
print "repeat method name in one class", "name:", curmethodname
sys.exit(1)
methoddict = {}
methoddict['name'] = curmethodname
methoddict['paramlist'] = curparamlist
curmethodlist.append(copy.copy(methoddict))
curparamlist = []
pass
def p_method_specifier_list_1(t):
'method_specifier_list : method_specifier'
global trace
trace += 1
if mytrace:
print trace, "p_method_specifier_list_1"
pass
def p_method_specifier_list_2(t):
'method_specifier_list : method_specifier_list method_specifier'
global trace
trace += 1
if mytrace:
print trace, "p_method_specifier_list_2"
pass
def p_type_specifier(t):
'''type_specifier : UINT32_T
| INT32_T
| UINT64_T
| INT64_T
| BINARY
| STRING
| DOUBLE
| ID
'''
global trace, curparamname, curtype
trace += 1
curtype = str(t[1])
if mytrace:
print trace, "type:", curtype
# | struct_specifier
pass
def p_const_specifier(t):
'''const_specifier : ICONST
| FCONST
| SCONST
| CCONST
'''
global trace, curparamname, curparamlist, curdefault
trace += 1
curdefault = str(t[1])
assert len(curdefault) > 0
if mytrace:
print trace, "param default:", curdefault, "name", curparamname
pass
def p_param_specifier_1(t):
'param_specifier : BALANCE param_specifier'
global curparamname, curparamlist
    assert curparamlist[-1]['name'] == curparamname  # balance marks the most recently parsed param
    curparamlist[-1]['balance'] = 1
if mytrace:
print "balance one: ", curparamname, curparamlist
pass
def p_param_specifier_2(t):
'param_specifier : type_specifier ID LBRACKET const_specifier RBRACKET COMMA'
global trace, curparamname, curtype, curdefault
trace += 1
curparamname = str(t[2])
for x in curparamlist:
if x['name'] == curparamname:
print "repeat param name in one method", "name:", curparamname
sys.exit(1)
paramdict = {}
paramdict['name'] = curparamname
paramdict['type'] = 'list'
paramdict['lsize'] = int(curdefault)
curparamlist.append(paramdict)
curtype = ""
curdefault = ""
if mytrace:
print trace, "name", curparamlist[0]['name'], \
"default", curdefault, "p_param_specifier_2", curparamlist[0]
def p_param_specifier_3(t):
'param_specifier : type_specifier ID COMMA'
global trace, curparamname, curtype, curdefault
curparamname = str(t[2])
trace += 1
for x in curparamlist:
if x['name'] == curparamname:
print "repeat param name in one method", "name:", curparamname
sys.exit(1)
paramdict = {}
paramdict['name'] = curparamname
paramdict['type'] = curtype
curparamlist.append(paramdict)
curtype = ""
if len(curdefault) > 0:
for x in curparamlist:
if x['name'] == curparamname:
x['default'] = curdefault
curdefault = ""
if mytrace:
print trace, "name", curparamlist[0]['name'], \
"default", curdefault, "p_param_specifier_3", curparamlist[0]
pass
def p_param_specifier_4(t):
# 'param_specifier : type_specifier ID EQUALS DEFAULT LPAREN const_specifier RPAREN COMMA'
'param_specifier : type_specifier ID EQUALS DEFAULT LPAREN const_specifier RPAREN COMMA'
global trace, curparamname, curtype, curdefault
curparamname = str(t[2])
trace += 1
for x in curparamlist:
if x['name'] == curparamname:
print "repeat param name in one method", "name:", curparamname
sys.exit(1)
paramdict = {}
paramdict['name'] = curparamname
paramdict['type'] = curtype
curparamlist.append(paramdict)
curtype = ""
if len(curdefault) > 0:
for x in curparamlist:
if x['name'] == curparamname:
x['default'] = curdefault
curdefault = ""
if mytrace:
print trace, "name", curparamlist[0]['name'], \
"default", curdefault, "p_param_specifier_4", curparamlist[0],
pass
def p_param_specifier_5(t):
'param_specifier : type_specifier ID EEQUALS DEFAULT LPAREN const_specifier RPAREN COMMA'
global trace, curparamname, curtype, curdefault
curparamname = str(t[2])
for x in curparamlist:
if x['name'] == curparamname:
print "repeat param name in one method", "name:", curparamname
sys.exit(1)
paramdict = {}
paramdict['name'] = curparamname
paramdict['type'] = curtype
curparamlist.append(paramdict)
curtype = ""
if len(curdefault) > 0:
for x in curparamlist:
if x['name'] == curparamname:
x['default'] = curdefault
x['hide'] = 1
curdefault = ""
if mytrace:
print trace, "name", curparamlist[0]['name'], \
"hide default", curdefault, "p_param_specifier_4", curparamlist[0],
pass
def p_param_specifier_list_1(t):
'param_specifier_list : param_specifier'
pass
def p_param_specifier_list_2(t):
'param_specifier_list : param_specifier_list param_specifier'
pass
def p_error(t):
print("Whoa. We're hosed")
assert 0
def isint(string):
try:
int(string)
except ValueError:
return False
return True
def check(classdict):
for c in classdict:
# print "cccccccccccc", c
iclassname = c['name']
iclassdesc = c['methodlist']
if debug:
print "------------------------------------------"
print "class:", iclassname
assert iclassdesc != []
for m in iclassdesc:
imethodname = m['name']
imethoddesc = m['paramlist']
if debug:
print " method:", imethodname
hasbalance = 0
for p in imethoddesc:
# print "pppppppppppp", p
iparamname = p['name']
iparamdesc = p
curbalance = iparamdesc.has_key('balance')
if hasbalance and curbalance :
print "method", imethodname, "has two balance key, invalid"
sys.exit(1)
if curbalance:
curbalance = 1
hasbalance = 1
if iparamdesc['type'] == 'list':
print "param", iparamname, "is a list, can't be set to balance, stupid"
sys.exit(1)
else:
curbalance = 0
# type check
if debug:
print " ", iparamname, "-", "type:", iparamdesc['type'],
if debug:
if iparamdesc['type'] == 'list':
print "[", iparamdesc['lsize'], "]",
if debug:
print "balance", curbalance,
if iparamdesc.has_key('default') and iparamdesc.has_key('balance'):
print "param", iparamname, "has default value, can't be set to balance, stupid"
sys.exit(1)
pass
if iparamdesc.has_key('default'):
if iparamdesc['type'] == 'uint32_t' \
or iparamdesc['type'] == 'int32_t' \
or iparamdesc['type'] == 'uint64_t' \
or iparamdesc['type'] == 'int64_t':
if not isint(iparamdesc['default']):
print "param:", iparamname, "type:", iparamdesc['type'], \
"default:", iparamdesc['default'], "value type error."
sys.exit(1)
if debug:
print "default:", iparamdesc['default'],
else:
pass
if debug:
print
def parse(filename):
f = file(filename)
input=""
while True:
line = f.readline()
if len(line) == 0:
break
input += line
yacc.yacc(method='LALR')
yacc.parse(input, debug = 0)
check(curclasslist)
return curclasslist
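# Minimal usage sketch (hypothetical input file, assuming the grammar above;
# note that every parameter, including the last, is terminated by a comma):
#     class Search {
#         method query(string keyword, uint32_t count = default(10),);
#     };
# parse('search.api') would then return a structure like
#     [{'name': 'Search', 'methodlist': [{'name': 'query', 'paramlist':
#         [{'name': 'keyword', 'type': 'string'},
#          {'name': 'count', 'type': 'uint32_t', 'default': '10'}]}]}]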
| Python |
# -----------------------------------------------------------------------------
# cparse.py
#
# Code generator emitting Python client classes from the parsed IDL (header comment inherited from PLY's ANSI C example).
# -----------------------------------------------------------------------------
#
# TODO
# (1) list sub types
# (2) int-bitfield
#
import sys, os
import api_parse
class CodeGen:
__code = ""
__step = ""
__outfile = ""
    def __init__(self, outfile="", params_array=0):
        # merged constructor: the original second __init__ silently shadowed
        # a zero-argument one that only set __code and __step
        self.__code = ""
        self.__step = " "*4
        self.__outfile = outfile
        self.__params_array = params_array
def __del__(self):
pass
def __addcode(self, step, ecode):
# if len(ecode) > 80:
self.__code += self.__step * step + ecode + "\n"
def __printcode(self):
if len(self.__outfile) > 0:
# if os.path.exists(self.__outfile) :
# print "outfile exists, check the ",self.__outfile
f = open(self.__outfile, "w")
print >> f, self.__code
else:
print self.__code
def __python_type_check(self, paramname, mytype):
if mytype == "uint32_t" or mytype == "int32_t" or mytype == "uint64_t" or mytype == "int64_t" :
return "type(" + paramname + ") == int"
if mytype == "string" :
return "type(" + paramname + ") == str"
if mytype == "list":
return "type(" + paramname + ") == list"
return None
def parseapi(self, classlist):
self.__addcode(0, "# this is auto generated code by api-parser")
self.__addcode(0, "# you are not supposed to edit this file");
self.__addcode(0, "# feel free to mail scenbuffalo@gmail.com\n");
self.__addcode(0, "# import xhead_json\n");
for c in classlist:
iclassname = c['name']
iclassdesc = c['methodlist']
self.__addcode(0, "class "+iclassname+":");
self.__addcode(1, "def __init__(self):");
self.__addcode(2, "pass");
for m in iclassdesc:
imethodname = m['name']
imethoddesc = m['paramlist']
parampurelist = ""
paramlist = ""
balanceparam = ""
line = ""
for p in imethoddesc:
iparamname = p['name']
iparamdesc = p
if iparamdesc.has_key('default'):
pass
else:
paramlist += iparamname + ", "
parampurelist += iparamname + ", "
if iparamdesc.has_key('balance') and iparamdesc['balance']:
balanceparam = iparamname
for p in imethoddesc:
iparamname = p['name']
iparamdesc = p
if iparamdesc.has_key('default') and not iparamdesc.has_key('hide'):
paramlist += iparamname + " = "+iparamdesc['default']+", "
parampurelist += iparamname + ", "
else:
pass
# php is so tough
parampurelist = parampurelist.rstrip()[0:-1]
paramlist = paramlist.rstrip()[0:-1]
real_param = self.__params_array and "arrParam" or parampurelist
self.__addcode(1, "def "+"__check"+imethodname+"Param(self, "+real_param+"):");
line = "return 1"
for p in imethoddesc:
iparamname = self.__params_array and "arrParam['"+p['name']+"']" or p['name']
iparamdesc = p
typecheck = self.__python_type_check(iparamname, iparamdesc['type'])
if typecheck and not iparamdesc.has_key('hide'):
line += " and " + typecheck
line += ";"
self.__addcode(2, line)
real_param = self.__params_array and "arrParam" or paramlist
self.__addcode(1, "def "+imethodname+"Param(self, "+real_param+"):");
self.__addcode(2, "qPrama = {};")
self.__addcode(2, "if False == self."+"__check"+imethodname+"Param(" + parampurelist + ") :")
self.__addcode(3, "return False;")
for p in imethoddesc:
iparamname = self.__params_array and "arrParam['"+p['name']+"']" or p['name']
iparamdesc = p
if iparamdesc.has_key('default') and iparamdesc.has_key('hide'):
self.__addcode(2, "qPrama['"+p['name']+"'] " + "= " + iparamdesc['default'] +";")
else:
self.__addcode(2, "qPrama['"+p['name']+"'] " + "= " + iparamname +";")
self.__addcode(2, "return qPrama;")
real_param = self.__params_array and "arrParam" or paramlist
self.__addcode(1, "def "+imethodname+"(self, "+real_param+") :")
real_param = self.__params_array and "arrParam" or parampurelist
self.__addcode(2, "qPrama = self."+imethodname+"Param("+real_param+");")
self.__addcode(2, "if False == qPrama:")
self.__addcode(3, "return False;")
self.__addcode(2, "#xheadj = new xhead_json('"+iclassname+"');")
self.__addcode(2, "#return xheadj->talk(qPrama);")
self.__printcode()
if __name__ == "__main__":
if len(sys.argv) < 2:
print "python idl2php.py idlfile [paramsarray: 0|1]"
sys.exit(1)
myparams_array = 0
if len(sys.argv) == 3:
myparams_array = int(sys.argv[2])
ft = sys.argv[1].split('.')
if len(ft) < 2 or ft[-1] != "api":
print "idlfile nameformat error"
sys.exit(1)
out = ""
for t in ft[0:-1]:
out += str(t).capitalize() + '.'
out += "py"
cgen = CodeGen(outfile = out, params_array = myparams_array )
cgen.parseapi(api_parse.parse(sys.argv[1]))
| Python |
# -*- coding: utf-8 -*-
#socket server side
#pull in socket constructors and constants
import struct, socket, sys
if len(sys.argv) != 3 :
print "python queueReceiver.py port longconnect"
sys.exit(1)
#'' makes the server listen on all local interfaces
myHost = ''
#listen on a non-reserved port number
myPort = int(sys.argv[1])
longconnect = (0 != int(sys.argv[2]))
#create a TCP socket object
sockobj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sockobj.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#bind it to the port number
sockobj.bind((myHost, myPort))
#listen, allowing a backlog of 100 connections
sockobj.listen(100)
#loop until the process is terminated
count = 1
while True:
    #wait for the next client connection
connection, address = sockobj.accept()
    #the connection is a brand-new socket
print count, 'Server connected by ', address
count += 1
if not longconnect:
connection.close()
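# Minimal client sketch for exercising this receiver (the port must match the
# one given on the command line; 2011 below is just an assumed example):
#   import socket
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(("127.0.0.1", 2011))
#   c.close()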
| Python |
import json
dd={}
dd["termlist"] = []
term = {}
term["term"] = "a[1]"
term["weight"] = 50
dd["termlist"].append(term)
term = {}
term["TERM"] = "b[1]"
term["WEIGHT"] = 50
dd["termlist"].append(term)
filt = []
f_type = {}
f_type["method"] = "EQUAL"
f_type["field"] = "type"
f_type["value"] = 4
filt.append(f_type)
f_duration = {}
f_duration["method"] = "ZONE"
f_duration["field"] = "duration"
f_duration["min"] = 4
f_duration["max"] = 8
filt.append(f_duration)
dd["filter"] = filt
dd["ranking"] = {}
dd["merger"] = "weight_merge"
limit = {}
limit["min"] = 0
limit["max"] = 15
dd["limit"] = limit
dd["groupby"] = "type"
#dd["sortby"] = "id"
print json.dumps(dd)
| Python |
import socket, struct, sys, json
if len(sys.argv) != 2:
print "python test.py query"
sys.exit(1)
FMT_XHEAD = "I16sIIII"
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", 1983));
dd = {}
dd['QUERY'] = sys.argv[1]
jsonstr = json.dumps(dd)
sbuf = struct.pack(FMT_XHEAD, 1234, "pyclient", 0, 0, 0, len(jsonstr))
sbuf += jsonstr
sock.send(sbuf)
rbuf = sock.recv(36)
log_id, srvname, headid, headversion, status, detail_len = struct.unpack(FMT_XHEAD, rbuf)
print "log_id[%u] srvname[%s] detail_len[%u]" % (log_id, srvname, detail_len)
if detail_len > 0:
rrbuf = sock.recv(detail_len)
print struct.unpack("I"*(detail_len/4), rrbuf)
| Python |
import socket, struct, sys, json
if len(sys.argv) != 4:
print "python flexseclt.py begin end step"
sys.exit(1)
if int(sys.argv[1]) > int(sys.argv[2]) :
print "begin > end"
sys.exit(1)
begin = int(sys.argv[1])
end = int(sys.argv[2]) + 1
step = int(sys.argv[3])
block_id = 0
FMT_XHEAD = "I16sIIII"
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", 1984));
for x in range(begin, end):
for k in range(0, 10):
dd = {}
dd['OPERATION'] = "INSERT"
dd['id'] = x + (step * 10 + k) * ( end-1 )
dd['uid'] = x
dd['title'] = "awesome#%u# search#%u# framwork#%u# " % (x, x, x)
dd['type'] = x%16
dd['content'] = "a[%u] b[%u] c[%u] d[%u] e[%u] f[%u] g[%u] h[%u] i[%u] j[%u]"\
" k[%u] l[%u] m[%u] n[%u] o[%u] p[%u] q[%u] r[%u] s[%u] t[%u] " % \
(x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,)
dd['tags'] = ["nasty*"+str(x), "sexy*"+str(x), "pretty*"+str(x)]
jsonstr = json.dumps(dd)
# print jsonstr
block_id += 1
sbuf = struct.pack(FMT_XHEAD, 123, "pyclient", 0, block_id, 0, len(jsonstr))
sbuf += jsonstr
sock.send(sbuf)
rbuf = sock.recv(36)
# print struct.unpack(FMT_XHEAD, rbuf)
| Python |
import socket, struct, sys, json
if len(sys.argv) != 5:
print "python restore.py ip port begin end"
sys.exit(1)
FMT_XHEAD = "I16sIIII"
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((sys.argv[1], int(sys.argv[2])));
dd = {}
dd['OPERATION'] = "RESTORE"
dd['id_list'] = range(int(sys.argv[3]), 1 + int(sys.argv[4]))
jsonstr = json.dumps(dd)
sbuf = struct.pack(FMT_XHEAD, int(sys.argv[4]), "pyclient", 0, 0, 0, len(jsonstr))
sbuf += jsonstr
sock.send(sbuf)
rbuf = sock.recv(36)
print struct.unpack(FMT_XHEAD, rbuf)
| Python |
import socket, struct, sys, json
if len(sys.argv) != 5:
print "python delete.py ip port begin end"
sys.exit(1)
FMT_XHEAD = "I16sIIII"
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((sys.argv[1], int(sys.argv[2])));
dd = {}
dd['OPERATION'] = "DELETE"
dd['id_list'] = range(int(sys.argv[3]), int(sys.argv[4]))
jsonstr = json.dumps(dd)
sbuf = struct.pack(FMT_XHEAD, int(sys.argv[4]), "pyclient", 0, 0, 0, len(jsonstr))
sbuf += jsonstr
sock.send(sbuf)
rbuf = sock.recv(36)
print struct.unpack(FMT_XHEAD, rbuf)
| Python |
import socket, struct, sys, json, creat_sign
def query(sock, strQuery) :
FMT_XHEAD = "I16sIIII"
dd = {}
dd['QUERY'] = strQuery
jsonstr = json.dumps(dd)
sbuf = struct.pack(FMT_XHEAD, 1234, "pyclient", 0, 0, 0, len(jsonstr))
sbuf += jsonstr
sock.send(sbuf)
rbuf = sock.recv(36)
log_id, srvname, headid, headversion, status, detail_len = struct.unpack(FMT_XHEAD, rbuf)
#print "log_id[%u] srvname[%s] detail_len[%u]" % (log_id, srvname, detail_len)
if detail_len > 0:
rrbuf = sock.recv(detail_len)
return struct.unpack("I"*(detail_len/4), rrbuf)
else:
return ()
if __name__ == "__main__":
if len(sys.argv) != 3:
print "python test.py query beginInnerID"
sys.exit(1)
strstr = "abcdefghijklmnopqrst"
innerID = int(sys.argv[2])
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", 1983));
for x in range(1, 999999):
for y in range(0,20):
strQuery = "%s[%u]" % (strstr[y], x)
postinglist = query(sock, strQuery)
if (postinglist):
prev = postinglist[0]
for z in range(1, len(postinglist)):
assert postinglist[z] == prev - 1
prev = postinglist[z]
else:
(s1, s2) = creat_sign.creat_sign(strQuery, len(strQuery))
print strQuery, (s2<<32) + s1, "------"
| Python |
# -*- coding: utf-8 -*-
import socket, struct, sys, json
if len(sys.argv) != 2:
print "python test.py query"
sys.exit(1)
dd = {}
dd["termlist"] = []
term1 = {}
term1["term"] = "美女"
term1["weight"] = 30
term1["synonyms"] = ["美人"]
dd["termlist"].append(term1)
term2 = {}
term2["term"] = sys.argv[1]
term2["weight"] = 10
dd["termlist"].append(term2)
term3 = {}
term3["term"] = "烦恼"
term3["weight"] = 60
term3["synonyms"] = []
dd["termlist"].append(term3)
#filtlist = []
#f_type = {}
#f_type["method"] = 0
#f_type["field"] = "type"
#f_type["value"] = 1
#filtlist.append(f_type)
##f_duration = {}
##f_duration["method"] = 3
##f_duration["field"] = "reserved"
##f_duration["min"] = 4
##f_duration["max"] = 8
##filtlist.append(f_duration)
#dd["filtlist"] = filtlist
#
#ranklist = []
#r_type = {}
#r_type["method"] = 1
#r_type["field"] = "type"
#r_type["value"] = 4
#ranklist.append(r_type)
##r_duration = {}
##r_duration["method"] = 3
##r_duration["field"] = "duration"
##r_duration["min"] = 4
##r_duration["max"] = 8
##ranklist.append(r_duration)
#dd["ranklist"] = ranklist
jsonstr = json.dumps(dd)
print jsonstr
FMT_XHEAD = "I16sIIII"
sbuf = struct.pack(FMT_XHEAD, 1234, "pyclient", 0, 0, 0, len(jsonstr))
sbuf += jsonstr
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", 1983));
sock.send(sbuf)
rbuf = sock.recv(36)
log_id, srvname, headid, headversion, status, detail_len = struct.unpack(FMT_XHEAD, rbuf)
print "log_id[%u] srvname[%s] detail_len[%u]" % (log_id, srvname, detail_len)
if detail_len > 0:
rrbuf = sock.recv(detail_len)
print struct.unpack("I"*(detail_len/4), rrbuf)
| Python |
# probe whether the message-queue server process is alive
import json, struct, sys, socket, random, ciclient_config
class ciclient:
def __init__(self):
self.ServerList = ciclient_config.ciqueue_server_list
self.sock = None
def _rand_connect(self):
si = random.randint(0, len(self.ServerList))
rand_server_list = self.ServerList[si:] + self.ServerList[:si]
for serv in rand_server_list:
if self._connect(serv["Host"], serv["Port"]):
return True
return False
def _connect(self, strHost, intPort):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(0.1)
try:
self.sock.connect((strHost, intPort));
self.sock.settimeout(1)
return True
except socket.timeout:
print "connect TimeOut"
self.sock.close()
self.sock = None
return False
except socket.error, arg:
(errno, err_msg) = arg
self.sock.close()
self.sock = None
print "Connect server failed: %s, errno=%d" % (err_msg, errno)
return False
def _check_connect(self):
rstr="a"
if self.sock == None:
return self._rand_connect()
else :
try:
self.sock.setblocking(False)
rstr= self.sock.recv(1)
if rstr == "" or len(rstr) == 1:
print "remote server closed or left data len[%u]" % (len(rstr),)
self.sock.close()
self.sock = None
return self._rand_connect()
except socket.error, e:
self.sock.settimeout(1)
print 'rstr[%s] len[%u] socket:%s' % (rstr, len(rstr), e,)
return True
def monitor(self, log_id):
FMT_XHEAD = "I16sIII"
sbuf = struct.pack(FMT_XHEAD, log_id, "pymonitor", 0, 0, 0)
if self._check_connect():
try:
self.sock.send(sbuf)
except socket.error, arg:
print "error message [%s]" % (arg, )
return False
rbuf = self.sock.recv(32)
log_id, srvname, version, reserved, detail_len = struct.unpack(FMT_XHEAD, rbuf)
print "logid[%u] srvname[%s] version[%u] reserved[%u] detail_len[%u]" % (log_id, \
srvname, version, reserved, detail_len)
return reserved == 0
else:
return False
if __name__ == "__main__":
mycc = ciclient()
for x in range(int(sys.argv[1])):
mycc.monitor(x)
| Python |
import json, struct, sys, socket, random, ciclient_config
class ciclient:
def __init__(self):
self.ServerList = ciclient_config.ciqueue_server_list
self.sock = None
def _rand_connect(self):
si = random.randint(0, len(self.ServerList))
rand_server_list = self.ServerList[si:] + self.ServerList[:si]
for serv in rand_server_list:
if self._connect(serv["Host"], serv["Port"]):
return True
return False
def _connect(self, strHost, intPort):
# print "connect--------------------------------"
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(0.1)
try:
self.sock.connect((strHost, intPort));
self.sock.settimeout(1)
return True
except socket.timeout:
# print "connect TimeOut"
self.sock.close()
self.sock = None
return False
except socket.error, arg:
(errno, err_msg) = arg
self.sock.close()
self.sock = None
# print "Connect server failed: %s, errno=%d" % (err_msg, errno)
return False
def _check_connect(self):
rstr="a"
if self.sock == None:
return self._rand_connect()
else :
try:
self.sock.setblocking(False)
rstr= self.sock.recv(1)
if rstr == "" or len(rstr) == 1:
# print "remote server closed or left data len[%u]" % (len(rstr),)
self.sock.close()
self.sock = None
return self._rand_connect()
except socket.error, e:
self.sock.settimeout(1)
# print 'rstr[%s] len[%u] socket:%s' % (rstr, len(rstr), e,)
return True
def commit(self, log_id, queue_name, operation, dict4commit):
FMT_XHEAD = "I16sIIII"
dd = {}
dd['__QUEUE_NAME__'] = queue_name
dd['__OPERATION__'] = operation
dd['__OPERATION_BODY__'] = dict4commit
jsonstr = json.dumps(dd)
sbuf = struct.pack(FMT_XHEAD, log_id, "pyclient", 0, 0, 0, len(jsonstr))
sbuf += jsonstr
if self._check_connect():
try:
self.sock.send(sbuf)
except socket.error, arg:
# print "error message [%s]" % (arg, )
return False
rbuf = self.sock.recv(36)
log_id, srvname, version, reserved, status, detail_len = struct.unpack(FMT_XHEAD, rbuf)
# print "logid[%u] srvname[%s] version[%u] reserved[%u] status[%u] detail_len[%u]" % (log_id, \
# srvname, version, reserved, status, detail_len)
return reserved == 0
else:
return False
if __name__ == "__main__":
mycc = ciclient()
extra = "XXOO"*10
for x in range(int(sys.argv[1]), int(sys.argv[2])):
dd={}
dd["vid"] = x
dd["title"] = "beauty girls [%08u] " % (x,)
dd["title"] += extra
mycc.commit(x, "test", "test", dd)
| Python |
ciqueue_server_list = [ \
{"Host" : "127.0.0.1", "Port" : 2011}, \
]
| Python |
# -*- coding:utf-8 -*-
# monitoring script for the sender program
# only message channels with enable == 1 in the config are monitored
# when a channel goes from healthy to stalled, an alert is sent
# when a channel goes from stalled back to healthy, an all-clear is sent
# known limitation: a channel that is merely lagging far behind is not
# detected; while the sender performs well this never shows up. if that
# matters, compare the queue's latest offset with each channel's current offset.
import json, sys, time, os
def get_queue_offset(queue_file_dir, queue_file_name):
    # read the latest offset of this message queue,
    # identified by the highest file number plus that file's length,
    # e.g.:
# {'last_file_len': 3997440L, 'last_file_no': 0}
filelist = os.listdir(queue_file_dir)
max_file_no = -1
prefix = queue_file_name+"."
for qfile in filelist:
if 0 == qfile.find(prefix):
            # found one queue file
cur_file_no = int(qfile.replace(prefix, ''))
max_file_no = max(cur_file_no, max_file_no)
assert max_file_no >= 0
last_file_len = os.path.getsize(queue_file_dir + "/" + prefix + str(max_file_no))
queue_last_offset = {}
queue_last_offset['last_file_no'] = max_file_no
queue_last_offset['last_file_len'] = last_file_len
return queue_last_offset
def get_channel_offset(offset_file):
    # read one channel's offset file from the offset directory,
    # collect its key/value pairs into a dict and return it;
    # the dict looks like:
# {'offset': 833523976, 'block_id': 1615361, 'file_no': 4}
f=file(offset_file)
    qoffset = {}
    for _ in range(3):  # each offset file holds three "key : value" lines
        lst = f.readline().strip().replace(" ", "").split(':')
        qoffset[lst[0]] = int(lst[1])
return qoffset
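# Example offset file contents this parser expects (three "key : value" lines;
# the values below are made up for illustration):
#   file_no : 4
#   offset : 833523976
#   block_id : 1615361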
if len(sys.argv) != 3:
print "python sender-monitor.py sender-running-dir check-interval-seconds"
sys.exit(1)
sender_runing_dir = sys.argv[1]
check_interval_seconds = int(sys.argv[2])
if check_interval_seconds < 10 :
print "check-interval-seconds tooo short, at least above 10s"
sys.exit(1)
config_file = sender_runing_dir+"/conf/sender.config.json"
cur_queue_status={}
try:
json_config = json.load(file(config_file))
except IOError:
print "file[%s] NOT exist." % (config_file)
sys.exit(1)
queue_file_dir = sender_runing_dir + "/data/" + json_config["qpath"]
queue_last_offset = get_queue_offset(queue_file_dir, json_config["qfile"])
while True:
    # re-read the config file on every iteration
try:
json_config = json.load(file(config_file))
except IOError:
print "file[%s] NOT exist." % (config_file)
sys.exit(1)
    # fetch the queue's latest offset
cur_last_offset = get_queue_offset(queue_file_dir, json_config["qfile"])
queue_changed = (queue_last_offset != cur_last_offset)
queue_last_offset = cur_last_offset
    # debug output
print "queue_changed: %u" % (queue_changed, )
for x in json_config["followers"]:
        # check each channel only when the queue's latest offset has changed (new messages arrived)
if x["enable"] and queue_changed:
            # the channel is enabled, so inspect its offset file
offset_file = sender_runing_dir+"/offset/"+x['name']+".offset"
new_qoffset = get_channel_offset(offset_file)
if cur_queue_status.has_key(x['name']):
old_qoffset = cur_queue_status[x['name']]['qoffset']
status_ok = cur_queue_status[x['name']]['status_ok']
if new_qoffset == old_qoffset and status_ok == True:
                    # healthy -> stalled: send an alert
print "queue[%s] update STOP. remote[%s:%u]" % (x['name'], x['host'], x['port'],)
cur_queue_status[x['name']]['status_ok'] = False
elif new_qoffset == old_qoffset and status_ok == False:
                    # already known to be stalled; debug output only
print "I know this guy[%s - %s:%u] in trouble now." % (x['name'], x['host'], x['port'],)
elif new_qoffset != old_qoffset and status_ok == False:
                    # stalled -> healthy: send an all-clear
print "queue[%s] update OK. remote[%s:%u]" % (x['name'], x['host'], x['port'],)
cur_queue_status[x['name']]['qoffset'] = new_qoffset
cur_queue_status[x['name']]['status_ok'] = True
elif new_qoffset != old_qoffset and status_ok == True:
                    # already known to be healthy; debug output only
print "I know this guy[%s - %s:%u] is OK." % (x['name'], x['host'], x['port'],)
cur_queue_status[x['name']]['qoffset'] = new_qoffset
else:
                # channel just enabled, or first run of the monitor
qstatus = {}
qstatus["qoffset"] = new_qoffset
qstatus["status_ok"] = True
cur_queue_status[x['name']] = qstatus
    # sleep for the given interval, in seconds
time.sleep(check_interval_seconds)
| Python |
#!/usr/bin/env python
from setuptools import setup
import os
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name='Flipkart',
version='1.0',
packages=['fk'],
description='Flipkart python book search API',
author='Vengadanathan Srinivasan',
license = "BSD",
author_email='fantastic.next@gmail.com',
url='https://code.google.com/p/flipkart-books-python-api/wiki/README',
keywords = "Flipkart python book search API" ,
install_requires=['beautifulsoup4'],
long_description=read('README.txt'),
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"License :: OSI Approved :: BSD License",
"Operating System :: Microsoft",
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS"
],
) | Python |
#Flipkart Books API
#Author Vengadanathan
import urllib2
import urllib
from bs4 import BeautifulSoup
#class for storing each item of the book search result
class Item:
pass
class Flipkart:
def __init__(self):
pass
    #method that searches Flipkart for books matching a given query
    #arguments: query = book name or part of a book name
    #page is the page number to retrieve; increment it until you receive empty results, which marks the last page
    #default page value is 1
def search(self,query,page=1):
items = []
start = 1 + ( (page-1) * 24)
#flipkart URL used for querying the book
url = urllib2.urlopen("http://m.flipkart.com/m/m-search-all/searchCategory?q="+urllib.quote_plus(query)+"&store=buk&count=24&otracker=search&start="+str(start)).read()
soup = BeautifulSoup(url)
products = soup.find_all("li",{"class":"category-search-result"})
p = soup.find_all("span",{"class":"note"})[0].string.strip()
total = [int(s) for s in p.split('(')[1].split() if s.isdigit()][0]
print total
for product in products:
image = product.find_all("img",{"class":"product-image"})[0]['data-src']
product = product.find_all("div",{"class":"product-details"})[0]
title = product.find_all("span",{"class":"product-title"})[0].string.strip()
product_subtitle = product.find_all("span",{"class":"product-subtitle"})[0].string.strip()
product_mrp = None
product_fsp = None
try:
product_fsp = product.find_all("span",{"class":"product-price"})[0].find_all("span",{"class":"price-fsp"})[0].string.strip()
except:
pass
try:
product_mrp = product.find_all("span",{"class":"product-price"})[0].find_all("del",{"class":"price-mrp"})[0].string.strip()
except:
pass
item = Item()
item.image = image
item.title = title
item.product_subtitle = product_subtitle
item.product_fsp = product_fsp
item.product_mrp = product_mrp
items.append(item)
return items
#example program
#from Flipkart import Flipkart
#from Flipkart import Item
# f = Flipkart()
# items = f.search("data structure")
# for item in items:
# print item.image
# print item.title
# print item.product_subtitle
# print item.product_fsp
# print item.product_mrp
| Python |
#!/usr/bin/python
#-*- coding: utf-8 -*-
import cv
def take(fileName):
cam = cv.CaptureFromCAM(0)
photo = cv.QueryFrame(cam)
fileName = fileName+'.png'
cv.SaveImage(fileName, photo)
return
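# Usage sketch (hypothetical file name): take('snapshot') grabs one frame from
# the default camera and writes it to snapshot.png.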
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pygame.mixer
import pygame.time
def beep(fileName, delay):
pygame.init()
beep = pygame.mixer.Sound(fileName)
beep.play()
pygame.time.delay(delay)
return
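# Usage sketch (hypothetical file name): beep('alert.wav', 1000) plays the
# sound and then blocks for 1000 ms so playback can finish.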
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from numpy import *
from cv import *
from numpy import *
from numpy.linalg import *
from numpy.dual import eig
from numpy.oldnumeric.mlab import cov
def buyumebul(dosyaadi1,dosyaadi2,refadi1,refadi2):  # estimate magnification by comparing lensed images against reference images
sabit = 150
refres1 = LoadImage('data/'+refadi1)
refres2 = LoadImage('data/'+refadi2)
res1 = LoadImage('data/'+dosyaadi1)
res2 = LoadImage('data/'+dosyaadi2)
refgri1 = gri(refres1)
refgri2 = gri(refres2)
I = gri(res1)
J = gri(res2)
ref1iki = ikilik(refgri1,sabit)
ref2iki = ikilik(refgri2,sabit)
I = ikilik(I,sabit)
J = ikilik(J,sabit)
ref1kenar = kenarbul(ref1iki)
ref2kenar = kenarbul(ref2iki)
Ikenar = kenarbul(I)
Jkenar = kenarbul(J)
ref1,x1r,y1r = kutucukyarat(ref1kenar)
ref2,x2r,y2r = kutucukyarat(ref2kenar)
Ikutu,x1,y1 = kutucukyarat(Ikenar)
Jkutu,x2,y2 = kutucukyarat(Jkenar)
a,b = oranbul(x1r,y1r,x2r,y2r)
buyy1,buyx1 = oranbul(x1,y1,x1r,y1r)
#sx1,sy1 = GetSize(res1);sx1 *= buyx1;sy1 *= buyy1
#sx2,sy2 = GetSize(res2);sx2 *= buyx2;sy2 *= buyy2
#yeniresim1 = CreateImage((int(sx1),int(sy1)), IPL_DEPTH_8U, 1)
#yeniresim2 = CreateImage((int(sx2),int(sy2)), IPL_DEPTH_8U, 1)
#Resize(ref1iki,yeniresim1,CV_INTER_CUBIC)
#Resize(ref2iki,yeniresim2,CV_INTER_CUBIC)
    print ' Magnification of the first image along X: ',buyx1,' along Y: ',buyy1
    print ' Focal length along the X axis: ',int(odakbul(buyx1))
    print ' Focal length along the Y axis: ',int(odakbul(buyy1))
#resmigoster(ref1iki,'Sayısal düzen: on.jpg')
#resmigoster(ref2iki,'Sayısal düzen: arka.jpg')
    resmigoster(I,'Binarized: onmercek.jpg')
#resmigoster(J,'Sayısal düzen: arkamercek.jpg ')
    resmigoster(Ikenar,'Edges of onmercek.jpg')
#resmigoster(Jkenar,'arkamercek.jpg için kenarlar')
#resmigoster(ref1kenar,'on.jpg için kenarlar')
#resmigoster(ref2kenar,'arka.jpg için kenarlar')
    resmigoster(Ikutu,'Bounding box of onmercek.jpg')
#resmigoster(Jkutu,'arkamercek.jpg için kutucuk')
#resmigoster(ref1,'on.jpg için kutucuk')
#resmigoster(ref2,'arka.jpg için kutucuk')
#resmigoster(yeniresim1,'Büyütülmüş yeni resim 1')
#resmigoster(yeniresim2,'Büyütülmüş yeni resim 2')
return
def odakbul(m):  # focal length from magnification m via the thin-lens relation
d = 36
do = 71
odak = m*do*d/(do+d)/(m-1)
return odak
def oranbul(x1,y1,x2,y2):  # ratio of coordinate extents between two point sets
buyx = float(amax(x1)-amin(x1))/(amax(x2)-amin(x2))
buyy = float(amax(y1)-amin(y1))/(amax(y2)-amin(y2))
return buyx,buyy
def kutucukyarat(resim):  # collect box points along the centre row and column of an edge image
x=[];y=[];count=0
araresim = asarray(GetMat(resim))
boyutlar = araresim.shape
kutular = CreateImage(GetSize(resim), IPL_DEPTH_8U, 1)
j = boyutlar[1]/2
for i in xrange(0,boyutlar[0]):
if araresim[i][j]>0:
if araresim[i-1][j-1]<255:
kutular[i,j]=255
x.append(i)
y.append(j)
i = boyutlar[0]/2
for j in xrange(0,boyutlar[1]):
if araresim[i][j]>0:
if araresim[i-1][j-1]<255:
kutular[i,j]=255
x.append(i)
y.append(j)
return kutular,x,y
def ikilik(resim,sabit):  # binarize the image with threshold sabit
[nx,ny] = GetSize(resim)
araresim = GetMat(resim)
for i in xrange(0,nx-1):
for j in xrange(0,ny-1):
if araresim[j,i] > sabit:
araresim[j,i] = 255
else:
araresim[j,i] = 0
return resim
def kenarbul(resim):  # smooth repeatedly, then detect edges with Canny
for i in xrange(5):
Smooth(resim,resim,param1=7)
boyutlar = GetSize(resim)
kenarlar = CreateImage(boyutlar, IPL_DEPTH_8U, 1)
Canny(resim,kenarlar,40.0,60.0)
return kenarlar
def resmigoster(resim,baslik):  # show image resim in a window titled baslik
NamedWindow(baslik,1)
ShowImage(baslik,resim)
WaitKey(0)
return True
def buyume(x1,y1,x2,y2):  # least-squares magnification estimate along X and Y
if len(x1)>=len(x2):
sinir = len(x2)
else:
sinir = len(x1)
A = zeros((sinir,2))
b = zeros((sinir,1))
for i in xrange(0,3):
if i%2==0:
A[i,0] = x1[i]
b[i] = x2[i]
else:
A[i,1] = y1[i]
b[i] = y2[i]
[buyx,buyy] = dot(pinv(dot(A.T,A)),dot(A.T,b))
return buyx,buyy
def gri(resim):  # convert the image to grayscale
gri = CreateImage(GetSize(resim), IPL_DEPTH_8U, 1)
CvtColor(resim, gri,CV_RGB2GRAY)
return gri
def ana():
buyumebul('onmercek250mmxy.jpg','arkamercekxy.jpg','on.jpg','arka.jpg')
buyumebul('onmercek150mmx.jpg','arkamercekxy.jpg','on.jpg','arka.jpg')
return True
if __name__ == '__main__':
ana() | Python |
#!/usr/bin/python
## Revision history ############################################################
__author__ = 'Wouter Eerdekens <info@fks.be>'
__date__ = '2011-08-12'
__version__ = '0.1'
__history__ = """
2011-08-12 - Prepare for initial release <jeroen.hooyberghs@fks.be>
2006-07-26 - initial version.
"""
## Imports #####################################################################
import cgi, cgitb
import pki.cgi, pki.util
## We're being called as a script ##############################################
if __name__ == '__main__':
cgitb.enable()
form = cgi.FieldStorage()
if form.has_key('action') and form.has_key('cn'):
action = form['action'].value
cn = pki.util.strip_invalid(form['cn'].value)
if cn:
pki.cgi.start_html('Confirm: ' + action + ' certificate ' + cn)
pki.cgi.show_confirm(action, cn)
pki.cgi.end_html()
else:
pki.cgi.start_html('Invalid common name', True)
pki.cgi.show_error('Please specify a valid common name.')
pki.cgi.show_link('Go Back')
pki.cgi.end_html()
else:
pki.cgi.start_html('Invalid call', True)
pki.cgi.show_error('You cannot run this script directly.')
pki.cgi.show_link('Go Back')
pki.cgi.end_html()
| Python |
#!/usr/bin/python
## Revision history ############################################################
__author__ = 'Wouter Eerdekens <info@fks.be>'
__date__ = '2011-08-12'
__version__ = '0.1'
__history__ = """
2011-08-12 - Prepare for initial release <jeroen.hooyberghs@fks.be>
2006-07-26 - initial version.
"""
## Imports #####################################################################
import cgitb
import os
from os import path
import pki.util, pki.cmd, pki.cgi
## We're being called as a script ##############################################
if __name__ == '__main__':
cgitb.enable()
config = pki.util.parse_config()
if not config:
pki.cgi.start_html('Error parsing configuration', True)
        pki.cgi.show_error('There was an error parsing the Vinty' \
+ ' configuration. Please check your setup.')
pki.cgi.end_html()
else:
if pki.cmd.check_pki(config):
            pki.cgi.start_html('Vinty PKI already initialised', True)
            pki.cgi.show_error('The Vinty PKI structure is already initialised.' \
' Remove the previous installation first.')
pki.cgi.show_link('OK')
pki.cgi.end_html()
else:
result, msg = pki.cmd.create_pki(config)
if result:
pki.cgi.start_html('Vinty PKI initialisation complete')
pki.cgi.show_info('You can now start using the Vinty Web Frontend.')
pki.cgi.show_link('OK')
pki.cgi.end_html()
else:
pki.cgi.start_html('Vinty PKI initialisation failed', True)
pki.cgi.show_error(msg)
pki.cgi.show_link('Go Back')
pki.cgi.end_html()
| Python |
#!/usr/bin/python
## Revision history ############################################################
__author__ = 'Wouter Eerdekens <we@fks.be>'
__date__ = '2011-08-12'
__version__ = '0.1'
__history__ = """
2011-08-12 - Prepare for initial release <jeroen.hooyberghs@fks.be>
2006-07-26 - initial version.
This code provides the functions available via the 'main' page.
"""
## Imports #####################################################################
import cgi, cgitb
import commands, os, sys, tempfile, zipfile
from os import path
import pki, pki.util, pki.cmd, pki.cgi
## Functions ###################################################################
def create_cert(form, config):
cn = pki.util.strip_invalid(form['cn'].value)
if not cn:
pki.cgi.start_html('Invalid common name', True)
pki.cgi.show_error('Please specify a valid common name.')
pki.cgi.show_link('Go Back')
pki.cgi.end_html()
else:
type = form['type'].value
result, msg = pki.cmd.create_cert(config, cn, type)
if result:
serial = pki.util.serial_from_cn(config, cn)
pki.cgi.start_html('Certificate creation complete')
pki.cgi.show_cert_info([('Common name', cn),
('Serial', serial),
('Certificate type', type)])
pki.cgi.show_link('OK')
pki.cgi.end_html()
else:
pki.cgi.start_html('Certificate creation failed', True)
pki.cgi.show_error('<pre>' + msg + '</pre>')
pki.cgi.show_link('Go Back')
pki.cgi.end_html()
# ------------------------------------------------------------------------------
def download_cert(form, config):
cn = form['cn'].value
serial = pki.util.serial_from_cn(config, cn)
files = pki.util.cert_files(config, cn, serial, True)
try:
name = path.join(tempfile.mkdtemp(), cn + '.zip')
zip = zipfile.ZipFile(name, 'w')
for file in files:
zip.write(file, path.basename(file))
zip.close()
except:
pki.cgi.start_html('Internal error', True)
pki.cgi.show_error(str(sys.exc_info()[1]))
pki.cgi.show_link('Go Back')
pki.cgi.end_html()
else:
print 'Content-Type: application/zip'
print 'Content-Disposition: attachment;filename=' + cn + '.zip'
print
        sys.stdout.write(open(name, 'rb').read())  # raw bytes; print would append a stray newline to the zip
os.remove(name)
os.rmdir(path.dirname(name))
# ------------------------------------------------------------------------------
def renew_cert(form, config):
cn = form['cn'].value
oldserial = pki.util.serial_from_cn(config, cn)
type = pki.util.cert_type(config, oldserial, pki.CERT_VALID)
result, msg = pki.cmd.renew_cert(config, cn, type)
if result:
newserial = pki.util.serial_from_cn(config, cn)
pki.cgi.start_html('Certificate renewal complete')
pki.cgi.show_cert_info([('Common name', cn),
('Old serial', oldserial),
('New serial', newserial),
('Certificate type', type)])
pki.cgi.show_link('OK')
pki.cgi.end_html()
else:
pki.cgi.start_html('Certificate renewal failed', True)
pki.cgi.show_error(msg)
pki.cgi.show_link('Go Back')
pki.cgi.end_html()
# ------------------------------------------------------------------------------
def revoke_cert(form, config):
cn = form['cn'].value
serial = pki.util.serial_from_cn(config, cn)
type = pki.util.cert_type(config, serial, pki.CERT_VALID)
result, msg = pki.cmd.revoke_cert(config, cn)
if result:
pki.cgi.start_html('Certificate revocation complete')
pki.cgi.show_cert_info([('Common name', cn),
('Serial', serial),
('Certificate type', type)])
pki.cgi.show_link('OK')
pki.cgi.end_html()
else:
pki.cgi.start_html('Certificate revocation failed', True)
pki.cgi.show_error(msg)
pki.cgi.show_link('Go Back')
pki.cgi.end_html()
# ------------------------------------------------------------------------------
def pw_add(form, config):
"""
pw_add : add password to certificate key
"""
cn = form['cn'].value
if form.has_key('password') :
pw = form['password'].value
else :
pw = ''
keyDir = config.get('keys', 'path')
keyFile = path.join(keyDir,cn + '.key')
tmpFile = keyFile + '.tmp'
if pki.util.is_private_key(keyFile) :
if not pki.util.is_encrypted(keyFile) :
try:
pwFd, pwFname = tempfile.mkstemp()
os.write(pwFd, pw)
os.close(pwFd)
except:
sys.stderr.write('Problem creating secure temporary file...\n')
sys.exit(1)
else:
cmd = pki.OPENSSL_COMMANDS['set_passphrase'] \
% { 'openssl' : config.get('openssl', 'path'),
'pwfile' : pwFname,
'inkey' : keyFile,
'outkey' : tmpFile
}
status, output = commands.getstatusoutput(cmd)
os.remove(pwFname)
if status:
pki.cgi.start_html('Password addition')
pki.cgi.show_error(output)
pki.cgi.show_link('Go Back')
pki.cgi.end_html()
sys.stderr.write('Problem setting passphrase:\n' + output)
else:
try:
os.remove(keyFile)
os.rename(tmpFile, keyFile)
pki.cgi.start_html('Password added')
pki.cgi.show_link('OK')
pki.cgi.end_html()
except:
sys.stderr.write('Problem storing new keyFile ' + keyFile + '\n')
else :
            output = keyFile + ' is password protected: first remove the password'
pki.cgi.start_html('Password addition')
pki.cgi.show_error(output)
pki.cgi.show_link('Go Back')
pki.cgi.end_html()
else :
        output = keyFile + ' is not a key: software error'
pki.cgi.start_html('Password addition')
pki.cgi.show_error(output)
pki.cgi.show_link('Go Back')
pki.cgi.end_html()
# ------------------------------------------------------------------------------
def pw_remove(form, config):
"""
pw_remove : remove password from certificate key
"""
    cn = pki.util.strip_invalid(form['cn'].value)  # sanitise: cn is used to build the key path
if form.has_key('password') :
pw = form['password'].value
else :
pw = ''
keyDir = config.get('keys', 'path')
keyFile = path.join(keyDir,cn + '.key')
tmpFile = keyFile + '.tmp'
if pki.util.is_private_key(keyFile) :
if pki.util.is_encrypted(keyFile) :
try:
pwFd, pwFname = tempfile.mkstemp()
os.write(pwFd, pw)
os.close(pwFd)
except:
sys.stderr.write('Problem creating secure temporary file...\n')
sys.exit(1)
else:
cmd = pki.OPENSSL_COMMANDS['del_passphrase'] \
% { 'openssl' : config.get('openssl', 'path'),
'pwfile' : pwFname,
'inkey' : keyFile,
'outkey' : tmpFile
}
status, output = commands.getstatusoutput(cmd)
os.remove(pwFname)
if status:
pki.cgi.start_html('Password removal')
                    pki.cgi.show_error('Wrong password: decryption failed')
pki.cgi.show_link('Go Back')
pki.cgi.end_html()
else:
try:
os.remove(keyFile)
os.rename(tmpFile, keyFile)
pki.cgi.start_html('Password removed')
pki.cgi.show_link('OK')
pki.cgi.end_html()
except:
sys.stderr.write('Problem storing new keyFile ' + keyFile + '\n')
else :
output = keyFile + ' is not password protected'
pki.cgi.start_html('Password removal')
pki.cgi.show_error(output)
pki.cgi.show_link('Go Back')
pki.cgi.end_html()
else :
        output = keyFile + ' is not a key: software error'
pki.cgi.start_html('Password removal')
pki.cgi.show_error(output)
pki.cgi.show_link('Go Back')
pki.cgi.end_html()
# ------------------------------------------------------------------------------
def publish_crl(config):
if config.has_section('scripts') and \
config.has_option('scripts', 'publish_crl'):
script = config.get('scripts', 'publish_crl')
if path.isfile(script):
crl = config.get('ca','crl')
status, output = commands.getstatusoutput(script + ' ' + crl)
if status:
pki.cgi.start_html('Problem executing script', True)
pki.cgi.show_error(output)
pki.cgi.show_link('Go Back')
pki.cgi.end_html()
else:
pki.cgi.start_html('Certificate Revocation List published')
pki.cgi.show_info('The script to publish the Certificate' \
+ ' Revocation List completed successfully.')
pki.cgi.show_link('OK')
pki.cgi.end_html()
else:
pki.cgi.start_html('Script not found', True)
            pki.cgi.show_error('The script to publish the Certificate' \
                               + ' Revocation List cannot be found.')
pki.cgi.show_link('Go Back')
pki.cgi.end_html()
else:
pki.cgi.start_html('Configuration error', True)
pki.cgi.show_error('The current configuration doesn\'t have a script' \
+ ' configured to publish the Certificate Revocation' \
+ ' List.')
pki.cgi.show_link('Go Back')
pki.cgi.end_html()
# ------------------------------------------------------------------------------
def show_cert_db(config):
pki.cgi.start_html('Vinty PKI Certificate Database')
pki.cgi.show_cert_db(config)
pki.cgi.show_link('Go Back')
pki.cgi.end_html()
# ------------------------------------------------------------------------------
def show_con_file(config):
pki.cgi.start_html('OpenVPN Connection Status')
pki.cgi.show_con_file(config)
pki.cgi.show_link('Go Back')
pki.cgi.end_html()
# ------------------------------------------------------------------------------
def show_log_file(config):
"""
show last lines from openvpn.log
"""
pki.cgi.start_html('OpenVPN Log')
pki.cgi.show_log_file(config)
pki.cgi.show_link('Go Back')
pki.cgi.end_html()
## We're being called as a script ##############################################
if __name__ == '__main__':
cgitb.enable()
config = pki.util.parse_config()
if not config:
pki.cgi.start_html('Error parsing configuration', True)
pki.cgi.show_error('There was an error parsing the vinty' \
+ ' configuration. Please check your setup.')
pki.cgi.end_html()
else:
form = cgi.FieldStorage()
if form.has_key('action'):
action = pki.util.strip_invalid(form['action'].value)
else:
action = None
if action == 'create':
create_cert(form, config)
elif action == 'download':
download_cert(form, config)
elif action == 'renew':
renew_cert(form, config)
elif action == 'revoke':
revoke_cert(form, config)
elif action == 'p_add':
pw_add(form, config)
elif action == 'p_remove':
pw_remove(form, config)
elif action == 'publish':
publish_crl(config)
elif action == 'show':
show_cert_db(config)
elif action == 'con_show':
show_con_file(config)
elif action == 'log_show':
show_log_file(config)
else:
            pki.cgi.start_html('Invalid call', True)
            pki.cgi.show_error('You cannot use this script directly.')
            if action:
                pki.cgi.show_error('Unknown action: ' + action)
pki.cgi.show_link('Go Back')
pki.cgi.end_html()
| Python |
#!/usr/bin/python
## Revision history ############################################################
__author__ = 'Wouter Eerdekens <info@fks.be>'
__date__ = '2011-08-12'
__version__ = '0.1'
__history__ = """
2011-08-12 - Prepare for initial release <jeroen.hooyberghs@fks.be>
2006-07-26 - initial version.
"""
## Imports #####################################################################
import cgitb
import pki.util, pki.cmd, pki.cgi
## We're being called as a script ##############################################
if __name__ == '__main__':
cgitb.enable()
config = pki.util.parse_config()
if not config:
pki.cgi.start_html('Error parsing configuration', True)
        pki.cgi.show_error('There was an error parsing the vinty' \
                           + ' configuration. Please check your setup.')
pki.cgi.end_html()
else:
if not pki.cmd.check_pki(config):
pki.cgi.start_html('Vinty PKI Initialisation')
pki.cgi.init_page()
pki.cgi.end_html()
else:
pki.cgi.start_html('Vinty PKI Web Frontend')
pki.cgi.main_page(config)
pki.cgi.end_html()
| Python |
#!/usr/bin/python
## Revision history ############################################################
__author__ = 'Wouter Eerdekens <info@fks.be>'
__date__ = '2011-08-12'
__version__ = '0.1'
__history__ = """
2011-08-12 - Prepare for initial release <jeroen.hooyberghs@fks.be>
2006-07-26 - initial version.
"""
## Imports #####################################################################
import os, sys
from os import path
from optparse import OptionParser
import pki.cmd, pki.util
## Functions ###################################################################
def error(msg):
sys.stderr.write('Error: ' + msg + '\n')
sys.exit(1)
# ------------------------------------------------------------------------------
def parse_argv(argv):
cn = ''
usage = """%prog --init
%prog --create client|server CN
%prog --renew client|server CN
%prog --revoke CN
%prog --list valid|revoked|all"""
version = '%prog ' + __version__
parser = OptionParser(usage=usage, version=version)
parser.add_option('--init', action='store_true', default=False,
help='initialise the PKI infrastructure')
parser.add_option('--list', metavar='TYPE', type='choice',
choices=('valid', 'revoked', 'all'),
help='list all certificates of TYPE')
parser.add_option('--create', metavar='TYPE', type='choice',
choices=(pki.CERT_CLIENT, pki.CERT_SERVER),
help='create a certificate/key pair of TYPE')
parser.add_option('--renew', metavar='TYPE', type='choice',
choices=(pki.CERT_CLIENT, pki.CERT_SERVER),
help='renew a certificate of TYPE')
parser.add_option('--revoke', action='store_true', default=False,
help='revoke a certificate')
opt, arg = parser.parse_args(argv)
if opt.init:
if opt.create or opt.renew or opt.revoke or opt.list:
parser.error('--init cannot be combined with other options')
if len(arg) != 0:
parser.error('--init doesn\'t take an argument')
elif opt.list:
if opt.init or opt.create or opt.renew or opt.revoke:
parser.error('--list cannot be combined with other options')
if len(arg) != 0:
parser.error('--list doesn\'t take extra arguments')
elif opt.create:
if opt.init or opt.list or opt.renew or opt.revoke:
parser.error('--create cannot be combined with other options')
if len(arg) == 1:
cn = arg[0]
else:
parser.error('--create requires you to specify a CN')
elif opt.renew:
if opt.init or opt.create or opt.revoke or opt.list:
parser.error('--renew cannot be combined with other options')
if len(arg) == 1:
cn = arg[0]
else:
parser.error('--renew requires you to specify a CN')
elif opt.revoke:
if opt.init or opt.create or opt.renew or opt.list:
parser.error('--revoke cannot be combined with other options')
if len(arg) == 1:
cn = arg[0]
else:
parser.error('--revoke requires you to specify a CN')
else:
parser.print_help()
sys.exit(1)
return opt, cn
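# Example invocations, mirroring the usage string above (the script name shown
# here is a placeholder for whatever this file is installed as):
#   ./pki-cli --init
#   ./pki-cli --create server vpn.example.org
#   ./pki-cli --revoke vpn.example.org
#   ./pki-cli --list valid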
# ------------------------------------------------------------------------------
def info(msg):
sys.stdout.write(msg)
sys.stdout.flush()
# ------------------------------------------------------------------------------
def pki_init(config):
sys.stdout.write('Creating PKI infrastructure: ')
sys.stdout.flush()
result, msg = pki.cmd.create_pki(config)
if result:
print 'ok'
else:
print 'failed'
error(msg)
# ------------------------------------------------------------------------------
def cert_create(config, cn):
type = opt.create
cn = pki.util.strip_invalid(cn)
if cn:
info('Creating ' + type + ' certificate with CN ' + cn + ': ')
result, msg = pki.cmd.create_cert(config, cn, type)
if result:
print 'ok (serial: ' + pki.util.serial_from_cn(config, cn) + ')'
else:
print 'failed'
error(msg)
else:
error('invalid CN given')
# ------------------------------------------------------------------------------
def cert_renew(config, cn):
type = opt.renew
cn = pki.util.strip_invalid(cn)
if cn:
serial = pki.util.serial_from_cn(config, cn)
if serial:
info('Renewing ' + type + ' certificate with CN ' + cn + ': ')
result, msg = pki.cmd.renew_cert(config, cn, opt.renew)
if result:
print ' ok (new serial: ' + pki.util.serial_from_cn(config,cn) + ')'
else:
print ' failed'
error(msg)
else:
error('no serial found for ' + cn)
else:
error('invalid CN given')
# ------------------------------------------------------------------------------
def cert_revoke(config, cn):
cn = pki.util.strip_invalid(cn)
if cn:
serial = pki.util.serial_from_cn(config, cn)
if serial:
info('Revoking certificate with CN ' + cn + ', serial ' + serial +': ')
            result, msg = pki.cmd.revoke_cert(config, cn)  # same two-argument call the web frontend uses
if result:
print ' ok'
else:
print ' failed'
error(msg)
else:
error('no serial found for ' + cn)
else:
error('invalid CN given')
## We're being called as a script ##############################################
if __name__ == '__main__':
opt, cn = parse_argv(sys.argv[1:])
config = pki.util.parse_config()
if not config:
error('unable to read/validate configuration')
if opt.init:
pki_init(config)
else:
if not pki.cmd.check_pki(config):
error('PKI infrastructure not valid (first run with --init)')
if opt.list:
pki.cmd.list_certs(config, opt.list)
elif opt.create:
cert_create(config, cn)
elif opt.renew:
cert_renew(config, cn)
elif opt.revoke:
cert_revoke(config, cn)
| Python |
#!/usr/bin/python
from distutils.core import setup
setup (
    name = 'pki',  # assumed distribution name, taken from the packaged 'pki' module
    version = '0.1',
description = 'Vinty Public Key Infrastructure tools',
author = 'Wouter Eerdekens',
author_email = 'info@fks.be',
url = 'http://www.fks.be/',
packages = ['pki'],
package_dir = { 'pki' : 'packages/pki' }
)
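# Typical distutils usage; these are the standard build/install commands,
# nothing project-specific:
#   python setup.py build
#   python setup.py install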
| Python |
#!/usr/bin/python
## Revision history ############################################################
__author__ = 'Wouter Eerdekens <info@fks.be>'
__date__ = '2011-08-12'
__version__ = '0.1'
__history__ = """
2011-08-12 - Prepare for initial release <jeroen.hooyberghs@fks.be>
2006-07-26 - initial version.
"""
################################################################################
## Imports #####################################################################
from getpass import getpass
from optparse import OptionParser
import commands, os, os.path, re, sys, tempfile
import pki, pki.util
## Functions ###################################################################
def error(msg):
sys.stderr.write('Error: ' + msg + '\n')
sys.exit(1)
# ------------------------------------------------------------------------------
def parse_argv(argv):
keyfile = ''
usage = """%prog --set KEY
%prog --remove KEY"""
version = '%prog ' + __version__
parser = OptionParser(usage=usage, version=version)
parser.add_option('--set', action='store_true', default=False,
metavar="KEY", help='add a passphrase to KEY')
parser.add_option('--remove', action='store_true', default=False,
metavar="KEY", help='remove the passphrase from KEY')
opt, arg = parser.parse_args(argv)
if opt.set and opt.remove:
parser.error('--set and --remove are mutually exclusive')
elif opt.set or opt.remove:
if len(arg) == 1:
keyfile = arg[0]
else:
parser.error('no keyfile given')
else:
parser.print_help()
sys.exit(1)
return opt, keyfile
# ------------------------------------------------------------------------------
def remove_passphrase(config, keyfile, remove_only=True):
if remove_only:
passwd = getpass()
else:
passwd = getpass('Old password: ')
tmpfile = keyfile + '.tmp'
try:
fd, fname = tempfile.mkstemp()
os.write(fd, passwd)
os.close(fd)
except:
sys.stderr.write('Problem creating secure temporary file...\n')
sys.exit(1)
else:
cmd = pki.OPENSSL_COMMANDS['del_passphrase'] \
% { 'openssl' : config.get('openssl', 'path'),
'pwfile' : fname,
'inkey' : keyfile,
'outkey' : tmpfile
}
status, output = commands.getstatusoutput(cmd)
os.remove(fname)
if status:
sys.stderr.write('Problem removing old passphrase:\n' + output)
sys.exit(1)
else:
try:
os.remove(keyfile)
os.rename(tmpfile, keyfile)
except:
sys.stderr.write('Problem storing new keyfile.\n')
# ------------------------------------------------------------------------------
def set_passphrase(config, keyfile):
if pki.util.is_encrypted(keyfile):
remove_passphrase(config, keyfile, remove_only=False)
pw1 = getpass()
pw2 = getpass('Retype password: ')
tmpfile = keyfile + '.tmp'
if pw1 != pw2:
sys.stderr.write('Password mismatch.\n')
sys.exit(1)
else:
try:
fd, fname = tempfile.mkstemp()
os.write(fd, pw1)
os.close(fd)
except:
sys.stderr.write('Problem creating secure temporary file...\n')
sys.exit(1)
else:
cmd = pki.OPENSSL_COMMANDS['set_passphrase'] \
% { 'openssl' : config.get('openssl', 'path'),
'pwfile' : fname,
'inkey' : keyfile,
'outkey' : tmpfile
}
status, output = commands.getstatusoutput(cmd)
os.remove(fname)
if status:
sys.stderr.write('Problem setting passphrase:\n' + output)
sys.exit(1)
else:
try:
os.remove(keyfile)
os.rename(tmpfile, keyfile)
except:
sys.stderr.write('Problem storing new keyfile.\n')
## We're being called as a script ##############################################
if __name__ == '__main__':
opt, keyfile = parse_argv(sys.argv[1:])
config = pki.util.parse_config()
if not os.path.isfile(keyfile):
        error('Unable to open ' + keyfile + '.')
else:
if pki.util.is_private_key(keyfile):
if opt.set:
print 'Setting passphrase on ' + keyfile
set_passphrase(config, keyfile)
else:
print 'Removing passphrase from ' + keyfile
if pki.util.is_encrypted(keyfile):
remove_passphrase(config, keyfile)
else:
error(keyfile + ' has no passphrase set.')
else:
error(keyfile + ' is not a private key.')
| Python |
import socket
import sys
from utils import *
from oppEval import *
from preflop import *
from postflop import *
from handEval import *
"""
Simple example pokerbot, written in python. This is an example of a bare bones,
dumb pokerbot - it only sets up the socket necessary to connect with the engine
and then always returns the same action. It is meant as an example of how a
pokerbot should communicate with the engine.
"""
action = None
decision = None
def connect(port = int(sys.argv[1])):
    # port defaults to the number the engine passes on the command line
    global action
    if __name__ == "__main__":
        # connect the socket to the engine on the given port
        s = socket.create_connection(('localhost', port))
# Get a file-object for reading packets from the socket.
# Using this ensures that you get exactly one packet per read.
f_in = s.makefile()
while 1:
# block until the engine sends us a packet
data = f_in.readline()
# if we receive a null return, then the connection is dead
if not data:
print "Gameover, engine disconnected"
break
# Here is where you should implement code to parse the packets from
# the engine and act on it.
#print data
action = 'CHECK\n'
parse(data)
# When appropriate, reply to the engine with a legal action.
# The engine will ignore all spurious packets you send.
# The engine will also check/fold for you if you return an
# illegal action.
# When sending responses, you need to have a newline character (\n) or
# carriage return (\r), or else your bot will hang!
#Player.me.hand.write()
if data.split()[0] == "GETACTION":
#print action
s.send(action)
# if we get here, the server disconnected us, so clean up the socket
s.close()
def parse(s):
global action
data = s.split(' ')
if data[0] == "NEWGAME":
Game.newGame(data)
elif data[0] == "NEWHAND":
Game.newHand(data)
elif data[0] == "GETACTION":
getAction(data)
action = playGame() + '\n'
Game.lastStreet = Game.street
elif data[0] == "HANDOVER":
handOver(data)
def getAction(data):
index = 2
numBoardCards = int(data[index])
if numBoardCards > 0:
community = Hand([parseCard(card) for card in data[index + 1].split(',')])
index += 1
index += 1
numLastActions = int(data[index])
if numLastActions > 0:
lastActions = data[index + 1].split(',')
else:
lastActions = []
# Perform all actions in last actions list
for lastAction in lastActions:
action = Action(lastAction)
action.perform()
if action.player in [Player.leftOpp, Player.rightOpp] and Game.street > 0 and Game.street < 4:
updateWithAction(action.player, action)
elif action.isType('DEAL'):
Game.community = community
if Game.street == 1:
updateWithFlop(Player.leftOpp)
updateWithFlop(Player.rightOpp)
if not Player.leftOpp.folded:
Player.leftOpp.sawFlopCount += 1
if not Player.rightOpp.folded:
Player.rightOpp.sawFlopCount += 1
if Game.street >= 1:
handEval(Player.me)
Player.me.time = float(data[len(data) - 1])
def handOver(data):
global lastActions, time, playersLeft
numLastActions = int(data[4])
if numLastActions > 0:
actions = data[5].split(',')
else:
actions = []
for action in actions:
Action(action).perform()
Player.me.time = float(data[len(data) - 1])
postHandEval()
def postHandEval():
for player in [Player.leftOpp, Player.rightOpp]:
addPreflopActions(player, player.actions[0])
for i in range(1, 4):
addPostflopActions(player, player.actions[i])
if player.hand.size() > 0:
# Preflop hand strength
strength = preflopStrength(player.hand)
addPreflopStrengths(player, strength)
# Postflop hand strength
strength = postflopStrength(player.hand, Hand(Game.community.cards[0:3]), 1)
addPostflopStrengths(player, strength)
for i in range(4, 6):
strength = postflopStrength(player.hand, Hand(Game.community.cards[0:i]), i - 2)
addPostflopStrengths(player, strength)
else:
addPreflopStrengths(player, None)
for i in range(3):
addPostflopStrengths(player, None)
adjPreflopBehavior(player)
adjPostflopBehavior(player)
def playGame():
global decision
strategize(Player.me)
if Game.street == 0:
if Game.street != Game.lastStreet:
decision = preflopBot(Player.me)
return preflopMakeDecision(decision)
elif Game.street >= 1:
#print sum(Player.rightOpp.dist[:5]), sum(Player.rightOpp.dist[:10]), sum(Player.rightOpp.dist[:20]), sum(Player.rightOpp.dist[:30])
decision = postflopBot(Player.me)
return postflopMakeDecision(decision)
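# Decision tuple shapes (produced by preflopBot/postflopBot), inferred from
# how they are consumed below:
#   preflop:  (call a small bet?, raise size in small blinds, max amount to call)
#   postflop: (bet size as a fraction of the pot, pot-odds threshold for calling)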
def preflopMakeDecision(decision):
betAmt = decision[1] * Game.sb
if betAmt > Pot.toCall(Player.me) and Pot.toCall(Player.me) < Player.me.stack:
if Pot.raiseCounts[Player.me] == 0:
if Player.me.absHS > 0.06:
decision = (decision[0], 0, decision[2])
return 'RAISE:' + str(legalBet(betAmt))
elif Pot.raiseCounts[Player.me] >= 1 and Player.me.absHS <= 0.06:
return 'RAISE:' + str(legalBet(100 * Game.bb))
if Pot.toCall(Player.me) == 0:
return 'CHECK'
if Pot.toCall(Player.me) <= 2 * Game.sb and decision[0]:
return legalCall()
if Pot.toCall(Player.me) <= decision[2]:
return legalCall()
return 'FOLD'
def postflopMakeDecision(decision):
decision = scaleDecision(decision)
betAmt = int(decision[0] * Pot.size) * Game.sb
if betAmt > Pot.toCall(Player.me) and Pot.toCall(Player.me) < Player.me.stack:
decision = (0, decision[1])
if Pot.raiseCounts[Player.me] <= 1:
if Pot.toCall(Player.me) == 0:
return 'BET:' + str(legalBet(betAmt))
elif Pot.toCall(Player.me) > 0:
return 'RAISE:' + str(legalBet(betAmt))
if Pot.toCall(Player.me) == 0:
return 'CHECK'
if Pot.size >= 30 and float(Pot.toCall(Player.me)) / Pot.size <= .06:
return legalCall()
if float(Pot.size) / Pot.toCall(Player.me) >= decision[1]:
return legalCall()
return 'FOLD'
def scaleDecision(decision):
raiseAmt, callUpTo = decision
    # Scale the bet size (fraction of the pot to raise)
if Pot.size <= 12 * Game.sb:
raiseAmt *= 1.4
elif Pot.size <= 20 * Game.sb:
raiseAmt *= 1.2
if Pot.size >= 90 * Game.sb:
raiseAmt *= .8
# Scale pot odds to call
if Pot.size <= 15 * Game.sb:
callUpTo *= .5
#print 'raiseamt: ' + str(raiseAmt), 'callupto:' + str(callUpTo)
return (raiseAmt, callUpTo)
def legalBet(bet):
# Bet at least min raise amt
#print 'minraiseamt: ' + str(Pot.minRaiseAmt), 'maxbet: ' + str(max(Pot.bets.values())), bet
bet = max(Pot.minRaiseAmt + max(Pot.bets.values()), bet)
# If not enough chips, go all in
if bet > Player.me.stack:
#print 'going allin', 'mybet: ' + str(Pot.bets[Player.me]), 'stack: ' + str(Player.me.stack)
bet = Pot.bets[Player.me] + Player.me.stack
return bet
def legalCall():
if Pot.toCall(Player.me) > Player.me.stack:
return 'RAISE:' + str(Pot.bets[Player.me] + Player.me.stack)
else:
return 'CALL'
connect()
| Python |
from utils import *
# Updates opponent's preflop actions and the (# raises, # calls, # folds)
def addPreflopActions(opp, preflopActions):
actions = []
for action in preflopActions:
if not action.isType('POST'):
actions.append(action)
if len(actions) > 1 and actions[-1].type == Action.CALL:
actions = actions[:-1]
opp.preflopActions.append(actions)
# Updates opponent's preflop strengths
def addPreflopStrengths(opp, strength):
opp.preflopStrengths.append([strength])
# Adjust opponent's preflop tightness and aggression
def adjPreflopBehavior(opp):
raiseTotal, callTotal, total = 0, 0, 0
for i in range(len(opp.preflopActions)):
for action in opp.preflopActions[i]:
weight = max(1 - 0.01 * (len(opp.preflopActions) - i - 1), 0.01)
if action.isType('RAISE'):
raiseTotal += weight
elif action.isType('CALL'):
callTotal += weight
total += weight
total = max(total, 1.)
raiseRatio, callRatio = raiseTotal / total, callTotal / total
a, b = max(100 - len(opp.preflopActions), 0) / 100., min(len(opp.preflopActions), 100) / 100.
opp.preflopAggro = OPP_PREFLOP_AGGRO * a + raiseRatio * b
opp.preflopTight = OPP_PREFLOP_TIGHT * a + (raiseRatio + callRatio) * b
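# Worked example: after 50 recorded preflop hands, a = (100 - 50) / 100. = .5
# and b = 50 / 100. = .5, so the estimates blend the OPP_PREFLOP_* priors and
# the observed raise/call ratios evenly; beyond 100 hands the priors drop out.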
# Updates opponent's postflop actions and the (# raises, # calls, # folds)
def addPostflopActions(opp, postflopActions):
actions = []
for action in postflopActions:
if not action.isType('SHOW'):
actions.append(action)
if len(actions) > 1 and actions[-1].type == Action.CALL:
actions = actions[:-1]
opp.postflopActions.append(actions)
# Updates opponent's postflop strengths
def addPostflopStrengths(opp, strength):
opp.postflopStrengths.append([strength])
# Adjust opponent's postflop tightness and aggression
def adjPostflopBehavior(opp):
raiseTotal, callTotal, total = 0, 0, 0
for i in range(len(opp.postflopActions)):
for action in opp.postflopActions[i]:
weight = max(1 - 0.005 * (len(opp.postflopActions) - i - 1), 0.01)
if action.isType('RAISE'):
raiseTotal += weight
elif action.isType('CALL'):
callTotal += weight
total += weight
total = max(total, 1.)
raiseRatio, callRatio = raiseTotal / total, callTotal / total
a, b = max(300 - len(opp.postflopActions), 0) / 300., min(len(opp.postflopActions), 300) / 300.
opp.postflopAggro = OPP_POSTFLOP_AGGRO * a + raiseRatio * b
opp.postflopTight = OPP_POSTFLOP_TIGHT * a + (raiseRatio + callRatio) * b
if opp.postflopAggro < .05:
opp.postflopAggro = .05
if opp.postflopTight < .1:
opp.postflopTight = .1
| Python |
from utils import *
# handEval
def handEval(player):
myHand = [card.id for card in player.hand.cards]
boardCards = [card.id for card in Game.community.cards]
strength = parseEval(pokereval.poker_eval('holdem', [myHand, [255, 255]], extend(boardCards), iterations = 50000))
if Game.street == 1:
player.absHS = flopScaleWin(strength)
elif Game.street == 2:
player.absHS = turnScaleWin(strength)
elif Game.street == 3:
player.absHS = (1000 - strength) / 1000.
if Game.street > 1:
shakeWell(Player.leftOpp)
shakeWell(Player.rightOpp)
updateHandEval(player)
    f = probFlush(player.hand, Game.community)
    # combine probFlush's pair (second component half-weighted) with straight chances
    player.draws = f[0] + (1 - f[0]) * .5 * f[1] + probStraight(player.hand, Game.community)
def extend(boardCards):
if len(boardCards) == 3:
return boardCards + [255, 255]
elif len(boardCards) == 4:
return boardCards + [255]
return boardCards
def updateHandEval(player):
if Player.leftOpp.folded:
Player.leftOpp.dist = [0] * 51
Player.me.adjHS1 = min(sum(Player.leftOpp.dist[:int(round(Player.me.absHS * 50))]), 1)
if Player.rightOpp.folded:
Player.rightOpp.dist = [0] * 51
Player.me.adjHS2 = min(sum(Player.rightOpp.dist[:int(round(Player.me.absHS * 50))]), 1)
def probFlush(hand, board):
    # Estimate flush chances from the suited cards we can see; returns a
    # pair of probabilities that handEval above combines into player.draws.
    suits = [0] * 4
    for card in hand.cards:
        suits[card.suit] += 1
    for card in board.cards:
        suits[card.suit] += 1
    (shown, have) = (board.size(), max(suits))
    if have >= 5:
        # flush already made
        return (1, 0)
    elif (shown, have) == (3, 4):
        # flop flush draw: ~9/47 to hit on the turn, ~.35 by the river
        return (.19, .35)
    elif (shown, have) == (3, 3):
        # backdoor flush: roughly (10/47) * (9/46)
        return (0, .042)
    elif (shown, have) == (4, 4):
        # turn flush draw: 9/46
        return (.1957, 0)
    else:
        return (0, 0)
def probStraight(hand, board):
ranks = [0] * 13
for i in hand.cards:
ranks[i.rank] += 1
for i in board.cards:
ranks[i.rank] += 1
total = 0
if len(board.cards) == 3:
for j in range(13):
for k in range(j + 1, 13):
ranks[j] += 1
ranks[k] += 1
if checkStraight(ranks):
total += 1
ranks[j] -= 1
ranks[k] -= 1
return total / 156.
elif len(board.cards) == 4:
for j in range(13):
ranks[j] +=1
if checkStraight(ranks):
total += 1
ranks[j] -= 1
return total / 13.
return 0
# oppHandEval
def updateWithFlop(player, behavior1 = [.5, .4, .2], behavior2 = [.3, .4, .3], behavior3 = [.2, .3, .2]):
raised = findRaise(player.actions[0])
checked = findCheck(player.actions[0])
if Game.handId > 30:
tight = .3 * player.preflopTight + .7 * player.sawFlopPct
else:
tight = player.preflopTight
if raised >= 7 * Game.sb:
hands = (1., .7 * player.preflopAggro, .6, 2 * player.preflopAggro, .1)
elif raised > 0:
hands = (behavior1[0], player.preflopAggro, behavior2[0], tight, behavior3[0])
elif checked:
hands = (behavior1[1], player.preflopAggro, behavior2[1], tight, behavior3[1])
else:
hands = (behavior1[2], player.preflopAggro, behavior2[2], tight, behavior3[2])
    # hands = (weight1, threshold1, weight2, threshold2, weight3): strength
    # percentiles below threshold1 get weight1, those below threshold2 get
    # weight2, and the rest get weight3
cardSum = 0
for i in Game.community.cards:
cardSum += i.rank
if cardSum >= 26:
for i in range(51):
if i / 50. < hands[1]:
player.dist[i] += hands[0]
elif i / 50. < hands[3]:
player.dist[i] += hands[2]
else:
player.dist[i] += hands[4]
elif cardSum >= 19:
for i in range(51):
if i / 50. < hands[1]:
player.dist[i] += .5 * hands[0] + .5 * hands[2]
elif i / 50. < hands[3]:
player.dist[i] += hands[2]
else:
player.dist[i] += .5 * hands[4] + .5 * hands[2]
else:
player.dist = [1] * 51
player.dist = scale(player.dist)
def findRaise(actions):
for i in actions:
if i.isType('RAISE'):
return i.val
return 0
def findCheck(actions):
if actions[0].isType('CALL') and actions[0].val <= 2 * Game.sb:
return True
return False
def lookUpWeight(hands, strength):
if strength <= hands[1]:
return hands[0] + (hands[2] - hands[0]) / (hands[1]) * strength
elif strength <= hands[3]:
return hands[2] + (hands[4] - hands[2]) / (hands[3] - hands[1]) * (strength - hands[1])
return hands[4]
def oppFlushDraw(i, j):
    suits = [0] * 4
    for card in Game.community.cards:   # don't shadow the i parameter
        suits[card.suit] += 1
    if i < j and max(suits) == 1:
        return .75 * .04
    elif i < j and max(suits) == 2:
        return .25 * .35
    elif i < j:
        return .25 * 1
    elif max(suits) == 3:
        return .5 * .35 * .5
    return 0
def oppStraightDraw(i, j):
    ranks = [0] * 13
    ranks[i] += 1
    ranks[j] += 1
    for card in Game.community.cards:
        ranks[card.rank] += 1
total = 0
for j in range(13):
for k in range(j + 1, 13):
ranks[j] += 1
ranks[k] += 1
if checkStraight(ranks):
total += 1
ranks[j] -= 1
ranks[k] -= 1
return total / 156.
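# checkStraight scans the ten possible straight windows; starting at -1 makes
# ranks[-1] wrap around to the ace slot, which catches the A-2-3-4-5 wheel.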
def checkStraight(ranks):
for i in range(-1, 9):
straight = 1
for j in range(i, i + 5):
if ranks[j] == 0:
straight = 0
if straight == 1:
return True
return False
def scale(weights):
total = sum(weights)
if total != 0:
return [float(i) / total for i in weights]
else:
return [0] * 51
def updateWithAction(player, action):
if not player.folded:
for i in range(51):
prob = oppModel(player, action, .02 * i)
player.dist[i] = prob * player.dist[i]
player.dist = scale(player.dist)
updateHandEval(Player.me)
def adjustRaiseSize(val, player):
raiseSize = float(val) / Pot.size
if player.largestBet >= 50 * Game.sb:
raiseSize = float(min(.4 * val, 40)) / Pot.size
elif player.largestBet >= 25 * Game.sb:
raiseSize = float(.7 * val) / Pot.size
elif player.largestBet >= 12 * Game.sb:
raiseSize = float(.85 * val) / Pot.size
if val <= 4 * Game.sb:
raiseSize *= .5
elif raiseSize >= .5 and player.largestBet < 12:
raiseSize = .5 + 2 * (raiseSize - .5)
return raiseSize
def oppModel(player, action, strength):
raiseSize = adjustRaiseSize(action.val, player)
if action.isType('RAISE') and action.val >= 45 * Game.sb and raiseSize > .5:
if strength <= .5 * (player.bigBetPct / player.sawFlopPct - .05):
return max(3 - strength * 7, 2)
elif strength < 1.1 * (player.bigBetPct / player.sawFlopPct - .05):
return max(2 - strength * 3, 1)
elif strength < 2 * player.postflopAggro:
return .4
else:
return player.bluff
if action.isType('RAISE') and raiseSize > .05:
if strength <= player.postflopAggro:
return max(3 * (raiseSize - .05)**1.5 - (3 * (.4 + .2 * (raiseSize - .4)) * (raiseSize - .05)**1.5) / player.postflopAggro * strength, player.bluff)
elif strength <= player.postflopTight:
return max(3 * (.6 - .2 * (raiseSize - .4)) * (raiseSize - .05)**1.5 - abs(3 * (.6 - .2 * (raiseSize - .4)) * (raiseSize - .05)**1.5 - player.bluff) / (player.postflopTight - player.postflopAggro) * (strength - player.postflopAggro), player.bluff)
else:
return player.bluff
elif action.isType('CALL') and raiseSize >= .05:
callSize = raiseSize
if strength <= 1.5 * player.postflopAggro and callSize > .5:
return callSize
elif strength <= 1.5 * player.postflopAggro:
return .35 + .5 * (callSize - .35)
elif strength <= player.postflopTight:
return .35
else:
return .25
else:
if strength <= player.postflopAggro:
return 2.5 * player.slowplay
elif strength <= player.postflopTight:
return 2.5 * player.slowplay + (1 - player.bluff - 2.5 * player.slowplay) / (player.postflopTight - player.postflopAggro) * (strength - player.postflopAggro)
else:
return 1 - player.bluff
def clip(v, vMin, vMax):
    if v < vMin:
        # reflect rather than clamp at the low end, mirroring the smoothing
        # window that shakeWell slides over the distribution
        return abs(v)
    elif v > vMax:
        return vMax
    return v
def shakeWell(player):
if not player.folded:
newWeights = [0] * 51
for i in range(51):
for j in range(i - 7, i + 8):
newWeights[clip(j, 0, 50)] += 1 * player.dist[i]
player.dist = scale(newWeights)
| Python |
#
# Copyright (C) 2007, 2008 Loic Dachary <loic@dachary.org>
# Copyright (C) 2004, 2005, 2006 Mekensleep
#
# Mekensleep
# 24 rue vieille du temple
# 75004 Paris
# licensing@mekensleep.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Authors:
# Loic Dachary <loic@dachary.org>
#
#
import sys
sys.path.insert(0, ".")
sys.path.insert(0, ".libs")
from pokereval import PokerEval
iterations_low = 100000
iterations_high = 200000
pokereval = PokerEval()
if pokereval.best_hand_value("hi", ["Ah", "Ad", "As", "Kh", "Ks" ]) != 101494784:
sys.exit(1)
if pokereval.string2card("2h") != 0:
sys.exit(1)
print ""
pockets = [ ["As", "Ad", "Ac", "Tc", "Ts", "2d", "5c" ],
["Js", "Jc", "7s", "8c", "8d", "3c", "3h" ],
[255, 255 ] ]
print "stud7 (1) result = %s\n" % pokereval.winners(game = "7stud", pockets = pockets, dead = [], board = [])
pockets = [[22, 18, 21, 3, 41, 1, 30], [39, 255, 255, 15, 13, 17, 255]]
print "stud7 (2) result = %s\n" % pokereval.winners(game = "7stud", pockets = pockets, dead = [], board = [])
print [ j + i + "/%d" % pokereval.string2card(j + i) for i in "hdcs" for j in "23456789TJQKA" ]
print "deck = %s\n" % pokereval.deck()
print "result = %s\n" % pokereval.poker_eval(game = "holdem", pockets = [ ["tc", "ac"], ["3h", "ah"], ["8c", "6h"]], dead = [], board = ["7h", "3s", "2c"])
print "winners = %s\n" % pokereval.winners(game = "holdem", pockets = [ ["tc", "ac"], ["3h", "ah"], ["8c", "6h"]], dead = [], board = ["7h", "3s", "2c"])
print "result = %s\n" % pokereval.poker_eval(game = "holdem", pockets = [ ["tc", "ac"], ["th", "ah"], ["8c", "6h"]], dead = [], board = ["7h", "3s", "2c", "7s", "7d"])
print "winners = %s\n" % pokereval.winners(game = "holdem", pockets = [ ["tc", "ac"], ["th", "ah"], ["8c", "6h"]], dead = [], board = ["7h", "3s", "2c", "7s", "7d"])
print "winners (filthy pockets) = %s\n" % pokereval.winners(game = "holdem", pockets = [ ["tc", "ac", 255], [], [255, 255], ["th", "ah"], ["8c", "6h"]], dead = [], board = ["7h", "3s", "2c", "7s", "7d"])
print "winners omaha = %s\n" % pokereval.winners(game = "omaha", pockets = [ ["tc", "ac", "ks", "kc" ], ["th", "ah", "qs", "qc" ], ["8c", "6h", "js", "jc" ]], dead = [], board = ["7h", "3s", "2c", "7s", "7d"])
print "winners omaha8 = %s\n" % pokereval.winners(game = "omaha8", pockets = [ ["tc", "ac", "ks", "kc" ], ["th", "ah", "qs", "qc" ], ["8c", "6h", "js", "jc" ]], dead = [], board = ["7h", "3s", "2c", "7s", "7d"])
hand = ["Ac", "As", "Td", "7s", "7h", "3s", "2c"]
best_hand = pokereval.best_hand("hi", hand)
print "best hand from %s = %s" % ( hand, pokereval.best_hand("hi", hand) )
print "best hand from %s = (%s) %s " % (hand, best_hand[0], [ pokereval.card2string(i) for i in best_hand[1:] ])
print ""
hand = ["Ah", "Ts", "Kh", "Qs", "Js" ]
best_hand = pokereval.best_hand("hi", hand)
print "best hand from %s = %s" % ( hand, pokereval.best_hand("hi", hand) )
print "best hand from %s = (%s) %s " % (hand, best_hand[0], [ pokereval.card2string(i) for i in best_hand[1:] ])
print ""
hand = ["2h", "Kh", "Qh", "Jh", "Th" ]
best_hand = pokereval.best_hand("hi", hand)
print "best hand from %s = %s" % ( hand, pokereval.best_hand("hi", hand) )
print "best hand from %s = (%s) %s " % (hand, best_hand[0], [ pokereval.card2string(i) for i in best_hand[1:] ])
print ""
hand = ['2s', '3s', 'Jd', 'Ks', 'As', '4d', '5h', '7d', '9c']
best_hand = pokereval.best_hand("hi", hand)
print "best hand from %s = %s" % ( hand, pokereval.best_hand("hi", hand) )
print "best hand from %s = (%s) %s " % (hand, best_hand[0], [ pokereval.card2string(i) for i in best_hand[1:] ])
print ""
hand = ['As', '2s', '4d', '4s', '5c', '5d', '7s']
best_hand = pokereval.best_hand("low", hand)
print "1/ low hand from %s = %s" % ( hand, pokereval.best("low", hand) )
print "best low hand from %s = (%s) %s " % (hand, best_hand[0], [ pokereval.card2string(i) for i in best_hand[1:] ])
print ""
hand = ['As', '2s', '4d', '4s', '5c', '5d', '8s']
best_hand = pokereval.best_hand("low", hand)
print "2/ low hand from %s = %s" % ( hand, pokereval.best("low", hand) )
print "best low hand from %s = (%s) %s " % (hand, best_hand[0], [ pokereval.card2string(i) for i in best_hand[1:] ])
print ""
hand = ['7d', '6c', '5h', '4d', 'As']
best_hand = pokereval.best_hand("low", hand)
print "3/ low hand from %s = %s" % ( hand, pokereval.best("low", hand) )
print "best low hand from %s = (%s) %s " % (hand, best_hand[0], [ pokereval.card2string(i) for i in best_hand[1:] ])
print ""
board = [ 'As', '4d', '5h', '7d', '9c' ]
hand = [ '2s', 'Ts', 'Jd', 'Ks' ]
best_hand = pokereval.best_hand("low", hand, board)
print "4/ low hand from %s / %s = %s" % ( hand, board, pokereval.best("low", hand, board) )
print "best low hand from %s / %s = (%s) %s " % (hand, board, best_hand[0], [ pokereval.card2string(i) for i in best_hand[1:] ])
print ""
board = [ 'As', '4d', '6h', '7d', '3c' ]
hand = [ '2s', '5s', 'Jd', 'Ks' ]
best_hand = pokereval.best_hand("low", hand, board)
print "low hand from %s / %s = %s" % ( hand, board, pokereval.best("low", hand, board) )
print "best low hand from %s / %s = (%s) %s " % (hand, board, best_hand[0], [ pokereval.card2string(i) for i in best_hand[1:] ])
print ""
board = [ 'Jc', '4c', '3c', '5c', '9c' ]
hand = [ '2c', 'Ac', '5h', '9d' ]
best_hand = pokereval.best_hand("hi", hand, board)
print "hi hand from %s / %s = %s" % ( hand, board, pokereval.best("hi", hand, board) )
print "best hi hand from %s / %s = (%s) %s " % (hand, board, best_hand[0], pokereval.card2string(best_hand[1:]))
print ""
board = [ 'Jd', '9c', 'Jc', 'Tc', '2h' ]
hand = [ '2c', '4c', 'Th', '6s' ]
best_hand = pokereval.best_hand("low", hand, board)
print "5/ low hand from %s / %s = %s" % ( hand, board, pokereval.best("low", hand, board) )
print "best low hand from %s / %s = (%s) %s " % (hand, board, best_hand[0], [ pokereval.card2string(i) for i in best_hand[1:] ])
print ""
board = [ 'Ks', 'Jd', '7s', '4d', 'Js' ]
hand = [ '2d', '6c', 'Ac', '5c' ]
best_hand = pokereval.best_hand("low", hand, board)
print "6/ low hand from %s / %s = %s" % ( hand, board, pokereval.best("low", hand, board) )
print "best low hand from %s / %s = (%s) %s " % (hand, board, best_hand[0], [ pokereval.card2string(i) for i in best_hand[1:] ])
if len(sys.argv) > 2:
print "f0 result = %s\n" % pokereval.poker_eval(game = "holdem", fill_pockets = 1, pockets = [ ["As", "3s"], ["__", "__"], ["__", "__"]], dead = [], board = ["__", "Qs", "2c", "Ac", "Kc"])
print ""
print "f1 result = %s\n" % pokereval.poker_eval(game = "holdem", fill_pockets = 1, pockets = [ ["As", "3s"], ["__", "__"], ["__", "__"]], dead = [], board = ["7s", "Qs", "2c", "Ac", "Kc"])
print ""
print "f2 result = %s\n" % pokereval.poker_eval(game = "holdem", fill_pockets = 1, iterations = iterations_low, pockets = [ ["As", "3s"], ["__", "__"], ["__", "__"]], dead = [], board = ["__", "__", "__", "__", "__"])
print ""
print "f3 result = %s\n" % pokereval.poker_eval(game = "holdem", fill_pockets = 1, iterations = iterations_high, pockets = [ ["As", "Ac"], ["__", "__"], ["__", "__"]], dead = [], board = ["__", "__", "__", "__", "__"])
print ""
print "f4 result = %s\n" % pokereval.poker_eval(game = "holdem", fill_pockets = 1, iterations = iterations_high, pockets = [ ["As", "Ks"], ["__", "__"], ["__", "__"]], dead = [], board = ["__", "__", "__", "__", "__"])
print ""
print "f5 result = %s\n" % pokereval.poker_eval(game = "holdem", fill_pockets = 1, iterations = iterations_high, pockets = [ ["2s", "2c"], ["__", "__"], ["__", "__"]], dead = [], board = ["__", "__", "__", "__", "__"])
print ""
print "f6 result = %s\n" % pokereval.poker_eval(game = "holdem", fill_pockets = 1, iterations = iterations_high, pockets = [ ["Js", "Jc"], ["__", "__"], ["__", "__"]], dead = [], board = ["__", "__", "__", "__", "__"])
print ""
print "f7 result = %s\n" % pokereval.poker_eval(game = "omaha", fill_pockets = 1, iterations = iterations_high, pockets = [ ["Js", "Jc", "7s", "8c"], ["__", "__", "__", "__"], ["__", "__", "__", "__"]], dead = [], board = ["__", "__", "__", "__", "__"])
print ""
hand = ['As', 'Ad']
print "handval %s = %d " % (hand, pokereval.evaln(hand))
print ""
hand = ['Qc', '7d']
print "handval %s = %d " % (hand, pokereval.evaln(hand))
pokereval = None
| Python |
import sys
from time import time
import random
start = 0
def startTimer():
global start
start = time()
def elapsed():
return time() - start
sys.path.insert(0, ".")
sys.path.insert(0, ".libs")
from pokereval import PokerEval
pokereval = PokerEval()
[a,b,c,d,e] = [12,7,20,24,48]
highs, lows = [], []
def tester():
strengths = []
a=random.randint(0,51)
b=random.randint(0,51)
c=random.randint(0,51)
for i in range(52):
for j in range(i + 1, 52):
if i != a and i != b and i != c and j != a and j != b and j != c:
strength = pokereval.poker_eval(game = 'holdem', pockets = [[i, j], [255,255]], board = [a,b,c,255,255], iterations = 1000)['eval'][0]['ev']
strengths.append(strength)
strengths.sort()
highs.append(strengths[len(strengths) - 1])
lows.append(strengths[0])
return [strengths[117*i] for i in range(0,11)]
def tester2():
strengths = []
a=random.randint(0,51)
b=random.randint(0,51)
c=random.randint(0,51)
d=random.randint(0,51)
for i in range(52):
for j in range(i + 1, 52):
if i != a and i != b and i != c and i != d and j != a and j != b and j != c and j != d:
strength = pokereval.poker_eval(game = 'holdem', pockets = [[i,j], [255,255]], board = [a,b,c,d,255], iterations = 1000)['eval'][0]['ev']
strengths.append(strength)
strengths.sort()
highs.append(strengths[len(strengths) - 1])
lows.append(strengths[0])
return [strengths[112*i] for i in range(0,11)]
def speedTest():
startTimer()
for i in range(1000):
if i % 50 == 0:
print i
tester2()
print elapsed()
| Python |
#
# Copyright (C) 2007, 2008 Loic Dachary <loic@dachary.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
#
# http://gna.org/support/?1823
#
import sys
sys.path.insert(0, ".")
sys.path.insert(0, ".libs")
from pokereval import PokerEval
pokereval = PokerEval()
result = pokereval.poker_eval(game = "holdem", fill_pockets = 1, pockets = [ ["As", "3s"], ["__", "__"]], dead = [], board = ["Ad", "Qs", "2c", "Ac", "Kc"])
assert result == {'info': (990, 0, 1), 'eval': [{'winlo': 0, 'tielo': 0, 'winhi': 877, 'scoop': 877, 'loselo': 0, 'ev': 903, 'losehi': 78, 'tiehi': 35}, {'winlo': 0, 'tielo': 0, 'winhi': 78, 'scoop': 78, 'loselo': 0, 'ev': 96, 'losehi': 877, 'tiehi': 35}]}
result = pokereval.poker_eval(game = "omaha8", fill_pockets = 1, pockets = [ ["As", "3s", "2s", "6s"], ["__", "__", "__", "__"]], dead = [], board = ["Ad", "Qs", "2c", "7c", "5c"])
assert result == {'info': (123410, 1, 1), 'eval': [{'winlo': 109375, 'tielo': 5361, 'winhi': 73190, 'scoop': 69661, 'loselo': 8674, 'ev': 753, 'losehi': 48978, 'tiehi': 1242}, {'winlo': 8674, 'tielo': 5361, 'winhi': 48978, 'scoop': 8674, 'loselo': 68788, 'ev': 246, 'losehi': 73190, 'tiehi': 1242}]}
| Python |
#
# Copyright (C) 2007, 2008 Loic Dachary <loic@dachary.org>
# Copyright (C) 2004, 2005, 2006 Mekensleep
#
# Mekensleep
# 24 rue vieille du temple
# 75004 Paris
# licensing@mekensleep.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Authors:
# Loic Dachary <loic@dachary.org>
#
#
import sys
_pokereval = __import__('_pokereval_' + sys.version[0] + '_' + sys.version[2])
from types import *
class PokerEval:
"""\
    Evaluate the strength of a poker hand for a given poker variant.
In all methods, when a list of cards is to be provided (for instance
with the "hand" argument of the "best" method), each member of the
list may be a number or a string designating a card according to
the following table:
2h/00 2d/13 2c/26 2s/39
3h/01 3d/14 3c/27 3s/40
4h/02 4d/15 4c/28 4s/41
5h/03 5d/16 5c/29 5s/42
6h/04 6d/17 6c/30 6s/43
7h/05 7d/18 7c/31 7s/44
8h/06 8d/19 8c/32 8s/45
9h/07 9d/20 9c/33 9s/46
Th/08 Td/21 Tc/34 Ts/47
Jh/09 Jd/22 Jc/35 Js/48
Qh/10 Qd/23 Qc/36 Qs/49
Kh/11 Kd/24 Kc/37 Ks/50
Ah/12 Ad/25 Ac/38 As/51
The string __ (two underscore) or the number 255 are placeholders
meaning that the card is unknown.
"""
def best(self, side, hand, board = []):
"""\
Return the best five card combination that can be made with the cards
listed in "hand" and, optionally, board. The "side" may be "hi" or
"low". The "board" argument must only be provided for variants where
knowing if a given card is taken from the board or not is significant
(such as Omaha but not Holdem).
A list is returned. The first element is the numerical value
of the hand (better hands have higher values if "side" is "hi" and
lower values if "side" is "low"). The second element is a list whose
first element is the strength of the hand among the following:
         Nothing (only if "side" equals "low")
         NoPair
         OnePair
         TwoPair
         Trips
         Straight
         Flush
         FlHouse
         Quads
         StFlush
        The last five elements are numbers describing the best hand properly
        sorted (for instance the ace is at the end for no pair if "side" is
        "low" or at the beginning if "side" is "hi").
Examples:
[134414336, ['StFlush', 29, 28, 27, 26, 38]] is the wheel five to ace, clubs
[475920, ['NoPair', 45, 29, 41, 39, 51]] is As, 8s, 5c, 4s, 2s
[268435455, ['Nothing']] means there is no qualifying low
"""
if len(hand + board) >= 5:
return _pokereval.eval_hand(side, hand, board)
else:
return False
def best_hand(self, side, hand, board = []):
"""\
Return the best five card combination that can be made with the cards
listed in "hand" and, optionaly, board. The "side" may be "hi" or
"low". The returned value is the second element of the list returned
by the "best" method.
"""
if len(hand + board) >= 5:
return _pokereval.eval_hand(side, hand, board)[1]
else:
return False
def best_hand_value(self, side, hand, board = []):
"""\
Return the best five card combination that can be made with the cards
listed in "hand" and, optionaly, board. The "side" may be "hi" or
"low". The returned value is the first element of the list returned
by the "best" method.
"""
if len(hand + board) >= 5:
return _pokereval.eval_hand(side, hand, board)[0]
else:
return False
def evaln(self, cards):
"""\
Call the poker-eval Hand_EVAL_N function with the "cards" argument.
Return the strength of the "cards" as a number. The higher the
better.
"""
return _pokereval.evaln(cards)
def winners(self, *args, **kwargs):
"""\
Return a list of the indexes of the best hands, relative to the "pockets"
keyword argument. For instance, if the first pocket and third pocket cards
tie, the list would be [0, 2]. Since there may be more than one way to
win a hand, a hash is returned with the list of the winners for each so
        called side. For instance {'hi': [0], 'low': [1]} means pocket cards
at index 0 won the high side of the hand and pocket cards at index 1
won the low side.
See the"poker_eval" method for a detailed
explanation of the semantics of the arguments.
If the keyword argument "fill_pockets" is set, pocket cards
        can contain a placeholder (i.e. 255 or __) that will be
used as specified in the "poker_eval" method documentation.
If the keyword argument "fill_pockets" is not set, pocket cards
that contain at least one placeholder (i.e. 255 or __) are
        ignored completely. For instance if winners is called as follows
o.winners(game = 'holdem', pockets = [ [ '__', 'As' ], [ 'Ks', 'Kd'] ])
        it is strictly equivalent to calling
o.winners(game = 'holdem', pockets = [ [ 'Ks', 'Kd'] ]).
"""
index2index = {}
normalized_pockets = []
normalized_index = 0
pockets = kwargs["pockets"][:]
for index in xrange(len(pockets)):
if not kwargs.has_key("fill_pockets"):
if 255 in pockets[index] or "__" in pockets[index]:
pockets[index] = []
if pockets[index] != []:
normalized_pockets.append(pockets[index])
index2index[index] = normalized_index
normalized_index += 1
kwargs["pockets"] = normalized_pockets
results = _pokereval.poker_eval(*args, **kwargs)
(count, haslopot, hashipot) = results.pop(0)
winners = { 'low': [], 'hi': [] }
for index in xrange(len(pockets)):
if index2index.has_key(index):
result = results[index2index[index]]
if result[1] == 1 or result[3] == 1:
winners["hi"].append(index)
if result[4] == 1 or result[6] == 1:
winners["low"].append(index)
if not haslopot or len(winners["low"]) == 0:
del winners["low"]
if not hashipot:
del winners["hi"]
return winners
def poker_eval(self, *args, **kwargs):
"""\
Provided with a description of a poker game, return the outcome (if at showdown) or
the expected value of each hand. The poker game description is provided as a set
of keyword arguments with the following meaning:
game : the variant (holdem, holdem8, omaha, omaha8, 7stud, 7stud8, razz,
5draw, 5draw8, 5drawnsq, lowball, lowball27).
Mandatory, no default.
pockets : list of pocket cards for each player still in game. Each member
of the list is a list of cards. The position of the pocket cards
                  in the list is meaningful: the value returned will refer to
                  this position when stating which player wins, ties or loses.
Example: [ ["tc", "ac"], ["3h", "ah"], ["8c", "6h"]]
Cards do not have to be real cards like "tc" or "4s". They may also be a
placeholder, denoted by "__" or 255. When using placeholders, the
keyword argument "iterations" may be specified to use Monte Carlo instead of
exhaustive exploration of all the possible combinations.
Example2: [ ["tc", "__"], [255, "ah"], ["8c", "6h"]]
Mandatory, no default.
        board : list of community cards, for games where this is meaningful. If
specified when irrelevant, the return value cannot be predicted.
Default: []
        dead : list of dead cards. These cards won't be accounted for when exploring
the possible hands.
Default: []
iterations: the maximum number of iterations when exploring the
possible outcome of a given hand. Roughly speaking, each
iteration means to distribute cards that are missing (for
which there are place holders in the board or pockets
keywords arguments, i.e. 255 or __). If the number of
iterations is not specified and there are place holders,
the return value cannot be predicted.
Default: +infinite (i.e. exhaustive exploration)
Example: object.poker_eval(game = "holdem",
pockets = [ ["tc", "ac"], ["3h", "ah"], ["8c", "6h"]],
dead = [],
board = ["7h", "3s", "2c"])
The return value is a map of two entries:
'info' contains three integers:
- the number of samples (which must be equal to the number of iterations given
in argument).
- 1 if the game has a low side, 0 otherwise
- 1 if the game has a high side, 0 otherwise
'eval' is a list of as many maps as there are pocket cards, each
made of the following entries:
            'scoop': the number of times these pocket cards scoop
            'winhi': the number of times these pocket cards win the high side
            'losehi': the number of times these pocket cards lose the high side
            'tiehi': the number of times these pocket cards tie for the high side
            'winlo': the number of times these pocket cards win the low side
            'loselo': the number of times these pocket cards lose the low side
            'tielo': the number of times these pocket cards tie for the low side
'ev': the EV of these pocket cards as an int in the range [0,1000] with
1000 being the best.
        It should be clear that if there is only one sample (i.e. because all the
        cards are known, which is the situation that occurs at showdown) the details
        provided by the 'eval' entry are mostly irrelevant and the caller might
        prefer to call the winners method instead.
"""
result = _pokereval.poker_eval(*args, **kwargs)
return {
'info': result[0],
'eval': [ { 'scoop': x[0],
'winhi': x[1],
'losehi': x[2],
'tiehi': x[3],
'winlo': x[4],
'loselo': x[5],
'tielo': x[6],
'ev': int(x[7] * 1000) } for x in result[1:] ]
}
def deck(self):
"""\
Return the list of all cards in the deck.
"""
return [ self.string2card(i + j) for i in "23456789TJQKA" for j in "hdcs" ]
def nocard(self):
"""Return 255, the numerical value of a place holder in a list of cards."""
return 255
def string2card(self, cards):
"""\
Convert card names (strings) to card numbers (integers) according to the
following map:
2h/00 2d/13 2c/26 2s/39
3h/01 3d/14 3c/27 3s/40
4h/02 4d/15 4c/28 4s/41
5h/03 5d/16 5c/29 5s/42
6h/04 6d/17 6c/30 6s/43
7h/05 7d/18 7c/31 7s/44
8h/06 8d/19 8c/32 8s/45
9h/07 9d/20 9c/33 9s/46
Th/08 Td/21 Tc/34 Ts/47
Jh/09 Jd/22 Jc/35 Js/48
Qh/10 Qd/23 Qc/36 Qs/49
Kh/11 Kd/24 Kc/37 Ks/50
Ah/12 Ad/25 Ac/38 As/51
The "cards" argument may be either a list in which case a converted list
is returned or a string in which case the corresponding number is
returned.
"""
if type(cards) is ListType or type(cards) is TupleType:
return [ _pokereval.string2card(card) for card in cards ]
else:
return _pokereval.string2card(cards)
def card2string(self, cards):
"""\
Convert card numbers (integers) to card names (strings) according to the
following map:
2h/00 2d/13 2c/26 2s/39
3h/01 3d/14 3c/27 3s/40
4h/02 4d/15 4c/28 4s/41
5h/03 5d/16 5c/29 5s/42
6h/04 6d/17 6c/30 6s/43
7h/05 7d/18 7c/31 7s/44
8h/06 8d/19 8c/32 8s/45
9h/07 9d/20 9c/33 9s/46
Th/08 Td/21 Tc/34 Ts/47
Jh/09 Jd/22 Jc/35 Js/48
Qh/10 Qd/23 Qc/36 Qs/49
Kh/11 Kd/24 Kc/37 Ks/50
Ah/12 Ad/25 Ac/38 As/51
The "cards" argument may be either a list in which case a converted list
is returned or an integer in which case the corresponding string is
returned.
"""
if type(cards) is ListType or type(cards) is TupleType:
return [ _pokereval.card2string(card) for card in cards ]
else:
return _pokereval.card2string(cards)
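# Minimal usage sketch, mirroring calls exercised in the accompanying test
# scripts; it only runs where the compiled _pokereval extension imported
# above is available.
if __name__ == '__main__':
    pe = PokerEval()
    # card name <-> number round trip ("2h" is card 0 in the tables above)
    assert pe.string2card("2h") == 0
    assert pe.card2string(0) == "2h"
    # best five-card high hand out of seven cards
    print pe.best_hand("hi", ["Ac", "As", "Td", "7s", "7h", "3s", "2c"])
    # showdown winners on a complete board
    print pe.winners(game = "holdem",
                     pockets = [["tc", "ac"], ["3h", "ah"], ["8c", "6h"]],
                     dead = [],
                     board = ["7h", "3s", "2c", "7s", "7d"])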
| Python |
import sys
import random
from time import time
sys.path.append('./pypoker-eval-137.0')
sys.path.append('./pypoker-eval-137.0/.libs')
from pokereval import PokerEval
start = 0
pokereval = PokerEval()
# load the precomputed 13x13 preflop rank table
f = open('preflopranks.txt', 'r')
data = f.read()
f.close()
A = [float(i) for i in data.split('\t')]
P = [A[13*i:13*i+13] for i in range(13)]
MY_PREFLOP_AGGRO, MY_PREFLOP_TIGHT, MY_POSTFLOP_AGGRO, MY_POSTFLOP_TIGHT, MY_SLOWPLAY, MY_BLUFF, MY_BIG_BET = 0.25, 0.6, 0.45, 0.7, 0.1, 0.15, 0.5
OPP_PREFLOP_AGGRO, OPP_PREFLOP_TIGHT, OPP_POSTFLOP_AGGRO, OPP_POSTFLOP_TIGHT, OPP_SLOWPLAY, OPP_BLUFF = 0.35, 0.65, 0.35, 0.5, 0.2, 0.2
def startTimer():
global start
start = time()
def elapsed():
return time() - start
class Card():
ranks = [2, 3, 4, 5, 6, 7, 8, 9, 10, 'J', 'Q', 'K', 'A']
    suits = ['hearts', 'diamonds', 'clubs', 'spades']  # suit 0 is hearts in the pokereval numbering (2h/00, 2d/13, 2c/26, 2s/39)
def __init__(self, id):
self.id = id
self.rank = id % 13
self.suit = id / 13
def compareTo(self, card):
return cmp(self.rank, card.rank)
def equals(self, card):
return self.id == card.id
def write(self):
print Card.ranks[self.rank], 'of', Card.suits[self.suit]
class Hand():
def __init__(self, cards):
self.cards = cards
# Adds the given cards to the hand
def add(self, cards):
self.cards.extend(cards)
# Returns combination of current and given hands, leaving originals unchanged
def combine(self, hand):
copy = list(self.cards)
copy.extend(hand.cards)
return Hand(copy)
def size(self):
return len(self.cards)
def rankCount(self):
rankCount = [0] * 13
for i in self.cards:
rankCount[i.rank] += 1
return rankCount
def suited(self):
for i in self.cards[1:]:
if i.suit != self.cards[0].suit:
return False
return True
def write(self):
for card in self.cards:
card.write()
class Deck():
cards = [1] * 52
size = 52
@staticmethod
def add(card):
if Deck.cards[card] == 0:
Deck.cards[card] = 1
Deck.size += 1
@staticmethod
def addAll(hand):
for card in hand.cards:
Deck.add(card.id)
@staticmethod
def remove(card):
if Deck.cards[card] == 1:
Deck.cards[card] = 0
Deck.size -= 1
return True
else:
return False
@staticmethod
def removeAll(hand):
for card in hand.cards:
Deck.remove(card.id)
@staticmethod
def contains(card):
return Deck.cards[card] == 1
@staticmethod
def deal(n):
hand = Hand([])
for i in range(n):
card = Card(random.randint(0, 51))
while Deck.cards[card.id] == 0:
card = Card(random.randint(0, 51))
Deck.remove(card.id)
hand.add([card])
return hand
@staticmethod
def shuffle():
Deck.cards = [1] * 52
Deck.size = 52
class Pot():
size = 0
bets = {}
raiseCounts = {}
minRaiseAmt = 2
@staticmethod
def add(player, bet):
Pot.minRaiseAmt = max(Pot.minRaiseAmt, Pot.bets[player] + bet - max(Pot.bets.values()))
Pot.size += bet
Pot.bets[player] += bet
@staticmethod
def setSize(size):
Pot.size = size
@staticmethod
def toCall(player):
return max(Pot.bets.values()) - Pot.bets[player]
@staticmethod
def newGame():
Pot.size = 0
Pot.bets = {Player.me: 0, Player.leftOpp: 0, Player.rightOpp: 0}
Pot.raiseCounts = {Player.me: 0, Player.leftOpp: 0, Player.rightOpp: 0}
Pot.minRaiseAmt = Game.bb
@staticmethod
def newHand():
Pot.size = 0
Pot.bets = {Player.me: 0, Player.leftOpp: 0, Player.rightOpp: 0}
Pot.raiseCounts = {Player.me: 0, Player.leftOpp: 0, Player.rightOpp: 0}
Pot.minRaiseAmt = Game.bb
@staticmethod
def newStreet():
Pot.bets = {Player.me: 0, Player.leftOpp: 0, Player.rightOpp: 0}
Pot.raiseCounts = {Player.me: 0, Player.leftOpp: 0, Player.rightOpp: 0}
Pot.minRaiseAmt = Game.bb
class Action():
FOLD, CALL, RAISE, POST, DEAL, SHOW, WIN, OTHER = 0, 1, 2, 3, 4, 5, 6, 7
def __init__(self, action):
spl = action.split(":")
self.player = Player.getPlayer(spl[1])
self.type = getattr(self, self.mapType(spl[0]))
if len(spl) == 3:
self.val = int(spl[2])
elif len(spl) == 4:
self.val = Hand([parseCard(spl[2]), parseCard(spl[3])])
else:
self.val = None
if self.type == Action.CALL:
self.val = Pot.toCall(self.player)
def mapType(self, type):
if type == "FOLD" or type == "CALL" or type == "RAISE" or type == "POST" or type == "DEAL" or type == "WIN":
return type
elif type == "CHECK":
return "CALL"
elif type == "BET":
return "RAISE"
elif type == "SHOWS":
return "SHOW"
else:
return "OTHER"
def perform(self):
if self.type == Action.FOLD:
self.player.folded = True
self.player.foldedCount += 1
self.player.dist = [0] * 51
if self.player == Player.leftOpp:
Player.me.adjHS1 = 0
elif self.player == Player.rightOpp:
Player.me.adjHS2 = 0
Game.playersLeft -= 1
elif self.type == Action.CALL:
self.player.bet(Pot.toCall(self.player))
elif self.type == Action.RAISE:
self.player.bet(self.val - Pot.bets[self.player])
Pot.raiseCounts[self.player] += 1
elif self.type == Action.POST:
self.player.bet(self.val)
elif self.type == Action.DEAL:
Game.newStreet()
elif self.type == Action.SHOW:
Game.street = 4
self.player.hand = self.val
elif self.type == Action.WIN:
self.player.winCount += 1
else:
pass
if self.player is not None:
self.player.actions[Game.street].append(self)
def isType(self, type):
return self.type == getattr(self, type)
class Player():
me, leftOpp, rightOpp = None, None, None
def __init__(self, name):
self.name = name
self.position = 0
self.hand = Hand([])
self.stack = 200
self.actions = [[] for i in range(5)]
self.time = 10000
self.folded = False
self.preflopAggro, self.preflopTight = OPP_PREFLOP_AGGRO, OPP_PREFLOP_TIGHT
self.postflopAggro, self.postflopTight = OPP_POSTFLOP_AGGRO, OPP_POSTFLOP_TIGHT
self.slowplay = OPP_SLOWPLAY
self.bluff = OPP_BLUFF
self.bigBet = MY_BIG_BET
self.largestBet = 0
self.winCount, self.foldedCount, self.sawFlopCount, self.bigBetCount = 0, 0, 0, 0
self.winPct, self.foldedPct, self.sawFlopPct, self.bigBetPct = 0, 0, 0, 0
self.dist = [0] * 51
self.preflopActions, self.postflopActions = [], []
self.preflopStrengths, self.postflopStrengths = [], []
self.absHS, self.adjHS1, self.adjHS2, self.draws = 0, 0, 0, 0
def newHand(self, data = None):
if self == Player.me:
self.position = int(data[2])
Player.leftOpp.position = (self.position + 1) % 3
Player.rightOpp.position = (self.position + 2) % 3
self.hand = Hand([parseCard(data[3]), parseCard(data[4])])
self.stack = Game.maxStack
self.actions = [[] for i in range(5)]
self.folded = False
self.largestBet = 0
self.winPct, self.foldedPct, self.sawFlopPct, self.bigBetPct = float(self.winCount) / Game.handId, float(self.foldedCount) / Game.handId, float(self.sawFlopCount) / Game.handId, float(self.bigBetCount) / Game.handId
self.dist = [0] * 51
self.absHS, self.adjHS1, self.adjHS2, self.draws = 0, 0, 0, 0
else:
self.hand = Hand([])
self.stack = Game.maxStack
self.actions = [[] for i in range(5)]
self.folded = False
self.largestBet = 0
self.winPct, self.foldedPct, self.sawFlopPct, self.bigBetPct = float(self.winCount) / Game.handId, float(self.foldedCount) / Game.handId, float(self.sawFlopCount) / Game.handId, float(self.bigBetCount) / Game.handId
self.dist = [0] * 51
self.absHS, self.adjHS1, self.adjHS2, self.draws = 0, 0, 0, 0
def bet(self, amt):
if amt > self.largestBet:
self.largestBet = amt
if amt > 100:
self.bigBetCount += 1
elif amt > 70:
self.bigBetCount += 0.5
Pot.add(self, amt)
self.stack -= amt
# Get player with given name
@staticmethod
def getPlayer(name):
if name == Player.me.name:
return Player.me
elif name == Player.leftOpp.name:
return Player.leftOpp
elif name == Player.rightOpp.name:
return Player.rightOpp
elif name is not None and name not in ['FLOP', 'TURN', 'RIVER']:
return Player.me
else:
return None
class Game():
# Game variables
gameId, numHands, maxStack, bb, sb, maxTime = 1, 100, 200, 2, 1, 10000
# Hand variables
handId, community, playersLeft = 2, Hand([]), 3
# Street variables
street, lastStreet = 0, -1
@staticmethod
def newGame(data):
Game.gameId = int(data[1])
Player.me = Player('FlipFlop' + str(sys.argv[1]))
Player.me.preflopAggro, Player.me.preflopTight, Player.me.postflopAggro, Player.me.postflopTight, Player.me.slowplay, Player.me.bluff = MY_PREFLOP_AGGRO, MY_PREFLOP_TIGHT, MY_POSTFLOP_AGGRO, MY_POSTFLOP_TIGHT, MY_SLOWPLAY, MY_BLUFF
Player.leftOpp = Player(data[2])
Player.rightOpp = Player(data[3])
Pot.newGame()
Game.numHands, Game.maxStack, Game.bb, Game.sb, Game.maxTime = int(data[4]), int(data[5]), int(data[6]), int(data[7]), float(data[8])
@staticmethod
def newHand(data):
Game.handId, Game.community, Game.playersLeft, Game.street, Game.lastStreet = int(data[1]), Hand([]), 2, 0, -1
Player.me.newHand(data)
Player.leftOpp.newHand()
Player.rightOpp.newHand()
Pot.newHand()
@staticmethod
def newStreet():
Game.street += 1
Pot.newStreet()
# Raise prob, call prob, fold prob
def smarty(handDesc):
toCall = Pot.toCall(Player.me)
return normalize([max(.8 - handDesc[1], 0), handDesc[1] - toCall * handDesc[1] ** 2, toCall * handDesc[1] ** 2])
def normalize(probs):
# scale so the probabilities sum to 1, e.g. normalize([1, 1, 2]) -> [0.25, 0.25, 0.5]
n = sum(probs)
return [probs[i] / n for i in range(len(probs))]
# Scale win percentage after flop to percentile (est. lowest ev is 157 and highest ev is 816)
def flopScaleWin(pct):
a = (816 - 157) / 1077.
b = 157.
percentile = 1 - (pct - b) / (1077 * a)
if percentile < 0:
return 0
elif percentile > 1:
return 1
return percentile
# Scale win percentage after turn to percentile (est. lowest ev is 098 and highest ev is 868)
def turnScaleWin(pct):
a = (868 - 98) / 1033.
b = 98.
percentile = 1 - (pct - b) / (1033 * a)
if percentile < 0:
return 0
elif percentile > 1:
return 1
return percentile
def parseEval(data):
return data['eval'][0]['ev']
def parseCard(s):
ranks = ['2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K', 'A']
suits = ['c', 'd', 'h', 's']
return Card(ranks.index(s[0].upper()) + 13 * suits.index(s[1].lower()))
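# Example: parseCard('Ah') -> Card(38), i.e. rank 12 ('A') and suit 2 ('hearts')
# under this module's c/d/h/s suit order. Note this differs from the pokereval
# wrapper's h/d/c/s numbering, but the relabeling is applied consistently.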
def makeHand(cards):
hand = Hand([])
for card in cards:
hand.add([parseCard(card)])
Deck.removeAll(hand)
return hand
def tester():
Game.newGame('NEWGAME 1 testbot3001 testbot3002 100 200 2 1 10000.00000'.split(' '))
Game.newHand('NEWHAND 1 0 Ac 9d 0 0 0 10000.00000'.split(' '))
| Python |
from utils import *
# player has position, hand, actions, folded
# behavior - tightness, preflopAggro, preflopTight, postflop, slowplay, bluff, dist
# action - Action('RAISE:40'), interpret action.val, action.isType
# game has community, street, playersLeft
def preflopBot(player):
strength = preflopStrength(player.hand)
if player.position == 0:
return (strength <= .8 * player.preflopTight, preflopRaise(strength, 1.4 * player.preflopAggro), callUpTo(strength, 1.1 * player.preflopTight))
# dealer turn 1
elif player.position == 1:
if Game.playersLeft == 1:
return (strength <= 1.4 * player.preflopTight, preflopRaise(strength, 1.5 * player.preflopAggro), callUpTo(strength, 1.2 * player.preflopTight))
else:
return (strength <= 1.25 * player.preflopTight, preflopRaise(strength, 1.2 * player.preflopAggro), callUpTo(strength, player.preflopTight))
# small blind turn 1
else:
if Game.playersLeft == 1:
return (True, preflopRaise(strength, 1.2 * player.preflopAggro), callUpTo(strength, 1.1 * player.preflopTight))
else:
return (True, preflopRaise(strength, player.preflopAggro), callUpTo(strength, player.preflopTight))
# big blind
# return (call, raise, callUpTo), raise implies call up to 2 bb more
def preflopStrength(hand):
# lookup hand
(c1, c2) = (c.rank for c in hand.cards)
if c1 > c2:
(c1, c2) = (c2, c1)
if hand.suited():
strength = P[12 - c2][12 - c1]
else:
strength = P[12 - c1][12 - c2]
return strength
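# Example: A-K has ranks (12, 11); after the swap c1 <= c2, so the suited
# lookup is P[12 - c2][12 - c1] = P[0][1] and the offsuit lookup is P[1][0].
# Suited hands therefore sit above the diagonal of the preflopranks.txt grid
# and offsuit hands below it.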
def preflopRaise(strength, aggression):
if strength <= .04:
betRange = (6, 15)
elif strength < .5 * aggression:
betRange = (5, 12)
elif strength < 1.2 * aggression:
betRange = (0, 6)
else:
betRange = (0, 0)
x = random.random()
return int(betRange[0] * x + betRange[1] * (1 - x))
def callUpTo(strength, tightness):
if strength <= .05:
amt = 200
elif strength <= .10:
amt = (270 - 2500 * strength)
elif strength <= .5 * tightness:
amt = max(30 - 90 * strength, 7)
elif strength <= .8 * tightness:
amt = 6
elif strength <= tightness:
amt = 4
else:
amt = 0
if Player.me.largestBet >= 6:
amt += Player.me.largestBet
if Game.handId > 40 and Player.me.sawFlopPct < .35:
return amt + 3
elif Game.handId > 40 and Player.me.sawFlopPct < .5:
return amt + 1
return amt
| Python |
from utils import *
def postflopBot(player):
# player.postflopAggro, postflopTight, absHS, adjHS1, adjHS2, draws, Pot.size, pot.toCall(Player)
if player.position == 1 or (player.position == 2 and player.rightOpp.folded):
(aggression, tightness) = (.9 * player.postflopAggro, .9 * player.postflopTight)
else:
if Pot.toCall(player) == 0:
(aggression, tightness) = (1.2 * player.postflopAggro, 1.1 * player.postflopTight)
else:
(aggression, tightness) = (.7 * player.postflopAggro, 1.1 * player.postflopTight)
player.bigBet *= 1.3
raiseIndex = max(1 - player.adjHS1, 0)**.9 * max(1 - player.adjHS2, 0)**.9 + .3 * player.draws + .13
callIndex = max(.90 - player.adjHS1, 0) * max(.90 - player.adjHS2, 0) + .5 * player.draws - .03
if Game.playersLeft == 2:
callIndex += .05
if callIndex < .05:
callIndex = .05
if player.absHS < .06:
callIndex = 1
raiseChance = postflopRaise(player, raiseIndex, aggression)
if player.absHS < .15 and player.draws < .05:
raiseChance *= 1.1
elif player.absHS < .3 and player.draws < .05:
raiseChance *= 1.05
if Pot.size > 35:
raiseChance *= 1.2
callUpTo = (1 - callIndex) / callIndex * (tightness / MY_POSTFLOP_TIGHT)
x = random.random()
if x < raiseChance:
raiseAmt = (.3 * raiseChance + .1 * (x - raiseChance * .5)) * player.bigBet / .5
else:
raiseAmt = 0
if raiseIndex > .7:
raiseAmt *= (raiseIndex + .45)
return (raiseAmt, callUpTo)
def postflopRaise(player, raiseIndex, aggression):
if Game.street == 3:
player.slowplay = 0
if raiseIndex > .8:
return 1 - player.slowplay
elif raiseIndex + aggression > 1:
return min(1 - player.slowplay, raiseIndex)
elif raiseIndex + aggression > .8:
return max(player.bluff, .75 * raiseIndex)
return player.bluff
def strategize(player):
# char = (aggro, tight, slowplay, bluff, winPct, foldedPct)
char1 = [player.leftOpp.postflopAggro, player.leftOpp.postflopTight, player.leftOpp.slowplay, player.leftOpp.bluff, player.leftOpp.winPct, player.leftOpp.foldedPct]
char2 = [player.rightOpp.postflopAggro, player.rightOpp.postflopTight, player.rightOpp.slowplay, player.rightOpp.bluff, player.rightOpp.winPct, player.rightOpp.foldedPct]
if player.leftOpp.folded:
char = char2
elif player.rightOpp.folded:
char = char1
else:
char = [(i[0] + i[1]) / 2. for i in zip(char1, char2)]
# oppAggro = max(char[0] - .2 * (char[2] - .2) + .4 * (char[3] - .2), 0)
# oppTight = min(char[1] + .4 * (char[3] - .2), 1)
if Game.handId > 30:
oppTight = .5 * (char[1] + 1 - .8 * char[5])
oppAggro = .3 * char[4] + .7 * char[0]
else:
oppTight = OPP_POSTFLOP_TIGHT
oppAggro = OPP_POSTFLOP_AGGRO
player.slowplay = checkSlowplay(MY_SLOWPLAY + .5 * (oppAggro - OPP_POSTFLOP_AGGRO))
player.bluff = checkBluff(MY_BLUFF - .7 * (oppTight - OPP_POSTFLOP_TIGHT))
if Game.handId > 60:
player.postflopAggro = checkAggro(MY_POSTFLOP_AGGRO + .5 * (char[5] - .5))
player.postflopTight = checkTight(MY_POSTFLOP_TIGHT + (oppAggro - OPP_POSTFLOP_AGGRO) + 1.3 * (oppTight - .6))
player.bigBet = checkBigBet(MY_BIG_BET + 1.2 * (oppTight - OPP_POSTFLOP_TIGHT))
elif Game.handId > 25:
player.postflopAggro = checkAggro(MY_POSTFLOP_AGGRO + .3 * (char[5] - .5))
player.postflopTight = checkTight(MY_POSTFLOP_TIGHT + .4 * (oppAggro - OPP_POSTFLOP_AGGRO + oppTight - .6))
player.bigBet = checkBigBet(MY_BIG_BET + .5 * (oppTight - OPP_POSTFLOP_TIGHT))
def checkSlowplay(slowplay):
if slowplay < MY_SLOWPLAY - .05:
return MY_SLOWPLAY - .05
if slowplay > MY_SLOWPLAY + .15:
return MY_SLOWPLAY + .15
return slowplay
def checkBluff(bluff):
if bluff < MY_BLUFF - .1:
return MY_BLUFF - .1
if bluff > MY_BLUFF + .15:
return MY_BLUFF + .15
return bluff
def checkAggro(aggro):
if aggro < MY_POSTFLOP_AGGRO - .15:
return MY_POSTFLOP_AGGRO - .15
if aggro > MY_POSTFLOP_AGGRO + .15:
return MY_POSTFLOP_AGGRO + .15
return aggro
def checkTight(tight):
if tight < MY_POSTFLOP_TIGHT - .15:
return MY_POSTFLOP_TIGHT - .15
if tight > MY_POSTFLOP_TIGHT + .15:
return MY_POSTFLOP_TIGHT + .15
return tight
def checkBigBet(bigBet):
if bigBet < MY_BIG_BET - .15:
return MY_BIG_BET - .15
if bigBet > MY_BIG_BET + .15:
return MY_BIG_BET + .15
return bigBet
def postflopStrength(hand, community, street):
unknowns = 2
pockets = [[card.id for card in hand.cards], [255, 255]]
board = [card.id for card in community.cards]
unknowns += 5 - len(board)
board = board + [255] * (5 - len(board))
strength = parseEval(pokereval.poker_eval('holdem', pockets, board, iterations = 30000))
if street == 1:
strength = flopScaleWin(strength)
elif street == 2:
strength = turnScaleWin(strength)
elif street == 3:
strength = 1 - strength / 1000.
return strength
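# Usage sketch: holding As Ks against one unknown hand on an Ah Kd 7c flop,
# the underlying call is (ids per this module's Card numbering)
#   pokereval.poker_eval('holdem', [[51, 50], [255, 255]], [38, 24, 5, 255, 255], iterations = 30000)
# where 255 is the nocard() placeholder for unknown pocket and board cards.
# The local suit order differs from the wrapper's map, but since every card
# is relabeled the same way the resulting equities are unchanged.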
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import json
print ">> Creating index.js"
url_base = "http://flightgear-gallery.googlecode.com/svn/trunk/v2.0"
image_path = "v2.0/thumbs/"
image_list = []
for img in os.listdir(image_path):
if not img.startswith('.'):
image_list.append( {'image': '%s/images/%s' % (url_base, img), 'thumb': '%s/thumbs/%s' % (url_base, img)} )
#print img
json_str = json.dumps( {'gallery': image_list} )
json_file = open("./gallery.js", "w")
json_file.write( json_str )
json_file.close()
print " done"
| Python |
import os
import sys
"""
cachebuilder.py by Peter Balogh palexanderbalogh@yahoo.com
to be used in conjunction with CacheBuilder.as
part of a utility to
a) load a large number of files into the browser cache, so that you can pre-load files
for instant access later, and
b) provide real, accurate feedback during this preloading process, using knowledge of file sizes and containing folders
this script gets invoked -- either by ant or at the command line --
with a series of arguments that tell it which folders to scan for files.
It scans those folders recursively, so nested folders are picked up
automatically -- you don't need to specify each one.
"""
totalsize = 0 # the total size of all files we encounter
xmlstring = ""
def walkfun ( arg, current_directory, file_list):
global folderxmlstring
global foldersize
if current_directory.find( ".svn" ) == -1:
for fi in file_list:
f = os.path.join ( current_directory, fi )
if not os.path.isdir( f ):
print "for file " + f + ", in " + arg + ", size is ",
print os.path.getsize( f )
folderxmlstring += "<file name='" + f + "' size='" + str( os.path.getsize( f ) ) + "' />"
foldersize += os.path.getsize( f )
global totalsize
totalsize += foldersize
totalxmlstring = ""
# don't start with 0, since the first arg would always be the name of this script....
for foldername in sys.argv[ 1: ]:
foldersize = 0
folderxmlstring = ""
os.path.walk( foldername, walkfun, foldername ) # recursively dig into each folder
folderxmlstring = "<folder name='" + foldername + "' size='" + str( foldersize ) + "' >" + folderxmlstring + "</folder>"
print "folderxmlstring=", folderxmlstring
totalxmlstring += folderxmlstring
totalxmlstring = "<files size='" + str( totalsize ) + "' >" + totalxmlstring + "</files>"
print "totalsize = ", totalsize
print "totalxmlstring = ", totalxmlstring
f = open( "preloadcache.xml", "w" )
f.write( totalxmlstring )
f.close()
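# Example output shape for a single scanned folder ("assets" and its one
# 1024-byte file are hypothetical):
#   <files size='1024' ><folder name='assets' size='1024' ><file name='assets/logo.swf' size='1024' /></folder></files>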
| Python |
from glob import glob
filelist = [ i.split(".as")[0] for i in glob( "*.as" ) if i.find("ListOf") == -1 and i != "FunctionNode.as"]
arraystring = '"'
arraystring += '",\n \t\t"'.join( filelist )
print arraystring
outfile = open("ListOfNodes.as", "w" )
outfile.write("""
/**
* @author pbalogh
*/
class functionnodes.ListOfNodes {
public static var LIST = [
""")
outfile.write( arraystring )
outfile.write( """\"];
public function ListOfNodes()
{
var fn:Function;
""")
outfile.write( "".join( [ "\t\tfn = functionnodes." + i + ";\n" for i in filelist] ) )
outfile.write( """
}
}
""")
outfile.close()
| Python |
from glob import glob
filelist = [ i.split(".as")[0] for i in glob( "*.as" ) if i.find("ListOf") == -1 and i != "FunctionNode.as"]
arraystring = '"'
arraystring += '",\n \t\t"'.join( filelist )
print arraystring
outfile = open("ListOfNodes.as", "w" )
outfile.write("""
/**
* @author pbalogh
*/
package functions
{
public class ListOfNodes
{
public static var LIST : Array = [
""")
outfile.write( arraystring )
outfile.write( """\"];
public function ListOfNodes()
{
var fn : Class;
""")
outfile.write( "".join( [ "\t\t\tfn = functions." + i + ";\n" for i in filelist] ) )
outfile.write( """
}
}
}
""")
outfile.close()
| Python |
#! /bin/env python
###################################################################################
# #
# Stitch v3.0, #
# http://www.jportsmouth.com/code/Stitch/stitch.html #
# Copyright (C) 2009-2010 Jamie Portsmouth (jamports@mac.com) #
# Multithreading contributed by Morgan Tørvolt (morgan@torvolt.com) #
# #
# Stitch is a Python script to assemble large Google maps. A rectangle of #
# latitude and longitude is specified, together with a desired number of pixels #
# along the long edge. The appropriate tiles are then automatically downloaded #
# and stitched together into a single map. #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###################################################################################
import sys
import os
import urllib
import urllib2
import math
import wx
import wx.html
import threading
import Queue
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFilter
from PIL import ImageMath
#########################################################################################################
# Current Google map URLS
# If the script reports that map URLS are invalid, replace the code section below
# with the updated version from http://www.jportsmouth.com/code/Stitch/stitch.html
##################### start of map URL code section ###################################################
NRM_URL = "http://mt0.google.com/vt/lyrs=m@134&hl=en&src=api&x=0&y=0&z=0&s="
SAT_URL = "http://khm0.google.com/kh/v=72&x=0&y=0&z=0&s=&token=122724"
PHY_URL = "http://mt0.google.com/vt/lyrs=t@125,r@134&hl=en&src=api&x=0&y=0&z=0&s="
SKY_URL = "http://mw1.google.com/mw-planetary/sky/skytiles_v1/0_0_0.jpg"
###################### end of map URL code section ####################################################
# Queue. We drop all the urls in this queue
grabPool = Queue.Queue( 0 )
# Background threads. We start a few of these
class ThreadingClass( threading.Thread ):
def __init__(self):
self._stopevent = threading.Event()
threading.Thread.__init__(self)
def join(self, timeout=None):
self._stopevent.set()
threading.Thread.join(self, timeout)
def run( self ):
self.serverSelectCounter = 0
# Run until termination event. After filling the queue, we can just wait until the queue is empty.
while not self._stopevent.isSet():
try:
tile = grabPool.get(True, 0.001)
except:
continue
url = tile[0]
output = './tiles/tile_' + tile[1] + '.jpg'
# Fix up the url for servers that had %s written into them for load balancing
gotTile = False
if( url.find( "%s" ) != -1 ):
for x in range(4):
# format a fresh copy each attempt: substituting into url itself would
# consume the %s placeholder on the first pass and break later retries
attempt = url % ( self.serverSelectCounter % 4 )
self.serverSelectCounter = self.serverSelectCounter + 1
gotTile = self.download(attempt, output)
if gotTile: break
# Otherwise url does not need to be fixed up
else:
gotTile = self.download(url, output)
if (gotTile != True):
print "(Map URL " + url + " might be invalid or a server might be down. Visit http://www.jportsmouth.com/code/Stitch/stitch.html and update the map URL code section)"
grabPool.task_done()
def download( self, url, output ):
try:
urllib.urlretrieve( url, output )
return True
except:
return False
class StitchedMap:
def __init__(self, lat, lon, res, zoom, maptype):
self.lat = lat
self.lon = lon
self.latVal = (float(lat[0]), float(lat[1]))
self.lonVal = (float(lon[0]), float(lon[1]))
if (self.latVal[0] >= self.latVal[1]):
print 'Invalid latitude range. Aborting.'
return
if (self.lonVal[0] >= self.lonVal[1]):
print 'Invalid longitude range. Aborting.'
return
self.res = res
self.zoom = zoom # understood to be -1 if resolution specified
self.maptype = maptype
self.MAP_MODE_PREFIX = self.makeDummyUrl(NRM_URL.split('&')[0])
self.SAT_MODE_PREFIX = self.makeDummyUrl(SAT_URL.split('&')[0])
self.PHY_MODE_PREFIX = self.makeDummyUrl(PHY_URL.split('&')[0])
self.SKY_MODE_PREFIX = SKY_URL.replace('0_0_0.jpg','')
def makeDummyUrl(self, url):
# Some string hacking to replace e.g. "http://mt0.google.com..." with "http://mt%s.google.com..."
# so that later we can replace %s with an integer 0-4 for load balancing
server_url = url.split(".google")
server_name = server_url[0][0:len(server_url[0])-1]
dummy_url = server_name + "%s.google" + server_url[1]
return dummy_url
def generate(self):
c0 = "(" + self.lat[0] + ", " + self.lon[0] + ")"
c1 = "(" + self.lat[1] + ", " + self.lon[1] + ")"
print '\n######################################################################'
print "Making " + self.maptype + " map defined by (lat, lon) corners " + c0 + " and " + c1
EX = math.fabs(float(self.lon[1]) - float(self.lon[0]))
EY = math.fabs(float(self.lat[1]) - float(self.lat[0]))
print 'Requested map (lng, lat) size in degrees is: ', str(EX), str(EY)
# compute which 256x256 tiles we need to download
self.computeTileMatrix()
if (self.zoom<0) or (self.zoom>19):
print 'Invalid zoom level (' + str(self.zoom) + '). Aborting.'
return
print 'Zoom level: ', str(self.zoom)
# Connect to Google maps and download tiles
self.download()
# Finally stitch the downloaded maps together into the final big map
return self.stitch()
def computeTileRange(self):
if self.zoom == -1:
# find a zoom level which gives approximately the desired number of pixels along the long edge
EX = math.fabs(float(self.lon[1]) - float(self.lon[0]))
EY = math.fabs(float(self.lat[1]) - float(self.lat[0]))
aspect = 2.0*EY/EX
if (EX>EY):
self.ntiles_x = long( float(self.res)/256 + 1 )
self.ntiles_y = long( aspect*float(self.res)/256 + 1 )
else:
self.ntiles_y = long( float(self.res)/256 + 1 )
self.ntiles_x = long( float(self.res)/(aspect*256) + 1 )
log2of10 = 3.321928094887362
self.zoom = log2of10 * math.log10( max(self.ntiles_x, self.ntiles_y) * 360.0/max(EX, EY) )
self.zoom = long(self.zoom)
# In satellite mode, the zoom level in the html query goes from 0 to 14 inclusive,
# 0 being the lowest res (i.e. the map of the world).
# In the other modes, the zoom level goes from -2 to 17 inclusive, 17 being the map of the world.
if (self.maptype != 'satellite'):
self.htmlzoom = 17 - self.zoom
# Google maps uses the Mercator projection, so we need to convert the given latitudes
# into Mercator y-coordinates. Google takes the vertical edges of the map to be at
# y = +/-pi, corresponding to latitude +/-85.051128.
# It is convenient therefore to compute y/2 for each latitude. We can then
# just use the y coord as if it were a latitude, with the top edges at +/-90.0 "degrees".
l0 = self.latVal[0]
l1 = self.latVal[1]
self.yVal = (self.latitudeToMercator(l0), self.latitudeToMercator(l1))
# get the corner tile
tileA = self.getTile(self.lonVal[0], self.yVal[0])
tileB = self.getTile(self.lonVal[1], self.yVal[1])
return [tileA, tileB]
# Allow phi in range [-90.0, 90.0], return in same range
def latitudeToMercator(self, phi):
# If the given latitude falls outside of the +/-85.051128 range, we clamp it back into range.
phimax = 85.05112
if phi>phimax: phi = phimax
elif phi<-phimax: phi = -phimax
# find sign
sign = 0.0
if phi>=0.0: sign = 1.0
else: sign = -1.0
# convert to rad
phi *= math.pi/180.0
# make positive for Mercator formula
phi = math.fabs(phi)
# find [0,pi] range Mercator coords
y = math.log( math.tan(phi) + 1.0/math.cos(phi) )
# put back sign and scale by factor of 2
y *= 0.5*sign
# convert to degrees
y *= 180.0/math.pi
# clamp to [-90.0, 90.0]
if y>90.0: y = 90.0
elif y<-90.0: y = -90.0
return y
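# For example, latitudeToMercator(85.05112) comes out at (almost exactly) 90.0
# and latitudeToMercator(0.0) at 0.0, so callers can treat the result exactly
# like a latitude in [-90.0, 90.0].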
def computeTileMatrix(self):
tileRange = self.computeTileRange()
tileA = tileRange[0]
tileB = tileRange[1]
tileAstr = '(' + str(tileA[0]) + ',' + str(tileA[1]) + ')'
tileBstr = '(' + str(tileB[0]) + ',' + str(tileB[1]) + ')'
print 'Corner tile indices: ' + tileAstr + ', ' + tileBstr
self.nX = abs(tileB[0] - tileA[0]) + 1
self.nY = abs(tileB[1] - tileA[1]) + 1
print 'Total number of tiles to download: ' + str(self.nX*self.nY)
# Make a nX*nY matrix of the tiles (i,j) we need, with (0,0) in the lower-left.
# The google tile indices (lng, lat) corresponding to (i,j) (at the given zoom level) are stored
# in each tile.
# We need the fact that in satellite mode, the lng, lat tile indices increase with both longitude
# and latitude, but in the other modes, the lat index decreases with latitude
self.tiles = []
for i in range(0, self.nX):
lng = tileA[0] + i
column = []
for j in range(0, self.nY):
lat = 0
if self.maptype == 'satellite':
lat = tileA[1] + j
code = self.genSatelliteTileCode(lng, lat)
else:
lat = tileA[1] - j
code = ''
status = True
tile = [lng, lat, code, status]
column.append(tile)
self.tiles.append(column)
def checkURL(self, url):
try:
urllib2.urlopen(url).read()
except:
return False
return True
def download(self):
if os.path.exists("./tiles") != True:
os.mkdir("./tiles")
print ''
n = 1
for column in self.tiles:
for tile in column:
tilePath = './tiles/tile_' + self.makeIdentifier(tile) + '.jpg'
# If the tile with the expected identifier suffix already exists in the tiles directory,
# assume that is the one we want (allows execution to continue later if interrupted).
if os.path.exists(tilePath):
print 'Using existing tile ' + str(n) + '/' + str(self.nX*self.nY) + (
', (i, j) = (' + str(tile[0]) + ',' + str(tile[1]) + ')' )
else:
mapurl = ''
if self.maptype == 'map': mapurl = self.gen_MAP_URL(tile)
elif self.maptype == 'satellite': mapurl = self.gen_SAT_URL(tile)
elif self.maptype == 'terrain': mapurl = self.gen_PHY_URL(tile)
elif self.maptype == 'sky': mapurl = self.gen_SKY_URL(tile)
else:
print 'Unknown map type! Quitting. Humph'
sys.exit()
if mapurl:
print 'Downloading tile ' + str(n) + '/' + str(self.nX*self.nY) + ', (i, j) = (' + str(tile[0]) + ',' + str(tile[1]) + ')'
grabPool.put( [ mapurl, self.makeIdentifier(tile) ] )
else:
print 'Tile ' + str(n) + '/' + str(self.nX*self.nY) + (
', (i, j) = (' + str(tile[0]) + ',' + str(tile[1]) + ') is not stored by Google, and will be rendered black')
tile[3] = False
n += 1
grabPool.join()
def makeIdentifier(self, tile):
identifier = self.maptype + '_' + str(self.zoom) + '_'
if self.maptype == 'satellite':
identifier += tile[2]
else:
identifier += str(tile[0]) + '_' + str(tile[1])
return identifier
def getTile(self, lng, lat):
nTile = 1 << self.zoom
# note, assume ranges are lng = (-180,180), lat = (-90,90)
tilex = long(nTile * (float(lng) + 180.0)/360.0)
tiley = long(nTile * (float(lat) + 90.0 )/180.0)
if tilex == nTile: tilex -= 1
if tilex<0: tilex = 0
if tiley == nTile: tiley -= 1
if tiley<0: tiley = 0
# the hybrid and terrain modes index the tiles descending with latitude
if self.maptype != 'satellite':
tiley = nTile - 1 - tiley
tile = (tilex, tiley)
return tile
def gen_MAP_URL(self, tile):
x = str(tile[0])
y = str(tile[1])
url = self.MAP_MODE_PREFIX + '&x=' + x + '&y=' + y + '&zoom=' + str(self.htmlzoom)
return url
def gen_SAT_URL(self, tile):
code = tile[2]
url = self.SAT_MODE_PREFIX + '&t=' + code
return url
def gen_PHY_URL(self, tile):
x = str(tile[0])
y = str(tile[1])
url = self.PHY_MODE_PREFIX + '&x=' + x + '&y=' + y + '&zoom=' + str(self.htmlzoom)
return url
def gen_SKY_URL(self, tile):
x = str(tile[0])
y = str(tile[1])
url = self.SKY_MODE_PREFIX + x + '_' + y + '_' + str(self.zoom) + '.jpg'
return url
def convertToBinary(self, x, n):
b = ''
for i in range(0,n):
b = str((x >> i) & 1) + b
return b
def genSatelliteTileCode(self, x, y):
# In satellite mode, the tiles are indexed by a sequence of the letters q, r, s, t, where
# there are 4^zoom tiles to index at each level. This works as indicated below:
#
# zoom 0 zoom1 zoom 2 etc...
#
# t tq tr tqq tqr trq trr
# tt ts tqt tqs trt trs
#
# ttq ttr tsq tsr
# ttt tts tst tss
nTile = 1 << self.zoom
if ((y < 0) or (nTile-1 < y)):
return 'x'
if ((x < 0) or (nTile-1 < x)):
x = x % nTile
if (x < 0):
x += nTile
c = 't'
# convert each to zoom-digit binary representation
bx = self.convertToBinary(x, self.zoom)
by = self.convertToBinary(y, self.zoom)
# q r s t
# left(0)/right(1) (x) 0 1 1 0
# down(0)/up(1) (y) 1 1 0 0
for i in range(0, self.zoom):
if (bx[i] == '0'):
if(by[i] == '0'):
c += 't'
else:
c += 'q'
else:
if(by[i] == '0'):
c += 's'
else:
c += 'r'
return c
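# Worked example: at zoom 2, tile (x=2, y=3) has bx='10' and by='11'; the bit
# pairs map to 'r' (1,1) then 'q' (0,1), so the code is 'trq' -- the third
# tile of the top row in the diagram above.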
def getCoordsOfTile(self, tile):
nTile = 1 << self.zoom
width = 360.0/float(nTile)
height = 180.0/float(nTile)
tiley = tile[1]
if self.maptype != 'satellite':
tiley = nTile - 1 - tiley
X = -180.0 + float(tile[0]) * width
Y = -90.0 + float(tiley) * height
# coords of corners of tile
LL = (X, Y)
UR = (X+width, Y+height)
return [LL, UR]
def crop(self, Map):
# Crop off the excess space.
# Get (lat, lon) in degrees of corners of image
tileA = self.tiles[0][0]
coordsA = self.getCoordsOfTile(tileA)
tileB = self.tiles[self.nX-1][self.nY-1]
coordsB = self.getCoordsOfTile(tileB)
LL = (coordsA[0][0], coordsA[0][1])
UR = (coordsB[1][0], coordsB[1][1])
# (ax, ay) and (bx, by) are the image coords of the corners of the desired map:
ax = (self.lonVal[0] - LL[0]) / (UR[0] - LL[0])
ay = (self.yVal[0] - LL[1]) / (UR[1] - LL[1])
bx = (self.lonVal[1] - LL[0]) / (UR[0] - LL[0])
by = (self.yVal[1] - LL[1]) / (UR[1] - LL[1])
ax = int(self.pX * ax)
ay = int(self.pY * (1.0-ay))
bx = int(self.pX * bx)
by = int(self.pY * (1.0-by))
# clamp to be safe
if ax>=self.pX: ax=self.pX-1
if ax<0: ax=0
if bx>=self.pX: bx=self.pX-1
if bx<0: bx=0
if ay>=self.pY: ay=self.pY-1
if ay<0: ay=0
if by>=self.pY: by=self.pY-1
if by<0: by=0
box = [ax, by, bx, ay]
return Map.crop(box)
def stitch(self):
print '\nStitching tiles'
self.pX = 256 * self.nX
self.pY = 256 * self.nY
mode = "RGB"
Map = Image.new(mode, (self.pX, self.pY))
for i in range(0, self.nX):
for j in range(0, self.nY):
tile = self.tiles[i][j]
if tile[3] == False:
continue
path = './tiles/tile_' + self.makeIdentifier(tile) + '.jpg'
# pixel coords of top left corner of this tile
cX = 256 * i
cY = self.pY - 256 * (j+1)
im = Image.open(path)
Map.paste(im, (cX, cY))
cropMap = self.crop(Map)
# give the map file a semi-unique name, derived from the lower-left tile coords
mappath = './stitched_' + self.makeIdentifier(self.tiles[0][0]) + '.jpg'
cropMap.save(mappath)
print 'Saved stitched map ' + mappath
print 'Finished.'
return mappath
############################ wxPython GUI interface ############################
# Frame dimensions
wX = 360
wY = 430
# border width
bW = 20
# coord panel height
hY = 180
class MainPanel(wx.Panel):
def OnSetFocus(self, evt):
print "OnSetFocus"
evt.Skip()
def OnKillFocus(self, evt):
print "OnKillFocus"
evt.Skip()
def OnWindowDestroy(self, evt):
print "OnWindowDestroy"
evt.Skip()
def __init__(self, parent, id):
self.parent = parent
pos = wx.Point(bW,bW)
size = wx.Size(wX-2*bW, hY)
hspace = 4
wx.Panel.__init__(self, parent, -1, pos, size)
# Lat/Lng direct entry section
heading_LL = wx.StaticText(self, -1, "Lower left")
heading_UR = wx.StaticText(self, -1, "Upper right")
fW = 125
lat_label = wx.StaticText(self, -1, "Latitude")
lon_label = wx.StaticText(self, -1, "Longitude")
self.latLL_text = wx.TextCtrl(self, -1, "-90.0", size=(fW, -1))
self.latLL_text.SetInsertionPoint(0)
self.Bind( wx.EVT_TEXT, self.EvtTextChanged, self.latLL_text)
self.lonLL_text = wx.TextCtrl(self, -1, "-180.0", size=(fW, -1))
self.lonLL_text.SetInsertionPoint(0)
self.Bind( wx.EVT_TEXT, self.EvtTextChanged, self.lonLL_text)
self.latUR_text = wx.TextCtrl(self, -1, "90.0", size=(fW, -1))
self.latUR_text.SetInsertionPoint(0)
self.Bind( wx.EVT_TEXT, self.EvtTextChanged, self.latUR_text)
self.lonUR_text = wx.TextCtrl(self, -1, "180.0", size=(fW, -1))
self.lonUR_text.SetInsertionPoint(0)
self.Bind( wx.EVT_TEXT, self.EvtTextChanged, self.lonUR_text)
coord_sizer = wx.FlexGridSizer(cols=3, hgap=4*hspace, vgap=2*hspace)
coord_sizer.AddMany([ (0, 0), heading_LL, heading_UR,
lat_label, self.latLL_text, self.latUR_text,
lon_label, self.lonLL_text, self.lonUR_text,
(0, 0), (0,0), (0,0) ])
# Lat/Lng code entry section
code_label = wx.StaticText(self, -1, "Code: ")
self.coordCode = wx.TextCtrl(self, -1, "", size=(fW*2, -1))
self.coordCode.SetInsertionPoint(0)
self.Bind( wx.EVT_TEXT, self.EvtTextChanged, self.coordCode)
useCode_cb = wx.CheckBox(self, -1, "Use code?", wx.DefaultPosition)
self.Bind( wx.EVT_CHECKBOX, self.EvtCoordCheckBox, useCode_cb)
self.useCode = False
code_sizer = wx.FlexGridSizer(cols=2, hgap=3*hspace, vgap=3*hspace)
code_sizer.AddMany([ useCode_cb, (0,0),
code_label, self.coordCode,
(0, 0), (0,0) ])
# 'Specify resolution' option enable checkbox
self.useRes_rb = wx.RadioButton(self, -1, "Specify resolution", wx.DefaultPosition)
self.useRes_rb.SetValue(True)
self.Bind( wx.EVT_RADIOBUTTON, self.EvtResolutionRadioButton, self.useRes_rb)
self.useResolution = True
res_label = wx.StaticText(self, -1, "Approx. number of pixels: ")
self.res_text = wx.TextCtrl(self, -1, "512", size=(fW/2, -1))
self.res_text.SetInsertionPoint(0)
self.Bind( wx.EVT_TEXT, self.EvtTextChanged, self.res_text)
res_sizer = wx.FlexGridSizer(cols=2, hgap=3*hspace, vgap=3*hspace)
res_sizer.AddMany([ self.useRes_rb, (0,0),
res_label, self.res_text,
(0, 0), (0,0) ])
# 'Specify zoom level' option enable checkbox and entry
self.useZoom_rb = wx.RadioButton(self, -1, "Specify zoom level", wx.DefaultPosition)
self.useZoom_rb.SetValue(False)
self.Bind( wx.EVT_RADIOBUTTON, self.EvtZoomRadioButton, self.useZoom_rb)
self.useZoomLevel = False
self.zoomInfo_label = wx.StaticText(self, -1, "(lowest = 0, highest = 19)")
zoom_label = wx.StaticText(self, -1, "Zoom level: ")
self.zoomLevel_text = wx.TextCtrl(self, -1, "5", size=(fW/2, -1))
self.zoomLevel_text.SetInsertionPoint(0)
self.Bind( wx.EVT_TEXT, self.EvtTextChanged, self.zoomLevel_text)
self.zoomLevel_text.Enable(False)
zoom_sizer = wx.FlexGridSizer(cols=2, hgap=3*hspace, vgap=3*hspace)
zoom_sizer.AddMany([ self.useZoom_rb, self.zoomInfo_label,
zoom_label, self.zoomLevel_text,
(0, 0), (0,0) ])
# Map type selection radio box
self.radioList = ['map', 'satellite', 'terrain', 'sky']
rb = wx.RadioBox(self, -1, "Map type", wx.DefaultPosition, wx.DefaultSize,
self.radioList, 3, wx.RA_SPECIFY_COLS)
self.Bind( wx.EVT_RADIOBOX, self.EvtRadioBox, rb)
self.maptype = 'map'
rbsizer = wx.BoxSizer(wx.HORIZONTAL)
rbsizer.Add(rb, 0, wx.GROW|wx.ALL, hspace)
# Tiles info
self.tilesInfo_label = wx.StaticText(self, -1, '', size=(fW, -1))
ziFont = wx.Font(10, wx.NORMAL, wx.NORMAL, wx.BOLD, False, u'Courier')
self.tilesInfo_label.SetFont(ziFont)
# Run button
b = wx.Button(self, -1, "Run")
self.Bind(wx.EVT_BUTTON, self.OnRun, b)
bsizer = wx.BoxSizer(wx.HORIZONTAL)
bsizer.Add(b, 0, wx.GROW|wx.ALL, hspace)
# UI layout
border = wx.BoxSizer(wx.VERTICAL)
border.Add(coord_sizer, 0, wx.GROW)
border.Add(code_sizer, 0, wx.GROW)
border.Add(res_sizer, 0, wx.GROW)
border.Add(zoom_sizer, 0, wx.GROW)
border.Add(rbsizer, 0, wx.GROW)
border.AddSpacer(15)
border.Add(self.tilesInfo_label, 0, wx.GROW)
border.AddSpacer(5)
border.Add(bsizer, 0, wx.GROW)
self.SetSizer(border)
self.SetAutoLayout(True)
border.Fit(self)
self.updateMapParams()
def updateMapParams(self):
lat = None
lon = None
if self.useCode == True:
coords = self.coordCode.GetValue().split('_')
if len(coords) != 4:
print 'Code cannot be parsed into coordinates, unable to generate map.'
return False
# Ensure that the 0th corner is lower left, even if the user didn't make it so
try:
lat = (coords[1], coords[3])
if float(lat[1]) < float(lat[0]):
lat = (coords[3], coords[1])
lon = (coords[0], coords[2])
if float(lon[1]) < float(lon[0]):
lon = (coords[2], coords[0])
except:
print 'Code cannot be parsed into coordinates, unable to generate map.'
return False
else:
# Ensure that the 0th corner is lower left, even if the user didn't make it so
lat = (self.latLL_text.GetValue(), self.latUR_text.GetValue())
try:
if float(lat[1]) < float(lat[0]):
lat = (self.latUR_text.GetValue(), self.latLL_text.GetValue())
lon = (self.lonLL_text.GetValue(), self.lonUR_text.GetValue())
if float(lon[1]) < float(lon[0]):
lon = ( self.lonUR_text.GetValue(), self.lonLL_text.GetValue())
except:
print 'Invalid longitude/latitude values, unable to generate map.'
return False
zoomLevel = -1
if self.useZoom_rb.GetValue():
try:
zoomLevel = int(self.zoomLevel_text.GetValue())
except:
pass
res = 0
if self.useRes_rb.GetValue():
try:
res = int(self.res_text.GetValue())
except:
pass
self.gmap = StitchedMap(lat, lon, res, zoomLevel, self.maptype)
tileRange = self.gmap.computeTileRange()
tileA = tileRange[0]
tileB = tileRange[1]
nX = abs(tileB[0] - tileA[0]) + 1
nY = abs(tileB[1] - tileA[1]) + 1
tileinfo = ' Will download ' + str(nX*nY) + ' tiles: (' + str(tileA[0]) + ',' + str(tileA[1]) + ') to (' + str(tileB[0]) + ',' + str(tileB[1]) + ')'
self.tilesInfo_label.SetLabel(tileinfo)
return True
def OnRun(self, evt):
if self.updateMapParams():
self.gmap.generate()
def EvtRadioBox(self, event):
maptype = ''
i = event.GetInt()
if i == 0: self.maptype = 'map'
elif i == 1: self.maptype = 'satellite'
elif i == 2: self.maptype = 'terrain'
else: self.maptype = 'sky'
self.updateMapParams()
def EvtCoordCheckBox(self, event):
self.useCode = event.Checked()
if self.useCode:
self.coordCode.Enable(True)
else:
self.coordCode.Enable(False)
self.updateMapParams()
def EvtResolutionRadioButton(self, event):
self.zoomLevel_text.Enable(False)
self.res_text.Enable(True)
self.updateMapParams()
def EvtZoomRadioButton(self, event):
self.zoomLevel_text.Enable(True)
self.res_text.Enable(False)
self.updateMapParams()
def EvtTextChanged(self, event):
if self.useZoom_rb.GetValue():
zoomLevel = 0
try:
zoomLevel = int(self.zoomLevel_text.GetValue())
except:
pass
minZoom = 0
maxZoom = 19
if (zoomLevel<minZoom):
zoomLevel = minZoom
self.zoomLevel_text.SetValue(str(zoomLevel))
if (zoomLevel>maxZoom):
zoomLevel = maxZoom
self.zoomLevel_text.SetValue(str(zoomLevel))
self.updateMapParams()
class MainWindow(wx.Frame):
def __init__(self, parent, id, title):
N = 10
print "*************** Stitch v3.0 ***************"
print "Starting " + str(N) + " download threads"
self.threads = []
for x in range(N):
thread = ThreadingClass()
thread.start()
self.threads.append(thread)
wx.Frame.__init__(self, parent, wx.ID_ANY, title, wx.DefaultPosition, wx.Size(wX, wY))
controlPanel = MainPanel(self, -1)
def __del__(self):
for thread in self.threads:
thread.join()
print "Terminated download threads. Quitting."
# Entry point
if __name__ == '__main__':
app = wx.PySimpleApp()
frame = MainWindow(None, -1, "Stitch")
frame.Show(True)
app.MainLoop()
| Python |
import urllib
import cStringIO
import Image
import math
def get_static_google_map(filename_wo_extension, center=None, zoom=None, imgsize=(500,500), imgformat="jpeg",
maptype="roadmap", markers=None ):
"""retrieve a map (image) from the static google maps server
See: http://code.google.com/apis/maps/documentation/staticmaps/
Creates a request string with a URL like this:
http://maps.google.com/maps/api/staticmap?center=Brooklyn+Bridge,New+York,NY&zoom=14&size=512x512&maptype=roadmap
&markers=color:blue|label:S|40.702147,-74.015794&sensor=false"""
# assemble the URL
request = "http://maps.google.com/maps/api/staticmap?" # base URL, append query params, separated by &
# if center and zoom are not given, the map will show all marker locations
if center != None:
request += "center=%s&" % center
#request += "center=%s&" % "40.714728, -73.998672" # latitude and longitude (up to 6-digits)
#request += "center=%s&" % "50011" # could also be a zipcode,
#request += "center=%s&" % "Brooklyn+Bridge,New+York,NY" # or a search term
if zoom != None:
request += "zoom=%i&" % zoom # zoom 0 (all of the world scale) to 22 (single buildings scale)
request += "size=%ix%i&" % (imgsize) # tuple of ints, up to 640 by 640
request += "format=%s&" % imgformat
request += "maptype=%s&" % maptype # roadmap, satellite, hybrid, terrain
# add markers (location and style)
if markers != None:
for marker in markers:
request += "%s&" % marker
#request += "mobile=false&" # optional: mobile=true will assume the image is shown on a small screen (mobile device)
request += "sensor=false&" # must be given, deals with getting loction from mobile device
print request
urllib.urlretrieve(request, filename_wo_extension+"."+imgformat) # Option 1: save image directly to disk
# Option 2: read into PIL
web_sock = urllib.urlopen(request)
imgdata = cStringIO.StringIO(web_sock.read()) # constructs a StringIO holding the image
try:
PIL_img = Image.open(imgdata)
# if this cannot be read as an image, it's probably an error message from the server
except IOError:
print "IOError:", imgdata.read() # print error (or it may return a image showing the error"
# show image
#else:
#PIL_img.show()
#PIL_img.save(filename_wo_extension+".png", "PNG") # save as PNG
offset=268435456
offset=float(offset)
radius=offset/math.pi
def LToX(x):
return round(offset+radius*x*math.pi/180)
def LToY(y):
return round(offset-radius*math.log((1+math.sin(y*math.pi/180))/(1-math.sin(y*math.pi/180)))/2)
def XToL(x):
return ((round(x)-offset)/radius)*180/math.pi
def YToL(y):
return (math.pi/2-2*math.atan(math.exp((round(y)-offset)/radius)))*180/math.pi
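# Round-trip sketch: XToL/YToL invert LToX/LToY up to the one-pixel round(),
# and at this scale (offset = 2**28 pixels) a pixel is well under 1e-5 degrees:
#   abs(XToL(LToX(4.90)) - 4.90) < 1e-5     # longitude round trip
#   abs(YToL(LToY(52.38)) - 52.38) < 1e-5   # latitude round trip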
# X = X pixel offset of new map center from old map center
# Y = Y pixel offset of new map center from old map center
# x = Longitude of map center
# y = Latitude of map center
# z = Zoom level
# result.x = Longitude of adjusted map center
# result.y = Latitude of adjusted map center
def adjust(X=0,Y=0,x=4.90,y=52.38,z=12):
newX = XToL(LToX(x) + (X<<(20-z)))
newY = YToL(LToY(y) + (Y<<(21-z)))
return (newX,newY)
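# Sketch: a positive X pixel offset shifts the returned center east, e.g.
#   adjust(X=100, Y=0, x=4.90, y=52.38, z=12)
# moves the longitude slightly east of 4.90 while the latitude stays at
# (essentially) 52.38, since Y=0 just round-trips it through the projection.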
if __name__ == '__main__':
# make a map around a center
get_static_google_map("amsterdam", center="52.38,4.90", zoom=12, imgsize=(800,800),
imgformat="png", maptype="satellite" )
| Python |
from pythonmap import *
# Coordinates of center
center = ("52.38","4.90")
# Zoom level
zoom = 12
# Size of returned map
mapsize = (640,640)
# Download and save map
get_static_google_map("file_name", "%s,%s"%center, zoom, mapsize, imgformat="jpg", maptype="satellite")
# Bounding box coordinates
# adjust() takes (lon, lat); X offsets come from the map width, Y offsets from its height
lowerRight = adjust(-mapsize[0]/2-50,-mapsize[1]/2,float(center[1]),float(center[0]),zoom)
upperLeft = adjust(mapsize[0]/2+50,mapsize[1]/2,float(center[1]),float(center[0]),zoom)
| Python |
# Collect Flickr Data with Python FlickrAPI
# the flickrapi directory must be added to the system path first
# Scientific Visualization and Virtual Reality
# Wouter Josemans and Ninghang Hu
# Oct 21, 2010
import time
import datetime
import sys
sys.path.append('flickrapi-1.4.2./')
import flickrapi
sys.path.append('GetGoogleMap/')
from pythonmap import *
# Map Parameters
filename = 'AmsterdamFlickr'
zoom = 10
START = datetime.datetime(2005,01,01)
END = datetime.datetime(2010,10,19)
delta = datetime.timedelta(days=7)
# Find bounding box lat/lon coordinates of map
bboxP = "4.810638427734375,52.3154051429798,4.9774932861328125,52.3923633970718"
lat = ("52.3154051429798","52.3923633970718")
lon = ("4.810638427734375","4.9774932861328125")
# Download google map
#gmap = stitch.StitchedMap(lat,lon,-1,zoom,'satellite')
#map_name = gmap.generate()
#print "google map completed"
# start flickr
flickr = flickrapi.FlickrAPI('6b03a335bca86810f4ba931e8ef646ed')
file1 = open(filename+'.txt','w')
# search flickr photos for each time span
week = 1
start_date = START
end_date = start_date + delta
while end_date < END :
print '-----------'+str(week)+'/'+str((END-START).days/7)+'-----------'
# get number of pages/perpage
photos=flickr.photos_search(min_taken_date="%s-%s-%s" % (start_date.year,start_date.month,start_date.day),\
max_taken_date="%s-%s-%s" % (end_date.year,end_date.month,end_date.day),\
bbox=bboxP,\
page='1',\
perpage='250',\
extras='geo,date_taken')
# print debug info
photos = photos[0]
pages = int(photos.attrib['pages'])
perpage = int(photos.attrib['perpage'])
# print "Total pages:",pages
# print "Total number:",photos.attrib['total']
# print "Photos per Page",perpage
# get data per page
iter = 1
while iter <= min(pages,16):
try:
print "processing page",iter,"/",pages
# print len(ID)
# if len(ID) != 0:
# print "unique items:" ,len(set(ID))\
# ,"/",len(ID),"/",100*len(set(ID))/len(ID),"%"
#else:
# print "unique items:" ,len(set(ID))\
# ,"/",len(ID)
photos = flickr.photos_search(\
min_taken_date="%s-%s-%s" % (start_date.year,start_date.month,start_date.day),\
max_taken_date="%s-%s-%s" % (end_date.year,end_date.month,end_date.day),\
bbox = bboxP,\
page = str(iter),\
perpage='250',\
extras='geo,date_taken')
photos = photos[0]
print len(photos)
print "Total number:",photos.attrib['total']
if len(photos) == 0:
print "0 item found, pass"
else:
for photo in photos:
file1.write(photo.attrib['id']+';'+photo.attrib['longitude']+';'+photo.attrib['latitude']+';'+photo.attrib['datetaken']+'\n')
iter += 1
#time.sleep(2)
except:
print "connection error... getting current page again"
time.sleep(10)
start_date = start_date + delta
end_date = start_date + delta
week = week + 1
file1.close()
| Python |
'''Persistent token cache management for the Flickr API'''
import os.path
import logging
import time
from flickrapi.exceptions import LockingError
logging.basicConfig()
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
__all__ = ('TokenCache', 'SimpleTokenCache')
class SimpleTokenCache(object):
'''In-memory token cache.'''
def __init__(self):
self.token = None
def forget(self):
'''Removes the cached token'''
self.token = None
class TokenCache(object):
'''On-disk persistent token cache for a single application.
The application is identified by the API key used. Per
application multiple users are supported, with a single
token per user.
'''
def __init__(self, api_key, username=None):
'''Creates a new token cache instance'''
self.api_key = api_key
self.username = username
self.memory = {}
self.path = os.path.join("~", ".flickr")
def get_cached_token_path(self):
"""Return the directory holding the app data."""
return os.path.expanduser(os.path.join(self.path, self.api_key))
def get_cached_token_filename(self):
"""Return the full pathname of the cached token file."""
if self.username:
filename = 'auth-%s.token' % self.username
else:
filename = 'auth.token'
return os.path.join(self.get_cached_token_path(), filename)
def get_cached_token(self):
"""Read and return a cached token, or None if not found.
The token is read from the cached token file.
"""
# Only read the token once
if self.username in self.memory:
return self.memory[self.username]
try:
f = open(self.get_cached_token_filename(), "r")
token = f.read()
f.close()
return token.strip()
except IOError:
return None
def set_cached_token(self, token):
"""Cache a token for later use."""
# Remember for later use
self.memory[self.username] = token
path = self.get_cached_token_path()
if not os.path.exists(path):
os.makedirs(path)
f = open(self.get_cached_token_filename(), "w")
f.write(token)
f.close()
def forget(self):
'''Removes the cached token'''
if self.username in self.memory:
del self.memory[self.username]
filename = self.get_cached_token_filename()
if os.path.exists(filename):
os.unlink(filename)
token = property(get_cached_token, set_cached_token, forget, "The cached token")
class LockingTokenCache(TokenCache):
'''Locks the token cache when reading or updating it, so that
multiple processes can safely use the same API key.
'''
def get_lock_name(self):
'''Returns the filename of the lock.'''
token_name = self.get_cached_token_filename()
return '%s-lock' % token_name
lock = property(get_lock_name)
def get_pidfile_name(self):
'''Returns the name of the pidfile in the lock directory.'''
return os.path.join(self.lock, 'pid')
pidfile_name = property(get_pidfile_name)
def get_lock_pid(self):
'''Returns the PID that is stored in the lock directory, or
None if there is no such file.
'''
filename = self.pidfile_name
if not os.path.exists(filename):
return None
pidfile = open(filename)
try:
pid = pidfile.read()
if pid:
return int(pid)
finally:
pidfile.close()
return None
def acquire(self, timeout=60):
'''Locks the token cache for this key and username.
If the token cache is already locked, waits until it is
released. Throws an exception when the lock cannot be acquired
after ``timeout`` seconds.
'''
# Check whether there is a PID file already with our PID in
# it.
lockpid = self.get_lock_pid()
if lockpid == os.getpid():
LOG.debug('The lock is ours, continuing')
return
# Figure out the lock filename
lock = self.get_lock_name()
LOG.debug('Acquiring lock %s' % lock)
# Try to obtain the lock
start_time = time.time()
while True:
try:
os.makedirs(lock)
break
except OSError:
# If the path doesn't exist, the error isn't that it
# can't be created because someone else has got the
# lock. Just bail out then.
if not os.path.exists(lock):
LOG.error('Unable to acquire lock %s, aborting' %
lock)
raise
if time.time() - start_time >= timeout:
# Timeout has passed, bail out
raise LockingError('Unable to acquire lock ' +
'%s, aborting' % lock)
# Wait for a bit, then try again
LOG.debug('Unable to acquire lock, waiting')
time.sleep(0.1)
# Write the PID file
LOG.debug('Lock acquired, writing our PID')
pidfile = open(self.pidfile_name, 'w')
try:
pidfile.write('%s' % os.getpid())
finally:
pidfile.close()
def release(self):
'''Unlocks the token cache for this key.'''
# Figure out the lock filename
lock = self.get_lock_name()
if not os.path.exists(lock):
LOG.warn('Trying to release non-existing lock %s' % lock)
return
# If the PID file isn't ours, abort.
lockpid = self.get_lock_pid()
if lockpid and lockpid != os.getpid():
raise LockingError(('Lock %s is NOT ours, but belongs ' +
'to PID %i, unable to release.') % (lock, lockpid))
LOG.debug('Releasing lock %s' % lock)
# Remove the PID file and the lock directory
pidfile = self.pidfile_name
if os.path.exists(pidfile):
os.remove(pidfile)
os.removedirs(lock)
def __del__(self):
'''Cleans up any existing lock.'''
# Figure out the lock filename
lock = self.get_lock_name()
if not os.path.exists(lock):
return
# If the PID file isn't ours, we're done
lockpid = self.get_lock_pid()
if lockpid and lockpid != os.getpid():
return
# Release the lock
self.release()
def locked(method):
'''Decorator, ensures the method runs in a locked cache.'''
def locker(self, *args, **kwargs):
self.acquire()
try:
return method(self, *args, **kwargs)
finally:
self.release()
return locker
@locked
def get_cached_token(self):
"""Read and return a cached token, or None if not found.
The token is read from the cached token file.
"""
return TokenCache.get_cached_token(self)
@locked
def set_cached_token(self, token):
"""Cache a token for later use."""
TokenCache.set_cached_token(self, token)
@locked
def forget(self):
'''Removes the cached token'''
TokenCache.forget(self)
token = property(get_cached_token, set_cached_token, forget, "The cached token")
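# Usage sketch (the api key and token here are placeholders):
#   cache = TokenCache('0123456789abcdef', username='alice')
#   cache.token = 'some-token'   # written to ~/.flickr/0123456789abcdef/auth-alice.token
#   cache.token                  # -> 'some-token' (memory first, then the file)
#   cache.forget()               # drops both the in-memory and on-disk copies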
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''A FlickrAPI interface.
The main functionality can be found in the `flickrapi.FlickrAPI`
class.
See `the FlickrAPI homepage`_ for more info.
.. _`the FlickrAPI homepage`: http://stuvel.eu/projects/flickrapi
'''
__version__ = '1.4.2'
__all__ = ('FlickrAPI', 'IllegalArgumentException', 'FlickrError',
'CancelUpload', 'XMLNode', 'set_log_level', '__version__')
__author__ = u'Sybren St\u00fcvel'.encode('utf-8')
# Copyright (c) 2007 by the respective coders, see
# http://www.stuvel.eu/projects/flickrapi
#
# This code is subject to the Python licence, as can be read on
# http://www.python.org/download/releases/2.5.2/license/
#
# For those without an internet connection, here is a summary. When this
# summary clashes with the Python licence, the latter will be applied.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
import urllib
import urllib2
import os.path
import logging
import copy
import webbrowser
# Smartly import hashlib and fall back on md5
try: from hashlib import md5
except ImportError: from md5 import md5
from flickrapi.tokencache import TokenCache, SimpleTokenCache, \
LockingTokenCache
from flickrapi.xmlnode import XMLNode
from flickrapi.multipart import Part, Multipart, FilePart
from flickrapi.exceptions import *
from flickrapi.cache import SimpleCache
from flickrapi import reportinghttp
logging.basicConfig()
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
def make_utf8(dictionary):
'''Encodes all Unicode strings in the dictionary to UTF-8. Converts
all other objects to regular strings.
Returns a copy of the dictionary, doesn't touch the original.
'''
result = {}
for (key, value) in dictionary.iteritems():
if isinstance(value, unicode):
value = value.encode('utf-8')
else:
value = str(value)
result[key] = value
return result
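# Quick illustration of make_utf8 (the values are made up): Unicode strings
# become UTF-8 byte strings, everything else is coerced via str().
#
#   make_utf8({'title': u'Caf\u00e9', 'page': 2})
#   => {'title': 'Caf\xc3\xa9', 'page': '2'}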
def debug(method):
'''Method decorator for debugging method calls.
Using this automatically sets the log level to DEBUG.
'''
LOG.setLevel(logging.DEBUG)
def debugged(*args, **kwargs):
LOG.debug("Call: %s(%s, %s)" % (method.__name__, args,
kwargs))
result = method(*args, **kwargs)
LOG.debug("\tResult: %s" % result)
return result
return debugged
# REST parsers, {format: parser_method, ...}. Fill by using the
# @rest_parser(format) function decorator
rest_parsers = {}
def rest_parser(format):
'''Method decorator, use this to mark a function as the parser for
REST as returned by Flickr.
'''
def decorate_parser(method):
rest_parsers[format] = method
return method
return decorate_parser
def require_format(required_format):
'''Method decorator, raises a ValueError when the decorated method
is called if the default format is not set to ``required_format``.
'''
def decorator(method):
def decorated(self, *args, **kwargs):
# If everything is okay, call the method
if self.default_format == required_format:
return method(self, *args, **kwargs)
# Otherwise raise an exception
msg = 'Function %s requires that you use ' \
'ElementTree ("etree") as the communication format, ' \
'while the current format is set to "%s".'
raise ValueError(msg % (method.func_name, self.default_format))
return decorated
return decorator
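# Usage note: generator methods further down, such as walk() and walk_set(),
# are declared with @require_format('etree'); calling them on an instance
# constructed with another format raises a ValueError rather than silently
# mis-parsing the response.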
class FlickrAPI(object):
"""Encapsulates Flickr functionality.
Example usage::
flickr = flickrapi.FlickrAPI(api_key)
photos = flickr.photos_search(user_id='73509078@N00', per_page='10')
sets = flickr.photosets_getList(user_id='73509078@N00')
"""
flickr_host = "api.flickr.com"
flickr_rest_form = "/services/rest/"
flickr_auth_form = "/services/auth/"
flickr_upload_form = "/services/upload/"
flickr_replace_form = "/services/replace/"
def __init__(self, api_key, secret=None, username=None,
token=None, format='etree', store_token=True,
cache=False):
"""Construct a new FlickrAPI instance for a given API key
and secret.
api_key
The API key as obtained from Flickr.
secret
The secret belonging to the API key.
username
Used to identify the appropriate authentication token for a
certain user.
token
If you already have an authentication token, you can give
it here. It won't be stored on disk by the FlickrAPI instance.
format
The response format. Use either "xmlnode" or "etree" to get a parsed
response, or use any response format supported by Flickr to get an
unparsed response from method calls. It's also possible to pass the
``format`` parameter on individual calls.
store_token
Disables the on-disk token cache if set to False (default is True).
            Use this to ensure that tokens are neither read from nor written to disk, for
example in web applications that store tokens in cookies.
cache
Enables in-memory caching of FlickrAPI calls - set to ``True`` to
use. If you don't want to use the default settings, you can
instantiate a cache yourself too:
>>> f = FlickrAPI(api_key='123')
>>> f.cache = SimpleCache(timeout=5, max_entries=100)
"""
self.api_key = api_key
self.secret = secret
self.default_format = format
self.__handler_cache = {}
if token:
# Use a memory-only token cache
self.token_cache = SimpleTokenCache()
self.token_cache.token = token
elif not store_token:
# Use an empty memory-only token cache
self.token_cache = SimpleTokenCache()
else:
# Use a real token cache
self.token_cache = TokenCache(api_key, username)
if cache:
self.cache = SimpleCache()
else:
self.cache = None
def __repr__(self):
'''Returns a string representation of this object.'''
return '[FlickrAPI for key "%s"]' % self.api_key
__str__ = __repr__
def trait_names(self):
'''Returns a list of method names as supported by the Flickr
API. Used for tab completion in IPython.
'''
try:
rsp = self.reflection_getMethods(format='etree')
except FlickrError:
return None
def tr(name):
'''Translates Flickr names to something that can be called
here.
>>> tr(u'flickr.photos.getInfo')
u'photos_getInfo'
'''
return name[7:].replace('.', '_')
return [tr(m.text) for m in rsp.getiterator('method')]
@rest_parser('xmlnode')
def parse_xmlnode(self, rest_xml):
'''Parses a REST XML response from Flickr into an XMLNode object.'''
rsp = XMLNode.parse(rest_xml, store_xml=True)
if rsp['stat'] == 'ok':
return rsp
err = rsp.err[0]
raise FlickrError(u'Error: %(code)s: %(msg)s' % err)
@rest_parser('etree')
def parse_etree(self, rest_xml):
'''Parses a REST XML response from Flickr into an ElementTree object.'''
try:
import xml.etree.ElementTree as ElementTree
except ImportError:
# For Python 2.4 compatibility:
try:
import elementtree.ElementTree as ElementTree
except ImportError:
raise ImportError("You need to install "
"ElementTree for using the etree format")
rsp = ElementTree.fromstring(rest_xml)
if rsp.attrib['stat'] == 'ok':
return rsp
err = rsp.find('err')
raise FlickrError(u'Error: %(code)s: %(msg)s' % err.attrib)
def sign(self, dictionary):
"""Calculate the flickr signature for a set of params.
data
a hash of all the params and values to be hashed, e.g.
``{"api_key":"AAAA", "auth_token":"TTTT", "key":
u"value".encode('utf-8')}``
"""
data = [self.secret]
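        # Flickr's signature scheme: concatenate the secret with every key
        # and value in sorted key order, then take the md5 hexdigest of the
        # result.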
for key in sorted(dictionary.keys()):
data.append(key)
datum = dictionary[key]
if isinstance(datum, unicode):
raise IllegalArgumentException("No Unicode allowed, "
"argument %s (%r) should have been UTF-8 by now"
% (key, datum))
data.append(datum)
md5_hash = md5(''.join(data))
return md5_hash.hexdigest()
def encode_and_sign(self, dictionary):
'''URL encodes the data in the dictionary, and signs it using the
given secret, if a secret was given.
'''
dictionary = make_utf8(dictionary)
if self.secret:
dictionary['api_sig'] = self.sign(dictionary)
return urllib.urlencode(dictionary)
def __getattr__(self, attrib):
"""Handle all the regular Flickr API calls.
Example::
flickr.auth_getFrob(api_key="AAAAAA")
etree = flickr.photos_getInfo(photo_id='1234')
etree = flickr.photos_getInfo(photo_id='1234', format='etree')
xmlnode = flickr.photos_getInfo(photo_id='1234', format='xmlnode')
json = flickr.photos_getInfo(photo_id='1234', format='json')
"""
# Refuse to act as a proxy for unimplemented special methods
if attrib.startswith('_'):
raise AttributeError("No such attribute '%s'" % attrib)
# Construct the method name and see if it's cached
method = "flickr." + attrib.replace("_", ".")
if method in self.__handler_cache:
return self.__handler_cache[method]
def handler(**args):
'''Dynamically created handler for a Flickr API call'''
if self.token_cache.token and not self.secret:
raise ValueError("Auth tokens cannot be used without "
"API secret")
# Set some defaults
defaults = {'method': method,
'auth_token': self.token_cache.token,
'api_key': self.api_key,
'format': self.default_format}
args = self.__supply_defaults(args, defaults)
return self.__wrap_in_parser(self.__flickr_call,
parse_format=args['format'], **args)
handler.method = method
self.__handler_cache[method] = handler
return handler
def __supply_defaults(self, args, defaults):
'''Returns a new dictionary containing ``args``, augmented with defaults
from ``defaults``.
Defaults can be overridden, or completely removed by setting the
appropriate value in ``args`` to ``None``.
>>> f = FlickrAPI('123')
>>> f._FlickrAPI__supply_defaults(
... {'foo': 'bar', 'baz': None, 'token': None},
... {'baz': 'foobar', 'room': 'door'})
{'foo': 'bar', 'room': 'door'}
'''
result = args.copy()
for key, default_value in defaults.iteritems():
# Set the default if the parameter wasn't passed
if key not in args:
result[key] = default_value
for key, value in result.copy().iteritems():
# You are able to remove a default by assigning None, and we can't
# pass None to Flickr anyway.
if result[key] is None:
del result[key]
return result
def __flickr_call(self, **kwargs):
'''Performs a Flickr API call with the given arguments. The method name
itself should be passed as the 'method' parameter.
Returns the unparsed data from Flickr::
data = self.__flickr_call(method='flickr.photos.getInfo',
photo_id='123', format='rest')
'''
LOG.debug("Calling %s" % kwargs)
post_data = self.encode_and_sign(kwargs)
# Return value from cache if available
if self.cache and self.cache.get(post_data):
return self.cache.get(post_data)
url = "http://" + self.flickr_host + self.flickr_rest_form
flicksocket = urllib2.urlopen(url, post_data)
reply = flicksocket.read()
flicksocket.close()
# Store in cache, if we have one
if self.cache is not None:
self.cache.set(post_data, reply)
return reply
def __wrap_in_parser(self, wrapped_method, parse_format, *args, **kwargs):
'''Wraps a method call in a parser.
        The parser is looked up by the ``parse_format`` specifier. If a
        parser exists and ``kwargs['format']`` is set, that format is
        forced to ``rest`` so the method's response can be parsed before
        it's returned.
'''
# Find the parser, and set the format to rest if we're supposed to
# parse it.
if parse_format in rest_parsers and 'format' in kwargs:
kwargs['format'] = 'rest'
LOG.debug('Wrapping call %s(self, %s, %s)' % (wrapped_method, args,
kwargs))
data = wrapped_method(*args, **kwargs)
# Just return if we have no parser
if parse_format not in rest_parsers:
return data
# Return the parsed data
parser = rest_parsers[parse_format]
return parser(self, data)
def auth_url(self, perms, frob):
"""Return the authorization URL to get a token.
This is the URL the app will launch a browser toward if it
needs a new token.
perms
"read", "write", or "delete"
frob
picked up from an earlier call to FlickrAPI.auth_getFrob()
"""
encoded = self.encode_and_sign({
"api_key": self.api_key,
"frob": frob,
"perms": perms})
return "http://%s%s?%s" % (self.flickr_host, \
self.flickr_auth_form, encoded)
def web_login_url(self, perms):
'''Returns the web login URL to forward web users to.
perms
"read", "write", or "delete"
'''
encoded = self.encode_and_sign({
"api_key": self.api_key,
"perms": perms})
return "http://%s%s?%s" % (self.flickr_host, \
self.flickr_auth_form, encoded)
def __extract_upload_response_format(self, kwargs):
'''Returns the response format given in kwargs['format'], or
the default format if there is no such key.
If kwargs contains 'format', it is removed from kwargs.
If the format isn't compatible with Flickr's upload response
type, a FlickrError exception is raised.
'''
# Figure out the response format
format = kwargs.get('format', self.default_format)
if format not in rest_parsers and format != 'rest':
raise FlickrError('Format %s not supported for uploading '
'photos' % format)
# The format shouldn't be used in the request to Flickr.
if 'format' in kwargs:
del kwargs['format']
return format
def upload(self, filename, callback=None, **kwargs):
"""Upload a file to flickr.
Be extra careful you spell the parameters correctly, or you will
get a rather cryptic "Invalid Signature" error on the upload!
Supported parameters:
filename
name of a file to upload
callback
method that gets progress reports
title
title of the photo
description
description a.k.a. caption of the photo
tags
space-delimited list of tags, ``'''tag1 tag2 "long
tag"'''``
is_public
"1" or "0" for a public resp. private photo
is_friend
"1" or "0" whether friends can see the photo while it's
marked as private
is_family
"1" or "0" whether family can see the photo while it's
marked as private
content_type
Set to "1" for Photo, "2" for Screenshot, or "3" for Other.
hidden
Set to "1" to keep the photo in global search results, "2"
to hide from public searches.
format
The response format. You can only choose between the
parsed responses or 'rest' for plain REST.
The callback method should take two parameters:
``def callback(progress, done)``
Progress is a number between 0 and 100, and done is a boolean
that's true only when the upload is done.
"""
return self.__upload_to_form(self.flickr_upload_form,
filename, callback, **kwargs)
def replace(self, filename, photo_id, callback=None, **kwargs):
"""Replace an existing photo.
Supported parameters:
filename
name of a file to upload
photo_id
the ID of the photo to replace
callback
method that gets progress reports
format
The response format. You can only choose between the
parsed responses or 'rest' for plain REST. Defaults to the
format passed to the constructor.
The callback parameter has the same semantics as described in the
``upload`` function.
"""
if not photo_id:
raise IllegalArgumentException("photo_id must be specified")
kwargs['photo_id'] = photo_id
return self.__upload_to_form(self.flickr_replace_form,
filename, callback, **kwargs)
def __upload_to_form(self, form_url, filename, callback, **kwargs):
'''Uploads a photo - can be used to either upload a new photo
or replace an existing one.
form_url must be either ``FlickrAPI.flickr_replace_form`` or
``FlickrAPI.flickr_upload_form``.
'''
if not filename:
raise IllegalArgumentException("filename must be specified")
if not self.token_cache.token:
raise IllegalArgumentException("Authentication is required")
# Figure out the response format
format = self.__extract_upload_response_format(kwargs)
# Update the arguments with the ones the user won't have to supply
arguments = {'auth_token': self.token_cache.token,
'api_key': self.api_key}
arguments.update(kwargs)
        # Convert to UTF-8 if an argument is a Unicode string
kwargs = make_utf8(arguments)
if self.secret:
kwargs["api_sig"] = self.sign(kwargs)
url = "http://%s%s" % (self.flickr_host, form_url)
# construct POST data
body = Multipart()
for arg, value in kwargs.iteritems():
part = Part({'name': arg}, value)
body.attach(part)
filepart = FilePart({'name': 'photo'}, filename, 'image/jpeg')
body.attach(filepart)
return self.__wrap_in_parser(self.__send_multipart, format,
url, body, callback)
def __send_multipart(self, url, body, progress_callback=None):
        '''Sends a Multipart object to a URL.
Returns the resulting unparsed XML from Flickr.
'''
LOG.debug("Uploading to %s" % url)
request = urllib2.Request(url)
request.add_data(str(body))
(header, value) = body.header()
request.add_header(header, value)
if not progress_callback:
# Just use urllib2 if there is no progress callback
# function
response = urllib2.urlopen(request)
return response.read()
def __upload_callback(percentage, done, seen_header=[False]):
'''Filters out the progress report on the HTTP header'''
# Call the user's progress callback when we've filtered
# out the HTTP header
if seen_header[0]:
return progress_callback(percentage, done)
# Remember the first time we hit 'done'.
if done:
seen_header[0] = True
response = reportinghttp.urlopen(request, __upload_callback)
return response.read()
def validate_frob(self, frob, perms):
'''Lets the user validate the frob by launching a browser to
the Flickr website.
'''
auth_url = self.auth_url(perms, frob)
try:
browser = webbrowser.get()
except webbrowser.Error:
if 'BROWSER' not in os.environ:
raise
browser = webbrowser.GenericBrowser(os.environ['BROWSER'])
browser.open(auth_url, True, True)
def get_token_part_one(self, perms="read", auth_callback=None):
"""Get a token either from the cache, or make a new one from
the frob.
This first attempts to find a token in the user's token cache
on disk. If that token is present and valid, it is returned by
the method.
If that fails (or if the token is no longer valid based on
flickr.auth.checkToken) a new frob is acquired. If an auth_callback
method has been specified it will be called. Otherwise the frob is
validated by having the user log into flickr (with a browser).
To get a proper token, follow these steps:
- Store the result value of this method call
- Give the user a way to signal the program that he/she
has authorized it, for example show a button that can be
pressed.
- Wait for the user to signal the program that the
authorization was performed, but only if there was no
cached token.
- Call flickrapi.get_token_part_two(...) and pass it the
result value you stored.
The newly minted token is then cached locally for the next
run.
perms
"read", "write", or "delete"
auth_callback
method to be called if authorization is needed. When not
passed, ``self.validate_frob(...)`` is called. You can
call this method yourself from the callback method too.
If authorization should be blocked, pass
``auth_callback=False``.
The auth_callback method should take ``(frob, perms)`` as
parameters.
An example::
(token, frob) = flickr.get_token_part_one(perms='write')
if not token: raw_input("Press ENTER after you authorized this program")
flickr.get_token_part_two((token, frob))
Also take a look at ``authenticate_console(perms)``.
"""
# Check our auth_callback parameter for correctness before we
# do anything
authenticate = self.validate_frob
if auth_callback is not None:
if hasattr(auth_callback, '__call__'):
# use the provided callback function
authenticate = auth_callback
elif auth_callback is False:
authenticate = None
else:
# Any non-callable non-False value is invalid
raise ValueError('Invalid value for auth_callback: %s'
% auth_callback)
# see if we have a saved token
token = self.token_cache.token
frob = None
# see if it's valid
if token:
LOG.debug("Trying cached token '%s'" % token)
try:
rsp = self.auth_checkToken(auth_token=token, format='xmlnode')
# see if we have enough permissions
tokenPerms = rsp.auth[0].perms[0].text
if tokenPerms == "read" and perms != "read": token = None
elif tokenPerms == "write" and perms == "delete": token = None
except FlickrError:
LOG.debug("Cached token invalid")
self.token_cache.forget()
token = None
# get a new token if we need one
if not token:
# If we can't authenticate, it's all over.
if not authenticate:
raise FlickrError('Authentication required but '
'blocked using auth_callback=False')
# get the frob
LOG.debug("Getting frob for new token")
rsp = self.auth_getFrob(auth_token=None, format='xmlnode')
frob = rsp.frob[0].text
authenticate(frob, perms)
return (token, frob)
def get_token_part_two(self, (token, frob)):
"""Part two of getting a token, see ``get_token_part_one(...)`` for details."""
# If a valid token was obtained in the past, we're done
if token:
LOG.debug("get_token_part_two: no need, token already there")
self.token_cache.token = token
return token
LOG.debug("get_token_part_two: getting a new token for frob '%s'" % frob)
return self.get_token(frob)
def get_token(self, frob):
'''Gets the token given a certain frob. Used by ``get_token_part_two`` and
by the web authentication method.
'''
# get a token
rsp = self.auth_getToken(frob=frob, auth_token=None, format='xmlnode')
token = rsp.auth[0].token[0].text
LOG.debug("get_token: new token '%s'" % token)
# store the auth info for next time
self.token_cache.token = token
return token
def authenticate_console(self, perms='read', auth_callback=None):
'''Performs the authentication, assuming a console program.
        Gets the token; if needed, starts the browser and waits for the user to
press ENTER before continuing.
See ``get_token_part_one(...)`` for an explanation of the
parameters.
'''
(token, frob) = self.get_token_part_one(perms, auth_callback)
if not token: raw_input("Press ENTER after you authorized this program")
self.get_token_part_two((token, frob))
@require_format('etree')
def __data_walker(self, method, **params):
        '''Calls ``method`` with page=1, page=2, etc. until the total
number of pages has been visited. Yields the photos
returned.
Assumes that ``method(page=n, **params).findall('*/photos')``
results in a list of photos, and that the toplevel element of
the result contains a 'pages' attribute with the total number
of pages.
'''
page = 1
total = 1 # We don't know that yet, update when needed
while page <= total:
# Fetch a single page of photos
LOG.debug('Calling %s(page=%i of %i, %s)' %
(method.func_name, page, total, params))
rsp = method(page=page, **params)
photoset = rsp.getchildren()[0]
total = int(photoset.get('pages'))
photos = rsp.findall('*/photo')
# Yield each photo
for photo in photos:
yield photo
# Ready to get the next page
page += 1
@require_format('etree')
def walk_set(self, photoset_id, per_page=50, **kwargs):
'''walk_set(self, photoset_id, per_page=50, ...) -> \
generator, yields each photo in a single set.
:Parameters:
photoset_id
the photoset ID
per_page
the number of photos that are fetched in one call to
Flickr.
Other arguments can be passed, as documented in the
flickr.photosets.getPhotos_ API call in the Flickr API
documentation, except for ``page`` because all pages will be
returned eventually.
.. _flickr.photosets.getPhotos:
http://www.flickr.com/services/api/flickr.photosets.getPhotos.html
Uses the ElementTree format, incompatible with other formats.
'''
return self.__data_walker(self.photosets_getPhotos,
photoset_id=photoset_id, per_page=per_page, **kwargs)
@require_format('etree')
def walk(self, per_page=50, **kwargs):
'''walk(self, user_id=..., tags=..., ...) -> generator, \
yields each photo in a search query result
Accepts the same parameters as flickr.photos.search_ API call,
except for ``page`` because all pages will be returned
eventually.
.. _flickr.photos.search:
http://www.flickr.com/services/api/flickr.photos.search.html
Also see `walk_set`.
'''
return self.__data_walker(self.photos_search,
per_page=per_page, **kwargs)
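# Hedged end-to-end sketch of the typical flow (the key and secret are
# placeholders; the user_id reuses the example from the class docstring):
#
#   flickr = FlickrAPI('API_KEY', secret='API_SECRET')
#   flickr.authenticate_console(perms='read')
#   for photo in flickr.walk(user_id='73509078@N00', per_page=100):
#       print photo.get('id'), photo.get('title')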
def set_log_level(level):
'''Sets the log level of the logger used by the FlickrAPI module.
>>> import flickrapi
>>> import logging
>>> flickrapi.set_log_level(logging.INFO)
'''
import flickrapi.tokencache
LOG.setLevel(level)
flickrapi.tokencache.LOG.setLevel(level)
if __name__ == "__main__":
print "Running doctests"
import doctest
doctest.testmod()
print "Tests OK"
| Python |
# -*- coding: utf-8 -*-
'''Helper functions for the short http://flic.kr/p/... URL notation.
Photo IDs can be converted to and from Base58 short IDs, and a short
URL can be generated from a photo ID.
The implementation of the encoding and decoding functions is based on
the posts by stevefaeembra and Kohichi on
http://www.flickr.com/groups/api/discuss/72157616713786392/
'''
__all__ = ['encode', 'decode', 'url', 'SHORT_URL']
ALPHABET = u'123456789abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ'
ALPHALEN = len(ALPHABET)
SHORT_URL = u'http://flic.kr/p/%s'
def encode(photo_id):
'''encode(photo_id) -> short id
>>> encode(u'4325695128')
u'7Afjsu'
>>> encode(u'2811466321')
u'5hruZg'
'''
photo_id = int(photo_id)
encoded = u''
while photo_id >= ALPHALEN:
div, mod = divmod(photo_id, ALPHALEN)
encoded = ALPHABET[mod] + encoded
photo_id = int(div)
encoded = ALPHABET[photo_id] + encoded
return encoded
def decode(short_id):
'''decode(short id) -> photo id
>>> decode(u'7Afjsu')
u'4325695128'
>>> decode(u'5hruZg')
u'2811466321'
'''
decoded = 0
multi = 1
for i in xrange(len(short_id)-1, -1, -1):
char = short_id[i]
index = ALPHABET.index(char)
decoded = decoded + multi * index
multi = multi * len(ALPHABET)
return unicode(decoded)
def url(photo_id):
'''url(photo id) -> short url
>>> url(u'4325695128')
u'http://flic.kr/p/7Afjsu'
>>> url(u'2811466321')
u'http://flic.kr/p/5hruZg'
'''
short_id = encode(photo_id)
return SHORT_URL % short_id
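# Round-trip sketch (a hypothetical demo, not part of the public API):
# encode() and decode() should be exact inverses for any photo ID.
if __name__ == '__main__':
    for pid in (u'1', u'4325695128', u'2811466321'):
        assert decode(encode(pid)) == pid
    print url(u'4325695128')  # http://flic.kr/p/7Afjsu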
| Python |
'''Exceptions used by the FlickrAPI module.'''
class IllegalArgumentException(ValueError):
'''Raised when a method is passed an illegal argument.
More specific details will be included in the exception message
when thrown.
'''
class FlickrError(Exception):
'''Raised when a Flickr method fails.
More specific details will be included in the exception message
when thrown.
'''
class CancelUpload(Exception):
'''Raise this exception in an upload/replace callback function to
abort the upload.
'''
class LockingError(Exception):
'''Raised when TokenCache cannot acquire a lock within the timeout
period, or when a lock release is attempted when the lock does not
belong to this process.
'''
| Python |
# -*- encoding: utf-8 -*-
'''Module for encoding data as form-data/multipart'''
import os
import base64
class Part(object):
'''A single part of the multipart data.
>>> Part({'name': 'headline'}, 'Nice Photo')
... # doctest: +ELLIPSIS
<flickrapi.multipart.Part object at 0x...>
>>> image = open('tests/photo.jpg')
>>> Part({'name': 'photo', 'filename': image}, image.read(), 'image/jpeg')
... # doctest: +ELLIPSIS
<flickrapi.multipart.Part object at 0x...>
'''
def __init__(self, parameters, payload, content_type=None):
self.content_type = content_type
self.parameters = parameters
self.payload = payload
def render(self):
'''Renders this part -> List of Strings'''
parameters = ['%s="%s"' % (k, v)
for k, v in self.parameters.iteritems()]
lines = ['Content-Disposition: form-data; %s' % '; '.join(parameters)]
if self.content_type:
lines.append("Content-Type: %s" % self.content_type)
lines.append('')
if isinstance(self.payload, unicode):
lines.append(self.payload.encode('utf-8'))
else:
lines.append(self.payload)
return lines
class FilePart(Part):
'''A single part with a file as the payload
This example has the same semantics as the second Part example:
>>> FilePart({'name': 'photo'}, 'tests/photo.jpg', 'image/jpeg')
... #doctest: +ELLIPSIS
<flickrapi.multipart.FilePart object at 0x...>
'''
def __init__(self, parameters, filename, content_type):
parameters['filename'] = filename
imagefile = open(filename, 'rb')
payload = imagefile.read()
imagefile.close()
Part.__init__(self, parameters, payload, content_type)
def boundary():
"""Generate a random boundary, a bit like Python 2.5's uuid module."""
bytes = os.urandom(16)
return base64.b64encode(bytes, 'ab').strip('=')
class Multipart(object):
'''Container for multipart data'''
def __init__(self):
'''Creates a new Multipart.'''
self.parts = []
self.content_type = 'form-data/multipart'
self.boundary = boundary()
def attach(self, part):
'''Attaches a part'''
self.parts.append(part)
def __str__(self):
'''Renders the Multipart'''
lines = []
for part in self.parts:
lines += ['--' + self.boundary]
lines += part.render()
lines += ['--' + self.boundary + "--"]
return '\r\n'.join(lines)
def header(self):
'''Returns the top-level HTTP header of this multipart'''
return ("Content-Type",
"multipart/form-data; boundary=%s" % self.boundary)
| Python |
# -*- encoding: utf-8 -*-
'''HTTPHandler that supports a callback method for progress reports.
'''
import urllib2
import httplib
import logging
__all__ = ['urlopen']
logging.basicConfig()
LOG = logging.getLogger(__name__)
progress_callback = None
class ReportingSocket(object):
'''Wrapper around a socket. Gives progress report through a
callback function.
'''
min_chunksize = 10240
def __init__(self, socket):
self.socket = socket
def sendall(self, bits):
'''Sends all data, calling the callback function for every
sent chunk.
'''
LOG.debug("SENDING: %s..." % bits[0:30])
total = len(bits)
sent = 0
chunksize = max(self.min_chunksize, total // 100)
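        # Send in roughly 1% slices (but never smaller than min_chunksize)
        # so the callback fires at most about a hundred times per upload.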
while len(bits) > 0:
send = bits[0:chunksize]
self.socket.sendall(send)
sent += len(send)
if progress_callback:
progress = float(sent) / total * 100
progress_callback(progress, sent == total)
bits = bits[chunksize:]
def makefile(self, mode, bufsize):
'''Returns a file-like object for the socket.'''
return self.socket.makefile(mode, bufsize)
def close(self):
'''Closes the socket.'''
return self.socket.close()
class ProgressHTTPConnection(httplib.HTTPConnection):
'''HTTPConnection that gives regular progress reports during
sending of data.
'''
def connect(self):
        '''Connects to an HTTP server.'''
httplib.HTTPConnection.connect(self)
self.sock = ReportingSocket(self.sock)
class ProgressHTTPHandler(urllib2.HTTPHandler):
'''HTTPHandler that gives regular progress reports during sending
of data.
'''
def http_open(self, req):
return self.do_open(ProgressHTTPConnection, req)
def set_callback(method):
'''Sets the callback function to use for progress reports.'''
global progress_callback # IGNORE:W0603
if not hasattr(method, '__call__'):
raise ValueError('Callback method must be callable')
progress_callback = method
def urlopen(url_or_request, callback, body=None):
    '''Opens a URL using the ProgressHTTPHandler.'''
set_callback(callback)
opener = urllib2.build_opener(ProgressHTTPHandler)
return opener.open(url_or_request, body)
if __name__ == '__main__':
    LOG.setLevel(logging.INFO)  # basicConfig() leaves the level at WARNING
    def upload(progress, finished):
        '''Upload progress demo'''
        LOG.info("%3.0f - %s" % (progress, finished))
    # urlopen() takes (url_or_request, callback, body=None)
    conn = urlopen("http://www.flickr.com/", upload, 'x' * 10245)
    data = conn.read()
    LOG.info("Read data")
    print data[:100].split('\n')[0]
| Python |
'''FlickrAPI uses its own in-memory XML representation, to be able to easily
use the info returned from Flickr.
There is no need to use this module directly, you'll get XMLNode instances
from the FlickrAPI method calls.
'''
import xml.dom.minidom
__all__ = ('XMLNode', )
class XMLNode:
"""XMLNode -- generic class for holding an XML node
>>> xml_str = '''<xml foo="32">
... <taggy bar="10">Name0</taggy>
... <taggy bar="11" baz="12">Name1</taggy>
... </xml>'''
>>> f = XMLNode.parse(xml_str)
>>> f.name
u'xml'
>>> f['foo']
u'32'
>>> f.taggy[0].name
u'taggy'
>>> f.taggy[0]["bar"]
u'10'
>>> f.taggy[0].text
u'Name0'
>>> f.taggy[1].name
u'taggy'
>>> f.taggy[1]["bar"]
u'11'
>>> f.taggy[1]["baz"]
u'12'
"""
def __init__(self):
"""Construct an empty XML node."""
self.name = ""
self.text = ""
self.attrib = {}
self.xml = None
def __setitem__(self, key, item):
"""Store a node's attribute in the attrib hash."""
self.attrib[key] = item
def __getitem__(self, key):
"""Retrieve a node's attribute from the attrib hash."""
return self.attrib[key]
@classmethod
def __parse_element(cls, element, this_node):
"""Recursive call to process this XMLNode."""
this_node.name = element.nodeName
# add element attributes as attributes to this node
for i in range(element.attributes.length):
an = element.attributes.item(i)
this_node[an.name] = an.nodeValue
for a in element.childNodes:
if a.nodeType == xml.dom.Node.ELEMENT_NODE:
child = XMLNode()
# Ugly fix for an ugly bug. If an XML element <name />
# exists, it now overwrites the 'name' attribute
# storing the XML element name.
if not hasattr(this_node, a.nodeName) or a.nodeName == 'name':
setattr(this_node, a.nodeName, [])
# add the child node as an attrib to this node
children = getattr(this_node, a.nodeName)
children.append(child)
cls.__parse_element(a, child)
elif a.nodeType == xml.dom.Node.TEXT_NODE:
this_node.text += a.nodeValue
return this_node
@classmethod
def parse(cls, xml_str, store_xml=False):
"""Convert an XML string into a nice instance tree of XMLNodes.
xml_str -- the XML to parse
store_xml -- if True, stores the XML string in the root XMLNode.xml
"""
dom = xml.dom.minidom.parseString(xml_str)
# get the root
root_node = XMLNode()
if store_xml: root_node.xml = xml_str
return cls.__parse_element(dom.firstChild, root_node)
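# Minimal sketch of walking a parsed tree (the XML string is made up):
if __name__ == '__main__':
    rsp = XMLNode.parse('<rsp stat="ok"><photos page="1" pages="7"/></rsp>')
    print rsp['stat']             # u'ok'
    print rsp.photos[0]['pages']  # u'7'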
| Python |
# -*- encoding: utf-8 -*-
'''Call result cache.
Designed to have the same interface as the `Django low-level cache API`_.
Heavily inspired (read: mostly copied-and-pasted) from the Django framework -
thanks to those guys for designing a simple and effective cache!
.. _`Django low-level cache API`: http://www.djangoproject.com/documentation/cache/#the-low-level-cache-api
'''
import threading
import time
class SimpleCache(object):
'''Simple response cache for FlickrAPI calls.
    This stores at most 50 entries, timing them out after 120 seconds:
>>> cache = SimpleCache(timeout=120, max_entries=50)
'''
def __init__(self, timeout=300, max_entries=200):
self.storage = {}
self.expire_info = {}
self.lock = threading.RLock()
self.default_timeout = timeout
self.max_entries = max_entries
self.cull_frequency = 3
def locking(method):
'''Method decorator, ensures the method call is locked'''
def locked(self, *args, **kwargs):
self.lock.acquire()
try:
return method(self, *args, **kwargs)
finally:
self.lock.release()
return locked
@locking
def get(self, key, default=None):
'''Fetch a given key from the cache. If the key does not exist, return
default, which itself defaults to None.
'''
now = time.time()
exp = self.expire_info.get(key)
if exp is None:
return default
elif exp < now:
self.delete(key)
return default
return self.storage[key]
@locking
def set(self, key, value, timeout=None):
'''Set a value in the cache. If timeout is given, that timeout will be
used for the key; otherwise the default cache timeout will be used.
'''
if len(self.storage) >= self.max_entries:
self.cull()
if timeout is None:
timeout = self.default_timeout
self.storage[key] = value
self.expire_info[key] = time.time() + timeout
@locking
def delete(self, key):
'''Deletes a key from the cache, failing silently if it doesn't exist.'''
if key in self.storage:
del self.storage[key]
if key in self.expire_info:
del self.expire_info[key]
@locking
def has_key(self, key):
'''Returns True if the key is in the cache and has not expired.'''
return self.get(key) is not None
@locking
def __contains__(self, key):
'''Returns True if the key is in the cache and has not expired.'''
return self.has_key(key)
@locking
def cull(self):
'''Reduces the number of cached items'''
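        # With cull_frequency == 3 this removes roughly every third key,
        # i.e. about a third of the cache; dict ordering decides which.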
doomed = [k for (i, k) in enumerate(self.storage)
if i % self.cull_frequency == 0]
for k in doomed:
self.delete(k)
@locking
def __len__(self):
'''Returns the number of cached items -- they might be expired
though.
'''
return len(self.storage)
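# Minimal usage sketch of SimpleCache; the key and value are invented for
# illustration only.
if __name__ == '__main__':
    cache = SimpleCache(timeout=1, max_entries=10)
    cache.set('demo-key', '<rsp stat="ok" />')
    assert 'demo-key' in cache          # __contains__ also checks expiry
    time.sleep(1.1)                     # let the entry time out
    assert cache.get('demo-key') is None
    print "SimpleCache demo OK"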
| Python |
#!/usr/bin/env python
'''Python distutils install script.
Run with "python setup.py install" to install FlickrAPI
'''
import distribute_setup
distribute_setup.use_setuptools()
import sys
# Check the Python version
(major, minor) = sys.version_info[:2]
if (major, minor) < (2, 4):
raise SystemExit("Sorry, Python 2.4 or newer required")
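# The real setuptools configuration lives in _setup.py, which uses syntax
# only parsable by Python 2.4+, hence this version check before the import.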
import _setup
| Python |
#!/usr/bin/env python
import tests
tests.run_tests()
| Python |
#!python
"""Bootstrap distribute installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from distribute_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import sys
import time
import fnmatch
import tempfile
import tarfile
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
try:
import subprocess
def _python_cmd(*args):
args = (sys.executable,) + args
return subprocess.call(args) == 0
except ImportError:
    # fallback for Python 2.3, which lacks the subprocess module
def _python_cmd(*args):
args = (sys.executable,) + args
        # quote arguments containing spaces on Windows
if sys.platform == 'win32':
def quote(arg):
if ' ' in arg:
return '"%s"' % arg
return arg
args = [quote(arg) for arg in args]
return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
DEFAULT_VERSION = "0.6.8"
DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
SETUPTOOLS_PKG_INFO = """\
Metadata-Version: 1.0
Name: setuptools
Version: 0.6c9
Summary: xxxx
Home-page: xxx
Author: xxx
Author-email: xxx
License: xxx
Description: xxx
"""
def _install(tarball):
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
_extractall(tar)
tar.close()
        # change into the extracted directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
# installing
log.warn('Installing Distribute')
assert _python_cmd('setup.py', 'install')
finally:
os.chdir(old_wd)
def _build_egg(egg, tarball, to_dir):
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
_extractall(tar)
tar.close()
        # change into the extracted directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
# building an egg
log.warn('Building a Distribute egg in %s', to_dir)
_python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
finally:
os.chdir(old_wd)
    # verify that the egg was actually built
log.warn(egg)
if not os.path.exists(egg):
raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
% (version, sys.version_info[0], sys.version_info[1]))
if not os.path.exists(egg):
tarball = download_setuptools(version, download_base,
to_dir, download_delay)
_build_egg(egg, tarball, to_dir)
sys.path.insert(0, egg)
import setuptools
setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, download_delay=15, no_fake=True):
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
was_imported = 'pkg_resources' in sys.modules or \
'setuptools' in sys.modules
try:
try:
import pkg_resources
if not hasattr(pkg_resources, '_distribute'):
if not no_fake:
_fake_setuptools()
raise ImportError
except ImportError:
return _do_download(version, download_base, to_dir, download_delay)
try:
pkg_resources.require("distribute>="+version)
return
except pkg_resources.VersionConflict:
e = sys.exc_info()[1]
if was_imported:
sys.stderr.write(
"The required version of distribute (>=%s) is not available,\n"
"and can't be installed while this script is running. Please\n"
"install a more recent version first, using\n"
"'easy_install -U distribute'."
"\n\n(Currently using %r)\n" % (version, e.args[0]))
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return _do_download(version, download_base, to_dir,
download_delay)
except pkg_resources.DistributionNotFound:
return _do_download(version, download_base, to_dir,
download_delay)
finally:
if not no_fake:
_create_fake_setuptools_pkg_info(to_dir)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, delay=15):
"""Download distribute from a specified location and return its filename
`version` should be a valid distribute version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download
attempt.
"""
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
tgz_name = "distribute-%s.tar.gz" % version
url = download_base + tgz_name
saveto = os.path.join(to_dir, tgz_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
log.warn("Downloading %s", url)
src = urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = src.read()
dst = open(saveto, "wb")
dst.write(data)
finally:
if src:
src.close()
if dst:
dst.close()
return os.path.realpath(saveto)
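# _do_download() above uses this function as:
#   tarball = download_setuptools(version, download_base, to_dir,
#                                 download_delay)
# and then builds an egg from the downloaded tarball.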
def _patch_file(path, content):
"""Will backup the file then patch it"""
existing_content = open(path).read()
if existing_content == content:
# already patched
log.warn('Already patched.')
return False
log.warn('Patching...')
_rename_path(path)
f = open(path, 'w')
try:
f.write(content)
finally:
f.close()
return True
def _same_content(path, content):
return open(path).read() == content
def _rename_path(path):
new_name = path + '.OLD.%s' % time.time()
log.warn('Renaming %s into %s', path, new_name)
try:
from setuptools.sandbox import DirectorySandbox
def _violation(*args):
pass
DirectorySandbox._violation = _violation
except ImportError:
pass
os.rename(path, new_name)
return new_name
def _remove_flat_installation(placeholder):
if not os.path.isdir(placeholder):
        log.warn('Unknown installation at %s', placeholder)
return False
found = False
for file in os.listdir(placeholder):
if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
found = True
break
if not found:
log.warn('Could not locate setuptools*.egg-info')
return
log.warn('Removing elements out of the way...')
pkg_info = os.path.join(placeholder, file)
if os.path.isdir(pkg_info):
patched = _patch_egg_dir(pkg_info)
else:
patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
if not patched:
log.warn('%s already patched.', pkg_info)
return False
# now let's move the files out of the way
for element in ('setuptools', 'pkg_resources.py', 'site.py'):
element = os.path.join(placeholder, element)
if os.path.exists(element):
_rename_path(element)
else:
log.warn('Could not find the %s element of the '
'Setuptools distribution', element)
return True
def _after_install(dist):
log.warn('After install bootstrap.')
placeholder = dist.get_command_obj('install').install_purelib
_create_fake_setuptools_pkg_info(placeholder)
def _create_fake_setuptools_pkg_info(placeholder):
if not placeholder or not os.path.exists(placeholder):
log.warn('Could not find the install location')
return
pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
setuptools_file = 'setuptools-0.6c9-py%s.egg-info' % pyver
pkg_info = os.path.join(placeholder, setuptools_file)
if os.path.exists(pkg_info):
log.warn('%s already exists', pkg_info)
return
log.warn('Creating %s', pkg_info)
f = open(pkg_info, 'w')
try:
f.write(SETUPTOOLS_PKG_INFO)
finally:
f.close()
pth_file = os.path.join(placeholder, 'setuptools.pth')
log.warn('Creating %s', pth_file)
f = open(pth_file, 'w')
try:
f.write(os.path.join(os.curdir, setuptools_file))
finally:
f.close()
def _patch_egg_dir(path):
# let's check if it's already patched
pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
if os.path.exists(pkg_info):
if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
log.warn('%s already patched.', pkg_info)
return False
_rename_path(path)
os.mkdir(path)
os.mkdir(os.path.join(path, 'EGG-INFO'))
pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
f = open(pkg_info, 'w')
try:
f.write(SETUPTOOLS_PKG_INFO)
finally:
f.close()
return True
def _before_install():
log.warn('Before install bootstrap.')
_fake_setuptools()
def _under_prefix(location):
if 'install' not in sys.argv:
return True
args = sys.argv[sys.argv.index('install')+1:]
for index, arg in enumerate(args):
for option in ('--root', '--prefix'):
if arg.startswith('%s=' % option):
                top_dir = arg.split('%s=' % option)[-1]
return location.startswith(top_dir)
elif arg == option:
                if len(args) > index + 1:
top_dir = args[index+1]
return location.startswith(top_dir)
elif option == '--user' and USER_SITE is not None:
return location.startswith(USER_SITE)
return True
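# Illustrative behaviour (paths invented): with
#   sys.argv = ['setup.py', 'install', '--prefix=/opt/python']
# _under_prefix('/opt/python/lib/site-packages') is True, while
# _under_prefix('/usr/lib/site-packages') is False.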
def _fake_setuptools():
log.warn('Scanning installed packages')
try:
import pkg_resources
except ImportError:
# we're cool
log.warn('Setuptools or Distribute does not seem to be installed.')
return
ws = pkg_resources.working_set
try:
setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
replacement=False))
except TypeError:
# old distribute API
setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
if setuptools_dist is None:
log.warn('No setuptools distribution found')
return
# detecting if it was already faked
setuptools_location = setuptools_dist.location
log.warn('Setuptools installation detected at %s', setuptools_location)
    # if --root or --prefix was provided, and if
# setuptools is not located in them, we don't patch it
if not _under_prefix(setuptools_location):
log.warn('Not patching, --root or --prefix is installing Distribute'
' in another location')
return
    # let's see if it's an egg
if not setuptools_location.endswith('.egg'):
log.warn('Non-egg installation')
res = _remove_flat_installation(setuptools_location)
if not res:
return
else:
log.warn('Egg installation')
pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
if (os.path.exists(pkg_info) and
_same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
log.warn('Already patched.')
return
log.warn('Patching...')
        # let's create a fake egg replacing the setuptools one
res = _patch_egg_dir(setuptools_location)
if not res:
return
    log.warn('Patching done.')
_relaunch()
def _relaunch():
log.warn('Relaunching...')
# we have to relaunch the process
args = [sys.executable] + sys.argv
sys.exit(subprocess.call(args))
def _extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
import copy
import operator
from tarfile import ExtractError
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 448 # decimal for oct 0700
self.extract(tarinfo, path)
# Reverse sort directories.
if sys.version_info < (2, 4):
def sorter(dir1, dir2):
return cmp(dir1.name, dir2.name)
directories.sort(sorter)
directories.reverse()
else:
directories.sort(key=operator.attrgetter('name'), reverse=True)
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError:
e = sys.exc_info()[1]
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def main(argv, version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
    tarball = download_setuptools(version)
_install(tarball)
if __name__ == '__main__':
main(sys.argv[1:])
| Python |
# -*- encoding: utf-8 -*-
'''The setup code is split into two sections, one in setup.py which
contains very simple Python code and checks the Python version, and
this file, which contains code only parsable by 2.4+.
'''
__author__ = 'Sybren A. Stuvel'
from setuptools import setup, Distribution
import os
import sys
try:
import docutils.core
except ImportError:
docutils = None
from flickrapi import __version__
# This will be set to True when either the documentation is already
# there, or if we can build it.
documentation_available = False
class OurDistribution(Distribution):
'''Distribution that also generates the flickrapi.html'''
def run_command(self, command):
'''Builds the documentation if needed, then passes control to
the superclass' run_command(...) method.
'''
if command == 'install_data' and docutils:
print 'creating doc/index.html'
docutils.core.publish_file(writer_name='html',
source=open('doc/index.rst'),
source_path='doc',
destination=open('doc/index.html', 'w'),
destination_path='doc',
settings_overrides={'stylesheet_path':
'doc/documentation.css'}
)
Distribution.run_command(self, command)
data = {
'name': 'flickrapi',
'version': __version__,
'author': 'Beej Jorgensen and Sybren A. Stuvel',
'author_email': 'beej@beej.us',
'maintainer': 'Sybren A. Stuvel',
'maintainer_email': 'sybren@stuvel.eu',
'url': 'http://stuvel.eu/projects/flickrapi',
'description': 'The official Python interface to the Flickr API',
'long_description': 'The easiest to use, most complete, and '
        'most actively developed Python interface to the Flickr API. '
'It includes support for authorized and non-authorized '
'access, uploading and replacing photos, and all Flickr API '
'functions.',
'packages': ['flickrapi'],
'data_files': ['LICENSE', 'README', 'UPGRADING'],
'license': 'Python',
'classifiers': [
'Development Status :: 6 - Mature',
'Intended Audience :: Developers',
'License :: OSI Approved :: Python License (CNRI Python License)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Multimedia :: Graphics',
'Topic :: Software Development :: Libraries :: Python Modules',
],
'extras_require': {
'ElementTree': ["elementtree>=1.2.6"],
},
'distclass': OurDistribution,
'zip_safe': True,
'test_suite': 'tests',
}
(major, minor) = sys.version_info[:2]
if major == 2 and minor < 5:
# We still want to use this function, but Python 2.4 doesn't have
# it built-in.
def all(iterator):
for item in iterator:
if not item: return False
return True
alldocs = ['doc/index.html', 'doc/documentation.css', 'doc/html4css1.css']
if docutils or all(os.path.exists(doc) for doc in alldocs):
# Only include documentation if it can be built, or if it has been
# built already
data['data_files'].append(('share/doc/flickrapi-%s' % __version__, alldocs))
documentation_available = True
else:
print "======================================================================="
print "WARNING: Unable to import docutils, documentation will not be included"
print "Documentation for the latest version can be found at"
print "http://flickrapi.sourceforge.net/documentation"
print "======================================================================="
print
setup(**data)
| Python |
#!/usr/bin/env python
'''Unittest for the FlickrAPI.
Far from complete, but it's a start.
'''
import logging
import pkg_resources
import StringIO
import sys
import types
import unittest
import urllib
import urllib2
import webbrowser
# Make sure the flickrapi module from the source distribution is used
sys.path.insert(0, '..')
import flickrapi
flickrapi.set_log_level(logging.FATAL)
#flickrapi.set_log_level(logging.DEBUG)
print "Testing FlickrAPI version %s" % flickrapi.__version__
# Some useful constants
EURO_UNICODE = u'\u20ac'
EURO_UTF8 = EURO_UNICODE.encode('utf-8')
U_UML_UNICODE = u'\u00fc'
U_UML_UTF8 = U_UML_UNICODE.encode('utf-8')
key = 'ecd01ab8f00faf13e1f8801586e126fd'
secret = '2ee3f558fd79f292'
logging.basicConfig()
LOG = logging.getLogger(__name__)
def etree_package():
'''Returns the name of the ElementTree package for the given
Python version.'''
current_version = sys.version_info[0:3]
if current_version < (2, 5, 0):
# For Python 2.4 and earlier, we assume ElementTree was
# downloaded and installed from pypi.
return 'elementtree.ElementTree'
return 'xml.etree.ElementTree'
class SuperTest(unittest.TestCase):
'''Superclass for unittests, provides useful methods.'''
def setUp(self):
super(SuperTest, self).setUp()
self.f = flickrapi.FlickrAPI(key, secret)
self.f_noauth = flickrapi.FlickrAPI(key)
# Remove/prevent any unwanted tokens
self.f.token_cache.forget()
self.f_noauth.token_cache = flickrapi.tokencache.SimpleTokenCache()
def print_auth_message(self, frob, perms):
sys.stderr.write("Your browser starts, press ENTER after "
"authentication")
return self.f.validate_frob(frob, perms)
def assertUrl(self, expected_protocol, expected_host, expected_path,
expected_query_arguments, actual_url):
'''Asserts that the 'actual_url' matches the given parts.'''
# Test the URL part by part
(urltype, rest) = urllib.splittype(actual_url)
self.assertEqual(expected_protocol, urltype)
(hostport, path) = urllib.splithost(rest)
self.assertEqual(expected_host, hostport)
(path, query) = urllib.splitquery(path)
self.assertEqual(expected_path, path)
attrvalues = query.split('&')
attribs = dict(av.split('=') for av in attrvalues)
self.assertEqual(expected_query_arguments, attribs)
class FlickrApiTest(SuperTest):
def test_repr(self):
'''Class name and API key should be in repr output'''
r = repr(self.f)
self.assertTrue('FlickrAPI' in r)
self.assertTrue(key in r)
def test_auth_url(self):
'''Test the authentication URL generation'''
args = dict(api_key=key, frob='frob', perms='read')
args['api_sig'] = self.f.sign(args)
url = self.f.auth_url(args['perms'], args['frob'])
self.assertUrl('http', flickrapi.FlickrAPI.flickr_host,
flickrapi.FlickrAPI.flickr_auth_form, args,
url)
def test_auth_callback(self):
'''Test auth_callback argument in get_token_part_one().'''
# make sure this test is made without a valid token in the cache
self.f.token_cache.forget()
test = {'called': False,
'frob': None}
def callback(frob, perms):
test['called'] = True
test['frob'] = frob
self.assertEqual(perms, 'delete')
self.assertTrue(frob, 'Expected to get a frob')
(token, frob) = self.f.get_token_part_one(perms="delete",
auth_callback=callback)
# The token shouldn't be set
self.assertEqual(None, token, "Expected token to be None")
# The callback function should have been called
self.assertTrue(test['called'],
'Expected callback function to be called')
self.assertEqual(frob, test['frob'],
'Expected same frob returned and passed in callback')
def test_auth_callback_false(self):
'''Test auth_callback argument in get_token_part_one().'''
# make sure this test is made without a valid token in the cache
self.f.token_cache.forget()
try:
# Prevent the webbrowser module from being called.
del flickrapi.webbrowser
# Check that an exception is raised.
self.assertRaises(flickrapi.FlickrError, self.f.get_token_part_one,
perms="read", auth_callback=False)
finally:
flickrapi.webbrowser = webbrowser
def test_auth_callback_invalid(self):
'''Test auth_callback argument in get_token_part_one().'''
self.assertRaises(ValueError, self.f.get_token_part_one,
perms="read", auth_callback='cookie')
def test_web_login_url(self):
'''Test the web login URL.'''
args = dict(api_key=key, perms='read')
args['api_sig'] = self.f.sign(args)
url = self.f.web_login_url(args['perms'])
self.assertUrl('http', flickrapi.FlickrAPI.flickr_host,
flickrapi.FlickrAPI.flickr_auth_form, args,
url)
def test_simple_search(self):
'''Test simple Flickr search'''
# We expect to be able to find kittens
result = self.f.photos_search(tags='kitten')
total = int(result.find('photos').attrib['total'])
self.assertTrue(total > 0)
def test_token_constructor(self):
'''Test passing a token to the constructor'''
token = '123-abc-def'
# Pass the token
flickr = flickrapi.FlickrAPI(key, secret, token=token)
# It should be in the in-memory token cache now
self.assertEqual(token, flickr.token_cache.token)
# But not in the on-disk token cache
        self.assertNotEqual(token, flickrapi.TokenCache(key).token)
def test_auth_token_without_secret(self):
'''Auth tokens without secrets are meaningless'''
token = '123-abc-def'
# Create a normal FlickrAPI object
flickr = flickrapi.FlickrAPI(key)
flickr.token_cache.token = token
self.assertRaises(ValueError, flickr.photos_search,
tags='kitten')
def test_upload_without_filename(self):
'''Uploading a file without filename is impossible'''
self.assertRaises(flickrapi.IllegalArgumentException,
self.f.upload, '')
self.assertRaises(flickrapi.IllegalArgumentException,
self.f.upload, None)
def test_upload(self):
photo = pkg_resources.resource_filename(__name__, 'photo.jpg')
self.f.token_cache.username = 'unittest-upload'
self.f.authenticate_console('delete', self.print_auth_message)
result = self.f.upload(photo, is_public='0', content_type='2')
# Now remove the photo from the stream again
photo_id = result.find('photoid').text
self.f.photos_delete(photo_id=photo_id)
def test_cancel_upload(self):
photo = pkg_resources.resource_filename(__name__, 'photo.jpg')
self.f.token_cache.username = 'unittest-upload'
self.f.authenticate_console('delete', self.print_auth_message)
def callback(progress, done):
'''Callback that immediately cancels the upload'''
raise flickrapi.CancelUpload()
try:
self.f.upload(photo, callback=callback,
is_public='0', content_type='2')
self.fail("Expected exception not thrown")
except flickrapi.CancelUpload, e:
pass # Expected
def test_store_token(self):
'''Tests that store_token=False FlickrAPI uses SimpleTokenCache'''
token_disk = '123-abc-disk'
token_mem = '123-abc-mem'
# Create a non-public-only instance, and set the on-disk token
flickr = flickrapi.FlickrAPI(key, secret)
flickr.token_cache.token = token_disk
flickr = flickrapi.FlickrAPI(key, secret, store_token=False)
# The token shouldn't be set
self.assertEqual(None, flickr.token_cache.token)
# Now set it
flickr.token_cache.token = token_mem
# It should not be in the on-disk token cache, only in memory
self.assertEqual(token_disk, flickrapi.TokenCache(key).token)
self.assertNotEqual(token_mem, flickrapi.TokenCache(key).token)
def test_wrap_in_parser(self):
'''Tests wrap_in_parser'''
test = {'wrapped': False}
def to_wrap(format, test_param):
self.assertEqual('rest', format)
self.assertEqual('test_value', test_param)
test['wrapped'] = True
return '<rst stat="ok"><element photo_id="5" /></rst>'
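        # __wrap_in_parser is name-mangled by Python, hence the
        # _FlickrAPI prefix when calling it from outside the class.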
rst = self.f._FlickrAPI__wrap_in_parser(to_wrap, parse_format='xmlnode',
format='xmlnode', test_param='test_value')
self.assertEqual('5', rst.element[0]['photo_id'])
self.assertTrue(test['wrapped'],
'Expected wrapped function to be called')
def test_wrap_in_parser_no_format(self):
'''Tests wrap_in_parser without a format in the wrapped arguments'''
test = {'wrapped': False}
def to_wrap(test_param):
self.assertEqual('test_value', test_param)
test['wrapped'] = True
return '<rst stat="ok"><element photo_id="5" /></rst>'
rst = self.f._FlickrAPI__wrap_in_parser(to_wrap, parse_format='xmlnode',
test_param='test_value')
self.assertEqual('5', rst.element[0]['photo_id'])
self.assertTrue(test['wrapped'],
'Expected wrapped function to be called')
class CachingTest(SuperTest):
'''Tests that the caching framework works'''
def test_cache_write(self):
'''tests that the call result is written to cache'''
photo_id = '2333478006'
cache_key = ('api_key=%s'
'&photo_id=%s'
'&method=flickr.photos.getInfo'
'&format=rest' % (key, photo_id))
f = flickrapi.FlickrAPI(key, store_token=False, format='rest')
f.cache = flickrapi.SimpleCache()
self.assertEqual(0, len(f.cache))
info = f.photos_getInfo(photo_id=photo_id)
self.assertEqual(info, f.cache.get(cache_key))
def test_cache_read(self):
'''Tests that cached data is returned if available'''
photo_id = '2333478006'
cache_key = ('api_key=%s'
'&photo_id=%s'
'&method=flickr.photos.getInfo'
'&format=rest' % (key, photo_id))
faked_value = "FAKED_VALUE"
f = flickrapi.FlickrAPI(key, store_token=False, format='rest')
f.cache = flickrapi.SimpleCache()
f.cache.set(cache_key, faked_value)
info = f.photos_getInfo(photo_id=photo_id)
self.assertEqual(faked_value, info)
def test_cache_constructor_parameter(self):
'''Tests that a cache is created when requested.'''
f = flickrapi.FlickrAPI(key, cache=True)
self.assertNotEqual(None, f.cache, "Cache should not be None")
# Test list of non-cacheable method calls
class FormatsTest(SuperTest):
'''Tests the different parsed formats.'''
def test_default_format(self):
'''Test that the default format is etree'''
f = flickrapi.FlickrAPI(key)
etree = f.photos_getInfo(photo_id=u'2333478006')
self.assertEqual(etree_package(), etree.__module__)
def test_etree_format_happy(self):
'''Test ETree format'''
etree = self.f_noauth.photos_getInfo(photo_id=u'2333478006',
format='etree')
self.assertEqual(etree_package(), etree.__module__)
def test_etree_format_error(self):
'''Test ETree format in error conditions'''
self.assertRaises(flickrapi.exceptions.FlickrError,
self.f_noauth.photos_getInfo, format='etree')
def test_etree_default_format(self):
'''Test setting the default format to etree'''
f = flickrapi.FlickrAPI(key, format='etree')
etree = f.photos_getInfo(photo_id=u'2333478006')
self.assertEqual(etree_package(), etree.__module__)
def test_xmlnode_format(self):
'''Test XMLNode format'''
node = self.f_noauth.photos_getInfo(photo_id=u'2333478006',
format='xmlnode')
self.assertNotEqual(None, node.photo[0])
def test_xmlnode_format_error(self):
'''Test XMLNode format in error conditions'''
self.assertRaises(flickrapi.exceptions.FlickrError,
self.f_noauth.photos_getInfo, format='xmlnode')
def test_explicit_format(self):
'''Test explicitly requesting a certain unparsed format'''
xml = self.f.photos_search(tags='kitten', format='rest')
self.assertTrue(isinstance(xml, basestring))
# Try to parse it
rst = flickrapi.XMLNode.parse(xml, False)
self.assertTrue(int(rst.photos[0]['total']) > 0)
class SigningTest(SuperTest):
'''Tests the signing of different arguments.'''
def testSimple(self):
'''Simple arguments, just ASCII'''
signed = self.f.sign({'abc': 'def'})
self.assertEqual('9f215401af1a35e89da67a01be2333d2', signed)
# Order shouldn't matter
signed = self.f.sign({'abc': 'def', 'foo': 'bar'})
self.assertEqual('57ca69551c24c9c9ce2e2b5c832e61af', signed)
signed = self.f.sign({'foo': 'bar', 'abc': 'def'})
self.assertEqual('57ca69551c24c9c9ce2e2b5c832e61af', signed)
def testUnicode(self):
'''Test signing of Unicode data'''
# Unicode can't be signed directly
self.assertRaises(flickrapi.IllegalArgumentException, self.f.sign, {'abc': u'def'})
# But converted to UTF-8 works just fine
signed = self.f.sign({'abc': u'def'.encode('utf-8')})
self.assertEqual('9f215401af1a35e89da67a01be2333d2', signed)
# Non-ASCII data should work too
data = EURO_UNICODE + U_UML_UNICODE
signed = self.f.sign({'abc': data.encode('utf-8')})
self.assertEqual('51188be8b03d1ee892ade48631bfc0fd', signed)
# Straight UTF-8 should work too
data = EURO_UTF8 + U_UML_UTF8
signed = self.f.sign({'abc': data})
self.assertEqual('51188be8b03d1ee892ade48631bfc0fd', signed)
class EncodingTest(SuperTest):
'''Test URL encoding + signing of data. Tests using sets, because
we don't know in advance in which order the arguments will show up,
and we don't care about that anyway.
'''
def testSimple(self):
'''Test simple ASCII-only data'''
encoded = self.f.encode_and_sign({'abc': 'def', 'foo': 'bar'})
expected = set(['abc=def',
'foo=bar',
'api_sig=57ca69551c24c9c9ce2e2b5c832e61af'
])
self.assertEqual(expected, set(encoded.split('&')))
# Order shouldn't matter for the signature
encoded = self.f.encode_and_sign({'foo': 'bar', 'abc': 'def'})
self.assertEqual(expected, set(encoded.split('&')))
def testUnicode(self):
'''Test Unicode data'''
# Unicode strings with ASCII data only should result in the
# same as in the testSimple() test.
encoded = self.f.encode_and_sign({'abc': u'def', 'foo': u'bar'})
expected = set(['abc=def',
'foo=bar',
'api_sig=57ca69551c24c9c9ce2e2b5c832e61af'
])
self.assertEqual(expected, set(encoded.split('&')))
# Non-ASCII UTF-8 data should work too
# EURO = 0xE2 0x82 0xAC in UTF-8
# U_UML = 0xC3 0xBC in UTF-8
data = EURO_UNICODE + U_UML_UNICODE
encoded = self.f.encode_and_sign({'abc': data.encode('utf-8')})
expected = set(['abc=%E2%82%AC%C3%BC',
'api_sig=51188be8b03d1ee892ade48631bfc0fd'
])
self.assertEqual(expected, set(encoded.split('&')))
# Straight Unicode should work too
data = EURO_UNICODE + U_UML_UNICODE
encoded = self.f.encode_and_sign({'abc': data})
self.assertEqual(expected, set(encoded.split('&')))
def testNoSecret(self):
no_secret = flickrapi.FlickrAPI(key)
data = EURO_UNICODE + U_UML_UNICODE
encoded = no_secret.encode_and_sign({'abc': data})
self.assertEqual('abc=%E2%82%AC%C3%BC', encoded)
class DynamicMethodTest(SuperTest):
'''Tests the dynamic methods used to interface with Flickr.'''
class FakeUrllib(object):
'''Fake implementation of URLLib'''
def __init__(self):
self.data = None
self.url = None
def urlopen(self, url, postdata):
self.url = url
self.data = postdata
return StringIO.StringIO('''<?xml version="1.0" encoding="utf-8"?>
<rsp stat="ok"></rsp>''')
def __getattr__(self, name):
'''If we don't implement a method, call the original'''
if not hasattr(urllib, name):
raise AttributeError("No such attibute %s" % name)
return getattr(urllib, name)
#def original_caller(*args, **kwargs):
# original(*args, **kwargs)
def setUp(self):
super(DynamicMethodTest, self).setUp()
# Set fake urllib
self.fake_url_lib = self.FakeUrllib()
flickrapi.urllib = self.fake_url_lib
flickrapi.urllib2 = self.fake_url_lib
def tearDown(self):
super(DynamicMethodTest, self).tearDown()
# Restore original urllib
flickrapi.urllib = urllib
flickrapi.urllib2 = urllib2
def test_unicode_args(self):
'''Tests whether Unicode arguments are properly handled.
Tests using sets, since the order of the URL-encoded arguments
can't be ensured. The order isn't important anyway.
'''
# Plain ASCII should work
self.f.photos_setMeta(monkey='lord')
sent = set(self.fake_url_lib.data.split('&'))
expected = set(['api_key=%s' % key,
'monkey=lord',
'method=flickr.photos.setMeta',
'api_sig=edb3c60b63becf1738e2cd8fcc42834a',
'format=rest'
])
self.assertEquals(expected, sent)
# Unicode should work too
self.f.photos_setMeta(title='monkeylord',
description=EURO_UNICODE+U_UML_UNICODE)
sent = set(self.fake_url_lib.data.split('&'))
expected = set(['api_key=%s' % key,
'title=monkeylord',
'description=%E2%82%AC%C3%BC',
'method=flickr.photos.setMeta',
'api_sig=29fa7705fc721fded172a1c113304871',
'format=rest'
])
self.assertEquals(expected, sent)
def test_private_attribute(self):
'''Tests that we get an AttributeError when accessing an attribute
starting with __.
'''
self.assertRaises(AttributeError, getattr, self.f, '__get_photos')
def test_get_dynamic_method(self):
method = self.f.photos_setMeta
self.assertTrue(hasattr(method, '__call__'))
self.assertEquals('flickr.photos.setMeta', method.method)
# Test that we can get it again - should come from the cache,
# but no way to test that.
method = self.f.photos_setMeta
self.assertTrue(hasattr(method, '__call__'))
self.assertEquals('flickr.photos.setMeta', method.method)
class WalkerTest(SuperTest):
'''Tests walk* functions.'''
def test_walk_set(self):
# Check that we get a generator
gen = self.f.walk_set('72157611690250298', per_page=8)
self.assertEquals(types.GeneratorType, type(gen))
# I happen to know that that set contains 24 photos, and it is
# very unlikely that this will ever change (photos of a past
# event)
self.assertEquals(24, len(list(gen)))
def test_walk(self):
# Check that we get a generator
gen = self.f.walk(tag_mode='all',
tags='sybren,365,threesixtyfive,me',
min_taken_date='2008-08-20',
max_taken_date='2008-08-30', per_page=8,
sort='date-taken-desc')
self.assertEquals(types.GeneratorType, type(gen))
# very unlikely that this result will ever change
ids = [p.get('id') for p in gen]
self.assertEquals(['2824831549', '2807789315', '2807789039',
'2807773797', '2807772503', '2807771401', '2808616234',
'2808618120', '2808591736'], ids)
if __name__ == '__main__':
unittest.main()
| Python |