# NOTE: dataset-extraction artifact (column header from the code corpus this
# file was scraped from), kept as a comment so the file stays parseable:
# code stringlengths 1 1.72M | language stringclasses 1 value |
# |---|---|
#! /opt/local/bin/python2.6
import numpy as np
import time
import sys
from rwEXR import *
# 3744 23.9
# 5616 35.8
# distortion correction parameters in rad
# 0.207029529537 0.0422547753997
entrancePixel = (2323,5615-2427) #5
entrancePixel = (2622,5615-1935) #6
entrancePixel = (3123,5615-1859) #7
entrancePixel = (3479,5615-2224) #1
entrancePixel = (3341,5615-2345) #2
entrancePixel = (3127,5615-2538) #3
entrancePixel = (3616,5615-3037) #4
physicCenter = (1867.5,5615-2818.5)
#center = (1884.18,2834.89)
#center = np.array([2836.5,3744-1860.79])
#center = (1853,5615-2874.5)
distortionCenter = (1860.79,5615-2836.5)
distortionCenter = (1864,5615-2834)
#center = np.array([2836.5,3744-1860.79])
def Rd_distortion(pixel):
    """Radial distance (in pixels) from `pixel` to the module-level distortionCenter."""
    dx = pixel[0] - distortionCenter[0]
    dy = pixel[1] - distortionCenter[1]
    return np.sqrt(dx * dx + dy * dy)
def Rd_physic(pixel):
    """Radial distance (in pixels) from `pixel` to the module-level physicCenter."""
    dx = pixel[0] - physicCenter[0]
    dy = pixel[1] - physicCenter[1]
    return np.sqrt(dx * dx + dy * dy)
pix = np.sqrt(23.9*23.9 + 35.8*35.8)/np.sqrt(3744*3744+5616*5616)
print pix
#coeffs = [ -1.54035227e-12 , 6.94350405e-09 ,-1.22082544e-05, 9.47349416e-03,5.90440253e+00]
#coeffs = [ -1.94777642e-12 , 9.17138801e-09 , -1.59443120e-05 , 1.21270728e-02,5.15110092e+00]
coeffs = [ -1.48631764e-12 , 6.66110873e-09 , -1.16207010e-05 ,9.22816587e-03,5.78973689e+00]
coeffs = [ -2.69459533e-12, 1.19761622e-08 ,-1.99256846e-05 , 1.46539282e-02,4.54699550e+00]
poly_Rd2f = np.poly1d(coeffs)
f = poly_Rd2f(Rd_distortion(entrancePixel)) - 0.5
print Rd_distortion(entrancePixel)
print f
d = 79+89
deg=90.3
T1 = np.matrix([d*np.sin(deg*np.pi/180.0),+d*np.cos(deg*np.pi/180.0)-d,0])
#R = np.matrix([[0,-1,0],[1,0,0],[0,0,1]])
#e1 = np.matrix([-f,d+f,0])
#e2 = np.matrix([d+f,-f,0])
#theta = 2*np.arcsin( Rd_distortion(entrancePixel) / (2.0 * f/pix) )
#phi = np.arctan2( entrancePixel[1] - distortionCenter[1], entrancePixel[0] - distortionCenter[0] )
#op1 = np.matrix([ np.sin(theta)*np.cos(phi), np.cos(theta), np.sin(theta)*np.sin(phi)])
XX = (entrancePixel[0] - distortionCenter[0]) / Rd_distortion(entrancePixel)*f * np.tan(2.0*np.arcsin(Rd_distortion(entrancePixel)/(2.0*f/pix))) + 3.5 * pix
ZZ = (entrancePixel[1] - distortionCenter[1]) / Rd_distortion(entrancePixel)*f * np.tan(2.0*np.arcsin(Rd_distortion(entrancePixel)/(2.0*f/pix))) + 16.0 * pix
op1 = np.matrix([XX,8,ZZ])
#E = np.matrix( [[0,0,-d],[0,0,-d],[d,-d,0]])
N = np.cross(op1,T1)
print N
#N = (op1 * E)
N = N.flat
n1 = N[0]
n2 = N[1]
n3 = N[2]
#n4 = -n1*(d+8) + n2*8
y = np.hstack((np.arange(-1500,-500,5),np.arange(-500,-200,0.2),np.arange(-200,200,0.001),np.arange(200,500,0.2),np.arange(500,1500,5))) - 3.5 * pix
#z = (-n2*f-n2*y)/(n3) + 16 * pix
z = (n2*d - n1*(d+8 - np.cos(deg*np.pi/180.0)*y)/np.sin(deg*np.pi/180.0)-n2*y)/n3 - 16.0 * pix
ru = np.sqrt(z*z+y*y)
outPixel = (567,5615-2354) #5
outPixel = (1058,5615-1910) #6
outPixel = (1609,5615-1977) #7
outPixel = (1843,5615-2341) #1
outPixel = (1621,5615-2415) #2
outPixel = (1294,5615-2555) #3
outPixel = (2005,5615-2998) #4
f = poly_Rd2f(Rd_distortion(outPixel)) -0.5
print f
rd = 2*f*np.sin(np.arctan2(ru,f)/2)
U = -rd * np.cos(np.arctan2(z,y)) / pix
V = rd * np.sin(np.arctan2(z,y)) / pix
exrR,exrG,exrB,L,size = readExr("/Network/scratch/Tests/XL/DepthEstimationProject/sourceimages/HDRI/rotationFisheye/exr/vue1.exr")
for (u,v) in zip(U,V):
try:
L[u+physicCenter[0],v+physicCenter[1]] = 255
except:
pass
createNewOutputImage("epipolar.exr",L.T,L.T,L.T,size)
#print e1 * E
| Python |
#! /opt/local/bin/python2.6
import numpy as np
import time
import sys
from rwEXR import *
# 3744 23.9
# 5616 35.8
# distortion correction parameters in rad
# 0.207029529537 0.0422547753997
entrancePixel = (3473,5615-3043)
#entrancePixel = (1848,5615-2341)
#center = (1867.5,5616-2979.5)
#center = (1884.18,2834.89)
#center = np.array([2836.5,3744-1860.79])
center = (1853,5616-2874.5)
center = (1860.79,5615-2836.5)
#center = np.array([2836.5,3744-1860.79])
def Rd(pixel):
    """Radial distance (in pixels) from `pixel` to the module-level `center`."""
    dx = pixel[0] - center[0]
    dy = pixel[1] - center[1]
    return np.sqrt(dx * dx + dy * dy)
pix = 0.00637738462
coeffs = [ -1.54035227e-12 , 6.94350405e-09 , -1.22082544e-05 , 9.47349416e-03, 5.90440253e+00]
poly_Rd2f = np.poly1d(coeffs)
f = poly_Rd2f(Rd(entrancePixel))
print f
d = 79+89
T1 = np.matrix([d,-d,0])
R = np.matrix([[0,-1,0],[1,0,0],[0,0,1]])
e1 = np.matrix([-f,d+f,0])
e2 = np.matrix([d+f,-f,0])
theta = 2*np.arcsin( Rd(entrancePixel) / (2.0 * f/pix) )
phi = np.arctan2( entrancePixel[1] - center[1], entrancePixel[0] - center[0] )
op1 = np.matrix([ np.sin(theta)*np.cos(phi), np.cos(theta), np.sin(theta)*np.sin(phi)])
#E = np.matrix( [[0,0,-d],[0,0,-d],[d,-d,0]])
N = np.cross(op1,T1)
print N
#N = (op1 * E)
N = N.flat
n1 = N[0]
n2 = N[1]
n3 = N[2]
n4 = -n1*(d+f) + n2*f
y = np.arange(-1000,1000,0.1)
z = (-n2*f-n2*y)/(n3)
ru = np.sqrt(z*z+y*y)
outPixel = (1785,5615-3001)
f = poly_Rd2f(Rd(outPixel))
print f
rd = 2*f*np.sin(np.arctan2(ru,f)/2)
U = -rd * np.cos(np.arctan2(z,y)) * pix
V = rd * np.sin(np.arctan2(z,y)) * pix
exrR,exrG,exrB,L,size = readExr("/Network/scratch/Tests/XL/DepthEstimationProject/sourceimages/HDRI/rotationFisheye/exr/vue1.exr")
for (u,v) in zip(U,V):
L[u+1860.79,v+(5615-2836.5)] = 255
createNewOutputImage("epipolar.exr",L.T,L.T,L.T,size)
# NOTE(review): removed `print e1 * E` -- `E` is only defined in a
# commented-out line above (L121 equivalent), so this statement raised
# NameError at the end of every run; sibling scripts in this file already
# comment this line out.
#print e1 * E
| Python |
#! /opt/local/bin/python2.6
import numpy as np
import time
import sys
from rwEXR import *
# 3744 23.9
# 5616 35.8
# distortion correction parameters in rad
# 0.207029529537 0.0422547753997
entrancePixel = (3479,5615-2224) #1
entrancePixel = (3341,5615-2345) #2
#entrancePixel = (3127,5615-2538) #3
physicCenter = (1867.5,5615-2818.5)
#center = (1884.18,2834.89)
#center = np.array([2836.5,3744-1860.79])
#center = (1853,5615-2874.5)
distortionCenter = (1860.79,5615-2836.5)
distortionCenter = (1864,5615-2834)
#center = np.array([2836.5,3744-1860.79])
def Rd_distortion(pixel):
c = distortionCenter
return np.sqrt( (pixel[0] - c[0])*(pixel[0] - c[0]) + (pixel[1] - c[1])*(pixel[1] - c[1]) )
def Rd_physic(pixel):
c = physicCenter
return np.sqrt( (pixel[0] - c[0])*(pixel[0] - c[0]) + (pixel[1] - c[1])*(pixel[1] - c[1]) )
pix = np.sqrt(23.9*23.9 + 35.8*35.8)/np.sqrt(3744*3744+5616*5616)
print pix
#coeffs = [ -1.54035227e-12 , 6.94350405e-09 ,-1.22082544e-05, 9.47349416e-03,5.90440253e+00]
#coeffs = [ -1.94777642e-12 , 9.17138801e-09 , -1.59443120e-05 , 1.21270728e-02,5.15110092e+00]
coeffs = [ -1.48631764e-12 , 6.66110873e-09 , -1.16207010e-05 ,9.22816587e-03,5.78973689e+00]
coeffs = [ -2.69459533e-12, 1.19761622e-08 ,-1.99256846e-05 , 1.46539282e-02,4.54699550e+00]
poly_Rd2f = np.poly1d(coeffs)
f = poly_Rd2f(Rd_distortion(entrancePixel))
print Rd_distortion(entrancePixel)
print f
f=8
d = 79+89
deg=90.3
T1 = np.matrix([d*np.sin(deg*np.pi/180.0),+d*np.cos(deg*np.pi/180.0)-d,0])
#R = np.matrix([[0,-1,0],[1,0,0],[0,0,1]])
#e1 = np.matrix([-f,d+f,0])
#e2 = np.matrix([d+f,-f,0])
#theta = 2*np.arcsin( Rd_distortion(entrancePixel) / (2.0 * f/pix) )
#phi = np.arctan2( entrancePixel[1] - distortionCenter[1], entrancePixel[0] - distortionCenter[0] )
#op1 = np.matrix([ np.sin(theta)*np.cos(phi), np.cos(theta), np.sin(theta)*np.sin(phi)])
XX = (entrancePixel[0] - distortionCenter[0]) / Rd_distortion(entrancePixel)*f * np.tan(2.0*np.arcsin(Rd_distortion(entrancePixel)/(2.0*f/pix))) + 7.5*pix
ZZ = (entrancePixel[1] - distortionCenter[1]) / Rd_distortion(entrancePixel)*f * np.tan(2.0*np.arcsin(Rd_distortion(entrancePixel)/(2.0*f/pix))) + 16 * pix
op1 = np.matrix([XX,f,ZZ])
#E = np.matrix( [[0,0,-d],[0,0,-d],[d,-d,0]])
N = np.cross(op1,T1)
print N
#N = (op1 * E)
N = N.flat
n1 = N[0]
n2 = N[1]
n3 = N[2]
#n4 = -n1*(d+8) + n2*8
y = np.hstack((np.arange(-1500,-500,5),np.arange(-500,-200,0.2),np.arange(-200,200,0.001),np.arange(200,500,0.2),np.arange(500,1500,5))) + 7.5*pix
#z = (-n2*f-n2*y)/(n3) + 16 * pix
z = (n2*d - n1*(d+f - np.cos(deg*np.pi/180.0)*y)/np.sin(deg*np.pi/180.0)-n2*y)/n3 + 16*pix
ru = np.sqrt(z*z+y*y)
outPixel = (1843,5615-2341) #1
outPixel = (1621,5615-2415) #2
#outPixel = (1294,5615-2555) #3
f = poly_Rd2f(Rd_distortion(outPixel))
print f
f = 8
rd = 2*f*np.sin(np.arctan2(ru,f)/2)
U = -rd * np.cos(np.arctan2(z,y)) / pix
V = rd * np.sin(np.arctan2(z,y)) / pix
exrR,exrG,exrB,L,size = readExr("/Network/scratch/Tests/XL/DepthEstimationProject/sourceimages/HDRI/rotationFisheye/exr/vue1.exr")
for (u,v) in zip(U,V):
try:
L[u+distortionCenter[0]+7.5,v+distortionCenter[1] +16 ] = 255
except:
pass
createNewOutputImage("epipolar.exr",L.T,L.T,L.T,size)
#print e1 * E
| Python |
#! /opt/local/bin/python2.6
# Epipolar-curve experiment for a fisheye stereo rig: take an entrance pixel
# in camera 1, build the epipolar plane for camera 2 and burn the resulting
# curve (in green) into a copy of the camera-2 EXR image.
import numpy as np
import time
import sys
from rwEXR import *
# Sensor dimensions used below: 3744 px / 23.9 mm, 5616 px / 35.8 mm
# 3744 23.9
# 5616 35.8
# distortion correction parameters in rad
# 0.207029529537 0.0422547753997
entrancePixel = (3341,5615-2345)
#entrancePixel = (1867.5 + 300,2979.5)
# NOTE(review): 3-tuple looks like a comma-for-dot typo for (1867.5, 2979.5)
# (the sibling script uses 2979.5); only c[0]/c[1] are ever read, so the
# stray third element is silently ignored -- confirm before reuse.
center = (1867.5,2979,5)
f = 8  # focal length used in the projection formulas below (mm)
d = 89+79  # camera-to-camera offset used in T1/E (mm); presumably the baseline
T1 = np.matrix([d,-d,0])
R = np.matrix([[0,-1,0],[1,0,0],[0,0,1]])
e1 = np.matrix([-f,d+f,0])
e2 = np.matrix([d+f,-f,0])
def Rd(pixel):
    # Radial distance of `pixel` from the hard-coded image centre, in pixels.
    c = (1867.5,2979,5)  # same suspected typo as `center` above
    return np.sqrt( (pixel[0] - c[0])*(pixel[0] - c[0]) + (pixel[1] - c[1])*(pixel[1] - c[1]) )
# Inverse of the rd = 2 f sin(theta/2) model applied forward at `rd` below;
# the pixel radius is converted to mm with the 23.9 mm / 3744 px pitch.
theta = 2*np.arcsin( Rd(entrancePixel) * (23.9/3744.0) / (2.0 * f) )
phi = np.arctan2( entrancePixel[1] - center[1], entrancePixel[0] - center[0] )
# Unit ray through the entrance pixel (y is the optical axis here).
op1 = np.matrix([ np.sin(theta)*np.cos(phi), np.cos(theta), np.sin(theta)*np.sin(phi)])
E = np.matrix( [[0,0,-d],[0,0,-d],[d,-d,0]])
# Cross product of the ray and the translation: presumably the normal of the
# epipolar plane (it is orthogonal to both by construction).
N = np.cross(op1,T1)
print N
#N = (op1 * E)
N = N.flat
n1 = N[0]
n2 = N[1]
n3 = N[2]
n4 = -n1*(d+f) + n2*f
y = np.arange(-100,100,0.1)
# Solve n1*x + n2*y + n3*z = 0 for z on the plane x = f... NOTE(review): only
# n2 and n3 appear here, so the n1 term has been dropped -- verify intent.
z = (-n2*f-n2*y)/(n3)
ru = np.sqrt(z*z+y*y)
rd = 2*f*np.sin(np.arctan2(ru,f)/2)  # undistorted radius -> fisheye radius (mm)
U = -rd * np.cos(np.arctan2(z,y)) * 3744/23.9  # back to pixel units
V = rd * np.sin(np.arctan2(z,y)) * 3744/23.9
exrR,exrG,exrB,L,size = readExr("/Network/scratch/Tests/XL/DepthEstimationProject/sourceimages/HDRI/rotationFisheye/exr/vue1.exr")
for (u,v) in zip(U,V):
    # Paint the curve pure green; float indices and no bounds check, so this
    # relies on era-appropriate numpy behaviour (no try/except as in the
    # later revisions of this script).
    exrR[u+1867.5,v+(5616-2979.5)] = 0
    exrG[u+1867.5,v+(5616-2979.5)] = 1
    exrB[u+1867.5,v+(5616-2979.5)] = 0
createNewOutputImage("epipolar.exr",exrR.T,exrG.T,exrB.T,size)
print e1 * E
| Python |
#! /opt/local/bin/python2.6
import numpy as np
import time
import sys
from rwEXR import *
# 3744 23.9
# 5616 35.8
# distortion correction parameters in rad
# 0.207029529537 0.0422547753997
entrancePixel = (2323,5615-2427) #5
entrancePixel = (2622,5615-1935) #6
entrancePixel = (3123,5615-1859) #7
entrancePixel = (3479,5615-2224) #1
entrancePixel = (3341,5615-2345) #2
entrancePixel = (3127,5615-2538) #3
entrancePixel = (3616,5615-3037) #4
physicCenter = (1867.5,5615-2818.5)
#center = (1884.18,2834.89)
#center = np.array([2836.5,3744-1860.79])
#center = (1853,5615-2874.5)
distortionCenter = (1860.79,5615-2836.5)
distortionCenter = (1864,5615-2834)
#center = np.array([2836.5,3744-1860.79])
def Rd_distortion(pixel):
    """Radial distance (in pixels) from `pixel` to the module-level distortionCenter."""
    dx = pixel[0] - distortionCenter[0]
    dy = pixel[1] - distortionCenter[1]
    return np.sqrt(dx * dx + dy * dy)
def Rd_physic(pixel):
    """Radial distance (in pixels) from `pixel` to the module-level physicCenter."""
    dx = pixel[0] - physicCenter[0]
    dy = pixel[1] - physicCenter[1]
    return np.sqrt(dx * dx + dy * dy)
pix = np.sqrt(23.9*23.9 + 35.8*35.8)/np.sqrt(3744*3744+5616*5616)
print pix
#coeffs = [ -1.54035227e-12 , 6.94350405e-09 ,-1.22082544e-05, 9.47349416e-03,5.90440253e+00]
#coeffs = [ -1.94777642e-12 , 9.17138801e-09 , -1.59443120e-05 , 1.21270728e-02,5.15110092e+00]
coeffs = [ -1.48631764e-12 , 6.66110873e-09 , -1.16207010e-05 ,9.22816587e-03,5.78973689e+00]
coeffs = [ -2.69459533e-12, 1.19761622e-08 ,-1.99256846e-05 , 1.46539282e-02,4.54699550e+00]
poly_Rd2f = np.poly1d(coeffs)
f = poly_Rd2f(Rd_distortion(entrancePixel)) - 0.5
print Rd_distortion(entrancePixel)
print f
d = 79+89
deg=90.3
T1 = np.matrix([d*np.sin(deg*np.pi/180.0),+d*np.cos(deg*np.pi/180.0)-d,0])
#R = np.matrix([[0,-1,0],[1,0,0],[0,0,1]])
#e1 = np.matrix([-f,d+f,0])
#e2 = np.matrix([d+f,-f,0])
#theta = 2*np.arcsin( Rd_distortion(entrancePixel) / (2.0 * f/pix) )
#phi = np.arctan2( entrancePixel[1] - distortionCenter[1], entrancePixel[0] - distortionCenter[0] )
#op1 = np.matrix([ np.sin(theta)*np.cos(phi), np.cos(theta), np.sin(theta)*np.sin(phi)])
XX = (entrancePixel[0] - distortionCenter[0]) / Rd_distortion(entrancePixel)*f * np.tan(2.0*np.arcsin(Rd_distortion(entrancePixel)/(2.0*f/pix))) + 3.5 * pix
ZZ = (entrancePixel[1] - distortionCenter[1]) / Rd_distortion(entrancePixel)*f * np.tan(2.0*np.arcsin(Rd_distortion(entrancePixel)/(2.0*f/pix))) + 16.0 * pix
op1 = np.matrix([XX,8,ZZ])
#E = np.matrix( [[0,0,-d],[0,0,-d],[d,-d,0]])
N = np.cross(op1,T1)
print N
#N = (op1 * E)
N = N.flat
n1 = N[0]
n2 = N[1]
n3 = N[2]
#n4 = -n1*(d+8) + n2*8
y = np.hstack((np.arange(-1500,-500,5),np.arange(-500,-200,0.2),np.arange(-200,200,0.001),np.arange(200,500,0.2),np.arange(500,1500,5))) - 3.5 * pix
#z = (-n2*f-n2*y)/(n3) + 16 * pix
z = (n2*d - n1*(d+8 - np.cos(deg*np.pi/180.0)*y)/np.sin(deg*np.pi/180.0)-n2*y)/n3 - 16.0 * pix
ru = np.sqrt(z*z+y*y)
outPixel = (567,5615-2354) #5
outPixel = (1058,5615-1910) #6
outPixel = (1609,5615-1977) #7
outPixel = (1843,5615-2341) #1
outPixel = (1621,5615-2415) #2
outPixel = (1294,5615-2555) #3
outPixel = (2005,5615-2998) #4
f = poly_Rd2f(Rd_distortion(outPixel)) -0.5
print f
rd = 2*f*np.sin(np.arctan2(ru,f)/2)
U = -rd * np.cos(np.arctan2(z,y)) / pix
V = rd * np.sin(np.arctan2(z,y)) / pix
exrR,exrG,exrB,L,size = readExr("/Network/scratch/Tests/XL/DepthEstimationProject/sourceimages/HDRI/rotationFisheye/exr/vue1.exr")
for (u,v) in zip(U,V):
try:
L[u+physicCenter[0],v+physicCenter[1]] = 255
except:
pass
createNewOutputImage("epipolar.exr",L.T,L.T,L.T,size)
#print e1 * E
| Python |
#! /opt/local/bin/python2.6
import numpy as np
import time
import sys
from rwEXR import *
# 3744 23.9
# 5616 35.8
# distortion correction parameters in rad
# 0.207029529537 0.0422547753997
entrancePixel = (3341,5615-2345)
#entrancePixel = (1848,5615-2341)
physicCenter = (1867.5,5615-2979.5)
#center = (1884.18,2834.89)
#center = np.array([2836.5,3744-1860.79])
#center = (1853,5615-2874.5)
distortionCenter = (1860.79,2836.5)
#center = np.array([2836.5,3744-1860.79])
def Rd_distortion(pixel):
    """Radial distance (in pixels) from `pixel` to the module-level distortionCenter."""
    dx = pixel[0] - distortionCenter[0]
    dy = pixel[1] - distortionCenter[1]
    return np.sqrt(dx * dx + dy * dy)
def Rd_physic(pixel):
    """Radial distance (in pixels) from `pixel` to the module-level physicCenter."""
    dx = pixel[0] - physicCenter[0]
    dy = pixel[1] - physicCenter[1]
    return np.sqrt(dx * dx + dy * dy)
pix = np.sqrt(23.9*23.9 + 35.8*35.8)/np.sqrt(3744*3744+5616*5616)
print pix
coeffs = [ -1.54035227e-12 , 6.94350405e-09 ,-1.22082544e-05, 9.47349416e-03,5.90440253e+00]
#coeffs = [ -1.94777642e-12 , 9.17138801e-09 , -1.59443120e-05 , 1.21270728e-02,5.15110092e+00]
coeffs = [ -1.48631764e-12 , 6.66110873e-09 , -1.16207010e-05 ,9.22816587e-03,5.78973689e+00]
poly_Rd2f = np.poly1d(coeffs)
f = poly_Rd2f(Rd_distortion(entrancePixel))
print f
#f = 8
d = 79+89
T1 = np.matrix([d*np.sin(82*np.pi/180.0),+d*np.cos(82*np.pi/180.0)-d,0])
#R = np.matrix([[0,-1,0],[1,0,0],[0,0,1]])
#e1 = np.matrix([-f,d+f,0])
#e2 = np.matrix([d+f,-f,0])
theta = 2*np.arcsin( Rd_distortion(entrancePixel) / (2.0 * f/pix) )
phi = np.arctan2( entrancePixel[1] - distortionCenter[1], entrancePixel[0] - distortionCenter[0] )
op1 = np.matrix([ np.sin(theta)*np.cos(phi), np.cos(theta), np.sin(theta)*np.sin(phi)])
print op1
XX = (entrancePixel[0] - distortionCenter[0]) / Rd_distortion(entrancePixel)*f * np.tan(2.0*np.arcsin(Rd_distortion(entrancePixel)/(2.0*f/pix))) + 7.5*pix
ZZ = (entrancePixel[1] - distortionCenter[1]) / Rd_distortion(entrancePixel)*f * np.tan(2.0*np.arcsin(Rd_distortion(entrancePixel)/(2.0*f/pix))) + 143*pix
op1 = np.matrix([XX,8,ZZ])
#E = np.matrix( [[0,0,-d],[0,0,-d],[d,-d,0]])
print (entrancePixel[0] - distortionCenter[0]) / Rd_distortion(entrancePixel)*f * np.tan(2.0*np.arcsin(Rd_distortion(entrancePixel)/(2.0*f/pix)))
N = np.cross(op1,T1)
print N
#N = (op1 * E)
N = N.flat
n1 = N[0]
n2 = N[1]
n3 = N[2]
#n4 = -n1*(d+8) + n2*8
y = np.arange(-1000,1000,0.1)
z = (-n2*8-n2*y)/(n3)
ru = np.sqrt(z*z+y*y)
outPixel = (1621,5615-2415)
f = poly_Rd2f(Rd_distortion(outPixel))
print f
rd = 2*f*np.sin(np.arctan2(ru,f)/2)
U = -rd * np.cos(np.arctan2(z,y)) / pix
V = rd * np.sin(np.arctan2(z,y)) / pix
exrR,exrG,exrB,L,size = readExr("/Network/scratch/Tests/XL/DepthEstimationProject/sourceimages/HDRI/rotationFisheye/exr/vue1.exr")
for (u,v) in zip(U,V):
L[u+distortionCenter[0]- 7.5*pix,v+distortionCenter[1]- 143*pix] = 255
createNewOutputImage("epipolar.exr",L.T,L.T,L.T,size)
# NOTE(review): removed `print e1 * E` -- both `e1` and `E` are only defined
# in commented-out lines above, so this statement raised NameError; the other
# revisions of this script in the file already comment it out.
#print e1 * E
| Python |
import OpenEXR
import Imath
import math
import numpy as np
def readExr(exrfile):
    """Read an OpenEXR file and return (R, G, B, L, size).

    R/G/B are float32 arrays transposed so the first axis is the image
    x/column axis; L is the Rec.709 luma of RGB; size is (width, height)
    taken from the EXR data window.
    """
    exrfile = str(exrfile)
    infile = OpenEXR.InputFile(exrfile)  # renamed: `file` shadowed the builtin
    pt = Imath.PixelType(Imath.PixelType.FLOAT)
    dw = infile.header()['dataWindow']
    size = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)
    # np.frombuffer replaces the deprecated (now removed) np.fromstring;
    # .copy() keeps the arrays writable, since frombuffer returns a
    # read-only view and callers in this file assign into these arrays.
    R, G, B = [np.frombuffer(infile.channel(c, pt), dtype=np.float32).copy()
               for c in ("R", "G", "B")]
    R = R.reshape(size[1], size[0]).T
    G = G.reshape(size[1], size[0]).T
    B = B.reshape(size[1], size[0]).T
    # Rec.709 luma coefficients
    L = 0.2125*R + 0.7154*G + 0.0721*B
    return R, G, B, L, size
import OpenEXR
import Imath
import math
import time
import numpy as np
def writeEXR(fileName,outputR,outputG,outputB, sz):
    """Write three numpy channel arrays to `fileName` as an EXR image.

    outputR/G/B are serialised raw with tostring() and written as the R, G
    and B channels of an image of size sz = (width, height); the arrays'
    bytes must therefore already match the header's pixel type/size.
    NOTE(review): the OutputFile is never explicitly closed -- presumably
    flushed on garbage collection; verify if writes go missing.
    """
    print "Writing "+fileName
    (strR, strG, strB) = [Channel.tostring() for Channel in (outputR, outputG, outputB)]
    patchOut = OpenEXR.OutputFile(fileName, OpenEXR.Header(sz[0], sz[1]))
    patchOut.writePixels({'R' : strR, 'G' : strG, 'B' : strB})
import os, sys
import OpenEXR
import Imath
import math
import time
import numpy
from numpy import array
np = numpy
import myextension
def readExr(exrfile):
    """Read an OpenEXR file and return (R, G, B, L, size).

    R/G/B are float32 arrays transposed so the first axis is the image
    x/column axis; L is the Rec.709 luma of RGB; size is (width, height)
    taken from the EXR data window.
    """
    exrfile = str(exrfile)
    infile = OpenEXR.InputFile(exrfile)  # renamed: `file` shadowed the builtin
    pt = Imath.PixelType(Imath.PixelType.FLOAT)
    dw = infile.header()['dataWindow']
    size = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)
    # numpy.frombuffer replaces the deprecated (now removed) numpy.fromstring;
    # .copy() keeps the arrays writable, since frombuffer returns a read-only
    # view and callers in this file assign into these arrays (e.g. labels, L).
    R,G,B = [numpy.frombuffer(infile.channel(c, pt), dtype = numpy.float32).copy() for c in ("R","G","B")]
    R = R.reshape(size[1],size[0]).T
    G = G.reshape(size[1],size[0]).T
    B = B.reshape(size[1],size[0]).T
    # Rec.709 luma coefficients
    L = 0.2125*R + 0.7154*G + 0.0721*B
    return R,G,B,L,size
def createNewOutputImage(fileName,outputR,outputG,outputB, sz):
    """Write three numpy channel arrays to `fileName` as an EXR image of
    size sz = (width, height); channels are serialised raw with tostring()
    and stored as R, G and B."""
    print fileName
    (strR, strG, strB) = [Channel.tostring() for Channel in (outputR, outputG, outputB)]
    patchOut = OpenEXR.OutputFile(fileName, OpenEXR.Header(sz[0], sz[1]))
    patchOut.writePixels({'R' : strR, 'G' : strG, 'B' : strB})
def positionFeature(labels, Nth):
    """Mean and standard deviation of the pixel coordinates labelled Nth,
    returned as (mean_x, std_x, mean_y, std_y)."""
    xs, ys = np.where(labels == Nth)
    return np.mean(xs), np.std(xs), np.mean(ys), np.std(ys)
def colorFeature(L, labels, Nth):
    """Mean and standard deviation of the luminance of pixels labelled Nth.

    Bug fix: the original called np.where(labels == Nth, L) -- the invalid
    two-argument form of np.where (it takes either one or three arguments)
    and raised ValueError. The intent, selecting the values of L where the
    label matches as features() below does, is a boolean-mask selection.
    """
    vals = L[labels == Nth]
    return np.mean(vals), np.std(vals)
def features(L, labels, Nth):
    """3x3 covariance of (x, y, luminance) over the pixels labelled Nth,
    followed by the three matching means: (cov, mean_x, mean_y, mean_lum)."""
    mask = labels == Nth
    xs, ys = np.where(mask)
    lum = L[mask]
    cov = np.cov(np.vstack((xs, ys, lum)))
    return cov, np.mean(xs), np.mean(ys), np.mean(lum)
def featuresRGB(R, G, B, labels, Nth):
    """5x5 covariance of (x, y, R, G, B) over the pixels labelled Nth,
    followed by the five per-component means."""
    idx = np.where(labels == Nth)
    xs = np.array(idx[0], dtype=np.double)
    ys = np.array(idx[1], dtype=np.double)
    reds = R[idx]
    greens = G[idx]
    blues = B[idx]
    cov = np.cov(np.vstack((xs, ys, reds, greens, blues)))
    means = [np.mean(c, dtype=np.double) for c in (xs, ys, reds, greens, blues)]
    return cov, means[0], means[1], means[2], means[3], means[4]
def proba_dl_thetam(dl, miu, covarMatrix):
    """Density of the 5-D Gaussian N(miu, covarMatrix) at feature vector dl
    (returned as a 1x1 matrix, like the original).

    Bug fix: the normalisation constant of a k-variate Gaussian is
    sqrt((2*pi)^k * det(Sigma)); the original divided by
    (2*pi)^5 * det(Sigma) without the square root, disagreeing with the
    sqrt(...) this module itself uses when computing lnCovarMatDet.
    """
    V = np.matrix(dl - np.matrix(miu))
    # squared Mahalanobis distance of dl from miu
    mahalanobis2 = V * np.matrix(np.linalg.inv(covarMatrix)) * V.T
    norm = np.sqrt(np.power(2 * np.pi, 5) * np.linalg.det(covarMatrix))
    return np.exp(-0.5 * mahalanobis2) / norm
def prob_m_dl(m,pi_m, dl, miuArray,covarMatrixArray):
    """Responsibility P(m | dl): posterior probability that feature vector dl
    belongs to mixture component m, for a two-component mixture.

    pi_m is the scalar mixing weight of component m (the other component
    implicitly gets 1 - pi_m), hence the mirrored if/elif below.
    NOTE(review): only m == 0 and m == 1 are handled; any other m leaves
    tmp2 unbound and raises UnboundLocalError.
    """
    tmp1 = pi_m * proba_dl_thetam(dl,miuArray[m],covarMatrixArray[m])
    if m == 0:
        tmp2 = pi_m * proba_dl_thetam(dl,miuArray[0],covarMatrixArray[0]) + (1-pi_m) * proba_dl_thetam(dl,miuArray[1],covarMatrixArray[1])
    elif m == 1:
        tmp2 = (1-pi_m) * proba_dl_thetam(dl,miuArray[0],covarMatrixArray[0]) + pi_m * proba_dl_thetam(dl,miuArray[1],covarMatrixArray[1])
    # tmp1/tmp2 is a 1x1 matrix (see proba_dl_thetam); float() unwraps it.
    return float(tmp1/tmp2)
def pi_m__t_plus_1(R,G,B,m,pi_m,miuArray,covarMatrixArray):
    """EM M-step update of the mixing weight of component m: the average of
    the responsibilities P(m | dl) over every pixel of the image.

    NOTE(review): here pi_m is the full weight array (pi_m[m] is forwarded),
    unlike the scalar pi_m taken by miu_m__t_plus_1 / covarMatrix_m__t_plus_1
    below -- confirm callers before reuse. O(width*height) Python loop.
    """
    pixels_count = R.shape[0]*R.shape[1]
    sum = 0  # shadows the builtin; kept as-is
    for x in xrange(R.shape[0]):
        for y in xrange(R.shape[1]):
            # per-pixel feature vector (x, y, R, G, B)
            dl = np.matrix([x,y,R[x][y],G[x][y],B[x][y]])
            sum = sum + prob_m_dl(m,pi_m[m], dl, miuArray,covarMatrixArray)
    return sum/pixels_count
def miu_m__t_plus_1(R,G,B,m,pi_m,miuArray,covarMatrixArray):
    """EM M-step update of the mean of component m: the responsibility-
    weighted average of the per-pixel (x, y, R, G, B) feature vectors.
    pi_m is a scalar weight here. O(width*height) Python loop.
    """
    sum1 = np.matrix([0,0,0,0,0])  # weighted feature accumulator
    sum2 =0  # total responsibility mass
    for x in xrange(R.shape[0]):
        for y in xrange(R.shape[1]):
            dl = np.matrix([x,y,R[x][y],G[x][y],B[x][y]])
            tmp = prob_m_dl(m,pi_m, dl, miuArray,covarMatrixArray)  # P(m | dl)
            sum1 = sum1 + dl * tmp
            sum2 = sum2 + tmp
    return sum1/sum2
def covarMatrix_m__t_plus_1(R,G,B,m,pi_m,miuArray,covarMatrix):
    """EM M-step update of the covariance of component m: responsibility-
    weighted outer products of (dl - miu).

    NOTE(review): miu_m is built from the *whole* miuArray rather than
    miuArray[m]; the commented-out EM loop in __main__ uses old_miuArray[i]
    instead -- this looks like a bug, confirm before relying on this
    function. Also miu_m is loop-invariant and could be hoisted.
    """
    sum1 = np.matrix(np.zeros((5,5)))  # weighted scatter accumulator
    sum2 =0  # total responsibility mass
    for x in xrange(R.shape[0]):
        for y in xrange(R.shape[1]):
            dl = np.matrix([x,y,R[x][y],G[x][y],B[x][y]])
            miu_m = np.matrix(miuArray)
            tmp = prob_m_dl(m,pi_m, dl, miuArray,covarMatrix)  # P(m | dl)
            sum1 = sum1 + (dl - miu_m).T * (dl - miu_m) * tmp
            sum2 = sum2 + tmp
    return sum1/sum2
if __name__ == "__main__":
if len(sys.argv) < 2:
print "no image input"
sys.exit(1)
image = sys.argv[1]
n_labels = 2
R,G,B,L,size = readExr(image)
########
R = np.array(R,dtype = np.double)
G = np.array(G,dtype = np.double)
B = np.array(B,dtype = np.double)
L = np.array(L,dtype = np.double)
########
print image,size
#initialisation of labels
#labels = np.array(np.random.randint(n_labels,size=size),dtype=np.double)
labels = np.ones(size,dtype=np.double)
# sunflower
labels[115:293,(492-378):(492-327)] = 0
labels[156:264,(492-182):(492-128)] = 0
labels[78:135,(492-302):(492-201)] = 0
labels[287:324,(492-322):(492-207)] = 0
#eye.exr
#labels[81:142,(185-103):(185-49)] = 0
#eye_small.exr
#labels[15:29,(36-20):(36-9)] = 0
#Pixar05.exr
#labels[119:205,(702-227):(702-63)] = 0
#labels[446:495,(702-438):(702-420)] = 0
#pixar.exr
#labels[50:91,(146-92):(146-44)] = 0
#pixar_creation.exr
#labels[552:615,(511-229):(511-190)] = 0
#labels[62:97,(43-39):(43-29)] = 0
#labels[23:39,(59-42):(59-12)] = 0
createNewOutputImage("../../images/label0.exr",np.array(labels,dtype=np.float32).T,np.array(labels,dtype=np.float32).T,np.array(labels,dtype=np.float32).T, size)
maxflow = 0
for k in xrange(0):
inversedCovarianceMatrixArray = []
miuArray = []
lnCovarMatDet = []
try:
for i in xrange(n_labels):
covarMatrix, x,y,lum = features(L,labels,i)
inversedCovarianceMatrixArray.append(np.linalg.inv(covarMatrix))
miuArray.append((x,y,lum))
lnCovarMatDet.append(np.log(np.sqrt(8 * np.pi*np.pi*np.pi * np.linalg.det(covarMatrix))))
inversedCovarianceMatrixArray = np.array(inversedCovarianceMatrixArray).reshape((n_labels,3,3))
miuArray = np.array(miuArray).reshape((n_labels,3))
lnCovarMatDet = np.array(lnCovarMatDet).reshape(n_labels)
except:
print "exception"
break
flow = myextension.quickGraphCut(n_labels, L, labels, miuArray, inversedCovarianceMatrixArray,lnCovarMatDet)
createNewOutputImage("../../images/label"+str(k+1)+".exr",np.array(labels,dtype=np.float32).T,np.array(labels,dtype=np.float32).T,np.array(labels,dtype=np.float32).T, size)
if flow > maxflow:
maxflow = flow
else:
pass
#break
for k in xrange(6):
inversedCovarianceMatrixArray = []
miuArray = []
lnCovarMatDet = []
covarMatrixArray = []
for i in xrange(n_labels):
covarMatrix, x, y, r, g, b = featuresRGB(R,G,B,labels,i)
inversedCovarianceMatrixArray.append(np.linalg.inv(covarMatrix))
miuArray.append((x,y,r,g,b))
lnCovarMatDet.append(np.log(np.sqrt( np.power(2*np.pi,5) * np.linalg.det(covarMatrix))))
covarMatrixArray.append(covarMatrix)
inversedCovarianceMatrixArray = np.array(inversedCovarianceMatrixArray,dtype = np.double).reshape((n_labels,5,5))
miuArray = np.array(miuArray,dtype = np.double).reshape((n_labels,5))
lnCovarMatDet = np.array(lnCovarMatDet,dtype = np.double).reshape(n_labels)
flow = myextension.quickGraphCut(n_labels, R,G,B, labels, miuArray, inversedCovarianceMatrixArray,lnCovarMatDet)
if flow > maxflow:
maxflow = flow
else:
pass
#break
createNewOutputImage("../../images/label"+str(k+1)+".exr",np.array(labels,dtype=np.float32).T,np.array(labels,dtype=np.float32).T,np.array(labels,dtype=np.float32).T, size)
Pr_t_m = list()
Pr_t_m.append( 0.5)
Pr_t_m.append( 0.5)
Pr_t_m = np.array(Pr_t_m,dtype = np.double)
Pr_t_plus_1_m = list()
Pr_t_plus_1_m.append( 0.5)
Pr_t_plus_1_m.append( 0.5)
Pr_t_plus_1_m = np.array(Pr_t_plus_1_m,dtype = np.double)
inversedCovarianceMatrixArray = []
lnCovarMatDet = [1,1]
miuArray = []
covarMatrixArray = []
for i in xrange(n_labels):
covarMatrix, x, y, r, g, b = featuresRGB(R,G,B,labels,i)
miuArray.append((x,y,r,g,b))
covarMatrixArray.append(covarMatrix)
inversedCovarianceMatrixArray.append(np.linalg.inv(covarMatrix))
miuArray = np.array(miuArray,dtype = np.double).reshape((n_labels,5))
covarMatrixArray = np.array(covarMatrixArray,dtype = np.double).reshape((n_labels,5,5))
inversedCovarianceMatrixArray = np.array(inversedCovarianceMatrixArray,dtype = np.double).reshape((n_labels,5,5))
old_miuArray = np.array(miuArray,dtype = np.double).reshape((n_labels,5))
new_miuArray = np.array(miuArray,dtype = np.double).reshape((n_labels,5))
old_covarMatrixArray = np.array(covarMatrixArray,dtype = np.double).reshape((n_labels,5,5))
new_covarMatrixArray = np.array(covarMatrixArray,dtype = np.double).reshape((n_labels,5,5))
covarMatDet = np.array([np.linalg.det(old_covarMatrixArray[0]),np.linalg.det(old_covarMatrixArray[1])],dtype = np.double)
for k in xrange(0):
for i in xrange(n_labels):
pass
"""
#covarMatrix, x, y, r, g, b = featuresRGB(R,G,B,labels,i)
#miu = np.matrix([x,y,r,g,b])
miu = np.matrix( old_miuArray[i])
covarMatrix = old_covarMatrixArray[i]
# New Miu
sum1 = np.matrix([0,0,0,0,0])
sum2 =0
#New covarMatrix
sum3 = np.matrix(np.zeros((5,5)))
#New pr_t_m
pixels_count = R.shape[0]*R.shape[1]
#dl = [[x,y,R[x,y],G[x,y],B[x,y]] for x in xrange(R.shape[0]) for y in xrange(R.shape[1]) ]
for x in xrange(R.shape[0]):
for y in xrange(R.shape[1]):
dl = np.matrix([x,y,R[x,y],G[x,y],B[x,y]])
tmp = prob_m_dl(i,Pr_t_m[i], dl, old_miuArray,old_covarMatrixArray) # reponsibility P(m | dl)
sum1 = sum1 + dl * tmp
sum2 = sum2 + tmp
sum3 = sum3 + (dl - miu).T * (dl - miu) * tmp
print sum2
new_miu = sum1/sum2
print "miu ",miu
print "new miu",new_miu
miu = new_miu
new_miuArray[i]=miu
new_covarMatrix = sum3/sum2
new_covarMatrixArray[i]=(new_covarMatrix)
print old_covarMatrixArray[i]
print new_covarMatrix
Pr_t_plus_1_m[i] = sum2/pixels_count
inversedCovarianceMatrixArray[i] = np.linalg.inv(new_covarMatrix)
lnCovarMatDet[i]=np.log(np.sqrt(np.power(2*np.pi,5) * np.linalg.det(new_covarMatrix)))
"""
myextension.EMProcess(Pr_t_m, Pr_t_plus_1_m, R, G, B, old_miuArray, new_miuArray, inversedCovarianceMatrixArray, new_covarMatrixArray, covarMatDet)
print old_miuArray
print new_miuArray
old_miuArray = np.array(new_miuArray,dtype = np.double)
old_covarMatrixArray=np.array(new_covarMatrixArray,dtype=np.double)
Pr_t_m = np.array(Pr_t_plus_1_m,dtype=np.double)
covarMatDet = np.array([np.linalg.det(new_covarMatrixArray[0]),np.linalg.det(new_covarMatrixArray[1])])
inversedCovarianceMatrixArray[0] = np.linalg.inv(new_covarMatrixArray[0])
inversedCovarianceMatrixArray[1] = np.linalg.inv(new_covarMatrixArray[1])
inversedCovarianceMatrixArray = np.array(inversedCovarianceMatrixArray,dtype = np.double).reshape((n_labels,5,5))
miuArray = np.array(new_miuArray,dtype=np.double).reshape((n_labels,5))
lnCovarMatDet[0] = np.log(np.sqrt(np.power(2*np.pi,5) * np.linalg.det(old_covarMatrixArray[0])))
lnCovarMatDet[1] = np.log(np.sqrt(np.power(2*np.pi,5) * np.linalg.det(old_covarMatrixArray[1])))
lnCovarMatDet = np.array(lnCovarMatDet,dtype=np.double).reshape(n_labels)
flow = myextension.quickGraphCut(n_labels, R,G,B, labels, miuArray, inversedCovarianceMatrixArray,lnCovarMatDet)
if flow > maxflow:
maxflow = flow
else:
pass
#break
createNewOutputImage("../../images/label"+str(k+1)+".exr",np.array(labels,dtype=np.float32).T,np.array(labels,dtype=np.float32).T,np.array(labels,dtype=np.float32).T, size)
# Here begins the alpha expansion, we extract at first the contour of the initial alpha regions as active nodes ( because inside of alpha regions, the Pott cost is 0). the initial alpha region is difined by a rectangle (labels). so the contour is four edges of the rectangle.
alphaRegion = np.where(labels == 0) # extract alpha Region
max_X = np.max(alphaRegion[0])
min_X = np.min(alphaRegion[0])
max_Y = np.max(alphaRegion[1])
min_Y = np.min(alphaRegion[1])
activeContour = [ [x,min_Y] for x in xrange(min_X,max_X+1) ] + [ [x,max_Y] for x in xrange(min_X,max_X+1) ] + \
[ [min_X,y] for y in xrange(min_Y,max_Y+1) ] + [ [min_X,y] for x in xrange(min_Y,max_Y+1) ]
activeContour = np.array(activeContour, dtype = np.double)
print activeContour.shape
#myextension.alphaExpansionQuickGraphCut(R,G,B,labels,miuArray,inversedCovarianceMatrixArray,lnCovarMatDet,activeContour)
#createNewOutputImage("label_expansion.exr",np.array(labels,dtype=np.float32).T,np.array(labels,dtype=np.float32).T,np.array(labels,dtype=np.float32).T, size)
| Python |
import numpy as N
import ctypes as C
# Map numpy dtype.char codes to the matching ctypes scalar type; used by
# c_ndarray() below to pick the pointer type for an array's data. Covers
# the integer codes plus double -- float32 ('f') is notably absent, so
# float32 arrays are rejected by c_ndarray.
ctypesDict = {'d' : C.c_double,
              'b' : C.c_char,
              'h' : C.c_short,
              'i' : C.c_int,
              'l' : C.c_long,
              'q' : C.c_longlong,
              'B' : C.c_ubyte,
              'H' : C.c_ushort,
              'I' : C.c_uint,
              'L' : C.c_ulong,
              'Q' : C.c_ulonglong}
def c_ndarray(a, dtype = None, ndim = None, shape = None, requirements = None):
    """
    PURPOSE: Given an array, return a ctypes structure containing the
    arrays info (data, shape, strides, ndim). A check is made to ensure
    that the array has the specified dtype and requirements.
    INPUT: a: an array: something that is or can be converted to a numpy array
    dtype: the required dtype of the array, convert if it doesn't match
    ndim: integer: the required number of axes of the array
    shape: tuple of integers: required shape of the array
    requirements: list of requirements: (E)nsurearray, (F)ortran, (F)_contiguous,
    (C)ontiguous, (C)_contiguous. Convert if it doesn't match.
    OUTPUT: ctypes structure with the fields:
    . data: pointer to the data : the type is determined with the dtype of the
    array, and with ctypesDict.
    . shape: pointer to long array : size of each of the dimensions
    . strides: pointer to long array : strides in elements (not bytes)

    NOTE(review): the returned structure points into the (possibly converted
    or copied) local `array` buffer, but holds no Python reference to it --
    presumably callers keep the source array alive themselves; verify before
    handing the structure to long-lived C code.
    """
    if not requirements:
        # Also allow derived classes of ndarray
        array = N.asanyarray(a, dtype=dtype)
    else:
        # Convert requirements to captial letter codes:
        # (ensurearray' -> 'E'; 'aligned' -> 'A'
        # 'fortran', 'f_contiguous', 'f' -> 'F'
        # 'contiguous', 'c_contiguous', 'c' -> 'C')
        requirements = [x[0].upper() for x in requirements]
        subok = (0 if 'E' in requirements else 1)
        # Make from 'a' an ndarray with the specified dtype, but don't copy the
        # data (yet). This also ensures that the .flags attribute is present.
        array = N.array(a, dtype=dtype, copy=False, subok=subok)
        # See if copying all data is really necessary.
        # Note: 'A' = (A)ny = only (F) it is was already (F)
        copychar = 'A'
        if 'F' in requirements:
            copychar = 'F'
        elif 'C' in requirements:
            copychar = 'C'
        for req in requirements:
            if not array.flags[req]:
                array = array.copy(copychar)
                break
    # If required, check the number of axes and the shape of the array
    # (Python 2 raise syntax; this file targets python2.6)
    if ndim is not None:
        if array.ndim != ndim:
            raise TypeError, "Array has wrong number of axes"
    if shape is not None:
        if array.shape != shape:
            raise TypeError, "Array has wrong shape"
    # Define a class that serves as interface of an ndarray to ctypes.
    # Part of the type depends on the array's dtype.
    class ndarrayInterfaceToCtypes(C.Structure):
        pass
    typechar = array.dtype.char
    if typechar in ctypesDict:
        ndarrayInterfaceToCtypes._fields_ = \
            [("data", C.POINTER(ctypesDict[typechar])),
             ("shape" , C.POINTER(C.c_long)),
             ("strides", C.POINTER(C.c_long))]
    else:
        raise TypeError, "dtype of input ndarray not supported"
    # Instantiate the interface class and attach the ndarray's internal info.
    # Ctypes does automatic conversion between (c_long * #) arrays and POINTER(c_long).
    ndarrayInterface = ndarrayInterfaceToCtypes()
    ndarrayInterface.data = array.ctypes.data_as(C.POINTER(ctypesDict[typechar]))
    ndarrayInterface.shape = (C.c_long * array.ndim)(*array.shape)
    ndarrayInterface.strides = (C.c_long * array.ndim)(*array.strides)
    # Convert strides from bytes (numpy convention) to element counts, as
    # promised in the docstring.
    for n in range(array.ndim):
        ndarrayInterface.strides[n] /= array.dtype.itemsize
    return ndarrayInterface
| Python |
# SCons build script: builds the MRF shared library (the C++ extension used
# by the segmentation code) from MRF_RGB.cpp. Run `scons DEBUG=1` to compile
# with -DDEBUG.
env = Environment()
DEBUG = ARGUMENTS.get('DEBUG','0')
#env.Replace(CFLAGS=['-O2','-Wall','-ansi','-pedantic'])
#env.Replace(CFLAGS=['-O2','-Wall','-ansi','-pedantic'])
# Python 2.6 framework and MacPorts include dirs -- machine-specific paths.
env.Replace(CPPPATH=['/opt/local/Library/Frameworks/Python.framework/Versions/2.6/include','/opt/local/include'])
if DEBUG=='1':
    print "DEBUG"
    env.Replace(CXXFLAGS=['-O2','-Wall','-DDEBUG'])
else:
    print "NO DEBUG"
    env.Replace(CXXFLAGS=['-O2','-Wall'])
#env.SharedLibrary(target='MRF', source=['MRF.cpp'])
#env.SharedLibrary(target='MRF', source=['MRF_RGB.cpp'])
SL = env.SharedLibrary(target='MRF', source=['MRF_RGB.cpp'])
#env.Alias('install', ['../python/'])
"""
Basic functions and utilities
"""
class RepoConfig(object):
    """Per-repository, per-path configuration store.

    Options live in self.repos keyed by (repo, option); each value is a
    two-item list [sorted_path_list, {path_prefix: value}].  Lookup picks
    the longest configured prefix of the queried path; self.defaults
    supplies fallbacks when nothing matches.
    """

    def __init__(self):
        self.repos = {}            # (repo, opt) -> [sorted prefixes, {prefix: value}]
        self.defaults = {}         # opt -> fallback value
        self.autoUpdatePath = True # re-sort the prefix list on every set3()

    def get3(self, opt, repo, path):
        """Return the value of *opt* for *path* in *repo* (longest-prefix wins)."""
        entry = self.repos.get((repo, opt))
        if entry is None:
            return self.defaults.get(opt)
        prefixes, values = entry
        for prefix in prefixes:
            if path.startswith(prefix):
                return values[prefix]
        return self.defaults.get(opt)

    def set3(self, opt, repo, path, value):
        """Store *value* for *opt* under path prefix *path* in *repo*."""
        entry = self.repos.get((repo, opt))
        if entry is None:
            entry = [None, {path: value}]
            self.repos[repo, opt] = entry
        else:
            entry[1][path] = value
        if self.autoUpdatePath:
            self.updatePath(entry)

    def setAutoUpdatePath(self, v):
        """Enable/disable automatic prefix-list maintenance in set3()."""
        self.autoUpdatePath = v

    def updatePaths(self):
        """Rebuild the sorted prefix list of every stored entry."""
        for entry in self.repos.values():
            self.updatePath(entry)

    def updatePath(self, v):
        # Reverse-lexicographic order puts longer extensions of a prefix
        # first, which makes get3() a longest-prefix match.
        v[0] = sorted(v[1].keys(), reverse=True)

    def setDefault(self, opt, value):
        """Set the fallback value returned when no prefix matches *opt*."""
        self.defaults[opt] = value
# ---- end of RepoConfig
def LoadRepoConfig(fn):
    """Import module named *fn* and let its setup(cf) hook fill a RepoConfig."""
    config = RepoConfig()
    module = __import__(fn)
    module.setup(config)
    return config
# -- end
def FileExtMatch(pattern, ext):
    """Match extension *ext* against a comma-separated pattern list.

    Pattern mini-language: each token is an extension with an optional
    leading '+' (accept, the default) or '-' (reject); a bare '+' or '-'
    is a catch-all accept/reject.  An empty or None pattern accepts
    everything.  The first matching token decides; otherwise False.

    Fix: empty tokens (e.g. from 'a,,b' or a trailing comma) used to
    raise IndexError on token[0]; they are now skipped.
    """
    if pattern is None or pattern == "":
        return True
    for token in pattern.split(','):
        if token == '+':
            return True
        if token == '-':
            return False
        sign = '+'
        if token and token[0] in ('+', '-'):
            sign = token[0]
            token = token[1:]
        if ext == token:
            return sign == '+'
    return False
# --end--
# --end--
def VersionString(l):
    """Join version components with dots, e.g. (1, 2, 'dev') -> '1.2.dev'."""
    return '.'.join('%s' % part for part in l)
# --end--
def FileExt(fn):
    """Return the lowercased extension of *fn*, or '' when it has none.

    A dot inside a directory component (e.g. 'a.b/c') does not count as
    an extension separator.
    """
    stem, dot, ext = fn.rpartition('.')
    if not dot or '/' in ext:
        return ''
    return ext.lower()
# --end--
# vim: ts=2 expandtab ai sts=2
| Python |
import os, sys, unittest
import pysvn
from mock import Mock
from svncommitchecker import CommitContext
from svncommitchecker import CommitChecker
from scmtools import RepoConfig
class DummyClass(object):
    # Bare attribute container used to stub out pysvn objects in tests.
    pass
class CommitContextTests(unittest.TestCase):
    """Tests for CommitContext message accumulation and overall status."""
    def testBase0(self):
        # d/e/w collect messages in order (duplicates kept); o() gathers
        # into a set, so duplicates collapse.
        ctx = CommitContext()
        ctx.d('debug0')
        ctx.d('debug1')
        ctx.e('error0')
        ctx.e('error1')
        ctx.w('error2')
        ctx.w('error2')
        ctx.w('error3')
        ctx.o('ierror0')
        ctx.o('ierror1')
        ctx.o('ierror0')
        assert ctx.debugs == ['debug0', 'debug1']
        assert ctx.errors == ['error0', 'error1']
        assert ctx.warnings == ['error2', 'error2', 'error3']
        assert ctx.outlines == set(('ierror0', 'ierror1'))
    # -- end
    def testIsOK(self):
        # isOK() is False iff errors or outlines are present;
        # warnings alone do not fail the commit.
        c0 = CommitContext()
        assert c0.isOK() == True
        c0.w('warning0')
        assert c0.isOK() == True
        c0.e('error0')
        assert c0.isOK() == False
        c0.errors = []
        assert c0.isOK() == True
        c0.o('outline0')
        assert c0.isOK() == False
# ----end----
class MessageCheckerTests(unittest.TestCase):
    """Tests for CommitChecker.Check__CommitMessage with a mocked txn."""
    def createContext(self, txn):
        # Wrap a (mock) transaction in a fresh CommitContext.
        ctx = CommitContext()
        ctx.txn = txn
        return ctx
    # --end--
    def createContextByMessage(self, msg):
        # Context whose txn.revpropget() always returns *msg*.
        txn = DummyClass()
        txn.revpropget = Mock()
        txn.revpropget.return_value = msg
        ctx = self.createContext(txn)
        return ctx
    # --end--
    def mockChecker(self, msg):
        # Checker wired to a context reporting *msg* as the commit log.
        checker = CommitChecker(None, None, None)
        checker.ctx = self.createContextByMessage(msg)
        return checker
    def testOkMessage(self):
        cc = self.mockChecker(u'hello-world, this is a good message')
        cc.Check__CommitMessage()
        assert cc.ctx.isOK()
    def testEmptyMessage(self):
        # An empty commit message must produce error code MSG-E1.
        cc = self.mockChecker(u'')
        cc.Check__CommitMessage()
        ctx = cc.ctx
        assert not ctx.isOK()
        assert ctx.errors[0].split()[0] == 'MSG-E1'
    def testShortenMessage(self):
        # Messages shorter than 10 characters must produce MSG-E2.
        cc = self.mockChecker(u'shortmsg')
        cc.Check__CommitMessage()
        ctx = cc.ctx
        assert not ctx.isOK()
        assert ctx.errors[0].split()[0] == 'MSG-E2'
# ----end----
class CommitCheckerTests(unittest.TestCase):
    """Tests for changed-file listing and binary-extension configuration."""
    def mockContext0(self, changed):
        # Context whose txn.changed() returns the given dict.
        ctx = CommitContext()
        ctx.txn = DummyClass()
        ctx.txn.changed = Mock()
        ctx.txn.changed.return_value = changed
        return ctx
    # --end--
    def testChangeFilenames(self):
        # Deleted entries ('D') and directories are excluded.
        ctx = self.mockContext0({
            'is/a': ('D', pysvn.node_kind.file, 1, 0),
            'is/b': ('R', pysvn.node_kind.file, 1, 0),
            'is/c': ('A', pysvn.node_kind.dir, 1, 0),
            'is/d': ('A', pysvn.node_kind.file, 1, 1),
        })
        cc = CommitChecker(None, None, None)
        cc.ctx = ctx
        cc.txn = ctx.txn
        assert set(cc.getChangedFilenames()) == set(['is/b', 'is/d'])
    # --end--
    def mockChecker2(self, repoPath, cf):
        # Checker bound to *cf* and *repoPath* with a bare context.
        cc = CommitChecker(cf, repoPath, None)
        ctx = DummyClass()
        cc.ctx = ctx
        ctx.repoPath = repoPath
        cc.cf = cf
        return cc
    # --end--
    def testIsBinaryFileByConfig(self):
        # Longest path prefix wins; '+' matches any extension; files
        # without an extension are always treated as binary (True).
        R0 = '/R0'
        cf = RepoConfig()
        cf.setDefault('binary-ext', 'obj,lib,html,js')
        cf.set3('binary-ext', R0, 'abc/', 'rmvb,avi,txt')
        cf.set3('binary-ext', R0, 'abc/def/', 'sln,lib')
        cf.set3('binary-ext', R0, 'abcdef/', '+')
        cc = self.mockChecker2(R0, cf)
        assert cc.isBinaryFileByConfig(R0, 'abc/def.avi') == True
        assert cc.isBinaryFileByConfig(R0, 'abc/def.java') == False
        assert cc.isBinaryFileByConfig(R0, 'abcdef/test.abc') == True
        assert cc.isBinaryFileByConfig(R0, 'abc/defhgi') == True
        assert cc.isBinaryFileByConfig(R0, 'abc/def/ssh.cpp') == False
        assert cc.isBinaryFileByConfig(R0, 'abc/def/ssh.lib') == True
    # --cend--
# vim: ts=2 sts=2 expandtab ai
| Python |
import os, sys, unittest, inspect
from scmtools import RepoConfig, LoadRepoConfig
from scmtools import VersionString, FileExtMatch
from scmtools import FileExt
class SCMToolsTests(unittest.TestCase):
    """Tests for the scmtools helper functions."""
    def testFileExtMatch(self):
        # Pattern mini-language: comma-separated tokens with optional
        # '+'/'-' sign; bare '+'/'-' are catch-all accept/reject.
        assert FileExtMatch(None, 'java')
        assert FileExtMatch('', 'java')
        assert FileExtMatch('+', 'java')
        assert not FileExtMatch('-', 'java')
        assert FileExtMatch('java', 'java')
        assert FileExtMatch('+java', 'java')
        assert not FileExtMatch('+java', 'c')
        assert not FileExtMatch('-java', 'java')
        assert FileExtMatch('java,c', 'java')
        assert FileExtMatch('java,c', 'c')
        assert FileExtMatch('-java,+', 'c')
        assert not FileExtMatch('java,c', 'cpp')
        assert not FileExtMatch('-java,c', 'java')
        assert not FileExtMatch('java,-', 'c')
        assert FileExtMatch('java,c,cpp,txt,-', 'txt')
        assert not FileExtMatch('java,c,cpp,-txt,+', 'txt')
    def testVersionString(self):
        # Components of any type are joined with dots.
        assert VersionString((1,2,3)) == '1.2.3'
        assert VersionString((1,)) == '1'
        assert VersionString((1,2,'dev','pre5')) == '1.2.dev.pre5'
        assert VersionString(['dev', 2]) == 'dev.2'
    # --end--
    def testFileExt(self):
        # Lowercased extension; dots in directory parts do not count.
        assert FileExt('abc.jpg') == 'jpg'
        assert FileExt('abcdef') == ''
        assert FileExt('hello.world/abc') == ''
        assert FileExt('abc.Def') == 'def'
    # --end--
    # --cend--
class RepoConfigBaseTests(unittest.TestCase):
    """Tests for RepoConfig storage, prefix sorting and config loading."""
    def testSetDefault(self):
        repo = RepoConfig()
        repo.setDefault('encoding', 'gbk')
        self.assert_(repo.defaults.get('encoding') == 'gbk')
        repo.setDefault('encoding', 'utf8')
        self.assert_(repo.defaults.get('encoding') == 'utf8')
        self.assert_(repo.defaults.get('otherthings') == None)
        # NOTE(review): this last call has no matching assertion.
        repo.setDefault('en', 'gbk')
    def testSet3(self):
        # Values land in the per-(repo, opt) path map.
        repo = RepoConfig()
        repo.setAutoUpdatePath(False)
        r0 = '/R0'
        r1 = '/R1'
        p0 = 'Path0/'
        p1 = 'Path1/'
        p2 = 'Path2/'
        o0 = 'encoding'
        rs = repo.repos
        repo.set3(o0, r0, p0, 'gbk')
        assert rs[r0, o0][1][p0] == 'gbk'
        repo.set3(o0, r0, p1, 'utf8')
        assert not rs[r0, o0][1][p0] == 'utf8'
        assert rs[r0, o0][1][p1] == 'utf8'
    def testUpdatePath(self):
        # updatePaths() sorts prefixes reverse-lexicographically so the
        # longest applicable prefix is found first by get3().
        repo = RepoConfig()
        repo.setAutoUpdatePath(False)
        r0 = "/R0"
        opt = "encoding"
        repo.set3(opt, r0, 'abc/', 'utf8')
        repo.set3(opt, r0, 'abcdef/', 'utf8')
        repo.set3(opt, r0, 'abc/def/', 'utf8')
        repo.set3(opt, r0, '', 'gbk')
        repo.updatePaths()
        v = repo.repos.get((r0, opt))
        assert v[0] == ['abcdef/', 'abc/def/', 'abc/', '']
    def testAutoUpdatePath(self):
        # With autoUpdatePath on (the default) set3() keeps the list sorted.
        repo = RepoConfig()
        r0 = "/R0"
        opt = "encoding"
        repo.set3(opt, r0, 'abc/', 'utf8')
        repo.set3(opt, r0, 'abcdef/', 'utf8')
        repo.set3(opt, r0, 'abc/def/', 'utf8')
        repo.set3(opt, r0, '', 'gbk')
        v = repo.repos.get((r0, opt))
        assert v[0] == ['abcdef/', 'abc/def/', 'abc/', '']
    def testGet3(self):
        # Longest-prefix lookup with per-option defaults as fallback.
        repo = RepoConfig()
        r0 = '/R0'
        r1 = '/R1'
        r2 = '/R2'
        opt = 'encoding'
        opt2 = 'encoding2'
        repo.setDefault(opt, 'v0')
        repo.set3(opt, r0, 'abc/', 'v1')
        repo.set3(opt, r0, 'abcdef/', 'v2')
        repo.set3(opt, r0, 'abc/def/', 'v3')
        repo.set3(opt, r1, '', 'v4')
        assert repo.get3(opt2, r0, '') == None
        assert repo.get3(opt, r2, 'abc/') == 'v0'
        assert repo.get3(opt, r1, 'abc/') == 'v4'
        assert repo.get3(opt, r0, 'abc/def') == 'v1'
        assert repo.get3(opt, r0, 'abc/def/abc') == 'v3'
        assert repo.get3(opt, r0, 'abcdef/abc') == 'v2'
        assert repo.get3(opt, r0, 'def/') == 'v0'
        assert repo.get3(opt2, r0, 'abcdef/abc') == None
        assert repo.get3(opt2, r2, 'abc/def') == None
    def testLoadRepoConfig(self):
        # Write a throwaway config module, load it through __import__,
        # and verify the same lookups as testGet3.
        # -- create test repo config --
        fout = file('repo_cf_test0.py', 'w')
        fout.write("""
#
# Repo config for test0
#
def setup(cf):
    r0 = '/R0'
    r0 = '/R0'
    r1 = '/R1'
    r2 = '/R2'
    opt = 'encoding'
    opt2 = 'encoding2'
    cf.setDefault(opt, 'v0')
    cf.set3(opt, r0, 'abc/', 'v1')
    cf.set3(opt, r0, 'abcdef/', 'v2')
    cf.set3(opt, r0, 'abc/def/', 'v3')
    cf.set3(opt, r1, '', 'v4')
# vim: ts=2 sts=2 expandtab ai
""")
        fout.close()
        repo = LoadRepoConfig('repo_cf_test0')
        r0 = '/R0'
        r1 = '/R1'
        r2 = '/R2'
        opt = 'encoding'
        opt2 = 'encoding2'
        assert repo.get3(opt2, r0, '') == None
        assert repo.get3(opt, r2, 'abc/') == 'v0'
        assert repo.get3(opt, r1, 'abc/') == 'v4'
        assert repo.get3(opt, r0, 'abc/def') == 'v1'
        assert repo.get3(opt, r0, 'abc/def/abc') == 'v3'
        assert repo.get3(opt, r0, 'abcdef/abc') == 'v2'
        assert repo.get3(opt, r0, 'def/') == 'v0'
        assert repo.get3(opt2, r0, 'abcdef/abc') == None
        assert repo.get3(opt2, r2, 'abc/def') == None
        # best-effort cleanup of the generated module and its bytecode
        try:
            os.unlink('repo_cf_test0.py')
            os.unlink('repo_cf_test0.pyc')
        except OSError:
            pass
# ---- end of RepoConfigBaseTests
if __name__ == '__main__':
    # Run every TestCase defined in this module.
    unittest.main()
# vim: ts=2 sts=2 expandtab ai
| Python |
# -*- coding: utf8 -*-
#
# -- Subversion Pre Commit Hook --
#
__VERSION__ = '0.0.1.1'
__PROGNAME__ = 'IS Subversion Precommit Checker'
import sys, os
import pysvn
from scmtools import RepoConfig, LoadRepoConfig, FileExtMatch, VersionString, FileExt
class CommitContext(object):
    """Accumulates diagnostics produced while checking one commit.

    debugs/errors/warnings keep messages in arrival order (duplicates
    included); outlines is a de-duplicating set of advisory lines.
    """

    def __init__(self):
        self.debugs = []
        self.errors = []
        self.warnings = []
        self.outlines = set()

    def d(self, msg):
        """Record a debug message."""
        self.debugs.append(msg)

    def e(self, msg):
        """Record an error (fails the commit)."""
        self.errors.append(msg)

    def w(self, msg):
        """Record a warning (does not fail the commit)."""
        self.warnings.append(msg)

    def o(self, msg):
        """Record an outline/advisory line (fails the commit, de-duplicated)."""
        self.outlines.add(msg)

    def isOK(self):
        """True when neither errors nor outlines were recorded."""
        return not self.errors and not self.outlines
# --CEND--
class CommitChecker(object):
    """Drives all pre-commit checks for one Subversion transaction.

    Check__* methods record findings on self.ctx (a CommitContext);
    run() executes the whole sequence and terminates the process with a
    non-zero exit status when the commit must be rejected.
    """
    def __init__(self, cf, repoPath, txnid):
        self.cf = cf              # RepoConfig with per-repo/per-path options
        self.repoPath = repoPath  # filesystem path of the repository
        self.txnid = txnid        # transaction id handed in by the hook
    def setup(self):
        """Open the pysvn transaction and prepare the shared check context."""
        self.txn = pysvn.Transaction(self.repoPath, self.txnid)
        # Create context
        self.ctx = CommitContext()
        self.ctx.repoPath = self.repoPath
        self.ctx.txnid = self.txnid
        self.ctx.txn = self.txn
        self.ctx.cf = self.cf
    # --end--
    def defeatMailReport(self):
        """Email a report about a rejected commit when 'defeat-email' is on.

        Silently returns when the option, the sender, or the receiver is
        not configured for this repository.
        """
        cf = self.cf
        ctx = self.ctx
        txn = ctx.txn
        repoPath = ctx.repoPath.encode('utf8')
        if not cf.get3('defeat-email', ctx.repoPath, ''):
            return
        revprops = txn.revproplist()
        author = revprops['svn:author']
        log = revprops['svn:log']
        txnid = ctx.txnid
        subject = "defeat commit %s - %s" % (author, repoPath)
        sender = cf.get3('defeat-email-from', ctx.repoPath, '')
        receiptor = cf.get3('defeat-email-to', ctx.repoPath, '')
        if not sender or not receiptor:
            return
        # bodys collects paragraph blocks; each block is a list of lines.
        bodys = []
        headers = [
            'From: %s' % sender,
            'To: %s' % receiptor,
            'Subject: %s' % subject,
            'Content-Type: text/plain; charset="utf-8"',
            'MIME-Version: 1.0',
            'Content-Transfer-Encoding: 8bit',
        ]
        bodys.append(headers)
        info0 = ['---- COMMIT ----',
            'Repository: %s' % repoPath,
            'Author: %s' % author,
            'Message: %s' % log,
            'TXNID: %s' % txnid,
        ]
        bodys.append(info0)
        finfos = ['---- FILES ----']
        changed = txn.changed()
        fns = changed.keys()
        fns.sort()
        for fn in fns:
            finfo = changed[fn]
            # single-letter node kind: File / Dir / None / Unknown
            if finfo[1] == pysvn.node_kind.file:
                s1 = 'F'
            elif finfo[1] == pysvn.node_kind.dir:
                s1 = 'D'
            elif finfo[1] == pysvn.node_kind.none:
                s1 = 'N'
            else:
                s1 = 'U'
            finfos.append('%s %s %s %s - %s' % (
                finfo[0], s1, finfo[2], finfo[3], fn.encode('utf8')))
        bodys.append(finfos)
        errors = ['---- ERRORS ----']
        errors.extend(ctx.errors)
        bodys.append(errors)
        debugs = ['---- DEBUGS ----']
        debugs.extend(ctx.debugs)
        bodys.append(debugs)
        outlines = ['---- OUTLINES ----']
        outlines.extend(ctx.outlines)
        bodys.append(outlines)
        body = '\n\n'.join(['\n'.join(x) for x in bodys])
        fout = os.popen('/usr/sbin/sendmail -F%s %s' % (sender, receiptor), 'w')
        fout.write(body)
        fout.close()
    def getChangedFilenames(self):
        """Return changed file paths, excluding directories and deletions."""
        txn = self.txn
        txnChanged = txn.changed()
        res = []
        for fn, entry in txnChanged.items():
            if entry[1] != pysvn.node_kind.file:
                continue
            if entry[0] == 'D':
                continue
            res.append(fn)
        return res
    # --end--
    def checkFileExtConfig(self, opt, repoPath, path):
        """Match *path*'s extension against the pattern configured as *opt*.

        Files without an extension always pass (returns True).
        """
        cf = self.cf
        ext = FileExt(path)
        if ext == '':
            return True
        exts = cf.get3(opt, repoPath, path)
        return FileExtMatch(exts, ext)
    # --end--
    def isBinaryFileByConfig(self, repoPath, path):
        # 'binary-ext' lists extensions treated as binary content.
        return self.checkFileExtConfig('binary-ext', repoPath, path)
    # --end--
    def isSourceFileByConfig(self, repoPath, path):
        # 'source-ext' lists extensions subject to source-file checks.
        return self.checkFileExtConfig('source-ext', repoPath, path)
    # --end--
    def isBinaryFile(self, path):
        """True when *path* should be skipped as binary content."""
        if self.isBinaryFileByConfig(self.ctx.repoPath, path):
            return True
        # TODO: check svn props
        return False
    # --end--
    def run(self):
        """Run all checks; on failure print findings, mail, and exit(1)."""
        self.setup()
        ctx = self.ctx
        # -- commit level checks
        self.Check__CommitMessage()
        # -- create changed files list
        fns = self.getChangedFilenames()
        for fn in fns:
            self.Check__FileCore(fn)
        if ctx.isOK():
            return
        print >> sys.stderr, '--ERRORS--'
        print >> sys.stderr, '\n'.join(ctx.errors)
        print >> sys.stderr, '\n--OUTLINES--'
        print >> sys.stderr, '\n'.join(ctx.outlines)
        self.defeatMailReport()
        adminEmail = self.cf.get3('admin-email', ctx.repoPath, '')
        if adminEmail:
            print >> sys.stderr
            print >> sys.stderr, '--NOTE--'
            print >> sys.stderr, '如果有任何意见或者建议,请联系 %s' % adminEmail
        sys.exit(1)
    # --end--
    def Check__FileCore(self, path):
        """Per-file checks (UTF-8 validity, BOM), gated by configuration."""
        cf = self.cf
        ctx = self.ctx
        txn = ctx.txn
        repoPath = ctx.repoPath
        if self.isBinaryFile(path):
            # binary file is passed directly.
            return
        if cf.get3('check-utf8', repoPath, path):
            if self.isSourceFileByConfig(repoPath, path):
                self.Check__UTF8(path)
        if cf.get3('check-bom', repoPath, path):
            self.Check__BOM(path)
    # --end--
    def Check__BOM(self, path):
        """Reject files that begin with the UTF-8 byte-order mark."""
        ctx = self.ctx
        content = self.ctx.txn.cat(path)
        upath = path.encode('utf8')
        if content[:3] == '\xef\xbb\xbf':
            ctx.e("BOM-E1 %s 含有Unicode BOM头标志" % upath)
            ctx.o("BOM-O1 请清除相关文件的BOM头")
            return
    # --end--
    def Check__UTF8(self, path):
        """Reject files containing non-UTF-8 lines; reports line numbers."""
        ctx = self.ctx
        content = self.ctx.txn.cat(path)
        linenum = 1
        lines = []  # 1-based numbers of lines that fail to decode
        texts = content.split("\n")
        for line in texts:
            try:
                line.decode('utf8')
            except UnicodeDecodeError, e:
                lines.append(str(linenum))
            linenum += 1
        upath = path.encode("utf8")
        if lines:
            ctx.e("UTF-E1 %s 包含非法的UTF8字符(文件必须是UTF8编码)" % (upath))
            ctx.e("UTF-E1 %s 存在问题的行: %s" % (upath, ",".join(lines)))
            ctx.o("UTF-O1 请仔细检查并修正文件编码问题")
    # --end--
    def Check__CommitMessage(self):
        """Require a non-empty commit message of at least 10 characters."""
        ctx = self.ctx
        mesg = ctx.txn.revpropget("svn:log")
        if len(mesg) == 0:
            ctx.o('MSG-O1 请填写完整的提交消息')
            ctx.e('MSG-E1 提交消息为空')
            return
        if len(mesg) < 10:
            ctx.o('MSG-O2 真的没什么可说的吗? 消息长度要大于10')
            ctx.e('MSG-E2 提交消息太短')
            return
# --CEND--
def Main():
    """Hook entry point: parse argv (config, repo path, txnid), run checks."""
    print >>sys.stderr, '= %s (v%s)' % (__PROGNAME__, __VERSION__)
    print >>sys.stderr, '- python-%s, pysvn-%s, subversion-%s' % (
        VersionString(sys.version_info[:3]),
        VersionString(pysvn.version),
        VersionString(pysvn.svn_version[:3]))
    # argv[0] is the script name, so three real arguments -> len(argv) == 4
    if len(sys.argv) < 4:
        print >>sys.stderr, '! is-precommit-checker need three command line arguments:'
        print >>sys.stderr, '! python is-precommit {config_name} {repo_path} {txnid}'
        sys.exit(1)
    cf = LoadRepoConfig(sys.argv[1])
    checker = CommitChecker(cf, sys.argv[2], sys.argv[3])
    checker.run()
    sys.exit(0)
# ---- end of Main
if __name__ == '__main__':
    Main()
# vim: ts=2 sts=2 expandtab ai encoding=utf8
| Python |
#
# ==AUTHOR
# Sin Yu <scaner@gmail.com>
#
# ==MODULE
# Space - Simple file storage service
#
from __future__ import with_statement
SPACE_VERSION = '0.0.3.6'
import time, os, threading
import zlib
from struct import pack as ipack, unpack as iunpack
import constants as CC
from service import ServiceBase
from protocol import CreateMessage, O3Channel, O3Call
from protocol import GetDataFromSocketToFile, GetMessageFromSocket
from protocol import GetDataFromSocketToISZIP
from utility import mkdir_p, PrepareDir
from utility import D as _D, D2 as _D2
from utility import RoomLocation, RoomLocationToTuple
from utility import LogFileSpliter
def FileLength(filename):
    """Return the size of *filename* in bytes (st_size from os.stat)."""
    return os.stat(filename).st_size
# ====
# Public API of this module.
# Fix: 'SpaceStroage' was a typo for 'SpaceStorage' — the misspelled name
# does not exist in the module, which breaks `from space import *`.
__all__ = (
    'SpaceStorage', 'SpaceItem',
    'FileItem', 'FileStorage',
    'SpaceService',
)
class SpaceItem(object):
    # Abstract marker base for storable items (see FileItem).
    pass
class SpaceStorage(object):
    # Abstract marker base for storage backends (see FileStorage).
    pass
# ===
class FileStorage(SpaceStorage):
    """Filesystem-backed storage rooted at directory *base*."""
    def __init__(self, base):
        self.base = base  # root directory under which FileItem paths live
class FileItem(SpaceItem):
    """A single stored blob backed by a file under storage.base.

    Data is streamed to/from sockets in BLOCKSIZE chunks.  The item is
    'ready' once all *size* bytes have been received.
    """

    BLOCKSIZE = 524288  # 512 KiB transfer chunk

    def __init__(self, storage, name, size, attrs):
        self.base = storage.base
        self.ready = False       # True once the file holds all `size` bytes
        self.attrs = attrs
        self.name = name
        self.size = size
        self.path = '%s/%s' % (self.base, self.name)
        self.offset = 0          # read offset used by pushSnipToSocket
        self.deletable = True    # unlink() becomes a no-op once False

    def isReady(self):
        """Return True when the item's data has been fully received."""
        return self.ready

    def fillSnipFromSocket(self, socket):
        """Receive exactly self.size bytes from *socket* into self.path.

        Returns True on success (and marks the item ready); False when
        the peer closes early.  Fixes: the file is opened in binary mode,
        and zero-byte items succeed (the old code issued recv(0), got ''
        and wrongly reported a premature close).
        """
        rest = self.size
        fout = open(self.path, 'wb')
        try:
            while rest > 0:
                chunk = socket.recv(min(rest, self.BLOCKSIZE))
                if not chunk:
                    return False  # connection closed before all data arrived
                fout.write(chunk)
                rest -= len(chunk)
        finally:
            fout.close()
        self.ready = True
        return True

    def pushSnipToSocket(self, socket):
        """Send self.size bytes of self.path (from self.offset) to *socket*.

        Returns True on success, False when the file is shorter than the
        advertised size.  Fixes: binary mode, zero-byte items succeed,
        and the seek now happens inside try so the handle cannot leak.
        """
        fin = open(self.path, 'rb')
        try:
            if self.offset:
                fin.seek(self.offset)
            rest = self.size
            while rest > 0:
                chunk = fin.read(min(rest, self.BLOCKSIZE))
                if not chunk:
                    return False  # file ended prematurely
                socket.sendall(chunk)
                rest -= len(chunk)
        finally:
            fin.close()
        return True

    def unlink(self):
        """Delete the backing file (at most once, and only if deletable)."""
        if self.deletable:
            try:
                os.unlink(self.path)
            except OSError:
                # best-effort removal; the file may already be gone
                pass
            self.deletable = False
# ====
class Room(object):
    """A storage room: a directory tree with id, label and usage accounting.

    Constructed positionally, often as Room(*config_tuple).
    """

    def __init__(self, id, label, base, capacity, used):
        self.id = id
        self.label = label
        self.base = base          # root directory of this room
        self.capacity = capacity
        self.used = used
# ====
class SpaceService(ServiceBase):
SVCID = CC.SVC_SPACE
svcDescription = "Data space service (simple filesystem service)"
svcName = 'SPACE'
svcVersion = SPACE_VERSION
ADVERT_INTERVAL = 7
def __init__(self, server):
ServiceBase.__init__(self)
self.lock = threading.Lock()
self.server = server
self.snips = {}
self.rooms = None
self.roomtasks = {}
# ---
def setup(self, conf):
cf = conf['space']
self.storage = FileStorage(cf['path'])
self.resultPath = cf.get('respath', '/pub/o3res')
if cf.get('roommode', None) == 'autoconfig':
S = O3Channel().connect(self.server.resolv('WAREHOUSE'))
res = S(CC.SVC_WAREHOUSE, 'AUTOCONFIG', self.server.zone, self.server.id)
S.close()
if res[0] == CC.RET_OK and res[2] != None:
self.setupRoom(res[2])
def activate(self):
self.server.addTimer2('room_advert', self.ADVERT_INTERVAL, True,
self.advert, args = ())
def setupRoom(self, conf):
self.rooms = {}
for r in conf['rooms']:
room = Room(*r)
self.rooms[room.label] = room
mkdir_p(room.base)
# ---
self.rooms['Z'] = Room(0, 'Z', '/', 0, 0)
# ---
def advert(self):
entry = None
with self.lock:
if self.rooms:
entry = self.server.resolv('WAREHOUSE')
nodeid = self.server.id
starttime = self.server.starttime
tasks = self.roomtasks.keys()
rooms = [ r.id for r in self.rooms.values() if r.id != 0 ]
rooms.sort()
if entry:
S = O3Channel().connect(entry)
res = S(CC.SVC_WAREHOUSE, 'ROOMADVERT',
nodeid, self.server.entry, starttime, tasks, rooms)
S.close()
# ---
def put2(self, id, path, size, offset,attrs = None, deletable = False):
if self.snips.has_key(id):
self.snips[id].unlink()
del self.snips[id]
item = FileItem(self.storage, id, size, attrs)
item.offset = offset
item.path = path
item.ready = True
item.deletable = deletable
self.snips[id] = item
# ---
def exportPUT2(self, channel, id, path, size,
offset, attrs, deletable = False):
self.put2(id, path, size, offset, attrs, deletable)
return (CC.RET_OK, self.SVCID, id, size)
def exportPUT(self, channel, id, size, attrs):
if self.snips.has_key(id):
self.snips[id].unlink()
del self.snips[id]
channel.send(CreateMessage(
CC.RET_CONTINUE, self.SVCID, 0))
item = FileItem(self.storage, id, size, attrs)
item.fillSnipFromSocket(channel)
if item.isReady():
self.snips[id] = item
return (CC.RET_OK, self.SVCID, id, size)
else:
del item
# TODO more error detail here
return (CC.RET_ERROR, self.SVCID, CC.ERROR_SPACE_PUT)
def exportGET(self, channel, id, attrs):
if not self.snips.has_key(id):
return (CC.RET_ERROR, self.SVCID, CC.ERROR_NO_SUCH_OBJECT)
item = self.snips[id]
channel.send(CreateMessage(CC.RET_CONTINUE, self.SVCID, id, item.size))
item.pushSnipToSocket(channel)
return ((CC.RET_OK, self.SVCID, 0),
'id:%s size:%.2fm' % (id, item.size/1024.0/1024))
def exportDELETE(self, channel, id):
if not self.snips.has_key(id):
return (CC.RET_ERROR, self.SVCID, CC.ERROR_SPACE_NO_SUCH_SNIP)
item = self.snips[id]
item.unlink()
del self.snips[id]
return (CC.RET_OK, self.SVCID, id)
def exportCLEANALL(self, channel):
for s in self.snips.values():
s.delete()
self.snips.clear()
return (CC.RET_OK, self.SVCID, 0)
def exportLISTALL(self, channel):
return (CC.RET_OK, self.SVCID, self.snips.keys())
def exportLOCALPATH(self, channel, id):
return (CC.RET_OK, self.SVCID, '%s/%s' % (self.storage.base, id))
# ---
def exportROOMADDCONFIG(self, channel, ri):
if self.rooms.has_key(ri[1]):
return (CC.RET_ERROR, self.SVCID,
CC.ERROR_WAREHOUSE_DUPLICATION_ROOMLABEL)
room = Room(*ri)
self.rooms[room.label] = room
mkdir_p(room.base)
return (CC.RET_OK, self.SVCID, 0)
# ---
def exportROOMGET1(self, channel, label, path, offset, size, entityid = 0):
room = self.rooms.get(label, None)
if room == None:
return (CC.RET_ERROR, self.SVCID, CC.ERROR_SPACE_NO_SUCH_ROOM)
path = '%s/%s' % (room.base, path)
if not os.path.isfile(path):
return (CC.RET_ERROR, self.SVCID, CC.ERROR_NO_SUCH_OBJECT)
if size == 0:
size = FileLength(path) - offset
channel.send(CreateMessage(CC.RET_OK, self.SVCID, size))
fin = file(path, 'r')
starttime = time.time()
if offset:
fin.seek(offset)
try:
rest = size
while rest != 0:
if rest > 512000:
blocksize = 512000
else:
blocksize = rest
contents = fin.read(blocksize)
channel.sendall(contents)
rest -= blocksize
finally:
fin.close()
endtime = time.time()
return (
(CC.RET_OK, self.SVCID, size),
'E-%d -%d %.2fMB/%.2fs' % (
entityid, offset, size / 1024.0/1024, endtime - starttime))
# ---
def exportROOMGET3(self, channel, P):
label = P['label']
name = P['name']
offset = P.get('offset', 0)
wantblocks = P.get('blocks', 0)
entityid = P.get('entityid', 0)
room = self.rooms.get(label, None)
if room == None:
return (CC.RET_ERROR, self.SVCID, CC.ERROR_SPACE_NO_SUCH_ROOM)
path = '%s/%s' % (room.base, name)
if not os.path.isfile(path):
return (CC.RET_ERROR, self.SVCID, CC_ERROR_NO_SUCH_OBJECT)
starttime = time.time()
size0 = 0
size1 = 0
try:
fin = file(path, 'rb')
headblock = fin.read(0x10000)
filehead = iunpack('4sIII4sIIIQQ4I', headblock[:64])
blocks = filehead[6]
blocksize = filehead[7]
if wantblocks == 0:
wantblocks = blocks - offset
else:
wantblocks = min(blocks - offset, wantblocks)
channel.send(CreateMessage(CC.RET_OK,self.SVCID, wantblocks))
for i in xrange(offset, offset + wantblocks):
blockheadstr = headblock[64 + i * 32: 64 + i * 32 + 32]
blockhead = iunpack("QII4I", blockheadstr)
if i == offset:
fin.seek(blockhead[0] + 0x10000)
binsize = blockhead[1]
size0 += binsize
size1 += blockhead[2] # boutsize
ccontent = fin.read(binsize)
channel.sendall(blockheadstr)
channel.sendall(ccontent)
endtime = time.time()
return ((CC.RET_OK, self.SVCID, wantblocks),
'E-%d -%d %.2fMB(%.2fMB)/%.2fs' % (
entityid, offset, size1/1024.0/1024, size0/1024.0/1024,
endtime - starttime))
finally:
fin.close()
# ---
def exportROOMDROPSHADOW(self, channel, label, name):
room = self.rooms.get(label)
path = '%s/%s' % (room.base, name)
try:
os.unlink(path)
_D2('ROOM.DROPSHADOW %s' % path)
except OSError, e:
return (CC.RET_ERROR, self.SVCID, e.errno)
return (CC.RET_OK, self.SVCID, 0)
def exportROOMSHADOWLIST(self, channel, label):
if type(label) == str:
room = self.rooms.get(label, None)
else:
room = None
if not room:
return (CC.RET_ERROR, self.SVCID, CC.ERROR_NO_SUCH_OBJECT)
files = []
for path, dirnames, filenames in os.walk(room.base):
files.extend(['%s/%s' % (path, f) for f in filenames])
baselen = len(room.base) + 1
return ((CC.RET_OK, self.SVCID, [ x[baselen:] for x in files ]),
{'shadows': len(files),})
# ---
def exportROOMSHADOWSTAT(self, channel, label, name):
if type(label) == str:
room = self.rooms.get(label, None)
else:
room = None
if not room:
return (CC.RET_ERROR, self.SVCID, CC.ERROR_NO_SUCH_OBJECT)
path = '%s/%s' % (room.base, name)
try:
s = os.stat(path)
return ((CC.RET_OK, self.SVCID, list(s)), '%s/%s' % (label, name))
except OSError, e:
return ((CC.RET_ERROR, self.SVCID, ('OSError', e.filename, e.errno, e.strerror)),
'%s/%s-E' % (label, name))
# ---
def exportROOMCLEAN(self, channel, label, names):
count = 0
room = self.rooms.get(label)
files = []
dirs = []
base = room.base
for path, dirnames, filenames in os.walk(room.base):
files.extend([ '%s/%s' % (path, f) for f in filenames])
dirs.extend([ '%s/%s' % (path, d) for d in dirnames])
files2 = set([ '%s/%s' % (base, name) for name in names])
for path in files:
if path not in files2:
_D2('ROOM.CLEAN remove %s' % (path))
os.unlink(path)
count += 1
dirs.sort()
dirs.reverse()
for d in dirs:
try:
os.rmdir(d)
_D2('ROOM.CLEAN rmdir %s' % d)
count += 1
except OSError, e:
pass
return (CC.RET_OK, self.SVCID, count)
# ---
def exportROOMRECONFIG(self, channel):
S = O3Channel().connect(self.server.resolv('WAREHOUSE'))
res = S(CC.SVC_WAREHOUSE,
'AUTOCONFIG', self.server.zone,
self.server.id)
S.close()
if res[0] == CC.RET_OK and res[2] != None:
self.setupRoom(res[2])
else:
self.rooms = None
return (CC.RET_OK, self.SVCID, res[0])
# ---
def exportADDENTITY(self, channel, name, path):
if name.startswith('/'):
name = name.strip('/')
try:
s = os.stat(path)
except OSError:
return (CC.RET_OK, self.SVCID, CC.ERROR_NO_SUCH_OBJECT)
# Perform a stat() system call on the given path. The return
# value is an object whose attributes correspond to the members
# of the stat structure, namely:
# st_mode (protection bits), st_ino (inode number), st_dev (device),
# st_nlink (number of hard links), st_uid (user ID of owner),
# st_gid (group ID of owner), st_size (size of file, in bytes),
# st_atime (time of most recent access),
# st_mtime (time of most recent content modification),
# st_ctime (platform dependent;
# time of most recent metadata change on Unix, or the time
# of creation on Windows).
ei = {
'name': name,
'node': self.server.id,
'entry': self.server.entry,
'path': path.strip('/'),
'size': s[6],
'mtime': s[8],
}
S = O3Channel().connect(self.server.resolv('WAREHOUSE'))
res = S(CC.SVC_WAREHOUSE, 'ADDENTITY', ei)
S.close()
return res
# ---
def exportROOMMIRROR(self, channel, task):
thr = threading.Thread(
name = task['taskid'],
target = self.mirrorRoom,
args = (task,))
thr.setDaemon(1)
thr.start()
return (CC.RET_OK, self.SVCID, 0)
def mirrorRoom(self, task):
task['result'] = 0
whentry = self.server.resolv('WAREHOUSE')
S = O3Channel()
with self.lock:
self.roomtasks[task['taskid']] = task
try:
srcloc = task['source']
snode, sentry, slabel, spath = RoomLocationToTuple(srcloc)
droom = self.rooms[task['destroomlabel']]
#size = task['size']
size = 0
localpath = '%s/%s' % (
droom.base, task['name'])
# open remote file
S.connect(sentry)
res = S(CC.SVC_SPACE, 'ROOMGET1',
slabel, spath, 0, size, task['entityid'])
if res[0] == CC.RET_OK:
size = res[2]
mkdir_p(os.path.dirname(localpath))
if localpath.endswith('.iz0') and not spath.endswith('.iz0'):
odsize = GetDataFromSocketToISZIP(S.socket, localpath, size)
task['compress'] = 'iz0'
task['compressedsize'] = odsize
else:
fout = file(localpath, 'w')
GetDataFromSocketToFile(S.socket, fout, size)
fout.close()
res = GetMessageFromSocket(S.socket)
else:
task['result'] = res[2]
finally:
S.close()
try:
O3Call(whentry, CC.SVC_WAREHOUSE, 'MIRRORFINISHED', task)
except: pass
with self.lock:
del self.roomtasks[task['taskid']]
# ---
def exportROOMENTITYSPLIT0(self, channel, label, name, bs, etc = 1.1):
room = self.rooms.get(label, None)
if room == None:
return (CC.RET_ERROR, self.SVCID, CC.ERROR_SPACE_NO_SUCH_ROOM)
fullpath = '/'.join((room.base, name))
if not os.path.isfile(fullpath):
return (CC.RET_ERROR, self.SVCID, CC.ERROR_NO_SUCH_OBJECT)
spliter = LogFileSpliter(fullpath, bs, etc)
spliter.splitAtLineEnd()
return (CC.RET_OK, self.SVCID,
spliter.size, spliter.res)
# ---
def exportRESULTPUT(self, channel, name, value):
fname = '/'.join((self.resultPath, name))
PrepareDir(fname)
fout = file(fname, 'w')
fout.write(value)
fout.close()
return (
(CC.RET_OK, self.SVCID, len(value)),
"%s %d" % (name, len(value)))
# ---
def exportRESULTGET(self, channel, name):
fname = '/'.join((self.resultPath, name))
try:
fin = file(fname)
value = fin.read()
fin.close()
return (
(CC.RET_OK, self.SVCID, value),
"%s %d" % (name, len(valne)))
except IOError, e:
return (
(CC.RET_ERROR, self.SVCID, e.errno, e.strerror),
"%s E:%d" % (name, e.errno))
# FEATURE/440
# ---
def exportCLEANSNIPS(self, prefix):
deleted = 0
for k in self.snips.keys():
if k.startswith(prefix):
self.snips[k].unlink()
del self.snips[k]
deleted += 1
if not prefix:
prefix = "=ALL="
return (
(CC.RET_OK, self.SVCID, deleted),
"%s %d" % (prefix, deleted))
| Python |
#
# ==AUTHOR
# Sin Yu <scaner@gmail.com>
#
# ==MODULE
# Utility
#
import os, time, sys
import socket
from cStringIO import StringIO
from traceback import print_tb, print_stack
def mkdir_p(dir):
    """Create directory *dir* and all missing parents (like `mkdir -p`).

    Does nothing when *dir* already exists; raises OSError when a regular
    file occupies the path.
    """
    if os.path.isdir(dir):
        pass
    elif os.path.isfile(dir):
        # Fix: the message interpolated the undefined name `newdir`, so
        # this raise itself crashed with NameError instead of OSError.
        raise OSError("a file with same name as the desired " \
            "dir, '%s', already exists." % dir)
    else:
        head, tail = os.path.split(dir)
        if head and not os.path.isdir(head):
            mkdir_p(head)   # recursively create the parent chain
        if tail:
            os.mkdir(dir)
def PrepareDir(path):
    # Ensure the parent directory of *path* exists, creating it recursively.
    mkdir_p(os.path.dirname(path))
# ===
# Module-wide logging context (expects keys like 'hostname' and an optional
# 'socket'); filled in by LogSetup() and read by D()/D2().
dinfo = {}
def LogSetup(di):
    """Merge *di* into the shared logging context."""
    dinfo.update(di)
def D2(str, chr = '='):
    """Send a log line to the remote log socket only (no local print).

    NOTE(review): parameters shadow the builtins str/chr; kept for API
    compatibility with existing callers.
    """
    s = dinfo.get('socket', None)
    if s:
        try:
            s.send("%s %c %s" % (dinfo['hostname'], chr, str))
        except socket.error:
            # best-effort remote logging: drop the line on socket failure
            pass
def D(str, chr = '='):
    """Print a timestamped log line locally and mirror it to the remote
    log socket when one is configured."""
    print '%s %c %s' % (time.strftime('%H:%M:%S'), chr, str)
    s = dinfo.get('socket', None)
    if s:
        try:
            s.send("%s %c %s" % (dinfo['hostname'], chr, str))
        except socket.error:
            # best-effort remote logging: drop the line on socket failure
            pass
def cout(str):
    # Log a possibly multi-line message on one line ('|' marks the breaks).
    str = str.replace('\n', ' | ')
    D(str, '|')
def DE(e):
    """Log an exception: repr, str, and up to 4 traceback frames."""
    D('--%s--' % repr(e), 'E')
    D('{{%s}}' % str(e), 'E')
    s = StringIO()
    # format the current exception's traceback into the string buffer
    print_tb(sys.exc_info()[2], limit = 4, file = s)
    for txt in s.getvalue().split('\n'):
        D(txt, '|')
# ====
def appendinmap(dict, key, value):
    """Append *value* to the list stored under *key*, creating the list
    on first use (multimap-style insert)."""
    dict.setdefault(key, []).append(value)
def removeinmap(dict, key, value):
    """Remove *value* from the list under *key*; drop the key entirely
    when its list becomes empty (multimap-style remove)."""
    entries = dict.get(key)
    entries.remove(value)
    if not entries:
        del dict[key]
def leninmap(dict, key):
    """Number of values stored under *key*; 0 for a missing key."""
    if key in dict:
        return len(dict[key])
    return 0
# ---
def sizeK(size):
    """Return *size* in KiB, rounded up to the next whole KiB.

    Fix: use floor division `//` so the intent survives Python 3 —
    with `/` this function would return a float there (PEP 238), while
    under Python 2 `//` is identical for these int operands.
    """
    return (size + 1023) // 1024
# ---
def RoomLocation(node, entry, label, path): # EXPORT-FUNCTION
    """Format a room location as 'node:host,port:_label/path'.

    A leading '/' on *path* triggers stripping of '/' from both ends
    (mirrors RoomLocationToTuple).
    """
    if path.startswith('/'):
        path = path.strip('/')
    host, port = entry[0], entry[1]
    return '%s:%s,%d:_%s/%s' % (node, host, port, label, path)
def RoomLocationToTuple(location): # EXPORT-FUNCTION
    """Parse 'node:host,port:_label/path' into (node, (host, port), label, path)."""
    node, entrystr, fullpath = location.split(':', 2)
    host, portstr = entrystr.split(',')
    label, path = fullpath.split('/', 1)
    # drop the '_' sentinel that prefixes the label
    return (node, (host, int(portstr)), label[1:], path)
# ---
class LogFileSpliter(object):
    """Computes split points of a text log file at line boundaries.

    The target block size is *blocksize*; a block may grow up to
    blocksize * etc before a split is forced.  Byte offsets at which
    subsequent blocks start accumulate in self.res.
    """
    def __init__(self, filename, blocksize, etc = 1.05):
        self.fn = filename
        self.etc = etc   # tolerance factor: max block = blocksize * etc
        self.bs = blocksize
        self.res = []    # byte offsets where blocks after the first begin
    def getFileSize(self):
        # index 6 of the stat tuple is st_size
        return os.stat(self.fn)[6]
    def splitAtLineEnd(self):
        """Scan the file and record a split offset roughly every bs bytes,
        aligned to the next newline.  Returns the number of splits.

        NOTE(review): if no newline occurs within 4096 bytes of a
        tentative split, find() returns -1 and the split lands mid-line
        at off+bs — confirm whether that is acceptable for the format.
        """
        off = 0
        bs = self.bs
        maxbs = int(bs * self.etc)
        size = self.getFileSize()
        self.size = size
        fin = file(self.fn, 'r')
        try:
            while True:
                if size < off + maxbs:
                    # remainder fits within tolerance: it is the last block
                    return len(self.res)
                fin.seek(off + bs)
                buffer = fin.read(4096)
                padding = buffer.find('\n')
                off += bs + padding + 1  # advance to just past the newline
                self.res.append(off)
        finally:
            fin.close()
    # alias kept for callers using the short name
    split = splitAtLineEnd
# ===== Simple File Logger
class FileLogger(object):
    """Minimal append-only file logger with a timestamp prefix per line."""

    def __init__(self, fn):
        self.fn = fn
        # open() instead of the deprecated file() builtin (removed in py3)
        self.fout = open(fn, 'a+')

    def L(self, str):
        """Append one timestamped log line and flush immediately."""
        self.fout.write('%s %s\n' % (time.strftime('%m%d_%H:%M:%S'), str))
        self.fout.flush()

    def close(self):
        """Close the underlying log file."""
        self.fout.close()
| Python |
#
# ==AUTHOR
# Sin Yu <scaner@gmail.com>
#
# ==MODULE
# Debug info
#
# Public API of this debug-info module.
__all__ = (
	'errorInfo', 'svcInfo', 'returnInfo',)
import constants as CC
# Reverse maps: numeric constant value -> constant name, used for log rendering.
errorInfo = {}
svcInfo = {}
returnInfo = {}
def I(d, id):
	"""Record the reverse mapping CC.<id> -> '<id>' in lookup table *d*."""
	value = getattr(CC, id)
	d[value] = id
def EI(id):
	# register an error-code constant name (ERROR_*)
	I(errorInfo, id)
def SI(id):
	# register a service-id constant name (SVC_*)
	I(svcInfo, id)
def RI(id):
	# register a return-code constant name
	I(returnInfo, id)
# ----
# Known service ids; names must match constants.SVC_* (SVCIDToStr strips 'SVC_').
SI('SVC_SYSTEM')
SI('SVC_BASE')
SI('SVC_NAMES')
SI('SVC_HUB')
SI('SVC_AUTOCONFIG')
SI('SVC_SCHEDULE')
SI('SVC_WAREHOUSE')
SI('SVC_WS')
SI('SVC_SPACE')
def SVCIDToStr(svcid):
	"""Render a service id as its short name ('SVC_HUB' -> 'HUB'), or str(id) if unknown."""
	# `in` replaces the deprecated dict.has_key(); identical behavior on 2.6.
	if svcid in svcInfo:
		return svcInfo[svcid][4:]	# drop the 'SVC_' prefix
	return str(svcid)
# ----
# Known error codes; names must match constants.ERROR_*.
EI('ERROR_UNKNOWN')
EI('ERROR_NO_SERVICE')
EI('ERROR_NO_FUNCTION')
EI('ERROR_NO_SUCH_OBJECT')
EI('ERROR_SPACE_PUT')
EI('ERROR_SPACE_NO_SUCH_SNIP')
EI('ERROR_SPACE_NO_SUCH_ROOM')
| Python |
#
# ==AUTHOR
# Sin Yu <scaner@gmail.com>
#
# ==MODULE
# WorkSpace
#
# Version string advertised by WorkSpaceService below.
WORKSPACE_VERSION = '0.0.2.9'
# -----
import threading
import os, sys
from cStringIO import StringIO
from traceback import print_tb, print_stack
from service import ServiceBase
from protocol import CreateMessage, CreateMessage0, \
GetMessageFromSocket, O3Channel, O3Call
from utility import mkdir_p, D as _D, D2 as _D2, DE as _E
import constants as CC
class WorkSpaceService(ServiceBase):
	"""Executes jobs dispatched by the scheduler inside locally-synced code bases.

	Code bases are pulled from the HUB on demand; each job runs in its own
	daemon thread and its outcome is reported back to the SCHEDULE service.
	"""
	SVCID = CC.SVC_WORKSPACE
	svcDescription = "Workspace Service"
	svcName = 'WORKSPACE'
	svcVersion = WORKSPACE_VERSION
	ADVERT_INTERVAL = 5	# seconds between WORKSPACEADVERT heartbeats
	def __init__(self, server):
		self.server = server
		self.lock = threading.Lock()	# guards self.jobs
		self.codebase = {}		# codebase name -> cached descriptor
		self.jobs = {}			# jobid -> job dict, currently running
		self.local = threading.local()	# per-thread lastError storage
	def setup(self, conf):
		cf = conf['workspace']
		self.base = cf['base']
		self.tag = cf.get('tag', 'normal')
		# fetched scripts are imported straight out of the base directory
		sys.path.append(self.base)
	def fillScriptFile(self, fn, contents):
		"""Write one fetched script file below the workspace base directory."""
		path = '%s/%s' % (self.base, fn)
		mkdir_p(os.path.dirname(path))
		fout = file(path, 'w')
		try:
			fout.write(contents)
		finally:
			# close even when the write fails (the original leaked the handle)
			fout.close()
	def loadCodeBase(self, name, version):
		"""Fetch codebase *name* from the HUB and install its files locally.

		Returns True on success.  On failure the HUB's error reply is stored
		in self.local.lastError and False is returned.
		"""
		if name in self.codebase:	# already cached
			return True
		svcpoint = self.server.resolv('HUB')
		channel = O3Channel()
		try:
			channel.connect(svcpoint)
			res = channel(CC.SVC_HUB, 'GETCODEBASE', name, version)
			if res[0] == CC.RET_ERROR:
				self.local.lastError = res
				return False
			codebase = res[2]
			for fn in codebase['files']:
				res = channel(CC.SVC_HUB, 'GETSCRIPTFILE', fn)
				if res[0] == CC.RET_ERROR:
					self.local.lastError = res
					return False
				self.fillScriptFile(fn, res[4])	# res[4] carries the file body
			self.codebase[name] = codebase
			return True
		finally:
			channel.close()
	def activate(self):
		"""Start the periodic advert timer that registers this node with SCHEDULE."""
		# (removed an unused local that snapshotted self.server.svc.keys())
		schedule = self.server.resolv('SCHEDULE')
		entry = self.server.entry
		nodeid = self.server.id
		starttime = self.server.starttime
		self.server.addTimer2('workspace_advert', self.ADVERT_INTERVAL, True,
			self.advert, args = (schedule, nodeid, entry, starttime))
		self.schedule = schedule
		self.nodeid = nodeid
		self.entry = entry
	# ---
	def advert(self, schedule, nodeid, entry, starttime):
		"""Heartbeat: tell the SCHEDULE service we are alive and what runs here."""
		self.lock.acquire()
		try:
			jobs = self.jobs.keys()
		finally:
			self.lock.release()
		channel = O3Channel()
		try:
			channel.connect(schedule)
			channel(CC.SVC_SCHEDULE, "WORKSPACEADVERT",
				nodeid, entry, self.tag, starttime, jobs)
		finally:
			# the original leaked the socket when the call raised
			channel.close()
	# ---
	def unloadCodeBase(self, name):
		"""Drop a cached codebase and evict its modules from sys.modules."""
		if name not in self.codebase:
			return False
		codebase = self.codebase.pop(name)
		for mn in codebase['modules']:
			try:
				del sys.modules[mn]
			except KeyError:
				pass	# module was never imported
		return True
	def exportLOADCODEBASE(self, channel, name, version):
		if self.loadCodeBase(name, version):
			return (CC.RET_OK, self.SVCID, 0)
		return (CC.RET_ERROR, self.SVCID, CC.ERROR_UNKNOWN)
	def exportUNLOADCODEBASE(self, channel, name):
		if self.unloadCodeBase(name):
			return (CC.RET_OK, self.SVCID, name)
		return (CC.RET_ERROR, self.SVCID, CC.ERROR_UNKNOWN)
	def exportUNLOADO3LIB(self, channel):
		"""Evict every imported o3lib* module so the next job re-imports fresh code."""
		deleted = sorted(x for x in sys.modules.keys() if x.startswith('o3lib'))
		for x in deleted:
			del sys.modules[x]
		return (CC.RET_OK, self.SVCID, deleted)
	def exportSTARTJOB(self, channel, job):
		"""Load the job's code base(s) and launch it on a fresh daemon thread.

		Replies immediately; the job outcome is reported to SCHEDULE later by
		executeJob().
		"""
		codebasenames = job['codebase']
		if isinstance(codebasenames, str):	# accept a single name or a list
			codebasenames = [codebasenames, ]
		for codebasename in codebasenames:
			if not self.loadCodeBase(codebasename, None):
				return (CC.RET_ERROR,
					self.SVCID, self.local.lastError[2], self.local.lastError)
		thr = threading.Thread(
			name = 'WORKSPACE-RUNNER',
			target = self.executeJob,
			args = (channel, job))
		thr.setDaemon(1)
		thr.start()
		# second element is the extra log info consumed by the server loop
		return ((CC.RET_OK, self.SVCID, job.get('jobid')),
			job.get('jobid'))
	# ---
	def jobStartup(self, job):
		"""Register *job* as running (it then appears in advert heartbeats)."""
		self.lock.acquire()
		try:
			self.jobs[job['jobid']] = job
		finally:
			self.lock.release()
	def jobFinished(self, job):
		self.lock.acquire()
		try:
			del self.jobs[job['jobid']]
		finally:
			self.lock.release()
	def executeJob(self, channel, job):
		"""Job runner thread body: run the job and report back to SCHEDULE.

		Any exception from the job is captured (name/repr/str/traceback) and
		shipped to SCHEDULE inside info['exception'].
		"""
		self.jobStartup(job)
		modulename = job['module']
		__import__(modulename)
		runtimeException = None
		try:
			bootmod = sys.modules[modulename]
			J = bootmod.generateJob(job, self)
			J.run()
		except Exception as e:
			runtimeException = e
			reName = e.__class__.__name__
			reRepr = repr(e)
			reStr = str(e)
			s = StringIO()
			print_tb(sys.exc_info()[2], limit = 10, file = s)
			reTraceback = s.getvalue()
			_E(e)
		self.jobFinished(job)
		info = job.get('info', {})
		if runtimeException:
			# BUG FIX: removed dead `s = StringIO()` / bare `print_tb`
			# statements here -- the traceback was already captured above.
			info['exception'] = {
				'typename': reName,
				'repr': reRepr,
				'str': reStr,
				'traceback': reTraceback,
			}
		channel = O3Channel()
		channel.connect(self.schedule)
		channel(
			CC.SVC_SCHEDULE, 'JOBFINISHED',
			self.nodeid,
			job.get('jobid'),
			job.get('result', None), info)
		channel.close()
| Python |
#
# ==AUTHOR
# Sin Yu <scaner@gmail.com>
#
# ==MODULE
# Protocol
#
#from cPickle import loads, dumps, HIGHEST_PROTOCOL
from fastmap import _loads as loads, _dumps as dumps
from struct import pack, unpack
from zlib import compress as _compress, decompress as _dcompress
import socket
from o3grid import constants as CC
def CreateMessage(*ins):
	"""Serialize the argument tuple and frame it with a 4-byte big-endian length."""
	payload = dumps(ins)
	return pack('!I', len(payload)) + payload
def CreateMessage0(ins):
	"""Serialize a single object and frame it with a 4-byte big-endian length."""
	payload = dumps(ins)
	return pack('!I', len(payload)) + payload
def GetMessageFromSocket(ins):
	"""Read one length-prefixed message from socket *ins* and deserialize it.

	Raises struct.error when the peer closes the connection before a full
	message arrives -- the server loop already treats struct.error as the
	end-of-stream signal.
	"""
	import struct
	head = ins.recv(4)
	# tolerate a short read of the 4-byte length prefix
	while 0 < len(head) < 4:
		more = ins.recv(4 - len(head))
		if not more:
			break
		head = head + more
	buflen = unpack('!I', head)[0]	# struct.error on empty/short head
	got = 0
	contents = []
	while got != buflen:
		buf = ins.recv(buflen - got)
		if not buf:
			# BUG FIX: the peer closed mid-message; the original looped
			# forever here because recv kept returning ''.
			raise struct.error(
				'connection closed with %d of %d bytes read' % (got, buflen))
		got += len(buf)
		contents.append(buf)
	return loads(''.join(contents))
def GetDataFromSocketToFile(sin, fout, size):
	"""Copy exactly *size* bytes from socket *sin* into file object *fout*.

	Reads in chunks of at most 512000 bytes with MSG_WAITALL.  Returns the
	number of bytes written; a short count means the peer closed early.
	"""
	remaining = size
	while remaining != 0:
		blocksize = 512000 if remaining > 512000 else remaining
		chunk = sin.recv(blocksize, socket.MSG_WAITALL)
		if not chunk:
			return size - remaining
		fout.write(chunk)
		remaining -= len(chunk)
	return size
def GetDataFromSocketToISZIP(
	sin, foname, size, linemode = True, bs = 16777216, level = 6):
	"""Stream *size* bytes from socket *sin* into an ISZ0 block-compressed file.

	The payload is read in blocks of at most *bs* bytes, each zlib-compressed
	at *level* and appended to *foname*.  With linemode=True block boundaries
	are shifted so every block ends on a newline; partial tail lines are
	carried over to the following block.  A 64KiB header area is reserved up
	front and rewritten at the end with the block index.  Returns the total
	compressed payload size.

	NOTE(review): the header area is fixed at 0x10000 bytes; at 32 bytes per
	block-index entry it overflows beyond roughly 2000 blocks -- confirm
	callers keep size/bs below that.
	"""
	rest = size
	waitall = socket.MSG_WAITALL
	bi = []			# block index: (offset, compressed size, raw size)
	fout = file(foname, 'wb')
	fout.write(chr(0) * 0x10000)	# reserve header space, filled in below
	odsize = 0		# compressed payload bytes written so far
	idsize = 0		# raw payload bytes consumed so far
	pending = ''		# carried-over tail of an incomplete line (linemode)
	while True:
		blocksize = min(rest, bs)
		if blocksize == 0:
			if not pending:
				break
			# flush the final partial line as its own block
			content = pending
			# BUG FIX: without clearing `pending` this branch repeated
			# forever, writing the same tail block endlessly.
			pending = ''
		else:
			content = sin.recv(blocksize, waitall)
			rest -= len(content)
			if linemode:
				if content[-1] != '\n':
					# move the trailing partial line into `pending`
					o = content.rfind('\n')
					if o != -1:
						newpending = content[o + 1:]
						content = content[:o + 1]
					else:
						newpending = ''
					if pending:
						content = pending + content
					pending = newpending
				else:
					if pending:
						content = pending + content
					pending = ''
		ccontent = _compress(content, level)
		bi.append((odsize, len(ccontent), len(content)))
		odsize += len(ccontent)
		idsize += len(content)
		fout.write(ccontent)
	# Rewrite the reserved header: magic/format markers, block count, block
	# size, totals, then one 32-byte record per block.
	head0 = pack(
		'4sIII4sIIIQQ4I',
		'ISZ0', 0, 0, 0,
		'HD01', 0, len(bi), bs,
		odsize, idsize,
		0, 0, 0, 0)
	head1 = ''.join([
		pack('QII4I', x[0], x[1], x[2], 0, 0, 0, 0) for x in bi
	])
	fout.seek(0)
	fout.write(head0)
	fout.write(head1)
	fout.close()
	return odsize
# ======
class O3Channel(object):
	"""Thin request/response channel over TCP using length-framed messages."""
	def __init__(self):
		self.socket = None
	def connect(self, addr):
		"""Open a TCP connection to (host, port) *addr*; returns self for chaining."""
		self.addr = addr
		self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
		self.socket.connect(self.addr)
		return self
	def __call__(self, *params):
		"""Send one request tuple and block for the framed reply."""
		self.socket.send(CreateMessage0(params))
		return GetMessageFromSocket(self.socket)
	def getMessage(self):
		"""Read the next framed message without sending anything first."""
		return GetMessageFromSocket(self.socket)
	def close(self):
		"""Close the underlying socket; safe to call more than once."""
		if self.socket:
			self.socket.close()
			self.socket = None
	def recvAll(self, len):
		# raw read of exactly *len* bytes (MSG_WAITALL)
		return self.socket.recv(len, socket.MSG_WAITALL)
	def sendAll(self, buffer):
		# raw write of the whole buffer
		return self.socket.sendall(buffer)
# ======
def O3Call(entry, *param):
	"""One-shot RPC: connect to *entry*, issue the call, close, return the reply.

	BUG FIX: the channel is now closed in a finally block, so the socket no
	longer leaks when the call raises.
	"""
	S = O3Channel().connect(entry)
	try:
		return S(*param)
	finally:
		S.close()
# ===
class O3Space(object):
	"""Client helper for the SPACE service: PUT/GET of binary snips by id."""
	def __init__(self, addr = None):
		self.addr = addr	# (host, port) of the SPACE node; None = localhost
		self.error = 0		# last error code returned by the service
	def PUT(self, id, content):
		"""Store *content* under *id*.  True on success; False with self.error set."""
		channel = O3Channel()
		try:
			length = len(content)
			if self.addr:
				channel.connect(self.addr)
			else:
				channel.connect(('127.0.0.1', CC.DEFAULT_PORT))
			# two-phase protocol: announce the length, then stream the body
			reply = channel(CC.SVC_SPACE, 'PUT', id, length, None)
			if reply[0] != CC.RET_CONTINUE:
				self.error = reply[2]
				return False
			channel.sendAll(content)
			reply = channel.getMessage()
			if reply[0] != CC.RET_OK:
				self.error = reply[2]
				return False
			return True
		finally:
			channel.close()
	def GET(self, id):
		"""Fetch the snip stored under *id*; returns its bytes or None on error."""
		channel = O3Channel()
		try:
			channel.connect(self.addr)
			reply = channel(CC.SVC_SPACE, 'GET', id, None)
			if reply[0] == CC.RET_ERROR:
				self.error = reply[2]
				return None
			length = reply[3]
			content = channel.recvAll(length)
			if len(content) != length:
				return None
			channel.getMessage()	# trailing status message; value unused
			return content
		finally:
			channel.close()
| Python |
#
# ==AUTHOR
# Sin Yu <scaner@gmail.com>
#
# ==MODULE
# Mission and Job
#
import threading, time
import constants as CC
# Marker base classes for mission/job objects (isinstance-style identification).
class MissionBase(object): pass
class SJobBase(object): pass
# ------
# mid - Mission id
# jid - Job ID
# jobid - Full job ID
# mname - Mission name
# jname - Job name
# ------
class Mission(MissionBase):
	"""A scheduled mission: a DAG of SJob instances tracked through wait/ready/run sets."""
	def __init__(self, id, kwargs = None):
		self.serial = 0
		self.id = id
		self.jobs = {}		# jid -> SJob, every job ever fired
		self.waitJobs = {}	# jobs whose prerequisites are not yet met
		self.readyJobs = {}	# jobs queued, waiting for a free node
		self.runJobs = {}	# jobs currently executing on a node
		self.state = CC.SMISSION_NEW
		self.name = 'NoNameMission'
		self.lock = threading.Lock()
		# BUG FIX: the constructor used to discard its kwargs argument
		# (self.kwargs = None), so Mission(id, kwargs) silently lost the
		# parameters unless setup() was called.  setup() still overwrites it.
		self.kwargs = kwargs
		self.schedule = None	# set by the scheduler after creation
		self.codebase = None
	# ---
	def newSJob(self, id, modulename, classname):
		"""Create and register a new SJob bound to this mission."""
		sjob = SJob(self, id)
		sjob.modulename = modulename
		sjob.classname = classname
		self.jobs[id] = sjob
		return sjob
	def setup(self, kwargs):
		"""Adopt request kwargs; 'name' overrides the default mission name."""
		self.name = kwargs.get('name', self.name)
		self.kwargs = kwargs
	# Lifecycle callbacks, intended to be overridden by concrete missions.
	def submit(self): pass		# called when the mission is submitted
	def prepare(self): pass		# called before the mission's first job runs
	def finished(self): pass	# called when every job has finished
	def jobFinished(self, job, params): pass
	def notify(self, channel, node, job, params):
		# default: acknowledge node notifications without acting on them
		return (CC.RET_OK, CC.SVC_SCHEDULE, 0)
class SJob(SJobBase):
	"""One schedulable job inside a Mission's dependency DAG."""
	def __init__(self, mission, id):
		self.id = id
		self.state = CC.SJOB_NEW
		self.mission = mission
		self.codebase = mission.codebase
		self.jobid = '%s:%s' % (mission.id, id)	# globally unique "mid:jid"
		# dependency bookkeeping
		self.prevReady = []	# prerequisites already satisfied
		self.prev = []		# prerequisites still outstanding
		self.next = []		# jobs that wait on this one
		self.inResource = []
		self.outResource = []
		self.attrs = {}
		self.params = None
		self.name = 'NoNameJob'
		self.runat = None	# node id once scheduled
		self.modulename = None	# filled in by Mission.newSJob
		self.classname = None
	def need(self, job):
		"""Declare that this job depends on *job* finishing first."""
		self.prev.append(job)
		job.next.append(self)
	def fire(self):
		"""Enter the owning mission's wait queue."""
		self.createtime = time.time()
		self.state = CC.SJOB_WAIT
		self.mission.jobs[self.id] = self
		self.mission.waitJobs[self.id] = self
	def setup0(self, **kwargs):
		self.setup(kwargs)
	def setup(self, kwargs):
		self.params = kwargs
		self.name = kwargs.get('jobname', self.name)
	def getJobParams(self):
		"""Flatten this job into the plain dict shipped to a workspace node."""
		return {
			'codebase': self.codebase,
			'module': self.modulename,
			'class': self.classname,
			'jobid': self.jobid,
			'jid': self.id,
			'jname': self.name,
			'mname': self.mission.name,
			'params': self.params,
		}
| Python |
#
# ==AUTHOR
# Sin Yu <scaner@gmail.com>
#
# ==MODULE
# HUB Service
#
import os
import sys
import constants as CC
from service import ServiceBase
from protocol import O3Channel, O3Call
from utility import D as _D
class HubService(ServiceBase):
SVCID = CC.SVC_HUB
svcDescription = "HUB Service"
svcName = 'HUB'
svcVersion = '0.0.1.4'
def __init__(self, server):
self.server = server
self.codebase = {}
def setup(self, config):
cf = config['hub']
self.paths = cf['paths']
sys.path.append(self.paths['codebase'])
def loadCodeBase(self, name):
path = '%s/%s.codebase' % (self.paths['codebase'], name)
if not os.path.isfile(path):
return None
fin = file(path, 'r')
content = fin.read()
fin.close()
l = {}
try:
exec content in globals(), l
except:
return None
if l.has_key('codebase'):
return l['codebase']
return None
def unloadCodeBase(self, name):
codebase = self.loadCodeBase(name)
if self.codebase.has_key(name):
del self.codebase[name]
for m in codebase['modules']:
try:
del sys.modules[m]
except KeyError:
pass
return True
def cleanCodeBaseCache(self, names):
ret = list()
if type(names) == str:
names = (names, )
for name in names:
if self.codebase.has_key(name):
del self.codebase[name]
ret.append(name)
return ret
# ---
def _o3unloadCodeBase(self, name, node):
oc = O3Channel()
oc.connect(node[1])
oc(CC.SVC_WORKSPACE, 'UNLOADCODEBASE', name)
oc.close()
# ---
def exportO3UNLOADCODEBASE(self, channel, name):
# First: Clean codebase in hub scope
self.unloadCodeBase(name)
S = O3Channel()
S.connect(self.server.resolv('SCHEDULE'))
res = S(CC.SVC_SCHEDULE, 'LISTWORKSPACES')
nodes = res[2]
S.close()
# Three: Clean codebase in workspace scope on all nodes
for node in nodes:
self.server.delayCall0(self._o3unloadCodeBase, name, node)
_D('O3 unload codebase {%s} in %d nodes' % (name, len(nodes)))
return (CC.RET_OK, self.SVCID, name)
# ---
def exportGETCODEBASE(self, channel, name, version):
if self.codebase.has_key(name):
return (CC.RET_OK, self.SVCID, self.codebase[name])
codebase = self.loadCodeBase(name)
if codebase == None:
return (CC.RET_ERROR, self.SVCID, CC.ERROR_NO_SUCH_OBJECT)
self.codebase[name] = codebase
return (CC.RET_OK, self.SVCID, codebase)
# FEATURE/448
# ---
def exportUNLOADO3LIB(self, channel):
res = O3Call(self.server.resolv('SCHEDULE'),
CC.SVC_SCHEDULE, 'LISTWORKSPACES')
for node in res[2]:
O3Call(node[1], CC.SVC_WORKSPACE, 'UNLOADO3LIB')
return (CC.RET_OK, self.SVCID, len(res[2]))
# ---
def exportUNLOADCODEBASE(self, channel, name):
self.unloadCodeBase(name)
return (CC.RET_OK, self.SVCID, 0)
# ---
def exportCLEANCODEBASECACHE(self, channel, names):
ret = self.cleanCodeBaseCache(names)
return (CC.RET_OK, self.SVCID, ret)
def exportLISTCODEBASECACHE(self, channel):
return (CC.RET_OK, self.SVCID, self.codebase.keys())
# ---
def exportGETSCRIPTFILE(self, channel, name):
path = '%s/%s' % (self.paths['scriptbase'], name)
if not os.path.isfile(path):
return (CC.RET_ERROR, self.SVCID, CC.ERROR_SPACE_NO_SUCH_SNIP)
fin = file(path, 'r')
contents = fin.read()
fin.close()
return (CC.RET_OK, self.SVCID, name, len(contents), contents)
# ---
def exportNODEJOIN(self, channel, nodeinfo):
return (CC.RET_OK, self.SVCID, nodeinfo['id'])
def exportNODELEAVE(self, channel, nodeid):
return (CC.RET_OK, self.SVCID, nodeid)
| Python |
#
# ==AUTHOR
# Sin Yu <scaner@gmail.com>
#
# ==MODULE
# Base service module
#
import threading
import socket
import cPickle as pickle
import struct
import os
import constants as CC
from protocol import CreateMessage0, GetMessageFromSocket, CreateMessage
class ServiceException(Exception):
	"""Base exception type for service-layer failures."""
	def __init__(self, *param):
		super(ServiceException, self).__init__(*param)
# ====
class ServiceBase(object):
	"""Common base for services: name-based dispatch onto export* methods."""
	# ----
	def dispatch(self, channel, param):
		"""Route *param* = (svcid, funcname, args...) to self.export<funcname>."""
		handler = getattr(self, 'export%s' % param[1], None)
		if handler is None:
			return (CC.RET_ERROR, self.SVCID, CC.ERROR_NO_FUNCTION)
		return handler(channel, *param[2:])
	# ----
	def setup(self, conf): pass	# hook: configure from the parsed config
	def activate(self): pass	# hook: called once the server starts serving
	# ----
	def getCurrentPingInfo(self):
		"""Health string reported for monitoring."""
		return 'OK'
# ====
class BaseService(ServiceBase):
SVCID = CC.SVC_BASE
svcName = 'BASE'
svcVersion = '0.0.0.1'
svcDescription = "Base Service"
def __init__(self, server):
self.server = server
def exportLISTSERVICE(self, channel):
return (CC.RET_OK, self.SVCID, self.root.svc.keys())
def exportSHELLSCRIPT(self, channel, script):
fin = os.popen(script)
content = fin.read()
fin.close()
return (CC.RET_OK, self.SVCID, content)
def exportPYTHONSCRIPT(self, channel, script):
try:
g = globals()
l = {}
exec script in g, l
return (CC.RET_OK, self.SVCID, l.get('result', None))
except:
return (CC.RET_ERROR, self.SVCID, 0)
# ====
class EchoService(ServiceBase):
	"""Trivial diagnostic service: returns whatever payload it was sent."""
	SVCID = CC.SVC_ECHO
	svcDescription = "Echo Service"
	def exportECHO(self, channel, str):
		# echo the payload back unchanged
		return (CC.RET_OK, self.SVCID, str)
| Python |
#
# ==AUTHOR
# Sin Yu <scaner@gmail.com>
#
# ==MODULE
# Base server module
#
# Version string reported by ServerBase below.
BASESERVER_VERSION = '0.0.2.2'
import threading
import socket, struct, time
import cPickle as pickle
import sys, os
import Queue
import constants as CC
import fastmap
from service import BaseService
from protocol import CreateMessage0, GetMessageFromSocket, CreateMessage
from protocol import O3Call, O3Channel
from utility import D as _D, DE as _E, D2 as _D2
from debuginfo import SVCIDToStr
# Bit flag for the `debug = call` config option: log every dispatched call.
VERBOSE_CALL = 1
# ------
#def hostid2name(id):
# if len(id) == 5:
# return '%s0%s' % (id[:4], id[4])
# elif len(id) == 7:
# return '%s%s%s' % (id[:4], chr(ord('a') + int(id[4:6]), chr[6]))
# return id
# ====
class TimerItem(object):
	"""Bookkeeping record for one timer registered with SecondTimer."""
	def __init__(self, id, interval, repeat, function, args = None, kwargs = None):
		# BUG FIX: args/kwargs previously defaulted to shared mutable
		# [] / {} objects (classic Python pitfall); use None sentinels.
		self.last = time.time()		# time the timer last fired
		self.interval = interval	# seconds between firings
		self.id = id
		self.function = function
		self.args = [] if args is None else args
		self.kwargs = {} if kwargs is None else kwargs
		self.repeat = repeat		# False = one-shot
class SecondTimer(threading.Thread):
	"""Timer thread: once a second, fire registered timers whose interval elapsed.

	NOTE(review): a non-repeating timer is deleted when it expires WITHOUT
	its function ever being called (see the else branch in run()); every
	caller visible in this file registers repeat=True.  An exception raised
	by a timer function propagates out of run() and kills this thread --
	confirm both behaviors are intended.
	"""
	def __init__(self):
		threading.Thread.__init__(self, name = 'SECONDER')
		self.timer = {}		# id -> TimerItem
		self.order = []		# sorted timer ids; iteration order
		self.finished = threading.Event()	# used only as an interruptible 1s sleep here
		self.current = time.time()
		self.lock = threading.Lock()
	def add(self, id, interval, repeat, function, args = [], kwargs = {}):
		# Register a timer.  (The mutable default args are shared objects;
		# callers must not mutate them.)
		self.lock.acquire()
		timer = TimerItem(id, interval, repeat, function, args, kwargs)
		self.timer[id] = timer
		self.order.append(id)
		self.order.sort()
		self.lock.release()
	def remove(self, id):
		self.lock.acquire()
		self.order.remove(id)
		del self.timer[id]
		self.lock.release()
	def run(self):
		while True:
			self.finished.wait(1.0)	# ~1 second tick
			if not self.order:
				continue
			self.lock.acquire()
			try:
				self.current = time.time()
				for t in list(self.order):
					timer = self.timer[t]
					if self.current - timer.last < timer.interval:
						continue
					if timer.repeat:
						timer.last = self.current
						# invoked while still holding self.lock
						timer.function(*timer.args, **timer.kwargs)
					else:
						# expired one-shot: removed without being called
						del self.timer[t]
						self.order.remove(t)
			finally:
				self.lock.release()
# ====
# Service registry: config-section name -> 'module/ClassPrefix'.  The class
# instantiated is '<ClassPrefix>Service' from module 'o3grid.<module>'.
O3_SERVICES = {
	'workspace': 'workspace/WorkSpace',
	'space': 'space/Space',
	'names': 'names/Name',
	'autoconfig': 'autoconfig/AutoConfig',
	'hub': 'hub/Hub',
	'schedule': 'schedule0/Schedule0',
	'warehouse': 'warehouse/Warehouse',
	}
# Services are set up in this fixed order (see ServerBase.setupServices).
O3_SERVICES_ORDER = [
	'space', 'workspace', 'names', 'autoconfig',
	'hub', 'schedule', 'warehouse',
	]
# ====
def CommonThreadWorker(queue):
	"""Worker loop for CommonThreadPool: pull [func, args, kwargs] tasks forever.

	Exceptions from a task are logged and swallowed so the worker thread
	survives; task_done() is reported after every task.
	"""
	while True:
		task = queue.get()
		try:
			task[0](*task[1], **task[2])
		except Exception as e:
			# best-effort: log and keep the worker alive
			# (removed a redundant `pass`; `as` syntax is valid on 2.6)
			_E(e)
		queue.task_done()
class CommonThreadPool(object):
	"""Fixed-size pool of daemon worker threads fed from one bounded queue."""
	def __init__(self, threads, maxsize):
		self.queue = Queue.Queue(maxsize)	# bounded: put() blocks when full
		self.pool = []
		self.threads = threads
	def start(self):
		"""Spawn the worker threads; call once before addTask()."""
		for idx in range(self.threads):
			worker = threading.Thread(
				name = 'COMMONWORKER-%d' % idx,
				target = CommonThreadWorker, args = (self.queue,))
			worker.setDaemon(True)
			self.pool.append(worker)
			worker.start()
	def addTask(self, func, *args, **kwargs):
		"""Queue func(*args, **kwargs) for execution by some worker."""
		self.queue.put([func, args, kwargs])
# ====
class ServerBase(object):
	"""Threaded TCP message server hosting the registered O3 services.

	One thread per client connection; each connection carries a stream of
	length-prefixed messages dispatched to a service by numeric service id.
	"""
	daemonThreads = True
	addressFamily = socket.AF_INET
	socketType = socket.SOCK_STREAM
	requestQueueSize = 50		# listen() backlog
	allowReuseAddress = True
	verbose = 0			# bitmask; see VERBOSE_CALL
	svcVersion = BASESERVER_VERSION
	def __init__(self):
		self.socket = socket.socket(
			self.addressFamily, self.socketType)
		if self.allowReuseAddress:
			self.socket.setsockopt(
				socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
		self.svc = dict()	# svcid -> service instance
		self.svcText = dict()	# svcid -> description string
		self.bases = None	# the BaseService instance, once registered
	# ----
	def setup(self, config):
		"""Configure from the parsed config: socket, names, remote log, thread pool."""
		self.cf = config
		self.starttime = int(time.time())
		common = self.cf['common']
		sys.path.append('%s/lib/o3' % common['base'])
		debugopt = common.get('debug')
		if debugopt:
			for key in debugopt.split(','):
				if key == 'call':
					self.verbose |= VERBOSE_CALL
		# ID and entry
		self.entry = common.get('entry')
		self.id = common.get('id')
		self.zone = common.get('zone')
		# For Second Timer
		self.second = SecondTimer()
		serveraddr = common.get('listen', None)
		if not serveraddr:
			# default: all interfaces on the configured entry port
			serveraddr = ('0.0.0.0', common.get('entry')[1])
		self.serverAddress = serveraddr
		self.socket.bind(serveraddr)
		self.localnames = common.get('names', None)
		self.namesaddr = self.localnames['NAMES']
		ulog = common.get('ulog')
		if ulog:
			# mirror log lines to a remote collector over a datagram socket
			s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
			s.connect(ulog['addr'])
			sys.modules['o3grid.utility'].LogSetup(
				{'socket': s, 'hostname': self.id})
		threads = common.get('threadpoolsize', 5)
		queuesize = common.get('queuesize', 300)
		self.threadPool = CommonThreadPool(threads, queuesize)
		self.threadPool.start()
		_D('O3 Server/%s Listen on %s:%d - [fastmap/%s]' % (self.svcVersion,
			serveraddr[0], serveraddr[1], fastmap.__REVISION__))
	def setupServices(self):
		"""Instantiate and register every service enabled in the config."""
		# Base Service
		bases = BaseService(self)
		bases.setup(self.cf)
		self.registerService(bases)
		for sn in O3_SERVICES_ORDER:
			if not self.cf.has_key(sn):
				continue	# service not configured on this node
			sstr = O3_SERVICES[sn]
			(mname, cname) = sstr.split('/')
			modname = 'o3grid.%s' % mname
			__import__(modname)
			S = getattr(sys.modules[modname], '%sService' % cname)(self)
			S.setup(self.cf)
			self.registerService(S)
			_D('%s__%s service ready' % (S.svcName, S.svcVersion), 'S')
		self.localSpace = self.svc.get(CC.SVC_SPACE, None)
	# ----
	def register(self, svcid, svc, svcText):
		# Register a service instance under its numeric id.
		self.svc[svcid] = svc
		self.svcText[svcid] = svcText
		if svcid == CC.SVC_BASE:
			self.bases = svc
	def registerService(self, svc):
		self.register(svc.SVCID, svc, svc.svcDescription)
	# ----
	def activate(self):
		"""Start listening, start the timer thread, activate every service."""
		self.socket.listen(self.requestQueueSize)
		self.second.setDaemon(True)
		self.second.start()
		for s in self.svc.values():
			s.activate()
	def serveForever(self):
		while True:
			self.handleRequest()
	def handleRequest(self):
		# One daemon thread per accepted connection.
		(ins, addr) = self.socket.accept()
		thr = threading.Thread(
			name = 'BASEWORKER',
			target = self.processRequestThread,
			args = (ins, addr))
		if self.daemonThreads:
			thr.setDaemon(1)
		thr.start()
	# ---
	def processRequestThread(self, ins, addr):
		"""Connection loop: read a message, dispatch it, send the reply, log the call.

		A struct.error from GetMessageFromSocket signals that the peer closed
		the connection; the loop then ends and the socket is closed.
		"""
		try:
			while True:
				params = GetMessageFromSocket(ins)
				svcid = params[0]
				retcode = 999	# sentinel: no reply code determined yet
				retinfo = None	# optional extra log info from the service
				try:
					try:
						svc = self.svc[svcid]
					except KeyError:
						ins.send(CreateMessage(
							CC.RET_ERROR, CC.SVC_SYSTEM, CC.ERROR_NO_SERVICE))
						retcode = CC.RET_ERROR
						continue
					ret = svc.dispatch(ins, params)
					# a service may return (replytuple, loginfo) instead
					# of just the reply tuple
					if type(ret[0]) != int:
						retinfo = ret[1]
						ret = ret[0]
					ins.send(CreateMessage0(ret))
					retcode = ret[0]
				except Exception, e:
					_E(e)
					# NOTE(review): `raise e` resets the traceback on
					# Python 2; a bare `raise` would preserve it.
					raise e
				finally:
					# per-call logging; VERBOSE_CALL picks loud vs quiet,
					# and a retinfo of/containing 'dontlog' suppresses it
					if self.verbose & VERBOSE_CALL:
						currD = _D
					else:
						currD = _D2
					if retinfo:
						if type(retinfo) == str:
							if retinfo != 'dontlog':
								currD('{%s.%s} P:%d RC:%d + %s' % (
									SVCIDToStr(svcid), params[1], len(params) - 2, retcode, retinfo), 'C')
						elif type(retinfo) == list or type(retinfo) == tuple:
							if retinfo[0] != 'dontlog':
								currD('{%s.%s} P:%d RC:%d + %s' % (
									SVCIDToStr(svcid), params[1], len(params) - 2, retcode, ' '.join(retinfo)), 'C')
						elif not retinfo.has_key('dontlog'):
							currD('{%s.%s} P:%d RC:%d + %s' % (
								SVCIDToStr(svcid), params[1], len(params) - 2, retcode,
								' '.join([ '%s:%s' % (k.split('.', 1)[-1], retinfo[k]) for
									k in sorted(retinfo)])
								), 'C')
					else:
						currD('{%s.%s} P:%d RC:%d' % (
							SVCIDToStr(svcid), params[1], len(params) - 2, retcode), 'C')
		# socket error
		except struct.error:
			# peer closed the connection while a message was being read
			return
		finally:
			ins.close()
		return
	def resolv(self, name):
		"""Resolve a service name to its (addr, port) entry.

		'LOCAL__X' restricts the lookup to the local names table; otherwise
		the local table is tried first and the NAMES service second.
		"""
		if name.startswith('LOCAL__'):
			name = name[7:]
			return self.localnames.get(name, None)
		if self.localnames.has_key(name):
			return self.localnames[name]
		#channel = O3Channel()
		#channel.connect(self.namesaddr)
		res = O3Call(self.namesaddr, CC.SVC_NAMES, 'RESOLV', name)
		#channel.close()
		return res[2]
	# ---
	def addTimer(self, id, interval, repeat, function, args = [], kwargs = {}):
		# timer fires on the SecondTimer thread itself
		self.second.add(id, interval, repeat, function, args, kwargs)
	def addTimer2(self, id, interval, repeat, function, args = [], kwargs = {}):
		# timer fires on the common thread pool, so it cannot block the timer thread
		self.second.add(id, interval, repeat, self.delayCall, args = (function, args, kwargs))
	def removeTimer(self, id):
		self.second.remove(id)
	def delayCall(self, function, args, kwargs):
		# queue function(*args, **kwargs) on the thread pool
		self.threadPool.queue.put((function, args, kwargs))
	def delayCall0(self, function, *args, **kwargs):
		# convenience variant taking the call inline
		self.threadPool.queue.put((function, args, kwargs))
| Python |
#
# ==AUTHOR
# Sin Yu <scaner@gmail.com>
#
# ==MODULE
# Auto Configure Policy
#
import socket
import constants as CC
class AutoConfigPolicy(object):
	"""Produces the per-node configuration handed out by the autoconfig service."""
	def __init__(self, service):
		self.service = service
		# nodes that run with the 'storage' workspace tag
		self.storageGroup = ['z01', 'z03', 'z15', 'z26']
	def autoConfig0(self, channel, group, hostid):
		"""Build the full configuration dict for *hostid*; returns a RET_OK tuple."""
		hid = hostid
		# resolve the node's internal address from its grid DNS name
		realname = hid + '.o3-grid-info.io8.org'
		ip = socket.gethostbyname(realname)
		BASE = '/is/app/o3'
		common = {
			'name': hid,
			'id': hid,
			'entry': (ip, CC.DEFAULT_PORT),
			'zone': 'o3dev',
			'base': BASE,
			'names': {
				'HUB': ('10.6.32.197', CC.DEFAULT_PORT),
				'NAMES': ('10.6.32.197', CC.DEFAULT_PORT),
				'SCHEDULE': ('10.6.32.197', CC.DEFAULT_PORT),
				'WAREHOUSE': ('10.6.32.197', CC.DEFAULT_PORT),
				'RESULT': ('10.4.170.220', CC.DEFAULT_PORT), # p-cn39
			},
			'debug': 'call',
			'ulog': {
				'addr': ('10.6.32.197', CC.DEFAULT_LOG_PORT)
			},
		}
		space = {
			'path': BASE + '/tmp/storage',
			'roommode': 'autoconfig',
		}
		workspace = {'base': BASE + '/tmp/run'}
		if hid in self.storageGroup:
			workspace['tag'] = 'storage'
		config = {
			'common': common,
			'space': space,
			'workspace': workspace,
		}
		return (CC.RET_OK, self.service.SVCID, config)
	def getVersion(self):
		"""Version tag reported after RELOADPOLICY."""
		return 'is-autoconfig-0.0.0.3'
# Exported entry point: the autoconfig service instantiates <module>.Policy.
Policy = AutoConfigPolicy
| Python |
#
# ==AUTHOR
# Sin Yu <scaner@gmail.com>
#
# ==MODULE
# Autoconfig server in O3 grids
#
import threading
import sys
from service import ServiceBase
import constants as CC
class AutoConfigService(ServiceBase):
	"""Hands out generated node configurations via a pluggable policy module."""
	SVCID = CC.SVC_AUTOCONFIG
	svcDescription = "Auto config service"
	svcName = 'AUTOCONFIG'
	svcVersion = '0.0.1.0'
	def __init__(self, server):
		self.server = server
	def setup(self, cf):
		cf = cf.get('autoconfig')
		self.policyName = cf['policy']	# module that must expose a Policy class
		__import__(self.policyName)
		self.policy = sys.modules[self.policyName].Policy(self)
	def exportAUTOCONFIG0(self, channel, group, hostid):
		return self.policy.autoConfig0(channel, group, hostid)
	def exportRELOADPOLICY(self, channel):
		"""Drop and re-import the policy module, then re-instantiate Policy.

		BUG FIX: the reload previously hard-coded 'o3grid.autoconfigpolicy'
		instead of the configured self.policyName, so any other configured
		policy module could never be reloaded (and an unguarded `del
		sys.modules[...]` could raise KeyError).
		"""
		sys.modules.pop(self.policyName, None)
		del self.policy
		__import__(self.policyName)
		self.policy = sys.modules[self.policyName].Policy(self)
		return (CC.RET_OK, self.SVCID, self.policy.getVersion())
| Python |
#
# ==AUTHOR
# Sin Yu <scaner@gmail.com>
#
# ==MODULE
# First class schedule
#
from __future__ import with_statement
# Version string of the first-class scheduler service.
SCHEDULE0_VERSION = '0.0.1.8'
import sys, time
import operator, random
import threading
import Queue
from service import ServiceBase
from protocol import O3Channel
import constants as CC
from utility import D as _D, D2 as _D2, DE as _E, FileLogger
# ===
# Action codes for ScheduleCore's internal event queue.
ACTION_WORKSPACEADVERT = 1
ACTION_NEWNODEJOIN = 3
ACTION_STARTMISSION = 19
ACTION_JOBFINISHED = 22
ACTION_MISSIONFINISHED = 24
ACTION_CANCELMISSION = 25
# ===
class NodeInfo(object):
	"""Scheduler-side record of one workspace node."""
	def __init__(self, id, entry, tag):
		self.id = id
		self.entry = entry		# (addr, port) of the node's server
		self.tag = tag			# 'normal' or a dedicated tag such as 'storage'
		self.last = {}			# timestamps: 'bron', 'start', 'advert'
		self.currentJob = None		# job currently running there, if any
class ScheduleCore(threading.Thread):
	def __init__(self, service, server):
		"""Create the scheduler core thread (not yet started).

		The mission/job log files are opened immediately; their paths are
		relative to the server's working directory.
		"""
		threading.Thread.__init__(self, name = 'SCHEDULE-CORE')
		self.service = service
		self.server = server
		self.nodes = {}		# nodeid -> NodeInfo
		self.missions = {}	# mission id -> Mission (active missions only)
		self.waitQueue = {}	# node id (or 'common') -> list of ready jobs
		self.serial = 0		# monotonically increasing mission serial
		self.queue = Queue.Queue()	# internal (ACTION_*, payload) events
		self.lock = threading.Lock()	# guards all scheduler state above
		self.needSchedule = False	# set when a scheduling pass is due
		self.missionLog = FileLogger('../log/O3Mission')
		self.jobLog = FileLogger('../log/O3Job')
		self.missionHistory = []	# recently finished/cancelled missions
		self.missionHistorySize = 120
# ---
def setup(self, cf): pass
def secondCheck(self): pass
# ---
	def acquire(self):
		# take the scheduler-wide state lock (for external callers)
		self.lock.acquire()
	def release(self):
		# release the scheduler-wide state lock
		self.lock.release()
# ---
	def createMission_(self, kwargs):
		"""Instantiate a Mission from request kwargs (caller holds self.lock).

		The mission id is '<priority><5-digit serial>', so sorting ids
		orders missions first by priority digit, then by submission order.
		"""
		P = kwargs
		priority = P.get('priority', '6')
		id = '%s%05d' % (priority, self.serial)
		self.serial += 1
		modname = P.get('module', None)
		__import__(modname)
		mod = sys.modules[modname]
		missionClassName = P.get('missionclass', 'O3Mission')
		MissionClass = getattr(mod, missionClassName)
		mission = MissionClass(id, P)
		mission.setup(P)
		mission.schedule = self
		return mission
# ---
	def cancelMission_(self, id):
		"""Remove mission *id* and all of its queued/running jobs (lock held).

		Running jobs are only detached (their node's currentJob is cleared);
		the remote workspace is not told to stop them.
		"""
		mission = self.missions.get(id, None)
		# mission wasn't found
		if not mission:
			return
		# mission wasn't in active state
		if mission.state not in (CC.SMISSION_READY, CC.SMISSION_DOING):
			return
		# clean jobs in mission.waitJob
		mission.waitJobs.clear()
		# clean jobs in mission.readyJobs
		if len(mission.readyJobs):
			readyJobIDs = mission.readyJobs.keys()
			for queue in self.waitQueue.values():
				for job in list(queue):
					if job.mission == mission and job.id in readyJobIDs:
						queue.remove(job)
			mission.readyJobs.clear()
		# clean jobs in mission.runJobs:
		if len(mission.runJobs):
			for job in mission.runJobs.values():
				node = self.nodes.get(job.runat, None)
				if not node:
					continue
				# free the node so the scheduler can reuse it
				node.currentJob = None
			mission.runJobs.clear()
		mission.state = CC.SMISSION_CANCEL
		del self.missions[id]
		self.pushToMissionHistory_(mission)
# --- cancelMission
def cancelMission(self, id):
with self.lock:
self.cancelMission_(id)
# ---
def pushToMissionHistory_(self, mission):
if len(self.missionHistory) > self.missionHistorySize:
self.missionHistory.pop(0)
self.missionHistory.append(mission)
# ---
def submitMission(self, kwargs):
self.lock.acquire()
try:
m = self.createMission_(kwargs)
self.missions[m.id] = m
self.queue.put((ACTION_STARTMISSION, m.id))
return m.id
finally:
self.lock.release()
	def startMission(self, id):
		"""Transition mission *id* to READY and schedule its first wave of jobs."""
		self.lock.acquire()
		try:
			m = self.missions[id]
			_D('mission {%s|%s} start' % (m.name, m.id), 'S')
			m.prepare()
			m.state = CC.SMISSION_READY
			self.pushReadyJobsToWaitQueue_(m)
			self.needSchedule = True
			self.schedule_()
		finally:
			self.lock.release()
# ---
	def workspaceAdvert(self, id, entry, tag, starttime, jobs):
		"""Handle a WORKSPACEADVERT heartbeat: register or refresh node *id*."""
		self.lock.acquire()
		try:
			node = self.nodes.get(id, None)
			if not node:
				# first heartbeat from this node: create its record and
				# trigger a scheduling pass so it can pick up waiting jobs
				node = NodeInfo(id, entry, tag)
				node.last['bron'] = time.time()
				node.last['start'] = starttime
				self.nodes[id] = node
				_D2('WS node up {%s=%08X:%s/%s:%d}' % (
					id, starttime, tag, entry[0], entry[1]))
				self.needSchedule = True
				self.queue.put((ACTION_NEWNODEJOIN, id))
			if jobs:
				node.jobs = jobs
			else:
				node.jobs = None
			node.last['advert'] = time.time()
		finally:
			self.lock.release()
# ---
def schedule(self):
self.lock.acquire()
try:
self.schedule_()
finally:
self.lock.release()
	def schedule_(self):
		"""One scheduling pass (lock held): hand ready jobs to free nodes.

		Tagged nodes only take jobs from their own per-node queue; 'normal'
		nodes fall back to the shared 'common' queue.
		"""
		if not self.needSchedule:
			return
		self.needSchedule = False
		freenodes = [ n for n in self.nodes.values() if
			n.currentJob == None]
		commonQueue = self.waitQueue.get('common', None)
		# shuffle so assignment spreads over the free nodes
		random.shuffle(freenodes)
		while len(freenodes) != 0:
			node = freenodes.pop()
			queue = self.waitQueue.get(node.id, None)
			if not queue and node.tag == 'normal':
				queue = commonQueue;
			if not queue:
				continue
			# lowest jobid first: priority digit, then submission order
			queue.sort(key = operator.attrgetter('jobid'))
			job = queue.pop(0)
			self.submitJobToNode_(node, job)
	def submitJobToNode_(self, node, job):
		"""Reserve *node* for *job* and queue the actual network submit (lock held)."""
		#_D('submitJobToNode_ %s %s' % (node.id, job.jobid))
		node.currentJob = job
		del job.mission.readyJobs[job.id]
		job.mission.runJobs[job.id] = job
		job.runat = node.id
		job.state = CC.SJOB_SUBMIT
		# the blocking STARTJOB RPC runs later on the server's thread pool
		self.server.delayCall0(self._submitJobToNode, node)
	def _submitJobToNode(self, node):
		"""Thread-pool half of job submission: send STARTJOB to the node.

		Runs entirely under self.lock, including the network call.  A None
		currentJob means the mission was cancelled between scheduling and
		submission, so nothing is sent.
		"""
		with self.lock:
			job = node.currentJob
			if job == None:
				_D2('cancel submited job at {%s}' % node.id)
				return
			_D('submit %s|%s:%s to %s' % (
				job.jobid, job.mission.name, job.name, node.id), 'S')
			job.state = CC.SJOB_RUN
			job.submittime = time.time()
			jobParams = job.getJobParams()
			channel = O3Channel()
			channel.connect(node.entry)
			res = channel(CC.SVC_WORKSPACE, 'STARTJOB', jobParams)
			# TODO Error handler
			channel.close()
# ---
def missionNotify(self, channel, nodeid, jobid, params):
self.lock.acquire()
try:
node = self.nodes[nodeid]
mid,jid = jobid.split(':', 1)
mission = self.missions[mid]
job = mission.job[jid]
return mission.notify(channel, node, job, params)
finally:
self.lock.release()
# ---
	def jobFinished(self, nodeid, jobid, res, info):
		"""Handle JOBFINISHED from a node: bookkeeping, logging, release successors.

		Frees the node, cancels the whole mission if the job raised remotely
		(or if the mission's own callback raises), pushes now-unblocked
		successor jobs into the wait queue, and queues ACTION_MISSIONFINISHED
		once no jobs remain.
		"""
		with self.lock:
			node = self.nodes[nodeid]
			mid,jid = jobid.split(':', 1)
			#mission = self.missions[mid]
			node.currentJob = None
			self.needSchedule = True
			mission = self.missions.get(mid, None)
			if mission == None:
				# the mission was cancelled while the job was running
				_D('mission %s cancelled' % (jobid))
				self.schedule_()
				return
			job = mission.jobs[jid]
			del mission.runJobs[jid]
			job.state = CC.SJOB_FINISHED
			if info.has_key('exception'):
				# the job raised on the node: cancel the whole mission
				_D('job exception raised %s %s:%s' % (
					nodeid, jobid, info['exception']['typename']))
				self.cancelMission_(mid)
				mission.state = CC.SMISSION_EXCEPTION
				self.schedule_()
				return
			try:
				mission.jobFinished(job, res)
			except Exception, e:
				# a faulty mission callback also cancels the mission
				_D('job-finished exception raised %s:%s' % (
					jobid, e.__class__.__name__))
				self.cancelMission_(mid)
				mission.state = CC.SMISSION_EXCEPTION
				_E(e)
				return
			# one log line per finished job: node, run time, wait time, sizes
			logdetail = []
			logdetail.append('n:%s' % nodeid)
			logdetail.append('r:%.2fs' % (time.time() - job.submittime))
			logdetail.append('w:%.2fs' % (job.submittime - job.createtime))
			if type(res) == dict:
				if res.has_key('insize0'):
					logdetail.append('i:%.2fm' % res['insize0'])
				if res.has_key('outsize0'):
					logdetail.append('o:%.2fm' % res['outsize0'])
				#if res.has_key('debuginfo'):
				#	logdetail.append('info:%s' % res['debuginfo'])
			self.jobLog.L('%s|%s:%s %s' % (
				jobid, mission.name, job.name, ' '.join(logdetail)))
			if type(res) == dict and res.has_key('debuginfo'):
				_D("job-end %s" % res['debuginfo'])
			# release successors whose prerequisites are now all met
			for j in job.next:
				j.prev.remove(job)
				j.prevReady.append(job)
				if len(j.prev) == 0:
					self.pushJobToWaitQueue_(mission, j)
			#if len(mission.unfinished) + len(mission.queued) == 0:
			#	self.queue.put((ACTION_MISSIONFINISHED, mission.id))
			if len(mission.waitJobs) + len(mission.readyJobs) + len(mission.runJobs) == 0:
				self.queue.put((ACTION_MISSIONFINISHED, mission.id))
			self.schedule_()
def pushReadyJobsToWaitQueue_(self, mission):
for job in mission.waitJobs.values():
if len(job.prev) == 0:
self.pushJobToWaitQueue_(mission, job)
def pushJobToWaitQueue_(self, mission, job):
# Move job from wait queue to ready queue
del mission.waitJobs[job.id]
mission.readyJobs[job.id] = job
job.state = CC.SJOB_READY
# Push job in global submitting Queue
if job.runat:
runat = job.runat
else:
runat = 'common'
queue = self.waitQueue.get(runat, None)
if queue == None:
queue = list()
self.waitQueue[runat] = queue
queue.append(job)
# TODO sort job by job id or name?
# ---
def missionFinished(self, mid):
with self.lock:
m = self.missions[mid]
del self.missions[mid]
m.finished()
m.state = CC.SMISSION_DONE
self.pushToMissionHistory_(m)
_D('mission {%s|%s} finished' % (m.name, m.id), 'S')
# Log to mission logs
logdetails = []
jobs = getattr(m, 'jobs', None)
if jobs:
if type(jobs) == int: logdetails.append('jobs:%d' % jobs)
else: logdetails.append('jobs:%d' % len(jobs))
size0 = getattr(m, 'insize0', None)
if size0: logdetails.append('ins:%.2fm' % (size0))
size0 = getattr(m, 'outsize0', None)
if size0: logdetails.append('outs:%s.2fm' % (size0))
starttime = getattr(m, 'starttime', None)
if starttime: logdetails.append('during:%.2fs' % (time.time() - starttime))
self.missionLog.L('%s|%s %s' % (
m.id, m.name, ','.join(logdetails)))
# ---
    def run(self):
        """Scheduler thread main loop: drain the action queue forever.

        The 1-second queue timeout doubles as the tick for secondCheck().
        Handler exceptions are logged and never kill the thread.
        """
        while True:
            try:
                try:
                    task = self.queue.get(True, 1)
                except Queue.Empty:
                    # Idle tick: run the periodic checks.
                    self.secondCheck()
                    continue
                # task[0] selects the action; the rest are its arguments.
                if type(task) == tuple or type(task) == list:
                    if task[0] == ACTION_WORKSPACEADVERT:
                        self.workspaceAdvert(*task[1:])
                    elif task[0] == ACTION_JOBFINISHED:
                        self.jobFinished(*task[1:])
                    elif task[0] == ACTION_STARTMISSION:
                        self.startMission(*task[1:])
                    elif task[0] == ACTION_NEWNODEJOIN:
                        self.schedule()
                    elif task[0] == ACTION_MISSIONFINISHED:
                        self.missionFinished(*task[1:])
                    elif task[0] == ACTION_CANCELMISSION:
                        self.cancelMission(*task[1:])
            except Exception, e:
                _E(e)
            self.queue.task_done()
class Schedule0Service(ServiceBase):
    """RPC facade for the scheduler.

    Most exported calls are simply queued onto the ScheduleCore thread;
    only workspace listing and mission notify are handled synchronously.
    """
    SVCID = CC.SVC_SCHEDULE
    svcDescription = "Schedule service"
    svcName = "SCHEDULE"
    svcVersion = SCHEDULE0_VERSION
    def __init__(self, server):
        self.server = server
        self.core = ScheduleCore(self, server)
    def setup(self, cf):
        self.core.setup(cf)
    def activate(self):
        # Share the core's action queue and start it as a daemon thread.
        self.queue = self.core.queue
        self.core.setDaemon(True)
        self.core.start()
    # ---
    def exportWORKSPACEADVERT(self, channel, nodeid, entry, tag, starttime, jobs):
        self.queue.put((ACTION_WORKSPACEADVERT, nodeid, entry, tag, starttime, jobs))
        return ((CC.RET_OK, self.SVCID, 0), {'dontlog': 1})
    def exportLISTWORKSPACES(self, channel, query = None):
        self.core.acquire()
        try:
            nodes = [(n.id, n.entry, n.tag) for n in self.core.nodes.values()]
        finally:
            self.core.release()
        return (CC.RET_OK, self.SVCID, nodes)
    # TODO detail error message
    def exportSUBMITMISSION(self, channel, params):
        mid = self.core.submitMission(params)
        if not mid:
            return (CC.RET_ERROR, self.SVCID, 0)
        return (CC.RET_OK, self.SVCID, mid)
    def exportJOBFINISHED(self, channel, nodeid, jobid, res, info):
        self.queue.put((ACTION_JOBFINISHED, nodeid, jobid, res, info))
        return ((CC.RET_OK, self.SVCID, 0), "%s %s" % (nodeid, jobid))
    def exportMISSIONNOTIFY(self, channel, nodeid, jobid, params):
        # Synchronous: the mission's notify handler produces the reply.
        return self.core.missionNotify(channel, nodeid, jobid, params)
    def exportCANCELMISSION(self, channel, missionid):
        self.queue.put((ACTION_CANCELMISSION, missionid))
        return (CC.RET_OK, self.SVCID, 0)
# -- DOC --
#
# Mission job-tracking maps (keyed by job id; see jobFinished):
# * mission.waitJobs  - jobs still waiting on dependencies
# * mission.readyJobs - jobs ready to be scheduled
# * mission.runJobs   - jobs currently running
# (legacy fields, kept for reference: mission.unfinished / mission.queued /
#  mission.finished - id)
#
# * waitQueue - per-node (or 'common') lists of ready job objects
# * node.currentJob - job
| Python |
#
# ==AUTHOR
# Sin Yu <scaner@gmail.com>
#
# ==MODULE
# Name server in O3 grids
#
import threading
from service import ServiceBase
import constants as CC
class NameService(ServiceBase):
    """Tiny in-memory name -> value registry for the grid."""
    SVCID = CC.SVC_NAMES
    svcDescription = "Name service"
    svcName = 'NAMES'
    svcVersion = '0.0.1.0'
    def __init__(self, server):
        self.server = server
        self.lock = threading.Lock()
        self.names = {}
    def setup(self, conf):
        # Optional preloaded table: conf['names']['names'].
        cf = conf.get('names', None)
        if not cf:
            return
        if 'names' in cf:
            self.names.update(cf['names'])
    def exportRESOLV(self, channel, name):
        return (CC.RET_OK, self.SVCID, self.names.get(name))
    def exportADD(self, channel, name, value, override = False):
        """Add a name; with override=True an existing entry is replaced."""
        with self.lock:
            # Truthiness check kept from the original: a falsy stored value
            # is treated the same as an absent one.
            old = self.names.get(name, None)
            if not old:
                self.names[name] = value
                return (CC.RET_OK, self.SVCID, CC.NAMES_ADD)
            if not override:
                return (CC.RET_OK, self.SVCID, CC.NAMES_DUP)
            self.names[name] = value
            return (CC.RET_OK, self.SVCID, CC.NAMES_UPDATE)
    def exportUPDATE(self, channel, name, value):
        """Replace an existing name only; NAMES_EMPTY when it is missing."""
        with self.lock:
            if name not in self.names:
                return (CC.RET_OK, self.SVCID, CC.NAMES_EMPTY)
            self.names[name] = value
            return (CC.RET_OK, self.SVCID, CC.NAMES_UPDATE)
    def exportDEL(self, channel, name):
        """Delete a name; NAMES_EMPTY when it is missing."""
        with self.lock:
            if name not in self.names:
                return (CC.RET_OK, self.SVCID, CC.NAMES_EMPTY)
            del self.names[name]
            return (CC.RET_OK, self.SVCID, CC.NAMES_DELETE)
    exportDELETE = exportDEL
    def exportGETALL(self, channel):
        with self.lock:
            return (CC.RET_OK, self.SVCID, self.names.keys())
| Python |
#
# ==AUTHOR
# Sin Yu <scaner@gmail.com>
#
# ==MODULE
# Warehouse server in O3 grids
#
from __future__ import with_statement
WAREHOUSE_VERSION = '0.0.0.28'
import sys, os, time
import threading
from random import choice, random
import constants as CC
from service import ServiceBase
from protocol import O3Channel, O3Call
from warehousedb import WarehouseDB
from utility import appendinmap, removeinmap, leninmap, FileLogger
from utility import sizeK, RoomLocation, RoomLocationToTuple
from utility import D as _D, D2 as _D2, DE as _E
class NodeInfo(object):
    """Bookkeeping record for one warehouse (storage) node."""
    def __init__(self, id, entry = None):
        self.id = id            # node identifier
        self.entry = entry      # (addr, port) RPC entry point, if known
        # Timestamps: 'start' is the node's reported boot time (0 = never
        # seen live), 'advert' is the last heartbeat time.
        self.last = {'start': 0, 'advert': time.time()}
        self.tasks = []         # taskids of outstanding tasks on this node
        self.state = CC.NODE_STATE_INIT
class WarehouseService(ServiceBase):
SVCID = CC.SVC_WAREHOUSE
svcDescription = "WAREHOUSE service"
svcName = 'WAREHOUSE'
svcVersion = WAREHOUSE_VERSION
def __init__(self, server):
self.server = server
self.lock = threading.Lock()
self.disableCheck = False
self.entityLog = FileLogger('../log/O3Entity')
    def setup(self, cf):
        """Load persistent state and start the periodic maintenance timer.

        cf is the full server config; only its 'warehouse' section is used
        (must contain 'dburl').
        """
        cf = cf['warehouse']
        self.db = WarehouseDB()
        self.db.setup(cf['dburl'])
        # One NodeInfo per distinct node owning at least one room.
        self.nodes = {}
        for n in set([r.node for r in self.db.room.values()]):
            self.nodes[n] = NodeInfo(n)
        self.taskByEntity = {}
        self.tasks = {}
        # mainCheck runs every second; each sub-check applies its own period.
        self.server.addTimer2('warehouse_cyclecheck', 1, True, self.mainCheck)
        # Delay the first run of every periodic action by 10 seconds.
        self.actionlast = {}
        for i in ('entityone', 'cleanoldobject',
                'nodeoffline', 'flushdb'):
            self.actionlast[i] = int(time.time()) + 10
# ------
def mainCheck(self):
with self.lock:
if self.disableCheck:
return
cur = int(time.time())
last = self.actionlast
# Bug - too long time to checkEntityOne_
if cur - last['entityone'] > 120:
last['entityone'] = cur
self.checkEntityOne_()
if cur - last['cleanoldobject'] > 20:
last['cleanoldobject'] = cur
self.cleanObjectInDB_()
if cur - last['nodeoffline'] > 5:
last['nodeoffline'] = cur
self.checkNodeOffline_()
if cur - last['flushdb'] > 60:
last['flushdb'] = cur
self.flushDB_()
# -----
def registerOuterTask_(self, task):
entity = self.db.entity.get(
task.get('entityid', None), None)
room = self.db.room.get(
task.get('roomid', None), None)
taskid = task.get('taskid')
self.tasks[taskid] = task
if room:
self.nodes[room.node].tasks.append(taskid)
if entity:
appendinmap(self.taskByEntity, entity.id, taskid)
def unregisterOuterTask_(self, task):
entity = self.db.entity.get(
task.get('entityid', None), None)
room = self.db.room.get(
task.get('roomid', None), None)
taskid = task.get('taskid')
del self.tasks[taskid]
if room:
self.nodes[room.node].tasks.remove(taskid)
if entity:
removeinmap(self.taskByEntity, entity.id, taskid)
def arrange_(self, **task):
if task['task'] == 'MORESHADOW':
return self.moreMirror_(task)
print 'ARRANGE_END'
# ---
    def moreMirror_(self, task):
        """Create one more shadow (mirror) of task['entity'] if needed.

        Skips entities that are past READY, already have a pending task, or
        already have enough healthy shadows (in which case a SHADOWING
        entity is promoted to READY).  Returns True when a mirror task was
        accepted by a node, False otherwise.
        """
        entity = task['entity']
        if entity.state > CC.ENTITY_STATE_READY:
            return False
        # Check taskByEntity: at most one outstanding task per entity.
        if leninmap(self.taskByEntity, entity.id) >= 1:
            return False
        shadows = self.db.shadowByEntity.get(entity.id, '')
        if len(shadows) >= entity.mirrors:
            if len([s for s in shadows if
                    s.state == CC.SHADOW_STATE_OK or
                    s.state == CC.SHADOW_STATE_MIRROR ]) >= entity.mirrors:
                # Enough healthy copies; finish the shadowing phase.
                if entity.state == CC.ENTITY_STATE_SHADOWING:
                    entity.state = CC.ENTITY_STATE_READY
                    self.db.flush()
                return False
        room = self.allocateRoom0_(entity)
        if not room:
            return False
        size = sizeK(entity.size)
        # Pick the newest (highest id) healthy shadow on a healthy room and
        # online node as the copy source; None means copy from the origin.
        sourceshadow = None
        if leninmap(self.db.shadowByEntity, entity.id) != 0:
            for s in self.db.shadowByEntity[entity.id]:
                room0 = self.db.room[s.room]
                node0 = self.nodes[room0.node]
                if s.state == CC.SHADOW_STATE_OK and \
                        room0.state == CC.ROOM_STATE_OK and \
                        node0.state == CC.NODE_STATE_ONLINE:
                    if sourceshadow:
                        if sourceshadow.id < s.id:
                            sourceshadow = s
                    else:
                        sourceshadow = s
        shadow = self.db.addShadow(room, entity)
        # Reserve the space up front; reclaimed when the shadow is cleaned.
        room.used += size
        res = self.mirrorShadow_(sourceshadow, shadow)
        entity.state = CC.ENTITY_STATE_SHADOWING
        self.db.flush()
        return res
# ===
    def dropEntity_(self, entityid):
        """Mark an entity and its live shadows DROPED.

        Returns a list of (addr, roomlabel, entityname) locations whose
        nodes should be told to delete the file, or None when the entity
        had no shadows at all.  Shadows on offline nodes are skipped (their
        files are reclaimed later by the room-clean path).
        """
        res = None
        entity = self.db.entity[entityid]
        if leninmap(self.db.shadowByEntity, entityid) != 0:
            shadows = self.db.shadowByEntity[entityid]
            res = []
            for s in shadows:
                if s.state <= CC.SHADOW_STATE_OK:
                    s.state = CC.SHADOW_STATE_DROPED
                    room = self.db.room[s.room]
                    if self.nodes[room.node].state != CC.NODE_STATE_ONLINE:
                        continue
                    addr = room.addr
                    label = room.label
                    name = entity.name
                    res.append((addr, label, name))
        entity.state = CC.ENTITY_STATE_DROPED
        del self.db.entityByName[entity.name]
        self.db.flush()
        return res
    def dropRoom_(self, roomid): pass  # TODO not implemented (see exportDROPROOM)
# ===
# ===
def mirrorShadow_(self, src, dst):
entity = self.db.entity[dst.entity]
droom = self.db.room[dst.room]
dentry = (droom.addr, CC.DEFAULT_WAREHOUSE_PORT)
if src:
sroom = self.db.room[src.room]
sentry = (sroom.addr, CC.DEFAULT_PORT)
srcloc = RoomLocation(sroom.node,
(sroom.addr, CC.DEFAULT_WAREHOUSE_PORT),
sroom.label, entity.name)
srcnode = sroom.node
srclabel = sroom.label
else:
srcloc = entity.source
srcnode, sentrystr, srcpath = srcloc.split(':', 2)
saddr, sport = sentrystr.split(',')
sentry = (saddr, int(sport))
srclabel = srcpath.split('/', 1)[0][1:]
taskid = 'RM-%d-%d' % (droom.id, dst.id)
task = {
'action': 'MIRROR',
'taskid': taskid,
'shadowid': dst.id,
'entityid': entity.id,
'roomid': droom.id,
'source': srcloc,
'destroomlabel': droom.label,
'name': entity.name,
'size': entity.size,
'mtime': entity.mtime,
'starttime': int(time.time()),
}
_D2('entity mirror {%d=R%d-E%d,name:%s,from:%s/%s,to:%s/%s}' % (
dst.id, droom.id, entity.id, entity.name,
srcnode, srclabel, droom.node, droom.label))
self.entityLog.L('SM E%d=S%d %s:%s/%s %s/%s' % (
entity.id, dst.id, droom.node, droom.label, entity.name,
srcnode, srclabel))
# TODO Error handler
S = O3Channel().connect(dentry)
res = S(CC.SVC_SPACE, 'ROOMMIRROR', task)
S.close()
if res[0] == CC.RET_OK:
dst.state = CC.SHADOW_STATE_MIRROR
dst.last = time.time()
self.registerOuterTask_(task)
return True
else:
dst.state = CC.SHADOW_STATE_UNUSED
dst.last = time.time()
return False
    def allocateRoom0_(self, entity):
        """Pick a room for a new shadow of *entity*.

        Candidates: rooms in state OK with enough free space, on an ONLINE
        node that does not already hold a shadow of this entity, with fewer
        than 4 running tasks.  Among candidates on the least-loaded nodes,
        one room is chosen at random weighted by free space.  Returns None
        when no room qualifies.
        """
        rooms = self.db.room
        size = sizeK(entity.size)
        shadows = self.db.shadowByEntity.get(entity.id, None)
        if shadows:
            nodes = set([ rooms[s.room].node for s in shadows ])
        else:
            nodes = []
        mintasks = 4
        arooms = []
        sumspace = 0
        for r in rooms.values():
            if r.state != CC.ROOM_STATE_OK:
                continue
            freespace = r.capacity - r.used
            if freespace < size:
                continue
            if r.node in nodes:
                continue
            node = self.nodes[r.node]
            if node.state != CC.NODE_STATE_ONLINE:
                continue
            if len(node.tasks) > mintasks:
                continue
            if len(node.tasks) == mintasks:
                # Same load as the current best: extend the candidate list.
                sumspace += freespace
                arooms.append((r, freespace))
                continue
            # Strictly lower load: restart the candidate list.
            mintasks = len(node.tasks)
            sumspace = freespace
            arooms = [(r, freespace)]
        if len(arooms) == 0:
            return None
        # Weighted random choice proportional to free space.
        selector = random() * sumspace
        for x in arooms:
            selector -= x[1]
            if selector <= 0:
                return x[0]
        return arooms[-1][0]
# ---
def cleanNodeTasks_(self, node):
if len(node.tasks):
for x in list(node.tasks):
if x.startswith('RM-'):
taskinfo = self.tasks[x]
shadow = self.db.shadow[taskinfo['shadowid']]
shadow.state = CC.SHADOW_STATE_FAILED
self.db.flush()
self.unregisterOuterTask_(x)
def cleanObjectInDB_(self):
session = self.db.session
cur = int(time.time())
fobj = []
# Clean room
rooms = [ r for r in self.db.room.values() if
r.state == CC.ROOM_STATE_DROPED ]
for r in rooms:
r.active = r.id
r.last = cur
fobj.append(r)
_D('_CLEANDB_ remove room {%s=%d/_%s}' % (r.node, r.id, r.label))
del self.db.room[r.id]
# Clean Nodes
if len(rooms) != 0:
nodes = set([ r.node for r in self.db.room.values() ])
for node in self.nodes:
if node not in nodes:
_D('_CLEANDB_ remove room node {%s=%d}' % (
self.nodes[node].id, self.nodes[node].last['start']))
del self.nodes[node]
# Clean entity
# for e in [e for e in self.db.entity.values() if e.state == CC.ENTITY_STATE_DROPED]:
# print '%d: %d' % (e.id, leninmap(self.db.shadowByEntity, e.id))
entitys = [ e for e in self.db.entity.values() if
(e.state == CC.ENTITY_STATE_ILL or
e.state == CC.ENTITY_STATE_DROPED) and
leninmap(self.db.shadowByEntity, e.id) == 0]
for e in entitys:
e.active = e.id
e.last = cur
fobj.append(e)
#session.flush()
#session.expunge(e)
del self.db.entity[e.id]
try: del self.db.shadowByEntity[e.id]
except: pass
try: del self.taskByEntity[e.id]
except: pass
_D2('_CLEANDB_ remove entity {%d=%s}' % ( e.id, e.name))
# Clean shadow
shadows = [ s for s in self.db.shadow.values() if
s.state == CC.SHADOW_STATE_FAILED or
s.state == CC.SHADOW_STATE_DROPED ]
for s in shadows:
room = self.db.room[s.room]
entity = self.db.entity[s.entity]
room.used -= sizeK(entity.size)
s.active = s.id
s.last = cur
fobj.append(s)
del self.db.shadow[s.id]
removeinmap(self.db.shadowByEntity, s.entity, s)
_D2('_CLEANDB_ remove shadow {S%d:E%d=%s}' % (
s.id, s.entity, self.db.entity[s.entity].name))
if fobj:
_D('_CLEANDB_ clean %d objects' % len(fobj))
self.db.flush()
for o in fobj:
session.expunge(o)
    def checkEntityOne_(self):
        """Scan entities and start mirror tasks where copies are missing.

        At most 20 corrective actions per pass, to bound the time spent
        while holding the service lock.
        """
        actions = 20
        for e in self.db.entity.values():
            if actions == 0:
                return
            # Skip entities that already have an outstanding task.
            if leninmap(self.taskByEntity, e.id) != 0:
                continue
            # Fewer shadows than requested mirrors: create one.
            if leninmap(self.db.shadowByEntity, e.id) < e.mirrors:
                self.arrange_(task = 'MORESHADOW', entity = e)
                actions -= 1
                continue
            def roomAlive(s):
                # A shadow counts only when its room is OK or LOCKed.
                # NOTE(review): 'node' is computed here but never used.
                room = self.db.room[s.room]
                node = self.nodes[room.node]
                if room.state == CC.ROOM_STATE_OK or \
                        room.state == CC.ROOM_STATE_LOCK:
                    return True
                else:
                    return False
            # Count only healthy shadows on usable rooms.
            shadows = [ s for s in self.db.shadowByEntity[e.id] if
                    (s.state == CC.SHADOW_STATE_OK or
                    s.state == CC.SHADOW_STATE_MIRROR) and
                    roomAlive(s)]
            if len(shadows) < e.mirrors:
                self.arrange_(task = 'MORESHADOW', entity = e)
                actions -= 1
                continue
# ---
def checkNodeOffline_(self):
cur = time.time()
for node in self.nodes.values():
if node.state != CC.NODE_STATE_OFFLINE and cur - node.last['advert'] > 40:
node.state = CC.NODE_STATE_OFFLINE
_D('room offline {%s=%08X}' % (node.id, node.last['start']))
# ===
    def flushDB_(self):
        # Periodic DB flush (driven by mainCheck every 60 seconds).
        self.db.flush()
    def resetDB_(self, force = False):
        # Reload all warehouse state from the database; always returns 0.
        self.db.resetDB(force)
        return 0
# ===
def exportFLUSHDB(self, channel):
with self.lock:
self.db.flush()
return (CC.RET_OK, self.SVCID, 0)
# ===
def exportRESETDB(self, channel, force = False):
with self.lock:
res = self.resetDB_(force)
return (CC.RET_OK, self.SVCID, 0)
# ===
def exportGETENTITYSHADOW(self, channel, entityid):
with self.lock:
shadows = self.db.shadowByEntity.get(entityid, None)
if not shadow:
return (CC.RET_OK, self.SVCID, 0, 0)
readyshadows = [ s for s in shadows if s.state == CC.SHADOW_STATE_OK ]
return (CC.RET_OK, self.SVCID, len(readyshadows), len(shadows))
# ===
def exportSETENTITYINFO(self, channel, entityid, info):
with self.lock:
res = self.db.setEntityInfo(entityid, info)
if not res:
return (CC.RET_OK, self.SVCID, 0)
else:
return (CC.RET_ERROR, self.SVCID, res)
# ---
def exportGETACTIVETASKS(self, channel):
with self.lock:
return (
(CC.RET_OK, self.SVCID, len(self.tasks)), 'dontlog')
def exportGETACTIVETASKSBYSOURCENODE(self, channel, node):
with self.lock:
tasks = []
for i in self.tasks:
if i.startswith('RM-'):
task = self.tasks[i]
snode = RoomLocationToTuple(task['source'])[0]
if snode == node:
tasks.append(i)
return (
(CC.RET_OK, self.SVCID, tasks), 'dontlog')
# ---
def exportAUTOCONFIG(self, channel, zone, nodeid):
with self.lock:
rooms = self.db.getRoomByNode(nodeid)
if rooms == None:
return (CC.RET_OK, self.SVCID, None)
cf = {'rooms': [ (x.id, x.label, x.base, x.capacity, x.used) for x in rooms ]}
return (CC.RET_OK, self.SVCID, cf)
# ---
def exportLISTALLNODE(self, channel):
with self.lock:
return (CC.RET_OK, self.SVCID, self.db.getNodeList())
# ---
    def exportADDENTITY(self, channel, einfo):
        """RPC: register a new entity and immediately start mirroring it.

        addEntity returns an int error code on failure (e.g. duplicate
        name), otherwise the new Entity object.
        """
        with self.lock:
            res = self.db.addEntity(einfo)
            if type(res) == int:
                return (CC.RET_ERROR, self.SVCID, res)
            _D('add entity {%d=%s:%s} size=%.2fM' % (
                res.id, einfo['node'], einfo['path'], res.size / 1024.0 / 1024))
            # NOTE(review): integer division here (Python 2) truncates the
            # logged size to whole megabytes.
            self.entityLog.L('EA E%d=%s %.2fm' % (res.id, res.name, res.size / 1024 / 1024))
            self.arrange_(task = 'MORESHADOW', entity = res)
            return (CC.RET_OK, self.SVCID, res.id)
# ---
def exportLISTROOM(self, channel):
with self.lock:
rooms = [
[room.id, room.node, room.label, room.base,
room.capacity, room.used, room.state] for room in self.db.room.values() ]
return (CC.RET_OK, self.SVCID, rooms)
def exportCLEANROOM(self, channel, roomid):
with self.lock:
room = self.db.room[roomid]
shadows = [ s for s in self.db.shadow.values() if s.room == room.id and
s.state <= CC.SHADOW_STATE_OK ]
names = [ self.db.entity[s.entity].name for s in shadows ]
entry = (room.addr, CC.DEFAULT_PORT)
S = O3Channel().connect(entry)
res = S(CC.SVC_SPACE, 'ROOMCLEAN', room.label, names)
S.close()
return (CC.RET_OK, self.SVCID, res[2])
def exportCHECKROOMSHADOW(self, channel, roomid):
with self.lock:
room = self.db.room.get(roomid, None)
if not room:
return (CC.RET_ERROR, self.SVCID, CC.ERROR_NO_SUCH_OBJECT)
if self.nodes[room.node].state != CC.NODE_STATE_ONLINE:
return (CC.RET_ERROR, self.SVCID, ERROR_NETWORK)
res = O3Call((room.addr, CC.DEFAULT_PORT), CC.SVC_SPACE, 'ROOMSHADOWLIST', room.label)
if res[0] != CC.RET_OK:
return (CC.RET_ERROR, self.SVCID, res[2])
with self.lock:
count = 0
exists = set(res[2])
roomused = 0
for shadow in [ s for s in self.db.shadow.values() if
s.room == room.id and
s.state == CC.SHADOW_STATE_OK ]:
entity = self.db.entity[shadow.entity]
if entity.name not in exists:
_D2('missing entity {%d=R%d-E%d:%s/_%s:%s}' % (
shadow.id, room.id, shadow.entity, room.node, room.label,
entity.name))
count += 1
shadow.state = CC.SHADOW_STATE_FAILED
else:
roomused += sizeK(entity.size)
if room.used != roomused:
room.used = roomused
self.db.flush()
return (CC.RET_OK, self.SVCID, count)
# ---
    def exportDROPENTITY(self, channel, entityid):
        """RPC: drop an entity, addressed by name (str) or numeric id.

        After the records are marked dropped, each online node holding a
        shadow is asked (best effort) to delete the file immediately.
        """
        with self.lock:
            if type(entityid) == str:
                entity = self.db.entityByName.get(entityid, None)
                if not entity:
                    return (CC.RET_ERROR, self.SVCID, CC.ERROR_NO_SUCH_OBJECT)
                entityid = entity.id
            elif not self.db.entity.has_key(entityid):
                return (CC.RET_ERROR, self.SVCID, CC.ERROR_NO_SUCH_OBJECT)
            res = self.dropEntity_(entityid)
            if res:
                # DROP SHADOW directly; failures are ignored (the periodic
                # room clean will catch leftovers).
                for loc in res:
                    S = O3Channel()
                    try:
                        S.connect((loc[0], CC.DEFAULT_WAREHOUSE_PORT))
                        S(CC.SVC_SPACE, 'ROOMDROPSHADOW', loc[1], loc[2])
                    except:
                        pass
                    finally:
                        S.close()
            return (CC.RET_OK, self.SVCID, 0)
# ---
def exportADDROOM(self, channel, roominfo):
with self.lock:
room = self.db.addRoom(roominfo)
if room == None:
return (CC.RET_ERROR, self.SVCID, 0)
if not self.nodes.has_key(room.node):
self.nodes[room.node] = NodeInfo(room.node)
_D2('add room {%d=%s:%s:%s:%d}' % (
room.id, room.node, room.id, room.base, room.capacity))
S = O3Channel().connect((room.addr, CC.DEFAULT_PORT))
res = S(CC.SVC_SPACE, 'ROOMADDCONFIG', (
room.id, room.label, room.base, room.capacity, room.used))
S.close()
return (CC.RET_OK, self.SVCID, room.id)
def exportDROPROOM(self, channel, roomid):
with self.lock:
if not self.db.room.has_key(roomid):
return (CC.RET_ERROR, self.SVCID, ERROR_NO_SUCH_OBJECT)
self.dropRoom_(self, roomid)
return (CC.RET_OK, self.SVCID, ERROR_NO_SUCH_OBJECT)
# ---
def exportARRANGE(self, channel, task):
with self.lock:
res = self.arrange_(task)
return (CC.RET_OK, self.SVCID, res)
# ---
    def exportMIRRORFINISHED(self, channel, taskinfo):
        """RPC from a space node: a MIRROR task ended (result 0 = success)."""
        with self.lock:
            shadow = self.db.shadow[taskinfo['shadowid']]
            entity = self.db.entity[taskinfo['entityid']]
            result = taskinfo['result']
            if result == 0 and shadow.state == CC.SHADOW_STATE_MIRROR:
                shadow.state = CC.SHADOW_STATE_OK
                # The node may have compressed the data; record the on-disk
                # size and keep the original size in the comment field.
                if taskinfo.has_key('compressedsize'):
                    self.db.setEntityInfo(entity.id, {
                        'size': taskinfo['compressedsize'],
                        'comment': 'rsize=%d' % (entity.size)})
            else:
                shadow.state = CC.SHADOW_STATE_FAILED
            self.db.flush()
            # Best effort: unregister and kick off the next mirror; never
            # fail the RPC reply because of it.
            try:
                self.unregisterOuterTask_(taskinfo)
                self.arrange_(task = 'MORESHADOW', entity = entity)
            except:
                pass
            return ((CC.RET_OK, self.SVCID, 0), 'dontlog')
# ---
def exportROOMADVERT(self, channel, nodeid, entry, starttime, tasks, rooms):
with self.lock:
node = self.nodes.get(nodeid, None)
if not node:
node = NodeInfo(nodeid)
self.nodes[nodeid] = node
_D('WH empty node up {%s=%08X:%s:%d}' % (
nodeid, starttime, entry[0], entry[1]))
node.entry = entry
node.last['start'] = starttime
node.last['born'] = time.time()
node.last['advert'] = time.time()
node.state = CC.NODE_STATE_ONLINE;
return ((CC.RET_OK, self.SVCID, 0), 'dontlog')
#return (CC.RET_ERROR, self.SVCID, CC.ERROR_NO_SUCH_OBJECT)
if node.last['start'] == 0:
node.last['born'] = time.time()
node.last['start'] = starttime
node.entry = entry
self.nodes[id] = node
_D('WH node up {%s=%08X:%s/%s:%d}' % (
nodeid, starttime,
','.join([str(r) for r in rooms]),
entry[0], entry[1]))
node.last['advert'] = time.time()
node.state = CC.NODE_STATE_ONLINE
return ((CC.RET_OK, self.SVCID, 0), 'dontlog')
if node.last['start'] == starttime:
node.last['advert'] = time.time()
if node.state != CC.NODE_STATE_ONLINE:
_D('WH node online {%s=%08X}' % (node.id, node.last['start']))
node.state = CC.NODE_STATE_ONLINE
return ((CC.RET_OK, self.SVCID, 0), 'dontlog')
# room restarted
_D('WH node restart {%s=%08X:%s/%s:%d}' % (
nodeid, starttime,
','.join([str(r) for r in rooms]),
entry[0], entry[1]))
self.cleanNodeTasks_(node)
node.last['start'] = starttime
node.last['advert'] = time.time()
return ((CC.RET_OK, self.SVCID, 0), 'dontlog')
# ===
def exportLISTENTITY0(self, channel, name):
with self.lock:
result = [[e.id, e.name, e.mtime, e.size] for e in
self.db.entity.values() if
e.state == CC.ENTITY_STATE_READY and
e.name.startswith(name)]
return ((CC.RET_OK, self.SVCID, result),
'query:%s entity:%d' % (name, len(result)))
# ===
def exportLISTENTITY1(self, channel, name):
with self.lock:
result = [ {'id':e.id, 'name': e.name, 'mtime': e.mtime, 'size':e.size} for e in
self.db.entity.values() if
e.state == CC.ENTITY_STATE_READY and
e.name.startswith(name)]
return ((CC.RET_OK, self.SVCID, result),
'query:%s entity:%d' % (name, len(result)))
# ===
def exportLISTENTITYLOCATION0(self, channel, eids):
with self.lock:
result = {}
for e in eids:
entity = self.db.getEntity(e)
if not entity:
result[e] = None
else:
shadows = self.db.shadowByEntity.get(entity.id, None)
if not shadows:
result[e] = None
else:
result[e] = [ (s.id,
self.db.room[s.room].node,
self.db.room[s.room].addr,
self.db.room[s.room].label,
entity.name,
entity.size) for s in shadows if s.state == CC.SHADOW_STATE_OK]
if len(result[e]) == 0:
result[e] = None
return (CC.RET_OK, self.SVCID, result)
# ===
def exportLISTENTITYLOCATION1(self, channel, eids):
with self.lock:
result = {}
for e in eids:
entity = self.db.getEntity(e)
if not entity:
result[e] = None
else:
shadows = self.db.shadowByEntity.get(entity.id, None)
if not shadows:
result[e] = None
else:
result[e] = [ {
'id': s.id,
'node': self.db.room[s.room].node,
'addr':self.db.room[s.room].addr,
'label': self.db.room[s.room].label,
'name': entity.name,
'size': entity.size } for s in
shadows if s.state == CC.SHADOW_STATE_OK ]
if len(result[e]) == 0:
result[e] = None
return (CC.RET_OK, self.SVCID, result)
| Python |
#
# ==AUTHOR
# Sin Yu <scaner@gmail.com>
#
# ==MODULE
# Constants
#
# Default TCP ports and installation base directory.
DEFAULT_PORT = 50333
DEFAULT_LOG_PORT = 50332
DEFAULT_WAREHOUSE_PORT = 50333  # NOTE: same value as DEFAULT_PORT
DEFAULT_BASE = '/is/app/o3'
# ==== Service ids ====
SVC_SYSTEM = 1
SVC_BASE = 2
SVC_SCHEDULE = 1003
SVC_SCHE = SVC_SCHEDULE
SVC_WORKSPACE = 1004
SVC_WS = SVC_WORKSPACE
SVC_FILESYSTEM = 1005
SVC_FS = SVC_FILESYSTEM
SVC_SPACE = SVC_FILESYSTEM
SVC_LOGGING = 1006
SVC_LOG = SVC_LOGGING
SVC_HUB = 1010
SVC_NAMES = 1011
SVC_AUTOCONFIG = 1012
SVC_WAREHOUSE = 1013
SVC_ECHO = 10011
SVC_TEST = 10012
SVC_TIME = 10013
# ==== RPC return codes ====
RET_ERROR = 0
RET_ERR = 0
RET_OK = 200
RET_CONTINUE = 302
# ==== Name-service result codes ====
NAMES_ADD = 1
NAMES_UPDATE = 2
NAMES_DELETE = 3
NAMES_EMPTY = 4
NAMES_DUP = 5
# ==== Entity lifecycle states ====
ENTITY_STATE_INIT = 1
ENTITY_STATE_SHADOWING = 2
ENTITY_STATE_READY = 3
ENTITY_STATE_UNAVAILABLE = 4
ENTITY_STATE_DROPING = 5
ENTITY_STATE_DROPED = 6
ENTITY_STATE_ILL = 7
# ==== Scheduler job and mission states ====
SJOB_NEW = 0
SJOB_READY = 300
SJOB_WAIT = 100
SJOB_SUBMIT = 500
SJOB_RUN = 600
SJOB_FINISHED = 2800
SJOB_CANCEL0 = 2700
SJOB_CANCEL1 = 2600
SMISSION_NEW = 0
SMISSION_READY = 200
SMISSION_DOING = 400
SMISSION_DONE = 2800
SMISSION_CANCEL = 2700
SMISSION_EXCEPTION = 2600
# ==== Room (storage area) states ====
ROOM_STATE_OK = 1
ROOM_STATE_LOCK = 2
ROOM_STATE_DROPING = 3
ROOM_STATE_DROPED = 4
ROOM_STATE_UNAVAILABLE = 5
# ==== Node liveness states ====
NODE_STATE_INIT= 0
NODE_STATE_ONLINE = 1
NODE_STATE_OFFLINE = 2
# ==== Shadow (replica) states ====
SHADOW_STATE_INIT = 1
SHADOW_STATE_MIRROR = 2
SHADOW_STATE_OK = 3
SHADOW_STATE_DROPED = 4
SHADOW_STATE_UNAVAILABLE = 5
SHADOW_STATE_UNUSED = 6
SHADOW_STATE_FAILED = 7
# ==== Error codes ====
ERROR_UNKNOWN = 0
ERROR_NO_SERVICE = 1001
ERROR_NO_FUNCTION = 1002
ERROR_NETWORK = 1053
ERROR_NO_SUCH_OBJECT = 1054
ERROR_SPACE_PUT = 2001
ERROR_SPACE_NO_SUCH_SNIP = 2012
ERROR_SPACE_NO_SUCH_ROOM = 2013
ERROR_WAREHOUSE_DUPLICATION_NAME = 3001
ERROR_WAREHOUSE_DUPLICATION_ROOMLABEL = 3002
| Python |
#
# ==AUTHOR
# Sin Yu <scaner@gmail.com>
#
# ==MODULE
# DB Layer for Warehouse
#
# Public exports of the DB layer.
# NOTE(review): 'Shadow' is mapped below but not exported here -- confirm
# whether that is intentional.
__all__ = [
    'WarehouseDB',
    'Entity',
    'Room',
    ]
from sqlalchemy import *
import time
import constants as CC
from utility import appendinmap, removeinmap
from utility import sizeK
class Room(object): pass    # ORM-mapped: one storage room on a node
class Entity(object): pass  # ORM-mapped: one logical stored object
class Shadow(object): pass  # ORM-mapped: one physical copy of an entity
class WarehouseDB(object):
def setup(self, dbURL):
self.url = dbURL
engine = create_engine(dbURL)
metadata = BoundMetaData(engine)
self.engine = engine
self.metadata = metadata
tables = {}
for i in ('room', 'entity', 'shadow'):
tables[i] = Table(i, metadata, autoload = True)
self.tables = tables
mappers = {}
mappers['room'] = mapper(Room, tables['room'])
mappers['entity'] = mapper(Entity, tables['entity'])
mappers['shadow'] = mapper(Shadow, tables['shadow'])
self.mappers = mappers
session = create_session(bind_to = engine)
self.session = session
self.qRoom = session.query(Room)
self.qEntity = session.query(Entity)
self.qShadow = session.query(Shadow)
self.room = {}
self.entity = {}
self.shadow = {}
self.entityByName = {}
self.shadowByEntity = {}
# Load all data from database.
res = self.qRoom.select_by(active = 0)
if res:
for r in res:
self.room[r.id] = r
res = self.qEntity.select_by(active = 0)
if res:
for r in res:
self.entity[r.id] = r
self.entityByName[r.name ] = r
res = self.qShadow.select_by(active = 0)
if res:
for r in res:
self.shadow[r.id] = r
appendinmap(self.shadowByEntity, r.entity, r)
# ---
def resetDB(self, force):
self.session.flush()
self.session.clear()
self.room.clear()
self.entity.clear()
self.shadow.clear()
self.entityByName.clear()
self.shadowByEntity.clear()
# Load all data from database.
res = self.qRoom.select_by(active = 0)
if res:
for r in res:
self.room[r.id] = r
res = self.qEntity.select_by(active = 0)
if res:
for r in res:
self.entity[r.id] = r
self.entityByName[r.name ] = r
res = self.qShadow.select_by(active = 0)
if res:
for r in res:
self.shadow[r.id] = r
appendinmap(self.shadowByEntity, r.entity, r)
# ------
    def flush(self):
        # Flush with a single retry: the second attempt runs after the
        # session has recovered from a transient failure.
        # NOTE(review): the bare except hides real errors on the first try --
        # presumably deliberate; verify before tightening.
        try:
            self.session.flush()
        except:
            self.session.flush()
def getNodeList(self):
return list(set([x.node for x in self.room.values()]))
def getRoomByNode(self, node):
return [ r for r in self.room.values() if r.node == node ]
# -----
def addShadow(self, room, entity):
s = Shadow()
s.room = room.id
s.entity = entity.id
s.mtime = entity.mtime
s.state = CC.ENTITY_STATE_INIT
s.active = 0
s.last = int(time.time())
self.session.save(s)
self.flush()
self.shadow[s.id] = s
appendinmap(self.shadowByEntity, entity.id, s)
return s
# ------
def addRoom(self, ri):
node = ri['node']
label = ri['label']
rooms = [ r for r in self.room.values() if
r.node == node and r.label == label ]
if len(rooms) != 0:
return None
room = Room()
room.node = node
room.label = ri['label']
room.zone = ri.get('zone', 0)
room.addr = ri['addr']
room.base = ri['base']
room.capacity = ri['capacity']
room.used = 0
room.state = 1
room.last = int(time.time())
room.active = 0
self.session.save(room)
self.session.flush()
self.room[room.id] = room
return room
# ------
def addEntity(self, ei):
name = ei.get('name')
if self.entityByName.has_key(name):
return CC.ERROR_WAREHOUSE_DUPLICATION_NAME
e = Entity()
e.name = ei.get('name')
e.zone = ei.get('zone', 0)
e.source = '%s:%s,%d:_Z/%s' % (
ei['node'], ei['entry'][0], ei['entry'][1], ei['path'])
e.size = ei.get('size')
e.mtime = ei.get('mtime')
e.state = CC.ENTITY_STATE_INIT
e.active = 0
e.mirrors = ei.get('mirrors', 2)
e.comment = ei.get('comment', None)
e.tag = ei.get('tag', None)
self.session.save(e)
#self.session.flush()
self.flush()
self.entity[e.id] = e
self.entityByName[e.name] = e
return e
# ===
    def setEntityInfo(self, eid, info):
        """Update attributes of an entity (addressed by id or name).

        Returns 0 on success or CC.ERROR_NO_SUCH_OBJECT.  A 'size' change
        also adjusts the 'used' counter of every room holding an active
        shadow of the entity.
        """
        if type(eid) == str:
            e = self.entityByName.get(eid, None)
        elif type(eid) == int or type(eid) == long:
            e = self.entity.get(eid, None)
        else:
            e = None
        if not e:
            return CC.ERROR_NO_SUCH_OBJECT
        if e.active != 0:
            # Rows with active != 0 are logically deleted.
            return CC.ERROR_NO_SUCH_OBJECT
        for k in ('source', 'tag', 'label', 'comment', 'mtime'):
            if info.has_key(k):
                setattr(e, k, info[k])
        # size -- need update all shadows' room's used value
        if info.has_key('size'):
            shadows = self.shadowByEntity.get(e.id, None)
            if shadows:
                size0 = sizeK(e.size)
                size1 = sizeK(info['size'])
                for room in [ self.room[s.room] for s in
                        shadows if s.active == 0 ]:
                    room.used -= size0
                    room.used += size1
            e.size = info['size']
        self.flush()
        return 0
# ===
def getEntity(self, en):
    """Look up an entity by numeric id or by name.

    *en* may be an int/long id, an entity name, or a string holding a
    decimal id.  Returns the entity object or None when nothing
    matches.
    """
    if type(en) == int or type(en) == long:
        return self.entity.get(en, None)
    entity = self.entityByName.get(en, None)
    if entity:
        return entity
    # Fall back to treating the string as a decimal id.  Only the
    # conversion can legitimately fail here; the original bare except
    # would also have hidden real bugs (e.g. a broken entity map).
    try:
        return self.entity.get(int(en), None)
    except (ValueError, TypeError):
        return None
| Python |
#
# ==AUTHOR
# Sin Yu <scaner@gmail.com>
#
# ==MODULE
# Config
#
import os
def GetConfigCode(name):
    """Return the source text of configuration file *name*.

    The system-wide /is/app/o3/etc directory is tried first, then the
    bare name relative to the current directory.  When neither exists,
    the final open of *name* raises IOError, matching the historical
    behaviour.
    """
    paths = [
        '%s/%s' % ('/is/app/o3/etc', name),
        name ]
    for fn in paths:
        if os.path.isfile(fn):
            break
    fin = file(fn, 'r')
    try:
        # close in a finally so a failed read does not leak the handle
        configcode = fin.read()
    finally:
        fin.close()
    return configcode
def Load(name):
    """Execute configuration file *name* and return the ``_C`` mapping
    it defines.

    Config files are plain Python.  ``exec`` runs them in this
    function's local scope, so the file must assign a top-level ``_C``
    dict (see the sample configs in this tree); otherwise the final
    ``return _C`` raises NameError.
    """
    configcode = GetConfigCode(name)
    exec configcode
    return _C
| Python |
#
# ==AUTHOR
# Sin Yu <scaner@gmail.com>
#
# ==MODULE
# SNIP
#
# ==Description
# Simple space storage client
#
import socket
import constants as CC
import cStringIO as StringIO
from protocol import CreateMessage, GetMessageFromSocket, O3Space, O3Call
class RemoteSnipClient(object):
    """Minimal blocking client for the SNIP space service.

    Each operation opens a fresh TCP connection to *space* (a
    ``(host, port)`` tuple), performs one request/response exchange and
    closes the socket.  On failure the protocol error payload is stored
    in ``self.error`` and a falsy value is returned.
    """
    def __init__(self, space):
        self.addr = space       # (host, port) of the space service
        self.error = None       # last protocol error payload, if any
    def getTransport(self):
        # One short-lived TCP socket per call; no pooling.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        return s
    def PUT(self, id, content):
        """Store *content* under *id*.  Returns True on success."""
        length = len(content)
        s = self.getTransport()
        try:
            s.connect(self.addr)
            # sendall() instead of send(): send() may transmit only part
            # of the buffer, which would corrupt the framed protocol.
            s.sendall(CreateMessage(CC.SVC_SPACE, 'PUT', id, length, None))
            params = GetMessageFromSocket(s)
            if params[0] != CC.RET_CONTINUE:
                self.error = params[2]
                return False
            s.sendall(content)
            params = GetMessageFromSocket(s)
            if params[0] == CC.RET_OK:
                return True
            else:
                self.error = params[2]
                return False
        finally:
            s.close()
    def GET(self, id):
        """Fetch the content stored under *id*; None on error."""
        s = self.getTransport()
        try:
            s.connect(self.addr)
            s.sendall(CreateMessage(CC.SVC_SPACE, 'GET', id, None))
            params = GetMessageFromSocket(s)
            if params[0] == CC.RET_ERROR:
                self.error = params[2]
                return None
            length = params[3]
            rest = length
            content = []
            flags = socket.MSG_WAITALL
            while rest != 0:
                if rest > 32768:
                    buf = s.recv(32768, flags)
                else:
                    buf = s.recv(rest)
                if not buf:
                    break   # peer closed the connection early
                rest -= len(buf)
                content.append(buf)
            if rest != 0:
                # short read: report a network error instead of
                # returning truncated data
                self.error = CC.ERROR_NETWORK
                return None
            params = GetMessageFromSocket(s)   # trailing status frame
            return ''.join(content)
        finally:
            s.close()
    def DELETE(self, id):
        """Remove the content stored under *id*.  True on success."""
        s = self.getTransport()
        try:
            s.connect(self.addr)
            s.sendall(CreateMessage(CC.SVC_SPACE, "DELETE", id))
            params = GetMessageFromSocket(s)
            if params[0] == CC.RET_OK:
                return True
            else:
                self.error = params[2]
                return False
        finally:
            s.close()
| Python |
#!/usr/bin/python
# o3 grid configuration for zone 'z00' (o3dev).  Loaded via
# config.Load(), which exec()s this file and picks up the _C dict.
from o3grid import constants as CC
BASE = '/is/app/o3'
def B(p, b = BASE):
    # Build an absolute path under the o3 installation base.
    return '%s/%s' % (b, p)
# Settings shared by every service on this node.
common = {
    'name': 'z00',
    'id': 'z00',
    'zone': 'o3dev',
    'entry': ('10.6.32.197', CC.DEFAULT_PORT),
    'base': BASE,
    # Well-known service endpoints.
    'names': {
        'HUB': ('10.6.32.197', CC.DEFAULT_PORT),
        'NAMES': ('10.6.32.197', CC.DEFAULT_PORT),
        'SCHEDULE': ('10.6.32.197', CC.DEFAULT_PORT),
        'WAREHOUSE': ('10.6.32.197', CC.DEFAULT_PORT),
        'RESULT': ('10.4.170.220', CC.DEFAULT_PORT), # p-cn39
    },
    # UDP log sink (see the log daemon script).
    'ulog': {
        'addr': ('10.6.32.197', CC.DEFAULT_LOG_PORT)
    },
    'threadpoolsize': 10,
    #'debug': 'call',
}
hub = {
    'paths': {
        'codebase': B('env/codebase'),
        'scriptbase': B('env/codebase'),
    }
}
space = {
    'path': B('tmp/storage'),
}
workspace = {
    'base': B('tmp/run'),
    'respath': '/data1/o3res',
    'tag': 'center',
}
autoconfig = {
    'policy': 'o3grid.autoconfigpolicy',
}
warehouse = {
    'dburl': 'mysql://o3:o3indexdb@o3db/o3',
}
# Extra name registrations served by the NAMES service.
names = {
    'names': {
        'BIGRESULT': ('10.6.33.213', CC.DEFAULT_PORT), # p-dx70
    },
}
# _C is the contract with config.Load(): one key per service section.
_C = {
    'common': common,
    'hub': hub,
    'space': space,
    'workspace': workspace,
    'names': names,
    'autoconfig': autoconfig,
    'schedule': None,
    'warehouse': warehouse,
}
| Python |
#!python2.5
import os
from o3grid.service import BaseService, EchoService
from o3grid.hub import HubService
from o3grid.baseserver import ServerBase
from o3grid import config
from o3grid.utility import D
from o3grid.protocol import O3Channel
from o3grid import constants as CC
def readfile(fn):
    """Return the whitespace-stripped contents of text file *fn*."""
    handle = file(fn, 'r')
    data = handle.read()
    handle.close()
    return data.strip()
def main():
    # Node identity and the autoconfig server address are provisioned
    # as plain files by the deployment scripts.
    NODEID = readfile('/is/app/o3/etc/NODEID')
    AUTOS = readfile('/is/app/o3/etc/AUTOS')
    # Ask the autoconfig service for this node's configuration.
    channel = O3Channel()
    channel.connect((AUTOS, CC.DEFAULT_PORT))
    res = channel(CC.SVC_AUTOCONFIG, 'AUTOCONFIG0', 'o3', NODEID)
    channel.close()
    C = res[2]   # reply payload: the configuration dict
    del res
    # Boot the base server with the received configuration.
    S = ServerBase()
    S.setup(C)
    S.setupServices()
    S.activate()
    S.serveForever()
if __name__ == '__main__':
main()
| Python |
import socket
import time
# Tiny UDP log sink: every datagram received on port 50332 is
# timestamped, appended to the o3 log file and echoed to stdout.
# Runs until interrupted with Ctrl-C.
fout = file('/is/app/o3/log/o3.log', 'a')
sin = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
sin.bind(('0.0.0.0', 50332))
while True:
    try:
        buf = sin.recv(300)   # log lines are short; 300 bytes suffices
        log = '%s %s' % (time.strftime('%m%d %H:%M:%S'), buf)
        fout.write(log)
        fout.write('\n')
        fout.flush()          # flush per line so `tail -f` works
        print log
    except KeyboardInterrupt, e:
        break
    except:
        # best-effort daemon: swallow transient socket/write errors
        # rather than dying and losing all subsequent log traffic
        pass
sin.close()
fout.close()
| Python |
#!/usr/bin/python
# Admin one-shot: ask the warehouse service on p-dx44-in to flush its
# index database, then pretty-print the raw protocol reply tuple.
import pprint
from o3grid import constants as CC
from o3grid.protocol import O3Call, O3Channel
import time
res = O3Call(('p-dx44-in', CC.DEFAULT_PORT), CC.SVC_WAREHOUSE, 'FLUSHDB')
pprint.pprint(res)
| Python |
from o3lib import fs
import Queue
# Manual test: synchronously stream entity 4498 from node p-dx69 into a
# queue and copy the chunks to a local file.  O3EntityReader pushes a
# final None as the end-of-stream marker.
queue = Queue.Queue()
fs.O3EntityReader(queue,
    label = '0',
    node = 'p-dx69',
    name = 'test/TEST.iz0',
    addr = '10.6.39.218',
    entityid = 4498)
fout = file('/tmp/TEST_ER01', 'wb')
while True:
    c = queue.get()
    if not c:
        break   # None sentinel: stream finished
    print len(c)
    fout.write(c)
fout.close()
| Python |
#!/usr/bin/python
# Admin one-shot: tell the local hub to unload its cached o3lib code so
# the next mission picks up a fresh copy.
import pprint,sys
from o3grid import constants as CC
from o3grid.protocol import O3Call
res = O3Call(('127.0.0.1', CC.DEFAULT_PORT),
    CC.SVC_HUB, 'UNLOADO3LIB')
| Python |
#!/usr/bin/python
# Submit one 'oneday01' mission per log source for a given date
# (argv[1], default 2007/01/18) to the local schedule service.
import pprint,sys
import time
from o3grid import constants as CC
from o3grid.protocol import O3Call
import o3testmisc
#S = O3Channel()
#S.connect(('127.0.0.1', CC.DEFAULT_PORT))
#res = S(CC.SVC_SCHEDULE, 'SUBMITMISSION',
#    'ls01', {
#    'module': 'logsplit01.logsplit01',
#    'missionclass': 'O3Mission',
#    })
if len(sys.argv) >= 2:
    datename = sys.argv[1]
else:
    datename = '2007/01/18'
dname = datename.replace('/', '.')   # dotted form used in mission ids
#res = O3Call(('127.0.0.1', CC.DEFAULT_PORT),
#    CC.SVC_HUB, 'O3UNLOADCODEBASE', 'oneday01')
#if o3testmisc.IsDebugMission('oneday01'):
#    for logname in ('uume', 'dzh', 'tt', 'itv'):
#        res = O3Call(('127.0.0.1', CC.DEFAULT_PORT),
#            CC.SVC_SCHEDULE, 'CLEANMISSION', 'OD01-%s-%s' % (logname, dname))
#    res = O3Call(('127.0.0.1', CC.DEFAULT_PORT),
#        CC.SVC_HUB, 'O3UNLOADCODEBASE', 'oneday01')
#    time.sleep(2)
#time.sleep(2)
# One mission per log source; mission id is OD01-<log>-<date>.
for logname in ('uume', 'itv', 'dzh', 'tt', 'hi', 'passport'):
    res = O3Call(('127.0.0.1', CC.DEFAULT_PORT),
        CC.SVC_SCHEDULE, 'SUBMITMISSION', {
            'name': 'OD01-%s-%s' % (logname, dname),
            'module': 'oneday01.oneday01',
            'missionclass': 'O3Mission',
            'prefix': 'plog/%s/%s' % (logname, datename),
        })
    print '%s|OD01-%s-%s' % (res[2], logname, dname)
| Python |
#!/usr/bin/python
# Drop warehouse entities given as command line arguments (numeric ids
# or names); defaults to entity '1' when no arguments are given.
import pprint
import sys
from o3grid import constants as CC
from o3grid.protocol import O3Channel
import time
entitys = ['1']
if len(sys.argv) > 1:
    entitys = []
    for x in sys.argv[1:]:
        try:
            entitys.append(int(x))   # numeric ids as ints ...
        except ValueError:
            entitys.append(x)        # ... everything else as a name
S = O3Channel().connect(('localhost', CC.DEFAULT_PORT))
for e in entitys:
    res = S(CC.SVC_WAREHOUSE, 'DROPENTITY', e)
    print res
#res = S(CC.SVC_WAREHOUSE, 'CLEANROOM', 1)
#pprint.pprint(res)
S.close()
#name = 'plog/uume/2005/12/%02d/%02d00' % (d, h)
#path = '/pub/plog/data/2006/12/%02d/%02d00' % (d, h)
#print name, path
#S.close()
#print name
#pprint.pprint(res)
| Python |
#!/usr/bin/python
# Submit an 'onedaytop100' mission for prefix argv[1]
# (default uume/2007/01/18).  In debug mode the previous mission and
# the cached codebase are cleaned first so changed code is reloaded.
import pprint,sys
import time
from o3grid import constants as CC
from o3grid.protocol import O3Call
import o3testmisc
if len(sys.argv) >= 2:
    prefix = sys.argv[1]
else:
    prefix = 'uume/2007/01/18'
logname, sep, datename = prefix.partition('/')
mid = 'ODT1-%s-%s' % (logname, datename.replace('/', '.'))   # mission id
prefix = 'plog/' + prefix
if o3testmisc.IsDebugMission('onedaytop100'):
    # debug rerun: clean the old mission and force a codebase reload
    res = O3Call(('127.0.0.1', CC.DEFAULT_PORT),
        CC.SVC_SCHEDULE, 'CLEANMISSION', mid)
    res = O3Call(('127.0.0.1', CC.DEFAULT_PORT),
        CC.SVC_HUB, 'O3UNLOADCODEBASE', 'onedaytop100')
    time.sleep(2)   # give the hub a moment to finish unloading
res = O3Call(('127.0.0.1', CC.DEFAULT_PORT),
    CC.SVC_SCHEDULE, 'SUBMITMISSION',
    mid, {
        'module': 'onedaytop100.onedaytop100',
        'missionclass': 'O3Mission',
        'prefix': prefix,
    })
| Python |
#!/usr/bin/python
# Manual test: fetch one raw entity (ROOMGET) from the space service on
# p-dx63-in and discard the bytes.  The hard-coded byte count must
# match the stored entity's size.
import pprint
import socket
from o3grid import constants as CC
from o3grid.protocol import O3Channel, O3Call, GetMessageFromSocket
import time
#res = O3Call(('p-dx59-in', CC.DEFAULT_PORT),
#    CC.SVC_SPACE, 'ROOMENTITYSPLIT0', '0', 'plog/uume/2006/12/26/2100', 1024 * 1024 * 256)
#pprint.pprint(res)
S = O3Channel().connect(('p-dx63-in', CC.DEFAULT_PORT))
res = S(CC.SVC_SPACE, 'ROOMGET', '0', 'plog/uume/2006/12/26/2100', 0, 1242365418)
pprint.pprint(res)
# Drain the whole body in one blocking recv, then read the trailing
# status frame.
buf = S.socket.recv(1242365418, socket.MSG_WAITALL)
res = S.getMessage()
pprint.pprint(res)
S.close()
#name = 'plog/uume/2005/12/%02d/%02d00' % (d, h)
#path = '/pub/plog/data/2006/12/%02d/%02d00' % (d, h)
#print name, path
#S.close()
#print name
#pprint.pprint(res)
| Python |
#!/usr/bin/python
# Admin one-shot: ask the warehouse service on p-dx44-in to flush its
# index database, then pretty-print the raw protocol reply tuple.
import pprint
from o3grid import constants as CC
from o3grid.protocol import O3Call, O3Channel
import time
res = O3Call(('p-dx44-in', CC.DEFAULT_PORT), CC.SVC_WAREHOUSE, 'FLUSHDB')
pprint.pprint(res)
| Python |
#!/usr/bin/python
# Submit one 'oneday01' mission per log source for a given date
# (argv[1], default 2007/01/18) to the local schedule service.
import pprint,sys
import time
from o3grid import constants as CC
from o3grid.protocol import O3Call
import o3testmisc
#S = O3Channel()
#S.connect(('127.0.0.1', CC.DEFAULT_PORT))
#res = S(CC.SVC_SCHEDULE, 'SUBMITMISSION',
#    'ls01', {
#    'module': 'logsplit01.logsplit01',
#    'missionclass': 'O3Mission',
#    })
if len(sys.argv) >= 2:
    datename = sys.argv[1]
else:
    datename = '2007/01/18'
dname = datename.replace('/', '.')   # dotted form used in mission ids
#res = O3Call(('127.0.0.1', CC.DEFAULT_PORT),
#    CC.SVC_HUB, 'O3UNLOADCODEBASE', 'oneday01')
#if o3testmisc.IsDebugMission('oneday01'):
#    for logname in ('uume', 'dzh', 'tt', 'itv'):
#        res = O3Call(('127.0.0.1', CC.DEFAULT_PORT),
#            CC.SVC_SCHEDULE, 'CLEANMISSION', 'OD01-%s-%s' % (logname, dname))
#    res = O3Call(('127.0.0.1', CC.DEFAULT_PORT),
#        CC.SVC_HUB, 'O3UNLOADCODEBASE', 'oneday01')
#    time.sleep(2)
#time.sleep(2)
# One mission per log source; mission id is OD01-<log>-<date>.
for logname in ('uume', 'itv', 'dzh', 'tt', 'hi', 'passport'):
    res = O3Call(('127.0.0.1', CC.DEFAULT_PORT),
        CC.SVC_SCHEDULE, 'SUBMITMISSION', {
            'name': 'OD01-%s-%s' % (logname, dname),
            'module': 'oneday01.oneday01',
            'missionclass': 'O3Mission',
            'prefix': 'plog/%s/%s' % (logname, datename),
        })
    print '%s|OD01-%s-%s' % (res[2], logname, dname)
| Python |
#!/usr/bin/python
# Submit an 'onedaytop100' mission for prefix argv[1]
# (default uume/2007/01/18).  In debug mode the previous mission and
# the cached codebase are cleaned first so changed code is reloaded.
import pprint,sys
import time
from o3grid import constants as CC
from o3grid.protocol import O3Call
import o3testmisc
if len(sys.argv) >= 2:
    prefix = sys.argv[1]
else:
    prefix = 'uume/2007/01/18'
logname, sep, datename = prefix.partition('/')
mid = 'ODT1-%s-%s' % (logname, datename.replace('/', '.'))   # mission id
prefix = 'plog/' + prefix
if o3testmisc.IsDebugMission('onedaytop100'):
    # debug rerun: clean the old mission and force a codebase reload
    res = O3Call(('127.0.0.1', CC.DEFAULT_PORT),
        CC.SVC_SCHEDULE, 'CLEANMISSION', mid)
    res = O3Call(('127.0.0.1', CC.DEFAULT_PORT),
        CC.SVC_HUB, 'O3UNLOADCODEBASE', 'onedaytop100')
    time.sleep(2)   # give the hub a moment to finish unloading
res = O3Call(('127.0.0.1', CC.DEFAULT_PORT),
    CC.SVC_SCHEDULE, 'SUBMITMISSION',
    mid, {
        'module': 'onedaytop100.onedaytop100',
        'missionclass': 'O3Mission',
        'prefix': prefix,
    })
| Python |
#!/usr/bin/python
# Admin one-shot: remove mission 'uume02' from the schedule service.
import pprint,sys
from o3grid import constants as CC
from o3grid.protocol import O3Call
res = O3Call(('127.0.0.1', CC.DEFAULT_PORT),
    CC.SVC_SCHEDULE, 'CLEANMISSION', 'uume02')
| Python |
#!/usr/bin/python
# Drop warehouse entities given as command line arguments (numeric ids
# or names); defaults to entity '1' when no arguments are given.
import pprint
import sys
from o3grid import constants as CC
from o3grid.protocol import O3Channel
import time
entitys = ['1']
if len(sys.argv) > 1:
    entitys = []
    for x in sys.argv[1:]:
        try:
            entitys.append(int(x))   # numeric ids as ints ...
        except ValueError:
            entitys.append(x)        # ... everything else as a name
S = O3Channel().connect(('localhost', CC.DEFAULT_PORT))
for e in entitys:
    res = S(CC.SVC_WAREHOUSE, 'DROPENTITY', e)
    print res
#res = S(CC.SVC_WAREHOUSE, 'CLEANROOM', 1)
#pprint.pprint(res)
S.close()
#name = 'plog/uume/2005/12/%02d/%02d00' % (d, h)
#path = '/pub/plog/data/2006/12/%02d/%02d00' % (d, h)
#print name, path
#S.close()
#print name
#pprint.pprint(res)
| Python |
#!/usr/bin/python
# List warehouse entities whose name starts with argv[1]
# (default plog/uume/2006/12/31/).
import pprint
import sys, os
from o3grid import constants as CC
from o3grid.protocol import O3Call, O3Channel
import time
if len(sys.argv) >= 2:
    name = sys.argv[1]
else:
    name = 'plog/uume/2006/12/31/'
res = O3Call(('p-dx44-in', CC.DEFAULT_PORT), CC.SVC_WAREHOUSE, 'LISTENTITY0', name)
pprint.pprint(res)
| Python |
#!/usr/bin/python
# Resubmit the 'uume03' mission for prefix argv[1]
# (default plog/uume/2006/12/31), forcing a codebase reload first.
import pprint, sys, time
from o3grid import constants as CC
from o3grid.protocol import O3Call
#S = O3Channel()
#S.connect(('127.0.0.1', CC.DEFAULT_PORT))
#res = S(CC.SVC_SCHEDULE, 'SUBMITMISSION',
#    'ls01', {
#    'module': 'logsplit01.logsplit01',
#    'missionclass': 'O3Mission',
#    })
if len(sys.argv) >= 2:
    prefix = sys.argv[1]
else:
    prefix = 'plog/uume/2006/12/31'
res = O3Call(('127.0.0.1', CC.DEFAULT_PORT),
    CC.SVC_HUB, 'O3UNLOADCODEBASE', 'uume03')
time.sleep(2)   # let the hub finish unloading before resubmitting
res = O3Call(('127.0.0.1', CC.DEFAULT_PORT),
    CC.SVC_SCHEDULE, 'SUBMITMISSION',
    'uume03', {
        'module': 'uume03.uume03',
        'missionclass': 'O3Mission',
        'prefix': prefix,
    })
| Python |
#!/usr/bin/python
# List warehouse entities under prefix argv[1]
# (default plog/uume/2006/12/31/), then resolve the shadow locations of
# every entity found.
import pprint
import sys, os
from o3grid import constants as CC
from o3grid.protocol import O3Call, O3Channel
import time
if len(sys.argv) >= 2:
    name = sys.argv[1]
else:
    name = 'plog/uume/2006/12/31/'
res = O3Call(('p-dx44-in', CC.DEFAULT_PORT), CC.SVC_WAREHOUSE, 'LISTENTITY0', name)
pprint.pprint(res)
# r[0] is the entity-id column of each result row.
res = O3Call(('p-dx44-in', CC.DEFAULT_PORT),
    CC.SVC_WAREHOUSE, 'LISTENTITYLOCATION0', [r[0] for r in res[2]])
pprint.pprint(res)
| Python |
#!/usr/bin/python
# Admin one-shot: list all warehouse rooms and run CLEANROOM on each
# (r[0] is the room-id column of each result row).
import pprint
from o3grid import constants as CC
from o3grid.protocol import O3Channel
import time
S = O3Channel().connect(('localhost', CC.DEFAULT_PORT))
res = S(CC.SVC_WAREHOUSE, 'LISTROOM')
pprint.pprint(res[2])
for r in res[2]:
    res = S(CC.SVC_WAREHOUSE, 'CLEANROOM', r[0])
    pprint.pprint(res)
#res = S(CC.SVC_WAREHOUSE, 'CLEANROOM', 1)
#pprint.pprint(res)
S.close()
#name = 'plog/uume/2005/12/%02d/%02d00' % (d, h)
#path = '/pub/plog/data/2006/12/%02d/%02d00' % (d, h)
#print name, path
#S.close()
#print name
#pprint.pprint(res)
| Python |
#!/usr/bin/python
# Admin one-shot: tell the local hub to unload its cached o3lib code so
# the next mission picks up a fresh copy.
import pprint,sys
from o3grid import constants as CC
from o3grid.protocol import O3Call
res = O3Call(('127.0.0.1', CC.DEFAULT_PORT),
    CC.SVC_HUB, 'UNLOADO3LIB')
| Python |
import os
# Per-user o3 profile directory; debug flag files live under ~/.o3/_debug
HOME = os.environ.get('HOME', '/root')
O3PROFILEDIR = HOME + '/.o3'
def IsDebugMission(missionname):
    """Return True when debugging is enabled globally (_debug/all) or
    for this specific mission (_debug/<missionname>)."""
    globalflag = O3PROFILEDIR + '/_debug/all'
    missionflag = O3PROFILEDIR + '/_debug/' + missionname
    return os.path.exists(globalflag) or os.path.exists(missionflag)
| Python |
#!/usr/bin/python
# Resubmit the 'uume03' mission for prefix argv[1]
# (default plog/uume/2006/12/31), forcing a codebase reload first.
import pprint, sys, time
from o3grid import constants as CC
from o3grid.protocol import O3Call
#S = O3Channel()
#S.connect(('127.0.0.1', CC.DEFAULT_PORT))
#res = S(CC.SVC_SCHEDULE, 'SUBMITMISSION',
#    'ls01', {
#    'module': 'logsplit01.logsplit01',
#    'missionclass': 'O3Mission',
#    })
if len(sys.argv) >= 2:
    prefix = sys.argv[1]
else:
    prefix = 'plog/uume/2006/12/31'
res = O3Call(('127.0.0.1', CC.DEFAULT_PORT),
    CC.SVC_HUB, 'O3UNLOADCODEBASE', 'uume03')
time.sleep(2)   # let the hub finish unloading before resubmitting
res = O3Call(('127.0.0.1', CC.DEFAULT_PORT),
    CC.SVC_SCHEDULE, 'SUBMITMISSION',
    'uume03', {
        'module': 'uume03.uume03',
        'missionclass': 'O3Mission',
        'prefix': prefix,
    })
| Python |
#!/usr/bin/python
# List warehouse entities under prefix argv[1]
# (default plog/uume/2006/12/31/), then resolve the shadow locations of
# every entity found.
import pprint
import sys, os
from o3grid import constants as CC
from o3grid.protocol import O3Call, O3Channel
import time
if len(sys.argv) >= 2:
    name = sys.argv[1]
else:
    name = 'plog/uume/2006/12/31/'
res = O3Call(('p-dx44-in', CC.DEFAULT_PORT), CC.SVC_WAREHOUSE, 'LISTENTITY0', name)
pprint.pprint(res)
# r[0] is the entity-id column of each result row.
res = O3Call(('p-dx44-in', CC.DEFAULT_PORT),
    CC.SVC_WAREHOUSE, 'LISTENTITYLOCATION0', [r[0] for r in res[2]])
pprint.pprint(res)
| Python |
#!/usr/bin/python
# Admin one-shot: remove mission 'uume02' from the schedule service.
import pprint,sys
from o3grid import constants as CC
from o3grid.protocol import O3Call
res = O3Call(('127.0.0.1', CC.DEFAULT_PORT),
    CC.SVC_SCHEDULE, 'CLEANMISSION', 'uume02')
| Python |
#!/usr/bin/python
# Admin one-shot: list all warehouse rooms and run CLEANROOM on each
# (r[0] is the room-id column of each result row).
import pprint
from o3grid import constants as CC
from o3grid.protocol import O3Channel
import time
S = O3Channel().connect(('localhost', CC.DEFAULT_PORT))
res = S(CC.SVC_WAREHOUSE, 'LISTROOM')
pprint.pprint(res[2])
for r in res[2]:
    res = S(CC.SVC_WAREHOUSE, 'CLEANROOM', r[0])
    pprint.pprint(res)
#res = S(CC.SVC_WAREHOUSE, 'CLEANROOM', 1)
#pprint.pprint(res)
S.close()
#name = 'plog/uume/2005/12/%02d/%02d00' % (d, h)
#path = '/pub/plog/data/2006/12/%02d/%02d00' % (d, h)
#print name, path
#S.close()
#print name
#pprint.pprint(res)
| Python |
#!/usr/bin/python
# Manual test: fetch one raw entity (ROOMGET) from the space service on
# p-dx63-in and discard the bytes.  The hard-coded byte count must
# match the stored entity's size.
import pprint
import socket
from o3grid import constants as CC
from o3grid.protocol import O3Channel, O3Call, GetMessageFromSocket
import time
#res = O3Call(('p-dx59-in', CC.DEFAULT_PORT),
#    CC.SVC_SPACE, 'ROOMENTITYSPLIT0', '0', 'plog/uume/2006/12/26/2100', 1024 * 1024 * 256)
#pprint.pprint(res)
S = O3Channel().connect(('p-dx63-in', CC.DEFAULT_PORT))
res = S(CC.SVC_SPACE, 'ROOMGET', '0', 'plog/uume/2006/12/26/2100', 0, 1242365418)
pprint.pprint(res)
# Drain the whole body in one blocking recv, then read the trailing
# status frame.
buf = S.socket.recv(1242365418, socket.MSG_WAITALL)
res = S.getMessage()
pprint.pprint(res)
S.close()
#name = 'plog/uume/2005/12/%02d/%02d00' % (d, h)
#path = '/pub/plog/data/2006/12/%02d/%02d00' % (d, h)
#print name, path
#S.close()
#print name
#pprint.pprint(res)
| Python |
#!/usr/bin/python
# List warehouse entities whose name starts with argv[1]
# (default plog/uume/2006/12/31/).
import pprint
import sys, os
from o3grid import constants as CC
from o3grid.protocol import O3Call, O3Channel
import time
if len(sys.argv) >= 2:
    name = sys.argv[1]
else:
    name = 'plog/uume/2006/12/31/'
res = O3Call(('p-dx44-in', CC.DEFAULT_PORT), CC.SVC_WAREHOUSE, 'LISTENTITY0', name)
pprint.pprint(res)
| Python |
#
# O3 base library entry
#
from o3grid import constants as CC
from o3grid.protocol import O3Call, O3Channel
__VERSION__ = '0.0.0.2'
class O3(object):
    """Mission-side entry object: result load/store helpers."""
    def __init__(self, workspace):
        self.ws = workspace     # owning workspace (exposes .server.resolv)
        self.localnames = {}    # per-instance overrides for name lookup
    def _respoint(self, resnodename):
        # Prefer a locally registered endpoint over server resolution.
        # NOTE: resolv() is always evaluated (dict.get's default arg is
        # eager) -- kept to match the original behaviour exactly.
        return self.localnames.get(
            resnodename, self.ws.server.resolv(resnodename))
    def saveResult(self, name, value, resnodename = 'RESULT'):
        """Store *value* under *name* on the result node; -1 on error."""
        res = O3Call(self._respoint(resnodename),
            CC.SVC_SPACE, 'RESULTPUT', name, value)
        if res[0] == CC.RET_OK:
            return res[2]
        return -1
    def loadResult(self, name, resnodename = 'RESULT'):
        """Fetch the stored result *name*; None when the call fails."""
        res = O3Call(self._respoint(resnodename),
            CC.SVC_SPACE, 'RESULTGET', name)
        if res[0] != CC.RET_OK:
            return None
        return res[2]
| Python |
from struct import pack as ipack, unpack as iunpack
from zlib import decompress as _decompress, MAX_WBITS
from o3grid import constants as CC
from o3grid.protocol import O3Call, O3Channel
import threading
import Queue
# ------
# File services ...
# ------
def O3EntityReader0(queue, **P):
    """Stream the bytes of a warehouse entity into *queue* in chunks.

    Required keyword params: node, addr, label, name.  Optional:
    blocksize (default 8 MiB) and entityid (default 0).  A final
    ``None`` is always pushed so the consumer knows the stream ended,
    even when the protocol call fails.  Names ending in ``.iz0`` are
    stored block-compressed (see zipis format) and inflated on the fly.
    """
    try:
        node = P['node']
        addr = P['addr']
        label = P['label']
        name = P['name']
        bs = P.get('blocksize', 8388608)
        entityid = P.get('entityid', 0)
        size = 0
        if name.endswith('.iz0'):
            # Compressed entity: ROOMGET3 replies with the block count,
            # then each block arrives as a 32-byte index entry followed
            # by the deflate payload.
            S = O3Channel().connect((addr, CC.DEFAULT_PORT))
            res = S(CC.SVC_SPACE, 'ROOMGET3',
                {'label':label, 'name':name, 'entityid':entityid})
            if res[0] != CC.RET_OK:
                return
            blocks = res[2]
            for i in xrange(blocks):
                headstr = S.recvAll(32)
                # print len(headstr)
                blockhead = iunpack('QII4I', headstr)
                binsize = blockhead[1]    # compressed block size
                boutsize = blockhead[2]   # decompressed size (unused)
                ccontent = S.recvAll(binsize)
                # print len(ccontent)
                # content = _decompress(ccontent, -MAX_WBITS, boutsize)
                content = _decompress(ccontent)
                queue.put(content)
            S.getMessage()   # drain the trailing status frame
            S.close()
        else:
            # Plain entity: ROOMGET1 replies with the byte size, then
            # the raw data follows on the channel.
            S = O3Channel().connect((addr, CC.DEFAULT_PORT))
            res = S(CC.SVC_SPACE, 'ROOMGET1', label, name, 0, 0, entityid)
            if res[0] != CC.RET_OK:
                return
            size = res[2]
            rest = size
            while rest != 0:
                blocksize = min(rest, bs)
                content = S.recvAll(blocksize)
                rest -= blocksize
                queue.put(content)
            S.getMessage()
            S.close()
    finally:
        queue.put(None)   # end-of-stream sentinel for the consumer
# ===
# Public alias: O3EntityReader currently points at the v0 implementation.
O3EntityReader = O3EntityReader0
# ======
def StartO3EntityReader(queue, **kwargs):
    """Run O3EntityReader in a daemon thread; returns the Thread.

    Extra keyword arguments are forwarded unchanged to the reader.
    """
    reader = threading.Thread(
        name = "O3EntityReader",
        target = O3EntityReader,
        args = (queue,),
        kwargs = kwargs)
    # Daemonized so a stuck transfer never blocks interpreter shutdown.
    reader.setDaemon(True)
    reader.start()
    return reader
| Python |
#
# Special compress file format for O3 warehouse
#
# File Structure
# Offset Length
# 0 4B "ISZ0" (4char)
# 4B FLAGS (dword)
# 4B VERSION (dword)
# 4B NOUSED, 0
# 16 4B "HD01" (4char)
# 4B NOUSED, 0
# 4B FILE BLOCKS
# 4B ONE BLOCK UNCOMPRESS SIZE
# 8B FILE COMPRESSED SIZE
# 8B FILE DECOMPRESSED SIZE
# 48 16B NOUSED, 0
# 32B BLOCK_ENTRY
# .......
# 65536 BLOCK
#
# ------
# Block entry structure:
# 0 8B OFFSET
# 8 4B BLOCK SIZE
# 12 4B UNCOMPRESSED SIZE
# 16 16B NOUSED - available for other uses
# ------
import os, sys, zlib
import binascii
from zlib import compress as _compress, decompress as _decompress
import struct
#class Zipis(object):
# def __init__(self, name): pass
def CompressFile(finame, foname, linemode = True, bs = 16777216, level = 6):
    """Compress *finame* into the ISZ0 block format (see header above).

    Data blocks are written first (starting at offset 0x10000); the
    file header and block index are written back to the start once all
    block sizes are known.  With *linemode* each block is trimmed to
    end on a newline so blocks decompress line-complete.
    NOTE: *level* is currently unused -- zlib's default level applies.
    """
    fin = file(finame, 'rb')
    fout = file(foname, 'wb')
    bi = list() # block index
    dbb = 0 # data block base
    idsize = 0 # input data size
    odsize = 0 # output data size
    # seek fout to data block
    fout.seek(0x10000, 0)
    print "%X" % fout.tell()
    looping = True
    while looping:
        content = fin.read(bs)
        if not content: # true if reach end of file
            looping = False
            break
        else:
            if linemode: # check end of line is end of block
                if content[-1] != '\n':
                    offset = content.rfind('\n')
                    if offset != -1:
                        # cut at the last newline and rewind the input
                        # so the remainder starts the next block
                        clen = len(content)
                        content = content[:offset + 1]
                        fin.seek(len(content) - clen, 1)
            ccontent = _compress(content)
            fout.write(ccontent)
            # index entry: (offset in data area, compressed, raw size)
            bi.append((odsize, len(ccontent), len(content)))
            print '%d - %d %d %d %s' % (len(bi), odsize, len(ccontent), len(content), binascii.b2a_hex(ccontent[:16]))
            odsize += len(ccontent)
            idsize += len(content)
    # data compressing finished, build header and write to fout's begin.
    head0 = struct.pack(
        '4sIII4sIIIQQ4I',
        'ISZ0', 0, 0, 0,
        'HD01', 0, len(bi), bs,
        odsize, idsize,
        0, 0, 0, 0)
    head1 = ''.join([
        struct.pack("QII4I", x[0], x[1], x[2], 0, 0, 0, 0) for x in bi
    ])
    fout.seek(0)
    fout.write(head0)
    fout.write(head1)
    fin.close()
    fout.close()
def DecompressFile(finame, foname):
    """Inverse of CompressFile: inflate an ISZ0 file back to raw data."""
    fin = file(finame, 'rb')
    fout = file(foname, 'wb')
    head = fin.read(0x10000)   # fixed-size header + block index region
    filehead = struct.unpack("4sIII4sIIIQQ4I", head[:64])
    blocks = filehead[6]       # number of data blocks
    blocksize = filehead[7]    # uncompressed size of one full block
    for i in xrange(blocks):
        # 32-byte index entry: offset, compressed size, raw size, pad
        blockhead = struct.unpack("QII4I", head[64 + i * 32: 64 + i * 32 + 32])
        print "%d - %d,%d,%d" % (i, blockhead[0], blockhead[1], blockhead[2])
        binsize = blockhead[1]
        boutsize = blockhead[2]
        ccontent = fin.read(binsize)
        print binascii.b2a_hex(ccontent[:16])
        content = _decompress(ccontent)
        fout.write(content)
    fin.close()
    fout.close()
if __name__ == '__main__':
    # manual round-trip test entry points
    #CompressFile('/tmp/2300', '/tmp/2300.iz')
    DecompressFile('/tmp/TEST.iz0', '/tmp/TEST')
| Python |
# Version stamp for the o3lib package.
O3LIB_VERSION = '0.0.0.1'
| Python |
#!python2.5
import os
from o3grid.service import BaseService, EchoService
from o3grid.hub import HubService
from o3grid.baseserver import ServerBase
from o3grid import config
from o3grid.utility import D
CONFIG = 'config.o3'   # default config file name; overridable via env
def main():
    # Pick the config file: O3_CONFIG wins, else O3_NAME + '.o3',
    # else the default above.
    global CONFIG
    if os.environ.has_key('O3_CONFIG'):
        CONFIG = os.environ['O3_CONFIG']
    elif os.environ.has_key('O3_NAME'):
        CONFIG = os.environ['O3_NAME'] + ".o3"
    # Load Base Server
    C = config.Load(CONFIG)
    S = ServerBase()
    S.setup(C)
    S.setupServices()
    S.activate()
    S.serveForever()
if __name__ == '__main__':
    main()
| Python |
#!python2.5
import os
from o3grid.service import BaseService, EchoService
from o3grid.hub import HubService
from o3grid.baseserver import ServerBase
from o3grid import config
from o3grid.utility import D
from o3grid.protocol import O3Channel
from o3grid import constants as CC
def readfile(fn):
    # Return the stripped contents of a small provisioning text file.
    fin = file(fn, 'r')
    contents = fin.read()
    fin.close()
    return contents.strip()
def main():
    # Node identity and the autoconfig server address are provisioned
    # as plain files by the deployment scripts.
    NODEID = readfile('/is/app/o3/etc/NODEID')
    AUTOS = readfile('/is/app/o3/etc/AUTOS')
    # Ask the autoconfig service for this node's configuration.
    channel = O3Channel()
    channel.connect((AUTOS, CC.DEFAULT_PORT))
    res = channel(CC.SVC_AUTOCONFIG, 'AUTOCONFIG0', 'o3', NODEID)
    channel.close()
    C = res[2]   # reply payload: the configuration dict
    del res
    # Boot the base server with the received configuration.
    S = ServerBase()
    S.setup(C)
    S.setupServices()
    S.activate()
    S.serveForever()
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
# o3 grid configuration for zone 'z00' (o3dev).  Loaded via
# config.Load(), which exec()s this file and picks up the _C dict.
from o3grid import constants as CC
BASE = '/is/app/o3'
def B(p, b = BASE):
    # Build an absolute path under the o3 installation base.
    return '%s/%s' % (b, p)
# Settings shared by every service on this node.
common = {
    'name': 'z00',
    'id': 'z00',
    'zone': 'o3dev',
    'entry': ('10.6.32.197', CC.DEFAULT_PORT),
    'base': BASE,
    # Well-known service endpoints.
    'names': {
        'HUB': ('10.6.32.197', CC.DEFAULT_PORT),
        'NAMES': ('10.6.32.197', CC.DEFAULT_PORT),
        'SCHEDULE': ('10.6.32.197', CC.DEFAULT_PORT),
        'WAREHOUSE': ('10.6.32.197', CC.DEFAULT_PORT),
        'RESULT': ('10.4.170.220', CC.DEFAULT_PORT), # p-cn39
    },
    # UDP log sink (see the log daemon script).
    'ulog': {
        'addr': ('10.6.32.197', CC.DEFAULT_LOG_PORT)
    },
    'threadpoolsize': 10,
    #'debug': 'call',
}
hub = {
    'paths': {
        'codebase': B('env/codebase'),
        'scriptbase': B('env/codebase'),
    }
}
space = {
    'path': B('tmp/storage'),
}
workspace = {
    'base': B('tmp/run'),
    'respath': '/data1/o3res',
    'tag': 'center',
}
autoconfig = {
    'policy': 'o3grid.autoconfigpolicy',
}
warehouse = {
    'dburl': 'mysql://o3:o3indexdb@o3db/o3',
}
# Extra name registrations served by the NAMES service.
names = {
    'names': {
        'BIGRESULT': ('10.6.33.213', CC.DEFAULT_PORT), # p-dx70
    },
}
# _C is the contract with config.Load(): one key per service section.
_C = {
    'common': common,
    'hub': hub,
    'space': space,
    'workspace': workspace,
    'names': names,
    'autoconfig': autoconfig,
    'schedule': None,
    'warehouse': warehouse,
}
| Python |
#!python2.5
import os
from o3grid.service import BaseService, EchoService
from o3grid.hub import HubService
from o3grid.baseserver import ServerBase
from o3grid import config
from o3grid.utility import D
CONFIG = 'config.o3'   # default config file name; overridable via env
def main():
    # Pick the config file: O3_CONFIG wins, else O3_NAME + '.o3',
    # else the default above.
    global CONFIG
    if os.environ.has_key('O3_CONFIG'):
        CONFIG = os.environ['O3_CONFIG']
    elif os.environ.has_key('O3_NAME'):
        CONFIG = os.environ['O3_NAME'] + ".o3"
    # Load Base Server
    C = config.Load(CONFIG)
    S = ServerBase()
    S.setup(C)
    S.setupServices()
    S.activate()
    S.serveForever()
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
import os
SOURCE='/is/app/o3/base'
TARGET='/is/app/o3/o3svn'
def GetINodeNumber(path1):
    """Return the inode number of *path1*, or -1 when it cannot be
    stat()ed (missing file, permission denied)."""
    try:
        return os.stat(path1)[1]
    except OSError:
        # only stat failures are expected; the original bare except
        # would also have hidden real bugs (e.g. a non-string path)
        return -1
def IsSameFile(path1, path2):
    """True when both paths resolve to the same inode (hard links).
    NOTE: two missing paths compare equal (both map to -1)."""
    return GetINodeNumber(path1) == GetINodeNumber(path2)
def L(str, chr = '|'):
    # Tiny log helper: prefix every message with a marker character.
    # (Parameter names shadow the builtins str/chr -- kept unchanged
    # for byte-compatibility with the original code.)
    print '%s %s' % (chr, str)
# ===
def ScanDir(source, target, path = ''):
    # Forward sync: mirror *source* into *target* using hard links,
    # recursing into directories.  CVS metadata, .cvsignore, symlinks
    # and *.pyc files are skipped.  Target links that already share an
    # inode with the source file are left alone; stale ones are
    # re-linked.
    entries = os.listdir('%s/%s' % (source, path))
    entries.sort()
    for e in entries:
        if e == 'CVS':
            continue
        if e == '.cvsignore':
            continue
        if path == '':
            rpath = e
        else:
            rpath = '/'.join((path, e))
        aspath = '/'.join((source, rpath))   # absolute source path
        atpath = '/'.join((target, rpath))   # absolute target path
        if os.path.islink(aspath):
            continue   # never mirror symlinks
        elif os.path.isfile(aspath):
            if rpath.endswith('.pyc'):
                continue
            if not os.path.exists(atpath):
                os.link(aspath, atpath)
                L('link %s' % rpath)
                continue
            if IsSameFile(aspath, atpath):
                continue   # already hard-linked to the same inode
            # target exists but points at a different inode: refresh it
            os.unlink(atpath)
            os.link(aspath, atpath)
            L('update %s' % rpath)
            continue
        elif os.path.isdir(aspath):
            if not os.path.exists(atpath):
                os.mkdir(atpath)
                L('mkdir %s' % rpath)
            ScanDir(source, target, rpath)
            continue
def ScanDir2(source, target, path = ''):
    # Reverse sync: walk *source* and delete any file that no longer
    # exists in *target*, skipping .svn metadata.  NOTE: directories
    # that disappeared are only reported ('rmdir'), never removed.
    entries = os.listdir('%s/%s' % (source, path))
    entries.sort()
    for e in entries:
        if e == '.svn':
            continue
        if path == '':
            rpath = e
        else:
            rpath = '/'.join((path, e))
        aspath = '/'.join((source, rpath))
        atpath = '/'.join((target, rpath))
        if os.path.isdir(aspath):
            ScanDir2(source, target, rpath)
            if not os.path.exists(atpath):
                L('rmdir %s' % rpath)
            continue
        else:
            if not os.path.exists(atpath):
                L('remove %s' % rpath)
                os.unlink(aspath)
            continue
# Forward pass: hard-link new/changed files from the CVS tree into the
# svn tree; reverse pass: drop files that disappeared from CVS.
ScanDir(SOURCE, TARGET)
ScanDir2(TARGET, SOURCE)
| Python |
#
# ==AUTHOR
# Sin Yu <scaner@gmail.com>
#
# ==MODULE
# Create WareHouse Index DB
#
DBURL = 'mysql://o3:o3indexdb@p-dx44-in/o3'
import sys
sys.path.append('/is/app/o3/lib/o3')
from sqlalchemy import *
def CreateO3WarehouseDatabase(url):
    """(Re)create the warehouse index schema at database *url*.

    WARNING: destructive -- the room, entity and shadow tables are
    dropped first, recreated, and the room table is reseeded from the
    hard-coded RoomDB host list below.
    """
    engine = create_engine(url)
    metadata = BoundMetaData(engine)
    engine.echo = True   # log generated SQL to stdout
    # --- room: one row per (node, disk label) storage area
    roomTable = Table(
        'room', metadata,
        Column('id', Integer, primary_key = True),
        Column('zone', Integer, default = 0),
        Column('node', String(20)),
        Column('label', String(20), default = '0'),
        Column('addr', String(40)),
        Column('base', String(20), default = '/data/o3warehouse'),
        Column('capacity', Integer),
        Column('used', Integer, default = 0),
        Column('state', Integer, default = 1),
        Column('last', Integer, nullable = True, default = None),
        Column('active', Integer, nullable = False, default = 0),
        Column('comment', String, nullable = True, default = None),
        UniqueConstraint('node', 'label'),
    )
    roomTable.drop(checkfirst = True)
    roomTable.create(checkfirst = True)
    # Seed rows: (node, label, addr, mount point, capacity)
    RoomDB = (
        # ('p-cn25', 0, '10.4.170.197', '/data', 60),
        # ('p-cn41', 0, '10.4.170.228', '/data1', 120),
        # ('p-dx48', 0, '10.6.33.155', '/data', 30),
        # ('p-dx47', 0, '10.6.33.154', '/data', 30),
        # ('p-dx60', 0, '10.6.39.209', '/data1', 210),
        # ('p-dx86', 0, '10.6.39.66', '/data', 100),
        # ('p-dx86', 1, '10.6.39.66', '/data1', 210),
        ('p-dx53', 0, '10.6.39.202', '/data', 200),
        ('p-dx56', 0, '10.6.39.205', '/data1', 200),
        ('p-dx58', 0, '10.6.39.207', '/data', 200),
        ('p-dx58', 1, '10.6.39.207', '/data1', 200),
        ('p-dx61', 0, '10.6.39.210', '/data', 180),
        ('p-dx61', 1, '10.6.39.210', '/data1', 180),
    )
    for r in RoomDB:
        roomTable.insert().execute(
            zone = 0, node = r[0], label = str(r[1]),
            addr = r[2], base = '%s/o3warehouse' % r[3],
            # capacity appears to be GB scaled by 1024*1024, i.e. stored
            # in KB units -- TODO confirm against the accounting code
            capacity = r[4] * 1024 * 1024,
        )
    # --- entity: one row per logical stored object
    entityTable = Table(
        'entity', metadata,
        Column('id', Integer, primary_key = True),
        Column('zone', Integer),
        Column('name', String(255)),
        Column('source', String(255)),
        Column('size', Integer),
        Column('mtime', Integer),
        Column('last', Integer),
        Column('mirrors', Integer),
        Column('state', Integer),
        Column('action', Integer),
        Column('tag', String(255), nullable = True, default = None),
        Column('active', Integer, nullable = False, default = 0),
        Column('comment', String(255), nullable = True, default = None),
        UniqueConstraint('name', 'active'),
    )
    entityTable.drop(checkfirst = True)
    entityTable.create(checkfirst = True)
    # --- shadow: one row per physical copy of an entity in a room
    shadowTable = Table(
        'shadow', metadata,
        Column('id', Integer, primary_key = True),
        Column('entity', Integer, ForeignKey('entity.id')),
        Column('room', Integer, ForeignKey('room.id')),
        Column('mtime', Integer),
        Column('last', Integer),
        Column('taskid', String),
        Column('state', Integer),
        Column('active', Integer, nullable = False, default = 0),
        Column('comment', String),
    )
    shadowTable.drop(checkfirst = True)
    shadowTable.create(checkfirst = True)
    engine.dispose()
if __name__ == '__main__':
    CreateO3WarehouseDatabase(DBURL)
| Python |
#!/usr/bin/python
import os
SOURCE='/is/app/o3/base'
TARGET='/is/app/o3/o3svn'
def GetINodeNumber(path1):
    """Return the inode number of *path1*, or -1 when it cannot be
    stat()ed (missing file, permission denied)."""
    try:
        return os.stat(path1)[1]
    except OSError:
        # only stat failures are expected; the original bare except
        # would also have hidden real bugs (e.g. a non-string path)
        return -1
def IsSameFile(path1, path2):
    """True when both paths resolve to the same inode (hard links).
    NOTE: two missing paths compare equal (both map to -1)."""
    return GetINodeNumber(path1) == GetINodeNumber(path2)
def L(str, chr = '|'):
    # Tiny log helper: prefix every message with a marker character.
    # (Parameter names shadow the builtins str/chr -- kept unchanged
    # for byte-compatibility with the original code.)
    print '%s %s' % (chr, str)
# ===
def ScanDir(source, target, path = ''):
    # Forward sync: mirror *source* into *target* using hard links,
    # recursing into directories.  CVS metadata, .cvsignore, symlinks
    # and *.pyc files are skipped.  Target links that already share an
    # inode with the source file are left alone; stale ones are
    # re-linked.
    entries = os.listdir('%s/%s' % (source, path))
    entries.sort()
    for e in entries:
        if e == 'CVS':
            continue
        if e == '.cvsignore':
            continue
        if path == '':
            rpath = e
        else:
            rpath = '/'.join((path, e))
        aspath = '/'.join((source, rpath))   # absolute source path
        atpath = '/'.join((target, rpath))   # absolute target path
        if os.path.islink(aspath):
            continue   # never mirror symlinks
        elif os.path.isfile(aspath):
            if rpath.endswith('.pyc'):
                continue
            if not os.path.exists(atpath):
                os.link(aspath, atpath)
                L('link %s' % rpath)
                continue
            if IsSameFile(aspath, atpath):
                continue   # already hard-linked to the same inode
            # target exists but points at a different inode: refresh it
            os.unlink(atpath)
            os.link(aspath, atpath)
            L('update %s' % rpath)
            continue
        elif os.path.isdir(aspath):
            if not os.path.exists(atpath):
                os.mkdir(atpath)
                L('mkdir %s' % rpath)
            ScanDir(source, target, rpath)
            continue
def ScanDir2(source, target, path = ''):
    # Reverse sync: walk *source* and delete any file that no longer
    # exists in *target*, skipping .svn metadata.  NOTE: directories
    # that disappeared are only reported ('rmdir'), never removed.
    entries = os.listdir('%s/%s' % (source, path))
    entries.sort()
    for e in entries:
        if e == '.svn':
            continue
        if path == '':
            rpath = e
        else:
            rpath = '/'.join((path, e))
        aspath = '/'.join((source, rpath))
        atpath = '/'.join((target, rpath))
        if os.path.isdir(aspath):
            ScanDir2(source, target, rpath)
            if not os.path.exists(atpath):
                L('rmdir %s' % rpath)
            continue
        else:
            if not os.path.exists(atpath):
                L('remove %s' % rpath)
                os.unlink(aspath)
            continue
# Forward pass: hard-link new/changed files from the CVS tree into the
# svn tree; reverse pass: drop files that disappeared from CVS.
ScanDir(SOURCE, TARGET)
ScanDir2(TARGET, SOURCE)
| Python |
import os, sys, time
import socket
import random
O3_BASE_PATH = '/is/app/o3'
O3_LIB_PATH = ['base', 'lib/o3']
sys.path.extend([ '%s/%s' % (O3_BASE_PATH, lib) for lib in O3_LIB_PATH])
from o3grid import constants as CC
from o3grid.protocol import O3Channel, O3Call
# -----
def ReadConfigStrFromFile(fn):
    """Return the whitespace-stripped contents of config file *fn*."""
    handle = file(fn)
    data = handle.read()
    handle.close()
    return data.strip()
# =====
def GetEntity(entity, out):
    """Fetch warehouse entity *entity* (name or decimal id string) and
    write its bytes to *out*.

    *out* may be None (write to stdout), a path string (the file is
    created, written and closed here), or an already-open file object
    (left open for the caller).  Returns the entity size in bytes.
    Raises RuntimeError on any protocol failure -- the original raised
    bare strings, which has been a TypeError since Python 2.6.
    """
    # get warehouse service entry
    warehousenode = ReadConfigStrFromFile(O3_BASE_PATH + '/etc/WAREHOUSE')
    warehouse = (socket.gethostbyname(warehousenode + '-in'), CC.DEFAULT_PORT)
    # resolve the entity id and size
    res = O3Call(warehouse,
        CC.SVC_WAREHOUSE, 'LISTENTITY0', entity)
    if res[0] != CC.RET_OK:
        raise RuntimeError('WAREHOUSE.LISTENTITY0:CALL')
    if len(res[2]) != 1:
        raise RuntimeError('WAREHOUSE.LISTENTITY0:INVALID-NAME')
    entityinfo = res[2][0]
    eid = entityinfo[0]
    esize = entityinfo[3]
    # pick one shadow copy at random for load spreading
    res = O3Call(warehouse,
        CC.SVC_WAREHOUSE, 'LISTENTITYLOCATION0', [eid,])
    if res[0] != CC.RET_OK:
        raise RuntimeError('WAREHOUSE.LISTENTITYLOCATION0:CALL')
    shadows = res[2][eid]
    if len(shadows) < 1:
        raise RuntimeError('WAREHOUSE.LISTENTITYLOCATION0:NO-SHADOW-COPY')
    sid, snode, saddr, slabel, sname, ssize = random.choice(shadows)
    # Build the output file object.  The original chain of plain `if`
    # tests fell through after the None case and always raised 'XX:OUT'
    # for out=None; a proper elif chain fixes that.  Track whether we
    # opened the handle so only our own file gets closed.
    ownfout = False
    if out is None:
        fout = sys.stdout
    elif type(out) == str:
        fout = file(out, 'w')
        ownfout = True
    elif type(out) == file:
        fout = out
    else:
        raise RuntimeError('XX:OUT')
    # NOTE: the original printed O3_BASE_PATH here, which corrupted the
    # data stream when writing the entity to stdout -- removed.
    S = O3Channel().connect((saddr, CC.DEFAULT_PORT))
    try:
        res = S(CC.SVC_SPACE, 'ROOMGET', slabel, sname, 0, ssize, eid)
        if res[0] == CC.RET_ERROR:
            raise RuntimeError('SPACE.ROOMGET')
        bs = 512000 * 8
        rest = ssize
        while rest != 0:
            if rest > bs:
                buf = S.recvAll(bs)
            else:
                buf = S.recvAll(rest)
            if not buf:
                break   # channel closed early
            rest -= len(buf)
            fout.write(buf)
    finally:
        S.close()
        if ownfout:
            fout.close()
    return ssize
def maindn():
    """CLI entry: fetch entity argv[1] and write it to argv[2] ('-' = stdout).

    Exits 0 on success, 1 on any failure.
    """
    if sys.argv[2] != '-':
        fout = file(sys.argv[2], 'w')
    else:
        # bug fix: the original read `fout.sys = stdout` (a NameError);
        # the intent is clearly to bind the process stdout
        fout = sys.stdout
    try:
        GetEntity(sys.argv[1], fout)
    except Exception:
        sys.exit(1)
    finally:
        # never close the process-wide stdout (the original did)
        if fout is not sys.stdout:
            fout.close()
    sys.exit(0)
if __name__ == '__main__':
    # script entry point: download one entity per the command line arguments
    maindn()
| Python |
import threading, Queue
import os, random, time
import struct, zlib
import cPickle as pickle, cStringIO as StringIO
import operator
from o3grid import constants as CC
from o3grid.utility import cout, D as _D, D2 as _D2, DE as _E
from o3grid.protocol import O3Call, O3Channel, O3Space
from o3grid import job
import o3lib.base
from o3lib.fs import StartO3EntityReader
from fastmap import increase as mapincrease, partition as mappartition
from fastmap import fastdumps, fastloads, fastloads3, partitiondumps
# Identifiers the o3 job framework uses to locate and ship this mission.
MISSIONNAME = "ONEDAY01"
CODEBASE = "oneday01"
MODULENAME = "oneday01.oneday01"  # dotted module path jobs are loaded from
PARTITIONS = 8                    # number of partial-sum partition slices
MISSIONPREFIX = 'OD01'
# --- Utility for date related
def WeekPostfix(datename):
    """Return a 'WW-mm.dd-mm.dd' label for the week containing *datename*.

    datename is 'YYYY/MM/DD'; the label combines the %W week number
    (Monday-first) with the month.day of that week's Monday and Sunday.
    """
    dtime = time.strptime(datename, '%Y/%m/%d')
    day = int(time.strftime('%w', dtime))
    # bug fix: original called strftime('%W', date) -- `date` is undefined
    week = time.strftime('%W', dtime)
    if day == 0:
        day = 7  # %w reports Sunday as 0; treat it as day 7 of the week
    tz = time.mktime(dtime)
    begintz = tz - (3600 * 24 * (day - 1))   # Monday of the week
    # bug fix: the week END is 6 days AFTER its start (original subtracted,
    # which yielded a day from the previous week)
    endtz = begintz + (3600 * 24 * 6)        # Sunday of the week
    return '%s-%s-%s' % (
        week,
        time.strftime('%m.%d', time.localtime(begintz)),
        time.strftime('%m.%d', time.localtime(endtz)))
# --- OneDay01 Mission Class ---
class MOneDay01(job.Mission):
    """Mission driver: fans out one counting job per hourly log (C0-*),
    merges them through PARTITIONS partial-sum jobs (C1-P*), and finishes
    with a single summary job (C9-SUM) that writes the day's report."""
    def __init__(self, id, kwargs):
        job.Mission.__init__(self, id, kwargs)
        #self.name = MISSIONNAME
        self.codebase = CODEBASE
    def setup(self, kwargs):
        job.Mission.setup(self, kwargs)
    def prepare(self):
        """Build and fire the whole job graph for kwargs['prefix']."""
        self.starttime = time.time()
        self.insize0 = 0.0   # Mb of raw log consumed, summed over C0 results
        self.pv = 0
        # list the hourly entities under the prefix, biggest first so the
        # longest-running jobs are dispatched earliest
        res = O3Call(('localhost', CC.DEFAULT_PORT),
            CC.SVC_WAREHOUSE, 'LISTENTITY1', self.kwargs['prefix'])
        entitys = res[2]
        entitys.sort(key = operator.itemgetter('size'), reverse=True)
        res = O3Call(('localhost', CC.DEFAULT_PORT),
            CC.SVC_WAREHOUSE, 'LISTENTITYLOCATION0', [ e['id'] for e in entitys])
        shadows = res[2]
        # shared accumulators, appended to in jobFinished as results arrive
        self.hourres = []
        self.hourinfo = []
        self.partitions = []
        _D('%s:--START--:%s' % (self.id, self.kwargs['prefix']), '|')
        self.totalJob = self.newSJob('C9-SUM', MODULENAME, 'JOneDaySummary')
        self.totalJob.setup0(
            jobname = 'SUM',
            prefix = self.kwargs['prefix'],
            partitions = self.partitions,
            hourinfo = self.hourinfo)
        self.totalJob.fire()
        self.partitionJobs = []
        for i in range(PARTITIONS):
            # NOTE(review): the local name `job` shadows the imported
            # o3grid.job module within this method (harmless here)
            job = self.newSJob('C1-P%d' % i, MODULENAME, 'JPVPartitionSumJob')
            job.setup0(
                jobname = 'P%d' % i,
                hourres = self.hourres,
                partitionid = i)
            job.fire()
            self.totalJob.need(job)
            self.partitionJobs.append(job)
        serial = 0
        for e in entitys:
            #eid, ename, emtime, esize = e
            eid = e['id']
            ename = e['name']
            emtime = e['mtime']
            esize = e['size']
            # pick one shadow copy at random to spread read load
            sid, snode, saddr, slabel, sname, size = random.choice(shadows[eid])
            taskname = 'C0-%02d' % (serial)
            serial += 1
            job = self.newSJob(taskname, MODULENAME, 'JPVLogHour')
            job.name = job.id
            job.setup({
                'jobname': ename.split('/')[-1].split('.')[0],
                'entityname': ename,
                'entityid': eid,
                'addr': saddr,
                'node': snode,
                'label': slabel,
                'size': esize,})
            job.fire()
            # every partition job depends on every hourly job
            for j in self.partitionJobs:
                j.need(job)
    def jobFinished(self, job, params):
        """Collect each job's (location, resultid); log totals at C9."""
        if job.id.startswith('C0-'):
            self.hourres.append((params['location'], params['resultid']))
            self.insize0 += params.get('insize0', 0.0)
            self.hourinfo.append(params.get('restext'))
        elif job.id.startswith('C1-'):
            self.partitions.append((params['location'], params['resultid']))
        elif job.id.startswith('C9-'):
            cout('-MISSION-END- {%s|%s:%s} %.2fm %.2fs' % (
                self.id, self.name, job.name, self.insize0, time.time() - self.starttime))
# ----- UTILITIES -----
def couttimer(func, *args, **kwargs):
    """Invoke *func* with the given arguments, log its wall-clock runtime
    through cout, and pass the return value through unchanged."""
    started = time.time()
    result = func(*args, **kwargs)
    elapsed = time.time() - started
    cout('%s - %.2fs' % (func.func_name, elapsed))
    return result
# ===
def MapPlusList0(map, l):
    """Fold every (key, count) pair of mapping *l* into accumulator *map*."""
    for k in l:
        mapincrease(map, k, l[k])
# ===
def RemoteReader(queue, node, addr, label, name, size, entityid):
    """Thread body: stream entity *entityid* from the space server at *addr*
    into *queue* block by block; a final None marks end-of-stream.

    The *size* argument is ignored -- ROOMGET1 is called with size 0 and the
    server-reported size (res[2]) is used instead.  *node* is unused here.
    """
    bs = 512000 * 8
    try:
        S = O3Channel().connect((addr, CC.DEFAULT_PORT))
        res = S(CC.SVC_SPACE, 'ROOMGET1',
            label, name, 0, 0, entityid)
        if res[0] != CC.RET_OK:
            # failed handshake: bail out; finally still queues the sentinel
            return
        size = res[2]
        rest = size
        while rest != 0:
            buf = S.recvAll(min(bs, rest))
            if not buf:
                break
            rest -= len(buf)
            queue.put(buf)
        S.getMessage()
        S.close()
    finally:
        # the consumer blocks on the queue; always deliver the terminator
        queue.put(None)
# --end--
def StartRemoteReader(*args):
    """Spawn a daemon thread running RemoteReader(*args); return the thread."""
    worker = threading.Thread(
        target = RemoteReader,
        name = 'REMOTEREADER',
        args = args)
    worker.daemon = True
    worker.start()
    return worker
# ===
class JPVPartitionSumJob(object):
    """C1 job: merge one partition slice of every hourly result into one
    combined (ip, url, ut, uc) count map and store it in the local space."""
    def __init__(self, params, job):
        self.jobinfo = job
        self.params = params
        self.workspace = job['workspace']
    def run(self):
        params = self.params
        partitionid = params['partitionid']
        # accumulators: key -> occurrence count
        ip = {}
        url = {}
        ut = {}
        uc = {}
        for i in self.params['hourres']:
            # i is (space location, hourly jobid); the hourly job stored one
            # slice per partition under '<jobid>_RES_<partition>'
            content = O3Space(i[0]).GET('%s_RES_%d' % (i[1], partitionid))
            (hip, hurl, hut, huc) = fastloads3(content)
            MapPlusList0(ip, hip)
            MapPlusList0(url, hurl)
            MapPlusList0(ut, hut)
            MapPlusList0(uc, huc)
        content = fastdumps((ip, url, ut, uc))
        S = O3Space(('127.0.0.1', CC.DEFAULT_PORT))
        resid = '%s_RES' % self.jobinfo['jobid']
        S.PUT(resid, content)
        self.jobinfo['result'] = {
            'resultid': resid,
            'location': self.workspace.server.entry,
        }
# ===
class JOneDaySummary(object):
    """C9 job: combine all partition results into day totals and save a
    human-readable report (day line plus per-hour lines)."""
    def __init__(self, params, job):
        self.jobinfo = job
        self.params = params
        self.workspace = job['workspace']
    def run(self):
        params = self.params
        ip = {}
        url = {}
        ut = {}
        uc = {}
        for i in self.params['partitions']:
            content = O3Space(i[0]).GET(i[1])
            (hip, hurl, hut, huc) = fastloads(content)
            # partitions hold disjoint key ranges, so plain update() suffices
            # -- presumably guaranteed by partitiondumps; confirm
            ip.update(hip)
            url.update(hurl)
            ut.update(hut)
            uc.update(huc)
        cout('%s ip:%d url:%d ut:%d uc:%d' % (
            self.jobinfo['jobid'], len(ip), len(url), len(ut), len(uc)))
        O3 = o3lib.base.O3(self.workspace)
        # One Day Result
        prefix = self.params['prefix']
        hourinfo = self.params['hourinfo']   # entries: [jobname, {counts}]
        logname, year, month, day = prefix.split('/')[1:]
        hourname = 'oneday01/%s/day/%s-%s.%s.%s' % (year, logname, year, month, day)
        def hourinfonamekey(x):
            # sort key: trailing hour token of the job name
            return x[0].split('-')[-1]
        hourinfo.sort(key = hourinfonamekey)
        pv = sum([h[1]['pv'] for h in hourinfo])
        hourtxt = '\n'.join([
            '%s %s' % (hourinfonamekey(h), ','.join([
                '%s:%s' % (i, str(h[1][i])) for i in ('ip', 'ut', 'uc', 'pv', 'url')])
            ) for h in hourinfo])
        daytxt = '%s/%s.%s.%s - ip:%d,ut:%d,uc:%d,pv:%d,url:%d' % (
            logname, year, month, day, len(ip), len(ut), len(uc), pv, len(url))
        O3.saveResult(hourname, '\n'.join((daytxt, '--hourinfo--', hourtxt)))
        self.jobinfo['result'] = 0
        # --work-point--
# --work-point--
# ===
class JPVLogHour(object):
    """C0 job: stream one hourly log, count ip/url/ut/uc occurrences, and
    store the counts split into PARTITIONS partition slices."""
    def __init__(self, params, job):
        self.kwargs = params
        self.jobinfo = job
        self.workspace = job['workspace']
    def run(self):
        begin = time.time()
        params = self.kwargs
        entityid = params['entityid']
        entityname = params['entityname']
        addr = params['addr']
        label = params['label']
        size = params['size']
        node = params['node']
        queue = Queue.Queue(10)  # bounded: throttles the reader thread
        #reader = StartRemoteReader(queue, node, addr, label, entityname, size, entityid)
        reader = StartO3EntityReader(
            queue,
            node = node, addr = addr, label = label,
            name = entityname, size = size, entityid = entityid)
        UL = PVLogCounter0(queue)
        UL.count()
        cout('%s ip:%d url:%d ut:%d uc:%d' % (
            self.jobinfo['jobid'],
            len(UL.ip), len(UL.url), len(UL.ut), len(UL.uc)))
        # -- Dump dict to string IO buffer
        souts = couttimer(UL.dump, PARTITIONS)
        S = O3Space(('127.0.0.1', CC.DEFAULT_PORT))
        jobid = self.jobinfo['jobid']
        for i in range(PARTITIONS):
            resid = '%s_RES_%d' % (jobid, i)
            S.PUT(resid, souts[i])
        self.jobinfo['result'] = {
            'resultid': jobid,
            'location': self.workspace.server.entry,
            'insize0': UL.bytes / 1024.0 / 1024,   # raw input size in Mb
            'restext':[
                self.jobinfo['jname'], {
                    'pv': UL.lines, 'ip': len(UL.ip), 'url': len(UL.url),
                    'ut': len(UL.ut), 'uc': len(UL.uc)}
            ],
            'debuginfo': '%s at %s - %.2fMb/%.2fs' % (
                jobid,
                self.workspace.server.id,
                UL.bytes / 1024.0/1024,
                time.time() - begin),
        }
# ===
class PVLogCounter0(object):
    """Split queued log blocks into lines and count occurrences of the
    ip (col 2), url (col 4), ut (col 11) and uc (col 12) fields.

    Lines whose column 7 starts with '4' are skipped -- presumably 4xx
    HTTP statuses; confirm against the log format.
    """
    def __init__(self, queue):
        self.curline = []
        self.lines = 0
        self.queue = queue
        self.ip = {}
        self.url = {}
        self.ut = {}
        self.uc = {}
        self.bytes = 0
    def count(self):
        """Consume the queue until a falsy sentinel, tallying all counters."""
        # local aliases avoid repeated attribute lookups in the hot loop
        uc = self.uc
        ut = self.ut
        ip = self.ip
        url = self.url
        queue = self.queue
        lines = 0
        bytes = 0
        pending = ''   # partial line carried across block boundaries
        loop = True
        while loop:
            bs = self.queue.get()
            if not bs:
                # end of stream: flush the carried partial line, if any
                loop = False
                if pending == '':
                    continue
                tokens = pending.split('\n')
                pending = ''
            else:
                bytes += len(bs)
                tokens = bs.split('\n')
                tokens[0] = pending + tokens[0]
                pending = tokens.pop()
            for line in tokens:
                try:
                    l = line.split('\t')
                    if l[7][0] == '4':
                        continue
                    mapincrease(ip, l[2])
                    mapincrease(url, l[4])
                    mapincrease(ut, l[11])
                    mapincrease(uc, l[12])
                    lines += 1
                except Exception, e:
                    # malformed lines are logged and skipped
                    _D('EXCEPTION %s %s' % (repr(e), line))
        self.lines = lines
        self.bytes = bytes
    # ---
    def dump(self, n):
        """Serialize each counter into *n* partition slices; return a list of
        n strings, each the concatenated (ip, url, ut, uc) dumps."""
        #res = []
        ips = partitiondumps(self.ip, n)
        urls = partitiondumps(self.url, n)
        uts = partitiondumps(self.ut, n)
        ucs = partitiondumps(self.uc, n)
        return [ ''.join((ips[x], urls[x], uts[x], ucs[x])) for x in range(n) ]
# --end-- class PVLogCounter01
def generateJob(job, workspace):
    """Instantiate the job class named in job['class'] from this module,
    recording the workspace on the job dict first."""
    job['workspace'] = workspace
    jobclass = globals()[job['class']]
    return jobclass(job.get('params', {}), job)
O3Mission = MOneDay01
| Python |
#!/usr/bin/python
# Codebase manifest consumed by the o3 code-distribution machinery.
codebase = dict(
    name = 'isgrid0',
    version = '0.0.0.1',
    files = [
        'isgrid0/__init__.py',
        'isgrid0/isgrid0.py',
    ],
    modules = [
        'isgrid0.isgrid0',
        'isgrid0',
    ],
)
| Python |
import threading, Queue
import os, random, time
import struct, zlib
import cPickle as pickle, cStringIO as StringIO
import operator
from o3grid import constants as CC
from o3grid.utility import cout, D as _D, D2 as _D2, DE as _E
from o3grid.protocol import O3Call, O3Channel, O3Space
from o3grid import job
import o3lib.base
import o3lib.fs
from fastmap import increase as mapincrease, partition as mappartition
from fastmap import fastdumps, fastloads, fastloads3, partitiondumps
# Identifiers the o3 job framework uses to locate and ship this mission.
MISSIONNAME = "ONEDAY02"
CODEBASE = "oneday02"
MODULENAME = "oneday02.oneday02"  # dotted module path jobs are loaded from
PARTITIONS = 8                    # number of partial-sum partition slices
MISSIONPREFIX = 'OD02'
# --- Utility for date related
def WeekPostfix(datename):
    """Return a 'WW-mm.dd-mm.dd' label for the week containing *datename*.

    datename is 'YYYY/MM/DD'; the label combines the %W week number
    (Monday-first) with the month.day of that week's Monday and Sunday.
    """
    dtime = time.strptime(datename, '%Y/%m/%d')
    day = int(time.strftime('%w', dtime))
    # bug fix: original called strftime('%W', date) -- `date` is undefined
    week = time.strftime('%W', dtime)
    if day == 0:
        day = 7  # %w reports Sunday as 0; treat it as day 7 of the week
    tz = time.mktime(dtime)
    begintz = tz - (3600 * 24 * (day - 1))   # Monday of the week
    # bug fix: the week END is 6 days AFTER its start (original subtracted,
    # which yielded a day from the previous week)
    endtz = begintz + (3600 * 24 * 6)        # Sunday of the week
    return '%s-%s-%s' % (
        week,
        time.strftime('%m.%d', time.localtime(begintz)),
        time.strftime('%m.%d', time.localtime(endtz)))
# --- OneDay02 Mission Class ---
class MOneDay02(job.Mission):
    """Mission driver: fans out one counting job per hourly log (C0-*),
    merges them through PARTITIONS partial-sum jobs (C1-P*), and finishes
    with a single summary job (C9-SUM) that writes the day's report."""
    def __init__(self, id, kwargs):
        job.Mission.__init__(self, id, kwargs)
        self.name = MISSIONNAME
        self.codebase = CODEBASE
    def setup(self, kwargs):
        self.kwargs = kwargs
    def start(self):
        """Build and fire the whole job graph for kwargs['prefix']."""
        self.starttime = time.time()
        self.insize0 = 0.0   # Mb of raw log consumed, summed over C0 results
        self.pv = 0
        # list the hourly entities under the prefix, biggest first so the
        # longest-running jobs are dispatched earliest
        res = O3Call(('localhost', CC.DEFAULT_PORT),
            CC.SVC_WAREHOUSE, 'LISTENTITY1', self.kwargs['prefix'])
        entitys = res[2]
        entitys.sort(key = operator.itemgetter('size'), reverse=True)
        res = O3Call(('localhost', CC.DEFAULT_PORT),
            CC.SVC_WAREHOUSE, 'LISTENTITYLOCATION0', [ e['id'] for e in entitys])
        shadows = res[2]
        # shared accumulators, appended to in jobFinished as results arrive
        self.hourres = []
        self.hourinfo = []
        self.partitions = []
        _D('%s:--START--:%s' % (self.id, self.kwargs['prefix']), '|')
        self.totalJob = self.newSJob('C9-SUM', MODULENAME, 'JOneDaySummary')
        self.totalJob.setup0(
            prefix = self.kwargs['prefix'],
            partitions = self.partitions,
            hourinfo = self.hourinfo)
        self.totalJob.fire()
        self.partitionJobs = []
        for i in range(PARTITIONS):
            # NOTE(review): the local name `job` shadows the imported
            # o3grid.job module within this method (harmless here)
            job = self.newSJob('C1-P%d' % i, MODULENAME, 'JPVPartitionSumJob')
            job.setup0(
                hourres = self.hourres,
                partitionid = i)
            job.fire()
            self.totalJob.need(job)
            self.partitionJobs.append(job)
        serial = 0
        for e in entitys:
            #eid, ename, emtime, esize = e
            eid = e['id']
            ename = e['name']
            emtime = e['mtime']
            esize = e['size']
            # pick one shadow copy at random to spread read load
            sid, snode, saddr, slabel, sname, size = random.choice(shadows[eid])
            taskname = 'C0-%02d-%s' % (serial, ename.split('/')[-1].split('.')[0])
            serial += 1
            job = self.newSJob(taskname, MODULENAME, 'JPVLogHour')
            job.name = job.id
            job.setup0(
                entityname = ename,
                entityid = eid,
                addr = saddr,
                node = snode,
                label = slabel,
                size = esize,)
            job.fire()
            # every partition job depends on every hourly job
            for j in self.partitionJobs:
                j.need(job)
    def jobFinished(self, job, params):
        """Collect each job's (location, resultid); log totals at C9."""
        if job.id.startswith('C0-'):
            self.hourres.append((params['location'], params['resultid']))
            self.insize0 += params.get('insize0', 0.0)
            self.hourinfo.append(params.get('restext'))
        elif job.id.startswith('C1-'):
            self.partitions.append((params['location'], params['resultid']))
        elif job.id.startswith('C9-'):
            cout('-MISSION-END- {%s} %.2fm %.2fs' % (
                self.id, self.insize0, time.time() - self.starttime))
# ----- UTILITIES -----
def couttimer(func, *args, **kwargs):
    """Invoke *func* with the given arguments, log its wall-clock runtime
    through cout, and pass the return value through unchanged."""
    started = time.time()
    result = func(*args, **kwargs)
    elapsed = time.time() - started
    cout('%s - %.2fs' % (func.func_name, elapsed))
    return result
# ===
def MapPlusList0(map, l):
    """Fold every (key, count) pair of mapping *l* into accumulator *map*."""
    for k in l:
        mapincrease(map, k, l[k])
# ===
def RemoteReader(queue, node, addr, label, name, size, entityid):
    """Thread body: stream entity *entityid* from the space server at *addr*
    into *queue* block by block; a final None marks end-of-stream.

    The *size* argument is ignored -- ROOMGET1 is called with size 0 and the
    server-reported size (res[2]) is used instead.  *node* is unused here.
    """
    bs = 512000 * 8
    try:
        S = O3Channel().connect((addr, CC.DEFAULT_PORT))
        res = S(CC.SVC_SPACE, 'ROOMGET1',
            label, name, 0, 0, entityid)
        if res[0] != CC.RET_OK:
            # failed handshake: bail out; finally still queues the sentinel
            return
        size = res[2]
        rest = size
        while rest != 0:
            buf = S.recvAll(min(bs, rest))
            if not buf:
                break
            rest -= len(buf)
            queue.put(buf)
        S.getMessage()
        S.close()
    finally:
        # the consumer blocks on the queue; always deliver the terminator
        queue.put(None)
# --end--
def StartRemoteReader(queue, **P):
    """Spawn a daemon thread streaming an entity into *queue* via
    o3lib.fs.O3EntityReader; return the thread object."""
    worker = threading.Thread(
        target = o3lib.fs.O3EntityReader,
        name = 'REMOTEREADER',
        args = (queue, ), kwargs = P)
    worker.daemon = True
    worker.start()
    return worker
# ===
class JPVPartitionSumJob(object):
    """C1 job: merge one partition slice of every hourly result into one
    combined (ip, url, ut, uc) count map and store it in the local space."""
    def __init__(self, params, job):
        self.jobinfo = job
        self.params = params
        self.workspace = job['workspace']
    def run(self):
        params = self.params
        partitionid = params['partitionid']
        # accumulators: key -> occurrence count
        ip = {}
        url = {}
        ut = {}
        uc = {}
        for i in self.params['hourres']:
            # i is (space location, hourly jobid); the hourly job stored one
            # slice per partition under '<jobid>_RES_<partition>'
            content = O3Space(i[0]).GET('%s_RES_%d' % (i[1], partitionid))
            (hip, hurl, hut, huc) = fastloads3(content)
            MapPlusList0(ip, hip)
            MapPlusList0(url, hurl)
            MapPlusList0(ut, hut)
            MapPlusList0(uc, huc)
        content = fastdumps((ip, url, ut, uc))
        S = O3Space(('127.0.0.1', CC.DEFAULT_PORT))
        resid = '%s_RES' % self.jobinfo['jobid']
        S.PUT(resid, content)
        self.jobinfo['result'] = {
            'resultid': resid,
            'location': self.workspace.server.entry,
        }
# ===
class JOneDaySummary(object):
    """C9 job: combine all partition results into day totals and save a
    human-readable report (day line plus per-hour lines)."""
    def __init__(self, params, job):
        self.jobinfo = job
        self.params = params
        self.workspace = job['workspace']
    def run(self):
        params = self.params
        ip = {}
        url = {}
        ut = {}
        uc = {}
        for i in self.params['partitions']:
            content = O3Space(i[0]).GET(i[1])
            (hip, hurl, hut, huc) = fastloads(content)
            # partitions hold disjoint key ranges, so plain update() suffices
            # -- presumably guaranteed by partitiondumps; confirm
            ip.update(hip)
            url.update(hurl)
            ut.update(hut)
            uc.update(huc)
        cout('%s ip:%d url:%d ut:%d uc:%d' % (
            self.jobinfo['jobid'], len(ip), len(url), len(ut), len(uc)))
        O3 = o3lib.base.O3(self.workspace)
        # One Day Result
        prefix = self.params['prefix']
        hourinfo = self.params['hourinfo']   # entries: [jobid, {counts}]
        logname, year, month, day = prefix.split('/')[1:]
        hourname = 'oneday02/%s/day/%s-%s.%s.%s' % (year, logname, year, month, day)
        def hourinfonamekey(x):
            # sort key: trailing hour token of the job id
            return x[0].split('-')[-1]
        hourinfo.sort(key = hourinfonamekey)
        pv = sum([h[1]['pv'] for h in hourinfo])
        hourtxt = '\n'.join([
            '%s %s' % (hourinfonamekey(h), ','.join([
                '%s:%s' % (i, str(h[1][i])) for i in ('ip', 'ut', 'uc', 'pv', 'url')])
            ) for h in hourinfo])
        daytxt = '%s/%s.%s.%s - ip:%d,ut:%d,uc:%d,pv:%d,url:%d' % (
            logname, year, month, day, len(ip), len(ut), len(uc), pv, len(url))
        O3.saveResult(hourname, '\n'.join((daytxt, '--hourinfo--', hourtxt)))
        self.jobinfo['result'] = 0
        # --work-point--
# --work-point--
# ===
class JPVLogHour(object):
    """C0 job: stream one hourly log, count ip/url/ut/uc occurrences, and
    store the counts split into PARTITIONS partition slices."""
    def __init__(self, params, job):
        self.kwargs = params
        self.jobinfo = job
        self.workspace = job['workspace']
    def run(self):
        begin = time.time()
        params = self.kwargs
        entityid = params['entityid']
        entityname = params['entityname']
        addr = params['addr']
        label = params['label']
        size = params['size']
        node = params['node']
        queue = Queue.Queue(10)  # bounded: throttles the reader thread
        reader = StartRemoteReader(queue,
            node = node, addr = addr, label = label,
            name = entityname, size = size, entityid = entityid)
        UL = PVLogCounter0(queue)
        UL.count()
        cout('%s ip:%d url:%d ut:%d uc:%d' % (
            self.jobinfo['jobid'],
            len(UL.ip), len(UL.url), len(UL.ut), len(UL.uc)))
        # -- Dump dict to string IO buffer
        souts = couttimer(UL.dump, PARTITIONS)
        S = O3Space(('127.0.0.1', CC.DEFAULT_PORT))
        jobid = self.jobinfo['jobid']
        for i in range(PARTITIONS):
            resid = '%s_RES_%d' % (jobid, i)
            S.PUT(resid, souts[i])
        self.jobinfo['result'] = {
            'resultid': jobid,
            'location': self.workspace.server.entry,
            'insize0': UL.bytes / 1024.0 / 1024,   # raw input size in Mb
            'restext':[
                jobid, {
                    'pv': UL.lines, 'ip': len(UL.ip), 'url': len(UL.url),
                    'ut': len(UL.ut), 'uc': len(UL.uc)}
            ],
            'debuginfo': '%s at %s - %.2fMb/%.2fs' % (
                jobid,
                self.workspace.server.id,
                UL.bytes / 1024.0/1024,
                time.time() - begin),
        }
# ===
class PVLogCounter0(object):
    """Split queued log blocks into lines and count occurrences of the
    ip (col 2), url (col 4), ut (col 11) and uc (col 12) fields.

    Lines whose column 7 starts with '4' are skipped -- presumably 4xx
    HTTP statuses; confirm against the log format.
    """
    def __init__(self, queue):
        self.curline = []
        self.lines = 0
        self.queue = queue
        self.ip = {}
        self.url = {}
        self.ut = {}
        self.uc = {}
        self.bytes = 0
    def count(self):
        """Consume the queue until a falsy sentinel, tallying all counters."""
        # local aliases avoid repeated attribute lookups in the hot loop
        uc = self.uc
        ut = self.ut
        ip = self.ip
        url = self.url
        queue = self.queue
        lines = 0
        bytes = 0
        pending = ''   # partial line carried across block boundaries
        loop = True
        while loop:
            bs = self.queue.get()
            if not bs:
                # end of stream: flush the carried partial line, if any
                loop = False
                if pending == '':
                    continue
                tokens = pending.split('\n')
                pending = ''
            else:
                bytes += len(bs)
                tokens = bs.split('\n')
                tokens[0] = pending + tokens[0]
                pending = tokens.pop()
            for line in tokens:
                try:
                    l = line.split('\t')
                    if l[7][0] == '4':
                        continue
                    mapincrease(ip, l[2])
                    mapincrease(url, l[4])
                    mapincrease(ut, l[11])
                    mapincrease(uc, l[12])
                    lines += 1
                except Exception, e:
                    # malformed lines are logged and skipped
                    _D('EXCEPTION %s %s' % (repr(e), line))
        self.lines = lines
        self.bytes = bytes
    # ---
    def dump(self, n):
        """Serialize each counter into *n* partition slices; return a list of
        n strings, each the concatenated (ip, url, ut, uc) dumps."""
        #res = []
        ips = partitiondumps(self.ip, n)
        urls = partitiondumps(self.url, n)
        uts = partitiondumps(self.ut, n)
        ucs = partitiondumps(self.uc, n)
        return [ ''.join((ips[x], urls[x], uts[x], ucs[x])) for x in range(n) ]
# --end-- class PVLogCounter01
def generateJob(job, workspace):
    """Instantiate the job class named in job['class'] from this module,
    recording the workspace on the job dict first."""
    job['workspace'] = workspace
    jobclass = globals()[job['class']]
    return jobclass(job.get('params', {}), job)
O3Mission = MOneDay02
| Python |
#!/usr/bin/python
# Codebase manifest consumed by the o3 code-distribution machinery.
codebase = dict(
    name = 'oneday01',
    version = '0.0.0.1',
    files = [
        'oneday01/__init__.py',
        'oneday01/oneday01.py',
    ],
    modules = [
        'oneday01.oneday01',
        'oneday01',
    ],
)
| Python |
#!/usr/bin/python
# Codebase manifest consumed by the o3 code-distribution machinery.
codebase = dict(
    name = 'oneday02',
    version = '0.0.0.1',
    files = [
        'oneday02/__init__.py',
        'oneday02/oneday02.py',
    ],
    modules = [
        'oneday02.oneday02',
        'oneday02',
    ],
)
| Python |
#!/usr/bin/python
# Codebase manifest consumed by the o3 code-distribution machinery.
codebase = dict(
    name = 'isgrid0',
    version = '0.0.0.1',
    files = [
        'isgrid0/__init__.py',
        'isgrid0/isgrid0.py',
    ],
    modules = [
        'isgrid0.isgrid0',
        'isgrid0',
    ],
)
| Python |
#!/usr/bin/python
# Codebase manifest consumed by the o3 code-distribution machinery.
codebase = dict(
    name = 'uipreducer01',
    version = '0.0.0.1',
    files = [
        'uipreducer01/__init__.py',
        'uipreducer01/uipreducer01.py',
    ],
    modules = [
        'uipreducer01.uipreducer01',
        'uipreducer01',
    ],
)
| Python |
import threading, Queue
import os, random, time
from o3grid import job, constants as CC
from o3grid.protocol import O3Channel, O3Space, O3Call
from o3grid.utility import cout, D as _D, D2 as _D2, DE as _E
from fastmap import inetstr2int as inet2int, _dumps, _loads
import o3lib.base
from o3lib.fs import StartO3EntityReader
# Identifiers used when this module is shipped to worker nodes.
VERSION = "0.0.0.1"
CODEBASE = 'uipreducer01'
MODULENAME = 'uipreducer01.uipreducer01'  # dotted path for job loading
# ----- utility classes and functions
# === Log Scanner class
# ==C-BaseScanner
class BaseScanner(object):
    """Consume text blocks from a queue and feed complete lines to analyse().

    A mixin class (combined via StartRemoteLogScanner) provides
    analyse(lines).  A falsy block (None/'') marks end-of-stream.  Tracks
    the total number of lines and payload bytes seen.
    """
    def __init__(self, queue):
        self.queue = queue
        self.lines = 0
        self.bytes = 0
    # ---
    # Core Loop for log scanner
    # ---
    def scan(self): # ==M-BaseScanner-scan
        loop = True
        pending = ''   # trailing partial line carried between blocks
        queue = self.queue
        while loop:
            block = queue.get()
            if not block:
                # end of stream: flush any carried partial line
                loop = False
                if pending == '':
                    continue
                lines = pending.split('\n')
                self.lines += len(lines)
                pending = ''   # bug fix: was `pending == ''`, a no-op comparison
            else:
                lines = block.split('\n')
                lines[0] = pending + lines[0]
                pending = lines.pop()
                self.lines += len(lines)
                self.bytes += len(block)
            self.analyse(lines)
# === Union Log IP counter
# ==C-UnionLogIPCounter
class UnionLogIPCounter(object):
    """Collect distinct client IPs (stored as ints via inet2int) per product
    family from union access-log lines.

    Lines must contain a '|rlink|' marker; the referer URL after it decides
    which set the leading IP lands in (uume / itv / dzh / tt).
    """
    def __init__(self):
        self.uume = set()
        self.itv = set()
        self.dzh = set()
        self.tt = set()
        # subdomain prefixes of *.mop.com counted as dzh resp. tt
        self.dzhDomains = (
            'dzh', 'dzh2', 'dzh1',
            'search', 'search2',
            'txt', 'topic', 'best')
        self.ttDomains = ('tt', 'post')
    # ==M-UnionLogIPcounter-analyse
    def analyse(self, lines):
        dzhdomains = self.dzhDomains
        ttdomains = self.ttDomains
        for line in lines:
            head, s, next = line.partition('|rlink|')
            if not s:
                continue
            # the first space-separated field of *head* is the client IP.
            # NOTE(review): next[2] / next[13:] index into the rlink URL at
            # fixed offsets -- presumably skipping a scheme prefix; confirm
            # against the actual log format before changing anything here.
            if next[2] == 'u': # UUME
                self.uume.add(inet2int(head.split(' ', 1)[0]))
                continue
            if next[2] == 'i': # ITV
                self.itv.add(inet2int(head.split(' ', 1)[0]))
                continue
            # MOP
            domain = next[13:].partition('.mop.com/')[0]
            if domain in dzhdomains:
                self.dzh.add(inet2int(head.split(' ', 1)[0]))
                continue
            if domain in ttdomains:
                self.tt.add(inet2int(head.split(' ', 1)[0]))
                continue
# ==C-S15LogIPCounter
class S15LogIPCounter(object):
def __init__(self):
self.ip = set()
# ==M-S15LogIPCounter-analyse
def analyse(self, lines):
for line in lines:
tokens = line.split('\t')
if tokens[6] == '404':
continue
try:
self.ip.add(inet2int(tokens[2]))
except Exception, e:
print tokens
print line
raise e
# ==F-StartRemoteLogScanner
def StartRemoteLogScanner(cType, **params):
    """Combine BaseScanner with mixin *cType*, wire it to a bounded queue
    fed by a remote entity-reader thread, and return the triple
    (queue, reader_thread, scanner)."""
    class CombinedScanner(BaseScanner, cType):
        def __init__(self, q):
            BaseScanner.__init__(self, q)
            cType.__init__(self)
    feed = Queue.Queue(10)
    reader = StartO3EntityReader(feed, **params)
    return (feed, reader, CombinedScanner(feed))
# ----- job classes
# ==C-JOBBase
class JOBBase(object):
    """Shared plumbing for job classes: keeps the job info dict, workspace
    and params, and fills the job's 'result' entry in the standard shapes."""
    def __init__(self, params, job):
        self.info = job
        self.workspace = job['workspace']
        self.params = params
    # ==M-JOBBase-setupResult1
    def setupResult1(self, begin):
        """Record a result entry whose debug info carries only the runtime."""
        jid = self.info['jobid']
        server = self.workspace.server
        self.info['result'] = {
            'resultid': jid,
            'location': server.entry,
            'debuginfo': '%s at %s - %.2fs' % (
                '-'.join(jid.split('-', 2)[:2]),
                server.id,
                time.time() - begin,
            )
        }
    # ==M-JOBBase-updateResult
    def updateResult(self, *args, **kwargs):
        """Merge extra keys into the already-created result dict."""
        self.info['result'].update(*args, **kwargs)
    # ==M-JOBBase-setupResult0
    def setupResult0(self, begin, scanner):
        """Record a result entry that also reports the input size in Mb."""
        jid = self.info['jobid']
        server = self.workspace.server
        megabytes = scanner.bytes / 1024.0 / 1024
        self.info['result'] = {
            'resultid': jid,
            'location': server.entry,
            'insize0': megabytes,
            'debuginfo': '%s at %s - %.2fMb/%.2fs' % (
                '-'.join(jid.split('-', 2)[:2]),
                server.id,
                megabytes,
                time.time() - begin),
        }
# ==C-JOBS15IPHour
class JOBS15IPHour(JOBBase):
    # - addr, label, entityname, eneityid, size
    # - hour, logname
    # ==M-JOBS15IPHour-run
    def run(self):
        """Stream one hourly s15 log and store its distinct-IP set under
        this job's id in the local space."""
        P = self.params
        begin = time.time()
        #eid = params['entityid']
        #ename = params['entityname']
        #addr = params['addr']
        #label = params['label']
        #node = params['node']
        (queue, reader, scanner) = StartRemoteLogScanner(S15LogIPCounter,
            node = P['node'],
            addr = P['addr'],
            label = P['label'],
            name = P['entityname'],
            #size = P['size'],
            size = 0,  # 0 asks ROOMGET1 to report the actual entity size
            entityid = P['entityid'])
        scanner.scan()
        S = O3Space()
        name = self.info['jobid']
        S.PUT(name, _dumps(scanner.ip))
        self.setupResult0(begin, scanner)
        cout('-JOB-OUT- %s %d' % (
            self.info['jobid'],
            len(scanner.ip)))
# ==C-JOBUnionIPHour
class JOBUnionIPHour(JOBBase):
    # ==M-JOBUnionIPHour-run
    def run(self):
        """Stream one hourly union log and store four per-product IP sets
        under '<jobid>-{dzh,tt,uume,itv}'."""
        P = self.params
        begin = time.time()
        #eid = params['entityid']
        #ename = params['entityname']
        #addr = params['addr']
        #label = params['label']
        #node = params['node']
        (queue, reader, scanner) = StartRemoteLogScanner(UnionLogIPCounter,
            node = P['node'],
            addr = P['addr'],
            label = P['label'],
            name = P['entityname'],
            size = 0,  # 0 asks ROOMGET1 to report the actual entity size
            #size = P['size'],
            entityid = P['entityid'])
        scanner.scan()
        S = O3Space()
        name = "%s-" % (self.info['jobid'])
        S.PUT(name + "dzh", _dumps(scanner.dzh))
        S.PUT(name + "tt", _dumps(scanner.tt))
        S.PUT(name + "uume", _dumps(scanner.uume))
        S.PUT(name + "itv", _dumps(scanner.itv))
        cout('-JOB-OUT- %s uume:%d dzh:%d tt:%d itv:%d' % (
            self.info['jobid'],
            len(scanner.uume), len(scanner.dzh),
            len(scanner.tt), len(scanner.itv)))
        self.setupResult0(begin, scanner)
# ==C-JOBUnionIPDay
class JOBUnionIPDay(JOBBase):
    def run(self):
        """Union all hourly union-log IP sets of P['logname'] into one
        day-level set and store it under this job's id."""
        begin = time.time()
        P = self.params
        logname = P['logname']
        ip = set()
        for i in P['hours']:
            # i is (location, hourly jobid); the hourly job stored one set
            # per log family under '<jobid>-<family>'
            content = O3Space(i[0]).GET('%s-%s' % (i[1], logname))
            hourip = _loads(content)
            ip.update(hourip)
        name = "%s" % (self.info['jobid'])
        S = O3Space()
        S.PUT(name, _dumps(ip))
        self.setupResult1(begin)
        self.updateResult(logname = logname)
        cout('-JOB-OUT- %s %d' % (
            self.info['jobid'], len(ip)))
# ==C-JOBS15IPDay
class JOBS15IPDay(JOBBase):
    def run(self):
        """Union all hourly s15 IP sets of P['logname'] into one day-level
        set and store it under this job's id."""
        begin = time.time()
        P = self.params
        logname = P['logname']
        ip = set()
        for i in P['hours']:
            # i is (location, hourly result id)
            content = O3Space(i[0]).GET(i[1])
            ip.update(_loads(content))
        name = '%s' % (self.info['jobid'])
        O3Space().PUT(name, _dumps(ip))
        self.setupResult1(begin)
        self.updateResult(logname = logname)
        cout('-JOB-OUT- %s %d' % (
            self.info['jobid'], len(ip)))
# ==C-JOBIPDayAll
class JOBIPDayAll(JOBBase):
    """Z0 job: for every log family compare the day's direct IP set with the
    union-log IP set and save the sorted comparison report under uip01/."""
    def run(self):
        # note: the original also initialized an unused `begin` timer here
        P = self.params
        lognames = P['lognames']
        logfiles = P['logfiles']   # name / 'union-<name>' -> (location, resultid)
        restext = []
        for logname in lognames:
            f1 = logfiles[logname]
            f2 = logfiles["union-" + logname]
            c1 = O3Space(f1[0]).GET(f1[1])
            c2 = O3Space(f2[0]).GET(f2[1])
            ip1 = _loads(c1)
            ip2 = _loads(c2)
            # format the comparison once; the original rebuilt the identical
            # string three times
            stat = '%s: ORIGIN:%d UNION:%d ORIGIN-UNION:%d/%d' % (
                logname, len(ip1), len(ip2), len(ip1 - ip2), len(ip1) - len(ip2))
            cout(stat)
            restext.append(stat)
            cout('-JOB-OUT-RESULT %s %s' % (P['logdate'], stat))
        restext.sort()
        year,sep,date = P['logdate'].partition('.')
        resname = 'uip01/%s/%s' % (year, date)
        O3 = o3lib.base.O3(self.workspace)
        O3.saveResult(resname, '\n'.join(restext))
        self.info['result'] = 0
# ----- mission control class
# ==F-EntityNameToDate
def EntityNameToHour(name):
    """Map an entity path to 'YYYY.MM.DD.HH', dropping a trailing '.iz0'."""
    if name.endswith('.iz0'):
        name = name[:-4]
    return '.'.join(name.split('/')[-4:])
def EntityNameToDate(name): return '.'.join(name.split('/')[-4:-1])
def EntityNameToLogName(name): return name.split('/')[1]
def GetEntities(prefix):
    """List warehouse entities under *prefix*, appending one randomly chosen
    shadow-copy location tuple to each entry.

    Returns None when the warehouse call fails or nothing matches.
    """
    res = O3Call(('localhost', CC.DEFAULT_PORT),
        CC.SVC_WAREHOUSE, 'LISTENTITY0', prefix)
    if res[0] != CC.RET_OK:
        return None
    entities = res[2]
    if not entities:
        return None
    res = O3Call(('localhost', CC.DEFAULT_PORT),
        CC.SVC_WAREHOUSE, 'LISTENTITYLOCATION0', [e[0] for e in entities])
    shadows = res[2]
    # (the original also created a dead `res = []` accumulator here)
    for e in entities:
        e.append(random.choice(shadows[e[0]]))
    return entities
# ==C-MissionIPReducer
class MissionIPReducer(job.Mission):
    """Mission: per log family compute day-level distinct-IP sets from the
    direct logs (H2 -> D2) and from the union log (H1 -> D1), then compare
    them in a single Z0 job that writes the uip01 report."""
    def __init__(self, id, kwargs = None):
        job.Mission.__init__(self, id, kwargs)
        self.name = 'IPReduce'
        self.codebase = CODEBASE
    def setup(self, kwargs):
        self.kwargs = kwargs
    def start(self):
        """Build the whole job graph for kwargs['date'] and fire it."""
        _D('%s:--IPReducer--%s--' % (self.id, self.kwargs['date']))
        self.starttime = time.time()
        cout('-JOB-OUT- START--%s' % (self.id))
        date = self.kwargs['date']
        datename = date.replace('/', '.')
        lognames = self.kwargs['lognames']
        # logname / 'union-<logname>' -> (location, resultid), filled by D1/D2
        self.logfiles = dict()
        lastJob = self.newSJob('Z0-%s' % datename , MODULENAME, 'JOBIPDayAll')
        lastJob.setup0(
            lognames = lognames,
            logfiles = self.logfiles,
            logdate = datename,
            )
        dayJobs = []
        hourJobs = {}
        self.hourIPS = {}   # logname -> [(location, resultid), ...]
        for l in lognames:
            self.hourIPS[l] = list()
            hourJobs[l] = list()
            # NOTE(review): local `job` shadows the imported o3grid.job module
            job = self.newSJob('D2-%s-%s' % (l, datename), MODULENAME, 'JOBS15IPDay')
            job.setup0(
                hours = self.hourIPS[l],
                logname = l,
                date = date)
            # NOTE(review): GetEntities may return None; iteration would raise
            hourLogs = GetEntities('plog/%s/%s' % (l, date))
            serial = 0
            for e in hourLogs:
                eid, ename, emtime, esize = e[:4]
                sid, snode, saddr, slabel, sname, size = e[-1]
                hJob = self.newSJob('H2-%02d-%s-%s' % (serial, l, EntityNameToHour(ename)),
                    MODULENAME, 'JOBS15IPHour')
                hJob.name = hJob.id
                hJob.setup0(
                    node = snode,
                    entityname = ename,
                    entityid = eid,
                    addr = saddr,
                    label = slabel,
                    size = esize,
                    logname = l,
                    logdate = EntityNameToDate(ename),
                    loghour = EntityNameToHour(ename),
                    )
                hourJobs[l].append(hJob)
                job.need(hJob)
                serial += 1
            dayJobs.append(job)
            lastJob.need(job)
        unionDayJobs = []
        self.unionHour = []
        for l in lognames:
            job = self.newSJob('D1-%s-%s' % (l, datename), MODULENAME, 'JOBUnionIPDay')
            job.setup0(logname = l, hours = self.unionHour)
            unionDayJobs.append(job)
            lastJob.need(job)
        unionHourJobs = []
        unionLogs = GetEntities('plog/mopunion/%s/' % date.replace('.', '/'))
        serial = 0
        for e in unionLogs:
            eid, ename, emtime, esize = e[:4]
            sid, snode, saddr, slabel, sname, size = e[-1]
            job = self.newSJob('H1-%02d-%s' % (serial, EntityNameToHour(ename)),
                MODULENAME, 'JOBUnionIPHour')
            job.name = job.id
            job.setup0(
                node = snode,
                entityname = ename,
                entityid = eid,
                addr = saddr,
                label = slabel,
                size = esize,
                logname = 'mopunion',
                logdate = EntityNameToDate(ename),
                loghour = EntityNameToHour(ename),
                )
            unionHourJobs.append(job)
            # every union day job depends on every union hour job
            for j in unionDayJobs:
                j.need(job)
            serial += 1
        # fire in order: union hours, union days, s15 days, s15 hours, final
        for j in unionHourJobs: j.fire()
        for j in unionDayJobs: j.fire()
        for j in dayJobs: j.fire()
        for js in hourJobs.values():
            for j in js: j.fire()
        lastJob.fire()
        self.insize0 = 0
    def jobFinished(self, job, params):
        """Route each finished job's (location, resultid) to its accumulator."""
        if params == None:
            cout('%s - failed' % job.id)
            # bug fix: the original fell through and dereferenced None below
            return
        P = params
        if job.id.startswith('D2-'):
            logname = job.id.split('-')[1]
            self.logfiles[logname] = (P['location'], P['resultid'])
        if job.id.startswith('D1-'):
            logname = job.id.split('-')[1]
            self.logfiles['union-%s' % logname] = (P['location'], P['resultid'])
        if job.id.startswith('H2-'):
            logname = job.id.split('-')[2]
            self.hourIPS[logname].append((P['location'], P['resultid']))
            self.insize0 += P['insize0']
        if job.id.startswith('H1-'):
            self.unionHour.append((P['location'], P['resultid']))
            self.insize0 += P['insize0']
        if job.id.startswith('Z0-'):
            cout('-MISSION-FINISHED- %.2fM in %.2fs' % (self.insize0, time.time() - self.starttime))
def generateJob(job, workspace):
    """Instantiate the job class named in job['class'] from this module,
    recording the workspace on the job dict first."""
    job['workspace'] = workspace
    jobclass = globals()[job['class']]
    return jobclass(job.get('params', {}), job)
# ----- other instruction
# ----- classes and functions for test
# ==C-LocalLogScanner
class LocalScanner(object):
    """File-based counterpart of the queue scanner: read *filename* in large
    chunks and feed complete lines to a mixin's analyse() method."""
    def __init__(self, filename):
        self.fn = filename
        self.lines = 0
        self.bytes = 0
    # ==M-LocalScanner-scan
    def scan(self):
        """Stream the whole file through analyse(), counting lines/bytes."""
        loop = True
        pending = ''            # partial line carried across read() chunks
        bs = 1024 * 1023 * 8    # ~8Mb read size (1023 kept from the original)
        # open() instead of the deprecated file(); the handle is now closed
        # even if analyse() raises
        fin = open(self.fn)
        try:
            while loop:
                block = fin.read(bs)
                if not block:
                    # EOF: flush any carried partial line
                    loop = False
                    if not pending:
                        continue
                    lines = pending.split('\n')
                    self.lines += len(lines)
                    pending = ''
                else:
                    lines = block.split('\n')
                    lines[0] = pending + lines[0]
                    pending = lines.pop()
                    self.lines += len(lines)
                    self.bytes += len(block)
                self.analyse(lines)
        finally:
            fin.close()
# ---
# ==F-testLocalUnionLogScanner
def testLocalUnionLogScanner():
    """Manual smoke test: run the union-log IP counter over a local sample."""
    class LocalUnionLogScanner(LocalScanner, UnionLogIPCounter):
        def __init__(self, fn):
            LocalScanner.__init__(self, fn)
            UnionLogIPCounter.__init__(self)
    u = LocalUnionLogScanner('/tmp/access.union')
    u.scan()
    print 'uume:%d dzh:%d tt:%d' % (len(u.uume), len(u.dzh), len(u.tt))
# ==F-testLocalS15LogScanner
def testLocalS15LogScanner():
    """Manual smoke test: run the s15-log IP counter over a local sample."""
    class LocalS15LogScanner(LocalScanner, S15LogIPCounter):
        def __init__(self, fn):
            LocalScanner.__init__(self, fn)
            S15LogIPCounter.__init__(self)
    u = LocalS15LogScanner('/tmp/0200_')
    u.scan()
    print 'ips:%d' % len(u.ip)
# --- main ---
if __name__ == '__main__':
    # ad-hoc local smoke test; the union variant is kept for manual use
    #testLocalUnionLogScanner()
    testLocalS15LogScanner()
| Python |
#!/usr/bin/python
codebase = {
'name': 'uipreducer01',
'version': '0.0.0.1',
'files': [
'uipreducer01/__init__.py',
'uipreducer01/uipreducer01.py',
],
'modules': [
'uipreducer01.uipreducer01',
'uipreducer01',
],
}
| Python |
#!/usr/bin/python
codebase = {
'name': 'oneday01',
'version': '0.0.0.1',
'files': [
'oneday01/__init__.py',
'oneday01/oneday01.py',
],
'modules': [
'oneday01.oneday01',
'oneday01',
],
}
| Python |
#!/usr/bin/python
from o3grid.utility import cout
class IsGrid0Job(object):
    """Trivial demo job: logs a marker line when executed by the grid."""
    def run(self):
        cout("I'm iSGrid0.Job")
# Import-time marker: logged once when the grid loads this module.
cout("Load IsGrid0.IsGrid0")
def generateJob(jobinfo, workspace):
    """Job factory hook called by the o3grid loader (jobinfo/workspace unused)."""
    return IsGrid0Job()
| Python |
#!/usr/bin/python
from o3grid.utility import cout
class IsGrid0Job(object):
    """Trivial demo job: logs a marker line when executed by the grid."""
    def run(self):
        cout("I'm iSGrid0.Job")
# Import-time marker: logged once when the grid loads this module.
cout("Load IsGrid0.IsGrid0")
def generateJob(jobinfo, workspace):
    """Job factory hook called by the o3grid loader (jobinfo/workspace unused)."""
    return IsGrid0Job()
| Python |
import threading, Queue
import os, random, time
import struct, zlib
import cPickle as pickle, cStringIO as StringIO
import operator, heapq
from o3grid import constants as CC
from o3grid.utility import cout, D as _D, D2 as _D2, DE as _E
from o3grid.protocol import O3Call, O3Channel, O3Space
from o3grid import job
import o3lib.base
from o3lib.fs import StartO3EntityReader, O3EntityReader
from fastmap import increase as mapincrease, partition as mappartition
from fastmap import fastdumps, fastloads, fastloads3, partitiondumps
MISSIONNAME = "TOP100"
CODEBASE = "onedaytop100"
MODULENAME = "onedaytop100.onedaytop100"
PARTITIONS = 8
MISSIONPREFIX = 'ODT1'
# --- Utility for date related
def WeekPostfix(datename):
    """Return a 'WW-mm.dd-mm.dd' label for the week containing *datename*.

    *datename* is '%Y/%m/%d'. The label is the Monday-first week number,
    the Monday date and the Sunday date of that week.

    Fixes: the original referenced an undefined name ``date`` (instant
    NameError) and computed the week end as begin MINUS six days.
    """
    dtime = time.strptime(datename, '%Y/%m/%d')
    day = int(time.strftime('%w', dtime))   # 0=Sunday .. 6=Saturday
    week = time.strftime('%W', dtime)       # week of year, Monday-first
    if day == 0: day = 7                    # treat Sunday as day 7 of its week
    tz = time.mktime(dtime)
    begintz = tz - (3600 * 24 * (day - 1))  # back up to Monday
    endtz = begintz + (3600 * 24 * 6)       # forward to Sunday
    return '%s-%s-%s' % (
        week,
        time.strftime('%m.%d', time.localtime(begintz)),
        time.strftime('%m.%d', time.localtime(endtz)))
# --- OneDay01 Mission Class ---
class MOneDayTop100(job.Mission):
    """Daily TOP100 mission: fans out into three layers of grid jobs.

      C0-*   one per warehouse entity (one hour of logs): scan + partition counts
      C1-P*  one per partition (PARTITIONS total): merge one slice of every hour
      C9-SUM final summary: merge all partitions, publish the top lists

    Dependencies are wired so C9 needs every C1, and every C1 needs every C0.
    """
    def __init__(self, id, kwargs):
        job.Mission.__init__(self, id, kwargs)
        self.name = MISSIONNAME
        self.codebase = CODEBASE
    def setup(self, kwargs):
        # kwargs must contain 'prefix': warehouse path of one day's log entities.
        self.kwargs = kwargs
    def start(self):
        self.starttime = time.time()
        self.insize0 = 0.0  # total input MB, accumulated from C0 results
        # List the day's entities; largest first so long-running scans start early.
        res = O3Call(('localhost', CC.DEFAULT_PORT),
            CC.SVC_WAREHOUSE, 'LISTENTITY1', self.kwargs['prefix'])
        entitys = res[2]
        entitys.sort(key = operator.itemgetter('size'), reverse=True)
        res = O3Call(('localhost', CC.DEFAULT_PORT),
            CC.SVC_WAREHOUSE, 'LISTENTITYLOCATION0', [ e['id'] for e in entitys])
        shadows = res[2]  # entity id -> list of replica locations
        # Shared accumulators: filled incrementally in jobFinished(); the summary
        # jobs hold references to these same lists via setup0().
        self.hourres = []
        self.hourinfo = []
        self.partitions = []
        _D('%s:--START--:%s' % (self.id, self.kwargs['prefix']), '|')
        self.totalJob = self.newSJob('C9-SUM', MODULENAME, 'JOBOneDaySummary')
        self.totalJob.setup0(
            prefix = self.kwargs['prefix'],
            partitions = self.partitions,
            hourinfo = self.hourinfo)
        self.totalJob.fire()
        self.partitionJobs = []
        for i in range(PARTITIONS):
            # NOTE(review): local 'job' shadows the imported o3grid.job module
            # (harmless here, the module is not used again in this method).
            job = self.newSJob('C1-P%d' % i, MODULENAME, 'JOBPartitionSum')
            job.setup0(
                hourres = self.hourres,
                partitionid = i)
            job.fire()
            self.totalJob.need(job)
            self.partitionJobs.append(job)
        serial = 0
        for e in entitys:
            #eid, ename, emtime, esize = e
            eid = e['id']
            ename = e['name']
            emtime = e['mtime']
            esize = e['size']
            # Pick one replica at random to spread read load across storage nodes.
            sid, snode, saddr, slabel, sname, size = random.choice(shadows[eid])
            taskname = 'C0-%02d-%s' % (serial, ename.split('/')[-1].split('.')[0])
            serial += 1
            job = self.newSJob(taskname, MODULENAME, 'JOBLogHour')
            job.name = job.id
            job.setup0(
                entityname = ename,
                entityid = eid,
                addr = saddr,
                node = snode,
                label = slabel,
                size = esize,)
            job.fire()
            for j in self.partitionJobs:
                j.need(job)
    def jobFinished(self, job, params):
        # Route each finished job's result into the accumulator its consumers read.
        if job.id.startswith('C0-'):
            self.hourres.append((params['location'], params['resultid']))
            self.insize0 += params.get('insize0', 0.0)
            self.hourinfo.append(params.get('restext'))
        elif job.id.startswith('C1-'):
            self.partitions.append((params['location'], params['resultid']))
        elif job.id.startswith('C9-'):
            cout('-MISSION-END- {%s} %.2fm %.2fs' % (
                self.id, self.insize0, time.time() - self.starttime))
# ----- UTILITIES -----
def couttimer(func, *args, **kwargs):
    """Call *func*, log its wall-clock duration via cout(), return its result."""
    started = time.time()
    result = func(*args, **kwargs)
    cout('%s - %.2fs' % (func.func_name, time.time() - started))
    return result
# ===
def MapPlusList0(map, l):
    """Merge counter dict *l* into *map* via fastmap.increase (adds counts)."""
    for key, count in l.iteritems():
        mapincrease(map, key, count)
# ===
def RemoteReader(queue, node, addr, label, name, size, entityid):
    """Stream *size* bytes of a remote entity over an O3Channel into *queue*.

    The stream is always terminated with a ``None`` sentinel so the consumer
    knows to stop.

    Fix: the original leaked the channel on the early ``return`` taken when
    the server did not answer RET_OK; the channel is now closed in all paths.
    """
    bs = 512000 * 8
    S = None
    try:
        S = O3Channel().connect((addr, CC.DEFAULT_PORT))
        res = S(CC.SVC_SPACE, 'ROOMGET1', label, name, 0, size, entityid)
        if res[0] != CC.RET_OK:
            return
        rest = size
        while rest != 0:
            # Never ask for more than what is left of the entity.
            buf = S.recvAll(bs if rest > bs else rest)
            if not buf:
                break
            rest -= len(buf)
            queue.put(buf)
        S.getMessage()
    finally:
        # Sentinel first so the consumer can finish even if close() fails.
        queue.put(None)
        if S is not None:
            S.close()
# --end--
#def StartRemoteReader(*args):
# thr = threading.Thread(
# name = 'REMOTEREADER',
# target = RemoteReader,
# args = args)
# thr.setDaemon(True)
# thr.start()
# return thr
# ===
class JOBPartitionSum(object):
    """C1 job: merge partition #partitionid of every C0 hour result into one
    (ip, url, ut, uc) counter quad and publish it to the local O3Space."""
    def __init__(self, params, job):
        self.jobinfo = job
        self.params = params
        self.workspace = job['workspace']
    def run(self):
        params = self.params
        partitionid = params['partitionid']
        # Four counter maps, keyed by ip / url / ut / uc values respectively.
        ip = {}
        url = {}
        ut = {}
        uc = {}
        for i in self.params['hourres']:
            # i = (space location, C0 result id): fetch this hour's slice.
            content = O3Space(i[0]).GET('%s_RES_%d' % (i[1], partitionid))
            (hip, hurl, hut, huc) = fastloads3(content)
            MapPlusList0(ip, hip)
            MapPlusList0(url, hurl)
            MapPlusList0(ut, hut)
            MapPlusList0(uc, huc)
        content = fastdumps((ip, url, ut, uc))
        S = O3Space(('127.0.0.1', CC.DEFAULT_PORT))
        resid = '%s_RES' % self.jobinfo['jobid']
        S.PUT(resid, content)
        # Tell the mission where the merged partition result lives.
        self.jobinfo['result'] = {
            'resultid': resid,
            'location': self.workspace.server.entry,
        }
# ===
class JOBOneDaySummary(object):
    """Final (C9) job: merge the per-partition counter maps and publish the
    day's top lists for ip / url / ut / uc."""
    def __init__(self, params, job):
        self.jobinfo = job
        self.params = params
        self.workspace = job['workspace']
    def run(self):
        params = self.params
        ip = {}
        url = {}
        ut = {}
        uc = {}
        # Partitions are disjoint by key, so plain dict.update() merges them.
        for i in self.params['partitions']:
            content = O3Space(i[0]).GET(i[1])
            (hip, hurl, hut, huc) = fastloads(content)
            ip.update(hip)
            url.update(hurl)
            ut.update(hut)
            uc.update(huc)
        cout('%s ip:%d url:%d ut:%d uc:%d' % (
            self.jobinfo['jobid'], len(ip), len(url), len(ut), len(uc)))
        O3 = o3lib.base.O3(self.workspace)
        nouse0, logname, year, month, day = params['prefix'].split('/')
        basename = 'top100/%s/%s-%s.%s.%s-' % (year, logname, year, month, day)
        O3.saveResult(basename + 'ip', self.sortResult(ip))
        O3.saveResult(basename + 'url', self.sortResult(url))
        O3.saveResult(basename + 'ut', self.sortResult(ut))
        O3.saveResult(basename + 'uc', self.sortResult(uc))
        self.jobinfo['result'] = 0
    def sortResult(self, counts):
        """Return the top-200 entries as '<key> - <count>' lines, biggest first.

        Fix: renamed the parameter from ``dict`` (shadowed the builtin) and
        use .items() instead of .iteritems() -- same result, and portable.
        """
        res = heapq.nlargest(200, counts.items(), key = operator.itemgetter(1))
        return '\n'.join(['%s - %s' % x for x in res])
# ===
class JOBLogHour(object):
    """C0 job: stream one hour's log entity from a storage node, count
    ip/url/ut/uc occurrences, and publish the counts split into PARTITIONS
    slices for the C1 partition jobs."""
    def __init__(self, params, job):
        self.kwargs = params
        self.jobinfo = job
        self.workspace = job['workspace']
    def run(self):
        begin = time.time()
        params = self.kwargs
        entityid = params['entityid']
        entityname = params['entityname']
        addr = params['addr']
        label = params['label']
        size = params['size']
        node = params['node']
        # Bounded queue: the reader thread streams raw blocks in while
        # PVLogCounter0.count() consumes them.
        queue = Queue.Queue(10)
        #reader = StartRemoteReader(queue, node, addr, label, entityname, size, entityid)
        reader = StartO3EntityReader(queue,
            node = node,
            addr = addr,
            label = label,
            name = entityname,
            size = 0,   # presumably 0 = read the whole entity -- confirm in o3lib.fs
            entityid = entityid)
        UL = PVLogCounter0(queue)
        UL.count()
        cout('%s ip:%d url:%d ut:%d uc:%d' % (
            self.jobinfo['jobid'],
            len(UL.ip), len(UL.url), len(UL.ut), len(UL.uc)))
        # -- Dump dict to string IO buffer
        souts = couttimer(UL.dump, PARTITIONS)
        S = O3Space(('127.0.0.1', CC.DEFAULT_PORT))
        jobid = self.jobinfo['jobid']
        # Publish one result object per partition; C1-Pi fetches '<jobid>_RES_i'.
        for i in range(PARTITIONS):
            resid = '%s_RES_%d' % (jobid, i)
            S.PUT(resid, souts[i])
        # save result table to BRN(Big Result Node)
        # generate result name
        missionid, sep, jid = jobid.partition(':')
        hourname = jid.split('-')[-1]
        ignore0, logname, datename = missionid.split('-')
        year,month,day = datename.split('.')
        resname = 'top100/detail/%s/%s-%s.%s-' % (year, logname, datename, hourname)
        #O3 = o3lib.base.O3(self.workspace)
        #O3.saveResult(resname + 'ip', fastdumps(UL.ip), 'BIGRESULT')
        #O3.saveResult(resname + 'url', fastdumps(UL.url), 'BIGRESULT')
        #O3.saveResult(resname + 'ut', fastdumps(UL.ut), 'BIGRESULT')
        #O3.saveResult(resname + 'uc', fastdumps(UL.uc), 'BIGRESULT')
        # Result payload consumed by MOneDayTop100.jobFinished().
        self.jobinfo['result'] = {
            'resultid': jobid,
            'location': self.workspace.server.entry,
            'insize0': UL.bytes / 1024.0 / 1024,   # input size in MB
            'restext':[
                jobid, {
                    'pv': UL.lines, 'ip': len(UL.ip), 'url': len(UL.url),
                    'ut': len(UL.ut), 'uc': len(UL.uc)}
            ],
            'debuginfo': '%s at %s - %.2fMb/%.2fs' % (
                jobid,
                self.workspace.server.id,
                UL.bytes / 1024.0/1024,
                time.time() - begin),
        }
# ===
class PVLogCounter0(object):
    """Consume raw log blocks from a queue (None = end-of-stream) and count
    occurrences of the ip / url / ut / uc fields of each tab-separated line."""
    def __init__(self, queue):
        self.curline = []   # unused; kept for compatibility
        self.lines = 0      # log lines processed
        self.queue = queue
        self.ip = {}
        self.url = {}
        self.ut = {}
        self.uc = {}
        self.bytes = 0      # raw bytes consumed
    def count(self):
        # Bind the maps and counters to locals for speed in the hot loop.
        uc = self.uc
        ut = self.ut
        ip = self.ip
        url = self.url
        queue = self.queue
        lines = 0
        bytes = 0
        pending = ''   # partial last line carried between blocks
        loop = True
        while loop:
            bs = self.queue.get()
            if not bs:
                # None sentinel (or empty block) ends the stream; flush pending.
                loop = False
                if pending == '':
                    continue
                tokens = pending.split('\n')
                pending = ''
            else:
                bytes += len(bs)
                tokens = bs.split('\n')
                tokens[0] = pending + tokens[0]
                pending = tokens.pop()
            for line in tokens:
                l = line.split('\t')
                # Skip lines whose field 7 starts with '4'
                # (presumably 4xx HTTP status -- confirm against log schema).
                if l[7][0] == '4':
                    continue
                mapincrease(ip, l[2])
                mapincrease(url, l[4])
                mapincrease(ut, l[11])
                mapincrease(uc, l[12])
                lines += 1
        self.lines = lines
        self.bytes = bytes
    # ---
    def dump(self, n):
        """Serialize each counter into n partition slices; return a list of n
        strings, slice i holding the (ip, url, ut, uc) data of partition i."""
        #res = []
        ips = partitiondumps(self.ip, n)
        urls = partitiondumps(self.url, n)
        uts = partitiondumps(self.ut, n)
        ucs = partitiondumps(self.uc, n)
        return [ ''.join((ips[x], urls[x], uts[x], ucs[x])) for x in range(n) ]
# --end-- class PVLogCounter01
def generateJob(job, workspace):
    """Factory hook: build the job class named by ``job['class']`` from this
    module's globals, passing it (params, jobinfo) with workspace attached."""
    job['workspace'] = workspace
    cls = globals()[job['class']]
    return cls(job.get('params', {}), job)
# Entry-point alias the o3grid mission loader looks up in this module.
O3Mission = MOneDayTop100
| Python |
#!/usr/bin/python
# Deployment manifest consumed by the o3grid code-distribution layer:
# which files to ship and which modules to (re)load for this codebase.
codebase = {
    'name': 'oneday02',
    'version': '0.0.0.1',
    'files': [
        'oneday02/__init__.py',
        'oneday02/oneday02.py',
    ],
    'modules': [
        'oneday02.oneday02',
        'oneday02',
    ],
}
| Python |
import wx
class MainFrame(wx.Frame):
    """Minimal wx demo frame: one 'File' menu holding a single '&Test' item."""
    def __init__(self):
        wx.Frame.__init__(self,None)
        self.menubar = wx.MenuBar()
        self.file = wx.Menu()
        # wx.ID_ANY lets wx assign the menu-item id automatically.
        self.file.Append(wx.ID_ANY,'&Test')
        self.menubar.Append(self.file,'File')
        self.SetMenuBar(self.menubar)
        self.Show()
# Create the application (False = don't redirect stdout/stderr), show the
# frame and enter the wx event loop.
app = wx.App(False)
frame = MainFrame()
app.MainLoop()
import MySQLdb
import time
import random
import operator
# Seed script: rebuild the `resultset-location` table with a 3x4 grid of
# sniffer hosts, using the grid coordinates as longitude/latitude.
#
# Fixes: use cursor.execute() for single statements (executemany was misused
# with a one-element / empty parameter list), drop the unused `long`/`lat`
# locals (`long` shadowed the builtin), and stop reusing the loop bounds
# `x`/`y` as loop variables.
db = MySQLdb.connect(host="localhost", user="root", passwd="", db="phone_tracker")
c = db.cursor()
c.execute("""TRUNCATE TABLE `resultset-location`""")
cols = 3   # grid width
rows = 4   # grid height
i = 0
for y in range(1, rows + 1):
    for x in range(1, cols + 1):
        hostname = "Host " + str(i)
        c.execute(
            """
            INSERT INTO `resultset-location` (`hostname`, `location`, `suburb`, `postcode`, `latitude`, `longitude`)
            VALUES (%s, %s, %s, %s, %s, %s)
            """,
            (hostname, i, i, "POSTCODE", float(y), float(x)))
        i = i + 1
db.query("""SELECT * FROM `resultset-location`""")
r = db.use_result()
db.close()
import MySQLdb
import time
import random
import operator
class sniffer:
def __init__(self):
self.db=MySQLdb.connect(host="localhost",user="root", passwd="",db="phone_tracker")
self.db.query("""SELECT * FROM `resultset-location`""")
r=self.db.use_result()
self.sniffer = []
for each in r.fetch_row(maxrows=0):
self.sniffer.append(each)
def add(self, name="-", mac="AABBCC"):
c=self.db.cursor()
c.executemany(
"""
INSERT INTO `resultset-counts` (`hostname`, `device_type`, `mac_address`)
VALUES (%s, %s, %s)
""",
[
(name, "wifi", mac)
] )
print "Added: ", "NAME:",name," MAC:", mac
def view(self, rows=5):
#db=MySQLdb.connect(host="localhost",user="root", passwd="",db="phone_tracker")
self.db.query("""SELECT * FROM `resultset-counts` ORDER BY dt DESC""")
r=self.db.use_result()
print "VIEW"
for each in r.fetch_row(rows):
print each
def clearscreen(numlines=100):
"""
Clear the console.
numlines is an optional argument used only as a fall-back.
"""
import os
if os.name == "posix":
# Unix/Linux/MacOS/BSD/etc
os.system('clear')
elif os.name in ("nt", "dos", "ce"):
# DOS/Windows
os.system('CLS')
else:
# Fallback for other operating systems.
print '\n' * numlines
def move(self, mode, id, mac):
if (mode == "rand"):
newHost = random.randint(1, len(self.sniffer)+1)
self.add(name="Host "+str(newHost), mac=mac)
if("x" in mode):
# long
self.sorted_list = sorted(self.sniffer, key=operator.itemgetter(5))
else:
# lat
self.sorted_list = sorted(self.sniffer, key=operator.itemgetter(6))
for i in range(len(self.sorted_list)):
if self.sorted_list[i][1] == id:
newHost = self.sorted_list[i][1]
try:
if "-" in mode:
if (i!=0):
newHost = self.sorted_list[i-1][1]
else:
newHost = self.sorted_list[i+1][1]
except:
pass
finally:
self.add(name=newHost, mac=mac)
return newHost
class incLetter:
    """String wrapper whose ``+ n`` bumps the last character by *n* code points
    (e.g. incLetter('AABBCC') + 1 == 'AABBCD'). No carry into earlier
    characters; names shorter than two characters yield None (the original
    loop never ran for them)."""
    def __init__(self, name):
        self.name = name
    def __add__(self, other):
        # The original for-loop returned on its very first pass, so only the
        # final character is ever incremented.
        if len(self.name) < 2:
            return None
        return self.name[:-1] + chr(ord(self.name[-1]) + other)
class node:
    """Plain record pairing a host name with a device MAC address."""
    def __init__(self, host, mac):
        self.mac = mac
        self.host = host
# non-blocking input for windows
def kbfunc():
    """Non-blocking keyboard poll (Windows only): return the ordinal of the
    pressed key, or 0 when no key is waiting."""
    import msvcrt
    if msvcrt.kbhit():
        return ord(msvcrt.getch())
    return 0
# --- Simulation driver: scatter 11 devices over random hosts, then loop
# forever moving them around according to the current mode, recording each
# move in the database.
mysniffer = sniffer()
nodes = []
mac = incLetter('AABBCC')
for each in range(0,11):
    # Random starting host for each simulated device; MACs are AABBCC + offset.
    i = random.randint(0, len(mysniffer.sniffer)+1)
    nodes.append(node("Host "+str(i), mac+each))
run=True
mode = "rand"
count = False
while 1:
    mysniffer.clearscreen()
    print "Modes: x,-x,y,-y"
    # NOTE(review): 'input' shadows the builtin; non-blocking key poll.
    input = kbfunc()
    if input:
        # A key was hit: read a new movement mode from stdin.
        print ">>"
        mode = raw_input()
    if (run):
        # Settings
        # number people
        # END Settings
        #mode = "-y"
        print "Mode = "+str(mode)
        tmp = []
        for each in nodes:
            # Move each device with ~2/3 probability, pausing 1-3s between moves.
            if (random.randint(1, 3)):
                time.sleep(random.randint(1,3))
                tmp.append(node(mysniffer.move(mode, each.host, each.mac), each.mac))
        nodes = tmp
        mysniffer.view()
#!/usr/bin/python
"""
This module contains an OpenSoundControl implementation (in Pure Python), based (somewhat) on the
good old 'SimpleOSC' implementation by Daniel Holth & Clinton McChesney.
This implementation is intended to still be 'Simple' to the user, but much more complete
(with OSCServer & OSCClient classes) and much more powerful
(the OSCMultiClient supports subscriptions & message-filtering,
OSCMessage & OSCBundle are now proper container-types)
================
OpenSoundControl
================
OpenSoundControl is a network-protocol for sending (small) packets of addressed data over network sockets.
This OSC-implementation uses the UDP/IP protocol for sending and receiving packets.
(Although it is theoretically possible to send OSC-packets over TCP, almost all known implementations use UDP)
OSC-packets come in two kinds:
- OSC-messages consist of an 'address'-string (not to be confused with a (host:port) network-address!),
followed by a string of 'typetags' associated with the message's arguments (ie. 'payload'),
and finally the arguments themselves, encoded in an OSC-specific way.
The OSCMessage class makes it easy to create & manipulate OSC-messages of this kind in a 'pythonesque' way
(that is, OSCMessage-objects behave a lot like lists)
- OSC-bundles are a special type of OSC-message containing only OSC-messages as 'payload'. Recursively.
(meaning; an OSC-bundle could contain other OSC-bundles, containing OSC-bundles etc.)
OSC-bundles start with the special keyword '#bundle' and do not have an OSC-address. (but the OSC-messages
a bundle contains will have OSC-addresses!)
Also, an OSC-bundle can have a timetag, essentially telling the receiving Server to 'hold' the bundle until
the specified time.
The OSCBundle class allows easy creation & manipulation of OSC-bundles.
see also http://opensoundcontrol.org/spec-1_0
---------
To send OSC-messages, you need an OSCClient, and to receive OSC-messages you need an OSCServer.
The OSCClient uses an 'AF_INET / SOCK_DGRAM' type socket (see the 'socket' module) to send
binary representations of OSC-messages to a remote host:port address.
The OSCServer listens on an 'AF_INET / SOCK_DGRAM' type socket bound to a local port, and handles
incoming requests. Either one-after-the-other (OSCServer) or in a multi-threaded / multi-process fashion
(ThreadingOSCServer / ForkingOSCServer). If the Server has a callback-function (a.k.a. handler) registered
to 'deal with' (i.e. handle) the received message's OSC-address, that function is called, passing it the (decoded) message
The different OSCServers implemented here all support the (recursive) un-bundling of OSC-bundles,
and OSC-bundle timetags.
In fact, this implementation supports:
- OSC-messages with 'i' (int32), 'f' (float32), 's' (string) and 'b' (blob / binary data) types
- OSC-bundles, including timetag-support
- OSC-address patterns including '*', '?', '{,}' and '[]' wildcards.
(please *do* read the OSC-spec! http://opensoundcontrol.org/spec-1_0 it explains what these things mean.)
In addition, the OSCMultiClient supports:
- Sending a specific OSC-message to multiple remote servers
- Remote server subscription / unsubscription (through OSC-messages, of course)
- Message-address filtering.
---------
Stock, V2_Lab, Rotterdam, 2008
----------
Changelog:
----------
v0.3.0 - 27 Dec. 2007
Started out to extend the 'SimpleOSC' implementation (v0.2.3) by Daniel Holth & Clinton McChesney.
Rewrote OSCMessage
Added OSCBundle
v0.3.1 - 3 Jan. 2008
Added OSClient
Added OSCRequestHandler, loosely based on the original CallbackManager
Added OSCServer
Removed original CallbackManager
Adapted testing-script (the 'if __name__ == "__main__":' block at the end) to use new Server & Client
v0.3.2 - 5 Jan. 2008
Added 'container-type emulation' methods (getitem(), setitem(), __iter__() & friends) to OSCMessage
Added ThreadingOSCServer & ForkingOSCServer
- 6 Jan. 2008
Added OSCMultiClient
Added command-line options to testing-script (try 'python OSC.py --help')
v0.3.3 - 9 Jan. 2008
Added OSC-timetag support to OSCBundle & OSCRequestHandler
Added ThreadingOSCRequestHandler
v0.3.4 - 13 Jan. 2008
Added message-filtering to OSCMultiClient
Added subscription-handler to OSCServer
Added support for numpy/scipy int & float types. (these get converted to 'standard' 32-bit OSC ints / floats!)
Cleaned-up and added more Docstrings
v0.3.5 - 14 aug. 2008
Added OSCServer.reportErr(...) method
-----------------
Original Comments
-----------------
> Open SoundControl for Python
> Copyright (C) 2002 Daniel Holth, Clinton McChesney
>
> This library is free software; you can redistribute it and/or modify it under
> the terms of the GNU Lesser General Public License as published by the Free
> Software Foundation; either version 2.1 of the License, or (at your option) any
> later version.
>
> This library is distributed in the hope that it will be useful, but WITHOUT ANY
> WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
> PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
> details.
> You should have received a copy of the GNU Lesser General Public License along
> with this library; if not, write to the Free Software Foundation, Inc., 59
> Temple Place, Suite 330, Boston, MA 02111-1307 USA
> For questions regarding this module contact Daniel Holth <dholth@stetson.edu>
> or visit http://www.stetson.edu/~ProctoLogic/
> Changelog:
> 15 Nov. 2001:
> Removed dependency on Python 2.0 features.
> - dwh
> 13 Feb. 2002:
> Added a generic callback handler.
> - dwh
"""
import math, re, socket, select, string, struct, sys, threading, time, types
from SocketServer import UDPServer, DatagramRequestHandler, ForkingMixIn, ThreadingMixIn
# Module version triple; the third element is cut out of the SVN Rev keyword.
# (The 'global' statements at module level are no-ops, kept from the original.)
global version
version = ("0.3","5b", "$Rev: 5294 $"[6:-2])
# Python types accepted as OSC float32 / int32 arguments; numpy/scipy scalar
# types may be appended below when numpy is importable.
global FloatTypes
FloatTypes = [types.FloatType]
global IntTypes
IntTypes = [types.IntType]
##
# numpy/scipy support:
##
try:
    # Optional numpy support: register numpy scalar types so OSCArgument()
    # encodes them as standard 32-bit OSC floats / ints.
    from numpy import typeDict
    for ftype in ['float32', 'float64', 'float128']:
        try:
            FloatTypes.append(typeDict[ftype])
        except KeyError:
            # This numpy build doesn't provide the type; skip it.
            pass
    for itype in ['int8', 'int16', 'int32', 'int64']:
        try:
            IntTypes.append(typeDict[itype])
            IntTypes.append(typeDict['u' + itype])
        except KeyError:
            pass
    # thanks for those...
    del typeDict, ftype, itype
except ImportError:
    # numpy not installed: plain Python ints/floats still work.
    pass
######
#
# OSCMessage classes
#
######
class OSCMessage(object):
    """ Builds typetagged OSC messages.

    OSCMessage objects are container objects for building OSC-messages.
    On the 'front' end, they behave much like list-objects, and on the 'back' end
    they generate a binary representation of the message, which can be sent over a network socket.
    OSC-messages consist of an 'address'-string (not to be confused with a (host, port) IP-address!),
    followed by a string of 'typetags' associated with the message's arguments (ie. 'payload'),
    and finally the arguments themselves, encoded in an OSC-specific way.
    On the Python end, OSCMessage are lists of arguments, prepended by the message's address.
    The message contents can be manipulated much like a list:
      >>> msg = OSCMessage("/my/osc/address")
      >>> msg.append('something')
      >>> msg.insert(0, 'something else')
      >>> msg[1] = 'entirely'
      >>> msg.extend([1,2,3.])
      >>> msg += [4, 5, 6.]
      >>> del msg[3:6]
      >>> msg.pop(-2)
      5
      >>> print msg
      /my/osc/address ['something else', 'entirely', 1, 6.0]
    OSCMessages can be concatenated with the + operator. In this case, the resulting OSCMessage
    inherits its address from the left-hand operand. The right-hand operand's address is ignored.
    To construct an 'OSC-bundle' from multiple OSCMessage, see OSCBundle!
    Additional methods exist for retrieving typetags or manipulating items as (typetag, value) tuples.

    Fix in this revision: remove() raised ValueError with an undefined name
    ('m' instead of 'val'), which turned a clean ValueError into a NameError.
    """
    def __init__(self, address=""):
        """Instantiate a new OSCMessage.
        The OSC-address can be specified with the 'address' argument
        """
        self.clear(address)
    def setAddress(self, address):
        """Set or change the OSC-address
        """
        self.address = address
    def clear(self, address=""):
        """Clear (or set a new) OSC-address and clear any arguments appended so far
        """
        self.address = address
        self.clearData()
    def clearData(self):
        """Clear any arguments appended so far
        """
        self.typetags = ","
        self.message = ""
    def append(self, argument, typehint=None):
        """Appends data to the message, updating the typetags based on
        the argument's type. If the argument is a blob (counted
        string) pass in 'b' as typehint.
        'argument' may also be a list or tuple, in which case its elements
        will get appended one-by-one, all using the provided typehint
        """
        if type(argument) == types.DictType:
            argument = argument.items()
        elif isinstance(argument, OSCMessage):
            raise TypeError("Can only append 'OSCMessage' to 'OSCBundle'")
        if hasattr(argument, '__iter__'):
            for arg in argument:
                self.append(arg, typehint)
            return
        if typehint == 'b':
            binary = OSCBlob(argument)
            tag = 'b'
        elif typehint == 't':
            binary = OSCTimeTag(argument)
            tag = 't'
        else:
            tag, binary = OSCArgument(argument, typehint)
        self.typetags += tag
        self.message += binary
    def getBinary(self):
        """Returns the binary representation of the message
        """
        binary = OSCString(self.address)
        binary += OSCString(self.typetags)
        binary += self.message
        return binary
    def __repr__(self):
        """Returns a string containing the decode Message
        """
        return str(decodeOSC(self.getBinary()))
    def __str__(self):
        """Returns the Message's address and contents as a string.
        """
        return "%s %s" % (self.address, str(self.values()))
    def __len__(self):
        """Returns the number of arguments appended so far
        """
        return (len(self.typetags) - 1)
    def __eq__(self, other):
        """Return True if two OSCMessages have the same address & content
        """
        if not isinstance(other, self.__class__):
            return False
        return (self.address == other.address) and (self.typetags == other.typetags) and (self.message == other.message)
    def __ne__(self, other):
        """Return (not self.__eq__(other))
        """
        return not self.__eq__(other)
    def __add__(self, values):
        """Returns a copy of self, with the contents of 'values' appended
        (see the 'extend()' method, below)
        """
        msg = self.copy()
        msg.extend(values)
        return msg
    def __iadd__(self, values):
        """Appends the contents of 'values'
        (equivalent to 'extend()', below)
        Returns self
        """
        self.extend(values)
        return self
    def __radd__(self, values):
        """Appends the contents of this OSCMessage to 'values'
        Returns the extended 'values' (list or tuple)
        """
        out = list(values)
        out.extend(self.values())
        if type(values) == types.TupleType:
            return tuple(out)
        return out
    def _reencode(self, items):
        """Erase & rebuild the OSCMessage contents from the given
        list of (typehint, value) tuples"""
        self.clearData()
        for item in items:
            self.append(item[1], item[0])
    def values(self):
        """Returns a list of the arguments appended so far
        """
        return decodeOSC(self.getBinary())[2:]
    def tags(self):
        """Returns a list of typetags of the appended arguments
        """
        return list(self.typetags.lstrip(','))
    def items(self):
        """Returns a list of (typetag, value) tuples for
        the arguments appended so far
        """
        out = []
        values = self.values()
        typetags = self.tags()
        for i in range(len(values)):
            out.append((typetags[i], values[i]))
        return out
    def __contains__(self, val):
        """Test if the given value appears in the OSCMessage's arguments
        """
        return (val in self.values())
    def __getitem__(self, i):
        """Returns the indicated argument (or slice)
        """
        return self.values()[i]
    def __delitem__(self, i):
        """Removes the indicated argument (or slice)
        """
        items = self.items()
        del items[i]
        self._reencode(items)
    def _buildItemList(self, values, typehint=None):
        if isinstance(values, OSCMessage):
            items = values.items()
        elif type(values) == types.ListType:
            items = []
            for val in values:
                if type(val) == types.TupleType:
                    items.append(val[:2])
                else:
                    items.append((typehint, val))
        elif type(values) == types.TupleType:
            items = [values[:2]]
        else:
            items = [(typehint, values)]
        return items
    def __setitem__(self, i, val):
        """Set indicated argument (or slice) to a new value.
        'val' can be a single int/float/string, or a (typehint, value) tuple.
        Or, if 'i' is a slice, a list of these or another OSCMessage.
        """
        items = self.items()
        new_items = self._buildItemList(val)
        if type(i) != types.SliceType:
            if len(new_items) != 1:
                raise TypeError("single-item assignment expects a single value or a (typetag, value) tuple")
            new_items = new_items[0]
        # finally...
        items[i] = new_items
        self._reencode(items)
    def setItem(self, i, val, typehint=None):
        """Set indicated argument to a new value (with typehint)
        """
        items = self.items()
        items[i] = (typehint, val)
        self._reencode(items)
    def copy(self):
        """Returns a deep copy of this OSCMessage
        """
        msg = self.__class__(self.address)
        msg.typetags = self.typetags
        msg.message = self.message
        return msg
    def count(self, val):
        """Returns the number of times the given value occurs in the OSCMessage's arguments
        """
        return self.values().count(val)
    def index(self, val):
        """Returns the index of the first occurence of the given value in the OSCMessage's arguments.
        Raises ValueError if val isn't found
        """
        return self.values().index(val)
    def extend(self, values):
        """Append the contents of 'values' to this OSCMessage.
        'values' can be another OSCMessage, or a list/tuple of ints/floats/strings
        """
        items = self.items() + self._buildItemList(values)
        self._reencode(items)
    def insert(self, i, val, typehint = None):
        """Insert given value (with optional typehint) into the OSCMessage
        at the given index.
        """
        items = self.items()
        for item in reversed(self._buildItemList(val)):
            items.insert(i, item)
        self._reencode(items)
    def popitem(self, i):
        """Delete the indicated argument from the OSCMessage, and return it
        as a (typetag, value) tuple.
        """
        items = self.items()
        item = items.pop(i)
        self._reencode(items)
        return item
    def pop(self, i):
        """Delete the indicated argument from the OSCMessage, and return it.
        """
        return self.popitem(i)[1]
    def reverse(self):
        """Reverses the arguments of the OSCMessage (in place)
        """
        items = self.items()
        items.reverse()
        self._reencode(items)
    def remove(self, val):
        """Removes the first argument with the given value from the OSCMessage.
        Raises ValueError if val isn't found.
        """
        items = self.items()
        # this is not very efficient...
        i = 0
        for (t, v) in items:
            if (v == val):
                break
            i += 1
        else:
            # fixed: the original formatted an undefined name 'm' here,
            # raising NameError instead of the documented ValueError.
            raise ValueError("'%s' not in OSCMessage" % str(val))
        # but more efficient than first calling self.values().index(val),
        # then calling self.items(), which would in turn call self.values() again...
        del items[i]
        self._reencode(items)
    def __iter__(self):
        """Returns an iterator of the OSCMessage's arguments
        """
        return iter(self.values())
    def __reversed__(self):
        """Returns a reverse iterator of the OSCMessage's arguments
        """
        return reversed(self.values())
    def itervalues(self):
        """Returns an iterator of the OSCMessage's arguments
        """
        return iter(self.values())
    def iteritems(self):
        """Returns an iterator of the OSCMessage's arguments as
        (typetag, value) tuples
        """
        return iter(self.items())
    def itertags(self):
        """Returns an iterator of the OSCMessage's arguments' typetags
        """
        return iter(self.tags())
class OSCBundle(OSCMessage):
"""Builds a 'bundle' of OSC messages.
OSCBundle objects are container objects for building OSC-bundles of OSC-messages.
An OSC-bundle is a special kind of OSC-message which contains a list of OSC-messages
(And yes, OSC-bundles may contain other OSC-bundles...)
OSCBundle objects behave much the same as OSCMessage objects, with these exceptions:
- if an item or items to be appended or inserted are not OSCMessage objects,
OSCMessage objectss are created to encapsulate the item(s)
- an OSC-bundle does not have an address of its own, only the contained OSC-messages do.
The OSCBundle's 'address' is inherited by any OSCMessage the OSCBundle object creates.
- OSC-bundles have a timetag to tell the receiver when the bundle should be processed.
The default timetag value (0) means 'immediately'
"""
def __init__(self, address="", time=0):
"""Instantiate a new OSCBundle.
The default OSC-address for newly created OSCMessages
can be specified with the 'address' argument
The bundle's timetag can be set with the 'time' argument
"""
super(OSCBundle, self).__init__(address)
self.timetag = time
def __str__(self):
"""Returns the Bundle's contents (and timetag, if nonzero) as a string.
"""
if (self.timetag > 0.):
out = "#bundle (%s) [" % self.getTimeTagStr()
else:
out = "#bundle ["
if self.__len__():
for val in self.values():
out += "%s, " % str(val)
out = out[:-2] # strip trailing space and comma
return out + "]"
def setTimeTag(self, time):
"""Set or change the OSCBundle's TimeTag
In 'Python Time', that's floating seconds since the Epoch
"""
if time >= 0:
self.timetag = time
def getTimeTagStr(self):
"""Return the TimeTag as a human-readable string
"""
fract, secs = math.modf(self.timetag)
out = time.ctime(secs)[11:19]
out += ("%.3f" % fract)[1:]
return out
def append(self, argument, typehint = None):
"""Appends data to the bundle, creating an OSCMessage to encapsulate
the provided argument unless this is already an OSCMessage.
Any newly created OSCMessage inherits the OSCBundle's address at the time of creation.
If 'argument' is an iterable, its elements will be encapsuated by a single OSCMessage.
Finally, 'argument' can be (or contain) a dict, which will be 'converted' to an OSCMessage;
- if 'addr' appears in the dict, its value overrides the OSCBundle's address
- if 'args' appears in the dict, its value(s) become the OSCMessage's arguments
"""
if isinstance(argument, OSCMessage):
binary = OSCBlob(argument.getBinary())
else:
msg = OSCMessage(self.address)
if type(argument) == types.DictType:
if 'addr' in argument:
msg.setAddress(argument['addr'])
if 'args' in argument:
msg.append(argument['args'], typehint)
else:
msg.append(argument, typehint)
binary = OSCBlob(msg.getBinary())
self.message += binary
self.typetags += 'b'
def getBinary(self):
"""Returns the binary representation of the message
"""
binary = OSCString("#bundle")
binary += OSCTimeTag(self.timetag)
binary += self.message
return binary
def _reencapsulate(self, decoded):
if decoded[0] == "#bundle":
msg = OSCBundle()
msg.setTimeTag(decoded[1])
for submsg in decoded[2:]:
msg.append(self._reencapsulate(submsg))
else:
msg = OSCMessage(decoded[0])
tags = decoded[1].lstrip(',')
for i in range(len(tags)):
msg.append(decoded[2+i], tags[i])
return msg
def values(self):
"""Returns a list of the OSCMessages appended so far
"""
out = []
for decoded in decodeOSC(self.getBinary())[2:]:
out.append(self._reencapsulate(decoded))
return out
def __eq__(self, other):
    """Return True if two OSCBundles have the same timetag & content."""
    if not isinstance(other, self.__class__):
        return False
    return (self.timetag == other.timetag
            and self.typetags == other.typetags
            and self.message == other.message)
def copy(self):
    """Return a deep copy of this OSCBundle."""
    duplicate = super(OSCBundle, self).copy()
    duplicate.timetag = self.timetag
    return duplicate
######
#
# OSCMessage encoding functions
#
######
def OSCString(next):
    """Convert a string into a zero-padded OSC String.
    The length of the resulting string is always a multiple of 4 bytes.
    The string ends with 1 to 4 zero-bytes ('\x00')
    """
    # Room for the data plus at least one terminating NUL, rounded up to a
    # multiple of 4. int() because math.ceil() returns a float in Python 2;
    # the original relied on "%d" silently truncating it.
    OSCstringLength = int(math.ceil((len(next)+1) / 4.0) * 4)
    # pack() zero-pads the string out to OSCstringLength bytes
    return struct.pack(">%ds" % (OSCstringLength), str(next))
def OSCBlob(next):
    """Convert a string into an OSC Blob.
    An OSC-Blob is a binary encoded block of data, prepended by a 'size' (int32).
    The size is always a multiple of 4 bytes.
    The blob ends with 0 to 3 zero-bytes ('\x00')
    Non-string input yields an empty result.
    """
    if type(next) in types.StringTypes:
        # Pad out to a multiple of 4; int() because math.ceil() returns a
        # float in Python 2 and the packed int32 'size' must be integral.
        # NOTE(review): the PADDED length (not the raw data length) is packed
        # as the blob size -- non-spec, but symmetric with _readBlob() below.
        OSCblobLength = int(math.ceil((len(next)) / 4.0) * 4)
        binary = struct.pack(">i%ds" % (OSCblobLength), OSCblobLength, next)
    else:
        binary = ""
    return binary
def OSCArgument(next, typehint=None):
    """Convert some Python types to their OSC binary representations.

    Returns a (typetag, data) tuple; falls back to an OSC-string ('s')
    whenever the requested conversion fails or the type is unknown.
    """
    if typehint == 'f':
        try:
            return ('f', struct.pack(">f", float(next)))
        except ValueError:
            return ('s', OSCString(next))
    if typehint == 'i':
        try:
            return ('i', struct.pack(">i", int(next)))
        except ValueError:
            return ('s', OSCString(next))
    if not typehint:
        # no hint given: pick a tag from the value's actual type
        if type(next) in FloatTypes:
            return ('f', struct.pack(">f", float(next)))
        if type(next) in IntTypes:
            return ('i', struct.pack(">i", int(next)))
        return ('s', OSCString(next))
    # unrecognized hint: encode as string
    return ('s', OSCString(next))
def OSCTimeTag(time):
    """Convert a time in floating seconds to its
    OSC binary representation (two big-endian 32-bit words).
    """
    if time > 0:
        fract, secs = math.modf(time)
        # NOTE(review): the fraction is scaled by 1e9 (nanosecond-style)
        # rather than the OSC-spec 2**32 NTP fraction; this is symmetric
        # with _readTimeTag() below, so round-trips within this library work.
        binary = struct.pack('>ll', long(secs), long(fract * 1e9))
    else:
        # (0, 1) -- presumably the OSC 'immediately' TimeTag; _readTimeTag()
        # decodes any (0, <=1) pair back to 0.0
        binary = struct.pack('>ll', 0L, 1L)
    return binary
######
#
# OSCMessage decoding functions
#
######
def _readString(data):
    """Read the next (null-terminated) OSC-string from 'data'.

    Returns a (string, remainder) tuple; the remainder starts after the
    string's 4-byte-aligned zero padding.
    """
    end = data.find("\0")
    skip = int(math.ceil((end + 1) / 4.0) * 4)
    return (data[0:end], data[skip:])
def _readBlob(data):
    """Read the next (size-prefixed) OSC-blob from 'data'.

    Returns a (blob, remainder) tuple; the remainder starts after the
    blob's 4-byte-aligned padding.
    """
    (length,) = struct.unpack(">i", data[0:4])
    skip = int(math.ceil(length / 4.0) * 4) + 4
    return (data[4:length + 4], data[skip:])
def _readInt(data):
    """Tries to interpret the next 4 bytes of the data
    as a 32-bit integer.
    Returns an (int, remaining-data) tuple.
    """
    if(len(data)<4):
        # not enough bytes: report, return 0 and leave the data untouched
        print "Error: too few bytes for int", data, len(data)
        rest = data
        integer = 0
    else:
        integer = struct.unpack(">i", data[0:4])[0]
        rest = data[4:]
    return (integer, rest)
def _readLong(data):
    """Tries to interpret the next 8 bytes of the data
    as a 64-bit signed integer.
    Returns a (long, remaining-data) tuple.
    """
    high, low = struct.unpack(">ll", data[0:8])
    # combine the two signed 32-bit halves into one 64-bit value
    return ((long(high) << 32) + low, data[8:])
def _readTimeTag(data):
    """Tries to interpret the next 8 bytes of the data
    as a TimeTag (seconds word + nanosecond-style fraction word).
    Returns a (float-seconds, remaining-data) tuple.
    """
    high, low = struct.unpack(">ll", data[0:8])
    if high == 0 and low <= 1:
        # the special 'immediately' TimeTag decodes as 0.0
        when = 0.0
    else:
        when = int(high) + float(low / 1e9)
    return (when, data[8:])
def _readFloat(data):
"""Tries to interpret the next 4 bytes of the data
as a 32-bit float.
"""
if(len(data)<4):
print "Error: too few bytes for float", data, len(data)
rest = data
float = 0
else:
float = struct.unpack(">f", data[0:4])[0]
rest = data[4:]
return (float, rest)
def decodeOSC(data):
    """Converts a binary OSC message to a Python list.

    Messages decode to [address, ',typetags', value, ...];
    bundles decode (recursively) to ["#bundle", timetag, <decoded element>, ...].
    """
    # dispatch-table: typetag character -> decoder function
    table = {"i":_readInt, "f":_readFloat, "s":_readString, "b":_readBlob}
    decoded = []
    address, rest = _readString(data)
    if address.startswith(","):
        # address-less message: what we just read was the typetag-string
        typetags = address
        address = ""
    else:
        typetags = ""
    if address == "#bundle":
        time, rest = _readTimeTag(rest)
        decoded.append(address)
        decoded.append(time)
        # each bundle element is an int32 size followed by that many bytes
        while len(rest)>0:
            length, rest = _readInt(rest)
            decoded.append(decodeOSC(rest[:length]))
            rest = rest[length:]
    elif len(rest)>0:
        if not len(typetags):
            typetags, rest = _readString(rest)
        decoded.append(address)
        decoded.append(typetags)
        if typetags.startswith(","):
            # decode each argument according to its typetag
            for tag in typetags[1:]:
                value, rest = table[tag](rest)
                decoded.append(value)
        else:
            raise OSCError("OSCMessage's typetag-string lacks the magic ','")
    return decoded
######
#
# Utility functions
#
######
def hexDump(bytes):
    """ Useful utility; prints the string in hexadecimal,
    16 bytes per row, followed by the repr() of those bytes.
    """
    # NOTE(review): the parameter shadows the 'bytes' builtin; harmless under
    # Python 2 (bytes is an alias for str there), but worth renaming some day.
    print "byte 0 1 2 3 4 5 6 7 8 9 A B C D E F"
    num = len(bytes)
    for i in range(num):
        if (i) % 16 == 0:
            # start a new row, labelled with the row offset
            line = "%02X0 : " % (i/16)
        line += "%02X " % ord(bytes[i])
        if (i+1) % 16 == 0:
            print "%s: %s" % (line, repr(bytes[i-15:i+1]))
            line = ""
    # print any trailing partial row
    bytes_left = num % 16
    if bytes_left:
        print "%s: %s" % (line.ljust(54), repr(bytes[-bytes_left:]))
def getUrlStr(*args):
    """Convert provided arguments to a string in 'host:port/prefix' format
    Args can be:
      - (host, port)
      - (host, port), prefix
      - host, port
      - host, port, prefix
    """
    if not len(args):
        return ""
    # split off the (host, port) pair, however it was supplied
    if type(args[0]) == types.TupleType:
        (host, port) = args[0][:2]
        args = args[1:]
    else:
        host, port = args[0], args[1]
        args = args[2:]
    prefix = args[0] if len(args) else ""
    if len(host) and (host != '0.0.0.0'):
        # best-effort reverse-DNS, so the URL shows a hostname if possible
        try:
            (host, _, _) = socket.gethostbyaddr(host)
        except socket.error:
            pass
    else:
        host = 'localhost'
    if type(port) == types.IntType:
        return "%s:%d%s" % (host, port, prefix)
    return host + prefix
def parseUrlStr(url):
    """Convert provided string in 'host:port/prefix' format to its components
    Returns ((host, port), prefix)
    """
    if not (type(url) in types.StringTypes and len(url)):
        return (None, '')
    i = url.find("://")
    if i > -1:
        # strip any 'osc://'-style scheme
        url = url[i+3:]
    i = url.find(':')
    if i > -1:
        host = url[:i].strip()
        tail = url[i+1:].strip()
    else:
        host = ''
        tail = url
    # consume the leading digits of 'tail' as the port number; the for/else
    # bumps i past the end when ALL of tail is digits
    for i in range(len(tail)):
        if not tail[i].isdigit():
            break
    else:
        i += 1
    # NOTE(review): if 'tail' is empty this loop never runs and 'i' keeps its
    # value from the ':'-search above -- apparently harmless, but fragile.
    portstr = tail[:i].strip()
    tail = tail[i:].strip()
    # the prefix ends at the first '/', '+', '-' or '*' (filter characters)
    found = len(tail)
    for c in ('/', '+', '-', '*'):
        i = tail.find(c)
        if (i > -1) and (i < found):
            found = i
    head = tail[:found].strip()
    prefix = tail[found:].strip()
    prefix = prefix.strip('/')
    if len(prefix) and prefix[0] not in ('+', '-', '*'):
        prefix = '/' + prefix
    if len(head) and not len(host):
        host = head
    if len(host):
        # best-effort forward DNS lookup
        try:
            host = socket.gethostbyname(host)
        except socket.error:
            pass
    try:
        port = int(portstr)
    except ValueError:
        port = None
    return ((host, port), prefix)
######
#
# OSCClient class
#
######
class OSCClient(object):
"""Simple OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
"""
# set outgoing socket buffer size
sndbuf_size = 4096 * 8
def __init__(self, server=None):
"""Construct an OSC Client.
When the 'address' argument is given this client is connected to a specific remote server.
- address ((host, port) tuple): the address of the remote server to send all messages to
Otherwise it acts as a generic client:
If address == 'None', the client doesn't connect to a specific remote server,
and the remote address must be supplied when calling sendto()
- server: Local OSCServer-instance this client will use the socket of for transmissions.
If none is supplied, a socket will be created.
"""
self.socket = None
if server == None:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
self._fd = self.socket.fileno()
self.server = None
else:
self.setServer(server)
self.client_address = None
def setServer(self, server):
"""Associate this Client with given server.
The Client will send from the Server's socket.
The Server will use this Client instance to send replies.
"""
if not isinstance(server, OSCServer):
raise ValueError("'server' argument is not a valid OSCServer object")
if self.socket != None:
self.close()
self.socket = server.socket.dup()
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
self._fd = self.socket.fileno()
self.server = server
if self.server.client != None:
self.server.client.close()
self.server.client = self
def close(self):
"""Disconnect & close the Client's socket
"""
if self.socket != None:
self.socket.close()
self.socket = None
def __str__(self):
"""Returns a string containing this Client's Class-name, software-version
and the remote-address it is connected to (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.address()
if addr:
out += " connected to osc://%s" % getUrlStr(addr)
else:
out += " (unconnected)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
isequal = cmp(self.socket._sock, other.socket._sock)
if isequal and self.server and other.server:
return cmp(self.server, other.server)
return isequal
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
def address(self):
"""Returns a (host,port) tuple of the remote server this client is
connected to or None if not connected to any server.
"""
try:
return self.socket.getpeername()
except socket.error:
return None
def connect(self, address):
"""Bind to a specific OSC server:
the 'address' argument is a (host, port) tuple
- host: hostname of the remote OSC server,
- port: UDP-port the remote OSC server listens to.
"""
try:
self.socket.connect(address)
self.client_address = address
except socket.error, e:
self.client_address = None
raise OSCClientError("SocketError: %s" % str(e))
if self.server != None:
self.server.return_port = address[1]
def sendto(self, msg, address, timeout=None):
"""Send the given OSCMessage to the specified address.
- msg: OSCMessage (or OSCBundle) to be sent
- address: (host, port) tuple specifing remote server to send the message to
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
self.socket.connect(address)
self.socket.sendall(msg.getBinary())
if self.client_address:
self.socket.connect(self.client_address)
except socket.error, e:
if e[0] in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
def send(self, msg, timeout=None):
"""Send the given OSCMessage.
The Client must be already connected.
- msg: OSCMessage (or OSCBundle) to be sent
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket,
or when the Client isn't connected to a remote server.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
self.socket.sendall(msg.getBinary())
except socket.error, e:
if e[0] in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending: %s" % str(e))
######
#
# FilterString Utility functions
#
######
def parseFilterStr(args):
    """Convert Message-Filter settings in '+<addr> -<addr> ...' format to a dict of the form
    { '<addr>':True, '<addr>':False, ... }
    Returns a list: ['<prefix>', filters]
    """
    out = {}
    if type(args) in types.StringTypes:
        args = [args]
    prefix = None
    for arg in args:
        head = None
        # split the string on '+' (accept-filters); each piece may itself
        # contain '-'-separated reject-filters
        for plus in arg.split('+'):
            minus = plus.split('-')
            plusfs = minus.pop(0).strip()
            if len(plusfs):
                # normalize: exactly one leading '/', no trailing '/'
                plusfs = '/' + plusfs.strip('/')
            if (head == None) and (plusfs != "/*"):
                # the first non-wildcard token is treated as the prefix
                head = plusfs
            elif len(plusfs):
                if plusfs == '/*':
                    out = { '/*':True } # reset all previous filters
                else:
                    out[plusfs] = True
            for minusfs in minus:
                minusfs = minusfs.strip()
                if len(minusfs):
                    minusfs = '/' + minusfs.strip('/')
                    if minusfs == '/*':
                        out = { '/*':False } # reset all previous filters
                    else:
                        out[minusfs] = False
        if prefix == None:
            prefix = head
    return [prefix, out]
def getFilterStr(filters):
    """Return the given 'filters' dict as a list of
    '+<addr>' | '-<addr>' filter-strings
    """
    if not len(filters):
        return []
    # The first entry states the default policy ('+/*' = pass, '-/*' = block):
    # an explicit '/*' entry wins; otherwise any blacklisted address implies
    # pass-by-default, and an all-whitelist implies block-by-default.
    if '/*' in filters.keys():
        default = filters['/*']
    else:
        default = (False in filters.values())
    if default:
        out = ["+/*"]
    else:
        out = ["-/*"]
    for (addr, accept) in filters.items():
        if addr == '/*':
            continue
        if accept:
            out.append("+%s" % addr)
        else:
            out.append("-%s" % addr)
    return out
# A translation-table for mapping OSC-address expressions to Python 're' expressions
OSCtrans = string.maketrans("{,}?","(|).")
def getRegEx(pattern):
    """Compiles and returns a 'regular expression' object for the given
    OSC address-pattern.
    """
    # Escape the characters that are literal in OSC but special in 're'
    # ('.' first, so the dots inserted below survive untouched) ...
    for ch in (".", "(", ")"):
        pattern = pattern.replace(ch, "\\" + ch)
    # ... then map the OSC wildcard '*' onto 're' syntax ...
    pattern = pattern.replace("*", r".*")
    # ... and '?' -> '.', '{a,b}' -> '(a|b)' via the module-level table
    return re.compile(pattern.translate(OSCtrans))
######
#
# OSCMultiClient class
#
######
class OSCMultiClient(OSCClient):
"""'Multiple-Unicast' OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
This client keeps a dict of 'OSCTargets'. and sends each OSCMessage to each OSCTarget
The OSCTargets are simply (host, port) tuples, and may be associated with an OSC-address prefix.
the OSCTarget's prefix gets prepended to each OSCMessage sent to that target.
"""
def __init__(self, server=None):
"""Construct a "Multi" OSC Client.
- server: Local OSCServer-instance this client will use the socket of for transmissions.
If none is supplied, a socket will be created.
"""
super(OSCMultiClient, self).__init__(server)
self.targets = {}
def _searchHostAddr(self, host):
"""Search the subscribed OSCTargets for (the first occurence of) given host.
Returns a (host, port) tuple
"""
try:
host = socket.gethostbyname(host)
except socket.error:
pass
for addr in self.targets.keys():
if host == addr[0]:
return addr
raise NotSubscribedError((host, None))
def _updateFilters(self, dst, src):
"""Update a 'filters' dict with values form another 'filters' dict:
- src[a] == True and dst[a] == False: del dst[a]
- src[a] == False and dst[a] == True: del dst[a]
- a not in dst: dst[a] == src[a]
"""
if '/*' in src.keys(): # reset filters
dst.clear() # 'match everything' == no filters
if not src.pop('/*'):
dst['/*'] = False # 'match nothing'
for (addr, bool) in src.items():
if (addr in dst.keys()) and (dst[addr] != bool):
del dst[addr]
else:
dst[addr] = bool
def _setTarget(self, address, prefix=None, filters=None):
"""Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
- address ((host, port) tuple): IP-address & UDP-port
- prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
sent to this OSCTarget (optional)
"""
if address not in self.targets.keys():
self.targets[address] = ["",{}]
if prefix != None:
if len(prefix):
# make sure prefix starts with ONE '/', and does not end with '/'
prefix = '/' + prefix.strip('/')
self.targets[address][0] = prefix
if filters != None:
if type(filters) in types.StringTypes:
(_, filters) = parseFilterStr(filters)
elif type(filters) != types.DictType:
raise TypeError("'filters' argument must be a dict with {addr:bool} entries")
self._updateFilters(self.targets[address][1], filters)
def setOSCTarget(self, address, prefix=None, filters=None):
"""Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
the 'address' argument can be a ((host, port) tuple) : The target server address & UDP-port
or a 'host' (string) : The host will be looked-up
- prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
sent to this OSCTarget (optional)
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
elif (type(address) == types.TupleType):
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except:
pass
address = (host, port)
else:
raise TypeError("'address' argument must be a (host, port) tuple or a 'host' string")
self._setTarget(address, prefix, filters)
def setOSCTargetFromStr(self, url):
"""Adds or modifies a subscribed OSCTarget from the given string, which should be in the
'<host>:<port>[/<prefix>] [+/<filter>]|[-/<filter>] ...' format.
"""
(addr, tail) = parseUrlStr(url)
(prefix, filters) = parseFilterStr(tail)
self._setTarget(addr, prefix, filters)
def _delTarget(self, address, prefix=None):
"""Delete the specified OSCTarget from the Client's dict.
the 'address' argument must be a (host, port) tuple.
If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
"""
try:
if prefix == None:
del self.targets[address]
elif prefix == self.targets[address][0]:
del self.targets[address]
except KeyError:
raise NotSubscribedError(address, prefix)
def delOSCTarget(self, address, prefix=None):
"""Delete the specified OSCTarget from the Client's dict.
the 'address' argument can be a ((host, port) tuple), or a hostname.
If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
if type(address) == types.TupleType:
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
self._delTarget(address, prefix)
def hasOSCTarget(self, address, prefix=None):
"""Return True if the given OSCTarget exists in the Client's dict.
the 'address' argument can be a ((host, port) tuple), or a hostname.
If the 'prefix' argument is given, the return-value is only True if the address and prefix match.
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
if type(address) == types.TupleType:
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
if address in self.targets.keys():
if prefix == None:
return True
elif prefix == self.targets[address][0]:
return True
return False
def getOSCTargets(self):
"""Returns the dict of OSCTargets: {addr:[prefix, filters], ...}
"""
out = {}
for ((host, port), pf) in self.targets.items():
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
out[(host, port)] = pf
return out
def getOSCTarget(self, address):
"""Returns the OSCTarget matching the given address as a ((host, port), [prefix, filters]) tuple.
'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
Returns (None, ['',{}]) if address not found.
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
if (type(address) == types.TupleType):
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
if (address in self.targets.keys()):
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
return ((host, port), self.targets[address])
return (None, ['',{}])
def clearOSCTargets(self):
"""Erases all OSCTargets from the Client's dict
"""
self.targets = {}
def updateOSCTargets(self, dict):
"""Update the Client's OSCTargets dict with the contents of 'dict'
The given dict's items MUST be of the form
{ (host, port):[prefix, filters], ... }
"""
for ((host, port), (prefix, filters)) in dict.items():
val = [prefix, {}]
self._updateFilters(val[1], filters)
try:
host = socket.gethostbyname(host)
except socket.error:
pass
self.targets[(host, port)] = val
def getOSCTargetStr(self, address):
"""Returns the OSCTarget matching the given address as a ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuple.
'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
Returns (None, []) if address not found.
"""
(addr, (prefix, filters)) = self.getOSCTarget(address)
if addr == None:
return (None, [])
return ("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters))
def getOSCTargetStrings(self):
"""Returns a list of all OSCTargets as ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuples.
"""
out = []
for (addr, (prefix, filters)) in self.targets.items():
out.append(("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters)))
return out
def connect(self, address):
"""The OSCMultiClient isn't allowed to connect to any specific
address.
"""
return NotImplemented
def sendto(self, msg, address, timeout=None):
"""Send the given OSCMessage.
The specified address is ignored. Instead this method calls send() to
send the message to all subscribed clients.
- msg: OSCMessage (or OSCBundle) to be sent
- address: (host, port) tuple specifing remote server to send the message to
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
self.send(msg, timeout)
def _filterMessage(self, filters, msg):
"""Checks the given OSCMessge against the given filters.
'filters' is a dict containing OSC-address:bool pairs.
If 'msg' is an OSCBundle, recursively filters its constituents.
Returns None if the message is to be filtered, else returns the message.
or
Returns a copy of the OSCBundle with the filtered messages removed.
"""
if isinstance(msg, OSCBundle):
out = msg.copy()
msgs = out.values()
out.clearData()
for m in msgs:
m = self._filterMessage(filters, m)
if m: # this catches 'None' and empty bundles.
out.append(m)
elif isinstance(msg, OSCMessage):
if '/*' in filters.keys():
if filters['/*']:
out = msg
else:
out = None
elif False in filters.values():
out = msg
else:
out = None
else:
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
expr = getRegEx(msg.address)
for addr in filters.keys():
if addr == '/*':
continue
match = expr.match(addr)
if match and (match.end() == len(addr)):
if filters[addr]:
out = msg
else:
out = None
break
return out
def _prefixAddress(self, prefix, msg):
"""Makes a copy of the given OSCMessage, then prepends the given prefix to
The message's OSC-address.
If 'msg' is an OSCBundle, recursively prepends the prefix to its constituents.
"""
out = msg.copy()
if isinstance(msg, OSCBundle):
msgs = out.values()
out.clearData()
for m in msgs:
out.append(self._prefixAddress(prefix, m))
elif isinstance(msg, OSCMessage):
out.setAddress(prefix + out.address)
else:
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
return out
def send(self, msg, timeout=None):
"""Send the given OSCMessage to all subscribed OSCTargets
- msg: OSCMessage (or OSCBundle) to be sent
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
for (address, (prefix, filters)) in self.targets.items():
if len(filters):
out = self._filterMessage(filters, msg)
if not out: # this catches 'None' and empty bundles.
continue
else:
out = msg
if len(prefix):
out = self._prefixAddress(prefix, msg)
binary = out.getBinary()
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
while len(binary):
sent = self.socket.sendto(binary, address)
binary = binary[sent:]
except socket.error, e:
if e[0] in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
######
#
# OSCRequestHandler classes
#
######
class OSCRequestHandler(DatagramRequestHandler):
    """RequestHandler class for the OSCServer
    """
    def dispatchMessage(self, pattern, tags, data):
        """Attempt to match the given OSC-address pattern, which may contain '*',
        against all callbacks registered with the OSCServer.
        Calls the matching callback and returns whatever it returns.
        If no match is found, and a 'default' callback is registered, it calls that one,
        or raises NoCallbackError if a 'default' callback is not registered.
          - pattern (string): The OSC-address of the received message
          - tags (string): The OSC-typetags of the received message's arguments, without ','
          - data (list): The message arguments
        """
        if len(tags) != len(data):
            raise OSCServerError("Malformed OSC-message; got %d typetags [%s] vs. %d values" % (len(tags), tags, len(data)))
        expr = getRegEx(pattern)
        replies = []
        matched = 0
        for addr in self.server.callbacks.keys():
            match = expr.match(addr)
            # only a full-length match of the registered address counts
            if match and (match.end() == len(addr)):
                reply = self.server.callbacks[addr](pattern, tags, data, self.client_address)
                matched += 1
                if isinstance(reply, OSCMessage):
                    replies.append(reply)
                elif reply != None:
                    raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.server.callbacks[addr], type(reply)))
        if matched == 0:
            # nothing matched: fall back to the 'default' callback, if any
            if 'default' in self.server.callbacks:
                reply = self.server.callbacks['default'](pattern, tags, data, self.client_address)
                if isinstance(reply, OSCMessage):
                    replies.append(reply)
                elif reply != None:
                    raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.server.callbacks['default'], type(reply)))
            else:
                raise NoCallbackError(pattern)
        return replies

    def setup(self):
        """Prepare RequestHandler.
        Unpacks request as (packet, source socket address)
        Creates an empty list for replies.
        """
        (self.packet, self.socket) = self.request
        self.replies = []

    def _unbundle(self, decoded):
        """Recursive bundle-unpacking function"""
        if decoded[0] != "#bundle":
            # plain message: decoded = [address, ',typetags', arg, ...];
            # [1:] strips the leading ',' from the typetag-string
            self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:])
            return
        # honour the bundle's timetag: sleep until the requested time
        now = time.time()
        timetag = decoded[1]
        if (timetag > 0.) and (timetag > now):
            time.sleep(timetag - now)
        for msg in decoded[2:]:
            self._unbundle(msg)

    def handle(self):
        """Handle incoming OSCMessage
        """
        decoded = decodeOSC(self.packet)
        if not len(decoded):
            return
        self._unbundle(decoded)

    def finish(self):
        """Finish handling OSCMessage.
        Send any reply returned by the callback(s) back to the originating client
        as an OSCMessage or OSCBundle
        """
        if self.server.return_port:
            # redirect replies to the server's configured return-port
            self.client_address = (self.client_address[0], self.server.return_port)
        if len(self.replies) > 1:
            # multiple replies are wrapped in a single OSCBundle
            msg = OSCBundle()
            for reply in self.replies:
                msg.append(reply)
        elif len(self.replies) == 1:
            msg = self.replies[0]
        else:
            # no replies: nothing to send back
            return
        self.server.client.sendto(msg, self.client_address)
class ThreadingOSCRequestHandler(OSCRequestHandler):
    """Multi-threaded OSCRequestHandler;
    Starts a new RequestHandler thread for each unbundled OSCMessage
    """
    def _unbundle(self, decoded):
        """Recursive bundle-unpacking function
        This version starts a new thread for each sub-Bundle found in the Bundle,
        then waits for all its children to finish.
        """
        if decoded[0] != "#bundle":
            # plain message: dispatch synchronously on THIS thread
            # NOTE(review): self.replies is appended to from multiple threads;
            # presumably safe under the GIL for '+=' on a list -- verify.
            self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:])
            return
        # honour the bundle's timetag before spawning the workers
        now = time.time()
        timetag = decoded[1]
        if (timetag > 0.) and (timetag > now):
            time.sleep(timetag - now)
            now = time.time()
        children = []
        for msg in decoded[2:]:
            t = threading.Thread(target = self._unbundle, args = (msg,))
            t.start()
            children.append(t)
        # wait for all children to terminate
        for t in children:
            t.join()
######
#
# OSCServer classes
#
######
class OSCServer(UDPServer):
"""A Synchronous OSCServer
Serves one request at-a-time, until the OSCServer is closed.
The OSC address-pattern is matched against a set of OSC-adresses
that have been registered to the server with a callback-function.
If the adress-pattern of the message machtes the registered address of a callback,
that function is called.
"""
# set the RequestHandlerClass, will be overridden by ForkingOSCServer & ThreadingOSCServer
RequestHandlerClass = OSCRequestHandler
# define a socket timeout, so the serve_forever loop can actually exit.
socket_timeout = 1
# DEBUG: print error-tracebacks (to stderr)?
print_tracebacks = False
def __init__(self, server_address, client=None, return_port=0):
    """Instantiate an OSCServer.
      - server_address ((host, port) tuple): the local host & UDP-port
      the server listens on
      - client (OSCClient instance): The OSCClient used to send replies from this server.
      If none is supplied (default) an OSCClient will be created.
      - return_port (int): if supplied, sets the default UDP destination-port
      for replies coming from this server.
    """
    UDPServer.__init__(self, server_address, self.RequestHandlerClass)
    self.callbacks = {}
    self.setReturnPort(return_port)
    self.error_prefix = ""
    self.info_prefix = "/info"
    # time-out on the socket so serve_forever() can re-check self.running
    self.socket.settimeout(self.socket_timeout)
    self.running = False
    self.client = None
    if client == None:
        # create a reply-client that shares this server's socket
        self.client = OSCClient(server=self)
    else:
        self.setClient(client)
def setClient(self, client):
    """Associate this Server with a new local Client instance, closing the Client this Server is currently using.
    """
    if not isinstance(client, OSCClient):
        raise ValueError("'client' argument is not a valid OSCClient object")
    if client.server != None:
        raise OSCServerError("Provided OSCClient already has an OSCServer-instance: %s" % str(client.server))
    # Server socket is already listening at this point, so we can't use the client's socket.
    # we'll have to force our socket on the client...
    client_address = client.address()   # client may be already connected
    client.close()              # shut-down that socket
    # force our socket upon the client
    client.socket = self.socket.dup()
    client.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, client.sndbuf_size)
    client._fd = client.socket.fileno()
    client.server = self
    if client_address:
        # restore the client's previous connection through the new socket
        client.connect(client_address)
        if not self.return_port:
            self.return_port = client_address[1]
    # this server keeps exactly one reply-client; replace any previous one
    if self.client != None:
        self.client.close()
    self.client = client
def serve_forever(self):
    """Handle one request at a time until server is closed."""
    self.running = True
    while self.running:
        # handle_request() times-out when no data arrives (see
        # socket_timeout), so the 'running' flag is re-checked regularly
        self.handle_request()
def close(self):
    """Stops serving requests, closes server (socket), closes used client
    """
    # clearing 'running' makes serve_forever() fall out of its loop
    self.running = False
    self.client.close()
    self.server_close()
def __str__(self):
    """Returns a string containing this Server's Class-name, software-version and local bound address (if any)
    """
    out = self.__class__.__name__
    # 'version' is a module-level (major, minor, build) tuple
    out += " v%s.%s-%s" % version
    addr = self.address()
    if addr:
        out += " listening on osc://%s" % getUrlStr(addr)
    else:
        out += " (unbound)"
    return out
def __eq__(self, other):
    """Return True if 'other' is an OSCServer bound to the same socket.
    """
    if not isinstance(other, self.__class__):
        return False
    # BUGFIX: the original returned cmp() results, which are 0 exactly
    # when the sockets ARE equal -- inverting the meaning of '=='.
    return self.socket._sock == other.socket._sock
def __ne__(self, other):
    """Inverse of __eq__; see above."""
    return not (self == other)
def address(self):
    """Returns a (host,port) tuple of the local address this server is bound to,
    or None if not bound to any address.
    """
    try:
        return self.socket.getsockname()
    except socket.error:
        # socket not (yet) bound
        return None
def setReturnPort(self, port):
    """Set the destination UDP-port for replies returning from this server
    to the remote client.
    Ports outside the 1025-65535 range clear the setting (return_port = None).
    """
    self.return_port = port if (1024 < port < 65536) else None
def setSrvInfoPrefix(self, pattern):
    """Set the first part of OSC-address (pattern) this server will use
    to reply to server-info requests.
    """
    if pattern:
        # normalize: exactly one leading '/', no trailing '/'
        pattern = '/' + pattern.strip('/')
    self.info_prefix = pattern
def setSrvErrorPrefix(self, pattern=""):
    """Set the OSC-address (pattern) this server will use to report errors
    occurring during received message handling to the remote client.
    If pattern is empty (default), server-errors are not reported back to the client.
    """
    if pattern:
        # normalize: exactly one leading '/', no trailing '/'
        pattern = '/' + pattern.strip('/')
    self.error_prefix = pattern
def addMsgHandler(self, address, callback):
"""Register a handler for an OSC-address
- 'address' is the OSC address-string.
the address-string should start with '/' and may not contain '*'
- 'callback' is the function called for incoming OSCMessages that match 'address'.
The callback-function will be called with the same arguments as the 'msgPrinter_handler' below
"""
for chk in '*?,[]{}# ':
if chk in address:
raise OSCServerError("OSC-address string may not contain any characters in '*?,[]{}# '")
if type(callback) not in (types.FunctionType, types.MethodType):
raise OSCServerError("Message callback '%s' is not callable" % repr(callback))
if address != 'default':
address = '/' + address.strip('/')
self.callbacks[address] = callback
def delMsgHandler(self,address):
"""Remove the registered handler for the given OSC-address
"""
del self.callbacks[address]
def getOSCAddressSpace(self):
"""Returns a list containing all OSC-addresses registerd with this Server.
"""
return self.callbacks.keys()
def addDefaultHandlers(self, prefix="", info_prefix="/info", error_prefix="/error"):
"""Register a default set of OSC-address handlers with this Server:
- 'default' -> noCallback_handler
the given prefix is prepended to all other callbacks registered by this method:
- '<prefix><info_prefix' -> serverInfo_handler
- '<prefix><error_prefix> -> msgPrinter_handler
- '<prefix>/print' -> msgPrinter_handler
and, if the used Client supports it;
- '<prefix>/subscribe' -> subscription_handler
- '<prefix>/unsubscribe' -> subscription_handler
Note: the given 'error_prefix' argument is also set as default 'error_prefix' for error-messages
*sent from* this server. This is ok, because error-messages generally do not elicit a reply from the receiver.
To do this with the serverInfo-prefixes would be a bad idea, because if a request received on '/info' (for example)
would send replies to '/info', this could potentially cause a never-ending loop of messages!
Do *not* set the 'info_prefix' here (for incoming serverinfo requests) to the same value as given to
the setSrvInfoPrefix() method (for *replies* to incoming serverinfo requests).
For example, use '/info' for incoming requests, and '/inforeply' or '/serverinfo' or even just '/print' as the
info-reply prefix.
"""
self.error_prefix = error_prefix
self.addMsgHandler('default', self.noCallback_handler)
self.addMsgHandler(prefix + info_prefix, self.serverInfo_handler)
self.addMsgHandler(prefix + error_prefix, self.msgPrinter_handler)
self.addMsgHandler(prefix + '/print', self.msgPrinter_handler)
if isinstance(self.client, OSCMultiClient):
self.addMsgHandler(prefix + '/subscribe', self.subscription_handler)
self.addMsgHandler(prefix + '/unsubscribe', self.subscription_handler)
def printErr(self, txt):
"""Writes 'OSCServer: txt' to sys.stderr
"""
sys.stderr.write("OSCServer: %s\n" % txt)
def sendOSCerror(self, txt, client_address):
"""Sends 'txt', encapsulated in an OSCMessage to the default 'error_prefix' OSC-addres.
Message is sent to the given client_address, with the default 'return_port' overriding
the client_address' port, if defined.
"""
lines = txt.split('\n')
if len(lines) == 1:
msg = OSCMessage(self.error_prefix)
msg.append(lines[0])
elif len(lines) > 1:
msg = OSCBundle(self.error_prefix)
for line in lines:
msg.append(line)
else:
return
if self.return_port:
client_address = (client_address[0], self.return_port)
self.client.sendto(msg, client_address)
def reportErr(self, txt, client_address):
"""Writes 'OSCServer: txt' to sys.stderr
If self.error_prefix is defined, sends 'txt' as an OSC error-message to the client(s)
(see printErr() and sendOSCerror())
"""
self.printErr(txt)
if len(self.error_prefix):
self.sendOSCerror(txt, client_address)
def sendOSCinfo(self, txt, client_address):
"""Sends 'txt', encapsulated in an OSCMessage to the default 'info_prefix' OSC-addres.
Message is sent to the given client_address, with the default 'return_port' overriding
the client_address' port, if defined.
"""
lines = txt.split('\n')
if len(lines) == 1:
msg = OSCMessage(self.info_prefix)
msg.append(lines[0])
elif len(lines) > 1:
msg = OSCBundle(self.info_prefix)
for line in lines:
msg.append(line)
else:
return
if self.return_port:
client_address = (client_address[0], self.return_port)
self.client.sendto(msg, client_address)
###
# Message-Handler callback functions
###
def handle_error(self, request, client_address):
"""Handle an exception in the Server's callbacks gracefully.
Writes the error to sys.stderr and, if the error_prefix (see setSrvErrorPrefix()) is set,
sends the error-message as reply to the client
"""
(e_type, e) = sys.exc_info()[:2]
self.printErr("%s on request from %s: %s" % (e_type.__name__, getUrlStr(client_address), str(e)))
if self.print_tracebacks:
import traceback
traceback.print_exc() # XXX But this goes to stderr!
if len(self.error_prefix):
self.sendOSCerror("%s: %s" % (e_type.__name__, str(e)), client_address)
def noCallback_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
All registerd handlers must accept these three arguments:
- addr (string): The OSC-address pattern of the received Message
(the 'addr' string has already been matched against the handler's registerd OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler prints a "No callback registered to handle ..." message.
Returns None
"""
self.reportErr("No callback registered to handle OSC-address '%s'" % addr, client_address)
def msgPrinter_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
All registerd handlers must accept these three arguments:
- addr (string): The OSC-address pattern of the received Message
(the 'addr' string has already been matched against the handler's registerd OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler prints the received message.
Returns None
"""
txt = "OSCMessage '%s' from %s: " % (addr, getUrlStr(client_address))
txt += str(data)
self.printErr(txt) # strip trailing comma & space
    def serverInfo_handler(self, addr, tags, data, client_address):
        """Handle '/info' requests: reply with information about this server.

        Arguments follow the standard handler signature:
          - addr (string): the OSC-address pattern of the received message
          - tags (string): the OSC-typetags of the message's arguments (no leading comma)
          - data (list): the message's arguments; len(tags) == len(data)
          - client_address ((host, port) tuple): origin of the message
        The first argument of the received message selects the reply:
          - 'help' | 'info' : server type & version info, plus a list of
            available 'commands' understood by this handler
          - 'list' | 'ls' : a bundle of 'address <string>' messages, listing
            this server's OSC address-space
          - 'clients' | 'targets' : a bundle of
            'target osc://<host>:<port>[<prefix>] [<filter>] [...]' messages,
            listing the local Client-instance's subscribed remote clients
        Returns the reply (OSCMessage or OSCBundle), or None for an empty request.
        """
        if len(data) == 0:
            return None

        # the first argument selects the command; any further args are ignored
        cmd = data.pop(0)

        reply = None
        if cmd in ('help', 'info'):
            reply = OSCBundle(self.info_prefix)
            reply.append(('server', str(self)))
            reply.append(('info_command', "ls | list : list OSC address-space"))
            reply.append(('info_command', "clients | targets : list subscribed clients"))
        elif cmd in ('ls', 'list'):
            reply = OSCBundle(self.info_prefix)
            for addr in self.callbacks.keys():
                reply.append(('address', addr))
        elif cmd in ('clients', 'targets'):
            if hasattr(self.client, 'getOSCTargetStrings'):
                # OSCMultiClient: one 'target' message per subscribed client
                reply = OSCBundle(self.info_prefix)
                for trg in self.client.getOSCTargetStrings():
                    reply.append(('target',) + trg)
            else:
                # plain OSCClient: report its single connected address, if any
                cli_addr = self.client.address()
                if cli_addr:
                    reply = OSCMessage(self.info_prefix)
                    reply.append(('target', "osc://%s/" % getUrlStr(cli_addr)))
        else:
            self.reportErr("unrecognized command '%s' in /info request from osc://%s. Try 'help'" % (cmd, getUrlStr(client_address)), client_address)

        return reply
    def _subscribe(self, data, client_address):
        """Handle the actual subscription. The provided 'data' is concatenated
        together to form a '<host>:<port>[<prefix>] [<filter>] [...]' string,
        which is then passed to parseUrlStr() & parseFilterStr() to actually
        retrieve <host>, <port>, etc.

        This 'long way 'round' approach (almost) guarantees that the
        subscription works, regardless of how the bits of the <url> are
        encoded in 'data'.
        Returns an OSCMessage describing the (new) subscription, or None.
        """
        url = ""
        have_port = False
        for item in data:
            # the first int argument is taken as the port; all string
            # arguments are glued onto the url as-is
            if (type(item) == types.IntType) and not have_port:
                url += ":%d" % item
                have_port = True
            elif type(item) in types.StringTypes:
                url += item

        (addr, tail) = parseUrlStr(url)
        (prefix, filters) = parseFilterStr(tail)

        if addr != None:
            (host, port) = addr
            if not host:
                host = client_address[0]    # default to the sender's host
            if not port:
                port = client_address[1]    # default to the sender's port
            addr = (host, port)
        else:
            # no address in the url at all: subscribe the message's sender
            addr = client_address

        self.client._setTarget(addr, prefix, filters)

        trg = self.client.getOSCTargetStr(addr)
        if trg[0] != None:
            reply = OSCMessage(self.info_prefix)
            reply.append(('target',) + trg)
            return reply
    def _unsubscribe(self, data, client_address):
        """Handle the actual unsubscription. The provided 'data' is concatenated
        together to form a '<host>:<port>[<prefix>]' string, which is then
        passed to parseUrlStr() to actually retrieve <host>, <port> & <prefix>.

        This 'long way 'round' approach (almost) guarantees that the
        unsubscription works, regardless of how the bits of the <url> are
        encoded in 'data'.
        Returns None on success, or an OSCMessage reporting the failure when
        the target was not subscribed.
        """
        url = ""
        have_port = False
        for item in data:
            # the first int argument is taken as the port; all string
            # arguments are glued onto the url as-is
            if (type(item) == types.IntType) and not have_port:
                url += ":%d" % item
                have_port = True
            elif type(item) in types.StringTypes:
                url += item

        (addr, _) = parseUrlStr(url)

        if addr == None:
            # no address given: unsubscribe the message's sender
            addr = client_address
        else:
            (host, port) = addr
            if not host:
                host = client_address[0]
            if not port:
                try:
                    # look the port up among the subscribed hosts first
                    (host, port) = self.client._searchHostAddr(host)
                except NotSubscribedError:
                    port = client_address[1]

            addr = (host, port)

        try:
            self.client._delTarget(addr)
        except NotSubscribedError, e:
            # report (and reply with) the failure instead of raising it
            txt = "%s: %s" % (e.__class__.__name__, str(e))
            self.printErr(txt)

            reply = OSCMessage(self.error_prefix)
            reply.append(txt)
            return reply
def subscription_handler(self, addr, tags, data, client_address):
"""Handle 'subscribe' / 'unsubscribe' requests from remote hosts,
if the local Client supports this (i.e. OSCMultiClient).
Supported commands:
- 'help' | 'info' : Reply contains server type & version info, plus a list of
available 'commands' understood by this handler
- 'list' | 'ls' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
messages, listing the local Client-instance's subscribed remote clients.
- '[subscribe | listen | sendto | target] <url> [<filter> ...] : Subscribe remote client/server at <url>,
and/or set message-filters for messages being sent to the subscribed host, with the optional <filter>
arguments. Filters are given as OSC-addresses (or '*') prefixed by a '+' (send matching messages) or
a '-' (don't send matching messages). The wildcard '*', '+*' or '+/*' means 'send all' / 'filter none',
and '-*' or '-/*' means 'send none' / 'filter all' (which is not the same as unsubscribing!)
Reply is an OSCMessage with the (new) subscription; 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
- '[unsubscribe | silence | nosend | deltarget] <url> : Unsubscribe remote client/server at <url>
If the given <url> isn't subscribed, a NotSubscribedError-message is printed (and possibly sent)
The <url> given to the subscribe/unsubscribe handler should be of the form:
'[osc://][<host>][:<port>][<prefix>]', where any or all components can be omitted.
If <host> is not specified, the IP-address of the message's source is used.
If <port> is not specified, the <host> is first looked up in the list of subscribed hosts, and if found,
the associated port is used.
If <port> is not specified and <host> is not yet subscribed, the message's source-port is used.
If <prefix> is specified on subscription, <prefix> is prepended to the OSC-address of all messages
sent to the subscribed host.
If <prefix> is specified on unsubscription, the subscribed host is only unsubscribed if the host,
port and prefix all match the subscription.
If <prefix> is not specified on unsubscription, the subscribed host is unsubscribed if the host and port
match the subscription.
"""
if not isinstance(self.client, OSCMultiClient):
raise OSCServerError("Local %s does not support subsctiptions or message-filtering" % self.client.__class__.__name__)
addr_cmd = addr.split('/')[-1]
if len(data):
if data[0] in ('help', 'info'):
reply = OSCBundle(self.info_prefix)
reply.append(('server', str(self)))
reply.append(('subscribe_command', "ls | list : list subscribed targets"))
reply.append(('subscribe_command', "[subscribe | listen | sendto | target] <url> [<filter> ...] : subscribe to messages, set filters"))
reply.append(('subscribe_command', "[unsubscribe | silence | nosend | deltarget] <url> : unsubscribe from messages"))
return reply
if data[0] in ('ls', 'list'):
reply = OSCBundle(self.info_prefix)
for trg in self.client.getOSCTargetStrings():
reply.append(('target',) + trg)
return reply
if data[0] in ('subscribe', 'listen', 'sendto', 'target'):
return self._subscribe(data[1:], client_address)
if data[0] in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
return self._unsubscribe(data[1:], client_address)
if addr_cmd in ('subscribe', 'listen', 'sendto', 'target'):
return self._subscribe(data, client_address)
if addr_cmd in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
return self._unsubscribe(data, client_address)
class ForkingOSCServer(ForkingMixIn, OSCServer):
    """An asynchronous OSCServer that forks a new process for each incoming request."""
    # ThreadingOSCRequestHandler is used (in the forked child) so that the
    # contents of timestamped sub-bundles can be handled concurrently.
    RequestHandlerClass = ThreadingOSCRequestHandler
class ThreadingOSCServer(ThreadingMixIn, OSCServer):
    """An asynchronous OSCServer that starts a new thread for each incoming request."""
    # ThreadingOSCRequestHandler handles timestamped sub-bundles in
    # separate threads as well.
    RequestHandlerClass = ThreadingOSCRequestHandler
######
#
# OSCError classes
#
######
class OSCError(Exception):
    """Base class for all OSC-related errors."""

    def __init__(self, message):
        # store the text directly; subclasses override or set 'message' themselves
        self.message = message

    def __str__(self):
        return self.message
class OSCClientError(OSCError):
    """Raised for all OSCClient-side errors."""
    pass
class OSCServerError(OSCError):
    """Raised for all OSCServer-side errors."""
    pass
class NoCallbackError(OSCServerError):
    """Raised (by an OSCServer) when an OSCMessage arrives for an 'unmatched'
    OSC-address and no 'default' handler is registered.
    """

    def __init__(self, pattern):
        """'pattern' is the OSC-address of the unmatched message that caused
        the error to be raised.
        """
        self.message = "No callback registered to handle OSC-address '%s'" % pattern
class NotSubscribedError(OSCClientError):
    """Raised (by an OSCMultiClient) on an attempt to unsubscribe a host
    that is not subscribed.
    """

    def __init__(self, addr, prefix=None):
        url = getUrlStr(addr, prefix if prefix else '')
        self.message = "Target osc://%s is not subscribed" % url
######
#
# Testing Program
#
######
if __name__ == "__main__":
    # Self-test / demo program: builds messages, bundles and blobs, then runs
    # a local client+server pair and sends the lot through the loop-back.
    import optparse

    default_port = 2222

    # define command-line options
    op = optparse.OptionParser(description="OSC.py OpenSoundControl-for-Python Test Program")
    op.add_option("-l", "--listen", dest="listen",
            help="listen on given host[:port]. default = '0.0.0.0:%d'" % default_port)
    op.add_option("-s", "--sendto", dest="sendto",
            help="send to given host[:port]. default = '127.0.0.1:%d'" % default_port)
    op.add_option("-t", "--threading", action="store_true", dest="threading",
            help="Test ThreadingOSCServer")
    op.add_option("-f", "--forking", action="store_true", dest="forking",
            help="Test ForkingOSCServer")
    op.add_option("-u", "--usage", action="help", help="show this help message and exit")

    op.set_defaults(listen=":%d" % default_port)
    op.set_defaults(sendto="")
    op.set_defaults(threading=False)
    op.set_defaults(forking=False)

    # Parse args
    (opts, args) = op.parse_args()

    # resolve the local listen-address; missing host/port fall back to defaults
    addr, server_prefix = parseUrlStr(opts.listen)
    if addr != None and addr[0] != None:
        if addr[1] != None:
            listen_address = addr
        else:
            listen_address = (addr[0], default_port)
    else:
        listen_address = ('', default_port)

    # build the {(host, port): [prefix, filters]} dict for an OSCMultiClient
    targets = {}
    for trg in opts.sendto.split(','):
        (addr, prefix) = parseUrlStr(trg)
        if len(prefix):
            (prefix, filters) = parseFilterStr(prefix)
        else:
            filters = {}

        if addr != None:
            if addr[1] != None:
                targets[addr] = [prefix, filters]
            else:
                targets[(addr[0], listen_address[1])] = [prefix, filters]
        elif len(prefix) or len(filters):
            targets[listen_address] = [prefix, filters]

    welcome = "Welcome to the OSC testing program."
    print welcome
    hexDump(welcome)
    print
    message = OSCMessage()
    message.setAddress("/print")
    message.append(44)
    message.append(11)
    message.append(4.5)
    message.append("the white cliffs of dover")
    print message
    hexDump(message.getBinary())

    print "\nMaking and unmaking a message.."

    strings = OSCMessage("/prin{ce,t}")
    strings.append("Mary had a little lamb")
    strings.append("its fleece was white as snow")
    strings.append("and everywhere that Mary went,")
    strings.append("the lamb was sure to go.")
    strings.append(14.5)
    strings.append(14.5)
    strings.append(-400)

    raw = strings.getBinary()

    print strings
    hexDump(raw)

    # round-trip: decode the raw binary back into the original arguments
    print "Retrieving arguments..."
    data = raw
    for i in range(6):
        text, data = _readString(data)
        print text

    number, data = _readFloat(data)
    print number

    number, data = _readFloat(data)
    print number

    number, data = _readInt(data)
    print number

    print decodeOSC(raw)

    print "\nTesting Blob types."

    blob = OSCMessage("/pri*")
    blob.append("","b")
    blob.append("b","b")
    blob.append("bl","b")
    blob.append("blo","b")
    blob.append("blob","b")
    blob.append("blobs","b")
    blob.append(42)

    print blob
    hexDump(blob.getBinary())

    print1 = OSCMessage()
    print1.setAddress("/print")
    print1.append("Hey man, that's cool.")
    print1.append(42)
    print1.append(3.1415926)

    print "\nTesting OSCBundle"

    bundle = OSCBundle()
    bundle.append(print1)
    bundle.append({'addr':"/print", 'args':["bundled messages:", 2]})
    bundle.setAddress("/*print")
    bundle.append(("no,", 3, "actually."))

    print bundle
    hexDump(bundle.getBinary())

    # Instantiate OSCClient
    print "\nInstantiating OSCClient:"
    if len(targets):
        c = OSCMultiClient()
        c.updateOSCTargets(targets)
    else:
        c = OSCClient()
        c.connect(listen_address)   # connect back to our OSCServer

    print c
    if hasattr(c, 'getOSCTargetStrings'):
        print "Sending to:"
        for (trg, filterstrings) in c.getOSCTargetStrings():
            out = trg
            for fs in filterstrings:
                out += " %s" % fs

            print out

    # Now an OSCServer...
    print "\nInstantiating OSCServer:"

    # define a message-handler function for the server to call.
    def printing_handler(addr, tags, stuff, source):
        msg_string = "%s [%s] %s" % (addr, tags, str(stuff))
        sys.stdout.write("OSCServer Got: '%s' from %s\n" % (msg_string, getUrlStr(source)))

        # send a reply to the client.
        msg = OSCMessage("/printed")
        msg.append(msg_string)
        return msg

    if opts.threading:
        s = ThreadingOSCServer(listen_address, c, return_port=listen_address[1])
    elif opts.forking:
        s = ForkingOSCServer(listen_address, c, return_port=listen_address[1])
    else:
        s = OSCServer(listen_address, c, return_port=listen_address[1])

    print s

    # Set Server to return errors as OSCMessages to "/error"
    s.setSrvErrorPrefix("/error")
    # Set Server to reply to server-info requests with OSCMessages to "/serverinfo"
    s.setSrvInfoPrefix("/serverinfo")

    # this registers a 'default' handler (for unmatched messages),
    # an /'error' handler, an '/info' handler.
    # And, if the client supports it, a '/subscribe' & '/unsubscribe' handler
    s.addDefaultHandlers()
    s.addMsgHandler("/print", printing_handler)

    # if client & server are bound to 'localhost', server replies return to itself!
    s.addMsgHandler("/printed", s.msgPrinter_handler)
    s.addMsgHandler("/serverinfo", s.msgPrinter_handler)

    print "Registered Callback-functions:"
    for addr in s.getOSCAddressSpace():
        print addr

    print "\nStarting OSCServer. Use ctrl-C to quit."
    st = threading.Thread(target=s.serve_forever)
    st.start()

    # subscribe our own server to the client, unless it already is a target
    if hasattr(c, 'targets') and listen_address not in c.targets.keys():
        print "\nSubscribing local Server to local Client"
        c2 = OSCClient()
        c2.connect(listen_address)
        subreq = OSCMessage("/subscribe")
        subreq.append(listen_address)

        print "sending: ", subreq
        c2.send(subreq)
        c2.close()

        time.sleep(0.1)

    print "\nRequesting OSC-address-space and subscribed clients from OSCServer"
    inforeq = OSCMessage("/info")
    for cmd in ("info", "list", "clients"):
        inforeq.clearData()
        inforeq.append(cmd)

        print "sending: ", inforeq
        c.send(inforeq)

        time.sleep(0.1)

    print2 = print1.copy()
    print2.setAddress('/noprint')

    print "\nSending Messages"

    for m in (message, print1, print2, strings, bundle):
        print "sending: ", m
        c.send(m)

        time.sleep(0.1)

    print "\nThe next message's address will match both the '/print' and '/printed' handlers..."
    print "sending: ", blob
    c.send(blob)

    time.sleep(0.1)

    print "\nBundles can be given a timestamp.\nThe receiving server should 'hold' the bundle until its time has come"

    waitbundle = OSCBundle("/print")
    waitbundle.setTimeTag(time.time() + 5)
    if s.__class__ == OSCServer:
        waitbundle.append("Note how the (single-thread) OSCServer blocks while holding this bundle")
    else:
        waitbundle.append("Note how the %s does not block while holding this bundle" % s.__class__.__name__)

    print "Set timetag 5 s into the future"
    print "sending: ", waitbundle
    c.send(waitbundle)

    time.sleep(0.1)

    print "Recursing bundles, with timetags set to 10 s [25 s, 20 s, 10 s]"
    bb = OSCBundle("/print")
    bb.setTimeTag(time.time() + 10)

    b = OSCBundle("/print")
    b.setTimeTag(time.time() + 25)
    b.append("held for 25 sec")
    bb.append(b)

    b.clearData()
    b.setTimeTag(time.time() + 20)
    b.append("held for 20 sec")
    bb.append(b)

    b.clearData()
    b.setTimeTag(time.time() + 15)
    b.append("held for 15 sec")
    bb.append(b)

    if s.__class__ == OSCServer:
        bb.append("Note how the (single-thread) OSCServer handles the bundle's contents in order of appearance")
    else:
        bb.append("Note how the %s handles the sub-bundles in the order dictated by their timestamps" % s.__class__.__name__)
        bb.append("Each bundle's contents, however, are processed in random order (dictated by the kernel's threading)")

    print "sending: ", bb
    c.send(bb)

    time.sleep(0.1)

    print "\nMessages sent!"

    print "\nWaiting for OSCServer. Use ctrl-C to quit.\n"

    try:
        while True:
            time.sleep(30)

    except KeyboardInterrupt:
        print "\nClosing OSCServer."
        s.close()
        print "Waiting for Server-thread to finish"
        st.join()
        print "Closing OSCClient"
        c.close()
        print "Done"

    sys.exit(0)
| Python |
#!/usr/bin/python
"""
This module contains an OpenSoundControl implementation (in Pure Python), based (somewhat) on the
good old 'SimpleOSC' implementation by Daniel Holth & Clinton McChesney.
This implementation is intended to still be 'Simple' to the user, but much more complete
(with OSCServer & OSCClient classes) and much more powerful
(the OSCMultiClient supports subscriptions & message-filtering,
OSCMessage & OSCBundle are now proper container-types)
================
OpenSoundControl
================
OpenSoundControl is a network-protocol for sending (small) packets of addressed data over network sockets.
This OSC-implementation uses the UDP/IP protocol for sending and receiving packets.
(Although it is theoretically possible to send OSC-packets over TCP, almost all known implementations use UDP)
OSC-packets come in two kinds:
- OSC-messages consist of an 'address'-string (not to be confused with a (host:port) network-address!),
followed by a string of 'typetags' associated with the message's arguments (ie. 'payload'),
and finally the arguments themselves, encoded in an OSC-specific way.
The OSCMessage class makes it easy to create & manipulate OSC-messages of this kind in a 'pythonesque' way
(that is, OSCMessage-objects behave a lot like lists)
- OSC-bundles are a special type of OSC-message containing only OSC-messages as 'payload'. Recursively.
(meaning; an OSC-bundle could contain other OSC-bundles, containing OSC-bundles etc.)
OSC-bundles start with the special keyword '#bundle' and do not have an OSC-address. (but the OSC-messages
a bundle contains will have OSC-addresses!)
Also, an OSC-bundle can have a timetag, essentially telling the receiving Server to 'hold' the bundle until
the specified time.
The OSCBundle class allows easy creation & manipulation of OSC-bundles.
see also http://opensoundcontrol.org/spec-1_0
---------
To send OSC-messages, you need an OSCClient, and to receive OSC-messages you need an OSCServer.
The OSCClient uses an 'AF_INET / SOCK_DGRAM' type socket (see the 'socket' module) to send
binary representations of OSC-messages to a remote host:port address.
The OSCServer listens on an 'AF_INET / SOCK_DGRAM' type socket bound to a local port, and handles
incoming requests. Either one-after-the-other (OSCServer) or in a multi-threaded / multi-process fashion
(ThreadingOSCServer / ForkingOSCServer). If the Server has a callback-function (a.k.a. handler) registered
to 'deal with' (i.e. handle) the received message's OSC-address, that function is called, passing it the (decoded) message
The different OSCServers implemented here all support the (recursive) un-bundling of OSC-bundles,
and OSC-bundle timetags.
In fact, this implementation supports:
- OSC-messages with 'i' (int32), 'f' (float32), 's' (string) and 'b' (blob / binary data) types
- OSC-bundles, including timetag-support
- OSC-address patterns including '*', '?', '{,}' and '[]' wildcards.
(please *do* read the OSC-spec! http://opensoundcontrol.org/spec-1_0 it explains what these things mean.)
In addition, the OSCMultiClient supports:
- Sending a specific OSC-message to multiple remote servers
- Remote server subscription / unsubscription (through OSC-messages, of course)
- Message-address filtering.
---------
Stock, V2_Lab, Rotterdam, 2008
----------
Changelog:
----------
v0.3.0 - 27 Dec. 2007
Started out to extend the 'SimpleOSC' implementation (v0.2.3) by Daniel Holth & Clinton McChesney.
Rewrote OSCMessage
Added OSCBundle
v0.3.1 - 3 Jan. 2008
Added OSClient
Added OSCRequestHandler, loosely based on the original CallbackManager
Added OSCServer
Removed original CallbackManager
Adapted testing-script (the 'if __name__ == "__main__":' block at the end) to use new Server & Client
v0.3.2 - 5 Jan. 2008
Added 'container-type emulation' methods (getitem(), setitem(), __iter__() & friends) to OSCMessage
Added ThreadingOSCServer & ForkingOSCServer
- 6 Jan. 2008
Added OSCMultiClient
Added command-line options to testing-script (try 'python OSC.py --help')
v0.3.3 - 9 Jan. 2008
Added OSC-timetag support to OSCBundle & OSCRequestHandler
Added ThreadingOSCRequestHandler
v0.3.4 - 13 Jan. 2008
Added message-filtering to OSCMultiClient
Added subscription-handler to OSCServer
Added support for numpy/scipy int & float types. (these get converted to 'standard' 32-bit OSC ints / floats!)
Cleaned-up and added more Docstrings
v0.3.5 - 14 aug. 2008
Added OSCServer.reportErr(...) method
-----------------
Original Comments
-----------------
> Open SoundControl for Python
> Copyright (C) 2002 Daniel Holth, Clinton McChesney
>
> This library is free software; you can redistribute it and/or modify it under
> the terms of the GNU Lesser General Public License as published by the Free
> Software Foundation; either version 2.1 of the License, or (at your option) any
> later version.
>
> This library is distributed in the hope that it will be useful, but WITHOUT ANY
> WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
> PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
> details.
> You should have received a copy of the GNU Lesser General Public License along
> with this library; if not, write to the Free Software Foundation, Inc., 59
> Temple Place, Suite 330, Boston, MA 02111-1307 USA
> For questions regarding this module contact Daniel Holth <dholth@stetson.edu>
> or visit http://www.stetson.edu/~ProctoLogic/
> Changelog:
> 15 Nov. 2001:
> Removed dependency on Python 2.0 features.
> - dwh
> 13 Feb. 2002:
> Added a generic callback handler.
> - dwh
"""
import math, re, socket, select, string, struct, sys, threading, time, types
from SocketServer import UDPServer, DatagramRequestHandler, ForkingMixIn, ThreadingMixIn
# Module-level version tuple: (major, minor, svn-revision).
# NOTE: 'global' at module scope is a no-op; kept for documentation of intent.
global version
version = ("0.3","5b", "$Rev: 5294 $"[6:-2])

# Types accepted as OSC 'f' (float32) arguments.
global FloatTypes
FloatTypes = [types.FloatType]

# Types accepted as OSC 'i' (int32) arguments.
global IntTypes
IntTypes = [types.IntType]

##
# numpy/scipy support:
##

# When numpy is installed, register its scalar types too, so numpy
# ints/floats appended to an OSCMessage are encoded like native ones.
try:
    from numpy import typeDict

    for ftype in ['float32', 'float64', 'float128']:
        try:
            FloatTypes.append(typeDict[ftype])
        except KeyError:
            pass

    for itype in ['int8', 'int16', 'int32', 'int64']:
        try:
            IntTypes.append(typeDict[itype])
            IntTypes.append(typeDict['u' + itype])  # also the unsigned variant
        except KeyError:
            pass

    # thanks for those...
    del typeDict, ftype, itype

except ImportError:
    # numpy not available: only the builtin types are recognised
    pass
######
#
# OSCMessage classes
#
######
class OSCMessage(object):
    """ Builds typetagged OSC messages.
    OSCMessage objects are container objects for building OSC-messages.
    On the 'front' end, they behave much like list-objects, and on the 'back' end
    they generate a binary representation of the message, which can be sent over a network socket.
    OSC-messages consist of an 'address'-string (not to be confused with a (host, port) IP-address!),
    followed by a string of 'typetags' associated with the message's arguments (ie. 'payload'),
    and finally the arguments themselves, encoded in an OSC-specific way.
    On the Python end, OSCMessage are lists of arguments, prepended by the message's address.
    The message contents can be manipulated much like a list:
    >>> msg = OSCMessage("/my/osc/address")
    >>> msg.append('something')
    >>> msg.insert(0, 'something else')
    >>> msg[1] = 'entirely'
    >>> msg.extend([1,2,3.])
    >>> msg += [4, 5, 6.]
    >>> del msg[3:6]
    >>> msg.pop(-2)
    5
    >>> print msg
    /my/osc/address ['something else', 'entirely', 1, 6.0]
    OSCMessages can be concatenated with the + operator. In this case, the resulting OSCMessage
    inherits its address from the left-hand operand. The right-hand operand's address is ignored.
    To construct an 'OSC-bundle' from multiple OSCMessage, see OSCBundle!
    Additional methods exist for retrieving typetags or manipulating items as (typetag, value) tuples.
    """
    def __init__(self, address=""):
        """Instantiate a new OSCMessage.
        The OSC-address can be specified with the 'address' argument
        """
        self.clear(address)

    def setAddress(self, address):
        """Set or change the OSC-address
        """
        self.address = address

    def clear(self, address=""):
        """Clear (or set a new) OSC-address and clear any arguments appended so far
        """
        self.address = address
        self.clearData()

    def clearData(self):
        """Clear any arguments appended so far
        """
        # the typetag-string always starts with ','; encoded argument data
        # accumulates in 'message'
        self.typetags = ","
        self.message = ""

    def append(self, argument, typehint=None):
        """Appends data to the message, updating the typetags based on
        the argument's type. If the argument is a blob (counted
        string) pass in 'b' as typehint.
        'argument' may also be a list or tuple, in which case its elements
        will get appended one-by-one, all using the provided typehint
        """
        if type(argument) == types.DictType:
            argument = argument.items()
        elif isinstance(argument, OSCMessage):
            raise TypeError("Can only append 'OSCMessage' to 'OSCBundle'")
        if hasattr(argument, '__iter__'):
            # recursively append each element of an iterable
            for arg in argument:
                self.append(arg, typehint)
            return
        if typehint == 'b':
            binary = OSCBlob(argument)
            tag = 'b'
        elif typehint == 't':
            binary = OSCTimeTag(argument)
            tag = 't'
        else:
            tag, binary = OSCArgument(argument, typehint)
        self.typetags += tag
        self.message += binary

    def getBinary(self):
        """Returns the binary representation of the message
        """
        binary = OSCString(self.address)
        binary += OSCString(self.typetags)
        binary += self.message
        return binary

    def __repr__(self):
        """Returns a string containing the decode Message
        """
        return str(decodeOSC(self.getBinary()))

    def __str__(self):
        """Returns the Message's address and contents as a string.
        """
        return "%s %s" % (self.address, str(self.values()))

    def __len__(self):
        """Returns the number of arguments appended so far
        """
        # every argument contributes exactly one typetag after the leading ','
        return (len(self.typetags) - 1)

    def __eq__(self, other):
        """Return True if two OSCMessages have the same address & content
        """
        if not isinstance(other, self.__class__):
            return False
        return (self.address == other.address) and (self.typetags == other.typetags) and (self.message == other.message)

    def __ne__(self, other):
        """Return (not self.__eq__(other))
        """
        return not self.__eq__(other)

    def __add__(self, values):
        """Returns a copy of self, with the contents of 'values' appended
        (see the 'extend()' method, below)
        """
        msg = self.copy()
        msg.extend(values)
        return msg

    def __iadd__(self, values):
        """Appends the contents of 'values'
        (equivalent to 'extend()', below)
        Returns self
        """
        self.extend(values)
        return self

    def __radd__(self, values):
        """Appends the contents of this OSCMessage to 'values'
        Returns the extended 'values' (list or tuple)
        """
        out = list(values)
        out.extend(self.values())
        if type(values) == types.TupleType:
            return tuple(out)
        return out

    def _reencode(self, items):
        """Erase & rebuild the OSCMessage contents from the given
        list of (typehint, value) tuples"""
        self.clearData()
        for item in items:
            self.append(item[1], item[0])

    def values(self):
        """Returns a list of the arguments appended so far
        """
        # decodeOSC returns [address, typetags, arg, ...]; skip the first two
        return decodeOSC(self.getBinary())[2:]

    def tags(self):
        """Returns a list of typetags of the appended arguments
        """
        return list(self.typetags.lstrip(','))

    def items(self):
        """Returns a list of (typetag, value) tuples for
        the arguments appended so far
        """
        out = []
        values = self.values()
        typetags = self.tags()
        for i in range(len(values)):
            out.append((typetags[i], values[i]))
        return out

    def __contains__(self, val):
        """Test if the given value appears in the OSCMessage's arguments
        """
        return (val in self.values())

    def __getitem__(self, i):
        """Returns the indicated argument (or slice)
        """
        return self.values()[i]

    def __delitem__(self, i):
        """Removes the indicated argument (or slice)
        """
        items = self.items()
        del items[i]
        self._reencode(items)

    def _buildItemList(self, values, typehint=None):
        """Normalize 'values' into a list of (typehint, value) tuples."""
        if isinstance(values, OSCMessage):
            items = values.items()
        elif type(values) == types.ListType:
            items = []
            for val in values:
                if type(val) == types.TupleType:
                    items.append(val[:2])
                else:
                    items.append((typehint, val))
        elif type(values) == types.TupleType:
            items = [values[:2]]
        else:
            items = [(typehint, values)]
        return items

    def __setitem__(self, i, val):
        """Set indicated argument (or slice) to a new value.
        'val' can be a single int/float/string, or a (typehint, value) tuple.
        Or, if 'i' is a slice, a list of these or another OSCMessage.
        """
        items = self.items()
        new_items = self._buildItemList(val)
        if type(i) != types.SliceType:
            if len(new_items) != 1:
                raise TypeError("single-item assignment expects a single value or a (typetag, value) tuple")
            new_items = new_items[0]
        # finally...
        items[i] = new_items
        self._reencode(items)

    def setItem(self, i, val, typehint=None):
        """Set indicated argument to a new value (with typehint)
        """
        items = self.items()
        items[i] = (typehint, val)
        self._reencode(items)

    def copy(self):
        """Returns a deep copy of this OSCMessage
        """
        msg = self.__class__(self.address)
        msg.typetags = self.typetags
        msg.message = self.message
        return msg

    def count(self, val):
        """Returns the number of times the given value occurs in the OSCMessage's arguments
        """
        return self.values().count(val)

    def index(self, val):
        """Returns the index of the first occurence of the given value in the OSCMessage's arguments.
        Raises ValueError if val isn't found
        """
        return self.values().index(val)

    def extend(self, values):
        """Append the contents of 'values' to this OSCMessage.
        'values' can be another OSCMessage, or a list/tuple of ints/floats/strings
        """
        items = self.items() + self._buildItemList(values)
        self._reencode(items)

    def insert(self, i, val, typehint = None):
        """Insert given value (with optional typehint) into the OSCMessage
        at the given index.
        """
        items = self.items()
        for item in reversed(self._buildItemList(val)):
            items.insert(i, item)
        self._reencode(items)

    def popitem(self, i):
        """Delete the indicated argument from the OSCMessage, and return it
        as a (typetag, value) tuple.
        """
        items = self.items()
        item = items.pop(i)
        self._reencode(items)
        return item

    def pop(self, i):
        """Delete the indicated argument from the OSCMessage, and return it.
        """
        return self.popitem(i)[1]

    def reverse(self):
        """Reverses the arguments of the OSCMessage (in place)
        """
        items = self.items()
        items.reverse()
        self._reencode(items)

    def remove(self, val):
        """Removes the first argument with the given value from the OSCMessage.
        Raises ValueError if val isn't found.
        """
        items = self.items()
        # this is not very efficient...
        i = 0
        for (t, v) in items:
            if (v == val):
                break
            i += 1
        else:
            # BUGFIX: this used to reference an undefined name 'm', raising
            # NameError instead of the documented ValueError
            raise ValueError("'%s' not in OSCMessage" % str(val))
        # but more efficient than first calling self.values().index(val),
        # then calling self.items(), which would in turn call self.values() again...
        del items[i]
        self._reencode(items)

    def __iter__(self):
        """Returns an iterator of the OSCMessage's arguments
        """
        return iter(self.values())

    def __reversed__(self):
        """Returns a reverse iterator of the OSCMessage's arguments
        """
        return reversed(self.values())

    def itervalues(self):
        """Returns an iterator of the OSCMessage's arguments
        """
        return iter(self.values())

    def iteritems(self):
        """Returns an iterator of the OSCMessage's arguments as
        (typetag, value) tuples
        """
        return iter(self.items())

    def itertags(self):
        """Returns an iterator of the OSCMessage's arguments' typetags
        """
        return iter(self.tags())
class OSCBundle(OSCMessage):
    """Builds a 'bundle' of OSC messages.
    OSCBundle objects are container objects for building OSC-bundles of OSC-messages.
    An OSC-bundle is a special kind of OSC-message which contains a list of OSC-messages
    (And yes, OSC-bundles may contain other OSC-bundles...)
    OSCBundle objects behave much the same as OSCMessage objects, with these exceptions:
    - if an item or items to be appended or inserted are not OSCMessage objects,
    OSCMessage objectss are created to encapsulate the item(s)
    - an OSC-bundle does not have an address of its own, only the contained OSC-messages do.
    The OSCBundle's 'address' is inherited by any OSCMessage the OSCBundle object creates.
    - OSC-bundles have a timetag to tell the receiver when the bundle should be processed.
    The default timetag value (0) means 'immediately'
    """
    def __init__(self, address="", time=0):
        """Instantiate a new OSCBundle.
        The default OSC-address for newly created OSCMessages
        can be specified with the 'address' argument
        The bundle's timetag can be set with the 'time' argument
        """
        super(OSCBundle, self).__init__(address)
        # 'Python time': floating seconds since the Epoch; 0 means 'immediately'
        self.timetag = time

    def __str__(self):
        """Returns the Bundle's contents (and timetag, if nonzero) as a string.
        """
        if (self.timetag > 0.):
            out = "#bundle (%s) [" % self.getTimeTagStr()
        else:
            out = "#bundle ["
        if self.__len__():
            for val in self.values():
                out += "%s, " % str(val)
            out = out[:-2]  # strip trailing space and comma
        return out + "]"

    def setTimeTag(self, time):
        """Set or change the OSCBundle's TimeTag
        In 'Python Time', that's floating seconds since the Epoch
        """
        # negative times are silently ignored
        if time >= 0:
            self.timetag = time

    def getTimeTagStr(self):
        """Return the TimeTag as a human-readable string
        """
        fract, secs = math.modf(self.timetag)
        out = time.ctime(secs)[11:19]  # the 'HH:MM:SS' portion of ctime()
        out += ("%.3f" % fract)[1:]    # append '.mmm', dropping the leading '0'
        return out

    def append(self, argument, typehint = None):
        """Appends data to the bundle, creating an OSCMessage to encapsulate
        the provided argument unless this is already an OSCMessage.
        Any newly created OSCMessage inherits the OSCBundle's address at the time of creation.
        If 'argument' is an iterable, its elements will be encapsuated by a single OSCMessage.
        Finally, 'argument' can be (or contain) a dict, which will be 'converted' to an OSCMessage;
        - if 'addr' appears in the dict, its value overrides the OSCBundle's address
        - if 'args' appears in the dict, its value(s) become the OSCMessage's arguments
        """
        if isinstance(argument, OSCMessage):
            binary = OSCBlob(argument.getBinary())
        else:
            # encapsulate the raw argument in a new OSCMessage that
            # inherits this bundle's address
            msg = OSCMessage(self.address)
            if type(argument) == types.DictType:
                if 'addr' in argument:
                    msg.setAddress(argument['addr'])
                if 'args' in argument:
                    msg.append(argument['args'], typehint)
            else:
                msg.append(argument, typehint)
            binary = OSCBlob(msg.getBinary())
        # each contained message is stored as a single blob ('b') element
        self.message += binary
        self.typetags += 'b'

    def getBinary(self):
        """Returns the binary representation of the message
        """
        # bundles are encoded as "#bundle" + timetag + the concatenated,
        # size-prefixed contents
        binary = OSCString("#bundle")
        binary += OSCTimeTag(self.timetag)
        binary += self.message
        return binary

    def _reencapsulate(self, decoded):
        """Rebuild an OSCMessage/OSCBundle object from a decodeOSC() list
        (recursively, since bundles may nest).
        """
        if decoded[0] == "#bundle":
            msg = OSCBundle()
            msg.setTimeTag(decoded[1])
            for submsg in decoded[2:]:
                msg.append(self._reencapsulate(submsg))
        else:
            msg = OSCMessage(decoded[0])
            tags = decoded[1].lstrip(',')
            for i in range(len(tags)):
                msg.append(decoded[2+i], tags[i])
        return msg

    def values(self):
        """Returns a list of the OSCMessages appended so far
        """
        out = []
        for decoded in decodeOSC(self.getBinary())[2:]:
            out.append(self._reencapsulate(decoded))
        return out

    def __eq__(self, other):
        """Return True if two OSCBundles have the same timetag & content
        """
        if not isinstance(other, self.__class__):
            return False
        return (self.timetag == other.timetag) and (self.typetags == other.typetags) and (self.message == other.message)

    def copy(self):
        """Returns a deep copy of this OSCBundle
        """
        copy = super(OSCBundle, self).copy()
        copy.timetag = self.timetag
        return copy
######
#
# OSCMessage encoding functions
#
######
def OSCString(next):
"""Convert a string into a zero-padded OSC String.
The length of the resulting string is always a multiple of 4 bytes.
The string ends with 1 to 4 zero-bytes ('\x00')
"""
OSCstringLength = math.ceil((len(next)+1) / 4.0) * 4
return struct.pack(">%ds" % (OSCstringLength), str(next))
def OSCBlob(next):
"""Convert a string into an OSC Blob.
An OSC-Blob is a binary encoded block of data, prepended by a 'size' (int32).
The size is always a mutiple of 4 bytes.
The blob ends with 0 to 3 zero-bytes ('\x00')
"""
if type(next) in types.StringTypes:
OSCblobLength = math.ceil((len(next)) / 4.0) * 4
binary = struct.pack(">i%ds" % (OSCblobLength), OSCblobLength, next)
else:
binary = ""
return binary
def OSCArgument(next, typehint=None):
    """ Convert some Python types to their
    OSC binary representations, returning a
    (typetag, data) tuple.
    """
    # no (or falsy) typehint: auto-detect from the Python type,
    # falling back to a string encoding
    if not typehint:
        if type(next) in FloatTypes:
            return ('f', struct.pack(">f", float(next)))
        if type(next) in IntTypes:
            return ('i', struct.pack(">i", int(next)))
        return ('s', OSCString(next))
    # explicit float hint; unconvertible values fall back to string
    if typehint == 'f':
        try:
            return ('f', struct.pack(">f", float(next)))
        except ValueError:
            return ('s', OSCString(next))
    # explicit int hint; unconvertible values fall back to string
    if typehint == 'i':
        try:
            return ('i', struct.pack(">i", int(next)))
        except ValueError:
            return ('s', OSCString(next))
    # any other hint is treated as string
    return ('s', OSCString(next))
def OSCTimeTag(time):
"""Convert a time in floating seconds to its
OSC binary representation
"""
if time > 0:
fract, secs = math.modf(time)
binary = struct.pack('>ll', long(secs), long(fract * 1e9))
else:
binary = struct.pack('>ll', 0L, 1L)
return binary
######
#
# OSCMessage decoding functions
#
######
def _readString(data):
"""Reads the next (null-terminated) block of data
"""
length = string.find(data,"\0")
nextData = int(math.ceil((length+1) / 4.0) * 4)
return (data[0:length], data[nextData:])
def _readBlob(data):
"""Reads the next (numbered) block of data
"""
length = struct.unpack(">i", data[0:4])[0]
nextData = int(math.ceil((length) / 4.0) * 4) + 4
return (data[4:length+4], data[nextData:])
def _readInt(data):
"""Tries to interpret the next 4 bytes of the data
as a 32-bit integer. """
if(len(data)<4):
print "Error: too few bytes for int", data, len(data)
rest = data
integer = 0
else:
integer = struct.unpack(">i", data[0:4])[0]
rest = data[4:]
return (integer, rest)
def _readLong(data):
"""Tries to interpret the next 8 bytes of the data
as a 64-bit signed integer.
"""
high, low = struct.unpack(">ll", data[0:8])
big = (long(high) << 32) + low
rest = data[8:]
return (big, rest)
def _readTimeTag(data):
"""Tries to interpret the next 8 bytes of the data
as a TimeTag.
"""
high, low = struct.unpack(">ll", data[0:8])
if (high == 0) and (low <= 1):
time = 0.0
else:
time = int(high) + float(low / 1e9)
rest = data[8:]
return (time, rest)
def _readFloat(data):
"""Tries to interpret the next 4 bytes of the data
as a 32-bit float.
"""
if(len(data)<4):
print "Error: too few bytes for float", data, len(data)
rest = data
float = 0
else:
float = struct.unpack(">f", data[0:4])[0]
rest = data[4:]
return (float, rest)
def decodeOSC(data):
    """Converts a binary OSC message to a Python list.
    Returns [address, typetags, arg, ...] for a plain message, or
    ["#bundle", timetag, [element], ...] for a bundle (elements are
    decoded recursively).
    Raises OSCError when the typetag-string lacks its leading ','.
    """
    table = {"i":_readInt, "f":_readFloat, "s":_readString, "b":_readBlob}
    decoded = []
    address, rest = _readString(data)
    # a message whose first string starts with ',' has no address:
    # that string is actually the typetag-string itself
    if address.startswith(","):
        typetags = address
        address = ""
    else:
        typetags = ""
    if address == "#bundle":
        # bundle: a timetag followed by size-prefixed, recursively
        # encoded elements
        time, rest = _readTimeTag(rest)
        decoded.append(address)
        decoded.append(time)
        while len(rest)>0:
            length, rest = _readInt(rest)
            decoded.append(decodeOSC(rest[:length]))
            rest = rest[length:]
    elif len(rest)>0:
        if not len(typetags):
            typetags, rest = _readString(rest)
        decoded.append(address)
        decoded.append(typetags)
        if typetags.startswith(","):
            # decode each argument according to its typetag
            for tag in typetags[1:]:
                value, rest = table[tag](rest)
                decoded.append(value)
        else:
            raise OSCError("OSCMessage's typetag-string lacks the magic ','")
    return decoded
######
#
# Utility functions
#
######
def hexDump(bytes):
""" Useful utility; prints the string in hexadecimal.
"""
print "byte 0 1 2 3 4 5 6 7 8 9 A B C D E F"
num = len(bytes)
for i in range(num):
if (i) % 16 == 0:
line = "%02X0 : " % (i/16)
line += "%02X " % ord(bytes[i])
if (i+1) % 16 == 0:
print "%s: %s" % (line, repr(bytes[i-15:i+1]))
line = ""
bytes_left = num % 16
if bytes_left:
print "%s: %s" % (line.ljust(54), repr(bytes[-bytes_left:]))
def getUrlStr(*args):
"""Convert provided arguments to a string in 'host:port/prefix' format
Args can be:
- (host, port)
- (host, port), prefix
- host, port
- host, port, prefix
"""
if not len(args):
return ""
if type(args[0]) == types.TupleType:
host = args[0][0]
port = args[0][1]
args = args[1:]
else:
host = args[0]
port = args[1]
args = args[2:]
if len(args):
prefix = args[0]
else:
prefix = ""
if len(host) and (host != '0.0.0.0'):
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
else:
host = 'localhost'
if type(port) == types.IntType:
return "%s:%d%s" % (host, port, prefix)
else:
return host + prefix
def parseUrlStr(url):
    """Convert provided string in 'host:port/prefix' format to it's components
    Returns ((host, port), prefix)
    """
    if not (type(url) in types.StringTypes and len(url)):
        return (None, '')
    # strip an optional 'scheme://' prefix
    i = url.find("://")
    if i > -1:
        url = url[i+3:]
    # split 'host:tail' on the first ':'; without one, the whole url is tail
    i = url.find(':')
    if i > -1:
        host = url[:i].strip()
        tail = url[i+1:].strip()
    else:
        host = ''
        tail = url
    # find the end of the leading digit-run (the port number)
    for i in range(len(tail)):
        if not tail[i].isdigit():
            break
    else:
        # loop finished without break: the whole tail is digits,
        # so advance past the last one
        i += 1
    # NOTE(review): if 'tail' is empty, the loop never runs and 'i' keeps
    # its stale value from the earlier find(':') — looks unintended; confirm
    portstr = tail[:i].strip()
    tail = tail[i:].strip()
    # the prefix starts at the first '/', '+', '-' or '*' in the tail
    found = len(tail)
    for c in ('/', '+', '-', '*'):
        i = tail.find(c)
        if (i > -1) and (i < found):
            found = i
    head = tail[:found].strip()
    prefix = tail[found:].strip()
    prefix = prefix.strip('/')
    if len(prefix) and prefix[0] not in ('+', '-', '*'):
        prefix = '/' + prefix
    # anything before the prefix can stand in for a missing host
    if len(head) and not len(host):
        host = head
    if len(host):
        # resolve to an IP if possible; keep the name on failure
        try:
            host = socket.gethostbyname(host)
        except socket.error:
            pass
    try:
        port = int(portstr)
    except ValueError:
        port = None
    return ((host, port), prefix)
######
#
# OSCClient class
#
######
class OSCClient(object):
    """Simple OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
    """
    # size (in bytes) of the outgoing (send) socket buffer
    sndbuf_size = 4096 * 8

    def __init__(self, server=None):
        """Construct an OSC Client.
        When the 'address' argument is given this client is connected to a specific remote server.
        - address ((host, port) tuple): the address of the remote server to send all messages to
        Otherwise it acts as a generic client:
        If address == 'None', the client doesn't connect to a specific remote server,
        and the remote address must be supplied when calling sendto()
        - server: Local OSCServer-instance this client will use the socket of for transmissions.
        If none is supplied, a socket will be created.
        """
        self.socket = None
        if server is None:
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
            self._fd = self.socket.fileno()
            self.server = None
        else:
            self.setServer(server)
        self.client_address = None

    def setServer(self, server):
        """Associate this Client with given server.
        The Client will send from the Server's socket.
        The Server will use this Client instance to send replies.
        """
        if not isinstance(server, OSCServer):
            raise ValueError("'server' argument is not a valid OSCServer object")
        if self.socket is not None:
            self.close()
        # duplicate the server's socket so closing one side does not
        # close the other
        self.socket = server.socket.dup()
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
        self._fd = self.socket.fileno()
        self.server = server
        # a server holds at most one reply-client; displace any previous one
        if self.server.client is not None:
            self.server.client.close()
        self.server.client = self

    def close(self):
        """Disconnect & close the Client's socket
        """
        if self.socket is not None:
            self.socket.close()
            self.socket = None

    def __str__(self):
        """Returns a string containing this Client's Class-name, software-version
        and the remote-address it is connected to (if any)
        """
        out = self.__class__.__name__
        out += " v%s.%s-%s" % version
        addr = self.address()
        if addr:
            out += " connected to osc://%s" % getUrlStr(addr)
        else:
            out += " (unconnected)"
        return out

    def __eq__(self, other):
        """Return True if 'other' is an OSCClient wrapping the same underlying
        socket (and the same server, when both clients are server-bound).
        """
        if not isinstance(other, self.__class__):
            return False
        # BUGFIX: this used to return cmp() results directly; cmp() yields 0
        # for EQUAL objects, so two equal clients compared as unequal.
        # Compare the wrapped socket objects instead ('_sock' on Python 2;
        # fall back to the socket itself where that attribute is absent).
        mine = getattr(self.socket, '_sock', self.socket)
        theirs = getattr(other.socket, '_sock', other.socket)
        if mine != theirs:
            return False
        if self.server and other.server:
            return self.server == other.server
        return True

    def __ne__(self, other):
        """Return (not self.__eq__(other))
        """
        return not self.__eq__(other)

    def address(self):
        """Returns a (host,port) tuple of the remote server this client is
        connected to or None if not connected to any server.
        """
        try:
            return self.socket.getpeername()
        except socket.error:
            return None

    def connect(self, address):
        """Bind to a specific OSC server:
        the 'address' argument is a (host, port) tuple
        - host: hostname of the remote OSC server,
        - port: UDP-port the remote OSC server listens to.
        Raises OSCClientError if the socket cannot connect.
        """
        try:
            self.socket.connect(address)
            self.client_address = address
        except socket.error as e:
            self.client_address = None
            raise OSCClientError("SocketError: %s" % str(e))
        if self.server is not None:
            self.server.return_port = address[1]

    def sendto(self, msg, address, timeout=None):
        """Send the given OSCMessage to the specified address.
        - msg: OSCMessage (or OSCBundle) to be sent
        - address: (host, port) tuple specifing remote server to send the message to
        - timeout: A timeout value for attempting to send. If timeout == None,
        this call blocks until socket is available for writing.
        Raises OSCClientError when timing out while waiting for the socket.
        """
        if not isinstance(msg, OSCMessage):
            raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
        ret = select.select([], [self._fd], [], timeout)
        try:
            ret[1].index(self._fd)
        except ValueError:
            # select() timed out: our fd is not in the writable list
            raise OSCClientError("Timed out waiting for file descriptor")
        try:
            # temporarily connect to the given address, then restore any
            # previous connection
            self.socket.connect(address)
            self.socket.sendall(msg.getBinary())
            if self.client_address:
                self.socket.connect(self.client_address)
        except socket.error as e:
            if e[0] in (7, 65):  # 7 = 'no address associated with nodename', 65 = 'no route to host'
                raise e
            else:
                raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))

    def send(self, msg, timeout=None):
        """Send the given OSCMessage.
        The Client must be already connected.
        - msg: OSCMessage (or OSCBundle) to be sent
        - timeout: A timeout value for attempting to send. If timeout == None,
        this call blocks until socket is available for writing.
        Raises OSCClientError when timing out while waiting for the socket,
        or when the Client isn't connected to a remote server.
        """
        if not isinstance(msg, OSCMessage):
            raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
        ret = select.select([], [self._fd], [], timeout)
        try:
            ret[1].index(self._fd)
        except ValueError:
            # select() timed out: our fd is not in the writable list
            raise OSCClientError("Timed out waiting for file descriptor")
        try:
            self.socket.sendall(msg.getBinary())
        except socket.error as e:
            if e[0] in (7, 65):  # 7 = 'no address associated with nodename', 65 = 'no route to host'
                raise e
            else:
                raise OSCClientError("while sending: %s" % str(e))
######
#
# FilterString Utility functions
#
######
def parseFilterStr(args):
    """Convert Message-Filter settings in '+<addr> -<addr> ...' format to a dict of the form
    { '<addr>':True, '<addr>':False, ... }
    Returns a list: ['<prefix>', filters]
    """
    out = {}
    if type(args) in types.StringTypes:
        args = [args]
    prefix = None
    for arg in args:
        head = None
        # each '+'-separated chunk may itself contain '-'-separated filters
        for plus in arg.split('+'):
            minus = plus.split('-')
            plusfs = minus.pop(0).strip()
            if len(plusfs):
                # normalize: exactly one leading '/', no trailing '/'
                plusfs = '/' + plusfs.strip('/')
            if (head == None) and (plusfs != "/*"):
                # the first non-'/*' item becomes the prefix candidate
                head = plusfs
            elif len(plusfs):
                if plusfs == '/*':
                    out = { '/*':True }  # reset all previous filters
                else:
                    out[plusfs] = True
            # everything after a '-' is a negative ('reject') filter
            for minusfs in minus:
                minusfs = minusfs.strip()
                if len(minusfs):
                    minusfs = '/' + minusfs.strip('/')
                    if minusfs == '/*':
                        out = { '/*':False }  # reset all previous filters
                    else:
                        out[minusfs] = False
        # only the first arg's head can become the returned prefix
        if prefix == None:
            prefix = head
    return [prefix, out]
def getFilterStr(filters):
    """Return the given 'filters' dict as a list of
    '+<addr>' | '-<addr>' filter-strings
    """
    if not len(filters):
        return []
    # emit an explicit '/*' baseline first, so the remaining entries
    # read as exceptions to it
    if '/*' in filters.keys():
        if filters['/*']:
            out = ["+/*"]
        else:
            out = ["-/*"]
    else:
        if False in filters.values():
            out = ["+/*"]
        else:
            out = ["-/*"]
    # renamed the loop variable from 'bool', which shadowed the builtin
    for (addr, accept) in filters.items():
        if addr == '/*':
            continue
        if accept:
            out.append("+%s" % addr)
        else:
            out.append("-%s" % addr)
    return out
# A translation-table for mapping OSC-address expressions to Python 're' expressions:
# '?' becomes '.' (match any single character) and '{a,b}' alternation
# becomes '(a|b)'. Applied via str.translate() in getRegEx() below.
OSCtrans = string.maketrans("{,}?","(|).")
def getRegEx(pattern):
    """Compiles and returns a 'regular expression' object for the given address-pattern.
    """
    # First escape the regex metacharacters that are literal in OSC
    # addresses (before translation introduces new '.' characters)...
    for literal, escaped in ((".", r"\."), ("(", r"\("), (")", r"\)")):
        pattern = pattern.replace(literal, escaped)
    # ...then map OSC wildcards onto their regex equivalents:
    # '*' -> '.*', and (via OSCtrans) '?' -> '.', '{a,b}' -> '(a|b)'
    pattern = pattern.replace("*", r".*")
    return re.compile(pattern.translate(OSCtrans))
######
#
# OSCMultiClient class
#
######
class OSCMultiClient(OSCClient):
"""'Multiple-Unicast' OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
This client keeps a dict of 'OSCTargets'. and sends each OSCMessage to each OSCTarget
The OSCTargets are simply (host, port) tuples, and may be associated with an OSC-address prefix.
the OSCTarget's prefix gets prepended to each OSCMessage sent to that target.
"""
def __init__(self, server=None):
    """Construct a "Multi" OSC Client.
    - server: Local OSCServer-instance this client will use the socket of for transmissions.
    If none is supplied, a socket will be created.
    """
    super(OSCMultiClient, self).__init__(server)
    # {(host, port): [prefix, filters], ...} of subscribed OSCTargets
    self.targets = {}
def _searchHostAddr(self, host):
    """Search the subscribed OSCTargets for (the first occurence of) given host.
    Returns a (host, port) tuple
    Raises NotSubscribedError if no target matches.
    """
    # resolve to an IP where possible, since targets are keyed by IP
    try:
        host = socket.gethostbyname(host)
    except socket.error:
        pass
    for target in self.targets.keys():
        if target[0] == host:
            return target
    raise NotSubscribedError((host, None))
def _updateFilters(self, dst, src):
    """Update a 'filters' dict with values from another 'filters' dict:
    - src[a] == True and dst[a] == False: del dst[a]
    - src[a] == False and dst[a] == True: del dst[a]
    - a not in dst: dst[a] == src[a]
    NOTE: a '/*' entry in 'src' is pop()ed, i.e. 'src' is mutated.
    """
    if '/*' in src.keys():  # reset filters
        dst.clear()  # 'match everything' == no filters
        if not src.pop('/*'):
            dst['/*'] = False  # 'match nothing'
    # renamed the loop variable from 'bool', which shadowed the builtin
    for (addr, accept) in src.items():
        if (addr in dst.keys()) and (dst[addr] != accept):
            # opposite filters cancel each other out
            del dst[addr]
        else:
            dst[addr] = accept
def _setTarget(self, address, prefix=None, filters=None):
    """Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
    - address ((host, port) tuple): IP-address & UDP-port
    - prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
    sent to this OSCTarget (optional)
    - filters: a dict of {addr:bool} entries, or a filter-string to be parsed (optional)
    """
    if address not in self.targets.keys():
        self.targets[address] = ["", {}]
    if prefix is not None:
        if len(prefix):
            # normalize: exactly one leading '/', no trailing '/'
            prefix = '/' + prefix.strip('/')
        self.targets[address][0] = prefix
    if filters is not None:
        if type(filters) in types.StringTypes:
            (_, filters) = parseFilterStr(filters)
        elif type(filters) != types.DictType:
            raise TypeError("'filters' argument must be a dict with {addr:bool} entries")
        self._updateFilters(self.targets[address][1], filters)
def setOSCTarget(self, address, prefix=None, filters=None):
    """Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
    the 'address' argument can be a ((host, port) tuple) : The target server address & UDP-port
    or a 'host' (string) : The host will be looked-up
    - prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
    sent to this OSCTarget (optional)
    """
    if type(address) in types.StringTypes:
        address = self._searchHostAddr(address)
    elif (type(address) == types.TupleType):
        (host, port) = address[:2]
        try:
            host = socket.gethostbyname(host)
        except socket.error:
            # BUGFIX: this was a bare 'except:', which hid real errors;
            # narrowed to socket.error for consistency with delOSCTarget()
            # and hasOSCTarget(). An unresolvable hostname is kept as given.
            pass
        address = (host, port)
    else:
        raise TypeError("'address' argument must be a (host, port) tuple or a 'host' string")
    self._setTarget(address, prefix, filters)
def setOSCTargetFromStr(self, url):
    """Adds or modifies a subscribed OSCTarget from the given string, which should be in the
    '<host>:<port>[/<prefix>] [+/<filter>]|[-/<filter>] ...' format.
    """
    # split the url into its address and everything after it, then
    # parse the remainder into a prefix and a filters-dict
    addr, remainder = parseUrlStr(url)
    prefix, filters = parseFilterStr(remainder)
    self._setTarget(addr, prefix, filters)
def _delTarget(self, address, prefix=None):
    """Delete the specified OSCTarget from the Client's dict.
    the 'address' argument must be a (host, port) tuple.
    If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
    Raises NotSubscribedError if the address is not subscribed.
    """
    try:
        # without a prefix, delete unconditionally; with one, only on match
        if prefix is None or prefix == self.targets[address][0]:
            del self.targets[address]
    except KeyError:
        raise NotSubscribedError(address, prefix)
def delOSCTarget(self, address, prefix=None):
    """Delete the specified OSCTarget from the Client's dict.
    the 'address' argument can be a ((host, port) tuple), or a hostname.
    If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
    """
    if type(address) in types.StringTypes:
        address = self._searchHostAddr(address)
    if type(address) == types.TupleType:
        (hostname, port) = address[:2]
        # resolve to an IP where possible, since targets are keyed by IP
        try:
            hostname = socket.gethostbyname(hostname)
        except socket.error:
            pass  # keep the unresolved name
        address = (hostname, port)
    self._delTarget(address, prefix)
def hasOSCTarget(self, address, prefix=None):
    """Return True if the given OSCTarget exists in the Client's dict.
    the 'address' argument can be a ((host, port) tuple), or a hostname.
    If the 'prefix' argument is given, the return-value is only True if the address and prefix match.
    """
    if type(address) in types.StringTypes:
        address = self._searchHostAddr(address)
    if type(address) == types.TupleType:
        (hostname, port) = address[:2]
        # resolve to an IP where possible, since targets are keyed by IP
        try:
            hostname = socket.gethostbyname(hostname)
        except socket.error:
            pass
        address = (hostname, port)
    if address not in self.targets.keys():
        return False
    if prefix is None:
        return True
    return prefix == self.targets[address][0]
def getOSCTargets(self):
    """Returns the dict of OSCTargets: {addr:[prefix, filters], ...}
    (target IPs are resolved back to hostnames where possible)
    """
    resolved = {}
    for (addr, settings) in self.targets.items():
        (host, port) = addr
        try:
            (host, _, _) = socket.gethostbyaddr(host)
        except socket.error:
            pass  # keep the IP if reverse lookup fails
        resolved[(host, port)] = settings
    return resolved
def getOSCTarget(self, address):
    """Returns the OSCTarget matching the given address as a ((host, port), [prefix, filters]) tuple.
    'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
    Returns (None, ['',{}]) if address not found.
    """
    if type(address) in types.StringTypes:
        address = self._searchHostAddr(address)
    if (type(address) == types.TupleType):
        (host, port) = address[:2]
        # targets are keyed by IP, so resolve before the lookup
        try:
            host = socket.gethostbyname(host)
        except socket.error:
            pass
        address = (host, port)
        if (address in self.targets.keys()):
            # resolve the IP back to a hostname for the returned tuple,
            # where possible
            try:
                (host, _, _) = socket.gethostbyaddr(host)
            except socket.error:
                pass
            return ((host, port), self.targets[address])
    return (None, ['',{}])
def clearOSCTargets(self):
    """Erases all OSCTargets from the Client's dict
    """
    # rebind to a fresh dict (rather than clearing in place), matching
    # the original behavior
    self.targets = dict()
def updateOSCTargets(self, dict):
    """Update the Client's OSCTargets dict with the contents of 'dict'
    The given dict's items MUST be of the form
    { (host, port):[prefix, filters], ... }
    """
    # NOTE(review): the parameter name shadows the builtin 'dict'; renaming
    # it would break keyword callers, so it is kept as-is.
    for ((host, port), (prefix, filters)) in dict.items():
        entry = [prefix, {}]
        self._updateFilters(entry[1], filters)
        # targets are keyed by IP where resolvable
        try:
            host = socket.gethostbyname(host)
        except socket.error:
            pass
        self.targets[(host, port)] = entry
def getOSCTargetStr(self, address):
    """Returns the OSCTarget matching the given address as a ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuple.
    'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
    Returns (None, []) if address not found.
    """
    (addr, (prefix, filters)) = self.getOSCTarget(address)
    if addr is None:
        return (None, [])
    url = "osc://%s" % getUrlStr(addr, prefix)
    return (url, getFilterStr(filters))
def getOSCTargetStrings(self):
    """Returns a list of all OSCTargets as ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuples.
    """
    return [("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters))
            for (addr, (prefix, filters)) in self.targets.items()]
def connect(self, address):
    """The OSCMultiClient isn't allowed to connect to any specific
    address.
    """
    # deliberately unsupported: targets are managed via setOSCTarget()
    return NotImplemented
def sendto(self, msg, address, timeout=None):
    """Send the given OSCMessage.
    The specified address is ignored. Instead this method calls send() to
    send the message to all subscribed clients.
    - msg: OSCMessage (or OSCBundle) to be sent
    - address: (host, port) tuple specifing remote server to send the message to
    - timeout: A timeout value for attempting to send. If timeout == None,
    this call blocks until socket is available for writing.
    Raises OSCClientError when timing out while waiting for the socket.
    """
    # 'address' is intentionally unused; delivery goes to all targets
    self.send(msg, timeout)
def _filterMessage(self, filters, msg):
    """Checks the given OSCMessge against the given filters.
    'filters' is a dict containing OSC-address:bool pairs.
    If 'msg' is an OSCBundle, recursively filters its constituents.
    Returns None if the message is to be filtered, else returns the message.
    or
    Returns a copy of the OSCBundle with the filtered messages removed.
    """
    if isinstance(msg, OSCBundle):
        # filter each constituent message, rebuilding the bundle from scratch
        out = msg.copy()
        msgs = out.values()
        out.clearData()
        for m in msgs:
            m = self._filterMessage(filters, m)
            if m: # this catches 'None' and empty bundles.
                out.append(m)
    elif isinstance(msg, OSCMessage):
        # establish the default verdict before address-specific matching below:
        # an explicit '/*' entry wins; otherwise the presence of any '-' (False)
        # filter implies "send unless excluded", and all-'+' implies "drop
        # unless included".
        if '/*' in filters.keys():
            if filters['/*']:
                out = msg
            else:
                out = None
        elif False in filters.values():
            out = msg
        else:
            out = None
    else:
        raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
    # address-specific filters override the default verdict; first full match
    # wins. NOTE(review): this also runs for the OSCBundle branch above,
    # matching against the bundle's own address — presumably intentional.
    expr = getRegEx(msg.address)
    for addr in filters.keys():
        if addr == '/*':
            continue
        match = expr.match(addr)
        if match and (match.end() == len(addr)):
            if filters[addr]:
                out = msg
            else:
                out = None
            break
    return out
def _prefixAddress(self, prefix, msg):
    """Return a copy of 'msg' with the given prefix prepended to its
    OSC-address.
    If 'msg' is an OSCBundle, the prefix is recursively applied to each
    of its constituent messages.
    """
    out = msg.copy()
    if isinstance(msg, OSCBundle):
        children = out.values()
        out.clearData()
        for child in children:
            out.append(self._prefixAddress(prefix, child))
    elif isinstance(msg, OSCMessage):
        out.setAddress(prefix + out.address)
    else:
        raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
    return out
def send(self, msg, timeout=None):
    """Send the given OSCMessage to all subscribed OSCTargets
    - msg: OSCMessage (or OSCBundle) to be sent
    - timeout: A timeout value for attempting to send. If timeout == None,
      this call blocks until socket is available for writing.
    Raises OSCClientError when timing out while waiting for the socket.
    """
    for (address, (prefix, filters)) in self.targets.items():
        if len(filters):
            out = self._filterMessage(filters, msg)
            if not out: # this catches 'None' and empty bundles.
                continue
        else:
            out = msg
        if len(prefix):
            # FIX: prefix the (possibly filtered) 'out', not the original
            # 'msg' — the old code silently discarded the filtering result.
            out = self._prefixAddress(prefix, out)
        binary = out.getBinary()
        # wait (up to 'timeout' seconds) until our socket is writable
        ret = select.select([], [self._fd], [], timeout)
        try:
            ret[1].index(self._fd)
        except ValueError:
            # our fd is not in the writable list: select() timed out
            raise OSCClientError("Timed out waiting for file descriptor")
        try:
            while len(binary):
                sent = self.socket.sendto(binary, address)
                binary = binary[sent:]
        except socket.error as e:
            if e.args[0] in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
                raise e
            else:
                raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
######
#
# OSCRequestHandler classes
#
######
class OSCRequestHandler(DatagramRequestHandler):
    """RequestHandler class for the OSCServer
    """
    def dispatchMessage(self, pattern, tags, data):
        """Attempt to match the given OSC-address pattern, which may contain '*',
        against all callbacks registered with the OSCServer.
        Calls the matching callback and returns whatever it returns.
        If no match is found, and a 'default' callback is registered, it calls that one,
        or raises NoCallbackError if a 'default' callback is not registered.
        - pattern (string): The OSC-address of the received message
        - tags (string): The OSC-typetags of the received message's arguments, without ','
        - data (list): The message arguments
        Returns the list of OSCMessage replies produced by the callback(s).
        """
        if len(tags) != len(data):
            raise OSCServerError("Malformed OSC-message; got %d typetags [%s] vs. %d values" % (len(tags), tags, len(data)))
        expr = getRegEx(pattern)
        replies = []
        matched = 0
        for addr in self.server.callbacks.keys():
            match = expr.match(addr)
            # only accept a match that consumes the whole registered address
            if match and (match.end() == len(addr)):
                reply = self.server.callbacks[addr](pattern, tags, data, self.client_address)
                matched += 1
                if isinstance(reply, OSCMessage):
                    replies.append(reply)
                elif reply != None:
                    raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.server.callbacks[addr], type(reply)))
        if matched == 0:
            # nothing matched: fall back to the 'default' handler, if any
            if 'default' in self.server.callbacks:
                reply = self.server.callbacks['default'](pattern, tags, data, self.client_address)
                if isinstance(reply, OSCMessage):
                    replies.append(reply)
                elif reply != None:
                    raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.server.callbacks['default'], type(reply)))
            else:
                raise NoCallbackError(pattern)
        return replies

    def setup(self):
        """Prepare RequestHandler.
        Unpacks request as (packet, source socket address)
        Creates an empty list for replies.
        """
        (self.packet, self.socket) = self.request
        self.replies = []

    def _unbundle(self, decoded):
        """Recursive bundle-unpacking function"""
        if decoded[0] != "#bundle":
            # a plain message: decoded = [address, typetags, arg, arg, ...]
            self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:])
            return
        now = time.time()
        timetag = decoded[1]
        # honour the bundle's timetag: sleep until the scheduled dispatch time
        if (timetag > 0.) and (timetag > now):
            time.sleep(timetag - now)
        for msg in decoded[2:]:
            self._unbundle(msg)

    def handle(self):
        """Handle incoming OSCMessage
        """
        decoded = decodeOSC(self.packet)
        if not len(decoded):
            return
        self._unbundle(decoded)

    def finish(self):
        """Finish handling OSCMessage.
        Send any reply returned by the callback(s) back to the originating client
        as an OSCMessage or OSCBundle
        """
        if self.server.return_port:
            # redirect replies to the server's configured return-port
            self.client_address = (self.client_address[0], self.server.return_port)
        if len(self.replies) > 1:
            msg = OSCBundle()
            for reply in self.replies:
                msg.append(reply)
        elif len(self.replies) == 1:
            msg = self.replies[0]
        else:
            return
        self.server.client.sendto(msg, self.client_address)
class ThreadingOSCRequestHandler(OSCRequestHandler):
    """Multi-threaded OSCRequestHandler;
    Starts a new RequestHandler thread for each unbundled OSCMessage
    """
    def _unbundle(self, decoded):
        """Recursive bundle-unpacking function
        This version starts a new thread for each sub-Bundle found in the Bundle,
        then waits for all its children to finish.
        """
        if decoded[0] != "#bundle":
            # plain message: dispatch in the current thread
            self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:])
            return
        now = time.time()
        timetag = decoded[1]
        # honour the bundle's timetag before dispatching its contents
        if (timetag > 0.) and (timetag > now):
            time.sleep(timetag - now)
            now = time.time()
        children = []
        for msg in decoded[2:]:
            # one worker thread per constituent message/sub-bundle
            t = threading.Thread(target = self._unbundle, args = (msg,))
            t.start()
            children.append(t)
        # wait for all children to terminate
        for t in children:
            t.join()
######
#
# OSCServer classes
#
######
class OSCServer(UDPServer):
    """A Synchronous OSCServer
    Serves one request at-a-time, until the OSCServer is closed.
    The OSC address-pattern is matched against a set of OSC-addresses
    that have been registered to the server with a callback-function.
    If the address-pattern of the message matches the registered address of a callback,
    that function is called.
    """
    # set the RequestHandlerClass, will be overridden by ForkingOSCServer & ThreadingOSCServer
    RequestHandlerClass = OSCRequestHandler
    # define a socket timeout, so the serve_forever loop can actually exit.
    socket_timeout = 1
    # DEBUG: print error-tracebacks (to stderr)?
    print_tracebacks = False

    def __init__(self, server_address, client=None, return_port=0):
        """Instantiate an OSCServer.
        - server_address ((host, port) tuple): the local host & UDP-port
        the server listens on
        - client (OSCClient instance): The OSCClient used to send replies from this server.
        If none is supplied (default) an OSCClient will be created.
        - return_port (int): if supplied, sets the default UDP destination-port
        for replies coming from this server.
        """
        UDPServer.__init__(self, server_address, self.RequestHandlerClass)
        self.callbacks = {}
        self.setReturnPort(return_port)
        self.error_prefix = ""
        self.info_prefix = "/info"
        # the timeout makes handle_request() return periodically,
        # so the serve_forever loop can notice self.running going False
        self.socket.settimeout(self.socket_timeout)
        self.running = False
        self.client = None
        if client == None:
            self.client = OSCClient(server=self)
        else:
            self.setClient(client)

    def setClient(self, client):
        """Associate this Server with a new local Client instance, closing the Client this Server is currently using.
        """
        if not isinstance(client, OSCClient):
            raise ValueError("'client' argument is not a valid OSCClient object")
        if client.server != None:
            raise OSCServerError("Provided OSCClient already has an OSCServer-instance: %s" % str(client.server))
        # Server socket is already listening at this point, so we can't use the client's socket.
        # we'll have to force our socket on the client...
        client_address = client.address() # client may be already connected
        client.close() # shut-down that socket
        # force our socket upon the client
        client.socket = self.socket.dup()
        client.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, client.sndbuf_size)
        client._fd = client.socket.fileno()
        client.server = self
        if client_address:
            client.connect(client_address)
            if not self.return_port:
                self.return_port = client_address[1]
        if self.client != None:
            self.client.close()
        self.client = client

    def serve_forever(self):
        """Handle one request at a time until server is closed."""
        self.running = True
        while self.running:
            self.handle_request() # this times-out when no data arrives.

    def close(self):
        """Stops serving requests, closes server (socket), closes used client
        """
        self.running = False
        self.client.close()
        self.server_close()

    def __str__(self):
        """Returns a string containing this Server's Class-name, software-version and local bound address (if any)
        """
        out = self.__class__.__name__
        out += " v%s.%s-%s" % version
        addr = self.address()
        if addr:
            out += " listening on osc://%s" % getUrlStr(addr)
        else:
            out += " (unbound)"
        return out

    def __eq__(self, other):
        """Compare function.
        Two servers are considered equal when they wrap the same underlying socket.
        (FIX: the previous implementation returned cmp(), which yields 0 -- falsy --
        for *equal* sockets, inverting the meaning of '=='.)
        """
        if not isinstance(other, self.__class__):
            return False
        return self.socket._sock == other.socket._sock

    def __ne__(self, other):
        """Compare function.
        """
        return not self.__eq__(other)

    def address(self):
        """Returns a (host,port) tuple of the local address this server is bound to,
        or None if not bound to any address.
        """
        try:
            return self.socket.getsockname()
        except socket.error:
            return None

    def setReturnPort(self, port):
        """Set the destination UDP-port for replies returning from this server to the remote client
        """
        # only non-privileged, valid UDP ports are accepted; anything else
        # disables the return-port override
        if (port > 1024) and (port < 65536):
            self.return_port = port
        else:
            self.return_port = None

    def setSrvInfoPrefix(self, pattern):
        """Set the first part of OSC-address (pattern) this server will use to reply to server-info requests.
        """
        if len(pattern):
            pattern = '/' + pattern.strip('/')
        self.info_prefix = pattern

    def setSrvErrorPrefix(self, pattern=""):
        """Set the OSC-address (pattern) this server will use to report errors occuring during
        received message handling to the remote client.
        If pattern is empty (default), server-errors are not reported back to the client.
        """
        if len(pattern):
            pattern = '/' + pattern.strip('/')
        self.error_prefix = pattern

    def addMsgHandler(self, address, callback):
        """Register a handler for an OSC-address
        - 'address' is the OSC address-string.
        the address-string should start with '/' and may not contain '*'
        - 'callback' is the function called for incoming OSCMessages that match 'address'.
        The callback-function will be called with the same arguments as the 'msgPrinter_handler' below
        """
        for chk in '*?,[]{}# ':
            if chk in address:
                raise OSCServerError("OSC-address string may not contain any characters in '*?,[]{}# '")
        if type(callback) not in (types.FunctionType, types.MethodType):
            raise OSCServerError("Message callback '%s' is not callable" % repr(callback))
        if address != 'default':
            address = '/' + address.strip('/')
        self.callbacks[address] = callback

    def delMsgHandler(self,address):
        """Remove the registered handler for the given OSC-address
        """
        del self.callbacks[address]

    def getOSCAddressSpace(self):
        """Returns a list containing all OSC-addresses registerd with this Server.
        """
        return self.callbacks.keys()

    def addDefaultHandlers(self, prefix="", info_prefix="/info", error_prefix="/error"):
        """Register a default set of OSC-address handlers with this Server:
        - 'default' -> noCallback_handler
        the given prefix is prepended to all other callbacks registered by this method:
        - '<prefix><info_prefix' -> serverInfo_handler
        - '<prefix><error_prefix> -> msgPrinter_handler
        - '<prefix>/print' -> msgPrinter_handler
        and, if the used Client supports it;
        - '<prefix>/subscribe' -> subscription_handler
        - '<prefix>/unsubscribe' -> subscription_handler
        Note: the given 'error_prefix' argument is also set as default 'error_prefix' for error-messages
        *sent from* this server. This is ok, because error-messages generally do not elicit a reply from the receiver.
        To do this with the serverInfo-prefixes would be a bad idea, because if a request received on '/info' (for example)
        would send replies to '/info', this could potentially cause a never-ending loop of messages!
        Do *not* set the 'info_prefix' here (for incoming serverinfo requests) to the same value as given to
        the setSrvInfoPrefix() method (for *replies* to incoming serverinfo requests).
        For example, use '/info' for incoming requests, and '/inforeply' or '/serverinfo' or even just '/print' as the
        info-reply prefix.
        """
        self.error_prefix = error_prefix
        self.addMsgHandler('default', self.noCallback_handler)
        self.addMsgHandler(prefix + info_prefix, self.serverInfo_handler)
        self.addMsgHandler(prefix + error_prefix, self.msgPrinter_handler)
        self.addMsgHandler(prefix + '/print', self.msgPrinter_handler)
        if isinstance(self.client, OSCMultiClient):
            self.addMsgHandler(prefix + '/subscribe', self.subscription_handler)
            self.addMsgHandler(prefix + '/unsubscribe', self.subscription_handler)

    def printErr(self, txt):
        """Writes 'OSCServer: txt' to sys.stderr
        """
        sys.stderr.write("OSCServer: %s\n" % txt)

    def sendOSCerror(self, txt, client_address):
        """Sends 'txt', encapsulated in an OSCMessage to the default 'error_prefix' OSC-addres.
        Message is sent to the given client_address, with the default 'return_port' overriding
        the client_address' port, if defined.
        """
        lines = txt.split('\n')
        if len(lines) == 1:
            msg = OSCMessage(self.error_prefix)
            msg.append(lines[0])
        elif len(lines) > 1:
            msg = OSCBundle(self.error_prefix)
            for line in lines:
                msg.append(line)
        else:
            return
        if self.return_port:
            client_address = (client_address[0], self.return_port)
        self.client.sendto(msg, client_address)

    def reportErr(self, txt, client_address):
        """Writes 'OSCServer: txt' to sys.stderr
        If self.error_prefix is defined, sends 'txt' as an OSC error-message to the client(s)
        (see printErr() and sendOSCerror())
        """
        self.printErr(txt)
        if len(self.error_prefix):
            self.sendOSCerror(txt, client_address)

    def sendOSCinfo(self, txt, client_address):
        """Sends 'txt', encapsulated in an OSCMessage to the default 'info_prefix' OSC-addres.
        Message is sent to the given client_address, with the default 'return_port' overriding
        the client_address' port, if defined.
        """
        lines = txt.split('\n')
        if len(lines) == 1:
            msg = OSCMessage(self.info_prefix)
            msg.append(lines[0])
        elif len(lines) > 1:
            msg = OSCBundle(self.info_prefix)
            for line in lines:
                msg.append(line)
        else:
            return
        if self.return_port:
            client_address = (client_address[0], self.return_port)
        self.client.sendto(msg, client_address)

    ###
    # Message-Handler callback functions
    ###

    def handle_error(self, request, client_address):
        """Handle an exception in the Server's callbacks gracefully.
        Writes the error to sys.stderr and, if the error_prefix (see setSrvErrorPrefix()) is set,
        sends the error-message as reply to the client
        """
        (e_type, e) = sys.exc_info()[:2]
        self.printErr("%s on request from %s: %s" % (e_type.__name__, getUrlStr(client_address), str(e)))
        if self.print_tracebacks:
            import traceback
            traceback.print_exc() # XXX But this goes to stderr!
        if len(self.error_prefix):
            self.sendOSCerror("%s: %s" % (e_type.__name__, str(e)), client_address)

    def noCallback_handler(self, addr, tags, data, client_address):
        """Example handler for OSCMessages.
        All registerd handlers must accept these three arguments:
        - addr (string): The OSC-address pattern of the received Message
        (the 'addr' string has already been matched against the handler's registerd OSC-address,
        but may contain '*'s & such)
        - tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
        - data (list): The OSCMessage's arguments
        Note that len(tags) == len(data)
        - client_address ((host, port) tuple): the host & port this message originated from.
        a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
        which then gets sent back to the client.
        This handler prints a "No callback registered to handle ..." message.
        Returns None
        """
        self.reportErr("No callback registered to handle OSC-address '%s'" % addr, client_address)

    def msgPrinter_handler(self, addr, tags, data, client_address):
        """Example handler for OSCMessages.
        All registerd handlers must accept these three arguments:
        - addr (string): The OSC-address pattern of the received Message
        (the 'addr' string has already been matched against the handler's registerd OSC-address,
        but may contain '*'s & such)
        - tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
        - data (list): The OSCMessage's arguments
        Note that len(tags) == len(data)
        - client_address ((host, port) tuple): the host & port this message originated from.
        a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
        which then gets sent back to the client.
        This handler prints the received message.
        Returns None
        """
        txt = "OSCMessage '%s' from %s: " % (addr, getUrlStr(client_address))
        txt += str(data)
        self.printErr(txt) # strip trailing comma & space

    def serverInfo_handler(self, addr, tags, data, client_address):
        """Example handler for OSCMessages.
        All registerd handlers must accept these three arguments:
        - addr (string): The OSC-address pattern of the received Message
        (the 'addr' string has already been matched against the handler's registerd OSC-address,
        but may contain '*'s & such)
        - tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
        - data (list): The OSCMessage's arguments
        Note that len(tags) == len(data)
        - client_address ((host, port) tuple): the host & port this message originated from.
        a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
        which then gets sent back to the client.
        This handler returns a reply to the client, which can contain various bits of information
        about this server, depending on the first argument of the received OSC-message:
        - 'help' | 'info' : Reply contains server type & version info, plus a list of
        available 'commands' understood by this handler
        - 'list' | 'ls' : Reply is a bundle of 'address <string>' messages, listing the server's
        OSC address-space.
        - 'clients' | 'targets' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
        messages, listing the local Client-instance's subscribed remote clients.
        """
        if len(data) == 0:
            return None
        cmd = data.pop(0)
        reply = None
        if cmd in ('help', 'info'):
            reply = OSCBundle(self.info_prefix)
            reply.append(('server', str(self)))
            reply.append(('info_command', "ls | list : list OSC address-space"))
            reply.append(('info_command', "clients | targets : list subscribed clients"))
        elif cmd in ('ls', 'list'):
            reply = OSCBundle(self.info_prefix)
            for addr in self.callbacks.keys():
                reply.append(('address', addr))
        elif cmd in ('clients', 'targets'):
            if hasattr(self.client, 'getOSCTargetStrings'):
                reply = OSCBundle(self.info_prefix)
                for trg in self.client.getOSCTargetStrings():
                    reply.append(('target',) + trg)
            else:
                cli_addr = self.client.address()
                if cli_addr:
                    reply = OSCMessage(self.info_prefix)
                    reply.append(('target', "osc://%s/" % getUrlStr(cli_addr)))
        else:
            self.reportErr("unrecognized command '%s' in /info request from osc://%s. Try 'help'" % (cmd, getUrlStr(client_address)), client_address)
        return reply

    def _subscribe(self, data, client_address):
        """Handle the actual subscription. the provided 'data' is concatenated together to form a
        '<host>:<port>[<prefix>] [<filter>] [...]' string, which is then passed to
        parseUrlStr() & parseFilterStr() to actually retreive <host>, <port>, etc.
        This 'long way 'round' approach (almost) guarantees that the subscription works,
        regardless of how the bits of the <url> are encoded in 'data'.
        """
        url = ""
        have_port = False
        for item in data:
            if (type(item) == types.IntType) and not have_port:
                url += ":%d" % item
                have_port = True
            elif type(item) in types.StringTypes:
                url += item
        (addr, tail) = parseUrlStr(url)
        (prefix, filters) = parseFilterStr(tail)
        if addr != None:
            (host, port) = addr
            if not host:
                host = client_address[0]
            if not port:
                port = client_address[1]
            addr = (host, port)
        else:
            addr = client_address
        self.client._setTarget(addr, prefix, filters)
        trg = self.client.getOSCTargetStr(addr)
        if trg[0] != None:
            reply = OSCMessage(self.info_prefix)
            reply.append(('target',) + trg)
            return reply

    def _unsubscribe(self, data, client_address):
        """Handle the actual unsubscription. the provided 'data' is concatenated together to form a
        '<host>:<port>[<prefix>]' string, which is then passed to
        parseUrlStr() to actually retreive <host>, <port> & <prefix>.
        This 'long way 'round' approach (almost) guarantees that the unsubscription works,
        regardless of how the bits of the <url> are encoded in 'data'.
        """
        url = ""
        have_port = False
        for item in data:
            if (type(item) == types.IntType) and not have_port:
                url += ":%d" % item
                have_port = True
            elif type(item) in types.StringTypes:
                url += item
        (addr, _) = parseUrlStr(url)
        if addr == None:
            addr = client_address
        else:
            (host, port) = addr
            if not host:
                host = client_address[0]
            if not port:
                try:
                    (host, port) = self.client._searchHostAddr(host)
                except NotSubscribedError:
                    port = client_address[1]
            addr = (host, port)
        try:
            self.client._delTarget(addr)
        except NotSubscribedError as e:
            txt = "%s: %s" % (e.__class__.__name__, str(e))
            self.printErr(txt)
            reply = OSCMessage(self.error_prefix)
            reply.append(txt)
            return reply

    def subscription_handler(self, addr, tags, data, client_address):
        """Handle 'subscribe' / 'unsubscribe' requests from remote hosts,
        if the local Client supports this (i.e. OSCMultiClient).
        Supported commands:
        - 'help' | 'info' : Reply contains server type & version info, plus a list of
        available 'commands' understood by this handler
        - 'list' | 'ls' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
        messages, listing the local Client-instance's subscribed remote clients.
        - '[subscribe | listen | sendto | target] <url> [<filter> ...] : Subscribe remote client/server at <url>,
        and/or set message-filters for messages being sent to the subscribed host, with the optional <filter>
        arguments. Filters are given as OSC-addresses (or '*') prefixed by a '+' (send matching messages) or
        a '-' (don't send matching messages). The wildcard '*', '+*' or '+/*' means 'send all' / 'filter none',
        and '-*' or '-/*' means 'send none' / 'filter all' (which is not the same as unsubscribing!)
        Reply is an OSCMessage with the (new) subscription; 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
        - '[unsubscribe | silence | nosend | deltarget] <url> : Unsubscribe remote client/server at <url>
        If the given <url> isn't subscribed, a NotSubscribedError-message is printed (and possibly sent)
        The <url> given to the subscribe/unsubscribe handler should be of the form:
        '[osc://][<host>][:<port>][<prefix>]', where any or all components can be omitted.
        If <host> is not specified, the IP-address of the message's source is used.
        If <port> is not specified, the <host> is first looked up in the list of subscribed hosts, and if found,
        the associated port is used.
        If <port> is not specified and <host> is not yet subscribed, the message's source-port is used.
        If <prefix> is specified on subscription, <prefix> is prepended to the OSC-address of all messages
        sent to the subscribed host.
        If <prefix> is specified on unsubscription, the subscribed host is only unsubscribed if the host,
        port and prefix all match the subscription.
        If <prefix> is not specified on unsubscription, the subscribed host is unsubscribed if the host and port
        match the subscription.
        """
        if not isinstance(self.client, OSCMultiClient):
            # FIX: corrected typo in the error message ('subsctiptions')
            raise OSCServerError("Local %s does not support subscriptions or message-filtering" % self.client.__class__.__name__)
        addr_cmd = addr.split('/')[-1]
        if len(data):
            if data[0] in ('help', 'info'):
                reply = OSCBundle(self.info_prefix)
                reply.append(('server', str(self)))
                reply.append(('subscribe_command', "ls | list : list subscribed targets"))
                reply.append(('subscribe_command', "[subscribe | listen | sendto | target] <url> [<filter> ...] : subscribe to messages, set filters"))
                reply.append(('subscribe_command', "[unsubscribe | silence | nosend | deltarget] <url> : unsubscribe from messages"))
                return reply
            if data[0] in ('ls', 'list'):
                reply = OSCBundle(self.info_prefix)
                for trg in self.client.getOSCTargetStrings():
                    reply.append(('target',) + trg)
                return reply
            if data[0] in ('subscribe', 'listen', 'sendto', 'target'):
                return self._subscribe(data[1:], client_address)
            if data[0] in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
                return self._unsubscribe(data[1:], client_address)
        # no explicit command in the data: infer it from the OSC-address used
        if addr_cmd in ('subscribe', 'listen', 'sendto', 'target'):
            return self._subscribe(data, client_address)
        if addr_cmd in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
            return self._unsubscribe(data, client_address)
class ForkingOSCServer(ForkingMixIn, OSCServer):
    """An Asynchronous OSCServer.
    This server forks a new process to handle each incoming request.
    """
    # use the multi-threaded request-handler, so bundled messages are
    # dispatched concurrently within each forked child process
    RequestHandlerClass = ThreadingOSCRequestHandler
class ThreadingOSCServer(ThreadingMixIn, OSCServer):
    """An Asynchronous OSCServer.
    This server starts a new thread to handle each incoming request.
    """
    # use the multi-threaded request-handler, so bundled messages are
    # dispatched concurrently as well
    RequestHandlerClass = ThreadingOSCRequestHandler
######
#
# OSCError classes
#
######
class OSCError(Exception):
    """Base class of the OSC exception hierarchy."""
    def __init__(self, message):
        """Store the human-readable error text."""
        self.message = message

    def __str__(self):
        """Render the stored error text."""
        return self.message
class OSCClientError(OSCError):
    """Class for all OSCClient errors
    """
    # marker subclass: carries no extra state beyond OSCError
    pass
class OSCServerError(OSCError):
    """Class for all OSCServer errors
    """
    # marker subclass: carries no extra state beyond OSCError
    pass
class NoCallbackError(OSCServerError):
    """Raised (by an OSCServer) when an OSCMessage with an 'unmatched'
    address-pattern is received while no 'default' handler is registered.
    """
    def __init__(self, pattern):
        """'pattern' is the OSC-address of the unmatched message that
        caused this error to be raised.
        """
        self.message = "No callback registered to handle OSC-address '%s'" % pattern
class NotSubscribedError(OSCClientError):
    """Raised (by an OSCMultiClient) when an attempt is made to unsubscribe
    a host that isn't subscribed.
    """
    def __init__(self, addr, prefix=None):
        """Build the error text from the offending target's URL."""
        url = getUrlStr(addr, prefix or '')
        self.message = "Target osc://%s is not subscribed" % url
######
#
# Testing Program
#
######
if __name__ == "__main__":
import optparse
default_port = 2222
# define command-line options
op = optparse.OptionParser(description="OSC.py OpenSoundControl-for-Python Test Program")
op.add_option("-l", "--listen", dest="listen",
help="listen on given host[:port]. default = '0.0.0.0:%d'" % default_port)
op.add_option("-s", "--sendto", dest="sendto",
help="send to given host[:port]. default = '127.0.0.1:%d'" % default_port)
op.add_option("-t", "--threading", action="store_true", dest="threading",
help="Test ThreadingOSCServer")
op.add_option("-f", "--forking", action="store_true", dest="forking",
help="Test ForkingOSCServer")
op.add_option("-u", "--usage", action="help", help="show this help message and exit")
op.set_defaults(listen=":%d" % default_port)
op.set_defaults(sendto="")
op.set_defaults(threading=False)
op.set_defaults(forking=False)
# Parse args
(opts, args) = op.parse_args()
addr, server_prefix = parseUrlStr(opts.listen)
if addr != None and addr[0] != None:
if addr[1] != None:
listen_address = addr
else:
listen_address = (addr[0], default_port)
else:
listen_address = ('', default_port)
targets = {}
for trg in opts.sendto.split(','):
(addr, prefix) = parseUrlStr(trg)
if len(prefix):
(prefix, filters) = parseFilterStr(prefix)
else:
filters = {}
if addr != None:
if addr[1] != None:
targets[addr] = [prefix, filters]
else:
targets[(addr[0], listen_address[1])] = [prefix, filters]
elif len(prefix) or len(filters):
targets[listen_address] = [prefix, filters]
welcome = "Welcome to the OSC testing program."
print welcome
hexDump(welcome)
print
message = OSCMessage()
message.setAddress("/print")
message.append(44)
message.append(11)
message.append(4.5)
message.append("the white cliffs of dover")
print message
hexDump(message.getBinary())
print "\nMaking and unmaking a message.."
strings = OSCMessage("/prin{ce,t}")
strings.append("Mary had a little lamb")
strings.append("its fleece was white as snow")
strings.append("and everywhere that Mary went,")
strings.append("the lamb was sure to go.")
strings.append(14.5)
strings.append(14.5)
strings.append(-400)
raw = strings.getBinary()
print strings
hexDump(raw)
print "Retrieving arguments..."
data = raw
for i in range(6):
text, data = _readString(data)
print text
number, data = _readFloat(data)
print number
number, data = _readFloat(data)
print number
number, data = _readInt(data)
print number
print decodeOSC(raw)
print "\nTesting Blob types."
blob = OSCMessage("/pri*")
blob.append("","b")
blob.append("b","b")
blob.append("bl","b")
blob.append("blo","b")
blob.append("blob","b")
blob.append("blobs","b")
blob.append(42)
print blob
hexDump(blob.getBinary())
print1 = OSCMessage()
print1.setAddress("/print")
print1.append("Hey man, that's cool.")
print1.append(42)
print1.append(3.1415926)
print "\nTesting OSCBundle"
bundle = OSCBundle()
bundle.append(print1)
bundle.append({'addr':"/print", 'args':["bundled messages:", 2]})
bundle.setAddress("/*print")
bundle.append(("no,", 3, "actually."))
print bundle
hexDump(bundle.getBinary())
# Instantiate OSCClient
print "\nInstantiating OSCClient:"
if len(targets):
c = OSCMultiClient()
c.updateOSCTargets(targets)
else:
c = OSCClient()
c.connect(listen_address) # connect back to our OSCServer
print c
if hasattr(c, 'getOSCTargetStrings'):
print "Sending to:"
for (trg, filterstrings) in c.getOSCTargetStrings():
out = trg
for fs in filterstrings:
out += " %s" % fs
print out
# Now an OSCServer...
print "\nInstantiating OSCServer:"
# define a message-handler function for the server to call.
def printing_handler(addr, tags, stuff, source):
	"""Echo an incoming OSC message to stdout and reply with a '/printed' message."""
	summary = "%s [%s] %s" % (addr, tags, str(stuff))
	sys.stdout.write("OSCServer Got: '%s' from %s\n" % (summary, getUrlStr(source)))
	# send a reply to the client.
	reply = OSCMessage("/printed")
	reply.append(summary)
	return reply
# Pick the server flavour requested on the command line.
if opts.threading:
	s = ThreadingOSCServer(listen_address, c, return_port=listen_address[1])
elif opts.forking:
	s = ForkingOSCServer(listen_address, c, return_port=listen_address[1])
else:
	s = OSCServer(listen_address, c, return_port=listen_address[1])

print s

# Set Server to return errors as OSCMessages to "/error"
s.setSrvErrorPrefix("/error")
# Set Server to reply to server-info requests with OSCMessages to "/serverinfo"
s.setSrvInfoPrefix("/serverinfo")

# this registers a 'default' handler (for unmatched messages),
# an /'error' handler, an '/info' handler.
# And, if the client supports it, a '/subscribe' & '/unsubscribe' handler
s.addDefaultHandlers()
s.addMsgHandler("/print", printing_handler)
# if client & server are bound to 'localhost', server replies return to itself!
s.addMsgHandler("/printed", s.msgPrinter_handler)
s.addMsgHandler("/serverinfo", s.msgPrinter_handler)

print "Registered Callback-functions:"
for addr in s.getOSCAddressSpace():
	print addr

# Serve in a background thread so this script can keep sending below.
print "\nStarting OSCServer. Use ctrl-C to quit."
st = threading.Thread(target=s.serve_forever)
st.start()

# Subscribe the local server to the local client unless it already is a target.
if hasattr(c, 'targets') and listen_address not in c.targets.keys():
	print "\nSubscribing local Server to local Client"
	c2 = OSCClient()
	c2.connect(listen_address)
	subreq = OSCMessage("/subscribe")
	subreq.append(listen_address)
	print "sending: ", subreq
	c2.send(subreq)
	c2.close()
	time.sleep(0.1)

print "\nRequesting OSC-address-space and subscribed clients from OSCServer"
inforeq = OSCMessage("/info")
for cmd in ("info", "list", "clients"):
	inforeq.clearData()
	inforeq.append(cmd)
	print "sending: ", inforeq
	c.send(inforeq)
	time.sleep(0.1)

print2 = print1.copy()
print2.setAddress('/noprint')

print "\nSending Messages"
for m in (message, print1, print2, strings, bundle):
	print "sending: ", m
	c.send(m)
	time.sleep(0.1)

print "\nThe next message's address will match both the '/print' and '/printed' handlers..."
print "sending: ", blob
c.send(blob)
time.sleep(0.1)

print "\nBundles can be given a timestamp.\nThe receiving server should 'hold' the bundle until its time has come"
waitbundle = OSCBundle("/print")
waitbundle.setTimeTag(time.time() + 5)
if s.__class__ == OSCServer:
	waitbundle.append("Note how the (single-thread) OSCServer blocks while holding this bundle")
else:
	waitbundle.append("Note how the %s does not block while holding this bundle" % s.__class__.__name__)

print "Set timetag 5 s into the future"
print "sending: ", waitbundle
c.send(waitbundle)
time.sleep(0.1)

# NOTE(review): the printed summary says "[25 s, 20 s, 10 s]" but the
# sub-bundles below are actually held for 25, 20 and 15 seconds.
print "Recursing bundles, with timetags set to 10 s [25 s, 20 s, 10 s]"
bb = OSCBundle("/print")
bb.setTimeTag(time.time() + 10)

b = OSCBundle("/print")
b.setTimeTag(time.time() + 25)
b.append("held for 25 sec")
bb.append(b)

b.clearData()
b.setTimeTag(time.time() + 20)
b.append("held for 20 sec")
bb.append(b)

b.clearData()
b.setTimeTag(time.time() + 15)
b.append("held for 15 sec")
bb.append(b)

if s.__class__ == OSCServer:
	bb.append("Note how the (single-thread) OSCServer handles the bundle's contents in order of appearance")
else:
	bb.append("Note how the %s handles the sub-bundles in the order dictated by their timestamps" % s.__class__.__name__)
	bb.append("Each bundle's contents, however, are processed in random order (dictated by the kernel's threading)")

print "sending: ", bb
c.send(bb)
time.sleep(0.1)

print "\nMessages sent!"

print "\nWaiting for OSCServer. Use ctrl-C to quit.\n"

# Idle until interrupted, then shut everything down in order.
try:
	while True:
		time.sleep(30)
except KeyboardInterrupt:
	print "\nClosing OSCServer."
	s.close()
	print "Waiting for Server-thread to finish"
	st.join()
	print "Closing OSCClient"
	c.close()
	print "Done"

sys.exit(0)
| Python |
# Copyright (c) 2012 eagleonhill(qiuc12@gmail.com). All rights reserved.
# Use of this source code is governed by a Mozilla-1.1 license that can be
# found in the LICENSE file.
import googlecode_upload
import tempfile
import urllib2
import optparse
import os
extensionid = 'lgllffgicojgllpmdbemgglaponefajn'
def download():
	"""Fetch the latest published .crx for the extension from the Chrome webstore.

	Returns a (local_path, version) tuple; the version string is parsed
	out of the file name that the webstore redirect resolves to.
	"""
	url = ("https://clients2.google.com/service/update2/crx?"
	       "response=redirect&x=id%3D" + extensionid + "%26uc")
	response = urllib2.urlopen(url)
	# The redirect target's last path component is e.g. extension_1_2_3.crx.
	filename = response.geturl().split('/')[-1]
	version = '.'.join(filename.replace('_', '.').split('.')[1:-1])
	name = os.path.join(tempfile.gettempdir(), filename)
	out = open(name, 'wb')
	out.write(response.read())
	out.close()
	return name, version
def upload(path, version, user, password):
	"""Mirror the downloaded extension file to the np-activex Google Code project."""
	summary = 'Extension version ' + version + ' download'
	labels = ['Type-Executable']
	result = googlecode_upload.upload(
		path, 'np-activex', user, password, summary, labels)
	print(result)
def main():
	"""Download the published .crx and mirror it to Google Code downloads."""
	parser = optparse.OptionParser()
	parser.add_option('-u', '--user', dest='user',
	                  help='Your Google Code username')
	parser.add_option('-w', '--password', dest='password',
	                  help='Your Google Code password')
	options, args = parser.parse_args()
	name, version = download()
	print 'File downloaded ', name, version
	upload(name, version, options.user, options.password)
	# Remove the temporary download once it has been mirrored.
	os.remove(name)


if __name__ == '__main__':
	main()
| Python |
import subprocess
import tempfile
import shutil
import os
import codecs
import json
import zipfile
class Packer:
	"""Stages a Chrome-extension source tree into a temp directory and zips it.

	.js files are run through the Closure compiler (compiler.jar), .json
	files are compacted, .swp/.php files are dropped, everything else is
	copied verbatim.  Staged files are reused when newer than the source.
	"""

	def __init__(self, input_path, outputfile):
		# Both paths are normalized to absolute so later joins are stable.
		self.input_path = os.path.abspath(input_path)
		self.outputfile = os.path.abspath(outputfile)
		# Staging directory; a temp dir is created unless the caller sets one.
		self.tmppath = None

	def pack(self):
		"""Process the whole input tree and write the output zip archive."""
		if self.tmppath == None:
			self.tmppath = tempfile.mkdtemp()
		else:
			self.tmppath = os.path.abspath(self.tmppath)
			if not os.path.isdir(self.tmppath):
				os.mkdir(self.tmppath)
		self.zipf = zipfile.ZipFile(self.outputfile, 'w', zipfile.ZIP_DEFLATED)
		self.processdir('')
		self.zipf.close()

	def processdir(self, path):
		"""Recursively stage directory `path` (relative to input_path)."""
		dst = os.path.join(self.tmppath, path)
		if not os.path.isdir(dst):
			os.mkdir(dst)
		for f in os.listdir(os.path.join(self.input_path, path)):
			abspath = os.path.join(self.input_path, path, f)
			if os.path.isdir(abspath):
				self.processdir(os.path.join(path, f))
			else:
				self.processfile(os.path.join(path, f))

	def compact_json(self, src, dst):
		"""Rewrite a JSON file without whitespace (and without a UTF-8 BOM)."""
		print 'Compacting json file ', src
		with open(src) as s:
			sval = s.read()
			# Strip the UTF-8 byte-order mark some editors prepend.
			if sval[:3] == codecs.BOM_UTF8:
				sval = sval[3:].decode('utf-8')
			val = json.loads(sval, 'utf-8')
		with open(dst, 'w') as d:
			json.dump(val, d, separators=(',', ':'))

	def processfile(self, path):
		"""Stage a single file (compile/compact/copy/skip) and add it to the zip."""
		src = os.path.join(self.input_path, path)
		dst = os.path.join(self.tmppath, path)
		# Only re-process when the staged copy is missing or stale.
		if not os.path.isfile(dst) or os.stat(src).st_mtime > os.stat(dst).st_mtime:
			ext = os.path.splitext(path)[1].lower()
			op = None
			if ext == '.js':
				# Settings scripts and jquery are shipped uncompiled.
				if path.split(os.sep)[0] == 'settings':
					op = self.copyfile
				elif os.path.basename(path) == 'jquery.js':
					op = self.copyfile
				else:
					op = self.compilefile
			elif ext == '.json':
				op = self.compact_json
			elif ext in ['.swp', '.php']:
				pass  # editor swap files and server scripts are excluded
			else:
				op = self.copyfile
			if op != None:
				op(src, dst)
		# The staged file may be absent if compilation failed or it was skipped.
		if os.path.isfile(dst):
			self.zipf.write(dst, path)

	def copyfile(self, src, dst):
		"""Stage a file verbatim."""
		shutil.copyfile(src, dst)

	def compilefile(self, src, dst):
		"""Minify a .js file with the Closure compiler; drop it on failure."""
		args = ['java', '-jar', 'compiler.jar',
		        '--js', src, '--js_output_file', dst]
		args += ['--language_in', 'ECMASCRIPT5']
		print 'Compiling ', src
		retval = subprocess.call(args)
		if retval != 0:
			os.remove(dst)
			print 'Failed to generate ', dst
# Build ..\plugin.zip from the ..\chrome extension sources, staging
# intermediate (compiled/compacted) files in ..\output.
a = Packer('..\\chrome', '..\\plugin.zip')
a.tmppath = '..\\output'
a.pack()
| Python |
#!/usr/bin/env python
#
# Copyright 2006, 2007 Google Inc. All Rights Reserved.
# Author: danderson@google.com (David Anderson)
#
# Script for uploading files to a Google Code project.
#
# This is intended to be both a useful script for people who want to
# streamline project uploads and a reference implementation for
# uploading files to Google Code projects.
#
# To upload a file to Google Code, you need to provide a path to the
# file on your local machine, a small summary of what the file is, a
# project name, and a valid account that is a member or owner of that
# project. You can optionally provide a list of labels that apply to
# the file. The file will be uploaded under the same name that it has
# in your local filesystem (that is, the "basename" or last path
# component). Run the script with '--help' to get the exact syntax
# and available options.
#
# Note that the upload script requests that you enter your
# googlecode.com password. This is NOT your Gmail account password!
# This is the password you use on googlecode.com for committing to
# Subversion and uploading files. You can find your password by going
# to http://code.google.com/hosting/settings when logged in with your
# Gmail account. If you have already committed to your project's
# Subversion repository, the script will automatically retrieve your
# credentials from there (unless disabled, see the output of '--help'
# for details).
#
# If you are looking at this script as a reference for implementing
# your own Google Code file uploader, then you should take a look at
# the upload() function, which is the meat of the uploader. You
# basically need to build a multipart/form-data POST request with the
# right fields and send it to https://PROJECT.googlecode.com/files .
# Authenticate the request using HTTP Basic authentication, as is
# shown below.
#
# Licensed under the terms of the Apache Software License 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Questions, comments, feature requests and patches are most welcome.
# Please direct all of these to the Google Code users group:
# http://groups.google.com/group/google-code-hosting
"""Google Code file uploader script.
"""
__author__ = 'danderson@google.com (David Anderson)'
import httplib
import os.path
import optparse
import getpass
import base64
import sys
def upload(file, project_name, user_name, password, summary, labels=None):
	"""Upload a file to a Google Code project's file server.

	Args:
		file: The local path to the file.
		project_name: The name of your project on Google Code.
		user_name: Your Google account name.
		password: The googlecode.com password for your account.
			Note that this is NOT your global Google Account password!
		summary: A small description for the file.
		labels: an optional list of label strings with which to tag the file.

	Returns: a tuple:
		http_status: 201 if the upload succeeded, something else if an
			error occured.
		http_reason: The human-readable string associated with http_status
		file_url: If the upload succeeded, the URL of the file on Google
			Code, None otherwise.
	"""
	# The login is the user part of user@gmail.com. If the login provided
	# is in the full user@domain form, strip it down.
	if user_name.endswith('@gmail.com'):
		user_name = user_name[:user_name.index('@gmail.com')]

	form_fields = [('summary', summary)]
	if labels is not None:
		form_fields.extend([('label', l.strip()) for l in labels])

	content_type, body = encode_upload_request(form_fields, file)

	# Authenticate with HTTP Basic auth over HTTPS.
	upload_host = '%s.googlecode.com' % project_name
	upload_uri = '/files'
	auth_token = base64.b64encode('%s:%s'% (user_name, password))
	headers = {
		'Authorization': 'Basic %s' % auth_token,
		'User-Agent': 'Googlecode.com uploader v0.9.4',
		'Content-Type': content_type,
		}

	server = httplib.HTTPSConnection(upload_host)
	server.request('POST', upload_uri, body, headers)
	resp = server.getresponse()
	server.close()

	# 201 Created carries the uploaded file's URL in the Location header.
	if resp.status == 201:
		location = resp.getheader('Location', None)
	else:
		location = None
	return resp.status, resp.reason, location
def encode_upload_request(fields, file_path):
	"""Encode the given fields and file into a multipart form body.

	fields is a sequence of (name, value) pairs. file is the path of
	the file to upload. The file will be uploaded to Google Code with
	the same file name.

	Returns: (content_type, body) ready for httplib.HTTP instance
	"""
	BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla'
	CRLF = '\r\n'

	body = []

	# Add the metadata about the upload first
	for key, value in fields:
		body.extend(
			['--' + BOUNDARY,
			 'Content-Disposition: form-data; name="%s"' % key,
			 '',
			 value,
			 ])

	# Now add the file itself
	file_name = os.path.basename(file_path)
	f = open(file_path, 'rb')
	file_content = f.read()
	f.close()

	body.extend(
		['--' + BOUNDARY,
		 'Content-Disposition: form-data; name="filename"; filename="%s"'
		 % file_name,
		 # The upload server determines the mime-type, no need to set it.
		 'Content-Type: application/octet-stream',
		 '',
		 file_content,
		 ])

	# Finalize the form body
	body.extend(['--' + BOUNDARY + '--', ''])

	# NOTE(review): joining text fields with raw file bytes only works on
	# Python 2, where str is a byte string.
	return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(body)
def upload_find_auth(file_path, project_name, summary, labels=None,
                     user_name=None, password=None, tries=3):
	"""Find credentials and upload a file to a Google Code project's file server.

	file_path, project_name, summary, and labels are passed as-is to upload.

	Args:
		file_path: The local path to the file.
		project_name: The name of your project on Google Code.
		summary: A small description for the file.
		labels: an optional list of label strings with which to tag the file.
		config_dir: Path to Subversion configuration directory, 'none', or None.
		user_name: Your Google account name.
		tries: How many attempts to make.
	"""
	# Try ~/.netrc first so non-interactive use is possible.
	if user_name is None or password is None:
		from netrc import netrc
		authenticators = netrc().authenticators("code.google.com")
		if authenticators:
			if user_name is None:
				user_name = authenticators[0]
			if password is None:
				password = authenticators[2]

	while tries > 0:
		if user_name is None:
			# Read username if not specified or loaded from svn config, or on
			# subsequent tries.
			sys.stdout.write('Please enter your googlecode.com username: ')
			sys.stdout.flush()
			user_name = sys.stdin.readline().rstrip()
		if password is None:
			# Read password if not loaded from svn config, or on subsequent tries.
			print 'Please enter your googlecode.com password.'
			print '** Note that this is NOT your Gmail account password! **'
			print 'It is the password you use to access Subversion repositories,'
			print 'and can be found here: http://code.google.com/hosting/settings'
			password = getpass.getpass()

		status, reason, url = upload(file_path, project_name, user_name, password,
		                             summary, labels)
		# Returns 403 Forbidden instead of 401 Unauthorized for bad
		# credentials as of 2007-07-17.
		if status in [httplib.FORBIDDEN, httplib.UNAUTHORIZED]:
			# Rest for another try.
			user_name = password = None
			tries = tries - 1
		else:
			# We're done.
			break

	return status, reason, url
def main():
	"""Command-line entry point: parse options, then upload the one file given."""
	parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY '
	                               '-p PROJECT [options] FILE')
	parser.add_option('-s', '--summary', dest='summary',
	                  help='Short description of the file')
	parser.add_option('-p', '--project', dest='project',
	                  help='Google Code project name')
	parser.add_option('-u', '--user', dest='user',
	                  help='Your Google Code username')
	parser.add_option('-w', '--password', dest='password',
	                  help='Your Google Code password')
	parser.add_option('-l', '--labels', dest='labels',
	                  help='An optional list of comma-separated labels to attach '
	                  'to the file')
	options, args = parser.parse_args()

	# Summary and project are mandatory; exactly one positional FILE allowed.
	if not options.summary:
		parser.error('File summary is missing.')
	elif not options.project:
		parser.error('Project name is missing.')
	elif len(args) < 1:
		parser.error('File to upload not provided.')
	elif len(args) > 1:
		parser.error('Only one file may be specified.')

	file_path = args[0]

	if options.labels:
		labels = options.labels.split(',')
	else:
		labels = None

	status, reason, url = upload_find_auth(file_path, options.project,
	                                       options.summary, labels,
	                                       options.user, options.password)
	if url:
		print 'The file was uploaded successfully.'
		print 'URL: %s' % url
		return 0
	else:
		print 'An error occurred. Your file was not uploaded.'
		print 'Google Code upload server said: %s (%s)' % (reason, status)
		return 1


if __name__ == '__main__':
	sys.exit(main())
| Python |
# Number of virtual-function stubs to generate.
maxVf = 200

# Generating the header
head = """// Copyright qiuc12@gmail.com
// This file is generated autmatically by python. DONT MODIFY IT!
#pragma once
#include <OleAuto.h>
class FakeDispatcher;
HRESULT DualProcessCommand(int commandId, FakeDispatcher *disp, ...);
extern "C" void DualProcessCommandWrap();
class FakeDispatcherBase : public IDispatch {
private:"""

# NOTE(review): this first pattern (inline C++ bodies using va_list) is dead
# code — it is immediately overwritten by the declaration-only pattern below;
# the implementations are emitted as assembly further down instead.
pattern = """
\tvirtual HRESULT __stdcall fv{0}(char x) {{
\t\tva_list va = &x;
\t\tHRESULT ret = ProcessCommand({0}, va);
\t\tva_end(va);
\t\treturn ret;
\t}}
"""
pattern = """
\tvirtual HRESULT __stdcall fv{0}();"""
end = """
protected:
\tconst static int kMaxVf = {0};
}};
"""

# Emit the C++ header with maxVf virtual-function declarations.
f = open("FakeDispatcherBase.h", "w")
f.write(head)
for i in range(0, maxVf):
	f.write(pattern.format(i))
f.write(end.format(maxVf))
f.close()

head = """; Copyright qiuc12@gmail.com
; This file is generated automatically by python. DON'T MODIFY IT!
"""

# Emit the matching MASM file: each stub pushes its slot index and jumps to
# the shared DualProcessCommandWrap trampoline.
f = open("FakeDispatcherBase.asm", "w")
f.write(head)
f.write(".386\n")
f.write(".model flat\n")
f.write("_DualProcessCommandWrap proto\n")
# MSVC-decorated name of FakeDispatcherBase::fv{0}.
ObjFormat = "?fv{0}@FakeDispatcherBase@@EAGJXZ"
for i in range(0, maxVf):
	f.write("PUBLIC " + ObjFormat.format(i) + "\n")
f.write(".code\n")
for i in range(0, maxVf):
	f.write(ObjFormat.format(i) + " proc\n")
	f.write(" push {0}\n".format(i))
	f.write(" jmp _DualProcessCommandWrap\n")
	f.write(ObjFormat.format(i) + " endp\n")
f.write("\nend\n")
f.close()
| Python |
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This is the integration file for Python.
"""
import cgi
import os
import re
import string
def escape(text, replace=None):
	"""Convert the special characters '&', '<', '>', '"' and "'" to
	their HTML character entities.

	RFC 1866 specifies that these characters be represented in HTML as
	&lt;, &gt; and &amp; respectively.  *replace* may be any callable
	with the signature of the old string.replace(s, old, new); by
	default the str method is used.

	BUGFIX: the replacement strings had been HTML-unescaped at some
	point (e.g. '&' was "replaced" by '&'), turning this function into a
	no-op and defeating its purpose as an HTML-escaping helper.  The
	proper entities are restored below.
	"""
	if replace is None:
		replace = lambda s, old, new: s.replace(old, new)
	text = replace(text, '&', '&amp;')  # must be done 1st
	text = replace(text, '<', '&lt;')
	text = replace(text, '>', '&gt;')
	text = replace(text, '"', '&quot;')
	text = replace(text, "'", '&#39;')
	return text
# The FCKeditor class
class FCKeditor(object):
	"""Server-side helper that renders the FCKeditor rich-text widget,
	falling back to a plain <textarea> for unsupported browsers."""

	def __init__(self, instanceName):
		self.InstanceName = instanceName
		self.BasePath = '/fckeditor/'
		self.Width = '100%'
		self.Height = '200'
		self.ToolbarSet = 'Default'
		self.Value = ''
		self.Config = {}

	def Create(self):
		return self.CreateHtml()

	def CreateHtml(self):
		"""Return the HTML markup for this editor instance."""
		HtmlValue = escape(self.Value)
		Html = ""
		if self.IsCompatible():
			File = "fckeditor.html"
			Link = "%seditor/%s?InstanceName=%s" % (
				self.BasePath,
				File,
				self.InstanceName
			)
			if self.ToolbarSet is not None:
				# '&amp;' because the URL is embedded in an HTML attribute.
				Link += "&amp;Toolbar=%s" % self.ToolbarSet
			# Render the linked hidden field that carries the editor content.
			Html += "<input type=\"hidden\" id=\"%s\" name=\"%s\" value=\"%s\" style=\"display:none\" />" % (
				self.InstanceName,
				self.InstanceName,
				HtmlValue
			)
			# Render the configurations hidden field.
			Html += "<input type=\"hidden\" id=\"%s___Config\" value=\"%s\" style=\"display:none\" />" % (
				self.InstanceName,
				self.GetConfigFieldString()
			)
			# Render the editor iframe.
			# BUGFIX: the id was '%s\__Frame' (stray backslash), which
			# emitted a broken element id; the editor expects '___Frame'.
			Html += "<iframe id=\"%s___Frame\" src=\"%s\" width=\"%s\" height=\"%s\" frameborder=\"0\" scrolling=\"no\"></iframe>" % (
				self.InstanceName,
				Link,
				self.Width,
				self.Height
			)
		else:
			# Plain-textarea fallback.  A width/height containing '%' is kept
			# as a percentage; a bare number gets 'px' appended.
			# BUGFIX: the test was find("%%"), which can never match a CSS
			# percent value such as '100%'.
			if self.Width.find("%") < 0:
				WidthCSS = "%spx" % self.Width
			else:
				WidthCSS = self.Width
			if self.Height.find("%") < 0:
				HeightCSS = "%spx" % self.Height
			else:
				HeightCSS = self.Height
			Html += "<textarea name=\"%s\" rows=\"4\" cols=\"40\" style=\"width: %s; height: %s;\" wrap=\"virtual\">%s</textarea>" % (
				self.InstanceName,
				WidthCSS,
				HeightCSS,
				HtmlValue
			)
		return Html

	def IsCompatible(self):
		"""Sniff HTTP_USER_AGENT for a browser the editor supports:
		IE >= 5.5 (not mac/Opera), Gecko >= 20030210, Opera >= 9.5,
		or WebKit >= 522."""
		sAgent = os.environ.get("HTTP_USER_AGENT", "")
		if sAgent.find("MSIE") >= 0 and sAgent.find("mac") < 0 and sAgent.find("Opera") < 0:
			i = sAgent.find("MSIE")
			iVersion = float(sAgent[i + 5:i + 5 + 3])
			return iVersion >= 5.5
		elif sAgent.find("Gecko/") >= 0:
			i = sAgent.find("Gecko/")
			iVersion = int(sAgent[i + 6:i + 6 + 8])  # build date, YYYYMMDD
			return iVersion >= 20030210
		elif sAgent.find("Opera/") >= 0:
			i = sAgent.find("Opera/")
			iVersion = float(sAgent[i + 6:i + 6 + 4])
			return iVersion >= 9.5
		elif sAgent.find("AppleWebKit/") >= 0:
			m = re.search('AppleWebKit\\/(\\d+)', sAgent, re.IGNORECASE)
			# BUGFIX: m.group(1) is a string; comparing it to the int 522
			# was always True on Python 2 (str > int), accepting every
			# WebKit version.
			return int(m.group(1)) >= 522
		else:
			return False

	def GetConfigFieldString(self):
		"""Serialize self.Config as an entity-escaped query string for the
		hidden configuration field."""
		sParams = ""
		bFirst = True
		for sKey in self.Config.keys():
			sValue = self.Config[sKey]
			if not bFirst:
				# '&amp;' because the string is placed in an HTML attribute.
				sParams += "&amp;"
			else:
				bFirst = False
			if sValue:
				k = escape(sKey)
				v = escape(sValue)
				if sValue == "true":
					sParams += "%s=true" % k
				elif sValue == "false":
					sParams += "%s=false" % k
				else:
					sParams += "%s=%s" % (k, v)
		return sParams
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
"""
import os
try: # Windows needs stdio set for binary mode for file upload to work.
import msvcrt
msvcrt.setmode (0, os.O_BINARY) # stdin = 0
msvcrt.setmode (1, os.O_BINARY) # stdout = 1
except ImportError:
pass
from fckutil import *
from fckoutput import *
import config as Config
class GetFoldersCommandMixin (object):
	def getFolders(self, resourceType, currentFolder):
		"""Return the <Folders> XML node listing the sub-folders of currentFolder."""
		# Map the virtual path to our local server
		serverPath = mapServerFolder(self.userFilesFolder, currentFolder)
		nodes = []
		for entry in os.listdir(serverPath):
			entryPath = mapServerFolder(serverPath, entry)
			if os.path.isdir(entryPath):
				nodes.append("""<Folder name="%s" />""" % convertToXmlAttribute(entry))
		return """<Folders>""" + "".join(nodes) + """</Folders>"""
class GetFoldersAndFilesCommandMixin (object):
	def getFoldersAndFiles(self, resourceType, currentFolder):
		"""Return the <Folders> and <Files> XML nodes for currentFolder.

		File sizes are reported in KB, with any non-empty file rounded up
		to at least 1.
		"""
		# Map the virtual path to our local server
		serverPath = mapServerFolder(self.userFilesFolder, currentFolder)
		folderNodes = []
		fileNodes = []
		for entry in os.listdir(serverPath):
			entryPath = mapServerFolder(serverPath, entry)
			if os.path.isdir(entryPath):
				folderNodes.append("""<Folder name="%s" />""" % convertToXmlAttribute(entry))
			elif os.path.isfile(entryPath):
				size = os.path.getsize(entryPath)
				if size > 0:
					size = round(size / 1024)
					if size < 1:
						size = 1
				fileNodes.append("""<File name="%s" size="%d" />""" % (
					convertToXmlAttribute(entry),
					size
				))
		folders = """<Folders>""" + "".join(folderNodes) + """</Folders>"""
		files = """<Files>""" + "".join(fileNodes) + """</Files>"""
		return folders + files
class CreateFolderCommandMixin (object):
	def createFolder(self, resourceType, currentFolder):
		"""
		Purpose: command to create a new folder

		Maps OS errors to the FCKeditor connector error codes:
		0 = ok / already exists, 102 = invalid name, 103 = no permission,
		110 = unknown error.
		"""
		errorNo = 0; errorMsg ='';
		if self.request.has_key("NewFolderName"):
			newFolder = self.request.get("NewFolderName", None)
			newFolder = sanitizeFolderName (newFolder)
			try:
				newFolderPath = mapServerFolder(self.userFilesFolder, combinePaths(currentFolder, newFolder))
				self.createServerFolder(newFolderPath)
			except Exception, e:
				errorMsg = str(e).decode('iso-8859-1').encode('utf-8') # warning with encodigns!!!
				if hasattr(e,'errno'):
					if e.errno==17: #file already exists
						errorNo=0
					elif e.errno==13: # permission denied
						errorNo = 103
					elif e.errno==36 or e.errno==2 or e.errno==22: # filename too long / no such file / invalid name
						errorNo = 102
					else:
						errorNo = 110
		else:
			# No "NewFolderName" in the request: report an invalid name.
			# NOTE(review): nesting reconstructed from unindented source —
			# confirm this else pairs with the has_key check upstream.
			errorNo = 102
		return self.sendErrorNode ( errorNo, errorMsg )

	def createServerFolder(self, folderPath):
		"Purpose: physically creates a folder on the server"
		# No need to check if the parent exists, just create all hierachy
		try:
			permissions = Config.ChmodOnFolderCreate
			if not permissions:
				os.makedirs(folderPath)
		except AttributeError: #ChmodOnFolderCreate undefined
			permissions = 0755
		if permissions:
			# Clear the umask so the requested mode is applied verbatim.
			oldumask = os.umask(0)
			# NOTE(review): hard-codes mode=0755 instead of the configured
			# `permissions` value read above — confirm whether intended.
			os.makedirs(folderPath,mode=0755)
			os.umask( oldumask )
class UploadFileCommandMixin (object):
	def uploadFile(self, resourceType, currentFolder):
		"""
		Purpose: command to upload files to server (same as FileUpload)

		Validates the extension against the per-resource-type allow/deny
		lists, avoids clobbering existing files by appending "(n)" to the
		name, and replies via sendUploadResults (201 = renamed,
		202 = invalid file / no file).
		"""
		errorNo = 0
		if self.request.has_key("NewFile"):
			# newFile has all the contents we need
			newFile = self.request.get("NewFile", "")
			# Get the file name
			newFileName = newFile.filename
			newFileName = sanitizeFileName( newFileName )
			newFileNameOnly = removeExtension(newFileName)
			newFileExtension = getExtension(newFileName).lower()
			allowedExtensions = Config.AllowedExtensions[resourceType]
			deniedExtensions = Config.DeniedExtensions[resourceType]
			# An allow-list takes precedence over a deny-list.
			if (allowedExtensions):
				# Check for allowed
				isAllowed = False
				if (newFileExtension in allowedExtensions):
					isAllowed = True
			elif (deniedExtensions):
				# Check for denied
				isAllowed = True
				if (newFileExtension in deniedExtensions):
					isAllowed = False
			else:
				# No extension limitations
				isAllowed = True
			if (isAllowed):
				# Upload to operating system
				# Map the virtual path to the local server path
				currentFolderPath = mapServerFolder(self.userFilesFolder, currentFolder)
				i = 0
				# Probe for a free file name, appending "(1)", "(2)", ...
				# until no existing file is hit, then write and return.
				while (True):
					newFilePath = os.path.join (currentFolderPath,newFileName)
					if os.path.exists(newFilePath):
						i += 1
						newFileName = "%s(%d).%s" % (
							newFileNameOnly, i, newFileExtension
						)
						errorNo= 201 # file renamed
					else:
						# Read file contents and write to the desired path (similar to php's move_uploaded_file)
						fout = file(newFilePath, 'wb')
						while (True):
							chunk = newFile.file.read(100000)
							if not chunk: break
							fout.write (chunk)
						fout.close()
						if os.path.exists ( newFilePath ):
							doChmod = False
							try:
								doChmod = Config.ChmodOnUpload
								permissions = Config.ChmodOnUpload
							except AttributeError: #ChmodOnUpload undefined
								doChmod = True
								permissions = 0755
							if ( doChmod ):
								# Clear the umask so the mode applies verbatim.
								oldumask = os.umask(0)
								os.chmod( newFilePath, permissions )
								os.umask( oldumask )
						newFileUrl = combinePaths(self.webUserFilesFolder, currentFolder) + newFileName
						return self.sendUploadResults( errorNo , newFileUrl, newFileName )
			else:
				return self.sendUploadResults( errorNo = 202, customMsg = "" )
		else:
			return self.sendUploadResults( errorNo = 202, customMsg = "No File" )
| Python |
#!/usr/bin/env python
"""
* FCKeditor - The text editor for Internet - http://www.fckeditor.net
* Copyright (C) 2003-2010 Frederico Caldeira Knabben
*
* == BEGIN LICENSE ==
*
* Licensed under the terms of any of the following licenses at your
* choice:
*
* - GNU General Public License Version 2 or later (the "GPL")
* http://www.gnu.org/licenses/gpl.html
*
* - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
* http://www.gnu.org/licenses/lgpl.html
*
* - Mozilla Public License Version 1.1 or later (the "MPL")
* http://www.mozilla.org/MPL/MPL-1.1.html
*
* == END LICENSE ==
*
* Configuration file for the File Manager Connector for Python
"""
# INSTALLATION NOTE: You must set up your server environment accordingly to run
# python scripts. This connector requires Python 2.4 or greater.
#
# Supported operation modes:
# * WSGI (recommended): You'll need apache + mod_python + modpython_gateway
# or any web server capable of the WSGI python standard
# * Plain Old CGI: Any server capable of running standard python scripts
# (although mod_python is recommended for performance)
# This was the previous connector version operation mode
#
# If you're using Apache web server, replace the htaccess.txt to to .htaccess,
# and set the proper options and paths.
# For WSGI and mod_python, you may need to download modpython_gateway from:
# http://projects.amor.org/misc/svn/modpython_gateway.py and copy it in this
# directory.
# SECURITY: You must explicitly enable this "connector". (Set it to "True").
# WARNING: don't just set "ConfigIsEnabled = True", you must be sure that only
# authenticated users can access this file or use some kind of session checking.
# Master switch for the connector; see the security warning above.
Enabled = False

# Path to user files relative to the document root.
UserFilesPath = '/userfiles/'

# Fill the following value if you prefer to specify the absolute path for the
# user files directory. Useful if you are using a virtual directory, symbolic
# link or alias. Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'UserFilesPath' must point to the same directory.
# WARNING: GetRootPath may not work in virtual or mod_python configurations, and
# may not be thread safe. Use this configuration parameter instead.
UserFilesAbsolutePath = ''

# Due to security issues with Apache modules, it is recommended to leave the
# following setting enabled.
ForceSingleExtension = True

# What the user can do with this connector
ConfigAllowedCommands = [ 'QuickUpload', 'FileUpload', 'GetFolders', 'GetFoldersAndFiles', 'CreateFolder' ]

# Allowed Resource Types
ConfigAllowedTypes = ['File', 'Image', 'Flash', 'Media']

# After file is uploaded, sometimes it is required to change its permissions
# so that it was possible to access it at the later time.
# If possible, it is recommended to set more restrictive permissions, like 0755.
# Set to 0 to disable this feature.
# Note: not needed on Windows-based servers.
ChmodOnUpload = 0755

# See comments above.
# Used when creating folders that do not exist.
ChmodOnFolderCreate = 0755

# Do not touch these 3 lines, see "Configuration settings for each Resource Type"
AllowedExtensions = {}; DeniedExtensions = {};
FileTypesPath = {}; FileTypesAbsolutePath = {};
QuickUploadPath = {}; QuickUploadAbsolutePath = {};
# Configuration settings for each Resource Type
#
# - AllowedExtensions: the possible extensions that can be allowed.
# If it is empty then any file type can be uploaded.
# - DeniedExtensions: The extensions that won't be allowed.
# If it is empty then no restrictions are done here.
#
# For a file to be uploaded it has to fulfill both the AllowedExtensions
# and DeniedExtensions (that's it: not being denied) conditions.
#
# - FileTypesPath: the virtual folder relative to the document root where
# these resources will be located.
# Attention: It must start and end with a slash: '/'
#
# - FileTypesAbsolutePath: the physical path to the above folder. It must be
# an absolute path.
# If it's an empty string then it will be autocalculated.
# Useful if you are using a virtual directory, symbolic link or alias.
# Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'FileTypesPath' must point to the same directory.
# Attention: It must end with a slash: '/'
#
#
# - QuickUploadPath: the virtual folder relative to the document root where
# these resources will be uploaded using the Upload tab in the resources
# dialogs.
# Attention: It must start and end with a slash: '/'
#
# - QuickUploadAbsolutePath: the physical path to the above folder. It must be
# an absolute path.
# If it's an empty string then it will be autocalculated.
# Useful if you are using a virtual directory, symbolic link or alias.
# Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'QuickUploadPath' must point to the same directory.
# Attention: It must end with a slash: '/'
# Per-resource-type settings; see the explanatory comment block above.
# The `cond and X or Y` construct is the pre-2.5 conditional-expression idiom.
AllowedExtensions['File'] = ['7z','aiff','asf','avi','bmp','csv','doc','fla','flv','gif','gz','gzip','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','ods','odt','pdf','png','ppt','pxd','qt','ram','rar','rm','rmi','rmvb','rtf','sdc','sitd','swf','sxc','sxw','tar','tgz','tif','tiff','txt','vsd','wav','wma','wmv','xls','xml','zip']
DeniedExtensions['File'] = []
FileTypesPath['File'] = UserFilesPath + 'file/'
FileTypesAbsolutePath['File'] = (not UserFilesAbsolutePath == '') and (UserFilesAbsolutePath + 'file/') or ''
QuickUploadPath['File'] = FileTypesPath['File']
QuickUploadAbsolutePath['File'] = FileTypesAbsolutePath['File']

AllowedExtensions['Image'] = ['bmp','gif','jpeg','jpg','png']
DeniedExtensions['Image'] = []
FileTypesPath['Image'] = UserFilesPath + 'image/'
FileTypesAbsolutePath['Image'] = (not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'image/' or ''
QuickUploadPath['Image'] = FileTypesPath['Image']
QuickUploadAbsolutePath['Image']= FileTypesAbsolutePath['Image']

AllowedExtensions['Flash'] = ['swf','flv']
DeniedExtensions['Flash'] = []
FileTypesPath['Flash'] = UserFilesPath + 'flash/'
FileTypesAbsolutePath['Flash'] = ( not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'flash/' or ''
QuickUploadPath['Flash'] = FileTypesPath['Flash']
QuickUploadAbsolutePath['Flash']= FileTypesAbsolutePath['Flash']

AllowedExtensions['Media'] = ['aiff','asf','avi','bmp','fla', 'flv','gif','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','png','qt','ram','rm','rmi','rmvb','swf','tif','tiff','wav','wma','wmv']
DeniedExtensions['Media'] = []
FileTypesPath['Media'] = UserFilesPath + 'media/'
FileTypesAbsolutePath['Media'] = ( not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'media/' or ''
QuickUploadPath['Media'] = FileTypesPath['Media']
QuickUploadAbsolutePath['Media']= FileTypesAbsolutePath['Media']
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Utility functions for the File Manager Connector for Python
"""
import string, re
import os
import config as Config
# Generic manipulation functions
def removeExtension(fileName):
	"""Return *fileName* with its final extension (last '.' onward) removed.

	A name containing no dot is returned unchanged; the previous
	rindex-based implementation raised ValueError in that case.
	"""
	base, sep, _ext = fileName.rpartition(".")
	# rpartition yields an empty separator when no dot exists.
	return base if sep else fileName
def getExtension(fileName):
	"""Return the extension of *fileName* (text after the last '.').

	Returns '' for a name with no dot; the previous rindex-based
	implementation raised ValueError in that case.
	"""
	if "." not in fileName:
		return ""
	return fileName.rpartition(".")[2]
def removeFromStart(string, char):
	"Return string with any leading run of char (set semantics of lstrip) removed."
	return string.lstrip(char)
def removeFromEnd(string, char):
	"Return string with any trailing run of char (set semantics of rstrip) removed."
	return string.rstrip(char)
# Path functions
def combinePaths( basePath, folder ):
	"Join basePath and folder so exactly one '/' separates them."
	trimmedBase = removeFromEnd( basePath, '/' )
	trimmedFolder = removeFromStart( folder, '/' )
	return trimmedBase + '/' + trimmedFolder
def getFileName(filename):
	"Return the last path component of filename, accepting '/' or '\\' separators."
	# Normalizing backslashes first and splitting once is equivalent to the
	# two-pass split over ['/', '\\']: both keep the text after the final
	# separator of either kind.
	normalized = filename.replace("\\", "/")
	return normalized.split("/")[-1]
def sanitizeFolderName( newFolderName ):
	"Replace characters that are unsafe in folder names with underscores."
	# Forbidden: . \ / | : ? * " < > plus ASCII control chars (0x00-0x1f)
	# and the C1 range (0x7f-0x9f).  Single character class, equivalent to
	# the alternation form.
	unsafe = '[.\\\\/|:?*"<>\x00-\x1f\x7f-\x9f]'
	return re.sub( unsafe, '_', newFolderName )
def sanitizeFileName( newFileName ):
	"""Do a cleanup of the file name to avoid possible problems.

	Optionally collapses multiple extensions, strips any directory part,
	then replaces unsafe characters with underscores.
	"""
	# Replace dots in the name with underscores (only one dot can be there... security issue).
	if ( Config.ForceSingleExtension ): # remove every dot except the last one
		newFileName = re.sub ( '\\.(?![^.]*$)', '_', newFileName )
	newFileName = newFileName.replace('\\','/')	# convert windows to unix path
	newFileName = os.path.basename (newFileName)	# strip directories
	# Remove \ / | : ? * " < > and control characters.
	# BUGFIX: the previous pattern ended with '[\x00-\x1f\x7f-\x9f]/' -- the
	# stray trailing '/' meant a control character was only replaced when
	# immediately followed by a slash, so bare control characters slipped
	# through.  Now matches sanitizeFolderName's character handling.
	return re.sub ( '\\\\|\\/|\\||\\:|\\?|\\*|"|<|>|[\x00-\x1f\x7f-\x9f]', '_', newFileName )
def getCurrentFolder(currentFolder):
	"""Normalize and validate a client-supplied folder path.

	Returns the path guaranteed to start and end with a single '/', or
	None when the path is invalid (traversal, backslashes, hidden
	segments, reserved or control characters).

	Uses '!=' instead of the Python-2-only '<>' operator (valid in both
	Python 2 and 3; '<>' was removed in Python 3).
	"""
	if not currentFolder:
		currentFolder = '/'
	# Check the current folder syntax (must begin and end with a slash).
	if (currentFolder[-1] != "/"):
		currentFolder += "/"
	if (currentFolder[0] != "/"):
		currentFolder = "/" + currentFolder
	# Ensure the folder path has no double-slashes
	while '//' in currentFolder:
		currentFolder = currentFolder.replace('//','/')
	# Check for invalid folder paths (..)
	if '..' in currentFolder or '\\' in currentFolder:
		return None
	# Reject hidden segments ('/.'), residual double slashes, Windows-reserved
	# characters and control characters.
	if re.search( '(/\\.)|(//)|([\\\\:\\*\\?\\""\\<\\>\\|]|[\x00-\x1F]|[\x7f-\x9f])', currentFolder ):
		return None
	return currentFolder
def mapServerPath( environ, url):
	"Emulate the asp Server.mapPath function. Given an url path return the physical directory that it corresponds to "
	# This isn't correct but for the moment there's no other solution:
	# if this script sits under a virtual directory or symlink,
	# getRootPath will detect the problem and stop.
	root = getRootPath(environ)
	return combinePaths( root, url )
def mapServerFolder(resourceTypePath, folderPath):
	"Map a folder below a resource-type root onto its physical server path."
	return combinePaths( resourceTypePath, folderPath )
def getRootPath(environ):
	"""Purpose: returns the root path on the server.

	Prefers the DOCUMENT_ROOT CGI variable; otherwise derives the root by
	locating SCRIPT_FILENAME's directory at the end of the current real
	path.  Raises Exception when the mapping cannot be established.

	WARNING: this may not be thread safe, and doesn't work with
	VirtualServer/mod_python.  Use Config.UserFilesAbsolutePath instead.
	"""
	# 'in' replaces dict.has_key(), which was removed in Python 3.
	if 'DOCUMENT_ROOT' in environ:
		return environ['DOCUMENT_ROOT']
	else:
		realPath = os.path.realpath( './' )
		selfPath = environ['SCRIPT_FILENAME']
		selfPath = selfPath [ : selfPath.rfind( '/' ) ]
		selfPath = selfPath.replace( '/', os.path.sep)
		position = realPath.find(selfPath)
		# This can check only that this script isn't run from a virtual dir
		# But it avoids the problems that arise if it isn't checked.
		# BUGFIX: a stray debug statement 'raise realPath' sat here, raising
		# a bare string and unconditionally aborting this branch; removed.
		if ( position < 0 or position != len(realPath) - len(selfPath) or realPath[ : position ]==''):
			raise Exception('Sorry, can\'t map "UserFilesPath" to a physical path. You must set the "UserFilesAbsolutePath" value in "editor/filemanager/connectors/py/config.py".')
		return realPath[ : position ]
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector/QuickUpload for Python (WSGI wrapper).
See config.py for configuration settings
"""
from connector import FCKeditorConnector
from upload import FCKeditorQuickUpload
import cgitb
from cStringIO import StringIO
# Running from WSGI capable server (recomended)
def App(environ, start_response):
	"""WSGI entry point. Run the connector.

	Dispatches on SCRIPT_NAME to the browser connector or the quick-upload
	handler; for any other path it reports the unknown page.  Failures in
	the connector produce a 500 response carrying a cgitb traceback page.
	"""
	if environ['SCRIPT_NAME'].endswith("connector.py"):
		conn = FCKeditorConnector(environ)
	elif environ['SCRIPT_NAME'].endswith("upload.py"):
		conn = FCKeditorQuickUpload(environ)
	else:
		start_response ("200 Ok", [('Content-Type','text/html')])
		yield "Unknown page requested: "
		yield environ['SCRIPT_NAME']
		return
	try:
		# run the connector
		data = conn.doResponse()
		# Start WSGI response:
		start_response ("200 Ok", conn.headers)
		# Send response text
		yield data
	except Exception:
		# Was a bare 'except:', which also swallowed SystemExit and
		# KeyboardInterrupt; limit handling to Exception so server
		# control-flow signals still propagate.
		start_response("500 Internal Server Error",[("Content-type","text/html")])
		report = StringIO()  # renamed from 'file' (shadowed the builtin)
		cgitb.Hook(file = report).handle()
		yield report.getvalue()
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.