# NOTE: removed a dataset-export artifact (markdown table header) that was
# not part of the original source file.
# -*- coding: utf-8 -*-
#*****************************************************************************
# Copyright (C) 2011 Fredrik Strömberg <fredrik314@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
r"""
Implements a class of multiplier systems which can be used to define automorphic forms.
AUTHORS:
- Fredrik Strömberg
EXAMPLES::
sage: K=CyclotomicField(168)
sage: z=K.gen()
sage: rT=matrix(K,3,3,[z**-1,0,0,0,z**-25,0,0,0,z**-121])
sage: rS=matrix(ComplexField(53),3,3)
sage: for i in range(3):
....:     for j in range(3):
....:         rS[i,j]=2*sin(pi*(j+1)*(i+1)/7)
sage: fak=CC(1/sqrt(7*I))
"""
from sage.all import SageObject,CyclotomicField,Integer,is_even,ZZ,QQ,Rational,kronecker,is_odd,SL2Z,Gamma0,matrix,floor,ceil,lcm,copy,trivial_character,qexp_eta,var,DirichletGroup
from sage.all import kronecker_character,kronecker_character_upside_down
from sage.modular.arithgroup.arithgroup_element import ArithmeticSubgroupElement
from mysubgroups_alg import factor_matrix_in_sl2z,SL2Z_elt
class MultiplierSystem(SageObject):
    r"""
    Base class for multiplier systems.

    A multiplier system is a function

        v : Gamma -> C^dim

    such that there exists a meromorphic function f : H -> C^dim of weight k
    with f|A = v(A) f.
    """
    def __init__(self, group, dchar=(0, 0), dual=False, is_trivial=False, dimension=1, **kwargs):
        r"""
        INPUT:

        - ``group`` -- the group the multiplier is defined on
        - ``dchar`` -- pair (conductor, char_nr) defining a Dirichlet character:
          char_nr = -1 => kronecker_character,
          char_nr = -2 => kronecker_character_upside_down
        - ``dual`` -- if True we use the complex conjugate of the representation
          (the representation is assumed to be unitary)
        - ``is_trivial`` -- True if the multiplier itself is trivial
        - ``dimension`` -- dimension of the representation
        - ``character`` -- (keyword) an explicit DirichletCharacter; overrides ``dchar``
        """
        self._group = group
        self._dim = dimension
        (conductor, char_nr) = dchar
        self._conductor = conductor
        self._char_nr = char_nr
        self._character = None
        if 'character' in kwargs:  # dict.has_key is Python2-only; 'in' works everywhere
            if str(type(kwargs['character'])).find('DirichletCharacter') >= 0:
                self._character = kwargs['character']
                self._conductor = self._character.conductor()
                self._char_nr = self._character.parent().list().index(self._character)
        else:
            if group.is_congruence():
                if conductor <= 0:
                    # default conductor: the level of the group
                    self._conductor = group.level()
                    self._char_nr = 0
                    if char_nr >= 0:
                        self._char_nr = char_nr
                if self._char_nr == 0:
                    self._character = trivial_character(self._conductor)
                elif self._char_nr == -1:
                    self._character = kronecker_character(self._conductor)
                elif self._char_nr <= -2:
                    self._character = kronecker_character_upside_down(self._conductor)
                else:
                    D = DirichletGroup(self._conductor).list()
                    # BUGFIX: index len(D) is out of range; use >= len(D)
                    if self._char_nr < 0 or self._char_nr >= len(D):
                        self._char_nr = 0
                    self._character = D[self._char_nr]
        ## Extract the class name for the reduce algorithm
        self._class_name = str(type(self))[1:-2].split(".")[-1]
        if not isinstance(dimension, (int, Integer)):
            raise ValueError("Dimension must be integer!")
        self._is_dual = dual
        # Guard against self._character being None (e.g. a non-congruence
        # group without an explicit character); the original crashed here.
        if self._character is None:
            self._is_trivial = is_trivial
            self._is_real = bool(is_trivial)
        else:
            self._is_trivial = is_trivial and self._character.is_trivial()
            self._is_real = bool(is_trivial and self._character.order() <= 2)
        self._character_values = []  ## Store for easy access

    def __getinitargs__(self):
        r"""Arguments needed to reconstruct self (used by __reduce__)."""
        return (self._group, (self._conductor, self._char_nr), self._is_dual,
                self._is_trivial, self._dim)

    def __reduce__(self):
        r"""Support pickling."""
        return self.__class__, self.__getinitargs__()

    def __repr__(self):
        r"""
        Needs to be defined in subclasses.
        """
        raise NotImplementedError

    def group(self):
        r"""Return the group this multiplier is defined on."""
        return self._group

    def __call__(self, A):
        r"""
        Evaluate the multiplier on the group element A.
        """
        # For efficiency we should also allow lists of elements here.
        if isinstance(A, (ArithmeticSubgroupElement, SL2Z_elt, list)):
            if A not in self._group:
                raise ValueError("Element %s is not in %s! " % (A, self._group))
            return self._action(A)
        else:
            raise NotImplementedError("Do not know how the multiplier should act on {0}".format(A))

    def _action(self, A):
        # BUGFIX: 'raise NotImplemented' raised a non-exception object; also
        # the base signature lacked the element argument used by __call__.
        raise NotImplementedError(" Needs to be overridden by subclasses!")

    def is_trivial(self):
        r"""Return True if this multiplier is trivial."""
        return self._is_trivial

    def is_real(self):
        r"""Return True if this multiplier takes real values only."""
        return self._is_real

    def set_dual(self):
        r"""Switch to the dual (conjugate) multiplier."""
        self._is_dual = True

    def character(self):
        r"""Return the associated Dirichlet character (or None)."""
        return self._character

    def __eq__(self, other):
        r"""
        Two multipliers are considered equal iff they act on the same group,
        have the same dimension and the same string representation.
        """
        if str(type(other)).find('Multiplier') < 0:
            return False
        if self._group != other._group:
            return False
        if self._dim != other._dim:
            return False
        return self.__repr__() == other.__repr__()

    def __ne__(self, other):
        return not self.__eq__(other)

    def is_consistent(self, k):
        r"""
        Check the consistency relation v(-I) = (-1)^k (scalar-valued case).
        """
        Z = SL2Z([-1, 0, 0, -1])
        zi = CyclotomicField(4).gen()
        v = self._action(Z)
        if self._dim == 1:
            # accept plain Python ints as well (they have no is_integral())
            if isinstance(k, (int, Integer)) or k.is_integral():
                v1 = ZZ(1) if is_even(k) else ZZ(-1)
            elif isinstance(k, Rational) and (k.denominator() == 2 or k == 0):
                v1 = zi ** (-QQ(2 * k))
            else:
                raise ValueError("Only integral and half-integral weight is currently supported! Got weight:{0} of type:{1}".format(k, type(k)))
        else:
            raise NotImplementedError("Override this function for vector-valued multipliers!")
        return v1 == v
class TrivialMultiplier(MultiplierSystem):
    r"""
    The trivial multiplier, possibly twisted by a Dirichlet character.
    """
    def __init__(self, group, dchar=(0, 0), dual=False, is_trivial=True, dimension=1, **kwargs):
        MultiplierSystem.__init__(self, group, dchar=dchar, dual=dual, is_trivial=True, dimension=dimension, **kwargs)

    def __repr__(self):
        if self._character is not None and not self._character.is_trivial():
            s = "Character " + str(self._character)
        else:
            s = "Trivial multiplier!"
        return s

    def _action(self, A):
        r"""
        v(A) = chi(d), where d is the lower right entry of A
        (or 1 if there is no character).
        """
        if self._character is not None:
            if isinstance(A, (list, SL2Z_elt)):
                d = A[3]
            else:
                d = A[1, 1]
            if len(self._character_values) > 0:
                # use the precomputed table of character values
                d = d % self._character.modulus()
                return self._character_values[d]
            else:
                return self._character(d)
        else:
            return 1

    def set_prec(self, prec=None):
        r"""Set the numerical precision ('float' or 'double') for character values."""
        if prec in ["float", "double"]:
            self._prec = prec
        else:
            # BUGFIX: the original assigned to the local name 'prec' (a no-op);
            # reset the stored precision instead.
            self._prec = None

    def set_character_values(self):
        r"""Precompute the character values as complex numbers for fast access."""
        # BUGFIX: the original referenced an undefined global 'prec';
        # use the precision set via set_prec().
        prec = getattr(self, '_prec', None)
        l = self._character.values()
        self._character_values = []
        # both precisions use Python complex (double) values, as the original did
        if prec in ("float", "double"):
            for x in l:
                self._character_values.append(complex(x))
class ThetaMultiplier(MultiplierSystem):
    r"""
    Theta multiplier (the multiplier system of the classical theta function,
    defined on groups of level divisible by 4).
    """
    def __init__(self, group, dchar=(0, 0), dual=False, is_trivial=False, dimension=1, weight=QQ(1)/QQ(2), **kwargs):
        if not ZZ(4).divides(group.level()):
            # BUGFIX: self._group is not set yet at this point; use the argument.
            raise ValueError(" Need level divisible by 4. Got:%s " % group.level())
        MultiplierSystem.__init__(self, group, dchar=dchar, dual=dual, is_trivial=is_trivial, dimension=dimension, **kwargs)
        self._i = CyclotomicField(4).gen()
        self._one = self._i ** 4
        self._weight = QQ(weight)
        ## We have to make sure that we have the correct multiplier & character
        ## for the desired weight.
        if weight is not None:
            if floor(2 * weight) != ceil(2 * weight):
                raise ValueError(" Use ThetaMultiplier for half integral or integral weight only!")
            t = self.is_consistent(weight)
            if not t:
                # try the dual multiplier before giving up
                self.set_dual()
                t1 = self.is_consistent(weight)
                if not t1:
                    raise ArithmeticError("Could not find consistent theta multiplier! Try to add a character.")

    def __repr__(self):
        s = "Theta multiplier"
        if self._is_dual:
            s += " v^-1 "
        else:
            s += " v "
        if self._character is not None and not self._character.is_trivial():
            s += " and character " + str(self._character)
        return s

    def __latex__(self):
        r"""LaTeX representation of self."""
        # BUGFIX: raw strings -- the original non-raw literals turned '\b'
        # and '\t' into backspace/tab escapes, and the '\{' pseudo-escapes
        # left bare braces that made str.format raise; format braces are
        # escaped by doubling.
        if self._is_dual:
            s = r" \bar{v_{\theta}} "
        else:
            s = r" v_{\theta} "
        if self._character is not None and not self._character.is_trivial():
            s += r" \cdot "
            if self._character == kronecker_character(self._conductor):
                s += r" \left( \frac{{\cdot}}{{ {0} }}\right)".format(self._conductor)
            elif self._character == kronecker_character_upside_down(self._conductor):
                s += r" \left( \frac{{ {0} }}{{ \cdot }}\right)".format(self._conductor)
            else:
                s += r" \chi_{{ {0}, {1} }}".format(self._conductor, self._char_nr)
        return s

    def __getinitargs__(self):
        r"""Arguments needed to reconstruct self (used by __reduce__)."""
        return (self._group, (self._conductor, self._char_nr), self._is_dual,
                self._is_trivial, self._dim, self._weight)

    def order(self):
        # the theta multiplier takes values in the 4th roots of unity
        return 4

    def _action(self, A):
        [a, b, c, d] = A
        # multiply by self._one so the result is always a number field element
        v = kronecker(c, d) * self._one
        if d % 4 == 3:
            v = -v * self._i
        elif c % 4 != 0:
            raise ValueError("Only use theta multiplier for 4|c!")
        if self._character is not None:
            v = self._character(d) * v
        if self._is_dual:
            v = v ** -1
        return v
class EtaMultiplier(MultiplierSystem):
    r"""
    Eta multiplier. Valid for any (real) weight.
    """
    def __init__(self,G,k=QQ(1)/QQ(2),number=0,ch=None,dual=False,version=1,dimension=1,**kwargs):
        r"""
        Initialize the Eta multiplier system: `\nu_{\eta}^{2(k+r)}`.

        INPUT:

        - ``G`` -- group
        - ``k`` -- weight (recall that eta has weight 1/2 and eta**2k has
          weight k; if weight != k we adjust the power accordingly)
        - ``number`` -- r above: we consider eta^power, where power should be
          an even integer so as not to change the weight
        - ``ch`` -- character
        - ``dual`` -- if we have the dual (in this case: the conjugate)
        - ``version`` -- 1: explicit transformation formula,
          2: formula via Dedekind sums
        """
        self._weight=QQ(k)
        # the weight is half-integral iff weight - 1/2 is an integer
        if floor(self._weight-QQ(1)/QQ(2))==ceil(self._weight-QQ(1)/QQ(2)):
            self._half_integral_weight=1
        else:
            self._half_integral_weight=0
        MultiplierSystem.__init__(self,G,character=ch,dual=dual,dimension=dimension)
        number = number % 12
        if not is_even(number):
            raise ValueError,"Need to have v_eta^(2(k+r)) with r even!"
        self._pow=QQ((self._weight+number)) ## k+r
        self._k_den=self._pow.denominator()
        self._k_num=self._pow.numerator()
        # eta transforms by 24th roots of unity; for the power num/den we
        # work in the cyclotomic field of order 12*den
        self._K = CyclotomicField(12*self._k_den)
        self._z = self._K.gen()**self._k_num
        self._i = CyclotomicField(4).gen()
        # correction factor picked up when replacing A by -A (used for c<0)
        self._fak = CyclotomicField(2*self._k_den).gen()**-self._k_num
        self._version = version
        self.is_consistent(k) # test consistency v(-I)=(-1)^k

    def __repr__(self):
        s="Eta multiplier "
        if self._pow<>1:
            s+="to power 2*"+str(self._pow)+" "
        if self._character<>None and not self._character.is_trivial():
            s+=" and character "+str(self._character)
        s+="with weight="+str(self._weight)
        return s

    def order(self):
        # the values are 12*den-th roots of unity
        return 12*self._k_den

    def z(self):
        # generator of the cyclic group containing all values of self
        return self._z

    def _action(self,A):
        # dispatch on the chosen implementation (see __init__)
        if self._version==1:
            return self._action1(A)
        elif self._version==2:
            return self._action2(A)
        else:
            raise ValueError

    def _action1(self,A):
        # unpack the matrix entries and evaluate
        [a,b,c,d]=A
        return self._action0(a,b,c,d)

    def _action0(self,a,b,c,d):
        r"""
        Evaluate the multiplier on (a,b;c,d) via the explicit
        transformation formula.

        Recall that the formula is valid only for c>0. Otherwise we have to use:
        v(A)=v((-I)(-A))=sigma(-I,-A)v(-I)v(-A).
        Then note that by the formula for sigma we have:
        sigma(-I,SL2Z[a, b, c, d])=-1 if (c=0 and d<0) or c>0 and otherwise it is =1.
        """
        fak=1
        if c<0:
            # replace A by -A and remember the correction factor (see docstring)
            a=-a; b=-b; c=-c; d=-d; fak=-self._fak
        if c==0:
            # A is upper triangular: the value depends only on b (and sign of a)
            if a>0:
                res = self._z**b
            else:
                res = self._fak*self._z**b
        else:
            # the two branches correspond to even/odd c in the classical formula
            if is_even(c):
                arg = (a+d)*c-b*d*(c*c-1)+3*d-3-3*c*d
                v=kronecker(c,d)
            else:
                arg = (a+d)*c-b*d*(c*c-1)-3*c
                v=kronecker(d,c)
            if not self._half_integral_weight:
                # recall that we can use eta for any real weight
                v=v**(2*self._weight)
            arg=arg*(self._k_num)
            res = v*fak*self._z**arg
        if self._character:
            res = res * self._character(d)
        if self._is_dual:
            res=res**-1
        return res

    def _action2(self,A):
        r"""
        Evaluate the multiplier on A via Dedekind sums.
        """
        [a,b,c,d]=A
        fak=1
        if c<0:
            # replace A by -A as in _action0
            a=-a; b=-b; c=-c; d=-d; fak=-self._fak
        if c==0:
            if a>0:
                res = self._z**b
            else:
                res = self._fak*self._z**b
        else:
            # logarithm of the multiplier: s(-d,c) + (a+d)/(12c) - 1/4
            arg = dedekind_sum(-d,c)
            arg = arg+QQ(a+d)/QQ(12*c)-QQ(1)/QQ(4)
            arg=arg*QQ(2)
            den = arg.denominator()*self._k_den
            num = arg.numerator()*self._k_num
            K = CyclotomicField(2*den)
            z=K.gen()
            if z.multiplicative_order()>4:
                # coerce the correction factor into the larger field
                fak=K(fak)
            res = z**num
        if self._character:
            ch = self._character(d)
            res=res*ch
        # fak == 1 in the c == 0 case, so this is safe for both branches
        res = res*fak
        if self._is_dual:
            return res**-1
        return res
class TestMultiplier(MultiplierSystem):
    r"""
    Test of multiplier for f(q). As in e.g. the paper of Bringmann and Ono.
    """
    def __init__(self,group,dchar=(0,0),dual=False,weight=QQ(1)/QQ(2),dimension=1,version=1,**kwargs):
        # see EtaMultiplier for the meaning of the cached fields below
        self._weight=QQ(weight)
        MultiplierSystem.__init__(self,group,dchar=dchar,dual=dual,dimension=dimension,**kwargs)
        self._k_den=self._weight.denominator()
        self._k_num=self._weight.numerator()
        self._K = CyclotomicField(12*self._k_den)
        self._z = self._K.gen()**self._k_num
        self._sqrti = CyclotomicField(8).gen()
        self._i = CyclotomicField(4).gen()
        # correction factor for replacing A by -A (c<0)
        self._fak = CyclotomicField(2*self._k_den).gen()**-self._k_num
        self._fak_arg=QQ(self._weight)/QQ(2)
        self._version = version
        self.is_consistent(weight) # test consistency v(-I)=(-1)^weight

    def order(self):
        # the values are 12*den-th roots of unity
        return 12*self._k_den

    def z(self):
        # generator of the cyclic group containing all values of self
        return self._z

    def __repr__(self):
        s="Test multiplier"
        if self._character<>None and not self._character.is_trivial():
            s+="and character "+str(self._character)
        return s

    def _action(self,A):
        r"""
        Evaluate the multiplier on A=(a,b;c,d) via Dedekind sums.
        """
        [a,b,c,d]=A
        fak=0
        if c<0:
            # replace A by -A; fak is an additive correction to the argument here
            a=-a; b=-b; c=-c; d=-d; fak=self._fak_arg
        if c==0:
            if a>0:
                res = self._z**-b
            else:
                res = self._fak*self._z**-b
        else:
            # argument of the root of unity (formula from Bringmann-Ono)
            arg=-QQ(1)/QQ(8)+QQ(c+a*d+1)/QQ(4)-QQ(a+d)/QQ(24*c)-QQ(a)/QQ(4)+QQ(3*d*c)/QQ(8)
            arg = arg-dedekind_sum(-d,c)/QQ(2)+fak
            den=arg.denominator()
            num=arg.numerator()
            res = self._K(CyclotomicField(den).gen())**num
        if self._is_dual:
            return res**-1
        return res
class MultiplierByGenerator(MultiplierSystem):
    r"""
    Multiplier system given by its values on a set of generators.
    For now, only implemented for SL2Z.
    """
    def __init__(self, group, gens=None, vs=None, **kwargs):
        r"""
        INPUT:

        - ``gens`` -- list of generators of ``group``
        - ``vs``   -- list of corresponding values (scalars or square matrices)
        """
        # avoid mutable default arguments
        gens = [] if gens is None else gens
        vs = [] if vs is None else vs
        if len(gens) != len(vs):
            raise ValueError("Need generators and values of the same form!")
        if vs and hasattr(vs[0], 'nrows'):
            dim = vs[0].nrows()
            if vs[0].ncols() != dim:
                raise ValueError("Values must be square matrices!")
        else:
            dim = 1
        MultiplierSystem.__init__(self, group, dimension=dim)
        # BUGFIX: self.vals was never initialized in the original,
        # so the assignments below raised AttributeError.
        self.vals = {}
        for g, v in zip(gens, vs):
            self.vals[g] = v
        # convenience attributes used by _action for SL2Z
        # (the original referenced self.S/self.T/self.Z without setting them)
        self.S = None
        self.T = None
        self.Z = None
        if group == SL2Z:
            S, T = SL2Z.gens()
            self.S = self.vals.get(S)
            self.T = self.vals.get(T)
            self.Z = self.vals.get(S * S)

    def __repr__(self):
        # BUGFIX: the original did 's+="r(S)=",...' which adds a tuple to a str
        s = "Multiplier system defined by action on the generators:"
        if self._group == SL2Z:
            S, T = SL2Z.gens()
            Z = S * S
            s += "r(S)=" + str(self.vals[S])
            s += "r(T)=" + str(self.vals[T])
            s += "r(Z)=" + str(self.vals[Z])
        else:
            for g in self.vals.keys():
                s += "r(" + str(g) + ")=" + str(self.vals[g])
        return s

    def _action(self, A):
        r"""
        Evaluate the multiplier on A by writing A as a word in the generators.
        """
        a, b, c, d = A
        if self._group == SL2Z:
            # factor A = z * T^n * (S T^{l_1}) * ... * (S T^{l_k})
            [z, n, l] = factor_matrix_in_sl2z(int(a), int(b), int(c), int(d))
            res = self.T.parent().one()
            if z == -1 and self.Z is not None:
                res = self.Z  # v(SL2Z[-1,0,0,-1])
            if n != 0:
                # BUGFIX: the original assigned res = self.T**n here, which
                # discarded the r(Z) factor set above.
                res = res * self.T ** n
            for i in range(len(l)):
                res = res * self.S * self.T ** l[i]
        elif A in self.vals.keys():
            res = self.vals[A]
        else:
            raise NotImplementedError("Can not write as word in generators of {0}".format(self._group))
        return res
class InducedRepresentationMultiplier(MultiplierSystem):
    def __init__(self, G, v=None, **kwargs):
        r"""
        The representation of SL(2,Z) induced from a multiplier ``v``
        on the subgroup G of PSL(2,Z).

        EXAMPLE::

            sage: te=TestMultiplier(Gamma0(2),weight=1/2)
            sage: r=InducedRepresentationMultiplier(Gamma0(2),v=te)
        """
        dim = len(list(G.coset_reps()))
        MultiplierSystem.__init__(self, Gamma0(1), dimension=dim)
        self._induced_from = G
        # setup the action on S and T (should be faster...)
        self.v = v
        if v is not None:
            k = v.order()
            # values of v live in the k-th cyclotomic field (or ZZ for order<=2)
            K = CyclotomicField(k) if k > 2 else ZZ
            self.S = matrix(K, dim, dim)
            self.T = matrix(K, dim, dim)
        else:
            self.S = matrix(dim, dim)
            self.T = matrix(dim, dim)
        S, T = SL2Z.gens()
        if hasattr(G, "coset_reps"):
            if isinstance(G.coset_reps(), list):
                Vl = G.coset_reps()
            else:
                Vl = list(G.coset_reps())
        elif hasattr(G, "_G"):
            Vl = list(G._G.coset_reps())
        else:
            raise ValueError("Could not get coset representatives from {0}!".format(G))
        self.repsT = dict()
        self.repsS = dict()
        # r(g)[i,j] = v(V_i g V_j^-1) whenever V_i g V_j^-1 is in G
        for i in range(dim):
            Vi = Vl[i]
            for j in range(dim):
                Vj = Vl[j]
                BS = Vi * S * Vj ** -1
                BT = Vi * T * Vj ** -1
                if BS in G:
                    self.S[i, j] = v(BS) if v is not None else 1
                    self.repsS[(i, j)] = BS
                if BT in G:
                    self.T[i, j] = v(BT) if v is not None else 1
                    self.repsT[(i, j)] = BT

    def __repr__(self):
        s = "Induced representation from "
        s += str(self.v) + " on the group " + str(self._induced_from)
        return s

    def induced_from(self):
        r"""Return the group the representation was induced from."""
        # BUGFIX: the original lacked the 'return' statement.
        return self._induced_from

    def _action(self, A):
        r"""
        Evaluate the induced representation on A by factoring A
        into the standard generators of SL(2,Z).
        """
        a, b, c, d = A
        # factor A = z * T^n * (S T^{l_1}) * ... * (S T^{l_k})
        [z, n, l] = factor_matrix_in_sl2z(int(a), int(b), int(c), int(d))
        res = copy(self.T.parent().one())
        if z == -1 and self.v is not None:
            tmp = self.v(SL2Z([-1, 0, 0, -1]))
            for j in range(self._dim):
                res[j, j] = tmp
        if n != 0:
            # BUGFIX: the original assigned res = self.T**n here, which
            # discarded the z = -1 diagonal contribution built above.
            res = res * self.T ** n
        for i in range(len(l)):
            res = res * self.S * self.T ** l[i]
        return res
class WeilRepMultiplier(MultiplierSystem):
    def __init__(self, WR, weight=QQ(1)/QQ(2), use_symmetry=True, **kwargs):
        r"""
        Multiplier given by a Weil representation.

        INPUT:

        - ``WR`` -- a Weil representation, or an integer N in which case
          WeilRepDiscriminantForm(N) is used
        - ``weight`` -- weight (should be consistent with the signature of self)
        - ``use_symmetry`` -- if False we do not symmetrize the functions with
          respect to Z; if True we need a compatible weight
        """
        if isinstance(WR, (Integer, int)):
            # NOTE(review): WeilRepDiscriminantForm is not imported in this
            # module -- it must be supplied by the surrounding environment.
            self.WR = WeilRepDiscriminantForm(WR)
        else:
            self.WR = WR
        self._sym_type = 0
        if use_symmetry:
            # the weight must be half-integral, i.e. 2*weight integral
            t = 2 * weight
            try:
                Integer(t)
            except (TypeError, ValueError):
                raise ValueError("Need half-integral value of weight! Got k=%s" % (weight))
            ti = Integer(float(t))
            if ti % 4 == self.WR._signature:
                sym_type = 1
            else:
                sym_type = -1
            if sym_type == 1:
                Dstart = int(0); Dfinish = int(self.WR.N)      # 0,1,...,N
            else:
                Dstart = int(1); Dfinish = int(self.WR.N - 1)  # 1,...,N-1 (since -0=0 and -N=N)
            self._sym_type = sym_type
            dim = Dfinish - Dstart + 1
        else:
            dim = len(self.WR.D)
        MultiplierSystem.__init__(self, self.WR.group, dimension=dim)

    def __repr__(self):
        s = "Weil representation corresponding to " + str(self.WR)
        return s

    def _action(self, A):
        # delegate to the Weil representation itself
        return self.WR.rho(A)

    def is_consistent(self, k):
        r"""
        Return True if the Weil representation is a multiplier of weight k.
        """
        twok = 2 * k
        # BUGFIX: the original called an undefined 'is_integral' (and printed
        # a stray debug message).
        if twok not in ZZ:
            return False
        if self._sym_type != 0:
            if is_odd(self.WR._signature):
                return (twok % 4 == (self._sym_type * self.WR._signature) % 4)
            else:
                if self.WR._signature % 4 == (1 - self._sym_type) % 4:
                    return twok % 4 == 0
                else:
                    return twok % 4 == 1
        if is_even(twok) and is_even(self.WR._signature):
            return True
        if is_odd(twok) and is_odd(self.WR._signature):
            return True
        return False
class EtaQuotientMultiplier(MultiplierSystem):
    r"""
    Multiplier of the eta quotient eta(Az)^r / eta(Bz)^s.
    The weight should be r/2 - s/2 mod 2.
    The group is Gamma0(lcm(A,B)).
    """
    def __init__(self, A, B, r, s, k=None, number=0, ch=None, dual=False, version=1, **kwargs):
        r"""
        INPUT:

        - ``A``, ``B`` -- arguments of the two eta factors
        - ``r``, ``s`` -- exponents of the two eta factors
        - ``k`` -- weight; defaults to (r-s)/2
        - ``ch`` -- character
        - ``dual`` -- if we have the dual (in this case: the conjugate)
        - ``number`` -- even integer: we consider v_eta^{2(k+number)}
        """
        self.level = lcm(A, B)
        G = Gamma0(self.level)
        if k is None:
            k = (QQ(r) - QQ(s)) / QQ(2)
        self._weight = QQ(k)
        # the weight is half-integral iff weight - 1/2 is an integer
        if floor(self._weight - QQ(1) / QQ(2)) == ceil(self._weight - QQ(1) / QQ(2)):
            self._half_integral_weight = 1
        else:
            self._half_integral_weight = 0
        MultiplierSystem.__init__(self, G, dimension=1, character=ch, dual=dual)
        number = number % 12
        if not is_even(number):
            raise ValueError("Need to have v_eta^(2(k+r)) with r even!")
        self._arg_num = A
        self._arg_den = B
        self._exp_num = r
        self._exp_den = s
        self._pow = QQ((self._weight + number))  ## k+r
        self._k_den = self._pow.denominator()
        self._k_num = self._pow.numerator()
        self._K = CyclotomicField(12 * self._k_den)
        self._z = self._K.gen() ** self._k_num
        self._i = CyclotomicField(4).gen()
        # correction factor for replacing A by -A (c<0)
        self._fak = CyclotomicField(2 * self._k_den).gen() ** -self._k_num
        self._version = version
        self.is_consistent(k)  # test consistency v(-I)=(-1)^k

    def __repr__(self):
        s = "Quotient of Eta multipliers : "
        s += "eta({0})^{1}/eta({2})^{3}".format(self._arg_num, self._exp_num, self._arg_den, self._exp_den)
        if self._character is not None and not self._character.is_trivial():
            s += " and character " + str(self._character)
        s += " with weight=" + str(self._weight)
        return s

    def order(self):
        # the values are 12*den-th roots of unity
        return 12 * self._k_den

    def z(self):
        # generator of the cyclic group containing all values of self
        return self._z

    def q_shift(self):
        r"""
        Gives the 'shift' at the cusp at infinity of the q-series.
        The 'true' q-expansion of the eta quotient is then q^shift*q_expansion.
        """
        num = self._arg_num * self._exp_num - self._arg_den * self._exp_den
        return QQ(num) / QQ(24)

    def q_expansion(self, n=20):
        r"""
        Give the q-expansion of the quotient (without the q^shift prefactor).
        """
        # bind the symbolic variable explicitly instead of relying on
        # var() injecting it into the global namespace
        q = var('q')
        et = qexp_eta(ZZ[['q']], n)
        etA = et.subs(q=q ** self._arg_num).power_series(ZZ[['q']])
        etB = et.subs(q=q ** self._arg_den).power_series(ZZ[['q']])
        res = etA ** (self._exp_num) / etB ** (self._exp_den)
        return res

    def _action(self, A):
        [a, b, c, d] = A
        if not c % self.level == 0:
            # BUGFIX: use self._group (the group object); self.group is the
            # bound accessor method inherited from MultiplierSystem.
            raise ValueError("Need A in {0}! Got: {1}".format(self._group, A))
        fak = 1
        if c < 0:
            # replace A by -A and remember the correction factor
            a = -a; b = -b; c = -c; d = -d; fak = -self._fak
        arg1, v1 = eta_conjugated(a, b, c, d, self._arg_num)
        arg2, v2 = eta_conjugated(a, b, c, d, self._arg_den)
        res = self._z ** (arg1 * self._exp_num - arg2 * self._exp_den)
        if v1 != 1:
            res = res * v1 ** self._exp_num
        if v2 != 1:
            res = res / v2 ** self._exp_den
        if fak != 1:
            res = res * fak ** (self._exp_num - self._exp_den)
        return res
def eta_conjugated(a, b, c, d, l):
    r"""
    Return the pair (argument, v) for eta(V_l A V_l^{-1}) with A=(a,b;c,d),
    valid for c >= 0 and l | c; the value of the multiplier is then
    v * z^argument with z the root of unity handled by the caller.
    """
    assert c >= 0 and (c % l) == 0
    if l != 1:
        cp = QQ(c) / QQ(l)
        bp = QQ(b) * QQ(l)
    else:
        cp = c; bp = b
    if c == 0:
        # BUGFIX: the original lacked the 'return' keyword here, so the
        # tuple was computed and silently discarded.
        # NOTE(review): value l*bp kept as in the original -- verify the
        # intended power of l.
        return l * bp, 1
    res = (a + d) * cp - bp * d * (cp * cp - 1)
    if is_odd(c):
        return res - 3 * cp, kronecker(d, cp)
    else:
        return res + 3 * d - 3 - 3 * cp * d, kronecker(cp, d)
def eta_argument(a,b,c,d):
    r"""
    Return the triple (argument, denominator, v) for the eta multiplier of
    A=(a,b;c,d): the value is v * e(argument/denominator), den is always 24.

    NOTE(review): for even c this returns kronecker(d,c) while the analogous
    branch of eta_conjugated uses kronecker(c,d) -- confirm which is
    intended; this function is not called anywhere else in this file.
    """
    res = (a+d)*c-b*d*(c*c-1)
    den = 24
    if is_odd(c):
        return res-3*c,den,kronecker(d,c)
    else:
        return res+3*d-3-3*c*d,den,kronecker(d,c)
def saw_tooth_fn(x):
    r"""
    The sawtooth function ((x)): 0 for integral x, otherwise
    x - floor(x) - 1/2 (exact over QQ for rational x).
    """
    if floor(x) == ceil(x):
        # x is an integer
        return 0
    if x in QQ:
        # keep the computation exact for rational input
        return QQ(x) - QQ(floor(x)) - QQ(1) / QQ(2)
    return x - floor(x) - 0.5
def dedekind_sum(d, c):
    r"""
    The Dedekind sum s(d,c) = sum_{i mod c} ((i/c)) ((d*i/c)).
    """
    total = 0
    for i in range(c):
        total += saw_tooth_fn(QQ(i) / QQ(c)) * saw_tooth_fn(QQ(d * i) / QQ(c))
    return total
# NOTE: removed a dataset-export artifact (markdown table cell) separating
# two concatenated source files.
# -*- coding: utf-8 -*-
#*****************************************************************************
# Copyright (C) 2010 Fredrik Strömberg <stroemberg@mathematik.tu-darmstadt.de>,
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
import mpmath as mpmath
from sage.structure.element import Element
from sage.structure.parent import Parent
from sage.structure.sage_object import SageObject,cPickle
from sage.functions.all import ln,sqrt,floor
from sage.rings.arith import divisors,gcd,inverse_mod
from sage.modular.dirichlet import DirichletGroup
from sage.rings.all import RR
from sage.modular.arithgroup.all import Gamma0
from sage.all import trivial_character,timeit,RealNumber,ComplexNumber,log,is_squarefree,prime_range,next_prime
from maass_forms_alg import *
from lpkbessel import *
from automorphic_forms import *
from eisenstein_series import Eisenstein_series_one_cusp
#from mysubgroup import is_Hecke_triangle_group
import matplotlib
import warnings
r"""
Maass waveforms for subgroups of the modular group and Eisenstein series for Hecke triangle groups.
AUTHORS:
- Fredrik Strömberg (March 2010-)
EXAMPLES::
?
TODO:
- Nontrivial multiplier systems and weights
- improve eigenvalue finding algorithms
"""
class MaassWaveForms (AutomorphicFormSpace):
r"""
Describes a space of Maass waveforms (cuspforms)
"""
#def __init__(self,G,prec=500,ST=None,character=None,verbose=0,weight=0,**kwds):
    def __init__(self,G,weight=0,multiplier="",ch=0,sym_type=None,cusp_evs=None,hecke=False,verbose=0,dprec=None,prec=None,**kwds):
        r"""
        Creates an ambient space of Maass waveforms.

        INPUT:

        - ``G`` -- subgroup of the modular group
        - ``weight`` -- weight of the forms
        - ``multiplier`` -- multiplier system
        - ``ch`` -- character (number)
        - ``sym_type`` -- even/odd symmetry type (0, 1, or None for no symmetry)
        - ``cusp_evs`` -- Atkin-Lehner (and similar) eigenvalues at the cusps
        - ``hecke`` -- if True we assume we want Hecke eigenforms; in particular
          this implies we work with complex numbers also for real characters
        - ``prec`` -- default working precision in bits
        - ``dprec`` -- default working precision in digits

        EXAMPLES::

            sage: S=MaassWaveForms(Gamma0(1)); S
            Space of Maass waveforms on the group G:
            Arithmetic Subgroup of PSL2(Z) with index 1. Given by:
            perm(S)=()
            perm(ST)=()
            Constructed from G=Modular Group SL(2,Z)
        """
        self._ch=ch
        self._hecke=hecke
        # precision: keep dprec (digits) and prec (bits) consistent
        if dprec==None and prec==None:
            dprec=53; prec=15
        elif dprec==None:
            dprec=floor(RR(prec)/3.4)
        else:
            prec=ceil(3.4*dprec)+1
        AutomorphicFormSpace.__init__(self,G,weight=weight,multiplier=multiplier,character=ch,holomorphic=False,weak=False,cuspidal=True,unitary_action=1,dprec=dprec,verbose=verbose)
        # numerical tolerance corresponding to the working precision
        self._eps = 2.0**(3.0-self._dprec)
        if sym_type == None:
            self._sym_type = -1  # -1 means: no even/odd symmetry assumed
        else:
            self._sym_type = sym_type
        self._Weyl_law_const=self._Weyl_law_consts()
        maass_forms=dict() # list of members
        # decide whether we can work with real coefficients
        self._use_real=True
        if not self._multiplier.is_trivial():
            if not self._multiplier.is_real() or hecke:
                self._use_real=False
        if self._weight<>0:
            self._use_real=False
        if self._sym_type not in [0,1]:
            self._use_real=False
        if self._sym_type in [0,1]:
            ## We can use symmetries for most cusps (and leave the others
            ## with exponentials) only for congruence or cycloidal groups.
            if not self._group.is_congruence() and self._group.ncusps()>1:
                raise ValueError,"For non-cycloidal non-congruence subgroups we should not use even/odd symmetry!"
        ## Stores Atkin-Lehner and similar eigenvalues for the cusps
        self._symmetry=None
        self._even_odd_symmetries={}
        self._cusp_symmetries={}
        self._cusp_evs_dict={}
        self._cusp_evs=[]
        if cusp_evs:
            self.set_cusp_evs(cusp_evs)
        self.__dict__  # NOTE(review): no-op statement -- debug leftover?
        # check consistency between cusp eigenvalues and even/odd symmetry
        for i in range(self._group._ncusps):
            if self.cusp_evs()[i]<>0 and self.even_odd_symmetries()[i][0]<>1 and self._sym_type<>-1:
                raise ValueError,"Got incompatible symmetry information!"
        self._smallest_M0=0
        # marker attribute used by __eq__
        self._is_maass_waveform_space=True
    def weight(self):
        r"""Return the weight of self."""
        return self._weight

    def sym_type(self):
        r"""Return the even/odd symmetry type of self (0, 1, or -1 for none)."""
        return self._sym_type

    def cusp_evs_dict(self):
        r"""Return the dict of cusp (Atkin-Lehner) eigenvalues, indexed by cusp."""
        return self._cusp_evs_dict
def cusp_evs(self):
if not self._cusp_evs:
self._cusp_evs=[1]
for i in range(self._group._ncusps-1):
self._cusp_evs.append(0)
return self._cusp_evs
def set_sym_type(self,s):
if s not in [0,1,-1]:
raise ValueError,"{0} is not a valid symmetry type!".format(s)
# check consistency between cusp eigenvalues and even/odd symmetry
for i in range(self._group._ncusps):
if self.cusp_evs()[i]<>0 and self.even_odd_symmetries()[i][0]<>1 and self._sym_type<>-1:
raise ValueError,"Got incompatible symmetry information!"
self._sym_type=s
    def set_cusp_evs(self,cusp_evs={}):
        r"""
        Set the cusp (Atkin-Lehner type) eigenvalues of self from a dict
        {cusp index: eigenvalue} or a list indexed by cusp.

        NOTE(review): the mutable default argument is harmless here since
        cusp_evs is copied, never mutated -- but it is a smell.
        """
        if isinstance(cusp_evs,dict):
            self._cusp_evs_dict=copy(cusp_evs)
            self._cusp_evs=[]
            for c in range(self._group.ncusps()):
                d=self._cusp_evs_dict.get(c,0)
                self._cusp_evs.append(d)
        elif isinstance(cusp_evs,list):
            self._cusp_evs=copy(cusp_evs)
            for c in range(len(cusp_evs)):
                d=self._cusp_evs[c]
                self._cusp_evs_dict[c]=d
        else:
            raise TypeError,"Could not get cusp eigenvalues from {0}!".format(cusp_evs)
        ## The first eigenvalue is always 1
        self._cusp_evs[0]=1
        self._cusp_evs_dict[0]=1
        ## Also check that the eigenvalues are compatible with the orders
        ## of the corresponding cusp involutions
        for c in range(self._group.ncusps()):
            o,d=self.cusp_symmetries().get(c,(-1,0))
            ev=self._cusp_evs_dict.get(c,0)
            if o==0 and ev<>0:
                # no involution at this cusp: reset the eigenvalue to 'unknown'
                s = "The cusp nr. {0} does not appear to have an involution!".format(c)
                warnings.warn(s)
                self._cusp_evs[c]=0
            elif o>0:
                # the eigenvalue must be an o-th root of unity (up to _eps)
                if ev<>0 and abs(ev**o-1)>self._eps:
                    s = "The cusp nr. {0} has involution of order {1} and the assumed eigenvalue {2} does not have this order!".format(c,o,ev)
                    warnings.warn(s)
                    self._cusp_evs[c]=0
        # check consistency between cusp eigenvalues and even/odd symmetry
        for i in range(self._group._ncusps):
            if self.cusp_evs()[i]<>0 and self.even_odd_symmetries()[i][0]<>1 and self._sym_type<>-1:
                if self._verbose>0:
                    print "i=",i
                    print "self._cusp=",self._cusp_evs[i]
                    print "eo_sym=",self.even_odd_symmetries()[i]
                    print "sym_type=",self._sym_type
                raise ValueError,"Got incompatible symmetry information!"
def even_odd_symmetries(self):
r"""
Check even/odd symmetries and behaviour with respect to the character
"""
if self._even_odd_symmetries<>{}:
return self._even_odd_symmetries
res={}
for j in range(self._group.ncusps()):
if self._ch==0:
res[j]=self._group._symmetrizable_cusp[j],1
else:
if self._group._symmetrizable_cusp[j]==0:
res[j]=0,0
continue
a,b,c,d=self._group._cusp_data[j]['normalizer']
res[j]=1,self._character(a*d+b*c)
return res
    def cusp_symmetries(self):
        r"""
        Check cusp symmetries (involutions) and their behaviour with respect
        to the character; return a dict {cusp index: (order, epsilon)} where
        (0,0) means 'no usable involution'.
        """
        if self._cusp_symmetries<>{}:
            return self._cusp_symmetries
        res={}
        for j in range(self._group.ncusps()):
            o,d = self._group.cusp_normalizer_is_normalizer(j)
            if o==0:
                res[j]=0,0
            elif self._ch==0:
                # trivial character: only the order of the involution matters
                res[j]=o,1
            else:
                x,y,z,w=self._group._cusp_data[j]['normalizer']
                l = self._group._cusp_data[j]['width']
                N = self._group._level
                q = self._character.modulus()
                if (l*z % N) <> 0:
                    res[j]=0,0
                else:
                    ## Check that the character is uniquely determined on
                    ## sigma_j gamma sigma_j^-1 for all gamma=(a b;c d) in Gamma_0(N)
                    vals=[]
                    for a in range(N):
                        if gcd(a,N)>1:
                            continue
                        d = inverse_mod(a,N)
                        # lower-right entry of sigma_j gamma sigma_j^-1
                        dp=x*w*d-y*z*a
                        xi = self._character(dp)
                        xj = self._character(d)
                        xij=xi/xj
                        if xij not in vals:
                            vals.append(xij)
                    # Note that if vals<>[1] then this map is not really an
                    # involution since it sends a character to another character
                    if len(vals)==1:
                        res[j]=o,vals[0]
                    else:
                        res[j]=0,0
        # cache the result for later calls
        self._cusp_symmetries=res
        return res
def __repr__(self):
r"""
Return the string representation of self.
EXAMPLES::
sage: M=MaassWaveForms(MySubgroup(Gamma0(1)));M
Space of Maass waveforms on the group G:
Arithmetic Subgroup of PSL2(Z) with index 1. Given by:
perm(S)=()
perm(ST)=()
Constructed from G=Modular Group SL(2,Z)
"""
s="Space of Maass waveforms "
s+="of weight = "+str(self._weight)+" "
if str(self._multiplier).find("theta_multiplier")>0:
s+=" with theta multiplier "
elif not self._multiplier.is_trivial():
s+=" with multiplier:\n"+str(self._multiplier)
else:
s+=" with trivial multiplier "
s+=" on "
if self._group._is_Gamma0:
s+='Gamma0({0})'.format(self._group._level)
else:
s+="the group G:\n"+str(self._group)
return s
return s
    def __reduce__(self):
        r""" Used for pickling.
        """
        # NOTE: this tuple must stay in sync with __init__'s signature.
        return(MaassWaveForms,(self._group,self._weight,self._multiplier,self._character,self._sym_type,self._cusp_evs,self._hecke,self._verbose,self._dprec,self._prec))
    def __ne__(self,other):
        r"""Return True if self is not equal to other (see __eq__)."""
        if self._verbose>1:
            print "in MaassWaveForms.__ne__"
        return not self.__eq__(other)
def __eq__(self, other):
    r"""
    Equality test for spaces of Maass waveforms.

    Two spaces are equal when the class and the first five arguments from
    ``__reduce__`` (group, weight, multiplier, character, sym_type) agree.
    Precision and verbosity are deliberately allowed to differ.
    """
    if not hasattr(other, "_is_maass_waveform_space"):
        return False
    if self._verbose > 1:
        print("in MaassWaveForms.__eq__")
    l0 = self.__reduce__()
    l1 = other.__reduce__()
    ## We allow to differ in precision and verbosity
    # BUG FIX: the class entries of self and other must be compared;
    # the original compared l0[0] with itself, which is always equal.
    if l0[0] != l1[0]:
        return False
    for j in range(0, 5):
        if l0[1][j] != l1[1][j]:
            return False
    return True
def __cmp__(self,other):
    r""" Compare self to other
    """
    # NOTE(review): this returns True/False instead of the -1/0/1 protocol
    # expected of __cmp__, so it only supports equality-style comparison.
    if not isinstance(other,type(self)):
        return False
    # NOTE(review): reads ``self.prec`` while __reduce__ uses ``self._prec``;
    # presumably ``prec`` is defined elsewhere on the class -- confirm.
    if(self._group <> other._group or self.prec<>other.prec):
        return False
    else:
        return True
def group(self):
    r"""Return the group on which this space of Maass waveforms is defined."""
    return self._group
def level(self):
    r"""Return the level of the underlying group."""
    return self._group.level()
def get_element(self,R,Mset=None,Yset=None,dim=1,ndigs=12,set_c=None,**kwds):
    r"""
    Compute an element (Maass waveform) of self with spectral parameter R.

    INPUT:

    - ``R`` -- spectral parameter; must be a finite, positive real
    - ``Mset`` -- truncation point to use (optional)
    - ``Yset`` -- horocycle height to use (optional)
    - ``dim`` -- dimension of the eigenspace (default 1)
    - ``ndigs`` -- number of desired digits (default 12)
    - ``set_c`` -- prescribed coefficients (not implemented)

    OUTPUT: a MaassWaveformElement if dim=1; for dim>1 the result of
    get_Hecke_basis (a list of elements).

    RAISES: ValueError -- if R is not a finite positive real, or dim<1.
    """
    #if sym_type==None:
    #    sym_type=self._sym_type
    # Validate R; a failed conversion in RR(R) also lands in the except.
    try:
        if RR(R).is_infinity() or RR(R).is_NaN() or R<=0.0:
            raise Exception
    except:
        raise ValueError,"R must be a (finite) real! Got R:{0}".format(R)
    if dim>1:
        # Higher-dimensional spaces are resolved into Hecke eigenfunctions.
        return self.get_Hecke_basis(R,None,Mset,Yset,dim,ndigs,set_c)
    ## We assume we have a scalar-valued Maass form for the moment
    elif dim==1:
        # M0=0 / Y0=0.0 instruct the solver to choose its own parameters.
        NN = self.set_norm(1,set_c=set_c); M0=0; Y0 = float(0.0)
        if Mset<>None: M0 = int(Mset)
        if Yset<>None: Y0 = float(Yset)
        if self._verbose>0:
            print "Y0=",Y0
            print "M0=",M0
            print "NN=",NN
        C = get_coeff_fast_cplx_dp_sym(self,R,Y0,M0,0,NN)
        F=MaassWaveformElement(self,R,C=C,compute=False)
        return F
        #X=coefficients_for_Maass_waveforms(self,R,Y,M,Q,ndigs,cuspidal=True,sym_type=sym_type,dim=dim,set_c=set_c)
        #F._coeffs[0]=X[0]
    else:
        raise ValueError,"Can not compute Maass forms of dimension {0}".format(dim)
def get_Hecke_basis(self,R,p=None,Mset=None,Yset=None,dim=1,ndigs=12,set_c=None):
    r"""
    Compute a basis of Hecke (T_p) eigenfunctions with spectral parameter R.

    INPUT:

    - ``R`` -- spectral parameter
    - ``p`` -- prime for the Hecke operator; chosen automatically if None
    - ``Mset`` -- truncation point to use (optional)
    - ``Yset`` -- horocycle height to use (optional)
    - ``dim`` -- dimension of the eigenspace (default 1)
    - ``ndigs`` -- number of desired digits
    - ``set_c`` -- prescribed coefficients (only passed through for dim=1)

    OUTPUT: list of MaassWaveformElement's (for dim=1 a single element).
    """
    if dim==1:
        return self.get_element(R,Mset,Yset,dim,ndigs,set_c)
    #NN = self.set_norm(dim)
    #param=self.set_default_parameters(R,Mset,Yset,ndigs)
    #Y0=param['Y']; Q=param['Q']; M0=param['M']
    # Zero values instruct the solver to pick its own parameters.
    NN = self.set_norm(dim); M0=0; Y0 = float(0.0); Q=0
    if Mset<>None: M0 = int(Mset); Q=M0+10
    if Yset<>None: Y0 = float(Yset)
    if self._verbose>0:
        print "Get Hecke basis with:{0},{1},{2},{3},{4}".format(R,Y0,M0,Q,dim)
    X = get_coeff_fast_cplx_dp_sym(self,R,Y0,M0,Q,NN)
    if p==None:
        p = self.get_primitive_p()
    # Diagonalize the computed basis with respect to T_p.
    H = self.Hecke_eigenfunction_from_coeffs(X,p)
    res = []
    for i in H.keys(): #range(dim):
        #print "H[",i,"][0][-1]=",H[i][0][-1]
        #C={0:H[i]}
        F = MaassWaveformElement(self,R,C=H[i],sdim=1,compute=False,hecke_p=p)
        res.append(F)
    return res
def get_element_in_range(self,R1,R2,sym_type=None,Mset=None,Yset=None,dim=1,ndigs=12,set_c=None,neps=10):
r""" Finds element of the space self with R in the interval R1 and R2
INPUT:
- ``R1`` -- lower bound (real)
- ``R1`` -- upper bound (real)
"""
# Dummy element
F=MaassWaveformElement(self,R2,sym_type=sym_type,dim=dim,compute=False)
param=self.set_default_parameters(R2,Mset,Yset,ndigs)
Y=param['Y']
Q=param['Q']
M=param['M']
if self._verbose>0:
print "Y=",Y
print "M=",M
print "Q=",Q
l=self.split_interval(R1,R2)
if self._verbose>1:
print "Split into intervals:"
for [r1,r2,y] in l:
print "[",r1,",",r2,"]:",y
Rl=list()
for [r1,r2,y] in l:
[R,er]=find_single_ev(self,r1,r2,Yset=y,neps=neps)
Rl.append([R,er])
if self._verbose>0:
print "R=",R
print "er=",er
def _Weyl_law_consts(self):
    r"""
    Compute constants for the Weyl law on self.group

    OUTPUT:

    - tuple of real numbers (c1,c2,c3,c4,c5) used as
      N(T) ~ c1*t^2 - c2*t*ln(t) + c3*t + c4*t + c5, cf. Weyl_law_N.

    EXAMPLES::

        sage: M=MaassWaweForms(MySubgroup(Gamma0(1))
        sage: M._Weyl_law_consts
        (0, 2/pi, (log(pi) - log(2) + 2)/pi, 0, -2)
    """
    import mpmath
    pi=mpmath.fp.pi
    ix=Integer(self._group.index())
    nc=self._group.ncusps()
    if(self._group.is_congruence()):
        lvl=Integer(self._group.level())
    else:
        lvl=0
    n2=Integer(self._group.nu2())
    n3=Integer(self._group.nu3())
    if is_Hecke_triangle_group(self._group):
        if self._group._is_Gamma0:
            q=3
        else:
            q=self._group._q
        c1=(q-2)/(4*q)
    else:
        # Main (hyperbolic area) term for a subgroup of index ix.
        c1=ix/Integer(12)
    c2=Integer(2)*nc/pi
    c3=nc*(Integer(2)-ln(Integer(2))+ln(pi))/pi
    if lvl<>0:
        # Contribution of the scattering determinant for congruence groups,
        # built from counts of primitive Dirichlet characters mod q | lvl.
        A=1
        for q in divisors(lvl):
            num_prim_dc=0
            DG=DirichletGroup(q)
            for chi in DG.list():
                if(chi.is_primitive()):
                    num_prim_dc=num_prim_dc+1
            for m in divisors(lvl):
                if(lvl % (m*q) == 0 and m % q ==0 ):
                    fak=(q*lvl)/gcd(m,lvl/m)
                    A=A*Integer(fak)**num_prim_dc
        c4=-ln(A)/pi
    else:
        c4=Integer(0)
    # constant term
    c5=-ix/144+n2/8+n3*2/9-nc/4-1
    return (c1,c2,c3,c4,c5)
def Weyl_law_N(self,T,T1=None):
    r"""
    The counting function for this space. N(T)=#{disc. ev.<=T}

    INPUT:

    - ``T`` -- double
    - ``T1`` -- double (optional); if given, return the expected number of
      eigenvalues in the interval (T,T1] instead of N(T).

    EXAMPLES::

        sage: M=MaassWaveForms(MySubgroup(Gamma0(1))
        sage: M.Weyl_law_N(10)
        0.572841337202191
    """
    # NOTE(review): reads the attribute ``_Weyl_law_const`` -- presumably a
    # cached result of _Weyl_law_consts() set elsewhere; confirm the name.
    (c1,c2,c3,c4,c5)=self._Weyl_law_const
    cc1=RR(c1); cc2=RR(c2); cc3=RR(c3); cc4=RR(c4); cc5=RR(c5)
    #print "c1,c2,c3,c4,c5=",cc1,cc2,cc3,cc4,cc5
    # t = |1/2 + iT|, the natural variable of the Weyl asymptotics.
    t=sqrt(T*T+0.25)
    try:
        lnt=ln(t)
    except TypeError:
        # Fall back to mpmath for argument types Sage's ln does not accept.
        lnt=mpmath.ln(t)
    #print "t,ln(t)=",t,lnt
    NT=cc1*t*t-cc2*t*lnt+cc3*t+cc4*t+cc5
    if(T1<>None):
        t=sqrt(T1*T1+0.25)
        NT1=cc1*(T1*T1+0.25)-cc2*t*ln(t)+cc3*t+cc4*t+cc5
        return RR(abs(NT1-NT))
    else:
        return RR(NT)
def next_eigenvalue(self, R):
    r"""
    An estimate of where the next eigenvalue will be, i.e. the smallest
    R1 > R with N(R1) - N(R) >= 1, where N is the Weyl counting function.

    INPUT:

    - ``R`` -- real > 0

    OUTPUT:

    - real > R

    EXAMPLES::

        sage: M.next_eigenvalue(10.0)
        12.2500000000000
    """
    N = self.Weyl_law_N(R)
    # Step away from R with quadratically growing increments until the
    # expected eigenvalue count in (R, R1] reaches one.
    for j in range(1, 10000):
        R1 = R + j * RR(j) / 100.0
        if self.Weyl_law_N(R1) - N >= 1.0:
            return R1
    raise ArithmeticError("Could not find next eigenvalue! in interval: [%s,%s]" % (R, R1))
def Weyl_law_Np(self,T,T1=None):
    r"""
    The derivative of the counting function for this space. N(T)=#{disc. ev.<=T}

    INPUT:

    - ``T`` -- double
    - ``T1`` -- double (currently unused)

    EXAMPLES::

        sage: M=MaassWaweForms(MySubgroup(Gamma0(1))
        sage: M.Weyl_law_Np(10)
    """
    # NOTE(review): reads the attribute ``_Weyl_law_const`` -- presumably a
    # cached result of _Weyl_law_consts() set elsewhere; confirm the name.
    (c1,c2,c3,c4,c5)=self._Weyl_law_const
    cc1=RR(c1); cc2=RR(c2); cc3=RR(c3); cc4=RR(c4); cc5=RR(c5)
    #print "c1,c2,c3,c4,c5=",c1,c2,c3,c4,c5
    NpT=2.0*cc1*T-cc2*(ln(T)+1.0)+cc3+cc4
    return RR(NpT)
def set_default_parameters(self,R,Mset=0,Yset=None,ndigs=12):
    r"""
    Compute suitable numerical parameters for the spectral parameter R.

    INPUT:

    - ``R`` -- spectral parameter
    - ``Mset`` -- use this truncation point M if positive
    - ``Yset`` -- use this horocycle height Y if given
    - ``ndigs`` -- number of desired digits (default 12)

    OUTPUT: dict with keys 'Y' (height), 'M' (truncation point) and
    'Q' (number of sampling points, = M+10).
    """
    res=dict()
    #R=self._R
    eps=RR(10)**RR(-ndigs)
    if Yset<>None:
        Y=float(Yset)
        #print "R=",R,"Y=",Y,"eps=",eps
        M0=get_M_for_maass_dp(float(R),float(Y),float(eps))
    else:
        M0 = self.smallest_M0()
        YY = get_Y_for_M_dp(self,R,M0,eps)
        #YY = float(self._group.minimal_height()*0.95)
        #M0 = get_M_for_maass_dp(R,YY,eps)
        #[YY,M0]=find_Y_and_M(self._group,R,ndigs)
        # Use a multiprecision Y when more than double precision is wanted.
        if ndigs>=15:
            Y=mpmath.mp.mpf(YY)
        else:
            Y=YY
    if Mset > 0:
        M=Mset
    else:
        M=M0
    Q=M+10
    res['Q']=Q
    res['M']=M
    res['Y']=Y
    return res
def set_norm(self,k=1,cuspidal=True,sym_type=None,set_c=[],atkin_lehner={}):
    r""" Set normalization for computing maass forms.

    INPUT:

    - ``k`` -- dimension
    - ``cuspidal`` -- cuspidal maass waveforms (default=True)
    - ``sym_type`` -- even/odd symmetry type (optional)
    - ``set_c`` -- prescribed coefficients (not implemented)
    - ``atkin_lehner`` -- dict of Atkin-Lehner eigenvalues (optional)

    OUTPUT:

    - ``N`` -- normalization (dictionary)
      -- N['comp_dim'] -- dimension of space we are computing in
      -- N['SetCs']`` -- which coefficients are set
      -- N['Vals'] -- values of set coefficients

    EXAMPLES::

        sage: set_norm_maass(1)
        {'Vals': {0: {0: 0, 1: 1}}, 'comp_dim': 1, 'num_set': 2, 'SetCs': [0, 1]}
        sage: set_norm_maass(1,cuspidal=False)
        {'Vals': {0: {0: 1}}, 'comp_dim': 1, 'num_set': 1, 'SetCs': [0]}
        sage: set_norm_maass(2)
        {'Vals': {0: {0: 0, 1: 1, 2: 0}, 1: {0: 0, 1: 0, 2: 1}}, 'comp_dim': 2, 'num_set': 3, 'SetCs': [0, 1, 2]}
    """
    if set_c<>[] and set_c<>None:
        raise NotImplementedError,"We haven't implemented set c yet!"
    C=dict()
    Vals=dict()
    # set coeffs c(0),c(1),...,c(k-1) if not cuspidal
    # set coeffs c(0)=0,c(1),...,c(k) if cuspidal
    SetCs=dict()
    if cuspidal and k>0:
        for j in range(k):
            SetCs[j]=[]
            for l in range(0,k+1):
                SetCs[j].append((0,l))
            #SetCs[j]=range(0,k+1)
            # Also pin the constant term at every other cusp with alpha(i)=0.
            for i in range(1,self.group().ncusps()):
                if SetCs[j].count((i,0))==0 and self.alpha(i)[0]==0:
                    SetCs[j].append((i,0))
    else:
        for j in range(k):
            SetCs[j]=[]
            for l in range(0,k):
                SetCs[j].append((0,l))
            #SetCs[j]=range(0,k)
    # if(cuspidal): # have to set other cusps too
    #     for i in range(1,len(G.cusps())+1):
    #         SetCs.append(0+Ml*i)
    if cuspidal:
        C['cuspidal']=True
    else:
        C['cuspidal']=False
    ## Set all values = 0 first
    for j in range(k):
        Vals[j]=dict()
        for r,n in SetCs[j]:
            Vals[j][(r,n)]=0
    # Then set one coefficient to 1 per component (0-1 normalization).
    for j in range(k):
        if cuspidal:
            Vals[j][(0,j+1)]=1
        else:
            Vals[j][(0,j)]=1
    # Make sure that all constant terms are set to 0
    #if cuspidal:
    #    for i in range(1,self.group().ncusps()):
    #        Vals[j][(i,0)]=0
    C['comp_dim']=k
    C['SetCs']=SetCs
    C['Vals']=Vals
    if sym_type <> None:
        C['sym_type'] = sym_type
    if atkin_lehner and isinstance(atkin_lehner,dict):
        C['atkin_lehner']=atkin_lehner
    return C
def set_norm2(self,k=1,cuspidal=True,sym_type=None,atkin_lehner={},use_c=[]):
    r""" Set normalization for computing maass forms.

    INPUT:

    - ``k`` -- dimension
    - ``cuspidal`` -- cuspidal maass waveforms (default=True)
    - ``sym_type`` -- even/odd symmetry type (optional)
    - ``atkin_lehner`` -- dict of Atkin-Lehner eigenvalues (optional)
    - ``use_c`` -- which coefficients to use

    OUTPUT:

    - ``N`` -- normalization (dictionary)
      -- N['comp_dim'] -- dimension of space we are computing in
      -- N['SetCs']`` -- which coefficients are set
      -- N['Vals'] -- values of set coefficients

    EXAMPLES::

        sage: set_norm_maass(1)
        {'Vals': {0: {0: 0, 1: 1}}, 'comp_dim': 1, 'num_set': 2, 'SetCs': [0, 1]}
        sage: set_norm_maass(1,cuspidal=False)
        {'Vals': {0: {0: 1}}, 'comp_dim': 1, 'num_set': 1, 'SetCs': [0]}
        sage: set_norm_maass(2)
        {'Vals': {0: {0: 0, 1: 1, 2: 0}, 1: {0: 0, 1: 0, 2: 1}}, 'comp_dim': 2, 'num_set': 3, 'SetCs': [0, 1, 2]}
    """
    C=dict()
    Vals=dict()
    # Default choice of pinned coefficients: 0..k-1 (non-cuspidal) or
    # 1..k (cuspidal; c(0) is forced to vanish).
    if len(use_c)==0:
        if cuspidal == 0:
            use_c = range(k)
        else:
            use_c = range(1,k+1)
    if len(use_c)<>k:
        raise ArithmeticError,"Need the same number of coefficients to use as the dimension! Got dim={0}, use_c={1}".format(k,use_c)
    # set coeffs c(0),c(1),...,c(k-1) if not cuspidal
    # set coeffs c(0)=0,c(1),...,c(k) if cuspidal
    SetCs=dict()
    for j in range(k):
        SetCs[j]=[]
        for l in range(k):
            SetCs[j].append((0,use_c[l]))
        if cuspidal:
            # Also pin the constant term at every cusp with alpha(i)=0.
            for i in range(0,self.group().ncusps()):
                if SetCs[j].count((i,0))==0 and self.alpha(i)[0]==0:
                    SetCs[j].append((i,0))
    if cuspidal:
        C['cuspidal']=True
    else:
        C['cuspidal']=False
    ## Set all values = 0 first
    for j in range(k):
        Vals[j]=dict()
        for r,n in SetCs[j]:
            Vals[j][(r,n)]=0
    # Then set one coefficient to 1 per component (0-1 normalization).
    for j in range(k):
        Vals[j][(0,use_c[j])]=1
    # Make sure that all constant terms are set to 0
    #if cuspidal:
    #    for i in range(1,self.group().ncusps()):
    #        Vals[j][(i,0)]=0
    C['comp_dim']=k
    C['SetCs']=SetCs
    C['Vals']=Vals
    if sym_type <> None:
        C['sym_type'] = sym_type
    if atkin_lehner and isinstance(atkin_lehner,dict):
        C['atkin_lehner']=atkin_lehner
    return C
#### Split an interv
def split_interval(self,R1,R2):
r"""
Split an interval into pieces, each containing (on average) at most one
eigenvalue as well as a 0<Y<Y0 s.t. K_IR(Y) has no zero here
INPUT:
- ''R1'' -- real
- ''R2'' -- real
OUPUT:
- list of triplets (r1,r2,y) where [r1,r2] does not contain a zero of K_ir(y)
EXAMPLES::
sage: M._next_kbes
sage: l=M.split_interval(9.0,11.0)
sage: print l[0],'\n',l[1],'\n',l[2],'\n',l[3]
(9.00000000000000, 9.9203192604549457, 0.86169527676551638)
(9.9203192704549465, 10.135716354265259, 0.86083358148875089)
(10.13571636426526, 10.903681677771321, 0.86083358148875089)
(10.903681687771321, 11.0000000000000, 0.85997274790726208)
"""
import mpmath
# It is enough to work with double precision
base=mpmath.fp
pi=base.pi
# First we find the next zero
# First split into intervals having at most one zero
ivs=list()
rnew=R1; rold=R1
while (rnew < R2):
rnew=min(R2,self.next_eigenvalue(rold))
if( abs(rold-rnew)==0.0):
if self._verbose>0:
print "ivs=",ivs
exit
iv=(rold,rnew)
ivs.append(iv)
rold=rnew
# We now need to split these intervals into pieces with at most one zero of the K-Bessel function
Y00=base.mpf(0.995)*base.sqrt(base.mpf(3))/base.mpf(2 *self._group._level)
new_ivs=list()
for (r1,r2) in ivs:
if self._verbose>0:
print "r1,r2=",r1,r2
Y0=Y00; r11=r1
i=0
while(r11 < r2 and i<1000):
t=self._next_kbessel_zero(r11,r2,Y0*pi);i=i+1
if self._verbose>0:
print "t=",t
oiv=(r11,t,Y0); new_ivs.append(iv)
# must find Y0 s.t. |besselk(it,Y0)| is large enough
Y1=Y0
#k=base.besselk(base.mpc(0,t),Y1).real*mpmath.exp(t*0.5*base.pi)
k=besselk_dp(RR(t),Y1)*exp(t*0.5*RR.pi())
j=0
while(j<1000 and abs(k)<1e-3):
Y1=Y1*0.999;j=j+1
#k=base.besselk(base.mpc(0,t),Y1).real*mpmath.exp(t*0.5*base.pi)
k=besselk_dp(RR(t),Y1)*exp(t*0.5*RR.pi())
Y0=Y1
r11=t+1E-08
return new_ivs
def _next_kbessel_zero(self,r1,r2,y):
    r"""
    The first zero after r1 in the interval [r1,r2] of K_ir(y),K_ir(2y)

    INPUT:

    - ``r1`` -- double
    - ``r2`` -- double
    - ``y`` -- double

    OUTPUT:

    - ''double''

    EXAMPLES::

        sage: M=MaassWaveForms(MySubgroup(Gamma0(1)))
        sage: Y0=0.995*sqrt(3.0)/2.0
        sage: M._next_kbessel_zero(9.0,15.0,Y0)
        9.9203192604549439
        sage: M._next_kbessel_zero(9.921,15.0,Y0)
        10.139781183668587

    CAVEAT:
        The rootfinding algorithm is not very sophisticated and might miss roots
    """
    base=mpmath.fp
    # Step size used to scan for sign changes of the derivative in r.
    h=(r2-r1)/500.0
    t1=-1.0; t2=-1.0
    r0=base.mpf(r1)
    kd0=my_kbes_diff_r(r0,y,base)
    #print "r0,y=",r0,y,kd0
    # Locate t1 = first zero of K_ir(y) after r1.
    while(t1<r1 and r0<r2):
        # Let us first find a R-value for which the derivative changed sign
        kd=my_kbes_diff_r(r0,y,base)
        i=0
        while(kd*kd0>0 and i<500 and r0<r2):
            i=i+1
            r0=r0+h
            kd=my_kbes_diff_r(r0,y,base)
        #print "r0,kd=",r0,kd
        #print "kd*kd0=",kd*kd0
        #print "-r0,y,kd=",r0,y,kd
        #t1=base.findroot(lambda x : base.besselk(base.mpc(0,x),base.mpf(y),verbose=True).real,r0)
        # Refine with mpmath's root finder; retry in full precision on failure.
        try:
            t1=base.findroot(lambda x : my_kbes(x,y,base),r0)
        except ValueError:
            t1=base.findroot(lambda x : my_kbes(x,y,mpmath.mp),r0)
        r0=r0+h
    # Clamp to the right end of the search interval.
    if(r0>=r2 or t1>=r2):
        t1=r2
    # Locate t2 = first zero of K_ir(2y) after r1, same procedure.
    r0=r1
    kd0=my_kbes_diff_r(r0,y,base)
    while(t2<r1 and r0<r2):
        kd=my_kbes_diff_r(r0,y,base)
        i=0
        while(kd*kd0>0 and i<500 and r0<r2):
            i=i+1
            r0=r0+h
            kd=my_kbes_diff_r(r0,y,base)
        try:
            t2=base.findroot(lambda x : my_kbes(x,2*y,base),r0)
        except ValueError:
            t2=base.findroot(lambda x : my_kbes(x,2*y,mpmath.mp),r0)
        #t2=base.findroot(lambda x : base.besselk(base.mpc(0,x),base.mpf(2*y),verbose=True).real,r0)
        r0=r0+h
    if(r0>=r2 or t2>=r2):
        t2=r2
    #print "zero(besselk,y1,y2)(",r1,r2,")=",t1,t2
    # Return the smaller of the two zeros, kept inside [r1,r2].
    t=min(min(max(r1,t1),max(r1,t2)),r2)
    return t
def Hecke_matrix(self,F,p):
    r"""
    Make the matrix of T_p with respect to the basis F.
    Here F is assumed to be a 0-1 normalized basis, s.t.
    F[i]=[a[0],a[1],...,a[M0]]
    and
    F[0]=[0,a[1],0,...,0,a[d+1],...]
    F[1]=[0,0,a[2],0,...,0,a[d+1],...]
    ...
    F[d]=[0,0,0,...,0,a[d],a[d+1],...]

    INPUT:

    - ``F`` -- list of MaassWaveformElement's belonging to self
    - ``p`` -- prime

    OUTPUT: dim x dim complex matrix representing T_p in the basis F.
    """
    #if Integer(p).divides(self._level):
    #    raise NotImplementedError,"Have only implemented primitive Hecke operators. Got q={0}|{1}".format(p,self._group._level)
    dim =len(F)
    assert self == F[0]._space
    if p*dim>len(F[0]._coeffs[0][0]):
        raise ValueError,"Need smaller p or more coefficients!"
    x=self.multiplier().character()
    # Working precision from the type of the stored coefficients.
    # NOTE(review): if a coefficient is neither complex/float nor has a
    # parent(), ``prec`` stays unbound (NameError below) -- compare
    # Hecke_matrix_from_coeffs which defaults to 53; confirm intent.
    if isinstance(F[0]._coeffs[0][0][1],(complex,float)):
        prec=53
    elif hasattr(F[0]._coeffs[0][0][1],"parent"):
        prec=F[0]._coeffs[0][0][1].parent().prec()
    CF = MPComplexField(prec)
    MS=MatrixSpace(CF,dim,dim)
    Tp = Matrix_complex_dense(MS,0)
    for i in range(dim):
        for j in range(dim):
            # Hecke relation: (T_p a)(j+1) = a(p(j+1)) + chi(p) a((j+1)/p).
            c=F[i]._coeffs[0][0][p*(j+1)]
            tmp=CF(c)
            if (j+1) % p ==0:
                c = F[i]._coeffs[0][0][ZZ((j+1)/p)]
                tmp+=x(p)*CF(c)
            Tp[i,j]=tmp
    return Tp
def Hecke_eigenfunction(self,F,p,fnr=-1,verbose=0):
r"""
Construct a Hecke eigenfunction of T_p from the basis vector F
If coeffs_only=1 then we only return the coefficients of the first component and/or cusp,\
otherwise we return a MaassWaveformElement
If fnr<0 we return a vector of all eigenfunctions
"""
assert isinstance(F,(list,dict))
## If F is an Hecke eigenform we return it, otherwise return None
#return F ### In a one-
assert self==F[0]._space
## Test if we already have Hecke eigenfunctions.
is_hecke=1
for j in range(len(F)):
err = F[0].test(method='Hecke',format='float')
if err>1E-6:
is_hecke=0
break
if is_hecke==1:
if verbose>0:
print "We already have Hecke eigenfunctions!"
return F
C={}
for j in range(len(F)):
C[j]=F[j]._coeffs[0] ## We assume scalar-valued functions
#M=F[0]._space
Cnew=self.Hecke_eigenfunction_from_coeffs(C,p,cusps='all',
fnr=fnr,verbose=verbose)
#return Cnew
res=[];
for j in Cnew.keys():
FF=copy(F[0]) ## To get the same basic properties
FF._coeffs={0:Cnew[j]}
# If F[0] had a symmetry type set we try to find one for this too...
FF._sym_type=FF.find_sym_type()
res.append(FF)
return res
# # l=(Tp.transpose()).eigenvectors()[fnr]
# #print "l=",l
# ev=l[0]
# try:
# v=l[1][0]
# except KeyError:
# print "l=",l
# raise ArithmeticError,"Problem computing eigenvectors!"
# #print "v=",v
# C1=dict()
# if len(v)==2:
# res = F[0]._lin_comb(F[1],v[0],v[1])
# else:
# res=F[0]*v[0]
# for j in range(1,len(v)):
# res=res+F[j]*v[j]
# res = res*(1/v[0])
# return res
#for j in range(F._coeffs[0][0]):
def Hecke_matrix_from_coeffs(self,C,p):
    r"""
    Make the matrix of T_p with respect to the basis described by the
    coefficient dictionaries C.
    Here C is assumed to come from a 0-1 normalized basis, s.t.
    F[i]=[a[0],a[1],...,a[M0]]
    and
    F[0]=[0,a[1],0,...,0,a[d+1],...]
    F[1]=[0,0,a[2],0,...,0,a[d+1],...]
    ...
    F[d]=[0,0,0,...,0,a[d],a[d+1],...]

    INPUT:

    - ``C`` -- nested dict C[i][cusp][n] of coefficients of basis element i
    - ``p`` -- prime

    OUTPUT: dim x dim complex matrix representing T_p.
    """
    dim = len(C)
    if p*dim>len(C[0][0]):
        raise ValueError,"Need smaller p or more coefficients!\n Got: p={0} dim={1}, len(C)={2}".format(p,dim,len(C[0][0]))
    #assert p>dim:
    x=self.multiplier().character()
    #if Integer(p).divides(self.level()):
    #    raise NotImplementedError,"Have only implemented primitive Hecke operators. Got q={0}|{1}".format(p,self._group._level)
    #print "C001=",C[0][0][1]
    # Working precision from the coefficient type (default: double, 53 bits).
    if isinstance(C[0][0][1],(complex,float)):
        prec=53
        # Tp = Matrix(CC,dim,dim)
    elif hasattr(C[0][0][1],"parent"):
        prec=C[0][0][1].parent().prec()
        # Tp = Matrix(F[0]._coeffs[0][0][1].parent(),dim,dim)
    else:
        prec=53
    CF = MPComplexField(prec)
    MS=MatrixSpace(CF,dim,dim)
    Tp = Matrix_complex_dense(MS,0)
    xp = x(p).complex_embedding(prec)
    for i in range(dim):
        for j in range(dim):
            # Hecke relation: (T_p a)(j+1) = a(p(j+1)) + chi(p) a((j+1)/p).
            c=C[i][0][p*(j+1)]
            tmp=CF(c)
            if (j+1) % p ==0:
                #print "adding c[",ZZ((j+1)/p)
                c = C[i][0][ZZ((j+1)/p)]
                tmp+=xp*CF(c)
            Tp[i,j]=tmp
    return Tp
def Hecke_eigenfunction_from_coeffs(self,C,p,cusps='all',fnr=-1,verbose=0):
    r"""
    Construct coefficients of Hecke eigenfunctions of T_p from the
    coefficient dictionaries C of a basis.

    INPUT:

    - ``C`` -- nested dict C[i][cusp][n] of coefficients of basis element i
    - ``p`` -- prime defining the Hecke operator T_p
    - ``cusps`` -- 'all', or a single cusp index i to restrict the output
      (usually used if we only want coefficients at infinity)
    - ``fnr`` -- if fnr<0 compute all eigenfunctions, else only number fnr
    - ``verbose`` -- verbosity level

    OUTPUT: dict res[j][cusp][n] of coefficients of the eigenfunctions.
    """
    assert isinstance(C,(list,dict))
    assert dict_depth(C)>=3
    Tp=self.Hecke_matrix_from_coeffs(C,p)
    #while Tp.det()<Tp.eps():
    #    if verbose>0:
    #        print "Matrix is near singular, change p!"
    #
    # Sort eigenvalues according to the sign of chi(p).
    if self._character(p)==-1:
        sorting=-1
    else:
        sorting=1
    if verbose>1:
        print "Tp=",Tp
    try:
        l=(Tp.transpose()).eigenvectors(verbose=0,sorted=sorting)
    except ArithmeticError:
        raise ArithmeticError, "Could not compute eigenvectors of the Hecke matrix T_{0}:\n{1}".format(p,Tp)
    if self._verbose>1 or verbose>0:
        for ll in l:
            print ll
    if fnr<0 or fnr>len(l): # compute all eigenfunctions
        fstart=0; fstop=len(l)
    else:
        fstart=fnr; fstop=fnr+1
    res=dict(); jj=0
    for j in range(fstart,fstop):
        ev=l[j][0]
        if self._verbose>1:
            print "ev=",ev
        if not isinstance(l[j][1],list):
            print "Tp=",Tp
            print "l=",l
            raise ArithmeticError,"Problem computing eigenvectors!"
        #if len(l[j][1])>1:
        #    raise ArithmeticError,"Eigenspace seems to be more than one-dimensional! For p={0} in the space {1}. \n eigenvalue={2} and vector={3}".format(p,self,ev,l[j][1])
        for v in l[j][1]:
            #v=l[j][1][0] # Eigenvector
            if self._verbose>1:
                print "v=",v
            CF=ComplexField(v[0].prec())
            # Get a normalizing coefficient: the first nonzero entry of v.
            v_norm=0
            for i in range(len(v)):
                if abs(v[i])>0:
                    v_norm=CF(v[i].real(),v[i].imag())
                    break
            if v_norm==0:
                ## Presumably we had a too large-dimensional space
                continue
                # raise ArithmeticError,"Could not find non-zero Hecke eigenvector! \n Hecke matrix is:{0} \n".format(Tp.transpose())
            res[jj]=dict()
            for i in C[0].keys():
                if cusps<>'all' and cusps<>i:
                    continue
                # Cusps with known Atkin-Lehner eigenvalue are stored as
                # a scalar rather than a coefficient dict.
                if self._cusp_evs_dict.get(i,0)<>0 and i>0:
                    res[jj][i]=self._cusp_evs_dict.get(i,0)
                    continue
                res[jj][i]=dict()
                # print "C[0][",i,"]=",C[0][i]
                # Form the linear combination sum_k v[k]*C[k], normalized.
                for n in C[0][i].keys():
                    res[jj][i][n]=CF(0)
                    for k in range(len(v)):
                        vj=CF(v[k].real(),v[k].imag())
                        res[jj][i][n]=res[jj][i][n]+vj*C[k][i][n]
                    res[jj][i][n]=res[jj][i][n]/v_norm
            jj+=1
    return res
def max_assumed_dim(self):
    r"""
    If the character has components of order two the dimension
    of a generic type Maass form doubles.
    Similarly if N has square factors the dimension of generic spaces
    might increase by multiples of two.
    """
    if not self._group._is_congruence:
        return 1
    dim = 1
    chi = self._multiplier._character
    if not chi.is_trivial():
        # Each character component of order <= 2 doubles the bound.
        for comp in chi.decomposition():
            if comp.order() <= 2:
                dim *= 2
    # Each square factor of the level doubles the bound as well.
    for p, mult in self._group.level().factor():
        if mult > 1:
            dim *= 2
    return dim
def get_primitive_p(self,p0=0,notone=1):
    r"""
    Gives a prime p to use for Hecke operator on M
    p should be relative prime to the level of M._group
    and to the modulus of M._multiplier._character

    INPUT:

    - ``p0`` -- return prime greater than p0
    - ``notone`` -- if set to one we return a prime with chi(p)<>1
    """
    if not self._group._is_congruence:
        return next_prime(p0)
    m=self._multiplier
    x=m._character
    if hasattr(x,"modulus"):
        modulus=x.modulus()
    else:
        modulus=1
    prim_to=lcm(self._group._level,modulus)
    p00 = next_prime(p0)
    # Search window of length prim_to: every residue class mod prim_to occurs.
    p01 = p00 + prim_to
    if notone:
        # NOTE(review): special-cases levels divisible by 9 by excluding
        # primes p = 1 mod 3 -- presumably to avoid chi(p)=1; confirm.
        if self._group._level % 9 ==0 :
            pq=3
        # celif self._group._level % 4 ==0 :
        #    pq=4
        else:
            pq=1  # p % 1 == 1 never holds, so no residue class is excluded
    for p in prime_range(p00,p01+1):
        if notone==1 and p%pq==1:
            continue
        if gcd(p,prim_to)==1:
            return p
    raise ArithmeticError," Could not find appropriate p rel. prime to {0}!".format(prim_to)
def smallest_M0(self):
    r"""
    Smallest M0 which we can use if we want to test using Hecke relations.
    """
    if is_Hecke_triangle_group(self._group):
        self._smallest_M0=int(12*self._group._lambdaq)
    # NOTE(review): assumes self._smallest_M0 was initialised (e.g. to 0)
    # elsewhere; otherwise this raises AttributeError -- confirm.
    if self._smallest_M0>0:
        return self._smallest_M0
    # We need coefficients up to a*b (plus a margin of 3) in order to
    # test the multiplicative relation c(a)c(b)=c(ab) for primes a,b.
    a = self.get_primitive_p()
    b = self.get_primitive_p(a)
    c = a*b
    self._smallest_M0=c+3
    return self._smallest_M0
def test_Hecke_relation(self,C={},a=0,b=0):
    r"""Testing Hecke relations for the Fourier coefficients in C

    INPUT:

    -''C'' -- dictionary of complex (Fourier coefficients)
    -''a'' -- integer
    -''b'' -- integer

    OUTPUT:

    -''diff'' -- real : |C(a)C(b)-C(ab)| if (a,b)=1, and -1 if the test
      could not be performed (missing coefficients or too small values).

    EXAMPLE::

        sage: S=MaassWaveForms(Gamma0(1))
        sage: R=mpmath.mpf(9.53369526135355755434423523592877032382125639510725198237579046413534)
        sage: Y=mpmath.mpf(0.85)
        sage: C=coefficients_for_Maass_waveforms(S,R,Y,10,20,12)
        sage: d=_test_Hecke_relations(C,2,3); mppr(d)
        '9.29e-8'
        sage: C=coefficients_for_Maass_waveforms(S,R,Y,30,50,20)
        sage: d=_test_Hecke_relations(C,2,3); mppr(d)
        '3.83e-43'
    """
    # Pick default primes if none were supplied.
    if a*b==0:
        a = self.get_primitive_p()
        b = self.get_primitive_p(a)
    c=gcd(Integer(a),Integer(b))
    if self._verbose>1:
        print "Test Hecke: a={0},b={1},gcd(a,b)={2}".format(a,b,c)
    if not C.has_key(0):
        return -1
    # Accept a flat dict of coefficients as well as one keyed by cusp.
    if not hasattr(C[0],"has_key"):
        C = {0:C}
    if C[0].has_key(a) and C[0].has_key(b) and C[0].has_key(a*b):
        # General Hecke relation:
        #   c(a)c(b) = sum_{d | gcd(a,b)} chi(d) c(ab/d^2)
        lhs=C[0][a]*C[0][b]
        rhs=0
        for d in divisors(c):
            if self._ch<>0:
                x = self._character(d)
            else:
                x = 1
            m = Integer(a*b/d/d)
            if self._verbose>1:
                print "rhs+=c*C[0][{0}]={1}".format(m,x*C[0][m])
            rhs=rhs+x*C[0][m]
        if self._verbose>1:
            print "|rhs|=",abs(rhs)
            print "|lhs|=",abs(lhs)
            print "self._prec=",self._prec
        # If both sides are numerically negligible the test is meaningless.
        if max(abs(rhs),abs(lhs))<max(1e-8,2.0**(-0.5*self._prec)):
            return -1
        if abs(lhs)>0:
            return abs(rhs/lhs-1.0)
    return -1
def scattering_determinant(self, s):
    r"""
    Evaluate the scattering determinant, varphi(s), of self at s using the
    (constant term of the) non-holomorphic Eisenstein series.

    Only implemented for Hecke triangle groups at the moment.
    """
    if is_Hecke_triangle_group(self._group):
        E = EisensteinSeries(self, s, verbose=self._verbose)
        return E._coeffs[0][0]
    raise NotImplementedError("Only implemented for Hecke triangle groups")
## def my_kbes_diff_r(r,x,mp_ctx=None):
## r"""
## Approximation to the derivative with respect to R of the scaled K-Bessel function.
## INPUT:
## - ''r'' -- real
## - ''x'' -- real
## - ''ctx'' -- mpmath context (default mpmath.mp)
## OUTPUT:
## - real -- K_ir(x)*exp(pi*r/2)
## EXAMPLES::
## sage: my_kbes_diff_r(9.45,0.861695276766 ,mpmath.fp)
## -0.31374673969963851
## sage: my_kbes_diff_r(9.4,0.861695276766 ,mpmath.fp)
## 0.074219541623676832
## """
## import mpmath
## if(mp_ctx==None):
## mp_ctx=mpmath.mp
## if(mp_ctx==mpmath.mp):
## pi=mpmath.mp.pi()
## else:
## pi=mpmath.fp.pi
## try:
## k=mp_ctx.besselk(mp_ctx.mpc(0,r),ctx.mpf(x))
## f=k*mp_ctx.exp(r*mp_ctx.mpf(0.5)*pi)
## except OverflowError:
## k=mp_ctx.besselk(mp_ctx.mpc(0,r),mp_ctx.mpf(x))
## f=k*mp_ctx.exp(r*mp_ctx.mpf(0.5)*pi)
## f1=f.real
## try:
## h=mp_ctx.mpf(1e-8)
## k=mp_ctx.besselk(mp_ctx.mpc(0,r+h),mp_ctx.mpf(x))
## f=k*mp_ctx.exp((r+h)*mp_ctx.mpf(0.5)*pi)
## except OverflowError:
## h=mp_ctx.mpf(1e-8)
## k=mp_ctx.besselk(mp_ctx.mpc(0,r+h),mp_ctx.mpf(x))
## f=k*mp_ctx.exp((r+h)*mp_ctx.mpf(0.5)*pi)
## f2=f.real
## diff=(f2-f1)/h
## return diff
#class MaassWaveformElement (SageObject):
class MaassWaveformElement(AutomorphicFormElement): #(Parent):
r"""
An element of a space of Maass waveforms
EXAMPLES::
sage: G=MySubgroup(Gamma0(1))
sage: R=mpmath.mpf(9.53369526135355755434423523592877032382125639510725198237579046413534)
sage: F=MaassWaveformElement(G,R)
Maass waveform with parameter R=9.5336952613536
in Space of Maass waveforms on the group G:
Arithmetic Subgroup of PSL2(Z) with index 1. Given by:
perm(S)=()
perm(ST)=()
Constructed from G=Modular Group SL(2,Z)
sage: G=MySubgroup(Gamma0(4))
sage: R=mpmath.mpf(3.70330780121891)
sage: F=MaassWaveformElement(G,R);F
Maass waveform with parameter R=3.70330780121891
Member of the Space of Maass waveforms on the group G:
Arithmetic Subgroup of PSL2(Z) with index 6. Given by:
perm(S)=(1,2)(3,4)(5,6)
perm(ST)=(1,3,2)(4,5,6)
Constructed from G=Congruence Subgroup Gamma0(4)
sage: F.C(0,-1)
mpc(real='-1.0000000000014575', imag='5.4476887980094281e-13')
sage: F.C(0,15)-F.C(0,5)*F.C(0,3)
mpc(real='-5.938532886679327e-8', imag='-1.0564743382278074e-8')
sage: F.C(0,3)
mpc(real='0.53844676975670527', imag='-2.5525466782958545e-13')
sage: F.C(1,3)
mpc(real='-0.53844676975666916', imag='2.4484251009604091e-13')
sage: F.C(2,3)
mpc(real='-0.53844676975695485', imag='3.3624257152434837e-13')
"""
def __init__(self,G,R,C=None,nd=12,sym_type=None,cusp_evs={},verbose=None,prec=53,set_c=None,dim=1,compute=False,**kwds):
    r"""
    Construct a Maass waveform on the group G with spectral parameter R
    and coefficients C.

    INPUT:

    - ``G`` -- group, or a space of Maass waveforms
    - ``R`` -- spectral parameter
    - ``C`` -- Fourier coefficients (default None)
    - ``nd`` -- number of desired digits (default 12)
    - ``sym_type`` -- even/odd symmetry type (optional)
    - ``cusp_evs`` -- dict of Atkin-Lehner eigenvalues at cusps (optional)
    - ``prec`` -- working precision in bits (default 53)
    - ``set_c`` -- prescribed coefficients (optional)
    - ``dim`` -- dimension of the eigenspace (default 1)
    - ``compute`` -- if True and C is None, compute the coefficients

    EXAMPLES::

        sage: G=MySubgroup(Gamma0(1))
        sage: R=mpmath.mpf(9.53369526135355755434423523592877032382125639510725198237579046413534)
        sage: F=MaassWaveformElement(G,R)
        Maass waveform with parameter R=9.5336952613536
        in Space of Maass waveforms on the group G:
        Arithmetic Subgroup of PSL2(Z) with index 1. Given by:
        perm(S)=()
        perm(ST)=()
        Constructed from G=Modular Group SL(2,Z)
        sage: G=MySubgroup(Gamma0(4))
        sage: R=mpmath.mpf(3.70330780121891)
        sage: F=MaassWaveformElement(G,R);F
        Maass waveform with parameter R=3.70330780121891
        Member of the Space of Maass waveforms on the group G:
        Arithmetic Subgroup of PSL2(Z) with index 6. Given by:
        perm(S)=(1,2)(3,4)(5,6)
        perm(ST)=(1,3,2)(4,5,6)
        Constructed from G=Congruence Subgroup Gamma0(4)
        sage: F.C(0,-1)
        mpc(real='-1.0000000000014575', imag='5.4476887980094281e-13')
        sage: F.C(0,15)-F.C(0,5)*F.C(0,3)
        mpc(real='-5.938532886679327e-8', imag='-1.0564743382278074e-8')
        sage: F.C(0,3)
        mpc(real='0.53844676975670527', imag='-2.5525466782958545e-13')
        sage: F.C(1,3)
        mpc(real='-0.53844676975666916', imag='2.4484251009604091e-13')
        sage: F.C(2,3)
        mpc(real='-0.53844676975695485', imag='3.3624257152434837e-13')
    """
    import mpmath
    # Accept either an existing space or a group from which to build one.
    if hasattr(G,"_is_maass_waveform_space"):
        self._space=G
    else:
        self._space= MaassWaveForms (G)
    self._group=self._space._group
    if cusp_evs<>{}:
        self._space.set_cusp_evs(cusp_evs)
    self._cusp_evs = self._space._cusp_evs
    self._cusp_evs_dict = self._space._cusp_evs_dict
    self._R=R
    self._nd=nd
    self._set_c=set_c
    if sym_type<>None:
        self._space.set_sym_type(sym_type)
    self._sym_type=self._space._sym_type
    self._dim=dim
    self._version=1 ## Might be necessary in the future
    AutomorphicFormElement.__init__(self,self._space,C=None,prec=prec,principal_part={},verbose=verbose)
    # Choose the mpmath context: multiprecision beyond 15 digits.
    if nd>15:
        mpmath.mp.dps=nd
        self.mp_ctx=mpmath.mp
    else:
        self.mp_ctx=mpmath.fp
    ## We use the Fourier coefficients to verify whether we really have an eigenvalue
    self._coeffs={}
    ## If self is constructed as a Hecke eigenform with respect
    ## to T_p we don't want to use p for testing.
    self._from_hecke_p=kwds.get('hecke_p',0)
    if C<>None:
        # Coefficients may be given either with (depth 3: dim/cusp/index)
        # or without (depth 2: cusp/index) the leading component key.
        if dict_depth(C)==3:
            self._coeffs=C
            self._M0 = max(C[0][0].keys())
        else:
            self._coeffs[0]=C
            self._M0 = max(C[0].keys())
        er=self.test()
    else:
        dprec=2.**-nd
        self._M0=get_M_for_maass(self._R,
                   self._group.minimal_height(),dprec)
        #print "Need to compute coefficients!"
        # We compute a set of Fourier coefficients
        #(Y,M)=find_Y_and_M(G,R)
        #Q=M+10
        self._coeffs=dict()
        if compute:
            self._coeffs=self.get_coeffs() #Maassform_coeffs(self._space,R,ndigs=self._nd)[0]
        else:
            # Empty coefficient dicts, one per cusp.
            self._coeffs[0]=dict()
            for j in range(self._group.ncusps()):
                self._coeffs[0][j]=dict()
    #self._cusp_evs=[]
    #self._cusp_evs_dict={}
def _repr_(self):
    r""" Returns string representation of self.

    EXAMPLES::

        sage: R=mpmath.mpf(9.53369526135355755434423523592877032382125639510725198237579046413534)
        sage: F=MaassWaveformElement(Gamma0(1),R,nd=50);F
        Maass waveform with parameter R=9.5336952613535575543442352359287703238212563951073
        Member of the Space of Maass waveforms on the group G:
        Arithmetic Subgroup of PSL2(Z) with index 1.Given by
        perm(S)=()
        perm(ST)=()
        Constructed from G=Modular Group SL(2,Z)
    """
    if self._sym_type == 1:
        sym = "odd "
    elif self._sym_type == 0:
        sym = "even "
    else:
        sym = ""
    s = "Maass waveform with parameter R=" + str(self._R) + ". Symmetry: " + sym
    if self._cusp_evs != []:
        s += "Atkin-Lehner eigenvalues at cusps:" + str(self._cusp_evs_dict)
    s += "\nMember of the " + str(self._space)
    return s
def __reduce__(self):
    r""" Used for pickling.

    Return the class and the constructor arguments needed to rebuild self.
    """
    data = (self._space, self._R, self._coeffs, self._nd,
            self._sym_type, self._cusp_evs, self._verbose, self._prec)
    return (MaassWaveformElement, data)
def group(self):
    r"""
    Return the group on which this Maass waveform lives.
    """
    return self._group
def level(self):
    r"""Return the level of the group of this Maass waveform."""
    return self._group.level()
def eigenvalue(self):
    r"""Return the spectral parameter R of this waveform."""
    return self._R
def find_sym_type(self,tol=1e-7):
cnr=1
st_old=-1
c0 = self._coeffs[0][0][1]; cnr=1
for k in range(self._M0):
if abs(abs(self._coeffs[0][0][k])-1.0)<tol:
c0 = self._coeffs[0][0][k]; cnr=k
break
if k>=self._M0:
print "Could not find c[k] close to 1!"
eosym = self._space.even_odd_symmetries()
for j in eosym.keys():
s,d = eosym[j]
if s==0:
continue
c1=self._coeffs[0][j][cnr]
if abs(c1-c0*d)<tol:
st=0
elif abs(c1+c0*d)<tol:
st=1
else:
return -1
if st<>st_old and st_old<>-1:
return -1
st_old=st
def C(self,i,j=None,r=0):
    r"""
    Return the coefficient C(i,j), i.e. coefficient nr. j at cusp i
    (or, if j is None, coefficient nr. i at cusp 0).

    INPUT:

    - ``i`` -- cusp index, or coefficient index when ``j`` is None
    - ``j`` -- coefficient index (optional)
    - ``r`` -- component index (default 0)

    OUTPUT: the coefficient, or None if it has not been computed.

    EXAMPLES::

        sage: G=MySubgroup(Gamma0(1))
        sage: R=mpmath.mpf(9.53369526135355755434423523592877032382125639510725198237579046413534)
        sage: F=MaassWaveformElement(G,R)
        sage: F.C(2)
        mpc(real='-1.068333551223568', imag='2.5371356217909904e-17')
        sage: F.C(3)
        mpc(real='-0.45619735450601293', imag='-7.4209294760716175e-16')
        sage: F.C(2)*F.C(3)-F.C(6)
        mpc(real='8.2470016583210667e-8', imag='1.6951583479643061e-9')
    """
    if j==None:
        cusp=0
        j=i
    else:
        cusp=i
    # ``dict.has_key`` was removed in Python 3; the ``in`` operator is
    # equivalent and also works in Python 2.
    if cusp not in self._coeffs[r]:
        raise ValueError(" Need a valid index of a cusp as first argument! I.e in %s" %self._coeffs.keys())
    if j not in self._coeffs[r][cusp]:
        return None
    return self._coeffs[r][cusp][j]
def test(self,method='Hecke',up_to_M0=0,format='digits',verbose=0):
r""" Return the number of digits we believe are correct (at least)
INPUT:
- method -- string: 'Hecke' or 'pcoeff' or 'TwoY'
- format = 'digits' or 'float'
EXAMPLES::
sage: G=MySubgroup(Gamma0(1))
sage: R=mpmath.mpf(9.53369526135355755434423523592877032382125639510725198237579046413534)
sage: F=MaassWaveformElement(G,R)
sage: F.test()
7
"""
# If we have a Gamma_0(N) we can use Hecke operators
verbose = max(verbose,self._space._verbose)
if self.level()==1:
method='Hecke'
if method=='Hecke' and self._space._group.is_congruence():
#a = self._space.get_primitive_p()
#b = self._space.get_primitive_p(a)
p = self._from_hecke_p
a = self._space.get_primitive_p(p)
b = self._space.get_primitive_p(a)
if verbose>1:
print "Check Hecke relations! a={0}, b={1}".format(a,b)
if len(self._coeffs)>b+3:
if verbose>=0:
print "Too few coefficients for test!"
return 1
er=self._space.test_Hecke_relation(self._coeffs[0],a=a,b=b)
#print "er=",er
if er == 0:
return 0
if er==-1:
return self.test(method='pcoeff',up_to_M0=up_to_M0,format=format)
if format=='float':
return er
d=floor(-log(er,10))
if verbose>0:
print "Hecke is ok up to ",d,"digits!"
return d
elif method=='pcoeff':
#d1 = self.test(method='Hecke',format=format)
if verbose>0:
print "Testing prime coefficients!"
N = self._space._group._level
d1 = 1
x = self._space._character
for p in prime_range(N):
if x.is_trivial():
if valuation(N,p)==1:
d2 = abs(abs(self._coeffs[0][0][p])**2-RR(1)/RR(p))
if verbose>1:
print "Checking c({0})".format(p)
if d2<d1: d1=d2
elif valuation(N,p)==2:
d2 = abs(self._coeffs[0][0][p])
if d2<d1: d1=d2
else:
p = x.conductor()
d2 = abs(abs(self._coeffs[0][0][p])-1)
if d2<d1: d1=d2
return d1
else:
# Test two Y's. Since we don't know which Y we had to start with we use two new ones and compare against the coefficients we already have
nd=self._nd+5
[M0,Y0]=find_Y_and_M(self._group,self._R,nd)
Y1=Y0*0.95
C1=Maassform_coeffs(self._space,self._R,Mset=M0,Yset=Y0 ,ndigs=nd )[0]
C2=Maassform_coeffs(self._space,self._R,Mset=M0,Yset=Y1 ,ndigs=nd )[0]
er=RR(0)
for j in range(2,max(M0/2,up_to_M)):
if self._coeffs[0].has_key(j):
t1=abs(C1[j]-self._coeffs[0][j])
t2=abs(C2[j]-self._coeffs[0][j])
t = max(t1,t2)
if t==t1:
print "|C1-C[{0}]|=|{1}-{2}|={3}".format(j,C1[j],self._coeffs[0][j],t)
else:
print "|C2-C[{0}]|=|{1}-{2}|={3}".format(j,C2[j],self._coeffs[0][j],t)
else:
t=abs(C1[j]-C2[j])
print "|C2-C[{0}]|=|{1}-{2}|={3}".format(j,C1[j],C2[j],t)
if t>er:
er=t
d=floor(-log(er,10))
print "Hecke is ok up to ",d,"digits!"
return d
def eval(self,x,y,prec=1E-10):
r"""
Evaluate self.
"""
return eval_maass_lp(self,RR(x),RR(y))
def plot(self,xlim,ylim,num_pts,**kwds):
r"""
Make a plot of self.
"""
# we evaluate self over a grid, for efficiency
P=density_plot(self.eval,(-0.5,0.5),(0.01,1.01),plot_points=100,axes=False,**kwds)
# P=density_plot(f,(-0.5,0.5),(0.01,1.01),plot_points=120,axes=False,**kwds)
return P
(xmin,xmax,Nx)=xlim
(ymin,ymax,Ny)=ylim
hy = (ymax-ymin)/RR(Ny)
hx = (xmax-xmin)/RR(Nx)
yvec=dict()
#assert self._G ==
for i in range(Nx):
y=ymin+i*hy
for n in range(1,M0):
yvec[n]=besselk_dp(self.R,RR(2*pi*n))
for j in range(Ny):
x=xmin+i*hx
w=0
argx=CC(2*pi*x*I)
for n in range(M0):
term=self.C[n]*exp(argx*N)
def get_coeffs(self,Mset=0 ,Yset=None,ndigs=12,twoy=None,overwrite=False,dim=1):
r"""
Compute M Fourier coefficients (at each cusp) of a Maass (cusp)
waveform with eigenvalue R for the group G.
INPUT:
- ''S'' -- space of Maass waveforms
- ''R'' -- Real number with precision prec
- ''Mset'' -- integer : number of desired coefficients
- ''ST'' -- set symmetry
- ''Yset'' -- real
- ''ndigs''-- integer
-``overwrite`` -- set to True to overwrite old coefficients
OUTPUT:
-''D'' -- dictionary of Fourier coefficients
EXAMPLES:
sage: R=mpmath.mpf(9.53369526135355755434423523592877032382125639510725198237579046413534)
sage: sage: M=MaassWaveForms(Gamma0(1))
sage: sage: C=Maassform_coeffs(M,R)
"""
S=self._space
R=self._R
G=S._group
import mpmath
if(S._verbose>1):
print "S=",S
dold=mpmath.mp.dps
param=self._space.set_default_parameters(R,Mset,Yset,ndigs)
if Yset:
Y=Yset
else:
Y=param['Y']*0.5
Q=param['Q']
M=param['M']
sym_type=self._sym_type
#dim=self._dim
set_c=self._set_c
Norm = S.set_norm(dim)
if S._verbose>1:
print "R,Y,M,Q=",R,Y,M,Q
print "sym_type=",sym_type
print "Norm=",Norm
# print "nd=",mpmath.mp.dps
do_cplx=1
if S.multiplier().is_real() and sym_type in [0,1]:
do_cplx=0
#if self._dim>1:
# raise NotImplementedError,"Vector-valued Maass waveforms are currently not implemented!"
if ndigs<=15:
if do_cplx:
X=get_coeff_fast_cplx_dp_sym(S,RR(R),RR(Y),int(M),int(Q),Norm)
else:
X=get_coeff_fast_real_dp(S,RR(R),RR(Y),int(M),int(Q),Norm)
else:
raise NotImplementedError,"High precision is currently not (efficiently) inplemented!"
## The parameters used to compute the current set of coefficients.xs
self._M0 = M
self._Y = Y
#X = coefficients_for_Maass_waveforms(S,R,Y,M,Q,ndigs,cuspidal=True,sym_type=sym_type,dim=dim,set_c=set_c)
# If we compute more than one Maass form at one time we simply put the coefficients in the first component
# And rearrange them later in the "get_element" routine.
if overwrite or dim>1:
self._coeffs[0]=X
return
if not isinstance(self._coeffs,dict):
self._coeffs=dict()
self._coeffs[0]=dict()
if self._verbose>0:
print "X.keys()=",X.keys()
for j in X.keys():
for n in X[j].keys():
if not isinstance(self._coeffs[0][j],dict):
self._coeffs[0][j]=dict()
if self._coeffs[0][j].has_key(n):
continue
else:
self._coeffs[0][j][n]=X[j][n]
    def Hecke_action(self,p):
        r"""
        Return T_p(F), the image of self under the Hecke operator T_p.

        INPUT:

        - ``p`` -- integer (presumably a prime; not checked here)

        Note: Only Fourier coefficients at infinity (cusp index 0) are computed.

        The coefficients of T_p(F) are c'(n) = c(n*p) + chi(p)*c(n/p),
        where the second term only appears when p | n.
        """
        res = copy(self)
        c = res._coeffs  # NOTE(review): unused local variable
        x=self._space._multiplier._character
        for r in res._coeffs.keys():
            # replace the coefficients of each component by the transformed ones
            res._coeffs[r]=dict()
            res._coeffs[r][0]=dict()
            Ms = min(self._coeffs[r][0].keys())
            Mf = max(self._coeffs[r][0].keys())
            if Ms<0:
                # two-sided coefficient range: shrink so that n*p stays in range
                Ms=ceil(RR(Ms)/RR(p))
                Mf=floor(RR(Mf)/RR(p))
            for n in range(Ms,Mf+1):
                tmp=0
                if self._coeffs[r][0].has_key(n*p):
                    tmp += self._coeffs[r][0][n*p]
                if (n%p)==0:
                    m = Integer(n/p)
                    if self._coeffs[r][0].has_key(m):
                        tmp+=x(p)*self._coeffs[r][0][m]
                res._coeffs[r][0][n]=tmp
        return res
class EisensteinSeries(AutomorphicFormElement):
r"""
Non-holomorphic Eisenstein series
"""
def __init__(self,G,s,nd=12,compute=True,verbose=0):
if hasattr(G,"_is_maass_waveform_space"):
self._space=G
else:
self._space= MaassWaveForms(G,cuspidal=False)
### The working precision is determined by the input
if hasattr(s,"prec"):
prec = s.prec()
else:
prec = 53
self._prec = prec
self._verbose = verbose
CF = MPComplexField(self._prec)
RF = RealField(self._prec)
self._sigma= RF(s.real())
self._R= RF(s.imag())
self._s = CF(self._sigma,self._R)
self._ndigs = nd
self._eps = 2.0**(1-nd)
AutomorphicFormElement.__init__(self,self._space,C=None,prec=prec,principal_part={},verbose=verbose)
if compute:
self.get_coefficients()
def get_coefficients(self,Y0=0,M0=0):
## At the moment we have only implemented
## Eisenstein series for Hecke triangle groups.
if not is_Hecke_triangle_group(self._space._group):
raise NotImplementedError
Rf = float(abs(self._R))
if M0>0:
Y = get_Y_for_M_dp(self._space,Rf,M0,self._eps)
elif Y0>0:
Y = Y0
M = get_M_for_maass_dp(Rf,float(Y0),float(self._eps))
else:
Y,M = get_Y_and_M_dp(self._space,abs(self._R),self._eps)
Ymax = self._space._group.minimal_height()/self._space._group._lambdaq
if Y>Ymax:
Y=0.99*Ymax
M = get_M_for_maass_dp(float(abs(self._R)),float(Y),float(self._eps))
RF = RealField(self._prec)
Y = RF(Y)
if self._verbose>0:
print "Computing coefficients at s={0} with Y={1}, M={2}".format(self._s,Y,M)
C = Eisenstein_series_one_cusp(self._space,self._sigma,self._R,Y,M,self._verbose)
self._coeffs = {0: C}
from numpy import array
def coefficients_for_Maass_waveforms(S,R,Y,M,Q,ndigs,cuspidal=True,sym_type=None,dim=1,set_c=None):
r"""
Compute coefficients of a Maass waveform given a specific M and Y.
INPUT:
- ''S'' -- Space of Maass waveforms
- ''R'' -- real : 1/4+R*R is the eigenvalue
- ''Y'' -- real number > 9
- ''M'' -- integer
- ''Q'' -- integer
- ''cuspidal''-- logical (default True)
- ''sym_type'' -- integer (default None)
- ''ndigs''-- integer : desired precision
OUTPUT:
-''D'' -- dictionary of Fourier coefficients
EXAMPLES::
sage: S=MaassWaveForms(Gamma0(1))
sage: R=mpmath.mpf(9.53369526135355755434423523592877032382125639510725198237579046413534)
sage: Y=mpmath.mpf(0.85)
sage: C=coefficients_for_Maass_waveforms(S,R,Y,10,20,12)
"""
G=S.group()
if S._verbose>1:
print "R,Y,M,Q,sym_type=",R,Y,M,Q,sym_type
## Find out which method to use. I.e. real/complex/multiprec. etc.
#if ndigs<=12:
#
if ndigs<=12:
W=setup_matrix_for_Maass_waveforms(S,R,Y,M,Q,cuspidal=True,sym_type=sym_type,low_prec=True)
else:
W=setup_matrix_for_Maass_waveforms(S,R,Y,M,Q,cuspidal=True,sym_type=sym_type)
#set_c=S._set_c
#dim=S._dim ## Assumed dimension of ambient space / determines how many F-coeffs. we need to set.
N=S.set_norm(dim,cuspidal=True)
#return [W, N]
dold=mpmath.mp.dps
mpmath.mp.dps=max(dold,50) # We work with low precision initially
if(S._verbose>1):
deb=True
else:
deb=False
done=False; j=0
while(done==False and j<=10):
if(S._verbose>1):
print "Trying to solve with prec=",mpmath.mp.dps
try:
X=solve_system_for_Maass_waveforms(W,N,deb=deb)
except ZeroDivisionError:
pass
if(is_Integer(X) or isinstance(X,int)):
mpmath.mp.dps=X+5
elif(isinstance(X,dict)):
done=True
else:
raise ArithmeticError," Could not solve system!"
j=j+1
print "X.keys=",X.keys()
if(S._verbose>1):
for m in X.keys():
print "Function nr. ",m+1
for j in X[m].keys():
if(sym_type==None):
for n in range(M,1 ,-1 ):
print "C[",n,"]=",X[m][j][n]
for n in range(M):
print "C[",n,"]=",X[m][j][n]
else:
for n in range(1,M+1):
print "C[",n,"]=",X[m][j][n]
#print "C2=",X[0][2]
#print "c2c3-c6=",X[0][2]*X[0][3]-X[0][6]
mpmath.mp.dps=dold
return X
def verify_eigenvalue(S,R,nd=10,ST=None,method='TwoY'):
    r""" Verify an eigenvalue and give an estimate of the error.

    INPUT:

    -''S'' -- Space of Maass waveforms
    -''R'' -- real: (tentative) eigenvalue  = 1/4+R**2
    -''nd''-- integer : number of digits we try to get (at a minimum)
    -''ST'' -- symmetry type (passed on to Maassform_coeffs)
    -''method'' -- string (currently unused)

    NOTE(review): this function appears to be unfinished -- it computes the
    coefficients but performs no verification and returns None.
    """
    C=Maassform_coeffs(S,R,ST=ST ,ndigs=nd)
# def find_single_ev(S,R1in,R2in,Yset=None,neps=10,method='TwoY',verbose=0):
# r""" Locate a single eigenvalue on G between R1 and R2
# INPUT:(tentative)
# - ''S'' -- space of Maass waveforms
# - ''R1in'' -- real
# - ''R1in'' -- real
# - ''Yset'' -- real (use this value of Y to compute coefficients)
# - ''neps'' -- number of desired digits
# OUTPUT:
# - ''R'' --
# """
# G=S.group()
# jmax=1000 # maximal number of interation
# if(neps>=15):
# R1=mpmath.mp.mpf(R1in);R3=mpmath.mp.mpf(R2in)
# print "mpmath.mp.dps=",mpmath.mp.dps
# print "R1=",R1,type(R1)
# print "R3=",R3,type(R3)
# else:
# R1=mpmath.fp.mpf(R1in);R3=mpmath.fp.mpf(R2in)
# if(Yset==None):
# [Y,M]=find_Y_and_M(G,R1,neps)
# else:
# [Y,M]=find_Y_and_M(G,R1,neps,Yset=Yset)
# Y1=Y; Y2=mpmath.mpf(0.995)*Y1
# tol=mpmath.mpf(10)**mpmath.mpf(-neps)
# dold=mpmath.mp.dps
# mpmath.mp.dps=neps+3 # We work with low precision initially
# h=dict()
# signs=dict();diffs=dict()
# c=dict(); h=dict()
# c[1]=2 ; c[2 ]=3 ; c[3 ]=4
# #met='Hecke'
# met=method
# [diffs[1 ],h[1 ]]=functional(S,R1,M,Y1,Y2,signs,c,first_time=True,method=met,ndigs=neps)
# [diffs[3 ],h[3 ]]=functional(S,R3,M,Y1,Y2,signs,c,first_time=True,method=met,ndigs=neps)
# if S._verbose>1:
# print "diffs: met=",met
# print "R1=",R1
# print "R3=",R3
# for n in list(c.keys()): #.sort():
# for j in list(diffs.keys()): #.sort():
# print "diff[",j,c[n],"]=",diffs[j][n]
# # Sset signs and check zeros
# if met=='Hecke':
# if(h[1 ]*h[3]>mpmath.eps()):
# # We do not have a signchange
# return [0 ,0 ]
# else:
# var=0.0
# for j in range(1 ,3 +1 ):
# var+=abs(diffs[1 ][j])+abs(diffs[3 ][j])
# print "var=",var
# for j in range(1 ,3 +1 ):
# signs[j]=1
# if(diffs[1 ][j]*diffs[3 ][j]>mpmath.eps()):
# # If we do not have a signchange
# # and the absolute values are relatively large
# # there is probably no zero here
# if(abs(diffs[1][j])+abs(diffs[3][j]) > 0.01*var):
# return [0 ,0 ]
# elif(diffs[1 ][j]>0 ):
# signs[j]=-1
# # Recompute functionals using the signs
# if(S._verbose>1):
# print "h1=",h
# print "diffs1=",diffs
# print "signs=",signs
# for k in [1,3]:
# h[k]=0
# for j in range(1,3+1):
# h[k]=h[k]+signs[j]*diffs[k][j]
# Rnew=prediction(h[1 ],h[3 ],R1,R3)
# if S._verbose>1:
# print "h=",h
# print "Rnew=",Rnew
# [diffs[2],h[2]]=functional(S,Rnew,M,Y1,Y2,signs,c,first_time=False,method=met,ndigs=neps)
# zero_in=is_zero_in(h)
# if(zero_in == -1 ):
# R3=Rnew; h[3]=h[2 ]; diffs[3 ]=diffs[2 ]; errest=abs(Rnew-R1)
# else:
# R1=Rnew; h[1 ]=h[2 ]; diffs[1 ]=diffs[2 ]; errest=abs(Rnew-R3)
# step=0
# for j in range(100):
# Rnew=prediction(h[1 ],h[3 ],R1,R3)
# errest=max(abs(Rnew-R1),abs(Rnew-R3))
# if S._verbose>1:
# print "R1,R3,Rnew,errest=",R1,R3,Rnew,errest
# if errest<tol:
# return [Rnew,errest]
# [diffs[2 ],h[2 ]]=functional(S,Rnew,M,Y1,Y2,signs,c,first_time=False,method=met,ndigs=neps)
# zero_in=is_zero_in(h)
# if zero_in==0:
# return [Rnew,errest]
# elif zero_in not in [1,-1]:
# raise StopIteration()
# if zero_in==-1:
# stepz=abs(Rnew-R3)
# R3=Rnew; h[3 ]=h[2 ]; diffs[3 ]=diffs[2 ]; errest=abs(Rnew-R1)
# elif zero_in==1:
# stepz=abs(Rnew-R1)
# R1=Rnew; h[1 ]=h[2 ]; diffs[1 ]=diffs[2 ]; errest=abs(Rnew-R3)
# # If we have gone in the same direction too many times we need to modify our approach
# step=step+zero_in
# if S._verbose>1:
# print "step=",step
# if step>2: # Have gone too many times to the left
# Rtest=Rnew + mpmath.mpf(0.5)*stepz # Need to test if this modified R3 work:
# if S._verbose>1:
# print "Rtest(R)=",Rtest
# [diffs[2 ],h[2 ]]=functional(S,Rtest,M,Y1,Y2,signs,c,False,met,neps)
# if is_zero_in(h) ==-1: # all is ok
# R3=Rtest; h[3]=h[2]; diffs[3]=diffs[2]; step=step-1
# else: # Test another one
# Rtest=Rnew + mpmath.mpf(0.5)*abs(R1-R3) # Need to test if this modified R3 work:
# if S._verbose>1:
# print "Rtest(R)=",Rtest
# [diffs[2 ],h[2 ]]=functional(S,Rtest,M,Y1,Y2,signs,c,False,met,neps)
# if is_zero_in(h) ==-1: # all is ok
# R3=Rtest; h[3]=h[2]; diffs[3]=diffs[2]; step=step-1
# elif step<-2: # Have gone too many times to the right
# Rtest=Rnew - mpmath.mpf(0.5)*stepz
# if S._verbose>1:
# print "Rtest(L)=",Rtest
# [diffs[2 ],h[2 ]]=functional(S,Rtest,M,Y1,Y2,signs,c,False,met,neps)
# if is_zero_in(h) == 1: # all is ok
# R1=Rtest; h[1]=h[2]; diffs[1]=diffs[2]; step=step+1
# else:
# Rtest=Rnew - mpmath.mpf(0.5)*abs(R3-R1)
# if S._verbose>1:
# print "Rtest(L)=",Rtest
# [diffs[2 ],h[2 ]]=functional(S,Rtest,M,Y1,Y2,signs,c,False,met,neps)
# if is_zero_in(h) == 1: # all is ok
# R1=Rtest; h[1]=h[2]; diffs[1]=diffs[2]; step=step+1
# ####
def is_zero_in(h):
    r"""
    Tell which subinterval contains a change of sign.

    INPUT:

    - ''h'' -- dictionary with (real) function values at the keys 1, 2 and 3,
      corresponding to the left endpoint, midpoint and right endpoint.

    OUTPUT:

    - integer: -1 if the sign change is in the left half, 1 if it is in the
      right half, 0 if there is none, and -2 if both halves contain one
      (i.e. the interval would need to be split).

    EXAMPLES::
    """
    left_change = h[1]*h[2] < 0
    right_change = h[3]*h[2] < 0
    if left_change and right_change:
        # sign changes on both sides: we would need to split the interval
        return -2
    if right_change:
        return 1
    if left_change:
        return -1
    return 0
def get_character_sqrt(x):
    r"""
    Return a Dirichlet character y with y*y == x, if one exists.

    INPUT:

    - ``x`` -- a DirichletCharacter (must be even)

    NOTE(review): if ``x`` is even but has no square root in its parent
    group, the ValueError message below ("Need an even character...") is
    misleading -- the search simply failed.  TODO: confirm intended
    behavior for non-DirichletCharacter input.
    """
    if isinstance(x,sage.modular.dirichlet.DirichletCharacter):
        if x.is_even():
            # brute-force search over the parent group
            for y in x.parent().list():
                if y*y == x:
                    return y
    raise ValueError,"Need an even character to get a square root of a character! Got:{0}".format(x)
def prediction(f0,f1,x0,x1):
    r"""
    Predict the location of a zero using the secant method.

    INPUT:

    - ''f0'' -- real : function value at x0
    - ''f1'' -- real : function value at x1
    - ''x0'' -- real
    - ''x1'' -- real

    OUTPUT:

    - real : the zero of the secant line through (x0,f0) and (x1,f1)

    EXAMPLES::

        sage: prediction(-1,1,9,10)
        19/2
        sage: prediction(-1.0,1.0,9.0,10.0)
        9.50000000000000
    """
    return x0 - f0*(x1-x0)/(f1-f0)
def prediction_newton(x,f,df):
    r"""
    Predict a zero using one step of Newton's method.

    INPUT:

    - ''x''  -- real : current approximation
    - ''f''  -- real : f(x)
    - ''df'' -- real : f'(x)

    OUTPUT:

    - real : x - f(x)/f'(x)

    EXAMPLES::

        sage: prediction_newton(1.0,9.0,10.0)
        0.100000000000000

    (The previous docstring incorrectly described the secant method and
    showed the output of ``prediction`` instead.)
    """
    if df==0.0:
        st = "Newtons method failed! \n"
        st += "f'(x)=0. input: f,df,x=%s,%s,%s"
        raise ValueError(st % (f,df,x))
    return x - f/df
def find_Y_and_M(G,R,ndigs=12,Yset=None,Mset=None):
    r"""
    Compute a good value of M and Y for Maass forms on G

    INPUT:

    - ''G'' -- group
    - ''R'' -- real
    - ''ndigs'' -- integer (number of desired digits of precision)
    - ''Yset'' -- real (default None) if set we return M corr. to this Y
    - ''Mset'' -- integer (default None) if set we return Y corr. to this M

    OUTPUT:

    - [Y,M] -- good values of Y (real) and M (integer)

    EXAMPLES::

    TODO:
    Better and more effective bound
    """
    import mpmath
    l=G._level
    if(Mset <> None):
        # then we get Y corr. to this M
        # NOTE(review): this branch computes Y0 but never uses it and never
        # returns -- the Mset option appears to be unimplemented.
        Y0=RR(3).sqrt()/RR(2*l)
    if(Yset==None):
        # default: slightly below the height of the fundamental domain
        Y0=RR(3).sqrt()/RR(2*l)
        Y=mpmath.fp.mpf(0.95*Y0)
    else:
        Y=mpmath.fp.mpf(Yset)
    IR=mpmath.mpc(0,R)
    eps= mpmath.fp.mpf(10 **-ndigs)
    twopiY=mpmath.fp.pi*Y*mpmath.fp.mpf(2)
    M0=get_M_for_maass(R,Y,eps)
    if(M0<10):
        M0=10
    ## Do this in low precision
    dold=mpmath.mp.dps
    # Increase M until the Bessel factor K_{iR}(2*pi*M*Y) is below eps.
    # NOTE(review): if the loop finishes without the error getting small
    # enough, the 'else' clause raises -- so 10000 is a hard upper bound.
    try:
        for n in range(M0,10000,3):
            X=mpmath.pi()*Y*mpmath.mpf(2*n)
            test=mpmath.fp.besselk(IR,X)
            if(abs(test)<eps):
                raise StopIteration()
    except StopIteration:
        M=n
    else:
        M=n
        raise Exception,"Error: Did not get small enough error:=M=%s gave err=%s" % (M,test)
    mpmath.mp.dps=dold
    return [Y,M]
def _testing_kbes(Rt=[1,10,10],Xt=[1,10,100]):
    r"""
    Benchmark helper: time besselk_dp against mpmath.besselk on a grid of
    (R,x) values and print which asymptotic regime each point falls in.

    INPUT:

    - ``Rt`` -- [R0,R1,NR]: sample NR-1 values R = R0 + R1*j/NR
    - ``Xt`` -- [X0,X1,NX]: sample NX-1 values x = X0 + X1*k/NX

    NOTE(review): the mutable-list default arguments are only read here, so
    they are harmless, but callers should not mutate them.
    """
    [R0,R1,NR]=Rt
    [X0,X1,NX]=Xt
    NRr=mpmath.mpf(NR)
    NXr=mpmath.mpf(NX)
    for j in range(1,NR):
        rj=mpmath.mpf(j)
        R=R0+R1*rj/NRr
        print "r=",R
        iR=mpmath.mpc(0,R)
        for k in range(1,NX):
            rk=mpmath.mpf(k)
            x=X0+X1*rk/NXr
            print "r,x=",R,x
            if(x>R):
                print "kbes_pow="
                timeit( "besselk_dp(%s,%s)" %(R,x),repeat=1)
            #else:
            #    print "kbes_rec="
            #    timeit( "besselk_dp_rec(R,x)",repeat=1)
            print "mpmath.besselk="
            timeit("mpmath.besselk(%s,%s)" %(iR,x),repeat=1)
            #print "t1(",R,x,")=",t1
            #print "t2(",R,x,")=",t2
            # Classify the point into the regimes used by the implementation.
            # NOTE(review): the branch R>20 inside "if R<15.0" can never fire.
            if(R<15.0):
                if(x<0.3 *R):
                    print "Case 1"
                elif(x<=max(10.0 +1.2*R,2 *R)):
                    print "Case 2"
                elif(R>20 and x>4 *R):
                    print "Case 3"
                else:
                    print "Case 4"
def _test_Hecke_relations(a=2,b=3,C=None):
    r"""Testing Hecke relations for the Fourier coefficients in C

    INPUT:
    -''a'' -- integer
    -''b'' -- integer
    -''C'' -- dictionary of complex (Fourier coefficients); default None
             (treated as empty)

    OUTPUT:
    -''diff'' -- real : |C(a)C(b)-C(ab)| if (a,b)=1

    EXAMPLE::

        sage: S=MaassWaveForms(Gamma0(1))
        sage: R=mpmath.mpf(9.53369526135355755434423523592877032382125639510725198237579046413534)
        sage: Y=mpmath.mpf(0.85)
        sage: C=coefficients_for_Maass_waveforms(S,R,Y,10,20,12)
        sage: d=_test_Hecke_relations(2,3,C); mppr(d)
        '9.29e-8'
        sage: C=coefficients_for_Maass_waveforms(S,R,Y,30,50,20)
        sage: d=_test_Hecke_relations(2,3,C); mppr(d)
        '3.83e-43'

    (The examples previously passed the arguments in the wrong order,
    ``_test_Hecke_relations(C,2,3)``, which does not match the signature.)
    """
    # fixed: avoid a mutable default argument (shared dict across calls)
    if C is None:
        C = {}
    c=gcd(Integer(a),Integer(b))
    if not C.has_key(0):
        return 0
    if C[0].has_key(a) and C[0].has_key(b) and C[0].has_key(a*b):
        # Hecke relation: c(a)c(b) = sum_{d | (a,b)} c(ab/d^2)
        lhs=C[0][a]*C[0][b]
        rhs=0
        for d in divisors(c):
            rhs=rhs+C[0][Integer(a*b/d/d)]
        return abs(rhs-lhs)
    return 0
def _test_Hecke_relations_all(C={}):
    r"""
    Test all possible Hecke relations.

    INPUT:
    -''C'' -- dictionary of Fourier coefficients (C[0] is the cusp at infinity)

    OUTPUT: dictionary mapping a*b to the (formatted) error |c(a)c(b)-c(ab)|
    for all pairs of primes a,b with a*b <= max index in C[0].

    EXAMPLE::

        sage: S=MaassWaveForms(Gamma0(1))
        sage: mpmath.mp.dps=100
        sage: R=mpmath.mpf(9.53369526135355755434423523592877032382125639510725198237579046413534899129834778176925550997543536649304476785828585450706066844381418681978063450078510030977880577576)
        sage: Y=mpmath.mpf(0.85)
        sage: C=coefficients_for_Maass_waveforms(S,R,Y,30,50,20)
        sage: test=_test_Hecke_relations_all(C); test
        {4: '9.79e-68', 6: '4.11e-63', 9: '4.210e-56', 10: '9.47e-54', 14: '2.110e-44', 15: '4.79e-42', 21: '4.78e-28', 22: '1.02e-25', 25: '9.72e-19', 26: '2.06e-16'}

    We can see how the base precision used affects the coefficients

        sage: mpmath.mp.dps=50
        sage: C=coefficients_for_Maass_waveforms(S,R,Y,30,50,20)
        sage: test=_test_Hecke_relations_all(C); test
        {4: '1.83e-48', 6: '4.75e-43', 9: '3.21e-36', 10: '1.24e-33', 14: '4.41e-25', 15: '1.53e-23', 21: '6.41e-8', 22: '4.14e-6', 25: '91.455', 26: '12591.0'}
    """
    N=max(C[0].keys())
    test=dict()
    for a in prime_range(N):
        for b in prime_range(N):
            if(a*b <= N):
                # fixed: the arguments were passed as (C,a,b), which does not
                # match the signature _test_Hecke_relations(a,b,C) and passed
                # the coefficient dictionary as the prime 'a'.
                test[a*b]=mppr(_test_Hecke_relations(a,b,C))
    return test
def solve_system_for_Maass_waveforms(W,N=None,deb=False,force_type=None):
    r"""
    Choose the correct solver algorithm based on the type of W['V'].

    INPUT:

    - ``W`` -- system dictionary; ``W['V']`` is the coefficient matrix,
      either an mpmath matrix (has attribute ``ctx``) or a
      Matrix_complex_dense (has attribute ``QR``)
    - ``N`` -- normalisation dictionary (default None)
    - ``deb`` -- print debugging information (default False)
    - ``force_type`` -- "mpc" or "mpmath" to force conversion of W['V']
      to the corresponding matrix type before solving

    OUTPUT: dictionary of Fourier coefficients, as returned by the
    selected backend solver.
    """
    if force_type=="mpc" and hasattr(W['V'],"ctx"):
        # convert mpmath matrix -> Matrix_complex_dense
        W['V']=mat_conv_to_mpc(W['V'])
    elif force_type=="mpmath" and hasattr(W['V'],"QR"):
        # convert Matrix_complex_dense -> mpmath matrix
        W['V']=mat_conv_to_mpmath(W['V'])
    if hasattr(W['V'],"ctx"): ## We have an mpmath matrix
        ## Recall that we may need to adjust the precision for mpmath solutions
        return solve_system_for_Maass_waveforms_mpmath(W,N,deb)
    elif hasattr(W['V'],"QR"): # Is a Matrix_complex_dense instance
        return solve_system_for_Maass_waveforms_mpc(W,N,deb)
    else:
        raise ValueError("Unknown type of matrix!: {0}".format(type(W['V'])))
def solve_system_for_Maass_waveforms_mpmath(W,N=None,deb=False,gr=False):
    r"""
    Solve the linear system to obtain the Fourier coefficients of Maass forms

    INPUT:

    - ``W`` --   (system) dictionary

        - ``W['Ms']``  -- M start
        - ``W['Mf']``  -- M stop
        - ``W['nc']``  -- number of cusps
        - ``W['V']``   -- matrix of size ((Ms-Mf+1)*nc)**2
        - ``W['RHS']`` -- right hand side (for inhomogeneous system) matrix of size ((Ms-Mf+1)*nc)*(dim)

    - ``N`` -- normalisation (dictionary, output from the set_norm_for_maass function)

        - ``N['SetCs']``   -- Which coefficients are set
        - ``N['Vals'] ``   -- To which values are these coefficients set
        - ``N['comp_dim']``-- How large is the assumed dimension of the solution space

    - ``deb`` -- print debugging information (default False)
    - ``gr``  -- if True, return the reduced (LHS,RHS) pair instead of solving

    OUTPUT:

    - ``C`` -- Fourier coefficients

    EXAMPLES::

        sage: S=MaassWaveForms(MySubgroup(Gamma0(1)))
        sage: mpmath.mp.dps=20
        sage: R=mpmath.mpf(9.533695261353557554344235235928770323821256395107251982375790464135348991298347781769255509975435366)
        sage: Y=mpmath.mpf(0.5)
        sage: W=setup_matrix_for_Maass_waveforms(S,R,Y,12,22)
        sage: N=S.set_norm_maass(1)
        sage: C=solve_system_for_Maass_waveforms(W,N)
        sage: C[0][2]*C[0][3]-C[0][6]
        mpc(real='-1.8055426724989656270259e-14', imag='1.6658248366482944572967e-19')

    If M is too large and the precision is not high enough the matrix might be numerically singular

        W=setup_matrix_for_Maass_waveforms(G,R,Y,20,40)
        sage: C=solve_system_for_Maass_waveforms(W,N)
        Traceback (most recent call last)
        ...
        ZeroDivisionError: Need higher precision! Use > 23 digits!

    Increasing the precision helps

        sage: mpmath.mp.dps=25
        sage: R=mpmath.mpf(9.533695261353557554344235235928770323821256395107251982375790464135348991298347781769255509975435366)
        sage: C=solve_system_for_Maass_waveforms(W,N)
        sage: C[0][2]*C[0][3]-C[0][6]
        mpc(real='3.780824715556976438911480324e-25', imag='2.114746048869188750991752872e-99')
    """
    import mpmath
    V=W['V']
    Ms=W['Ms']
    Mf=W['Mf']
    nc=W['nc']
    M=W['space']
    verbose=M._verbose
    Ml=Mf-Ms+1
    if N==None:
        N = M.set_norm()
    # Determine the dimensions of V (numpy-style .shape or mpmath .rows/.cols)
    if hasattr(V,'shape'):
        nrows,ncols=V.shape
    else:
        if hasattr(V,"rows"):
            nrows=V.rows
            ncols=V.cols
    if(ncols<>Ml*nc or nrows<>Ml*nc):
        raise Exception," Wrong dimension of input matrix!"
    if M._verbose>0:
        print "Norm=",N
    SetCs=N['SetCs'][0]
    Vals=N['Vals']
    comp_dim=N['comp_dim']
    # For cuspidal solutions also pin down the constant term at each cusp != 0
    if(N['cuspidal']):
        for i in range(1,nc):
            if(SetCs.count((i,0))==0):
                SetCs.append((i,Ml))
                for fn_j in range(comp_dim):
                    Vals[fn_j][(i,Ml)]=0
    # Translate (cusp,index) pairs into flat column indices of V
    setc_list=list()
    vals_list=dict()
    for j in range(comp_dim):
        vals_list[j]=dict()
    for r,n in SetCs:
        if r*Ml+n-Ms<0:
            continue
        setc_list.append(r*Ml+n-Ms)
        for j in range(comp_dim):
            vals_list[j][r*Ml+n-Ms]=Vals[j][(r,n)]
    if verbose>0:
        print "setc_list=",setc_list
        print "vals_list=",vals_list
    if(Ms<0):
        use_sym=0
    else:
        use_sym=1
    #if(use_sym==1 and SetCs.count(0)>0):
    #    num_set=len(N['SetCs'])-1
    #else:
    num_set=len(setc_list)
    t=V[0,0]
    # Pick the mpmath context matching the entry type of V
    if(isinstance(t,float)):
        mpmath_ctx=mpmath.fp
    else:
        mpmath_ctx=mpmath.mp
    if(W.has_key('RHS')):
        RHS=W['RHS']
    else:
        RHS=mpmath_ctx.matrix(int(Ml*nc-num_set),int(comp_dim))
    LHS=mpmath_ctx.matrix(int(Ml*nc-num_set),int(Ml*nc-num_set))
    roffs=0
    if(deb):
        print "num_set,use_sym=",num_set,use_sym
        print "SetCs,Vals=",SetCs,Vals
        print "V.rows,cols=",nrows,ncols
        print "LHS.rows,cols=",LHS.rows,LHS.cols
        print "RHS.rows,cols=",RHS.rows,RHS.cols
        print "mpctx=",mpmath_ctx
    # Build the reduced system: skip rows/columns of the set coefficients and
    # move their contribution to the right hand side.
    for r in range(nrows):
        #cr=r+Ms
        if setc_list.count(r)>0:
            roffs=roffs+1
            continue
        for fn_j in range(comp_dim):
            RHS[r-roffs,fn_j]=mpmath_ctx.mpf(0)
            for cset in setc_list:
                v=vals_list[fn_j][cset]
                if(mpmath_ctx==mpmath.mp):
                    tmp=mpmath_ctx.mpmathify(v)
                elif(isinstance(v,float)):
                    tmp=mpmath_ctx.mpf(v)
                else:
                    tmp=mpmath_ctx.mpc(v)
                tmp=tmp*V[r,cset]
                RHS[r-roffs,fn_j]=RHS[r-roffs,fn_j]-tmp
                #print "RHS[",r-roffs,fn_j,"]=",RHS[r-roffs,fn_j]
                #print "V[",r,",",cset,"]=",V[r,cset]
        coffs=0
        for k in range(ncols):
            if setc_list.count(k)>0:
                coffs=coffs+1
                continue
            #print "roffs,coffs=",roffs,coffs
            #print "r-roffs,k-coffs=",r-roffs,k-coffs
            LHS[r-roffs,k-coffs]=V[r,k]
            #print "LHS[",r,k,"]=",LHS[r-roffs,k-coffs]
    if gr:
        return LHS,RHS
    done=0; j=0
    oldprec=mpmath.mp.dps
    # NOTE(review): j is never incremented in this loop, so the j<=10 bound
    # never triggers; termination relies entirely on LU_decomp eventually
    # succeeding as the precision is raised.  TODO: confirm intended.
    while done==0 and j<=10:
        if W['space']._verbose>1:
            print "Trying to solve with prec=",mpmath.mp.dps
        try:
            A, p = mpmath_ctx.LU_decomp(LHS)
            done=1
        except ZeroDivisionError:
            # Numerically singular: estimate the precision needed from the
            # smallest infinity-norm of the rows and retry.
            t1=smallest_inf_norm(LHS)
            if verbose>0:
                print "n=",smallest_inf_norm(LHS)
            t2=mpmath_ctx.log10(smallest_inf_norm(LHS))
            t3=mpmath_ctx.ceil(-t2)
            isinf=False
            if(isinstance(t3,float)):
                isinf = (t3 == float(infinity))
            if(isinstance(t3,sage.libs.mpmath.ext_main.mpf)):
                isinf = ((t3.ae(mpmath.inf)) or t3==mpmath.inf)
            if(isinstance(t3,sage.rings.real_mpfr.RealLiteral)):
                isinf = t3.is_infinity()
            if(isinf):
                raise ValueError, " element in LHS is infinity! t3=%s" %t3
            t=int(t3)
            mpmath.mp.dps= t + 5
    # Back-substitute for each component of the solution space and re-insert
    # the set coefficients at their original positions.
    X=dict()
    for fn_j in range(comp_dim):
        X[fn_j] = dict() #mpmath.matrix(int(Ml),int(1))
        b = mpmath_ctx.L_solve(A, RHS.column(fn_j), p)
        TMP = mpmath_ctx.U_solve(A, b)
        roffs=0
        res = mpmath_ctx.norm(mpmath_ctx.residual(LHS, TMP, RHS.column(fn_j)))
        #print "res(",fn_j,")=",res
        for i in range(nc):
            X[fn_j][i]=dict()
        # coefficients at the first cusp
        for n in range(Ml):
            if setc_list.count(n)>0:
                roffs=roffs+1
                #print "X[",fn_j,",",n,",Vals[fn_j][n]
                X[fn_j][0][n+Ms]=vals_list[fn_j][n]
                continue
            X[fn_j][0][n+Ms]=TMP[n-roffs,0]
            #print "X[",fn_j,",",n+Ms,"=",TMP[n-roffs,0]
        # coefficients at the remaining cusps
        for i in range(1,nc):
            for n in range(Ml):
                if setc_list.count(n+i*Ml)>0:
                    #(SetCs.count(n+Ms+i*Ml)>0):
                    roffs=roffs+1
                    # print "X[",fn_j,",",n,",Vals[fn_j][n]
                    X[fn_j][i][n+Ms]=vals_list[fn_j][n+i*Ml]
                    continue
                X[fn_j][i][n+Ms]=TMP[n+i*Ml-roffs,0]
    # return x
    #print "keys:",X.keys()
    mpmath.mp.dps=oldprec
    return X
def solve_system_for_Maass_waveforms_mpc(W,N=None,gr=False,cn=False):
r"""
Solve the linear system to obtain the Fourier coefficients of Maass forms
INPUT:
- ``H`` -- Space of Maass waveforms
- ``W`` -- (system) dictionary
- ``W['Ms']`` -- M start
- ``W['Mf']`` -- M stop
- ``W['nc']`` -- number of cusps
- ``W['V']`` -- matrix of size ((Ms-Mf+1)*nc)**2
- ``W['RHS']`` -- right hand side (for inhomogeneous system) matrix of size ((Ms-Mf+1)*nc)*(dim)
- ``N`` -- normalisation (dictionary, output from the set_norm_for_maass function, default None)
- if N=None we assume that the solution is uniquely determined by the prinicpal part (in the right hand side)
- ``N['SetCs']`` -- Which coefficients are set
- ``N['Vals'] `` -- To which values are these coefficients set
- ``N['comp_dim']``-- How large is the assumed dimension of the solution space
- ``N['num_set']`` -- Number of coefficients which are set
- ''gr'' -- only return the reduced matrix and right hand side. do not perform the solving .
- ''cn'' -- logical (default False) set to True to compute the max norm of V^-1
OUTPUT:
- ``C`` -- Fourier coefficients
EXAMPLES::
sage: G=MySubgroup(Gamma0(1))
sage: mpmath.mp.dps=20
sage: R=mpmath.mpf(9.533695261353557554344235235928770323821256395107251982375790464135348991298347781769255509975435366)
sage: Y=mpmath.mpf(0.5)
sage: W=setup_matrix_for_Maass_waveforms(G,R,Y,12,22)
sage: N=set_norm_maass(1)
sage: C=solve_system_for_Maass_waveforms(W,N)
sage: C[0][2]*C[0][3]-C[0][6]
mpc(real='-1.8055426724989656270259e-14', imag='1.6658248366482944572967e-19')
If M is too large and the precision is not high enough the matrix might be numerically singular
W=setup_matrix_for_Maass_waveforms(G,R,Y,20,40)
sage: C=solve_system_for_Maass_waveforms(W,N)
Traceback (most recent call last)
...
ZeroDivisionError: Need higher precision! Use > 23 digits!
Increasing the precision helps
sage: mpmath.mp.dps=25
sage: R=mpmath.mpf(9.533695261353557554344235235928770323821256395107251982375790464135348991298347781769255509975435366)
sage: C=solve_system_for_Maass_waveforms(W,N)
sage: C[0][2]*C[0][3]-C[0][6]
mpc(real='3.780824715556976438911480324e-25', imag='2.114746048869188750991752872e-99')
"""
V=W['V']
Ms=W['Ms']
Mf=W['Mf']
H=W['space']
#nc=W['nc']
Ml=Mf-Ms+1
get_reduced_matrix=gr
verbose = H._verbose
comp_norm=cn
nc=H.group().ncusps()
if V.ncols()<>Ml*nc or V.nrows()<>Ml*nc:
raise Exception," Wrong dimension of input matrix!"
if N==None:
N = H.set_norm(1)
SetCs=N['SetCs'][0]
Vals=N['Vals']
comp_dim=N['comp_dim']
num_set=len(SetCs[0])
t=V[0,0]
CF=MPComplexField(H._prec)
MS = MatrixSpace(CF,int(Ml*nc-num_set),int(comp_dim))
RHS=Matrix_complex_dense(MS,0,True,True)
MS = MatrixSpace(CF,int(Ml*nc-num_set),int(Ml*nc-num_set))
LHS=Matrix_complex_dense(MS,0,True,True)
nrows=V.nrows()
ncols=V.ncols()
if(N['cuspidal']):
for i in range(1,nc):
if(SetCs.count((i,0))==0):
SetCs.append((i,Ml))
for fn_j in range(comp_dim):
Vals[fn_j][(i,Ml)]=0
if verbose>0:
print "SetCs=",SetCs
setc_list=list()
vals_list=dict()
for j in range(comp_dim):
vals_list[j]=dict()
for r,n in SetCs:
if r*Ml+n-Ms<0:
continue
setc_list.append(r*Ml+n-Ms)
for j in range(comp_dim):
vals_list[j][r*Ml+n-Ms]=Vals[j][(r,n)]
if verbose>0:
print "Ml=",Ml
print "num_set=",num_set
print "SetCs=",SetCs
print "Vals=",Vals
print "setc_list=",setc_list
print "vals_list=",vals_list
print "V.rows=",V.nrows()
print "V.cols=",V.ncols()
print "LHS.rows=",LHS.nrows()
print "LHS.cols=",LHS.ncols()
print "RHS.rows=",RHS.nrows()
print "RHS.cols=",RHS.ncols()
print "N=",N
num_rhs=0
if(W.has_key('RHS')):
num_rhs=W['RHS'].ncols()
if num_rhs>0 and num_rhs<>comp_dim:
raise ValueError,"Need same number of right hand sides (or just one) as the number of set coefficients!"
if V.nrows() <> nc*Ml:
raise ArithmeticError," Matrix does not have correct size!"
roffs=0
for r in range(nrows):
#cr=r+Ms
if setc_list.count(r)>0:
roffs=roffs+1
continue
for fn_j in range(comp_dim):
if W.has_key('RHS'):
RHS[r-roffs,fn_j]=-W['RHS'][r,rhs_j]
else:
RHS[r-roffs,fn_j]=CF(0)
for cset in setc_list:
tmp=CF(vals_list[fn_j][cset])
tmp=tmp*V[r,cset]
RHS[r-roffs,fn_j]=RHS[r-roffs,fn_j]-tmp
coffs=0
for k in range(ncols):
if setc_list.count(k)>0:
coffs=coffs+1
if verbose>1:
print "skipping colum:",k
continue
if verbose>1 and r-roffs==1:
print "Setting LHS[1,",k-coffs
LHS[r-roffs,k-coffs]=V[r,k] # for a in range(nc):
if get_reduced_matrix:
# return [LHS,RHS]
return [LHS,RHS]
maxit=100;i=0
done=False
dps0=CF.prec()
# while (not done and i<=maxit):
# try:
# Q,R = LHS.qr_decomposition()
# done=True
# except ZeroDivisionError:
# t=int(ceil(-log_b(smallest_inf_norm(LHS),10)))
# dps=t+5*i; i=i+1
# print "raising number of digits to:",dps
# LHS.set_prec(dps)
# # raise ZeroDivisionError,"Need higher precision! Use > %s digits!" % t
# if i>=maxit:
# raise ZeroDivisionError,"Can not raise precision enough to solve system! Should need > %s digits! and %s digits was not enough!" % (t,dps)
if comp_norm:
max_norm=LHS.norm()
for j in range(LHS.rows):
#y=mpmath_ctx.matrix(LHS.rows,int(1)); y[j,0]=1
y = Vector_complex_dense(vector(CF,LHS.rows).parent(),0)
y[j]=1
TMP = LHS.solve(y) #pmath_ctx.U_solve(A, b)
tmpnorm=max(map(abs,TMP))
if(tmpnorm>max_norm):
max_norm=tmpnorm
print "max norm of V^-1=",max_norm
X=dict()
for fn_j in range(comp_dim):
X[fn_j] = dict() #mpmath.matrix(int(Ml),int(1))
v = RHS.column(fn_j)
if verbose>1:
print "len(B)=",len(v)
TMP = LHS.solve(v)
roffs=0
res = (LHS*TMP-v).norm()
if verbose>0:
print "res(",fn_j,")=",res
for i in range(nc):
X[fn_j][i]=dict()
for i in range(nc):
for n in range(Ml):
if setc_list.count(n+i*Ml)>0:
roffs=roffs+1
X[fn_j][i][n+Ms]=vals_list[fn_j][n+i*Ml]
continue
X[fn_j][i][n+Ms]=TMP[n+i*Ml-roffs]
return X
def Hecke_action(self,p):
    r"""
    Return T_p(F), the image of self under the Hecke operator T_p.

    INPUT:

    - ``p`` -- integer (presumably a prime; not checked here)

    Note: Only Fourier coefficients at infinity (cusp index 0) are computed.

    NOTE(review): this module-level function duplicates the identically
    named method defined earlier in this file.
    """
    res = copy(self)
    c = res._coeffs  # NOTE(review): unused local variable
    x=self._space._multiplier._character
    for r in res._coeffs.keys():
        # replace the coefficients of each component by the transformed ones:
        # c'(n) = c(n*p) + chi(p)*c(n/p) (second term only when p | n)
        res._coeffs[r]=dict()
        res._coeffs[r][0]=dict()
        Ms = min(self._coeffs[r][0].keys())
        Mf = max(self._coeffs[r][0].keys())
        if Ms<0:
            # two-sided coefficient range: shrink so that n*p stays in range
            Ms=ceil(RR(Ms)/RR(p))
            Mf=floor(RR(Mf)/RR(p))
        for n in range(Ms,Mf+1):
            tmp=0
            if self._coeffs[r][0].has_key(n*p):
                tmp += self._coeffs[r][0][n*p]
            if (n%p)==0:
                m = Integer(n/p)
                if self._coeffs[r][0].has_key(m):
                    tmp+=x(p)*self._coeffs[r][0][m]
            res._coeffs[r][0][n]=tmp
    return res
def solve_system_for_Maass_waveforms_GaussElim(W,N=None,gr=False,cn=False):
r"""
Solve the linear system to obtain the Fourier coefficients of Maass forms
"""
V=W['V']
Ms=W['Ms']
Mf=W['Mf']
H=W['space']
Ml=Mf-Ms+1
get_reduced_matrix=gr
verbose = H._verbose
comp_norm=cn
nc=H.group().ncusps()
if V.ncols()<>Ml*nc or V.nrows()<>Ml*nc:
raise Exception," Wrong dimension of input matrix!"
if N==None:
N = H.set_norm(1)
SetCs=N['SetCs'][0]
Vals=N['Vals']
comp_dim=N['comp_dim']
num_set=len(SetCs[0])
t=V[0,0]
CF=MPComplexField(H._prec)
MS = MatrixSpace(CF,int(Ml*nc-num_set),int(comp_dim))
RHS=Matrix_complex_dense(MS,0,True,True)
MS = MatrixSpace(CF,int(Ml*nc-num_set),int(Ml*nc-num_set))
LHS=Matrix_complex_dense(MS,0,True,True)
nrows=V.nrows()
ncols=V.ncols()
if(N['cuspidal']):
for i in range(1,nc):
if(SetCs.count((i,0))==0):
SetCs.append((i,Ml))
for fn_j in range(comp_dim):
Vals[fn_j][(i,Ml)]=0
if verbose>0:
print "SetCs=",SetCs
setc_list=list()
vals_list=dict()
for j in range(comp_dim):
vals_list[j]=dict()
for r,n in SetCs:
if r*Ml+n-Ms<0:
continue
setc_list.append(r*Ml+n-Ms)
for j in range(comp_dim):
vals_list[j][r*Ml+n-Ms]=Vals[j][(r,n)]
if verbose>0:
print "Ml=",Ml
print "num_set=",num_set
print "SetCs=",SetCs
print "Vals=",Vals
print "setc_list=",setc_list
print "vals_list=",vals_list
print "N=",N
num_rhs=0
if(W.has_key('RHS')):
num_rhs=W['RHS'].ncols()
if num_rhs>0 and num_rhs<>comp_dim:
raise ValueError,"Need same number of right hand sides (or just one) as the number of set coefficients!"
if V.nrows() <> nc*Ml:
raise ArithmeticError," Matrix does not have correct size!"
roffs=0
for r in range(nrows):
#cr=r+Ms
if setc_list.count(r)>0:
roffs=roffs+1
continue
for fn_j in range(comp_dim):
if W.has_key('RHS'):
RHS[r-roffs,fn_j]=-W['RHS'][r,rhs_j]
else:
RHS[r-roffs,fn_j]=CF(0)
for cset in setc_list:
tmp=CF(vals_list[fn_j][cset])
tmp=tmp*V[r,cset]
RHS[r-roffs,fn_j]=RHS[r-roffs,fn_j]-tmp
coffs=0
for k in range(ncols):
if setc_list.count(k)>0:
coffs=coffs+1
continue
LHS[r-roffs,k-coffs]=V[r,k] # for a in range(nc):
if get_reduced_matrix:
# return [LHS,RHS]
return [LHS,RHS]
maxit=100;i=0
dps0=CF.prec()
X=dict()
for fn_j in range(comp_dim):
X[fn_j] = dict() #mpmath.matrix(int(Ml),int(1))
b = RHS.column(fn_j)
done=0
while (not done and i<=maxit):
try:
C=solve_using_Gauss_elem(LHS,b)
done=1
except ZeroDivisionError:
pass
if verbose>0:
res = (LHS*C-b).norm()
print "res(",fn_j,")=",res
for i in range(nc):
X[fn_j][i]=dict()
for i in range(nc):
roffs=0
for n in range(Ml):
if setc_list.count(n+i*Ml)>0:
roffs=roffs+1
X[fn_j][i][n+Ms]=vals_list[fn_j][n+i*Ml]
continue
#print "C[",n+i*Ml-roffs,"=",C[n+i*Ml-roffs]
X[fn_j][i][n+Ms]=C[n+i*Ml-roffs]
return X
def mat_conv_to_mpc(A):
    r"""
    Convert a matrix to a ``Matrix_complex_dense`` over ``MPComplexField``.

    INPUT:

    - ``A`` -- a matrix which is either already MPC (has ``QR``), an mpmath
      matrix (has ``ctx``), or a Sage-style matrix (has ``nrows``).

    OUTPUT: ``Matrix_complex_dense`` with the same entries.

    RAISES: ``TypeError`` if ``A`` is of none of the supported types.
    """
    if hasattr(A,"QR"):
        # already a Matrix_complex_dense
        return A
    if hasattr(A,"ctx"):
        # mpmath matrix; use the current working precision
        m=A.rows
        n=A.cols
        prec=mpmath.mp.prec
    elif hasattr(A,"nrows"):
        m=A.nrows()
        n=A.ncols()
        prec=A[0,0].parent().prec()
    else:
        # FIX: corrected the typo "Cabn" in the original error message
        raise TypeError("Can not convert matrix of type:{0}".format(type(A)))
    CF=MPComplexField(prec)
    MS=MatrixSpace(CF,m,n)
    V=Matrix_complex_dense(MS,0)
    for i in range(m):
        for j in range(n):
            tmp=A[i,j]
            # NOTE(review): .real/.imag are mpmath-style attributes; for the
            # Sage branch these are bound methods -- confirm entry types.
            V[i,j]=CF(tmp.real,tmp.imag)
    return V
def mat_conv_to_mpmath(A):
    r"""
    Convert a Sage-style matrix to an mpmath matrix at the precision of the
    entries of ``A``.

    INPUT:

    - ``A`` -- an mpmath matrix (has ``ctx``; returned unchanged) or a
      Sage-style matrix (has ``nrows``).

    OUTPUT: mpmath matrix with the same entries.

    RAISES: ``TypeError`` for unsupported input types (the original silently
    fell through, ending in a NameError).
    """
    if hasattr(A,"ctx"):
        # already an mpmath matrix
        return A
    if not hasattr(A,"nrows"):
        raise TypeError("Can not convert matrix of type:{0}".format(type(A)))
    m=A.nrows()
    n=A.ncols()
    old_prec=mpmath.mp.prec
    mpmath.mp.prec=A[0,0].parent().prec()
    try:
        V=mpmath.mp.matrix(int(m),int(n))
        for i in range(m):
            for j in range(n):
                tmp=A[i,j]
                V[i,j]=mpmath.mp.mpc(tmp.real(),tmp.imag())
    finally:
        # always restore the global working precision
        mpmath.mp.prec=old_prec
    return V
def mat_conv_to_complex(A):
    # NOTE(review): this body is identical to mat_conv_to_mpmath -- despite
    # the name it returns an *mpmath* matrix, not Sage complex entries.
    # Confirm against callers whether a different conversion was intended.
    if hasattr(A,"ctx"):
        # already an mpmath matrix
        return A
    if hasattr(A,"nrows"):
        m=A.nrows()
        n=A.ncols()
    # NOTE(review): if A has neither "ctx" nor "nrows", m/n are unbound and
    # the call below raises NameError.
    old_prec=mpmath.mp.prec
    # temporarily switch mpmath to the precision of A's entries
    mpmath.mp.prec=A[0,0].parent().prec()
    V=mpmath.mp.matrix(int(m),int(n))
    for i in range(m):
        for j in range(n):
            tmp=A[i,j]
            V[i,j]=mpmath.mp.mpc(tmp.real(),tmp.imag())
    mpmath.mp.prec=old_prec
    return V
def my_kbes(r,x,mp_ctx=None):
    r"""Scaled K-Bessel function.

    INPUT:
    - ''r'' -- real
    - ''x'' -- real
    - ''mp_ctx'' -- mpmath context (default None => fast double precision)
    OUTPUT:
    - real -- K_ir(x)*exp(pi*r/2)
    EXAMPLES::

        sage: my_kbes(9.0,1.0)
        mpf('-0.71962866121965863')
        sage: my_kbes(9.0,1.0,mpmath.fp)
        -0.71962866121967572
    """
    import mpmath
    # FIX: the original tested 'mpamth.dps' and called 'mp_cyx.besselk',
    # both NameErrors (typos for mp_ctx); it also called mpmath.mp.pi(),
    # which is not callable in current mpmath.
    if mp_ctx==None or mp_ctx==mpmath.fp or mp_ctx.dps<=15:
        # low precision requested: use the fast double-precision routine
        return besselk_dp(RR(r),RR(x))
    else:
        pi=mp_ctx.pi  # pi at the precision of the supplied context
        k=mp_ctx.besselk(mp_ctx.mpc(0,r),mp_ctx.mpf(x))
        f=k*mp_ctx.exp(r*mp_ctx.mpf(0.5)*pi)
        return f.real
def my_kbes_diff_r(r,x,mp_ctx=None):
    r"""
    Approximation to the derivative with respect to R of the scaled K-Bessel function.
    INPUT:
    - ''r'' -- real
    - ''x'' -- real
    - ''ctx'' -- mpmath context (default mpmath.mp)
    OUTPUT:
    - real -- K_ir(x)*exp(pi*r/2)
    EXAMPLES::

        sage: my_kbes_diff_r(9.45,0.861695276766 ,mpmath.fp)
        -0.31374673969963851
        sage: my_kbes_diff_r(9.4,0.861695276766 ,mpmath.fp)
        0.074219541623676832
    """
    # NOTE(review): despite the name and docstring, the finite difference
    # below perturbs the *argument* x (my_kbes(r, x+h)), not the order r,
    # and (f2-f1)/h is the *negative* forward difference.  It thus looks
    # like -d/dx of the scaled Bessel function; confirm intent vs. callers.
    # NOTE(review): mp_ctx=None (the documented default) would fail at
    # mp_ctx.mpf -- callers apparently always pass a context.
    h=mp_ctx.mpf(1e-8)
    f1 = my_kbes(r,x+h,mp_ctx)
    f2 = my_kbes(r,x,mp_ctx)
    diff=(f2-f1)/h
    return diff
### If we need to figure out the format of input, i.e. how many levels of dictionary we have
## Ex: C is input coefficients and:
## C[0][0][0] = 0
## C[0][0][1] = 1
def dict_depth(d,i=0):
    r"""
    Return the nesting depth of ``d`` along key 0, i.e. the number of times
    one can follow ``d[0]`` through nested dictionaries.

    INPUT:

    - ``d`` -- arbitrary object (recursion continues only for dicts with key 0)
    - ``i`` -- starting depth (used internally by the recursion)

    EXAMPLES: dict_depth({0:{0:1}}) == 2, dict_depth(5) == 0.
    """
    if isinstance(d,dict):
        # FIX/idiom: use the 'in' operator instead of the deprecated has_key()
        if 0 in d:
            return dict_depth(d[0],i+1)
    return i
def scattering_determinant_Hecke_triangle(s,q,prec=0,use_eisenstein=0,**kwds):
    r"""
    The scattering determinant phi(s) for the Hecke triangle group G_q.

    INPUT:

    - ``s`` -- complex
    - ``q`` -- integer (arithmetic groups q=3,4,6 have a closed formula)
    - ``prec`` -- precision used if ``s`` does not carry one
    - ``use_eisenstein`` -- if nonzero, compute numerically via Eisenstein series
    """
    if use_eisenstein != 0:
        # numerical route through the Eisenstein series; works for any q
        grp = HeckeTriangleGroup(q)
        space = MaassWaveForms(grp)
        if prec>0:
            s = ComplexField(prec)(s)
        return space.scattering_determinant(s,**kwds)
    if q not in (3,4,6):
        # only the arithmetic cases admit the explicit formula below
        raise NotImplementedError
    phi = scattering_determinant_sl2z(s,prec=prec)
    if q==3:
        return phi
    # lambda = sqrt(2) for q=4 and sqrt(3) for q=6
    lam = s.parent()(2 if q==4 else 3).sqrt()
    prefactor = lam**(1-2*s)
    loglam = lam.log()
    correction = prefactor*((1-s)*loglam).cosh()/(s*loglam).cosh()
    return correction*phi
def scattering_determinant_sl2z(s,prec=0,verbose=0):
r"""
Computes the scattering determinant :
phi(s)=sqrt(pi)gamma(s-1/2)zeta(2s-1)/gamma(s)/zeta(2s)
for PSL2(Z).
INPUT:
- ``s`` -- complex
- ``prec`` -- precision used if s does not have precision
"""
if prec<=0:
prec = 53
if hasattr(s,"prec"):
if prec<s.prec():
prec = s.prec()
if verbose>0:
print "prec=",prec
RF=RealField(prec)
CF = ComplexField(prec)
s = CF(s.real(),s.imag())
sqpi=RF.pi().sqrt()
mp1=RF(1); mp2=RF(2); mp05=RF(1)/RF(2)
res = sqpi*(s-mp05).gamma()*(mp2*s-mp1).zeta()
res = res/s.gamma()/(mp2*s).zeta()
return res
def eisenstein_series_coefficient_sl2z(s,m,prec=0):
    r"""
    The Fourier coefficients of the Eisenstein series E(s;z) for PSL(2,Z),
    via the explicit divisor-sum formula:

        a_m(s) = 2*pi^s*|m|^(s-1/2)/(gamma(s)*zeta(2s)) * sum_{d|m} d^(1-2s).

    INPUT:

    - ``s`` -- complex
    - ``m`` -- nonzero integer
    - ``prec`` -- precision used if ``s`` does not have precision (default 53)
    """
    # FIX: removed the dead branch 'elif prec>0: prec = prec' (a no-op)
    if hasattr(s,"prec"):
        prec = s.prec()
    elif prec<=0:
        prec = 53
    RF=RealField(prec)
    CF = ComplexField(prec)
    s = CF(s)
    mppi=RF.pi()
    mp1=RF(1); mp2=RF(2); mp05=RF(1)/RF(2)
    res = mp2*mppi**s*abs(m)**(s-mp05)
    res = res/s.gamma()/(mp2*s).zeta()
    # divisor sum sum_{d|m} d^(1-2s)
    summa=CF(0)
    for d in divisors(m):
        summa+=RF(d)**(mp1-2*s)
    res = res * summa
    return res
| Python |
# -*- coding: utf-8 -*-
#*****************************************************************************
# Copyright (C) 2010 Fredrik Strömberg <stroemberg@mathematik.tu-darmstadt.de>,
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
r"""
The Weil representation corresponding to the discriminant form $D$ of a rank-one lattice $L=(ZZ, q:x-> Nx**2)$, i.e. $D=(Z/NZ,q mod 1)$.
"""
from sage.all import Parent,QQ,ZZ,Integer,SL2Z,CyclotomicField,lcm,odd_part,kronecker,gcd,IntegerModRing,matrix,is_odd,valuation,sqrt,MatrixSpace,CC,powerset
from sage.rings.complex_mpc import MPComplexField
from psage.matrix.matrix_complex_dense import Matrix_complex_dense
class WeilRepDiscriminantForm(Parent):
r""" An elementary version of the Weil representation of the finite quadratic module
given by D=Z/2NZ.
"""
def __init__(self,N,dual=False):
r""" Creates a Weil representation (or its dual) of the discriminant form given by D=Z/2NZ.
EXAMPLES::
sage: WR=WeilRepDiscriminantForm(1,dual=True)
sage: WR.D
[0, 1/2]
sage: WR.D_as_integers
[0, 1]
sage: WR.Qv
[0, -1/4]
sage: WR=WeilRepDiscriminantForm(1,dual=False)
sage: WR.D
[0, 1/2]
sage: WR.D_as_integers
[0, 1]
sage: WR.Qv
[0, 1/4]
"""
## If N<0 we use |N| and set dual rep. to true
if N<0:
self.N=-N
self.dual=True
self._is_dual_rep= not dual # do we use dual representation or not
else:
self.N=N
self._is_dual_rep=dual
N2=Integer(2*self.N)
self.group=SL2Z
self._level=4*self.N
self.D_as_integers=range(0,N2)
self.D=list()
for x in range(0,N2):
y=QQ(x/N2)
self.D.append(y)
self.Qv=list() # List of +- q(x) for x in D
self.Qv_times_level=list() # List of +- 4N*q(x) for x in D
if self._is_dual_rep: # we add this already here for efficiency
sig=-1
else:
sig=1
for x in self.D:
y=sig*self.Q(x)
self.Qv.append(y)
self.Qv_times_level.append(self._level*y)
self._signature = sig
self._sigma_invariant = CyclotomicField(8).gens()[0]**-self._signature
def list(self):
return self.DEF
def __reduce__(self):
r""" Used for pickling.
"""
return(WeilRepDiscriminantForm,(self.N,self._is_dual_rep))
def __cmp__(self,other):
r""" Compare self to other.
"""
if(not isinstance(other,WeilRepDiscriminantForm)):
return False
eq = (self.N==other.N) and (self._is_dual_rep==other._is_dual_rep)
print "eq=",eq
return eq
def _repr_(self):
r"""
Returns string representation of self.
EXAMPLES::
sage: WR=WeilRepDiscriminantForm(1,dual=False);WR
Weil representation of the discriminant form given by ZZ/2ZZ with quadratic form Q(x)=1*x**2 mod 1.
sage: WR=WeilRepDiscriminantForm(1,dual=True);WR
Dual of Weil representation of the discriminant form given by ZZ/2ZZ with quadratic form Q(x)=1*x**2 mod 1.
"""
if self._is_dual_rep:
s="Dual of "
else:
s=""
s+="Weil representation of the discriminant form given by ZZ/"+str(2*self.N)+"ZZ with quadratic form Q(x)="+str(self.N)+"*x**2 mod 1."
return s
def _latex_(self):
r""" Returns LaTeX string representation of self.
EXAMPLES::
sage: WR=WeilRepDiscriminantForm(2,dual=False)
sage: latex(WR)
Weil representation of the discriminant form given by $\mathbb{Z}/4\mathbb{Z}$ with quadratic form $Q(x)=2\,x^{2} \mathrm{mod} 1$.
"""
s="\\begin{verbatim}\\end{verbatim}"
if self._is_dual_rep:
s+="Dual of "
else:
s+=""
# s+="Weil representation of the discriminant form given by $\\mathbb{Z}/"+str(2*self.N)+"\\mathbb{Z}$ \\text{ with quadratic form } Q(x)="+latex(self.N)+"\\,x^{2}\\, \\mathrm{mod}\\, 1$ .\end{verbatim}}"
s+="Weil representation of the discriminant form given by $\\mathbb{Z}/"+str(2*self.N)+"\\mathbb{Z}$"
s+=" with quadratic form $Q(x)="+latex(self.N)+"\\,x^{2}\\, \\mathrm{mod}\\, 1$."
return s
def is_dual(self):
r"""
Returns True if we have the dual Weil representation, otherwise False.
EXAMPLES::
sage: WR=WeilRepDiscriminantForm(1,dual=True);WR.is_dual()
True
sage: WR=WeilRepDiscriminantForm(1,dual=False);WR.is_dual()
False
"""
return self._is_dual_rep
def Q(self,x):
r"""
Quadratic form on x, Q(x) mod 1
INPUT:
-''x'' -- rational
OUTPUT:
-''Q(x'' -- rational
EXAMPLES::
sage: DF=DiscriminantForm(1,False)
sage: DF.Q(1/2)
1/4
"""
r=self.N*x*x
p=r.numerator()
q=r.denominator()
res=QQ(p % q)/QQ(q)
return res
def B(self,x,y):
r"""
Bilinear form B(x,y) mod 1, givenby the quadratic form Q
INPUT:
-''x'' -- rational
-''y'' -- rational
OUTPUT:
-''B(x,y)'' -- rational
EXAMPLES::
sage: WR=WeilRepDiscriminantForm(3,dual=True)
sage: WR.B(1/6,1/2)
1/2
sage: WR.B(1/6,1/6)
1/6
sage: WR.B(1/6,-1+1/6)
1/6
"""
#print "N=",self.N,x,y
r=Integer(2)*self.N*x*y
p=r.numerator()
q=r.denominator()
res=QQ(p % q)/QQ(q)
return res
def sigma_invariant(self):
return self._sigma_invariant
def negative_element(self,r):
r"""
Return the negative of r in the abelian group of self.
"""
if r in self.D:
minus_r = QQ(1 - r)
elif r in self.D_as_integers:
minus_r = self.N*2 - r
else:
raise ValueError,"Need element in the abelian group of self! Got {0}".format(r)
return minus_r
def Qc(self,c,x):
r""" compute Q_c(x) for x in D^c*
"""
Dcstar=self._D_times_c_star(c)
if (not x in Dcstar):
raise ValueError," Call only for x in D^c*! Got x=%s and D^c*=%s" %(x,Dcstar)
xc=0
if(valuation(c,2)==valuation(2*self.N,2)):
xc=QQ(1)/QQ(2)
cy=x-xc
Dc=self._D_times_c(c)
for y in Dc:
p=numerator(y*c)
q=denominator(y*c)
if( QQ(p%q)/QQ(q) == QQ(cy)):
Qc=c*self.Q(y)+self.B(xc,y)
return Qc
return ArithmeticError," Could not find y s.t. x=x_c+cy! x=%s and c=%s " %(x,c)
### We now add functions for computing the corresponding Weil representation
def xi(self,A):
r""" The eight-root of unity in front of the Weil representation.
INPUT:
-''N'' -- integer
-''A'' -- element of PSL(2,Z)
EXAMPLES::
sage: A=SL2Z([41,77,33,62])
sage: WR.xi(A)
-zeta8^3]
sage: S,T=SL2Z.gens()
sage: WR.xi(S)
-zeta8^3
sage: WR.xi(T)
1
sage: A=SL2Z([-1,1,-4,3])
sage: WR.xi(A)
-zeta8^2
sage: A=SL2Z([0,1,-1,0])
sage: WR.xi(A)
-zeta8
"""
a=Integer(A[0,0]); b=Integer(A[0,1])
c=Integer(A[1,0]); d=Integer(A[1,1])
if(c==0):
return 1
z=CyclotomicField(8).gen()
N=self.N
N2=odd_part(N)
Neven=Integer(2*N/N2)
c2=odd_part(c)
Nc=gcd(Integer(2*N),Integer(c))
cNc=Integer(c/Nc)
f1=kronecker(-a,cNc)
f2=kronecker(cNc,Integer(2*N/Nc))
if(is_odd(c)):
s=c*N2
elif( c % Neven == 0):
s=(c2+1-N2)*(a+1)
else:
s=(c2+1-N2)*(a+1)-N2*a*c2
r=-1-QQ(N2)/QQ(gcd(c,N2))+s
xi=f1*f2*z**r
return xi
def rho(self,M,silent=0,numeric=0,prec=-1):
r""" The Weil representation acting on SL(2,Z).
INPUT::
-``M`` -- element of SL2Z
- ''numeric'' -- set to 1 to return a Matrix_complex_dense with prec=prec instead of exact
- ''prec'' -- precision
EXAMPLES::
sage: WR=WeilRepDiscriminantForm(1,dual=False)
sage: S,T=SL2Z.gens()
sage: WR.rho(S)
[
[-zeta8^3 -zeta8^3]
[-zeta8^3 zeta8^3], sqrt(1/2)
]
sage: WR.rho(T)
[
[ 1 0]
[ 0 -zeta8^2], 1
]
sage: A=SL2Z([-1,1,-4,3]); WR.rho(A)
[
[zeta8^2 0]
[ 0 1], 1
]
sage: A=SL2Z([41,77,33,62]); WR.rho(A)
[
[-zeta8^3 zeta8^3]
[ zeta8 zeta8], sqrt(1/2)
]
"""
N=self.N; D=2*N; D2=2*D
if numeric==0:
K=CyclotomicField (lcm(4*self.N,8))
z=K(CyclotomicField(4*self.N).gen())
rho=matrix(K,D)
else:
CF = MPComplexField(prec)
RF = CF.base()
MS = MatrixSpace(CF,int(D),int(D))
rho = Matrix_complex_dense(MS)
#arg = RF(2)*RF.pi()/RF(4*self.N)
z = CF(0,RF(2)*RF.pi()/RF(4*self.N)).exp()
[a,b,c,d]=M
fak=1; sig=1
if c<0: # need to use the reflection
sig=-1
if numeric==0:
fz=CyclotomicField(4).gen() # = -i
else:
fz=CF(0,1)
# the factor is rho(Z) sigma(Z,-A)
#if(c < 0 or (c==0 and d>0)):
# fak=-fz
#else:
#sig=1
#fz=1
fak=fz
a=-a; b=-b; c=-c; d=-d;
A=SL2Z([a,b,c,d])
if numeric==0:
chi=self.xi(A)
else:
chi=CF(self.xi(A).complex_embedding(prec))
elif(c==0): # then we use the simple formula
if(d<0):
sig=-1
if numeric==0:
fz=-CyclotomicField(4).gen()
else:
fz=CF(0,-1)
fak=fz
a=-a; b=-b; c=-c; d=-d;
else:
fak=1
for alpha in range(D):
arg=(b*alpha*alpha ) % D2
if(sig==-1):
rho[D-1-alpha,alpha]=fak*z**arg
else:
#print "D2=",D2
#print "b=",b
#print "arg=",arg
rho[alpha,alpha]=z**arg
return [rho,1]
else:
if numeric==0:
chi=self.xi(M)
else:
chi=CF(self.xi(M).complex_embedding(prec))
Nc=gcd(Integer(D),Integer(c))
#chi=chi*sqrt(CF(Nc)/CF(D))
if( valuation(Integer(c),2)==valuation(Integer(D),2)):
xc=Integer(N)
else:
xc=0
if(silent>0):
print "c=",c
print "xc=",xc
print "chi=",chi
for alpha in range(D):
al=QQ(alpha)/QQ(D)
for beta in range(D):
be=QQ(beta)/QQ(D)
c_div=False
if(xc==0):
alpha_minus_dbeta=(alpha-d*beta) % D
else:
alpha_minus_dbeta=(alpha-d*beta-xc) % D
if(silent > 0): # and alpha==7 and beta == 7):
print "alpha,beta=",alpha,',',beta
print "c,d=",c,',',d
print "alpha-d*beta=",alpha_minus_dbeta
invers=0
for r in range(D):
if((r*c - alpha_minus_dbeta) % D ==0):
c_div=True
invers=r
break
if(c_div and silent > 0):
print "invers=",invers
print " inverse(alpha-d*beta) mod c=",invers
elif(silent>0):
print " no inverse!"
if(c_div):
y=invers
if(xc==0):
argu=a*c*y**2+b*d*beta**2+2*b*c*y*beta
else:
argu=a*c*y**2+2*xc*(a*y+b*beta)+b*d*beta**2+2*b*c*y*beta
argu = argu % D2
tmp1=z**argu # exp(2*pi*I*argu)
if(silent >0):# and alpha==7 and beta==7):
print "a,b,c,d=",a,b,c,d
print "xc=",xc
print "argu=",argu
print "exp(...)=",tmp1
print "chi=",chi
print "sig=",sig
if(sig==-1):
minus_alpha = (D - alpha) % D
rho[minus_alpha,beta]=tmp1*chi
else:
rho[alpha,beta]=tmp1*chi
#print "fak=",fak
if numeric==0:
return [fak*rho,sqrt(QQ(Nc)/QQ(D))]
else:
return [CF(fak)*rho,RF(sqrt(QQ(Nc)/QQ(D)))]
def level(self):
return self._level
def from_discriminant(self,D):
r"""
Return the (r,n) s.t. D=n+-q(r).
"""
ZI=IntegerModRing(self._level)
if(self.is_dual()):
x=ZI(-D)
else:
x=ZI(D)
for j in self.D:
x=self.Qv[j]
n=QQ(D)/QQ(self._level)-QQ(x)
if(n % self._level == 0):
print "D/4N-q(v)=",n
return (self.D[j],ZZ(n/self._level))
def _xc(self,c,as_int=False):
r"""
Return the element x_c of order 2 (for this Discriminant form x_c=0 or 1/2)
INPUT:
-''c'' -- integer
-''as_int'' -- logical, if true then we return the set D^c as a list of integers
"""
x_c=0
if(valuation(2*self.N,2)==valuation(c,2)):
if(as_int):
x_c=self.N
else:
x_c=QQ(1)/QQ(2)
return x_c
def _D_times_c(self,c,as_int=False):
r"""
Return the set D^c={cx | x in D}
INPUT:
-''c'' -- integer
-''s_int'' -- logical, if true then we return the set D^c as a list of integers
"""
Dc=list()
if(as_int):
setD=self.D_as_integers
else:
setD=self.D
for x in setD:
if(as_int):
z=(c*x) % len(self.D)
else:
y=c*x
p=y.numer(); q=y.denom(); z=(p % q)/q
#print "c*",x,"=",z
Dc.append(z)
Dc.sort()
# make unique
for x in Dc:
i=Dc.count(x)
if(i>1):
for j in range(i-1):
Dc.remove(x)
return Dc
def _D_lower_c(self,c,as_int=False):
r"""
Return the set D_c={x in D| cx = 0}
INPUT:
-''c'' -- integer
-''s_int'' -- logical, if true then we return the set D^c as a list of integers
"""
Dc=list()
if(as_int):
setD=self.D_as_integers
else:
setD=self.D
for x in setD:
if(as_int):
z=(c*x) % len(self.D)
else:
y=c*x
p=y.numer(); q=y.denom(); z=(p % q)/q
#print "c*",x,"=",z
if(z==0):
Dc.append(x)
Dc.sort()
# make unique
for x in Dc:
i=Dc.count(x)
if(i>1):
for j in range(i-1):
Dc.remove(x)
return Dc
def _D_times_c_star(self,c,as_int=False):
r"""
Return the set D^c*=x_c+{c*x | x in D}, where x_c=0 or 1/2
INPUT:
-''c'' -- integer
-''as_int'' -- logical, if true then we return the set D^c as a list of integers
"""
Dc=self._D_times_c(c,as_int)
Dcs=list()
x_c=self._xc(c,as_int)
for x in Dc:
if(as_int):
z=(x + x_c) % len(self.D)
else:
y=QQ(c*x)+x_c
p=y.numer(); q=y.denom(); z=(p % q)/q
#print "c*",x,"=",z
Dcs.append(z)
Dcs.sort()
# make unique
for x in Dcs:
i=Dcs.count(x)
if(i>1):
for j in range(i-1):
Dcs.remove(x)
return Dcs
def maximal_isotropic_subgroup(self):
r"""
Returns the maximal isotropic subgroup of self.
"""
S=list()
for a in self.D:
if(self.Q(a)==0 and a<>0):
S.append(a)
# S is now a list of all isotropic elements except 0
PS=list(powerset(S))
PS.reverse()
# PS now contains all subsets of isotropic elements (except 0)
# with decreasing sizes. We now need to find the first, which together with 0
# is a group.
#print "PS=",PS
for A in PS:
A.append(0)
#print "Test the isotropic set: S=",A
ok=True
for x in A:
for y in A:
z=red_mod1(x+y)
if(not z in A):
#print "S was not a subgroup!"
ok=False
if(ok):
A.sort()
return A
raise ArithmeticError, "Could not find maximal isotropic subgroup!"
| Python |
#-*- coding: utf-8 -*-
r"""
Programs to compute holomorphic vector-valued Poincaré series.
In particular to find a basis and compute a Gram matrix consisting of the Fourier coefficients p_{D_i,r_i}(D_j,r_j)
AUTHOR:
- Fredrik Strömberg (May 2010)
EXAMPLES::
sage: time A=make_gram_matrices(1,5,19.5,100,1E-20,force_prec=True)
CPU times: user 13.21 s, sys: 0.07 s, total: 13.28 s
Wall time: 13.31 s
sage: A[1]['data']
{0: 1.9997172464628335446644215348, 1: 0.038518031648348148837786590124, 2: 2.0220235164389306047369407568}
sage: A[1]['maxerr']
2.5825453423927096265697084347e-24
sage: A[1]['indices']
{0: [-3, 1], 1: [-4, 0]}
"""
#*****************************************************************************
# Copyright (C) 2010 Fredrik Strömberg <stroemberg@mathematik.tu-darmstadt.de>,
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
import re,os
# compiled pattern used elsewhere to recognize matrix-space strings
p= re.compile('MatrixSpace')
from poincare_series_alg_vv import *
import mpmath
# Unless the sage-add-ons are installed we provide a stub below.
try:
    # probe whether the external dimension formula is available
    # (presumably provided by the sage-add-ons package -- TODO confirm)
    d = dimension_jac_cusp_forms(2,1,-1)
except:
    # NOTE(review): deliberately broad except -- this is a best-effort
    # availability probe, any failure means we fall back to the stub.
    ## If this does not exist.
    def dimension_jac_cusp_forms(a,b,c):
        raise NotImplementedError,"You need the 'dimension_jac_form' package! Please call this routine with a set dimension instead!"
# global verbosity level for this module (0 = quiet)
silent=0
def get_discs(N,dmax,sgn):
    r"""
    Compute two lists of pairs (D,r), for positive and negative discriminants
    D with D = r^2 mod 4N and |D| < dmax.

    NOTE: the parameter ``sgn`` is accepted but not used by the current
    implementation.
    """
    ring = IntegerModRing(4*N)
    positive = list()
    negative = list()
    for absD in range(1,dmax):
        for r in range(1,N):
            # positive candidate: absD = r^2 (mod 4N)
            if ring(absD - r*r) == 0:
                positive.append((absD,r))
            # negative candidate: -absD = r^2 (mod 4N)
            if ring(-absD - r*r) == 0:
                negative.append((-absD,r))
    return positive,negative
def rn_from_Dn(N,sgn,l):
    r""" Compute (n,r) from (D,r) with D=4Nn+sgn*r^2.

    INPUT:

    - ``N`` -- integer
    - ``sgn`` -- +1 or -1
    - ``l`` -- pair [D,r]

    RAISES: ValueError unless D = sgn*r^2 (mod 4N).
    """
    [D,r]=l
    # FIX: removed the unused local N4 and the IntegerModRing detour;
    # the plain modular test below is equivalent.
    if (D - sgn*r*r) % (4*N) != 0:
        raise ValueError(" Need D=sgn*r^2 mod 4N got N=%s, r=%s D=%s" %(N,r,D))
    n=(D-sgn*r*r)/(4*N)
    return [n,r]
def rn_from_D(N,sgn,D):
    r""" Compute (n,r) from D with D=4Nn+sgn*r^2, r in {0,...,N-1}.

    INPUT:

    - ``N`` -- integer
    - ``sgn`` -- +1 or -1
    - ``D`` -- integer with sgn*D a square mod 4N

    RAISES: ValueError if sgn*D is not a square mod 4N.
    """
    N4=4*N
    # squareness test mod 4N (equivalent to IntegerModRing(4N)(sgn*D).is_square())
    if not any((j*j - sgn*D) % N4 == 0 for j in range(N4)):
        raise ValueError(" Need D square mod 4N got N=%s, D=%s" %(N,D))
    for j in range(N):
        if (D - sgn*j*j) % N4 == 0:
            r=j
            n=(D-sgn*r*r)/(4*N)
            # FIX: the original had a bare 'exit' here, which is a no-op
            # (it just evaluates the site.Quitter object), so the loop kept
            # running and returned the *last* root instead of the first.
            break
    return [n,r]
## Function to compute a whole set of Fourier coefficients for Poincaré series at once
def ps_coefficients_holomorphic_vec(N,weight,l,tol=1E-40,prec=501,maxit=10000,force_prec=False):
    r""" Coefficients of vector-valued holomorphic Poincaré series
    for the dual Weil representation corr. to the lattice ZZ with quadratic form q(x)=Nx^2.
    (Note: dual => r(T)_{a,b}=delta_{a,b}*e(-q(a))

    INPUT: l=list of data for coefficients to compute
           format='Disc' if data supplied is a list of tuples
           [([D,r],[D',r']),...] where D,D' are fundamental discriminants
           and D==r^2 mod 4*N and D'==r'^2 mod 4N
           The coefficients we are computing are the (r',D')-th coefficient of the (r,D)-th Poincaré series.
           Conversion between the two formats are:
           -D/4N = n - r^2/4N  (for the dual representation) and
           D/4N = n + r^2/4N  for the standard weil representation

    OUTPUT: the dict returned by holom_poincare_c_vec (keys include 'data',
            'errs' and 'ok' -- see that function).
    """
    NNmax=0
    NN=dict()
    # b_m/b_p: truncation flags passed through to holom_poincare_c_vec
    b_m=0
    b_p=1
    if(silent>1):
        print "l=",l
    # accept either a dict (has .keys()) or a list of tuple pairs, which is
    # flattened into a dict of 4-tuples (D,r,D',r')
    try:
        l.keys()
        ll=l
    except AttributeError:
        ll=dict()
        for j in range(len(l)):
            ll[j]=(l[j][0][0],l[j][0][1],l[j][1][0],l[j][1][1])
    # determine the largest truncation bound needed over all requested entries
    for j in ll.keys():
        NN[j]=get_trunc_bd(N,weight,ll[j],prec,tol=0.1*tol)
        if(NN[j]>NNmax):
            NNmax=NN[j]
    if(NNmax>1E10):
        raise ValueError,"Need too many (%s) terms! Probably to small weight!" %NN
    if(silent>1):
        print "NNmax=",NNmax
    res=holom_poincare_c_vec(ll,weight,N,NNmax,maxit,b_m,b_p,prec,tol,force_prec)
    return res
def gram_matrix(N,weight,prec=501,tol=1E-40,sv_min=1E-1,sv_max=1E15,bl=None,set_dim=None,force_prec=False):
    r""" Computes a matrix of p_{r,D}(r',D')
    for a basis of P_{r,D}, i.e. dim linearly independent P's

    INPUT: N      = Integer
           weight = Real

    OPTIONAL:
           tol    = error bound for the Poincaré series
           sv_min = minimal allowed singular value when determining whether a given set is linarly independent or not.
           sv_max = maximally allowed singular value
           bl     = list of pairs (D_i,r_i) from which we compute a matrix of coeffficients p_{D_i,r_i}(D_j,r_j)

    OUTPUT: the result dict of ps_coefficients_holomorphic_vec, augmented with
            keys 'indices', 'maxerr', 'matrix', 'det' and 'inv'.  Also writes
            several progress/result files to the working directory.
    """
    # If we have supplied a list of D's and r's we make a gram matrix relative to these
    # otherwise we find a basis, i.e. linearly independent forms with correct dimension
    wt='%.4f'% weight
    if(N<10):
        stN="0"+str(N)
    else:
        stN=str(N)
    v=dict()
    # marker file signalling work in progress (removed when done)
    filename_work="__N"+stN+"-"+wt+"--finding basis.txt"
    fp=open(filename_work,"write")
    fp.write("starting to find basis")
    fp.close()
    if(silent>0):
        print "Forcing precision:",force_prec
    set_silence_level(0)
    if(bl<>None):
        dim=len(bl)
        l=bl
    else:
        if(set_dim<>None and set_dim >0):
            dim=set_dim
        else:
            dim=dimension_jac_cusp_forms(int(weight+0.5),N,-1)
        l=list_of_basis(N,weight,prec,tol,sv_min,sv_max,set_dim=dim)
    j=0
    for [D,r] in l.values():
        for [Dp,rp] in l.values():
            # Recall that the gram matrix is symmetric. We need only compute the upper diagonal
            if(v.values().count([Dp,rp,D,r])==0):
                v[j]=[D,r,Dp,rp]
                j=j+1
    # now v is a list we can get into computing coefficients
    # first we print the "gram data" (list of indices) to the file
    s=str(N)+": (AI["+str(N)+"],["
    indices=dict()
    for j in range(len(l)):
        Delta=l[j][0]
        r=l[j][1]
        # sanity check: each basis pair must satisfy r^2 = Delta (mod 4N)
        diff=(r*r-Delta) % (4*N)
        if(diff<>0):
            raise ValueError, "ERROR r^2=%s not congruent to Delta=%s mod %s!" %(r*r, Delta, 4*N)
        s=s+"("+str(Delta)+","+str(r)+")"
        indices[j]=[Delta,r]
        if(j<len(l)-1):
            s=s+","
        else:
            s=s+"]),"
    s=s+"\n"
    if(silent>0):
        print s+"\n"
    filename2="PS_Gramdata"+stN+"-"+wt+".txt"
    fp=open(filename2,"write")
    fp.write(s)
    fp.close()
    try:
        os.remove(filename_work)
    except os.error:
        print "Could not remove file:",filename_work
        pass
    # second marker file for the main computation phase
    filename_work="__N"+stN+"-"+wt+"--computing_gram_matrix.txt"
    fp=open(filename_work,"write")
    fp.write("")
    fp.close()
    res=ps_coefficients_holomorphic_vec(N,weight,v,tol,prec,force_prec=force_prec)
    set_silence_level(0)
    res['indices']=indices
    # collect the maximal error over all computed coefficients
    maxerr=0.0
    for j in res['errs'].keys():
        tmperr=abs(res['errs'][j])
        if(tmperr>maxerr):
            maxerr=tmperr
        # switch format for easier vewing
        res['errs'][j]=RR(tmperr)
    if(silent>0):
        print "maxerr=",RR(maxerr)
    res['maxerr']=maxerr
    wt_phalf='%.4f'% (weight+0.5)
    filename3="PS_Gramerr"+stN+"-"+wt+".txt"
    fp=open(filename3,"write")
    # NOTE(review): bare 'wt' below is a no-op statement (leftover debug?)
    wt
    s="MAXERR["+wt_phalf+"]["+stN+"]="+str(RR(maxerr))
    fp.write(s)
    fp.close()
    if(res['ok']):
        Cps=res['data']
    else:
        print "Failed to compute Fourier coefficients!"
        return 0
    RF=RealField(prec)
    A=matrix(RF,dim)
    kappa=weight
    fourpi=RF(4.0)*pi.n(prec)
    one=RF(1.0)
    N4=RF(4*N)
    C=dict()
    if(silent>1):
        print "v=",v
        print "dim=",dim
    lastix=0
    # First set the upper right part of A
    for j in range(dim):
        ddim=dim-j
        if(silent>1):
            print "j=",j,"ddim=",ddim," lastix=",lastix
        for k in range(0,ddim):
            # need to scale with |D|^(k+0.5)
            if(silent>1):
                print "k=",k
                print "lastix+k=",lastix+k
            mm=RF(abs(v[lastix+k][0]))/N4
            tmp=RF(mm**(weight-one))
            if(silent>1):
                print "ddim+k=",ddim+k
            A[j,j+k]=Cps[lastix+k]*tmp
            C[v[lastix+k][0],v[lastix+k][1]]=Cps[lastix+k]
        lastix=lastix+k+1
    # And add the lower triangular part to mak the matrix symmetric
    for j in range(dim):
        for k in range(0,j):
            A[j,k]=A[k,j]
    res['matrix']=A
    # compute the determinant both as floats and as intervals of width 2*tol
    dold=mpmath.mp.dps
    mpmath.mp.dps=int(prec/3.3)
    AInt=mpmath.matrix(int(A.nrows()),int(A.ncols()))
    AMp=mpmath.matrix(int(A.nrows()),int(A.ncols()))
    for ir in range(A.nrows()):
        for ik in range(A.ncols()):
            AInt[ir,ik]=mpmath.mpi(A[ir,ik]-tol,A[ir,ik]+tol)
            AMp[ir,ik]=mpmath.mpf(A[ir,ik])
    d=mpmath.det(AMp)
    if(silent>1):
        print "det(A-as-mpmath)=",d
    di=mpmath.det(AInt)
    if(silent>1):
        print "det(A-as-interval)=",di
    res['det']=(RF(di.a),RF(di.b))
    filename="PS_Gram"+stN+"-"+wt+".txt"
    if(silent>1):
        print "printing to file: "+filename
    print_matrix_to_file(A,filename,'A['+str(N)+']')
    if(silent>1):
        print "A-A.transpose()=",norm(A-A.transpose())
    B=A^-1
    if(silent>1):
        print "A=",A.n(100)
        print "det(A)=",di
        print "Done making inverse!"
    res['inv']=B
    mpmath.mp.dps=dold
    filename="PS_Gram-inv"+stN+"-"+wt+".txt"
    print_matrix_to_file(B,filename,' AI['+str(N)+']')
    # first make the filename
    s='%.1e'%tol
    filename3="PS_Coeffs"+stN+"-"+wt+"-"+s+".sobj"
    # If the file already exist we load it and append the new data
    if(silent>0):
        print "saving data to ",filename3
    try:
        f=open(filename3,"read")
    except IOError:
        if(silent>0):
            print "no file before!"
        # do nothing
    else:
        if(silent>0):
            print "file: "+filename3+" exists!"
        f.close()
        Cold=load(filename3)
        for key in Cold.keys():
            # keep previously stored coefficients that were not recomputed
            if(not C.has_key(key)): # then we addd it
                print"key:",key," does not exist in the new version!"
                C[key]=Cold[key]
    save(C,filename3)
    ## Save the whole thing
    filename="PS_all_gram"+stN+"-"+wt+".sobj"
    save(res,filename)
    ## our work is completed and we can remove the marker file
    try:
        os.remove(filename_work)
    except os.error:
        print "Could not remove file:",filename_work
        pass
    return res
def list_of_basis(N,weight,prec=501,tol=1e-20,sv_min=1E-1,sv_max=1E15,set_dim=None):
    r""" Returns a list of pairs (r,D) forming a basis

    Candidates (D,r) are tried in order of increasing |D|; each is accepted
    if the interval determinant of the extended coefficient matrix provably
    excludes zero (i.e. the new Poincaré series is linearly independent of
    the ones already collected).
    """
    # First we find the smallest Discriminant for each of the components
    if(set_dim<>None and set_dim >0):
        dim=set_dim
    else:
        dim=dimension_jac_cusp_forms(int(weight+0.5),N,-1)
    basislist=dict()
    num_gotten=0
    co_tmp=dict()
    num_gotten=0
    C0=1
    RF=RealField(prec)
    if(silent>1):
        print "N=",N
        print "dim=",dim
        print "sv_min=",sv_min
        print "sv_max=",sv_max
    Aold=Matrix(RF,1)
    tol0=1E-20 #tol
    # we start with the first discriminant, then the second etc.
    Z2N=IntegerModRing(2*N)
    ZZ4N=IntegerModRing(4*N)
    for Dp in [1..max(1000,100*dim)]:
        D=-Dp # we use the dual of the Weil representation
        D4N=ZZ4N(D)
        if(not(is_square(D4N))):
            continue
        for r in my_modsqrt(D4N,N):
            # I want to make sure that P_{(D,r)} is independent from the previously computed functions
            # The only sure way to do this is to compute all submatrices (to a much smaller precision than what we want at the end)
            # The candidate is [D,r] and we need to compute the vector of [D,r,D',r']
            # for all D',r' already in the list
            ltmp1=dict()
            ltmp2=dict()
            j=0
            # NOTE(review): the loop variable Dp below *clobbers* the outer
            # loop variable Dp -- harmless only because the outer value is
            # not used again before the next iteration; confirm.
            for [Dp,rp] in basislist.values():
                ltmp1[j]=[D,r,Dp,rp]
                ltmp2[j]=[Dp,rp,D,r]
                j=j+1
            ltmp1[j]=[D,r,D,r]
            ctmp1=ps_coefficients_holomorphic_vec(N,weight,ltmp1,tol0)
            if(j >0):
                ctmp2=ps_coefficients_holomorphic_vec(N,weight,ltmp2,tol0)
            # A is the old matrix extended by one row and column for the candidate
            A=matrix(RF,num_gotten+1)
            for k in range(Aold.nrows()):
                for l in range(Aold.ncols()):
                    A[k,l]=Aold[k,l]
            # Add the (D',r',D,r) for each D',r' in the list
            tmp=RF(1.0)
            for l in range(num_gotten):
                # we do not use the scaling factor when
                # determining linear independence
                A[num_gotten,l]=ctmp2['data'][l]*tmp
            # Add the (D,r,D',r') for each D',r' in the list
            for l in range(num_gotten+1):
                A[l,num_gotten]=ctmp1['data'][l]*tmp
            # Now we have to determine whether we have a linearly independent set or not
            dold=mpmath.mp.dps
            mpmath.mp.dps=int(prec/3.3)
            AInt=mpmath.matrix(int(A.nrows()),int(A.ncols()))
            AMp=mpmath.matrix(int(A.nrows()),int(A.ncols()))
            if(silent>0):
                print "tol0=",tol0
            # interval entries of width 2*tol0 around each computed value
            for ir in range(A.nrows()):
                for ik in range(A.ncols()):
                    AInt[ir,ik]=mpmath.mp.mpi(A[ir,ik]-tol0,A[ir,ik]+tol0)
                    AMp[ir,ik]=mpmath.mpf(A[ir,ik])
            d=mpmath.det(AMp)
            di=mpmath.mp.mpi(mpmath.mp.det(AInt))
            if(silent>0):
                print "mpmath.mp.dps=",mpmath.mp.dps
                print "det(A)=",d
                print "det(A-as-interval)=",di
                print "d.delta=",di.delta
            # accept the candidate only if zero is provably excluded from
            # the interval determinant
            zero=mpmath.mpi(0)
            if(zero not in di):
                if(silent>1):
                    print "Adding D,r=",D,r
                basislist[num_gotten]=[D,r]
                num_gotten=num_gotten+1
                if(num_gotten>=dim):
                    return basislist
                else:
                    Aold=A
            else:
                if(silent>1):
                    print " do not use D,r=",D,r
            # endif
            mpmath.mp.dps=dold
    # endfor
    if(num_gotten < dim):
        raise ValueError," did not find enough good elements for a basis list!"
def make_gram_matrices(N1,N2,k,prec=501,tol=1E-40,sv_min=1E-1,sv_max=1E15,force_prec=False):
    r"""
    Compute the Gram matrix for every level N with N1 <= N <= N2.

    INPUT:
    - ``N1``, ``N2`` -- range of levels (inclusive)
    - ``k``          -- weight
    - remaining parameters are passed straight through to ``gram_matrix``

    OUTPUT: a dict mapping each level N to gram_matrix(N,k,...).
    """
    return dict((level, gram_matrix(level, k, prec, tol, sv_min, sv_max,
                                    force_prec=force_prec))
                for level in range(N1, N2 + 1))
def print_matrix_to_file(A,filename,varname='A'):
    r"""
    Write the matrix A to ``filename`` as an evaluable Sage expression
    of the form ``varname=matrix(RF,[[...],[...]])``.

    INPUT:
    - ``A``        -- a matrix (anything with nrows(), ncols() and [j,k] access)
    - ``filename`` -- path of the output file (overwritten)
    - ``varname``  -- (default 'A') variable name used on the left-hand side

    FIXES: the file was opened with the non-standard mode string "write"
    (only worked because CPython 2 inspects just the first character) and
    was not closed if a write raised; we now use mode "w" inside a
    ``with`` block.  The unused ``prec`` local was removed.
    """
    dimr=A.nrows()
    dimc=A.ncols()
    with open(filename,"w") as fu:
        fu.write(varname+"=matrix(RF,[")
        for j in range(dimr):
            # one row: "[e0,e1,...,ek" followed by "]," or the final "]"
            fu.write("["+",".join(str(A[j,k]) for k in range(dimc)))
            if j<dimr-1:
                fu.write("],")
            else:
                fu.write("]")
        fu.write("])\n")
# the square root mod 2N is a set-valued function
def my_modsqrt(a,N):
    r""" Return the list of $r\in\{0,\dots,N\}$ with $r^2 \equiv a \mod 4N$.

    Representatives in [0,N] suffice since $(2N-r)^2 \equiv r^2 \mod 4N$.
    """
    modulus = 4 * N
    target = a % modulus
    return [r for r in range(N + 1) if (r * r) % modulus == target]
def upper_submatrix(A,j0,k0):
    r"""
    Return the upper-left submatrix B with B[j,k]=A[j,k] for j<=j0, k<=k0.

    ``j0`` and ``k0`` are clamped to the last valid row/column index.

    BUGFIX: the original clamped an oversized j0 (resp. k0) to
    A.nrows() (resp. A.ncols()) itself, i.e. one PAST the last valid
    index, so the copy loop then read A[A.nrows(),*] and raised an
    IndexError.  We now clamp to nrows()-1 / ncols()-1.
    """
    j0 = min(j0, A.nrows() - 1)
    k0 = min(k0, A.ncols() - 1)
    B=matrix(A.base_ring(),j0+1,k0+1)
    for j in range(j0+1):
        for k in range(k0+1):
            B[j,k]=A[j,k]
    return B
| Python |
r"""
Algorithms for holomorphic and non-holomorphic Poincare series.
"""
from sage.all import ComplexField,inverse_mod
from multiplier_systems import *
from mysubgroup import *
from poincare_series_alg import *
class PoincareSeries(SageObject):
def __init__(self,G,k=0,prec=53,multiplier=None,verbose=0,holomorphic=True):
r"""
Setup the associated space.
"""
if isinstance(G,(int,Integer)):
self._N=G
self._group=MySubgroup(Gamma0(G))
else:
self._group=G
self._N=G.level()
self._k = k
self._verbose = verbose
self._holomorphic = holomorphic
self._prec=prec
self._RF=RealField(prec)
self._CF=ComplexField(prec)
def K(self,m,n,c):
r"""
K(m,n,c) = sum_{d (c)} e((md+n\bar{d})/c)
"""
summa=0
z=CyclotomicField(c).gen()
print "z=",z
for d in range(c):
if gcd(d,c)>1:
continue
try:
dbar = inverse_mod(d,c)
except ZeroDivisionError:
print "c=",c
print "d=",d
raise ZeroDivisionError
arg=m*dbar+n*d
#print "arg=",arg
summa=summa+z**arg
return summa
def C(self,m,n,Nmax=50):
k = self._k
if n>0:
if self._holomorphic:
res=Aplus_triv(m,k,n,self._N,self._prec,Nmax,verbose=self._verbose)
else:
res=Bplus_triv(m,k,n,self._N,self._prec,Nmax,verbose)
elif n==0:
if self._holomorphic:
res=Azero_triv(m,k,n,self._N,self._prec,Nmax,self._verbose)
else:
res=Bzero_triv(m,k,n,self._N,self._prec,Nmax,self._verbose)
else:
if self._holomorphic:
res=Aminus_triv(m,k,n,self._N,self._prec,Nmax,self._verbose)
else:
res=Bminus_triv(m,k,n,self._N,self._prec,Nmax,self._verbose)
return res
def B0plus(self,m,n,N=10):
k = self._k
res=Bplus_triv(m,k,n,self._N,self._prec,N)
#if n==0:
# summa=0
# for c in range(1,N):
# cn=c*self._N
# #term=self._CF(self.K(m,n,cn))/self._RF(cn)**2
# term=Ktriv(self._N,m,n,cn,self._prec)/self._RF(cn)**2
# summa=summa+term
#f=self._RF(4)*self._RF.pi()**2
#return f*summa
return res
def B0plus_old(self,m,n,k=2,N=10):
if n==0:
summa=self._CF(0)
for c in range(1,N):
cn=c*self._N
#term=self._CF(self.K(m,n,cn))/self._RF(cn)**2
term=self.K(m,n,cn) # Ktriv(self._N,m,n,cn,self._prec)/self._RF(cn)**2
#print "K,m,n(",cn,")=",term
term=self._CF(term)/self._RF(cn)**k
#print "term(",cn,")=",term
summa=summa+term
f=self._RF(2**k)*self._RF.pi()**k
f=f*self._RF(m)**(k-1)
f=f*self._CF(0,1)**(self._RF(k+2))
f=f/self._RF(factorial(k-1))
return f*summa
def Aminus_triv(self,m,n):
k = self._k
N = self._N
if n==0:
summa=self._CF(0)
for c in range(1,N):
cn=c*self._N
#term=self._CF(self.K(m,n,cn))/self._RF(cn)**2
term=self.K(m,n,cn) # Ktriv(self._N,m,n,cn,self._prec)/self._RF(cn)**2
#print "K,m,n(",cn,")=",term
term=self._CF(term)/self._RF(cn)**k
#print "term(",cn,")=",term
summa=summa+term
f=self._RF(2**k)*self._RF.pi()**k
f=f*self._RF(m)**(k-1)
f=f*self._CF(0,1)**(self._RF(k+2))
f=f/self._RF(factorial(k-1))
return f*summa
| Python |
#################################################################################
#
# (c) Copyright 2010 Fredrik Stroemberg
#
# This file is part of PSAGE
#
# PSAGE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PSAGE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
from psage.modform.maass import *
def test_vv(N=11):
    # NOTE(review): the bare `return` below disables this test entirely;
    # everything after it is dead code, apparently kept for manual
    # experimentation.  Remove the return to actually run it.
    return
    WR=WeilRepDiscriminantForm(11,dual=True) # N is ignored; the level is hard-coded to 11
    M=VVHarmonicWeakMaassForms(WR,0.5,15)
    PP={(7/22,0):1}
    F=M.get_element(PP,12)
    print F
def test_construction_of_space(N=1):
    r"""
    Construct a Maass waveform space of level N and search for an
    element with spectral parameter in the interval [9,10].
    """
    space = MaassWaveForms(N)
    space.get_element_in_range(9, 10)
def test_group_list():
    r"""
    Exercise the signature listing: take the first valid signature for
    6 and enumerate all admissible pairs for it, verbosely.
    """
    signatures = get_list_of_valid_signatures(6)
    list_all_admissable_pairs(signatures[0], verbose=2)
def _test_permutation_iterator_1():
    r"""
    Make sure that the permutation iterator works as it should.
    Type of iterator: the whole of S_4 (4! = 24 elements); also check
    that plain iteration terminates.
    """
    it = MyPermutationIterator(4)
    assert len(it.list()) == 24
    for _ in it:
        pass
def _test_permutation_iterator_2():
    r"""
    Make sure that the permutation iterator works as it should.
    Type of iterator: permutations of 6 letters whose fixed-point set is
    exactly {1,2}; there are 9 of these (= derangements of 4 letters).
    """
    it = MyPermutationIterator(6, fixed_pts=[1, 2])
    assert len(it.list()) == 9
    for _ in it:
        pass
def _test_permutation_iterator_3():
    r"""
    Make sure that the permutation iterator works as it should.
    Type of iterator: fixed-point-free permutations of 6 letters;
    there are 265 of these (the derangement number D_6).
    """
    it = MyPermutationIterator(6, num_fixed=0)
    assert len(it.list()) == 265
    for _ in it:
        pass
def _list_of_perms_test(N,type=1):
    r"""
    Stress-test three ways of stepping through the fixed-point-free
    permutations of N letters:
      type==1  -- the normal Python iteration protocol
      type==2  -- manual stepping via the private _next()
      otherwise -- manual stepping via __next__()
    """
    perms = MyPermutationIterator(N, num_fixed=0)
    if type == 1:
        for current in perms:
            pass
    else:
        step = perms._next if type == 2 else perms.__next__
        try:
            while True:
                current = perms.current_perm()
                step()
        except StopIteration:
            pass
| Python |
""" Excerpt from my MySubgroup class. Contains routines to draw fundamental domains.
r"""
import matplotlib.patches as patches
import matplotlib.path as path
from sage.all import I,Gamma0,Gamma1,Gamma,SL2Z,ZZ,RR,ceil,sqrt,CC,line,text,latex,exp,pi,infinity
def draw_fundamental_domain(N,group='Gamma0',model="H",axes=None,filename=None,**kwds):
    r""" Draw a fundamental domain for a congruence subgroup.

    INPUT:
    - ``N``     -- level
    - ``group`` -- (default 'Gamma0') one of 'Gamma0', 'Gamma1', 'Gamma'
    - ``model`` -- (default 'H')
                   = 'H' -- upper half-plane
                   = 'D' -- disk model
    - ``filename`` -- if given, save the figure to this file
    - ``**kwds``   -- additional arguments passed on to matplotlib savefig
    - ``axes``     -- set geometry of output:
                      [x0,x1,y0,y1] restricts the figure to [x0,x1]x[y0,y1]

    EXAMPLES::

        sage: G=MySubgroup(Gamma0(3))
        sage: G.draw_fundamental_domain()
    """
    # Dispatch table instead of eval(): evaluating a caller-supplied string
    # is a code-injection hazard and hides typos until runtime.
    try:
        G = {'Gamma0': Gamma0, 'Gamma1': Gamma1, 'Gamma': Gamma}[group](N)
    except KeyError:
        raise ValueError("group must be one of 'Gamma0','Gamma1','Gamma', got %s" % group)
    name ="$"+latex(G)+"$"
    ## need a "nice" set of coset representatives to draw a connected
    ## fundamental domain. Only implemented for Gamma_0(N)
    coset_reps = nice_coset_reps(G)
    from matplotlib.backends.backend_agg import FigureCanvasAgg
    if model=="D":
        # (the old code passed the *builtin* `format` function here; the
        # parameter is unused downstream, so we simply rely on the defaults)
        g=draw_funddom_d(coset_reps,z0=I)
    else:
        g=draw_funddom(coset_reps)
    if axes is not None:
        [x0,x1,y0,y1]=axes
    elif model=="D":
        x0=-1 ; x1=1 ; y0=-1.1 ; y1=1
    else:
        # find the width of the fundamental domain
        wmin=0 ; wmax=1    # width bookkeeping (currently unused afterwards)
        max_x = RR(0.55)
        rho = CC( exp(2*pi*I/3))
        for V in coset_reps:
            ## we also compare the real parts of where rho and infinity are mapped
            r1 = (V.acton(rho)).real()
            if V[1,0]!=0:
                inf1 = RR(V[0,0] / V[1,0])
            else:
                inf1 = 0
            if V[1,0]==0 and V[0,0]==1:
                if V[0,1]>wmax:
                    wmax=V[0,1]
                if V[0,1]<wmin:
                    wmin=V[0,1]
            if max(r1,inf1) > max_x:
                max_x = max(r1,inf1)
        x0=RR(-max_x) ; x1=RR(max_x) ; y0=RR(-0.15) ; y1=RR(1.5)
    ## Draw the real axis manually (since can't figure out how to do it automatically)
    ax = line([[x0,0.0],[x1,0.0]],color='black')
    ## ticks at +-1/2
    ax = ax + line([[-0.5,-0.01],[-0.5,0.01]],color='black')
    ax = ax + line([[0.5,-0.01],[0.5,0.01]],color='black')
    g = g + ax
    if model=="H":
        t = text(name, (0, -0.1), fontsize=16, color='black')
    else:
        t = text(name, (0, -1.1), fontsize=16, color='black')
    g = g + t
    g.set_aspect_ratio(1)
    g.set_axes_range(x0,x1,y0,y1)
    g.axes(False)
    if filename is not None:
        fig = g.matplotlib()
        fig.set_canvas(FigureCanvasAgg(fig))
        axes = fig.get_axes()[0]
        axes.minorticks_off()
        axes.set_yticks([])
        fig.savefig(filename,**kwds)
    else:
        return g
def draw_funddom(coset_reps,format="S"):
r""" Draw a fundamental domain for G.
INPUT:
- ``format`` -- (default 'Disp') How to present the f.d.
- ``S`` -- Display directly on the screen
EXAMPLES::
sage: G=MySubgroup(Gamma0(3))
sage: G._draw_funddom()
"""
pi=RR.pi()
pi_3 = pi / RR(3.0)
from sage.plot.plot import (Graphics,line)
from sage.functions.trig import (cos,sin)
g=Graphics()
x1=RR(-0.5) ; y1=RR(sqrt(3 )/2 )
x2=RR(0.5) ; y2=RR(sqrt(3 )/2 )
xmax=RR(20.0)
l1 = line([[x1,y1],[x1,xmax]])
l2 = line([[x2,y2],[x2,xmax]])
l3 = line([[x2,xmax],[x1,xmax]]) # This is added to make a closed contour
c0=_circ_arc(RR(pi/3.0) ,RR(2.0*pi)/RR(3.0) ,0 ,1 ,100 )
tri=c0+l1+l3+l2
g=g+tri
for A in coset_reps:
if list(A)==[1,0,0,1]:
continue
#print "A=",A
# [a,b,c,d]=A
# if(a==1 and b==0 and c==0 and d==1 ):
# continue
# if(a<0 ):
# a=RR(-a); b=RR(-b); c=RR(-c); d=RR(-d)
# else:
# a=RR(a); b=RR(b); c=RR(c); d=RR(d)
# if(c==0 ): # then this is easier
# L0 = [[cos(pi_3*RR(i/100.0))+b,sin(pi_3*RR(i/100.0))] for i in range(100 ,201 )]
# L1 = [[x1+b,y1],[x1+b,xmax]]
# L2 = [[x2+b,y2],[x2+b,xmax]]
# L3 = [[x2+b,xmax],[x1+b,xmax]]
# c0=line(L0); l1=line(L1); l2=line(L2); l3=line(L3)
# tri=c0+l1+l3+l2
# g=g+tri
# else:
# den=(c*x1+d)**2 +c**2 *y1**2
# x1_t=(a*c*(x1**2 +y1**2 )+(a*d+b*c)*x1+b*d)/den
# y1_t=y1/den
# den=(c*x2+d)**2 +c**2 *y2**2
# x2_t=(a*c*(x2**2 +y2**2 )+(a*d+b*c)*x2+b*d)/den
# y2_t=y2/den
# inf_t=a/c
# #print "A=",A
# #print "arg1=",x1_t,y1_t,x2_t,y2_t
# c0=_geodesic_between_two_points(x1_t,y1_t,x2_t,y2_t)
# #print "arg1=",x1_t,y1_t,inf_t
# c1=_geodesic_between_two_points(x1_t,y1_t,inf_t,0. )
# #print "arg1=",x2_t,y2_t,inf_t
# c2=_geodesic_between_two_points(x2_t,y2_t,inf_t,0.0)
# tri=c0+c1+c2
tri=draw_transformed_triangle_H(A,xmax=xmax)
g=g+tri
return g
def draw_transformed_triangle_H(A,xmax=20):
    r"""
    Draw the image under A=[a,b,c,d] of the standard fundamental-domain
    triangle (corners rho=-1/2+i*sqrt(3)/2, rho+1 and i*infinity).

    INPUT:
    - ``A``    -- an SL(2,Z) element, unpackable as a,b,c,d
    - ``xmax`` -- height at which vertical geodesics are truncated

    FIX: the Python-2-only operator ``<>`` was replaced by ``!=``.
    """
    pi=RR.pi()
    pi_3 = pi / RR(3.0)
    from sage.plot.plot import (Graphics,line)
    from sage.functions.trig import (cos,sin)
    x1=RR(-0.5) ; y1=RR(sqrt(3 )/2 )
    x2=RR(0.5) ; y2=RR(sqrt(3 )/2 )
    a,b,c,d = A
    # A and -A act identically on H; normalize so that a>=0
    if a<0:
        a=RR(-a); b=RR(-b); c=RR(-c); d=RR(-d)
    else:
        a=RR(a); b=RR(b); c=RR(c); d=RR(d)
    if c==0: # easier case: the image is again bounded by vertical lines
        if a*d!=0:
            a=a/d; b=b/d;
        L0 = [[a*cos(pi_3*RR(i/100.0))+b,a*sin(pi_3*RR(i/100.0))] for i in range(100 ,201 )]
        L1 = [[a*x1+b,a*y1],[a*x1+b,xmax]]
        L2 = [[a*x2+b,a*y2],[a*x2+b,xmax]]
        L3 = [[a*x2+b,xmax],[a*x1+b,xmax]]
        c0=line(L0); l1=line(L1); l2=line(L2); l3=line(L3)
        tri=c0+l1+l3+l2
    else:
        # images of the corners under the Moebius transformation
        den=(c*x1+d)**2 +c**2 *y1**2
        x1_t=(a*c*(x1**2 +y1**2 )+(a*d+b*c)*x1+b*d)/den
        y1_t=y1/den
        den=(c*x2+d)**2 +c**2 *y2**2
        x2_t=(a*c*(x2**2 +y2**2 )+(a*d+b*c)*x2+b*d)/den
        y2_t=y2/den
        inf_t=a/c
        # the three boundary geodesics of the image triangle
        c0=_geodesic_between_two_points(x1_t,y1_t,x2_t,y2_t)
        c1=_geodesic_between_two_points(x1_t,y1_t,inf_t,0. )
        c2=_geodesic_between_two_points(x2_t,y2_t,inf_t,0.0)
        tri=c0+c1+c2
    return tri
def draw_funddom_d(coset_reps,format="MP",z0=I):
r""" Draw a fundamental domain for self in the circle model
INPUT:
- ''format'' -- (default 'Disp') How to present the f.d.
= 'S' -- Display directly on the screen
- z0 -- (default I) the upper-half plane is mapped to the disk by z-->(z-z0)/(z-z0.conjugate())
EXAMPLES::
sage: G=MySubgroup(Gamma0(3))
sage: G._draw_funddom_d()
"""
# The fundamental domain consists of copies of the standard fundamental domain
pi=RR.pi()
from sage.plot.plot import (Graphics,line)
g=Graphics()
bdcirc=_circ_arc(0 ,2 *pi,0 ,1 ,1000 )
g=g+bdcirc
# Corners
x1=-RR(0.5) ; y1=RR(sqrt(3 )/2)
x2=RR(0.5) ; y2=RR(sqrt(3 )/2)
z_inf=1
l1 = _geodesic_between_two_points_d(x1,y1,x1,infinity)
l2 = _geodesic_between_two_points_d(x2,y2,x2,infinity)
c0 = _geodesic_between_two_points_d(x1,y1,x2,y2)
tri=c0+l1+l2
g=g+tri
for A in coset_reps:
[a,b,c,d]=A
if(a==1 and b==0 and c==0 and d==1 ):
continue
if(a<0 ):
a=-a; b=-b; c=-c; d=-1
if(c==0 ): # then this is easier
l1 = _geodesic_between_two_points_d(x1+b,y1,x1+b,infinity)
l2 = _geodesic_between_two_points_d(x2+b,y2,x2+b,infinity)
c0 = _geodesic_between_two_points_d(x1+b,y1,x2+b,y2)
# c0=line(L0); l1=line(L1); l2=line(L2); l3=line(L3)
tri=c0+l1+l2
g=g+tri
else:
den=(c*x1+d)**2 +c**2 *y1**2
x1_t=(a*c*(x1**2 +y1**2 )+(a*d+b*c)*x1+b*d)/den
y1_t=y1/den
den=(c*x2+d)**2 +c**2 *y2**2
x2_t=(a*c*(x2**2 +y2**2 )+(a*d+b*c)*x2+b*d)/den
y2_t=y2/den
inf_t=a/c
c0=_geodesic_between_two_points_d(x1_t,y1_t,x2_t,y2_t)
c1=_geodesic_between_two_points_d(x1_t,y1_t,inf_t,0.0 )
c2=_geodesic_between_two_points_d(x2_t,y2_t,inf_t,0.0 )
tri=c0+c1+c2
g=g+tri
g.xmax(1 )
g.ymax(1 )
g.xmin(-1 )
g.ymin(-1 )
g.set_aspect_ratio(1 )
return g
#### Methods not dependent explicitly on the group
def _geodesic_between_two_points(x1,y1,x2,y2):
    r""" Geodesic path between two points in the hyperbolic upper half-plane.

    INPUT:
    - ``(x1,y1)`` -- starting point (0<y1<=infinity)
    - ``(x2,y2)`` -- ending point (0<y2<=infinity)

    OUTPUT:
    - a vertical line segment if x1==x2, otherwise a polygonal
      approximation of the circular geodesic arc through the two points.

    EXAMPLES::

        sage: l=_geodesic_between_two_points(0.1,0.2,0.0,0.5)

    BUGFIX: the rounding-clamp for r1 near -1 tested and assigned r2
    (copy-paste error); it now clamps r1.  Unused locals (tmid,a0,a1)
    were removed.
    """
    pi=RR.pi()
    from sage.plot.plot import line
    from sage.functions.trig import arcsin
    if abs(x1-x2) < 1E-10:
        # The line segment [x=x1, y1 <= y <= y2]
        return line([[x1,y1],[x2,y2]])
    # otherwise a half-circle with center c on the real axis, radius r
    c=RR(y1**2 -y2**2 +x1**2 -x2**2 )/RR(2 *(x1-x2))
    r=RR(sqrt(y1**2 +(x1-c)**2 ))
    r1=RR(y1/r); r2=RR(y2/r)
    # clamp rounding noise so that arcsin gets arguments in [-1,1]
    # (with y>0 these are positive, so only the +1 clamps ever trigger)
    if abs(r1-1 ) < 1E-12:
        r1=RR(1.0)
    elif abs(r1+1 ) < 1E-12:
        r1=-RR(1.0)
    if abs(r2-1 ) < 1E-12:
        r2=RR(1.0)
    elif abs(r2+1 ) < 1E-12:
        r2=-RR(1.0)
    # angles of the two endpoints on the circle
    if x1>=c:
        t1 = RR(arcsin(r1))
    else:
        t1 = RR(pi)-RR(arcsin(r1))
    if x2>=c:
        t2 = RR(arcsin(r2))
    else:
        t2 = RR(pi)-arcsin(r2)
    return _circ_arc(t1,t2,c,r)
def _geodesic_between_two_points_d(x1,y1,x2,y2,z0=I):
    r""" Geodesic path between two upper half-plane points, drawn in the
    unit-disc model via the map w = (z-z0)/(z-z0.conjugate()).

    INPUTS:
    - ''(x1,y1)'' -- starting point (0<y1<=infinity)
    - ''(x2,y2)'' -- ending point (0<y2<=infinity)
    - ''z0'' -- (default I) the point of the upper half-plane that is
      mapped to 0 in the disc
    OUTPUT:
    - ''ca'' -- a polygonal approximation of the corresponding circular
      arc in the disc (or a straight line if the geodesic is a diameter)
    EXAMPLES::

        sage: l=_geodesic_between_two_points_d(0.1,0.2,0.0,0.5)
    """
    pi=RR.pi()
    from sage.plot.plot import line
    from sage.functions.trig import (cos,sin)
    # Map the two endpoints into the disc (infinity -> 1)
    if(y1<0 or y2<0 ):
        raise ValueError,"Need points in the upper half-plane! Got y1=%s, y2=%s" %(y1,y2)
    if(y1==infinity):
        P1=CC(1 )
    else:
        P1=CC((x1+I*y1-z0)/(x1+I*y1-z0.conjugate()))
    if(y2==infinity):
        P2=CC(1 )
    else:
        P2=CC((x2+I*y2-z0)/(x2+I*y2-z0.conjugate()))
    # First find the endpoints a,b of the completed geodesic in D
    if(x1==x2):
        # vertical geodesic in H: real endpoint x1 and infinity
        a=CC((x1-z0)/(x1-z0.conjugate()))
        b=CC(1 )
    else:
        # half-circle in H: real endpoints c-r and c+r
        c=RR(y1**2 -y2**2 +x1**2 -x2**2 )/RR(2 *(x1-x2))
        r=RR(sqrt(y1**2 +(x1-c)**2 ))
        a=c-r
        b=c+r
        a=CC((a-z0)/(a-z0.conjugate()))
        b=CC((b-z0)/(b-z0.conjugate()))
    if( abs(a+b) < 1E-10 ): # On a diagonal: the geodesic is a straight chord
        return line([[P1.real(),P1.imag()],[P2.real(),P2.imag()]])
    th_a=a.argument()
    th_b=b.argument()
    # Compute the center of the circle in the disc model.
    # NOTE(review): the formulas below presume a and b lie on the unit
    # circle (endpoints of a completed geodesic); the special cases guard
    # against sin(th)=0 when one endpoint is at +-1 -- verify derivation.
    if( min(abs(b-1 ),abs(b+1 ))< 1E-10 and min(abs(a-1 ),abs(a+1 ))>1E-10 ):
        c=b+I*(1 -b*cos(th_a))/sin(th_a)
    elif( min(abs(b-1 ),abs(b+1 ))> 1E-10 and min(abs(a-1 ),abs(a+1 ))<1E-10 ):
        c=a+I*(1 -a*cos(th_b))/RR(sin(th_b))
    else:
        cx=(sin(th_b)-sin(th_a))/sin(th_b-th_a)
        c=cx+I*(1 -cx*cos(th_b))/RR(sin(th_b))
    # Radius and the angular parameters of the two endpoints
    r=abs(c-a)
    t1=CC(P1-c).argument()
    t2=CC(P2-c).argument()
    return _circ_arc(t1,t2,c,r)
def _circ_arc(t0,t1,c,r,num_pts=5000 ):
    r""" Circular arc.

    INPUT:
    - ``t0`` -- starting angle
    - ``t1`` -- ending angle
    - ``c``  -- center point of the circle
    - ``r``  -- radius of circle
    - ``num_pts`` -- unused; kept only for backward compatibility.  The
      arc is drawn with parametric_plot, which chooses its own sampling.
      (The old body also dead-assigned num_pts=3, so the parameter never
      had any effect.)

    OUTPUT:
    - ``ca`` -- an approximation of the circular arc centered at c with
      radius r, starting at angle t0 and ending at t1

    EXAMPLES::

        sage: ca=_circ_arc(0.1,0.2,0.0,1.0,100)
    """
    from sage.plot.plot import line,parametric_plot
    from sage.functions.trig import (cos,sin)
    from sage.all import var
    t00=t0; t11=t1
    ## To make sure the arc is correct we reduce both angles to the
    ## same branch, e.g. [0,2pi]
    pi=RR.pi()
    while t00<0.0:
        t00=t00+RR(2.0*pi)
    while t11<0:
        t11=t11+RR(2.0*pi)
    while t00>2*pi:
        t00=t00-RR(2.0*pi)
    while t11>2*pi:
        t11=t11-RR(2.0*pi)
    xc=CC(c).real()
    yc=CC(c).imag()
    t = var('t')
    # plot from the smaller to the larger angle
    if t11>t00:
        ca = parametric_plot((r*cos(t)+xc, r*sin(t)+yc), (t, t00,t11))
    else:
        ca = parametric_plot((r*cos(t)+xc, r*sin(t)+yc), (t, t11,t00))
    return ca
def nice_coset_reps(G):
r"""
Compute a better/nicer list of right coset representatives [V_j]
i.e. SL2Z = \cup G V_j
Use this routine for known congruence subgroups.
EXAMPLES::
sage: G=MySubgroup(Gamma0(5))
sage: G._get_coset_reps_from_G(Gamma0(5))
[[1 0]
[0 1], [ 0 -1]
[ 1 0], [ 0 -1]
[ 1 1], [ 0 -1]
[ 1 -1], [ 0 -1]
[ 1 2], [ 0 -1]
[ 1 -2]]
"""
cl=list()
S,T=SL2Z.gens()
lvl=G.generalised_level()
# Start with identity rep.
cl.append(SL2Z([1 ,0 ,0 ,1 ]))
if(not S in G):
cl.append(S)
# If the original group is given as a Gamma0 then
# the reps are not the one we want
# I.e. we like to have a fundamental domain in
# -1/2 <=x <= 1/2 for Gamma0, Gamma1, Gamma
for j in range(1 , ZZ( ceil(RR(lvl/2.0))+2)):
for ep in [1 ,-1 ]:
if(len(cl)>=G.index()):
break
# The ones about 0 are all of this form
A=SL2Z([0 ,-1 ,1 ,ep*j])
# just make sure they are inequivalent
try:
for V in cl:
if((A<>V and A*V**-1 in G) or cl.count(A)>0 ):
raise StopIteration()
cl.append(A)
except StopIteration:
pass
# We now addd the rest of the "flips" of these reps.
# So that we end up with a connected domain
i=1
while(True):
lold=len(cl)
for V in cl:
for A in [S,T,T**-1 ]:
B=V*A
try:
for W in cl:
if( (B*W**-1 in G) or cl.count(B)>0 ):
raise StopIteration()
cl.append(B)
except StopIteration:
pass
if(len(cl)>=G.index() or lold>=len(cl)):
# If we either did not addd anything or if we addded enough
# we exit
break
# If we missed something (which is unlikely)
if(len(cl)<>G.index()):
print "cl=",cl
raise ValueError,"Problem getting coset reps! Need %s and got %s" %(G.index(),len(cl))
return cl
| Python |
# -*- coding: utf-8 -*-
#*****************************************************************************
# Copyright (C) 2010 Fredrik Strömberg <fredrik314@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
r"""
Implements Spaces of automorphic forms, for example Harmonic Weak Maass forms.
AUTHORS:
- Fredrik Strömberg
EXAMPLES::
# Note: need mpmath and mpc installed for multiprecision computations
# install mpmath using:
# sage -i mpmath
# sage -i mpc
sage: H=HarmonicWeakMaassForms(Gamma0(8),3/2)
sage: PP=[{'+': {(1,0):0,(3,0):0}, '-': { (0, 0): 1}}]
sage: setc={(0,-1):0,(0,-2):0}
sage: F=H.get_element(PP,SetM=15,SetC=setc)
sage: F.list_coefficients(5,norm=True)
# Half integral weight forms
# Classical Example of weight 3/2 Harmonic Maass form (Eisenstein series) on Gamma0(4).
# See e.g. Hirzebruch-Zagier...
# First construct a space of modular forms
sage: M=HalfIntegralWeightForms(Gamma0(4),1/2);M
Space of Modular Forms of weight = 1/2 with theta multiplier on Congruence Subgroup Gamma0(4)
# if we have magma installed we can compute the basis quickly...
sage: M.basis()
[1 + 2*q + 2*q^4 + 2*q^9 + O(q^12)]
# else we use the generic numerical algorithm...
# Compute F with \xi_k(F)=M.basis()[0]
sage: F=M.xi_k_inverse_basis()
sage: M1=HalfIntegralWeightForms(Gamma0(4),3/2);M1
Space of Modular Forms of weight = 3/2 with theta multiplier on Congruence Subgroup Gamma0(4)
sage: M1.basis()
[1 + 6*q + 12*q^2 + 8*q^3 + 6*q^4 + 24*q^5 + 24*q^6 + 12*q^8 + 30*q^9 + 24*q^10 + 24*q^11 + O(q^12)]
## the interesting such F is obtained by subtracting off the theta series above
## the following function try to do some educated guesses in this direction...
sage: F=M.xi_k_inverse_basis()
# normalize to get the class number H(n) as c^+(n)
sage: F1=F*mpmath.mp.mpf(2)/(mpmath.mp.mpf(16)*mpmath.mp.pi())
sage: F1.list_coefficients(10,norm=False,cusp=0)
For normalization we use:
c0=c[ 0 0 ]= (-0.08333333333333333479249476 + 1.374218596157417238432065e-17j)
c0**4= (0.0000482253086419753120196638 - 3.18106156517920673754516e-20j)
c0**-4= (20735.99999999999854765577 + 1.367798246876169665343804e-11j)
c-[ 0 , -1 ]= (0.1410473958869390714828967 + 1.040527440338284773385003e-18j)
C[ 0 , -10 : -10.0 ]= (-6.048848586346993727605162e-13 + 4.124029470887434151519853e-13j)
C[ 0 , -9 : -9.0 ]= (0.4231421876609869221276164 - 1.975630542615197649574086e-13j)
C[ 0 , -8 : -8.0 ]= (3.831516475716355575569604e-14 - 2.458561245383529927285641e-14j)
C[ 0 , -7 : -7.0 ]= (-9.500735585919241090009618e-15 + 1.110921884248782522963148e-14j)
C[ 0 , -6 : -6.0 ]= (-2.384259240288897759344736e-15 + 1.403583988174435702911771e-15j)
C[ 0 , -5 : -5.0 ]= (4.815538765737730623603249e-16 - 5.761338517547018931126834e-16j)
C[ 0 , -4 : -4.0 ]= (0.2820947917738782846742579 - 7.217635823264244290311293e-17j)
C[ 0 , -3 : -3.0 ]= 0.0
C[ 0 , -2 : -2.0 ]= 0.0
C[ 0 , -1 : -1.0 ]= (0.1410473958869390714828967+ 1.040527440338284773385003e-18j)
C[ 0 ,- 0 ]= (0.03978873577297383394222094 + 0.0j)
C[ 0 ,+ 0 ]= (-0.08333333333333333479249476 + 1.374218596157417238432065e-17j)
C[ 0 , 1 : 1.0 ]= 0.0
C[ 0 , 2 : 2.0 ]= (-1.798760787601009629745205e-17 + 1.65022799150830524765262e-16j)
C[ 0 , 3 : 3.0 ]= (0.3333333333333333196776257 + 1.144304356995054422679724e-16j)
C[ 0 , 4 : 4.0 ]= (0.4999999999999999916474501 + 8.162174201008810051009322e-17j)
C[ 0 , 5 : 5.0 ]= (-1.764214819638824774085321e-17 + 2.964560603210838161433913e-16j)
C[ 0 , 6 : 6.0 ]= (-2.968188127647462036121825e-17 + 3.285957770389621651491347e-16j)
C[ 0 , 7 : 7.0 ]= (0.999999999999999814307206 + 2.96054723015787697893505e-16j)
C[ 0 , 8 : 8.0 ]= (0.9999999999999998338067601 + 2.976977203293335589862541e-16j)
C[ 0 , 9 : 9.0 ]= (1.993722392414312843107319e-15 - 2.548391116259674212745512e-15j)
C[ 0 , 10 : 10.0 ]= (2.24017436548042636561142e-15 - 1.838191079695203924831949e-15j)
sage: M=HalfIntegralWeightForms(Gamma0(8),3/2);M
Space of Modular Forms of weight = 3/2 with theta multiplier on Congruence Subgroup Gamma0(8)
sage: H=HarmonicWeakMaassForms(M);H
Space of Harmonic Weak Maass Forms of weight = 1/2 with theta multiplier on Congruence Subgroup Gamma0(8)
# magma dependent method
sage: M.basis()
[1 + 8*q^3 + 6*q^4 + 12*q^8 + 24*q^11 + O(q^12), q + 2*q^2 + 4*q^5 + 4*q^6 + 5*q^9 + 4*q^10 + O(q^12)]
# my numerical method
sage: B=M.basis_numerical();B;[G1,G2]=B
[Element of Space of Modular Forms of weight = 3/2 with theta multiplier on Congruence Subgroup Gamma0(8), Element of Space of Modular Forms of weight = 3/2 with theta multiplier on Congruence Subgroup Gamma0(8)]
sage: G1.list_coefficients(5,norm=True,cusp=0)
For normalization we use:
c0=c[ 0 0 ]= 1
c0**4= 1.0
c0**-4= 1.0
C[ 0 ,- 0 ]= 0
C[ 0 ,+ 0 ]= 1
C[ 0 , 1 : 1.0 ]= 0
C[ 0 , 2 : 2.0 ]= (3.33584254329255000726713e-16 + 3.828443554530340392128911e-16j)
C[ 0 , 3 : 3.0 ]= (8.000000000000000026678265 + 2.7994627907636757613313e-17j)
C[ 0 , 4 : 4.0 ]= (6.000000000000000038839083 + 4.677622975986922840512461e-17j)
C[ 0 , 5 : 5.0 ]= (6.514117343061377568982691e-16 + 7.582781737715197768359657e-16j)
sage: G1.list_coefficients(5,norm=True,cusp=1)
For normalization we use:
c0=c[ 1 0 ]= (-0.2973017787506802628334689 - 0.2973017787506803222047077j)
c0**4= (-0.03125000000000001086426115 - 1.24812654886380782517272e-17j)
c0**-4= (-31.99999999999998887499659 + 1.27808158603653832431049e-14j)
C[ 1 ,- 0 ]= 0
C[ 1 ,+ 0 ]= (-0.2973017787506802628334689 - 0.2973017787506803222047077j)
C[ 1 , 1 : 1.0 ]= (11.99999999999999865794267 - 1.548857790026970449743772e-15j)
C[ 1 , 2 : 2.0 ]= (5.999999999999999962775556 - 1.804866240517242240183602e-17j)
C[ 1 , 3 : 3.0 ]= (23.99999999999999732633269 - 3.12728962174064037529157e-15j)
C[ 1 , 4 : 4.0 ]= (11.99999999999999973191219 - 2.190742938951590285367188e-16j)
C[ 1 , 5 : 5.0 ]= (23.99999999999999720631503 - 3.29049103498040159747239e-15j)
"""
#*****************************************************************************
# Copyright (C) 2010 Fredrik Strömberg <stroemberg@mathematik.tu-darmstadt.de>,
#
#  Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
import mpmath
from sage.all import SageObject,Parent,ln,latex,random,divisors,ModularForms,prime_divisors,real,imag,PowerSeriesRing,PolynomialRing
from sage.rings.real_mpfr import RealNumber
from mpmath import mpf
from mysubgroup import *
from automorphic_forms_alg import *
from sage.all import I,dumps,loads,ComplexField,LaurentPolynomialRing
from multiplier_systems import *
from psage.matrix.matrix_complex_dense import *
from sage.all import magma
class AutomorphicFormSpace(Parent):
r"""
General class of automorphic forms.
    Subclasses should specialize to various types.
"""
    def __init__(self,G,weight=0,multiplier="",character=0,holomorphic=False,weak=True,cuspidal=False,unitary_action=0,dprec=15,prec=53,verbose=0):
        r""" Initialize the space of automorphic forms.

        INPUT:
        - ``G`` -- a MySubgroup/HeckeTriangleGroup instance, an integer N
          (interpreted as Gamma0(N)), or a subgroup of (P)SL(2,Z)
        - ``weight`` -- rational number (integer or half-integer weight)
        - ``multiplier`` -- a MultiplierSystem instance, or "" for the
          trivial multiplier
        - ``character`` -- a DirichletCharacter, or an integer index into
          DirichletGroup(level) (0 means the trivial character)
        - ``holomorphic``/``weak``/``cuspidal`` -- flags describing the space
        - ``unitary_action`` -- passed through to self._unitary_action
        - ``dprec``/``prec`` -- decimal resp. binary working precision
        - ``verbose`` -- verbosity level
        """
        self._from_group = None # try to keep the group used to construct the MyGroup instance
        if isinstance(G,(MySubgroup,HeckeTriangleGroup)):
            self._group=G
        elif is_int(G):
            self._from_group = Gamma0(G)
            self._group=MySubgroup(self._from_group)
        elif str(type(G)).find("gamma")>0 or str(type(G)).find("SL2Z")>0:
            self._from_group = G
            try:
                self._group=MySubgroup(G)
            except TypeError:
                raise TypeError,"Incorrect input!! Need subgroup of PSL2Z! Got :%s" %(G)
        else:
            raise TypeError,"Could not convert G:{0} to a group!".format(G)
        self._unitary_action=unitary_action
        ## Define the character
        # And then the multiplier system, which should be an instance of MultiplierSystem or Subclass thereof, or None.
        if isinstance(character,sage.modular.dirichlet.DirichletCharacter):
            self._character = character
        elif is_int(character) and self._group.is_congruence():
            DG = DirichletGroup(self._group.level())
            if character >0 and character <len(DG):
                self._character = DG[character]
            else:
                # out-of-range index: fall back to the trivial character
                if verbose>0:
                    print "got character={0} as input!".format(character)
                self._character = trivial_character(1)
        elif character==0:
            self._character = trivial_character(1)
        else:
            raise TypeError,"Could not find character {0} on group {1}".format(character,self._group)
        if not multiplier:
            self._multiplier = TrivialMultiplier(self._group,character=self._character)
        elif isinstance(multiplier,MultiplierSystem):
            self._multiplier=multiplier
            # the multiplier's own character overrides the one chosen above
            self._character = multiplier._character
        else:
            raise TypeError,"Incorrect multiplier! Got: %s" %multiplier
        self._rdim=self._multiplier._dim
        # We assume weights are given as rational (integer of half-integers)
        try:
            self._weight=QQ(weight)
        except:
            raise TypeError," Need weights as rational numbers! Got:%s" % weight
        # Check consistency of multiplier
        if not self._multiplier.is_consistent(self._weight):
            raise ValueError," The specified multiplier is not compatible with the given weight! \n multiplier:{0}, weight:{1}".format(self._multiplier,self._weight)
        self._dprec=dprec
        self._prec=prec
        if(dprec>15):
            # high decimal precision requested: use mpmath's arbitrary-precision context
            self._mp_ctx=mpmath.mp
            if prec==53:
                # prec left at its default: derive the bit precision from dprec
                self._prec=int(3.4*dprec)+1
        else:
            self._mp_ctx=mpmath.fp
        self._holomorphic=holomorphic
        self._weak=weak
        self._verbose=verbose
        self._cuspidal=cuspidal
        self._alphas={}
        self._dimension = None
        # if we are interested in dimension of the corresponding cusp form space
        self._dimension_cusp_forms = None
        self._dimension_modular_forms = None
        self._basis_numerical = None
        # A properly working typing would make this unecessary
        self._is_automorphic_form_space=True
        self._scaled=False
        if(self._multiplier.is_trivial()):
            self._rep=False
        else:
            self._rep=True
        # testing for types are tedious when in the interactive setting...
        self._is_space_of_automorphic_functions=True
def __repr__(self):
    r"""
    Return the string representation of self.

    The description is assembled from the space type (cusp / modular /
    harmonic weak Maass / automorphic forms), the weight, the multiplier
    and the underlying group.

    EXAMPLES::

        sage: S=AutomorphicFormSpace(Gamma0(4));S
        Space of Automorphic Forms of weight = 0 with trivial multiplier and character: Dirichlet character modulo 4 of conductor 1 mapping 3 |--> 1 on the group G:
        Arithmetic Subgroup of PSL2(Z) with index 6. Given by:
        perm(S)=(1,2)(3,4)(5,6)
        perm(ST)=(1,3,2)(4,5,6)
        Constructed from G=Congruence Subgroup Gamma0(4)
        sage: S=AutomorphicFormSpace(Gamma0(4),multiplier=theta_multiplier,weight=1/2);S
        Space of Automorphic Forms of weight = 1/2 with theta multiplier on the group G:
        Arithmetic Subgroup of PSL2(Z) with index 6. Given by:
        perm(S)=(1,2)(3,4)(5,6)
        perm(ST)=(1,3,2)(4,5,6)
        Constructed from G=Congruence Subgroup Gamma0(4)
    """
    # Pick the space type.  Harmonic weak Maass form spaces are detected
    # by their class name since the subclass is defined elsewhere.
    if self.is_holomorphic():
        kind = "Cusp Forms " if self.is_cuspidal() else "Modular Forms "
    elif str(type(self)).find("HarmonicWeakMaassForms") > 0:
        kind = "Harmonic Weak Maass Forms "
    else:
        kind = "Automorphic Forms "
    # Describe the multiplier; the theta multiplier is also detected by name.
    if str(self._multiplier).find("theta_multiplier") > 0:
        mult = " with theta multiplier "
    elif not self._multiplier.is_trivial():
        mult = " with multiplier:\n" + str(self._multiplier)
    else:
        mult = " with trivial multiplier "
    # Prefer the group the space was constructed from, if recorded.
    if self._from_group:
        grp = str(self._from_group)
    else:
        grp = "the group G:\n" + str(self._group)
    return "Space of " + kind + "of weight = " + str(self._weight) + " " + mult + "on " + grp
def __reduce__(self):
    r"""
    Support pickling by returning the constructor together with the
    arguments needed to rebuild an equivalent space.  (Plain functions
    cannot be pickled, so we rebuild from data instead of storing state.)
    """
    # Prefer the group the space was originally constructed from.
    grp = self._from_group if self._from_group else self._group
    args = (grp, self._weight, self.multiplier(), self._holomorphic,
            self._weak, self._cuspidal, self._dprec, self._verbose)
    return (AutomorphicFormSpace, args)
def __eq__(self,other):
r"""
Compare self to other.
"""
if self._verbose>0:
print "in AutomorphicFormSpace.__eq__"
if(not isinstance(other,type(self))):
return False
if(self._weight <> other._weight):
return False
if(self._group <> other._group):
return False
if(self._multiplier <> other._multiplier):
return False
if(self._character <> other._character):
return False
if(self._holomorphic <> other._holomorphic):
return False
if(self._weak <> other._weak):
return False
if(self._cuspidal <> other._cuspidal):
return False
#eq = eq and (self._dprec == other._weak)
# return False
#print "eq=",eq
return True
def __ne__(self, other):
    r"""
    Negation of equality; see ``__eq__``.
    """
    return not (self == other)
# return various properties of self
def group(self):
    r""" Return the group of self.

    This is the (MySubgroup wrapper of the) subgroup of SL(2,Z) the
    space is defined on.
    """
    return self._group
def weight(self):
    r""" Return the weight of self.
    """
    return self._weight
def character(self):
    r""" Return the character of self.

    The character is stored on the multiplier, not on the space itself.
    """
    return self._multiplier._character
def multiplier(self):
    r""" Return the multiplier (system) of self.
    """
    return self._multiplier
def prec(self, prec=None):
    r"""
    Return the working precision (in bits) of self.

    INPUT:

    - ``prec`` -- integer (default None). If given, it is stored as the
      new working precision before returning.
    """
    # '<>' is deprecated Python 2 syntax; use an identity test against None.
    if prec is not None:
        self._prec = prec
    return self._prec
def dprec(self, dprec=None):
    r"""
    Return the working precision (in decimal digits) of self.

    INPUT:

    - ``dprec`` -- integer (default None). If given, it is stored as the
      new decimal precision before returning.
    """
    # '<>' is deprecated Python 2 syntax; use an identity test against None.
    if dprec is not None:
        self._dprec = dprec
    return self._dprec
def is_holomorphic(self):
    r"""
    Return True if self is holomorphic, otherwise False.
    """
    return self._holomorphic
def is_cuspidal(self):
    r"""
    Return True if self is cuspidal, otherwise False.
    """
    return self._cuspidal
def is_weak(self):
    r"""
    Return True if self is a weak form, i.e. has pole(s) at oo, otherwise False.
    """
    return self._weak
def is_harmonic(self):
    r"""
    Return True if self is a harmonic weak maassform, i.e. has pole(s) at oo, otherwise False.

    NOTE(review): no code visible in this file assigns ``self._harmonic``;
    presumably a subclass sets it -- calling this on a base-class instance
    would raise AttributeError. Verify against the subclasses.
    """
    return self._harmonic
def level(self):
    r""" Return the level of self (if self.group is a congruence subgroup).
    """
    return self._group.level()
def rep(self, A):
    r"""
    Calculate the representation = multiplier (including character) of self on the matrix A.

    INPUT:

    - ``A`` -- a group element (matrix) acceptable to the multiplier system.

    Duality and the character are assumed to be built into the multiplier
    itself, hence the single call below.
    """
    v = self._multiplier(A)
    return v
def alpha(self, i):
    r"""
    Compute the translation at cusp nr. i, i.e. v(T_i)=e(alpha(i)).

    INPUT:

    - ``i`` -- index of the cusp.

    OUTPUT: a pair ``[alpha, v]`` where ``v`` is the multiplier value at
    the stabilizer of cusp i and ``alpha = arg(v)/(2*pi)``.  Results are
    cached in ``self._alphas`` at the current precision.
    """
    RF = RealField(self._prec)
    CF = ComplexField(self._prec)
    if (not self._alphas.has_key(i)):  # Python 2 idiom; cache lookup
        if (self._multiplier == None or self._multiplier.is_trivial()):
            # Trivial multiplier: no shift at any cusp.
            self._alphas[i] = [RF(0), CF(1)]
        else:
            # Evaluate the multiplier on the (parabolic) stabilizer of cusp i.
            A = self._group._cusp_data[i]['stabilizer']
            tmp = self.rep(A)
            if hasattr(tmp, "complex_embedding"):
                # algebraic value (e.g. cyclotomic) -- embed at our precision
                v = tmp.complex_embedding(self._prec)
            else:
                v = CF(tmp)
            # alpha = arg(v) / (2*pi)
            ar = v.argument() / RF.pi() / RF(2)
            self._alphas[i] = [ar, v]
    return self._alphas[i]
def alphas(self):
    # Return the dictionary of cached cusp shifts {i: [alpha_i, v_i]}.
    return self._alphas
def set_alphas(self, prec=None):
    r""" Compute the vector containing the shifts at the various cusps.

    INPUT:

    - ``prec`` -- integer (default None).  If given and different from the
      current working precision, the alphas are computed at this precision;
      the old working precision is restored afterwards.
    """
    precold = self._prec
    # '<>' is deprecated Python 2 syntax; same semantics with modern tests.
    if prec is not None and prec != self._prec:
        self._prec = prec
    for i in range(self._group._ncusps):
        self.alpha(i)  # populates the self._alphas cache
    self._prec = precold
def dimension(self):
    r"""
    Return the dimension of self if we can compute it.

    Returns -1 when the dimension can not be determined.  Cached in
    ``self._dimension`` (a cached value of 0 is recomputed every call,
    since 0 is falsy).

    NOTE(review): __init__ initializes ``self._dimension_modular_forms``
    but this method writes ``self._dimension_mod_forms`` -- confirm which
    attribute name the rest of the code expects.
    """
    if self._dimension:
        return self._dimension
    k = self._weight
    if self._holomorphic and self._rdim == 1:
        if is_int(k) and self._multiplier.is_trivial():
            # Use the builtin sage routines
            xi = self.character()
            self._dimension_cusp_forms = dimension_cusp_forms(xi, k)
            self._dimension_mod_forms = dimension_modular_forms(xi, k)
        else:
            # In weight 1/2 use Serre-Stark
            if k == QQ(1)/QQ(2):
                [d_mod, d_cusp] = self._dimension_of_weight_one_half_space()
                if (self.is_cuspidal()):
                    return d_cusp
                else:
                    return d_mod
            elif k < QQ(1)/QQ(2):
                # NOTE(review): this path leaves the locals
                # dimension_cusp_forms / dimension_mod_forms unbound, so the
                # assignments below raise NameError -- presumably it should
                # return 0 directly.  TODO confirm.
                self._dimension = 0
            # Else use Cohen-Oesterle: (as described in The Web of Modularity by Ono)
            elif k > QQ(3)/QQ(2):
                # NOTE: these locals shadow the global sage functions of the
                # same names (harmless here, but confusing).
                dimension_cusp_forms = self._difference_of_dimensions(k)
                dimension_mod_forms = -self._difference_of_dimensions(QQ(2-k))
            elif k == QQ(3)/QQ(2):
                [d_mod, d_cusp] = self._dimension_of_weight_one_half_space()
                # NOTE(review): d_mod is added to the cusp-form count and
                # d_cusp to the modular-form count -- this looks swapped
                # relative to their names.  TODO confirm against the
                # Cohen-Oesterle / Serre-Stark formulas.
                dimension_cusp_forms = self._difference_of_dimensions(k) + d_mod
                dimension_mod_forms = d_cusp - self._difference_of_dimensions(QQ(1)/QQ(2))
            self._dimension_cusp_forms = dimension_cusp_forms
            self._dimension_mod_forms = dimension_mod_forms
        if (self.is_cuspidal()):
            self._dimension = self._dimension_cusp_forms
        else:
            self._dimension = self._dimension_mod_forms
    elif self._holomorphic:
        # Vector-valued case: only implemented on SL(2,Z).
        if self._group == SL2Z:
            [d_mod, d_cusp] = self._dimension_of_vector_valued_forms()
        else:
            self._dimension = -1
    return self._dimension
def _difference_of_dimensions(self, k):
    r"""
    Use Cohen and Oesterle to compute the difference: dim(S_k)-dim(M_{2-k}).

    INPUT:

    - ``k`` -- half-integral weight (rational with denominator 2).

    NOTE(review): this routine references two names that are defined
    nowhere in this file: ``lambda_k_l_chi`` (used when val_2(N) >= 4)
    and ``s2`` (used in the parity block below; presumably it should be
    ``valuation(cond, 2)``).  As written those code paths raise
    NameError.  TODO confirm against the Cohen-Oesterle paper.
    """
    N = self._group.level()
    cond = self._character.conductor()
    # kk is the integer such that k = kk + 1/2
    kk = ZZ(k - QQ(1)/QQ(2))
    r2 = valuation(N, 2)
    if (r2 >= 4):
        zeta_k_l_chi = lambda_k_l_chi  # NOTE(review): undefined name, see docstring
    elif (r2 == 3):
        zeta_k_l_chi = 3
    elif (r2 == 2):
        zeta_k_l_chi = 0
    # A prime p = 3 mod 4 with odd valuation (or r_p < 2*s_p) forces zeta = 2.
    for p in prime_divisors(N):
        if ((p % 4) == 3):
            rp = valuation(N, p)
            sp = valuation(cond, p)
            if (is_odd(rp) or (rp > 0 and rp < 2*sp)):
                zeta_k_l_chi = 2
                break
    if (is_even(kk)):
        if (s2 == 0):  # NOTE(review): s2 undefined, see docstring
            zeta_k_l_chi = QQ(3)/QQ(2)
        elif (s2 == 2):
            zeta_k_l_chi = QQ(5)/QQ(2)
    else:
        if (s2 == 0):
            zeta_k_l_chi = QQ(5)/QQ(2)
        elif (s2 == 2):
            zeta_k_l_chi = QQ(3)/QQ(2)
    if (zeta_k_l_chi < 0):
        raise ArithmeticError, "Could not compute zeta(k,l,chi)!"
    # fak = prod_{p | N} (1 + 1/p)  -- the index factor [SL2(Z):Gamma0(N)]/N
    fak = QQ(1)
    for p in prime_divisors(N):
        fak = fak * QQ(1 + QQ(1)/QQ(p))
    # fak2 = prod over odd primes of the local lambda(r_p, s_p, p) factors
    fak2 = QQ(1)
    for p in prime_divisors(N):
        if (p > 2):
            rp = valuation(N, p)
            sp = valuation(cond, p)
            if (rp < 2*sp):
                lam = QQ(2*p**(rp-sp))
            else:
                if (is_even(rp)):
                    rprim = QQ(rp)/QQ(2)
                    lam = QQ(p**rprim) + QQ(p**(rprim-1))
                else:
                    rprim = QQ(rp-1)/QQ(2)
                    lam = QQ(2*p**rprim)
            fak2 = fak2 * lam
    # S_k(Gamma0(N),chi) - M_{2-k}(Gamma0(N),chi)
    diff_dims = fak*QQ(k-1)*QQ(N)/QQ(12) - QQ(zeta_k_l_chi)/QQ(2)*fak2
    return diff_dims
def _dimension_of_weight_one_half_space(self):
    r"""
    Computes the dimension of M and S_{1/2}(4N,chi) where 4N and chi are the level and character of self.

    OUTPUT: ``[dim_mod_forms, dim_cusp_forms]`` via the Serre-Stark basis
    theorem: the modular forms dimension is #Omega, and cusp forms
    correspond to the pairs removed by the parity count below.

    NOTE(review): ``nn`` counts *components* ``xip`` with ``xip(-1)==1``,
    not characters that are totally even -- if more than one component of
    a character is even it is counted several times.  TODO confirm against
    Serre-Stark.
    """
    O = self._Omega()
    dim_mod_forms = len(O)
    # check number of totally even characters for the cusp forms
    nn = 0
    for (xi, t) in O:
        l = xi.decomposition()
        for xip in l:
            if (xip(-1) == 1):
                nn = nn + 1
    dim_cusp_forms = len(O) - nn
    return [dim_mod_forms, dim_cusp_forms]
def _Omega(self):
    r"""
    Computes the set of pairs (psi,t) satisfying:
    (1) r**2 * t | self.level()/4, where r is the conductor of psi,
    (2) chi(n) = psi(n)*kronecker(t,n) for (n,self.level())=1,
    where chi is the character of self.

    OUTPUT: list of pairs ``(psi, t)`` with ``psi`` a Dirichlet character
    modulo the level and ``t`` a positive divisor of ``level/4``.
    """
    N = ZZ(QQ(self.level()) / QQ(4))
    D = DirichletGroup(self.level())
    chi = self._character
    Omega = []
    for t in divisors(N):
        for psi in D:
            r = psi.conductor()
            # condition (1): r^2 * t must divide N
            if not ZZ(r * r * t).divides(N):
                continue
            # condition (2): compare chi with psi*kronecker(t,.) on all
            # residues; characters vanish off (Z/levelZ)^* so this is safe.
            ok = True
            for n in range(1, self.level()):
                # '!=' replaces the deprecated Python 2 '<>' operator
                if psi(n) * kronecker(t, n) != chi(n):
                    ok = False
                    break
            if ok:
                Omega.append((psi, t))
    return Omega
def _dimension_of_vector_valued_forms(self):
    r"""
    Calculates the dimension of self is self is a space of automorphic forms on SL2(Z).

    Uses a Riemann-Roch / Eichler-Selberg style formula: a main term plus
    elliptic contributions from the fixed points of S (order 4) and
    R = S*T (order 6), plus a parabolic term from the eigenvalues of T.

    NOTE(review): several apparent defects in this routine:
    - ``CycloidalGroup`` is defined nowhere; presumably ``CyclotomicField``
      was intended.  As written this raises NameError.
    - in the ``self._rdim == 1`` branch ``alphas`` is never assigned, so the
      term4 loop below raises NameError.
    - ``term3`` uses ``wR`` although ``wR2`` (trace at R^2) is computed and
      otherwise unused -- looks like a typo.
    - ``log(CC(x))/CC(2/pi)`` is suspicious; the eigenvalue exponents should
      presumably be ``log(x)/(2*pi*I)``.  TODO confirm.
    """
    if self._weight < 2 or self._group<>SL2Z:
        return -1
    # main term: rdim*(k-1)/12
    term0 = QQ(self._rdim*(self._weight-1))/QQ(12)
    S,T=SL2Z.gens()
    R = S*T; R2=R*R
    if self._rdim>1:
        # traces of the representation at the elliptic elements
        wS=self._multiplier(S)[0].trace()
        wR=self._multiplier(R)[0].trace()
        wR2=self._multiplier(R2)[0].trace()
        evs = self._multiplier(T)[0].diagonal()
        wT=sum(evs) #self._multiplier(T).trace()
        # exponents alpha_j with rho(T) = diag(e(alpha_j))
        alphas = map(lambda x:log(CC(x))/CC(2/pi), evs)
    else:
        wS=self._multiplier(S)
        wR=self._multiplier(R)
        wR2=self._multiplier(R2)
        wT=self._multiplier(T)
    # elliptic contribution from S (i, order 4)
    z2=CycloidalGroup(4).gen()
    term1 = wS/QQ(4)*z2*z2**(self._weight-1)
    # elliptic contribution from R (rho, order 6)
    z3=CycloidalGroup(6).gen()
    term2 = wR/QQ(3)/sqrt(3)*z2*z3**(self._weight-1)
    term3 = wR/QQ(3)/sqrt(3)*z2*z3**(2*(self._weight-1))
    # parabolic contribution from the eigenvalues of T
    term4=0
    k0=0  # multiplicity of the trivial eigenvalue of rho(T)
    for a in alphas:
        if a<>0:
            term4+=a-0.5
        else:
            k0+=1
    term5 = k0/2*sign(self._weight-1)
    term6 = 0
    if k0<>0:
        if self._weight==1:
            raise ArithmeticError,"Need to compute the scattering determinant!"
    dim = term0 + term1 + term2 + term3 + term4 + term5 + term6
    return dim
## cuspidal subspace
def cuspidal_subspace(self):
    r"""
    Construct the cuspidal subspace of self.

    OUTPUT: a copy of self with the 'weak' flag cleared and the
    'cuspidal' flag set; self itself is left unchanged.
    """
    subspace = copy(self)
    subspace._weak = False      # cusp forms have no poles at the cusps
    subspace._cuspidal = True   # and vanish at all cusps
    return subspace
def set_normalization(self, C=None):
    r"""
    Build the normalization dictionary used by the linear solvers.

    INPUT:

    - ``C`` -- a dictionary of set coefficients in the form C[d][(i,n)]=c,
      or a list of such dictionaries (one per component).

    OUTPUT: dict ``N`` with keys ``comp_dim`` (number of components),
    ``SetCs`` (fixed coefficients per component), ``cuspidal``, ``weak``.

    NOTE(review): with the default ``C=None`` the ``len(C)`` branch below
    raises TypeError -- callers apparently always pass a dict or list.
    NOTE(review): ``mpmath.eps()`` -- mpmath exposes ``mp.eps`` as a
    property, not a module-level callable; verify this attribute exists
    in the mpmath version used.
    """
    N = dict()
    N['comp_dim'] = 1
    if isinstance(C, dict):
        N['comp_dim'] = max(1, len(C.keys()))
    else:
        N['comp_dim'] = max(1, len(C))
    N['SetCs'] = dict()
    N['cuspidal'] = self._cuspidal
    N['weak'] = self._weak
    nc = self._group.ncusps()
    for j in range(N['comp_dim']):
        N['SetCs'][j] = dict()
    if (N['cuspidal']):
        # cuspidal: force the constant term to 0 at every cusp with
        # trivial multiplier value (v(T_i) == 1)
        for icusp in range(nc):
            v = self.alpha(icusp)[1]
            if (v == 1):
                for j in range(N['comp_dim']):
                    N['SetCs'][j][(icusp, 0)] = 0
    if (not N['weak']):
        # non-weak: kill exponentially growing terms (negative shift)
        for icusp in range(nc):
            al = self.alpha(icusp)[0]
            if (al < -mpmath.eps()):
                for j in range(N['comp_dim']):
                    N['SetCs'][j][(icusp, 0)] = 0
    # finally copy the user-supplied fixed coefficients on top
    if isinstance(C, dict) and C <> {}:
        for i in C.keys():
            for (r, n) in C[i].keys():
                N['SetCs'][i][(r, n)] = C[i][(r, n)]
    elif isinstance(C, list) and C <> []:
        for i in range(len(C)):
            for (r, n) in C[i].keys():
                N['SetCs'][i][(r, n)] = C[i][(r, n)]
    return N
def get_Y_and_M(self, digs=10, principal_part=[{}]):
    r"""
    Get good values for Y and M to truncate with an error of prec digits.

    INPUT:

    - ``digs`` -- requested number of correct digits.
    - ``principal_part`` -- principal part(s); dict with key '+' or a
      list of such dicts.

    OUTPUT: ``[Y, M]`` -- height of the horocycle and truncation point.

    NOTE(review): the default ``principal_part=[{}]`` is a mutable default
    argument (shared between calls); it is only read here, but any caller
    mutating it would corrupt the default.

    We use the largest term of the principal part to estimate Y and M.
    """
    # Normalize the input to a list of '+'-parts.
    if not isinstance(principal_part, list):
        pp = [principal_part['+']]
    elif len(principal_part) == 1:
        if principal_part[0].has_key('+'):
            pp = [principal_part[0]['+']]
        else:
            pp = [principal_part[0]]
    else:
        pp = list()
        for p in principal_part:
            pp.append(p['+'])
    # Find the largest coefficient and the most negative shifted index.
    maxn = 0; maxa = 1; maxr = 0
    for P in pp:
        for (r, n) in P.keys():
            a = P[(r, n)]
            aln = n + self.alpha(r)[0]
            if (a > maxa):
                maxa = a
            if (aln < maxn):
                maxr = r
                maxn = aln
    if self._verbose > 1:
        print "maxa=", maxa
        print "maxn=", maxn
        print "digits=", digs
    if not self._holomorphic or self._weak:
        # weak / non-holomorphic: use the harmonic-weak-Maass estimate
        maxa = maxa * len(pp)
        pp_max = {(maxr, maxn): maxa}
        [Y, M] = get_Y_and_M_for_hwmf(self._group, pp_max, self._weight, digs)
    else:
        # holomorphic: take Y slightly below the minimal height of the group
        Y = self._group.minimal_height()*mpmath.mpf(95)/mpmath.mpf(100)
        M = get_M_for_holom(Y, self._weight, digs)
    return [Y, M]
## def get_element(self,principal_part={},C=None,prec=10,M0_in=None,Y_in=None):
## r"""
## Get an element of self given by either principal part
## or normalization of coefficients.
## """
## print "self.type=",type(self)
## F=AutomorphicFormElement(self,principal_part=principal_part)
## if(Y_in<>None and M0_in<>None):
## Y=Y_in
## M=M0_in
## elif(Y_in<>None):
## Y=Y_in
## if(not self._holomorphic):
## M=get_M_for_hwmf(Y,self._weight,prec,principal_part)
## else:
## M=get_M_for_holom(Y,self._weight,prec)
## elif(M0_in<>None):
## M=M0_in
## Y=self._group.minimal_height()*mpmath.mpf(95)/mpmath.mpf(100)
## else:
## [Y,M]=self.get_Y_and_M(prec,principal_part)
## #Y=Y*0.7
## Q=M+30
## Ymp=mpmath.mp.mpf(Y)
## if(not (principal_part.has_key('+') or principal_part.has_key('-'))):
## raise ValueError,"Need principal part with keys '+' and '-'!"
## PP=principal_part
## V=setup_matrix_for_harmonic_Maass_waveforms_sv(self,Ymp,M,Q,PP)
## V['PP']=PP
## print "Use M,Y=",M,Y
## # recall that the zeroth coefficient is counted in the principal part
## return V
## if(C==None):
## C=dict()
## for j in range(len(self._group.cusps())):
## al=self.alpha(j)
## print "al(",j,")=",al
## if(al[1]==1):
## if(PP.has_key((j,0))):
## for r in C.keys():
## C[r]=dict()
## C[r][(j,0)]=0
## print "C=",C
## N=self.set_normalization(C)
## print "N=",N
## D=solve_system_for_harmonic_weak_Maass_waveforms(V,N,deb=True)
## F._coeffs=D
## return F
def _get_element(self, principal_part, digs=10, dbase_prec=None, SetC=None, SetY=None, SetM=None, SetQ=None, do_mpmath=0, get_mat=False, get_c=False, gr=0, version=0):
    r"""
    Compute element(s) of self determined by a principal part and/or a set
    of fixed Fourier coefficients, by setting up and solving a linear system.

    INPUT:

    - `principal_part` -- principal part or list of principal parts;
      PP[c,m]=a if the principal part at cusp c contains a*q^m
    - `digs` -- integer (default 10): the number of requested digits
    - `dbase_prec` -- integer (default None): if set, use this number of
      digits for precision in all mpmath calculations
      (NOTE(review): currently unused below).
    - `SetC` -- dictionary (or list of dictionaries, one per principal
      part) containing fourier coefficients to keep fixed (and their
      values) of the form SetC[n][i]=c_i(n)
    - `SetY`, `SetM`, `SetQ` -- override the automatic choice of height
      Y, truncation M and sampling points Q.
    - `do_mpmath` -- 1/2 selects legacy mpmath matrix setup routines.
    - `get_mat` -- NOTE(review): accepted but never used.
    - `get_c` -- if True, return the raw coefficient vectors.
    - `gr` -- if 1, return the assembled matrix V instead of solving.
    - `version` -- selects between the setup+solve paths.

    OUTPUT: a single form element, or a list of them (one per principal
    part), unless ``gr``/``get_c`` short-circuit as described above.
    """
    ## the principal part and the SetC should be lists if present
    if self._verbose > 0:
        print "PP=", principal_part
    if (not isinstance(principal_part, list)):
        ppart = [principal_part]
    else:
        ppart = principal_part
    # Normalize each principal part to a dict with '+' and '-' keys.
    ppart1 = list()
    for pp in ppart:
        d = dict()
        d['-'] = pp.get('-', {})  # By default we have no princ. part
        d['+'] = pp.get('+', {})
        if isinstance(pp.keys()[0], (list, tuple)):
            d['+'] = pp  # If only one is given we assume it holomorphic
        if self._holomorphic:  # Make sure no non-holom. ppart is given for a holom. form
            d['-'] = {}
        ppart1.append(d)
    if self._verbose > 0:
        print "PP1=", ppart1
    ppart = ppart1
    ## Check whether the set coefficients are the same for all elements
    ## if they are the same we only need to solve the system once (using LU decomposition).
    ## otherwise we need to rerun the system solving several times
    if SetC <> None and not isinstance(SetC, list):
        # one dict given: replicate it for every principal part
        setc = list()
        for i in range(len(ppart)):
            setc.append(SetC)
    elif not SetC:
        setc = []
    else:
        setc = SetC
    if len(setc) > 0 and len(setc) <> len(ppart):
        raise ValueError, "Inconsistent lengths of principal part and set coefficients!"
    # recall that we treat 0-coefficients in the principal part
    # as variables.
    if self._verbose > 0:
        print "setc0=", setc
    # Constant terms appearing in the principal part are pinned as set
    # coefficients as well.
    for i in range(len(setc)):
        for j in range(self.group().ncusps()):
            if ppart[i]['+'].has_key((j, 0)):
                setc[i][(j, 0)] = ppart[i]['+'][(j, 0)]
    if self._verbose > 0:
        print "setc1=", setc
    # solve_once == True iff all components fix the same coefficient keys
    solve_once = True
    if setc <> None and len(setc) > 0:
        d = setc[0].keys()
        for c in setc:
            if c.keys() <> d:
                solve_once = False
                break
    if self._verbose > 0:
        print "solve_once=", solve_once
        print "ppart=", ppart
        print "setc=", setc
    # Choose Y (height), M (truncation) and Q (sampling points).
    if (not (SetY and SetM)):
        [Y, M] = self.get_Y_and_M(digs, ppart)
    if (SetY <> None):
        Y = SetY
    if (SetM <> None):
        M = SetM
    if SetQ <> None and SetQ > M:
        Q = SetQ
    else:
        Q = M + 10
    mpmath.mp.prec = self._prec
    Ymp = RealField(self._prec)(Y)
    dpold = mpmath.mp.dps
    if self._verbose > 0:
        print "dps=", mpmath.mp.dps
        print "setc=", setc
        print "Y=", Ymp
        print "M=", M
        print "Q=", Q
        print "PP=", ppart
        print "do_mpmath=", do_mpmath
        print "alphas=", self.alphas()
    # Assemble the linear system (V) or solve directly (C), depending on flags.
    C = None
    if do_mpmath == 1:
        V = setup_matrix_for_harmonic_Maass_waveforms_sv_bak(self, Ymp, M, Q, ppart)
    elif do_mpmath == 2:
        V = setup_matrix_for_harmonic_Maass_waveforms_sv_bak_22(self, Ymp, M, Q, ppart)
    elif version == 0 or gr == 1:
        V = setup_matrix_for_harmonic_Maass_waveforms_sv(self, Ymp, M, Q, ppart)
    else:
        C = setup_and_solve_for_harmonic_Maass_waveforms(self, Ymp, M, Q, ppart, cset=setc)
    if gr == 1:
        return V
    if solve_once and C == None:
        # Single solve with one normalization for all components.
        if do_mpmath == 0:
            N = self.set_normalization(setc)
        else:
            N = self.set_norm(setc)
        V['PP'] = ppart
        if self._verbose > 0:
            print "N=", N
        if do_mpmath <> 0:
            C = solve_system_for_harmonic_weak_Maass_waveforms_mpmath(V, N)
        else:
            C = solve_system_for_harmonic_weak_Maass_waveforms(V, N)
    elif C == None:
        # Different normalizations per component: solve column by column.
        C = list()
        RHS = V['RHS']
        if RHS.cols <> len(ppart):
            raise ValueError, "Inconsistent lengths of principal part and right hand sides!"
        for i in range(len(ppart)):
            pp = [ppart[i]]; cc = [setc[i]]
            V['PP'] = pp
            V['RHS'] = RHS.column(i)
            if self._verbose > 1:
                print "cc=", cc
                print "pp=", pp
            N = self.set_norm(ppart, setc)
            if self._verbose > 1:
                print "N=", N
            try:
                C.append(solve_system_for_harmonic_weak_Maass_waveforms(V, N)[0])
            except:
                # NOTE(review): bare except hides the real error, and
                # ``return C.append(...)`` always returns None since
                # list.append returns None.  Probably meant to return (V,N).
                return C.append((V, N))
    # Wrap the coefficient vectors in form elements of the right type.
    res = list()
    if get_c:
        return C
    prec = mpmath.mp.dps
    if len(C) > 0:
        for i in range(len(C)):
            ppf = dict()
            ppf['+'] = ppart[i]['+']
            ppf['-'] = ppart[i]['-']
            if self._verbose > 1:
                print "type=", type(self)
            if str(type(self)).find("HalfIntegralWeightForms") > 0:
                F = HalfIntegralWeightFormElement(self, C[i], principal_part=ppf)
            elif str(type(self)).find("HarmonicWeakMaassForms") > 0:
                if self._verbose > 1:
                    print "Constructing a Harmonic Weak Maassform"
                    print "pp=", ppf
                F = HarmonicWeakMaassFormElement(self, C[i], prec=prec, principal_part=ppf)
            else:
                F = AutomorphicFormElement(self, C[i], prec=prec, principal_part=ppf)
            F._M0 = M
            res.append(F)
    if len(res) == 1:
        return res[0]
    else:
        return res
### Now define subclasses which specialize the above general space.
class HalfIntegralWeightForms(AutomorphicFormSpace):
    r"""
    Space of half-integral weight modular forms forms (with theta multiplier as default).

    Can be initialized either directly from a group/level, or from an
    integral-weight space via the Shimura correspondence.  If Magma is
    available a corresponding Magma space is attached for q-expansions.
    """
    def __init__(self, G, weight=1/2, multiplier="theta", character=0, holomorphic=True, weak=False, cuspidal=False, dprec=25, verbose=0, construct=True):
        r""" Initialize the space of automorphic forms.

        INPUT:

        - ``G`` -- a MySubgroup, an integer level, a subgroup of SL2Z, or
          an integral-weight space (then we initialize via the Shimura
          correspondence).
        - ``multiplier`` -- "theta" (default) selects the theta multiplier.
        - ``character`` -- an integer character index, or a pair
          (modulus, character number).
        - construct == False => do not construct the magma space (might take time)
        """
        # we can initialize a half-integral weight space
        # from an integral weight one, i.e. with Shimura correspondence
        self._shimura_image = None
        self._character = character
        self._basis_len = 0
        if (hasattr(G, "group") and hasattr(G, "weight")):
            # G is an integral-weight space: go to weight k/2 + 1/2 on Gamma0(4N)
            k = G.weight()
            if (is_even(k)):
                weight = QQ(k/2) + QQ(1)/QQ(2)
            else:
                raise ValueError, "Shimura Correspondence only for even weight!"
            # NOTE(review): ``G.character().is_trivial`` is missing the call
            # parentheses -- a bound method is always truthy, so this check
            # never raises.  Presumably ``is_trivial()`` was intended.
            if (not G.character().is_trivial):
                raise NotImplementedError, "We only deal with trivial character for now!"
            self._shimura_image = G
            if (hasattr(G, "is_cuspidal") and G.is_cuspidal()):
                cuspidal = True
            else:
                cuspidal = False
            # by default we go to Gamma0(4N)
            multiplier = "theta"
            G = MySubgroup(Gamma0(4*G.level()))
            print "Initializing through Shimura corr!"
        # Normalize G to a MySubgroup and remember what we constructed from.
        if (isinstance(G, MySubgroup)):
            self._group = G
            self._from_group = G._G
        elif is_int(G):
            self._group = MySubgroup(Gamma0(G))
            self._from_group = Gamma0(G)
        elif (hasattr(G, 'is_subgroup') and G.is_subgroup(SL2Z)):
            self._group = MySubgroup(G)
            self._from_group = G
        else:
            raise ValueError, "Did not get a group G={0}".format(G)
        if multiplier == "theta":
            # Resolve the character specification into (modulus, number).
            if isinstance(self._character, int):
                modulus = self._group._level; ch = self._character
            elif isinstance(self._character, tuple):
                modulus, ch = self._character
            multiplier = ThetaMultiplier(self._group, dchar=(modulus, ch), weight=weight)
            character = multiplier._character
            self._character = character
        # fix consistency, i.e. change to conjugate if necessary:
        t1 = multiplier.is_consistent(weight)
        if not t1:
            multiplier.set_dual()
            t2 = multiplier.is_consistent(weight)
            if not t2:
                raise ValueError, " Could not find consistent multiplier for multiplier: %s and weight %s!" % (multiplier, weight)
        # construct the space with the given multiplier
        AutomorphicFormSpace.__init__(self, G, weight=weight, multiplier=multiplier, holomorphic=holomorphic, weak=weak, cuspidal=cuspidal, dprec=dprec, verbose=verbose)
        self._magma_space = None
        # If we have Magma installed we associate Magma's space
        # unfortunately characters are only implemented for Gamma_1(N) in magma
        if construct:
            if character == None:
                try:
                    s = "HalfIntegralWeightForms("+str(self._group.level())+","+str(QQ(weight))+")"
                    self._magma_space = magma.new(s)
                # NOTE(review): Python 2 'except TypeError,RuntimeError' binds
                # the caught TypeError *instance* to the name RuntimeError --
                # only TypeError is actually caught here.  Probably
                # 'except (TypeError, RuntimeError):' was intended.
                except TypeError, RuntimeError:
                    pass
            else:
                try:
                    ## Want to find the corresponding magma character
                    D1 = DirichletGroup(self._group.level())
                    s = "DirichletGroup("+str(self._group.level())+")"
                    Dm = magma.new(s)
                    magma_index_char = -1
                    # Magma uses 1-based indexing.
                    for j in range(1, len(Dm.Elements())+1):
                        x = Dm.Elements()[j]
                        try:
                            # compare values on all residues; StopIteration
                            # is (ab)used to signal a mismatch
                            for i in range(self._group.level()):
                                if (x(i) <> character(i)):
                                    raise StopIteration()
                            # if we are here we have found the correct character
                            magma_index_char = j
                            break
                        except StopIteration:
                            pass
                    if (magma_index_char >= 0):
                        i = magma_index_char
                        s = "HalfIntegralWeightForms(Elements(DirichletGroup("
                        s = s+str(self._group.level())+"))["+str(i)+"],"+str(QQ(weight))+")"
                        print "S=", s
                        self._magma_space = magma.new(s)
                    else:
                        print "Could not construct a corresponding space in Magma!"
                except TypeError:
                    self._magma_space = None
                    pass
        if isinstance(self._multiplier, ThetaMultiplier):
            # Attach the Shimura image M_k / S_k with k = 2*(weight - 1/2).
            w_minus_half = QQ(RR(weight))-QQ(1)/QQ(2)
            k = 2*ZZ(w_minus_half)
            if (self._shimura_image == None and weight > 1):
                N = ZZ(self._group.level()/4)
                if character == None or (character**2).is_trivial():
                    if cuspidal:
                        self._shimura_image = CuspForms(N, k)
                    else:
                        self._shimura_image = ModularForms(N, k)
                else:
                    DG = DirichletGroup(N)
                    x = DG(character**2)
                    if cuspidal:
                        self._shimura_image = CuspForms(x, k)
                    else:
                        self._shimura_image = ModularForms(x, k)
        self._construct = construct
        self._dimension = self.dimension()
    ##
    ## Using Magma we can compute certain things explicitly
    def _magma_dimension(self):
        r"""
        If we have a holomorphic space we can use magma to compute the dimension. """
        try:
            return self._magma_space.Dimension()
        except:
            raise NotImplementedError, "Currently we need magma for this functionality!"
    def basis(self, prec=None, method=None):
        # Return a q-expansion basis; try Magma first, fall back to the
        # numerical method, unless a method is given explicitly.
        if method == None:
            # try magma first
            try:
                res = self.q_expansion_basis(prec, method='magma')
            except ValueError:
                res = self.q_expansion_basis(prec, method='numerical')
        else:
            res = self.q_expansion_basis(prec, method=method)
        return res
    def q_expansion_basis(self, prec=None, method=None):
        r"""
        If we have a holomorphic space we can use magma to compute a basis.

        INPUT:

        - ``prec`` -- number of q-expansion coefficients.
        - ``method`` -- 'magma' or 'numerical'.
        """
        if method == 'magma':
            if self._magma_space == None:
                raise ValueError, "Magma not supported here. Choose different method!"
            else:
                if (prec <> None):
                    return list(self._magma_space.Basis(prec))
                else:
                    return list(self._magma_space.Basis())
        elif method == 'numerical':
            if prec == None:
                Y = mpmath.mpf(0.95)*self._group.minimal_height()
                prec = get_M_for_holom(Y, self._weight, self._dprec)
            if self._verbose > 0:
                # NOTE(review): ``M`` is undefined in this scope -- this
                # print raises NameError when verbose > 0; presumably
                # ``prec`` was intended.
                print "Using numerical method with %s coefficients " % M
            B = self.basis_numerical(prec)
            res = []
            for f in B:
                l = []
                for n in range(prec):
                    l.append(f.C(n))
                res.append(l)
            return res
        else:
            raise NotImplementedError, "Currently supported methods are 'magma' and 'numerical'! Got: %s" % method
    def assert_triangular_basis(self):
        r"""
        Check that the basis of self is upper-triangular.

        OUTPUT: True/False.  Uses StopIteration to bail out at the first
        violating coefficient.
        """
        B = self.basis()
        d = len(B)
        try:
            for i in range(d):
                for j in range(d):
                    c = B[i].Coefficient(j)
                    # fail on a zero diagonal entry or a nonzero off-diagonal one
                    if (i == j and c == 0 or i <> j and c <> 0):
                        raise StopIteration()
        except StopIteration:
            return False
        return True
    def plus_space_qseries(self, prec=None):
        r"""
        Get a basis for the Kohnen plus space, i.e. with coefficients satisfying n=0 or (-1)^(k-1/2) mod 4.

        OUTPUT: dict with keys 'plus' (basis elements in the plus space)
        and 'compl' (the remaining basis elements).
        """
        kappa = ZZ((2*self._weight-1)/2)
        if (is_even(kappa)):
            sgn = 1
        else:
            sgn = -1
        d = self.dimension()
        dd = self._shimura_image.dimension()
        B = self.q_expansion_basis()
        C = dict()
        nmax = self._shimura_image.sturm_bound()**2  # should be enough
        # but in order to represent a Hecke operator T(p^2) with p not dividing the level we might need more
        for p in range(3, 1+next_prime(self.level())):
            if (self.level() % p <> 0):
                break
        print "p=", p
        print "nmax=", nmax
        nmax2 = nmax*p*p  # How many coefficients we need to make a Hecke basis for Tp
        # Now sort basis elements by whether coefficients vanish outside
        # the allowed residues mod 4.
        B0 = list(); B1 = list()
        for j in range(d):
            F = B[j]
            v0 = True
            for n in range(nmax):
                tn = (n*sgn) % 4
                if (F.Coefficient(n) <> 0 and tn <> 0 and tn <> 1):
                    v0 = False
                    break
            if (v0):
                B0.append(B[j])
            else:
                B1.append(B[j])
        return {'plus': B0, 'compl': B1}
    def Hecke_eigen_basis(self, p):
        r"""
        Return a basis of Hecke eigenfunctions of self.
        """
        raise NotImplementedError
    def HeckeMatrix(self, p=None, F=None):
        r"""
        Return the matrix of the Hecke Operator T(p^2) on
        F, which should span a subspace of self.

        INPUT:

        - ``p`` -- prime (default: smallest prime not dividing the level).
        - ``F`` -- a single basis element; by default the whole basis is used.

        OUTPUT: the matrix P*V*P^-1 where V is computed in the (possibly
        permuted) upper-triangular basis and P records the permutation.
        """
        if (F == None):
            B = self.basis()
        else:
            B = [F]
        d = len(B)
        print "B=", B
        first_non_zero = -1
        # if we didn't supply a prime we find the first p which doesn't divide the level
        if (p == None):
            for p in prime_range(next_prime(self.level()+1)):
                if (not p.divides(self.level())):
                    break
            print "Using p=", p
        nmax = self._shimura_image.sturm_bound()**2
        # Check that we have linearly the basis is in upper triangular forms
        # and if not we try to permute the basis to achieve that
        ok = True
        fnz = dict()  # fnz[j] = index of the first non-zero coefficient of B[j]
        # if we have to permute the basis we can go back again
        P = identity_matrix(ZZ, d)
        try:
            for i in range(nmax):
                first_non_zero = -1
                ok = True
                for j in range(d):
                    # B[j] must vanish up to the first_non_zero of the previous element
                    for n in range(0, first_non_zero+1):
                        cn = B[j].Coefficient(n)
                        if (self._verbose > 2):
                            print "B1[", j, "][", n, "]=", cn
                        if (cn <> 0):
                            ok = False
                            break
                    if (not ok):
                        # need to swap this with next element
                        if (self._verbose > 2):
                            print "swap ", j, "<-->", j-1
                        for k in range(d):
                            if (k <> j-1):
                                P[j, k] = 0
                                P[k, j] = 1
                        P[j, j-1] = 1; P[j-1, j] = 1
                        F = B[j-1]
                        B[j-1] = B[j]; B[j] = F
                        break
                    # record the first non-zero coefficient of B[j]
                    for n in range(first_non_zero+1, nmax):
                        cn = B[j].Coefficient(n)
                        if (cn <> 0):
                            first_non_zero = n
                            fnz[j] = n
                            break
                # NOTE(review): after a swap this breaks out and raises the
                # ArithmeticError below instead of retrying with the permuted
                # basis -- given the retry loop over i (and the comment above),
                # 'continue' was probably intended here.  TODO confirm.
                if (not ok):
                    break
                else:
                    raise StopIteration()
            raise ArithmeticError, "Could not bring basis in upper triangular form!"
        except StopIteration:
            # success: the basis is upper triangular (possibly after permutation)
            pass
        if (self._verbose > 2):
            print "B=", B
            print "fnz=", fnz
        V = matrix(QQ, d, d)
        for j in range(d):
            for n in range(d):
                # figure out how many multiples of F[n] is in Tp[F[j]]
                k = fnz[n]
                a = self.HeckeOperatorBn(B[j], p, k)
                V[j, n] = QQ(a)/QQ(B[n].Coefficient(k))
        return P*V*P**-1
    def HeckeOperatorBn(self, f, p, n):
        r"""
        Return the n-th Fourier coefficient of T(p^2)f, following the
        standard formula for Hecke operators on half-integral weight forms:
        b(n) = a(np^2) + chi(p)*((-1)^k eps n / p)*p^(k-1)*a(n)
               + chi(p^2)*p^(2k-1)*a(n/p^2),
        where k = (2*weight-1)/2 and the last term occurs only if p^2 | n.
        """
        eps = self._character(-1)
        k = ZZ(QQ(2*self._weight-1)/QQ(2))
        t1 = self._character(p)*kronecker(eps*n*(-1)**k, p)*p**(k-1)
        b = f.Coefficient(n*p*p)+t1*f.Coefficient(n)
        if (ZZ(p*p).divides(n)):
            nn = ZZ(QQ(n)/QQ(p*p))
            b = b+p**(2*k-1)*f.Coefficient(nn)
        return b
    def Ur2Operator(self, f, r, prec=12):
        r"""
        Compute the action of U(r^2) on the Fourier expansion of f at infinity.

        OUTPUT: a power series over QQ with coefficients a(n*r^2).

        NOTE(review): the dict ``v`` built below is never used, and
        ``S(c, prec)`` is called with a dict -- verify that the power
        series ring constructor accepts this in the Sage version used.
        """
        c = dict()
        B = self.basis()
        if (prec < len(B)):
            prec = len(B)+1
        for n in range(prec):
            c[n] = f.Coefficient(n*r**2)
        # we can also express U(r^2) in terms of the basis of self
        # assuming we have an upper-triangular basis
        v = dict()
        for d in range(len(B)):
            v[d] = c[d]
        S = PowerSeriesRing(QQ, 'q')
        return S(c, prec)
    def basis_numerical(self, digs=15, numc=0, SetM=None, SetY=None, **kwds):
        r"""
        Computes a numerical basis, i.e. using the automorphy method.
        Note that this includes expansions at all cusps.
        INPUT:
        -`digs` -- number of correct digits wanted
        -`numc` -- number of coefficients requested for each basis element
        -`SetM` -- compute only this number of coefficients
        -`SetY` -- compute using this hieght of the horocycle

        The basis is cached in ``self._basis_numerical`` and reused as long
        as at least ``numc`` coefficients were computed.
        """
        # accept case variants of the keyword names
        if SetM == None:
            SetM = kwds.get("setM", kwds.get("setm", kwds.get("Setm", None)))
        if SetY == None:
            SetY = kwds.get("setY", kwds.get("sety", kwds.get("Sety", None)))
        # one problem is that unless the character is trivial
        # and magma is installed we don't know the dimension
        # so we have to compute that also
        if (self._basis_numerical and self._basis_len >= numc):
            return self._basis_numerical
        d = self.dimension()
        # Set up a "unit matrix" normalization: basis element i has
        # coefficient 1 at index i+offset and 0 at the others.
        setc = list(); pp = list()
        offset = 0
        if (self.is_cuspidal()):
            offset = 1
        for i in range(d):
            setc.append({})
            pp.append({'+': {}, '-': {}})
            for j in range(d):
                setc[i][(0, j+offset)] = 0
            setc[i][(0, i+offset)] = 1
            pp[i]['+'][(0, 0)] = 0
        if (not self.is_cuspidal()):
            if (len(pp) == 0):
                pp.append({'+': {(0, 0): 1}, '-': {}})
                setc.append({})
            else:
                pp[0]['+'][(0, 0)] = 1
        [Y, M] = self.get_Y_and_M(digs=digs, principal_part=pp)
        if SetM:
            M = SetM
        if M < numc and numc > 0:
            M = numc
        if SetY:
            Y = SetY
        # first get the matrix
        Q = M + 10
        if self._verbose > 0:
            print "M,Q,Y=", M, Q, Y
        # NOTE(review): V is computed here but never used -- _get_element
        # below sets up the system again.  Possibly redundant work.
        V = setup_matrix_for_harmonic_Maass_waveforms_sv(self, Y, M, Q, pp)
        # Fix normalizations
        if self._verbose > 1:
            print "pp=", pp
            print "setc=", setc
        B = self._get_element(pp, SetC=setc, SetM=M, SetY=Y)
        if (not isinstance(B, list)):
            self._basis_numerical = [B]
        else:
            self._basis_numerical = B
        self._basis_len = M
        return self._basis_numerical
    def xi_k_inverse_pp_mpmath(self, G, digs=6, true_value=True):
        r"""
        Computes principal parts for a set of Harmonic weak Maass forms {f_1,...,f_d} with the property
        that {f_i,g_j}=delta_{ij} where {g_1,...,g_d} is a basis of self.
        INPUT:
        - true_value -- True if we use e.g. -1 or (32)**(-1/4) etc.
        instead of the floating point approximations.

        mpmath version of ``xi_k_inverse_pp``.

        NOTE(review): the loop below ranges over ``G._space`` even when
        ``G`` is a list (then GG=G and F iterates over it) -- this would
        raise AttributeError for list input.  TODO confirm intended usage.
        """
        # set the normalization from G
        pp = list()
        eps = mpmath.power(10, -digs)
        if (not isinstance(G, list)):
            GG = [G]
        else:
            GG = G
        for F in GG:
            d = {'-': {}, '+': {}}
            for i in range(G._space._group.ncusps()):
                ## If we try to figure out the true value of the frst coefficients at each cusp
                if (true_value):
                    if (self.alpha(i)[1] <> 1):
                        continue
                    # snap |c|^4 and arg(c)/pi to nearby rationals and rebuild c
                    c = mpmath.mp.mpc(F.C(i, 0).conjugate())
                    x = rational_approximation(abs(c)**4, eps)
                    ar = mpmath.arg(c)/mpmath.mp.pi()
                    ar_rat = rational_approximation(ar, eps)
                    ab = mpmath.power(abs(x), mpmath.mpf(0.25))
                    cc = mpmath.mp.exp(mpmath.mpc(0, ar_rat*mpmath.mp.pi()))*ab
                    # kill numerically-zero real/imaginary parts
                    cx = cc.real
                    cy = cc.imag
                    if (abs(cx) < eps and abs(cy) < eps):
                        cc = mpmath.mpc(0, 0)
                    elif (abs(cx) < eps):
                        cc = mpmath.mpc(0, cy)
                    elif (abs(cy) < eps):
                        cc = mpmath.mpc(cx, 0)
                    d['-'][(i, 0)] = cc
                else:
                    d['-'][(i, 0)] = F.C(i, 0).conjugate()
            pp.append(d)
        return pp
    def xi_k_inverse_pp(self, G, digs=6, true_value=True):
        r"""
        Computes principal parts for a set of Harmonic weak Maass forms {f_1,...,f_d} with the property
        that {f_i,g_j}=delta_{ij} where {g_1,...,g_d} is a basis of self.
        INPUT:
        - true_value -- True if we use e.g. -1 or (32)**(-1/4) etc.
        instead of the floating point approximations.

        Sage RealField/ComplexField version of ``xi_k_inverse_pp_mpmath``.
        """
        # set the normalization from G
        pp = list()
        RF = RealField(self._prec)
        CF = ComplexField(self._prec)
        eps = RF(10)**-digs
        if (not isinstance(G, list)):
            GG = [G]
        else:
            GG = G
        for F in GG:
            d = {'-': {}, '+': {}}
            for i in range(G._space._group.ncusps()):
                ## If we try to figure out the true value of the frst coefficients at each cusp
                if (true_value):
                    if (self.alpha(i)[1] <> 1):
                        continue
                    # snap |c|^4 and arg(c)/pi to nearby rationals and rebuild c
                    c = F.C(i, 0).conjugate()
                    x = rational_approximation(abs(c)**4, eps)
                    # imag may be an attribute (mpmath) or a method (Sage)
                    if isinstance(c.imag, (int, mpf)):
                        ic = c.imag
                    else:
                        ic = c.imag()
                    if ic == 0:
                        ar = 0
                    else:
                        ar = c.argument()/RF.pi()
                    ar_rat = rational_approximation(ar, eps)
                    ab = RF(abs(x))**RF(0.25)
                    cc = CF(0, ar_rat*RF.pi()).exp()*ab
                    # kill numerically-zero real/imaginary parts
                    cx = cc.real()
                    cy = cc.imag()
                    if (abs(cx) < eps and abs(cy) < eps):
                        cc = CF(0)
                    elif (abs(cx) < eps):
                        cc = CF(0, cy)
                    elif (abs(cy) < eps):
                        cc = CF(cx, 0)
                    d['-'][(i, 0)] = cc
                else:
                    d['-'][(i, 0)] = F.C(i, 0).conjugate()
            pp.append(d)
        return pp
    def xi_k_inverse_basis(self, digs=10, pp_in=None, **kwds):
        r"""
        Computes a set of Harmonic weak Maass forms {f_1,...,f_d} with the property
        that {f_i,g_j}=delta_{ij} where {g_1,...,g_d} is a basis of self.
        INPUT:
        -`digs` -- number of digits precision in result
        -`pp_in` -- principal part (if we want a specific p.part set)
        `**kwds` -- extra argument which propagates to algorithms computing coefficients
        """
        B = self.basis_numerical(digs=digs, **kwds)
        H = HarmonicWeakMaassForms(self)
        M = H.modular_forms_subspace()
        M._weight = H.weight()
        BB = M.basis_numerical()  ## we want to "project away" these
        ## we want to know which coefficients are non-zero
        # cb[n] == len(BB) iff coefficient n is non-zero for ALL elements of BB
        cb = dict()
        for n in range(1, len(BB)+1):
            cb[n] = 0
            for f in BB:
                cn = f.C(n)
                if (cn <> 0):
                    cb[n] = cb[n]+1
            if (cb[n] < len(BB)):
                cb[n] = 0
        if (cb.values().count(0) == len(cb.values())):
            raise ValueError, "Could not find good coefficients!"
        FF = list()
        setc = list()
        pp = list()
        eps = 1E-6
        for G in B:
            # get the correct principal part
            p = self.xi_k_inverse_pp(G, digs=6, true_value=True)[0]
            # if we want to set some specific part of the principal parts
            if (pp_in):
                for (r, n) in pp_in.keys():
                    p[(r, n)] = pp_in[(r, n)]
            pp.append(p)
            ## If G is in the + space we set some c's too...
            sc = dict()
            for j in range(1, 4):
                if self._verbose > 1:
                    print "C(0", j, ")=", G.C(0, j)
                if (abs(G.C(0, j)) < eps):
                    sc[(0, -j)] = 0
            # We have to guess how to get rid of any holomorphic forms...
            for n in range(1, len(BB)+1):
                if (cb[n] == len(BB)):
                    sc[(0, n)] = 0
                    break
            setc.append(sc)
        # need the correct constant-zero part too...
        H._verbose = 2
        if self._verbose > 1:
            print "princ_part=", pp
            print "setc=", setc
        FF = H._get_element(pp, digs=digs, SetC=setc, **kwds)
        return FF
class AutomorphicFormElement(SageObject):
    r"""
    A member of a generic space of automorphic forms.
    """
    def __init__(self,M,C=None,prec=53,principal_part=None,verbose=0):
        r"""
        Initialize an automorphic form.
        INPUT:
        -''M'' -- Space of automorphic forms
        -''k'' -- Weight.
        -''C''-- Fourier coefficients
        -''prec'' -- integer, precision (if given by construction, default None)
        -''principal_part -- principal part in dictionary
        '+' : principal part of c^{+}
        '-' : principal part of c^{-}
        EXAMPLES:
        """
        # TODO: fix this. It doesn't work for inherited classes...
        #if(not isinstance(M,AutomorphicFormSpace)):
        #    raise TypeError,"Need an element of AutomorphicFormSpace. got %s" %M
        if M._verbose>1:
            print "MM=",M
        # Duck-typed check instead of isinstance (see TODO above).
        if(not hasattr(M,"_is_automorphic_form_space")):
            raise TypeError,"Need an element of AutomorphicFormSpace. got %s" %M
        if(C <> None):
            # We need the correct length of the coefficient vector
            # NOTE(review): self._sym_type is read below before any attribute
            # of self is set in this __init__ -- if len(C)<d1 this raises
            # AttributeError unless a subclass set it earlier; verify.
            # NOTE(review): d2 is computed but never used.
            d1=M._rdim
            d2=len(M._group.cusps())
            if len(C.keys()) > d1 or (len(C.keys())<d1 and self._sym_type==None):
                # If we have smaller amount we believe there is a symmetry at work...
                #or (d1==1 and len(M._group._cusps)<>len(C.keys()))):
                raise ValueError,"Coefficient vector of wrong format! Got length=%s" % len(C)
        self._space=M
        self._coeffs=C
        self._prec=prec
        self._base_ring=MPComplexField(prec)
        self._verbose=verbose
        self._maxdigs=prec # the number of digits needed to be displayed to print all digits of the coefficients
        if(principal_part):
            self._principal_part=principal_part
        else:
            self._principal_part={'+':{},'-':{}}
        self._is_automorphic_form=True
        # Subclasses set _class_name before calling us; used by __add__ to
        # reconstruct an element of the correct subclass.
        if(not hasattr(self,"_class_name")):
            self._class_name="AutomorphicFormElement"
    def __reduce__(self):
        r"""
        Used for pickling self.

        Returns the (callable, args) pair that pickle uses to reconstruct
        this element.
        NOTE(review): this always reconstructs as the base class
        AutomorphicFormElement even for subclass instances -- confirm intended.
        """
        #return(HarmonicWeakMaassFormElement,(self.space,self.principal_part.items(),self.coeffs,self.prec))
        return(AutomorphicFormElement,(self._space,self._coeffs,self._prec,self._principal_part))
def __cmp__(self,other):
r"""
Compare self to other
"""
if(not isinstance(other,type(self))):
return False
eq = (self._space == other._space)
eq = eq and (self._principal_part == other._principal_part)
if(not eq):
return False
# need to check coefficients
if(self._coeffs.keys() <> other._coeffs.keys()):
return False
for r in self._coeffs.keys():
if(self._coeffs[r].keys() <> other._coeffs[r].keys()):
return False
for n in self._coeffs[r].keys():
if(self._coeffs[r][n].keys() <> other._coeffs[r][n].keys()):
return False
    def _repr_(self):
        r""" Return string representation of self.
        EXAMPLES:
        sage: WR=WeilRepDiscriminantForm(11,dual=True)
        sage: M=VVHarmonicWeakMaassForms(WR,0.5,100)
        sage: PP={(7/22,0):1}
        sage: F=M.get_element(PP,12);F
        Element of Space of Vector-Valued harmonic weak Maass forms on Modular Group SL(2,Z) of weight 1/2 and dimension 10.
        Representation is Dual of Weil representation of the discriminant form given by ZZ/22ZZ with quadratic form Q(x)=11*x**2 mod 1. with principal part: q^-5/44
        """
        # Delegate the description entirely to the ambient space's repr.
        s="Element of "+str(self._space)
        return s
def _latex_(self):
r""" Return LaTeX string representation of self.
EXAMPLES:
"""
return "Element of "+latex(self.space)
def __add__(self,G):
r"""
Add self to G.
"""
## check that -G and self are the same type of modular form
return self._lin_comb(G,1,1)
ok=True
if(not hasattr(G,'_is_automorphic_form')):
if self._verbose>0:
print "No autom form!"
ok = False
if G._space <> self._space:
if self._verbose>0:
print "Not same space as self! L:{0}, R:{1}".format(self._space,G._space)
ok = False
if(not ok):
raise NotImplementedError,"Addition of elements of type: %s and %s are not implemented!" %(type(self),type(G))
pp1 = self._principal_part
pp2 = G._principal_part
c1 = self._coeffs
c2 = G._coeffs
p = dict()
p['+']=dict()
p['-']=dict()
c = dict()
## we truncate to the smaller number of coefficients
for r in c1.keys():
c[r]=dict()
for j in c1[r].keys():
c[r][j]=dict()
for n in c1[r][j].keys():
if c2[r][j].has_key(n):
if self._verbose>1:
print "adding ",r,j,n
c[r][j][n]=c1[r][j][n]+c2[r][j][n]
if self._verbose > 1 and n==1:
print c1[r][j][n],"+",c2[r][j][n],"=",c[r][j][n]
## merge the principal parts
k1 = pp1['+'].keys(); k2 = pp1['+'].keys(); k1.extend(k2)
for r in k1:
if(k1.count(r)>0):
k1.remove(r)
for (r,n) in k1:
t=0
if(pp1['+'].has_key((r,n))):
t=t+pp1['+'][(r,n)]
if(pp2['+'].has_key((r,n))):
t=t+pp2['+'][(r,n)]
p['+'][(r,n)]=t
k1 = pp1['-'].keys(); k2 = pp1['-'].keys(); k1.extend(k2)
for r in k1:
if(k1.count(r)>0):
k1.remove(r)
for (r,n) in k1:
t=0
if(pp1['-'].has_key((r,n))):
t=t+pp1['-'][(r,n)]
if(pp2['-'].has_key((r,n))):
t=t+pp2['-'][(r,n)]
p['-'][(r,n)]=t
### To actually construct an element of the correct subclass is not so easy, especially if it has to work in attached files as well as in a standard installation...
F = eval(self._class_name+'(self._space,c,self._prec,p)')
#return AutomorphicFormElement(self._space,self._coeffs,self._prec,self._principal_part)
return F
    def __mul__(self,a):
        r"""
        return a*self

        Scalar multiplication, delegated to ``self._lin_comb(self,a)``
        (i.e. a*self + 0*self).
        """
        return self._lin_comb(self,a)
    def __copy__(self):
        r"""
        Return copy of self. Probably silly way of copying but it works...

        Implemented as a pickle round-trip (Sage ``dumps``/``loads``), which
        produces a deep copy of all attributes.
        """
        #print "COpying ",self
        s=dumps(self)
        return loads(s)
    def __div__(self,a):
        r"""
        return self/a

        Division by the scalar ``a`` (Python 2 ``/`` operator), implemented
        as multiplication by ``a**-1``.  Raises ZeroDivisionError for a==0.
        """
        if a==0:
            raise ZeroDivisionError
        return self._lin_comb(self,a**-1)
    def _lin_comb(self,G,a,b=0):
        r"""
        Return a*F+b*G

        Here F = self.  Coefficients are combined index-wise over the indices
        that self and G have in common; the principal parts are merged with
        the same scalars.  The result is returned as a (pickle-)copy of self
        with coefficients and principal part replaced, so it has the same
        subclass as self.
        Raises NotImplementedError if G is not an automorphic form on the
        same space as self.
        """
        ok=True
        res=copy(self)
        if not hasattr(G,'_is_automorphic_form'):
            ok = False
        if G._space <> self._space:
            ok = False
        if not ok:
            raise NotImplementedError,"Addition of elements of type: %s and %s are not implemented!" %(type(self),type(G))
        pp1 = self._principal_part
        pp2 = G._principal_part
        c1 = self._coeffs
        c2 = G._coeffs
        p = dict()
        p['+']=dict()
        p['-']=dict()
        c = dict()
        ## we truncate to the smaller number of coefficients
        #print "a,typea=",a,type(a)
        #print "b,typeb=",b,type(b)
        # Coerce the scalars into the coefficient ring; the fallback handles
        # complex-like inputs exposing real()/imag().
        # NOTE(review): bare except -- any coercion error is masked and
        # re-attempted via real()/imag(); confirm this is intended.
        try:
            aa=self._base_ring(a)
        except:
            aa = self._base_ring(a.real(),a.imag())
        try:
            bb=self._base_ring(b)
        except:
            bb = self._base_ring(b.real(),b.imag())
        for r in c1.keys():
            c[r]=dict()
            for j in c1[r].keys():
                c[r][j]=dict()
                for n in c1[r][j].keys():
                    # only combine indices present in both forms
                    if(c2[r][j].has_key(n)):
                        #print "adding ",r,j,n
                        #print "aa.parent()=",aa.parent(),type(aa)
                        #print "bb.parent()=",bb.parent(),type(bb)
                        #print "c1.parent=",c1[r][j][n].parent(),type(c1[r][j][n])
                        #print "c2.parent=",c2[r][j][n].parent(),type(c2[r][j][n])
                        c[r][j][n]=aa*c1[r][j][n]+bb*c2[r][j][n]
                        #if(n==1):
                        #    print c1[r][j][n],"+",c2[r][j][n],"=",c[r][j][n]
        ## merge the principal part
        # NOTE(review): this de-duplication removes elements from k1 while
        # iterating over it, which can skip entries; it works only because
        # duplicate (r,n) keys are handled idempotently below.
        # NOTE(review): the principal parts are scaled by the raw a,b rather
        # than the coerced aa,bb used for the coefficients -- confirm intended.
        k1 = pp1['+'].keys(); k2 = pp2['+'].keys(); k1.extend(k2)
        for r in k1:
            if(k1.count(r)>1):
                k1.remove(r)
        for (r,n) in k1:
            t=0
            if(pp1['+'].has_key((r,n))):
                t=t+a*pp1['+'][(r,n)]
            if(pp2['+'].has_key((r,n))):
                t=t+b*pp2['+'][(r,n)]
            p['+'][(r,n)]=t
        k1 = pp1['-'].keys(); k2 = pp2['-'].keys(); k1.extend(k2)
        for r in k1:
            if(k1.count(r)>1):
                k1.remove(r)
        for (r,n) in k1:
            t=0
            if(pp1['-'].has_key((r,n))):
                t=t+a*pp1['-'][(r,n)]
            if(pp2['-'].has_key((r,n))):
                t=t+b*pp2['-'][(r,n)]
            p['-'][(r,n)]=t
        res._coeffs=c
        res._principal_part=p
        ### To actually construct an element of the correct subclass is not so easy, especially if it has to work in attached files as well as in a standard installation...
        #F = eval(self._class_name+'(self._space,c,self._prec,p)')
        #return AutomorphicFormElement(self._space,self._coeffs,self._prec,self._principal_part)
        return res
    def set_verbositiy(self,verbose):
        r"""
        Set the verbosity level of self.

        NOTE(review): the method name is misspelled ('verbositiy') but is kept
        unchanged since external callers may rely on it.
        """
        self._verbose=verbose
    def prec(self):
        r"""
        return precision (number of coefficients) of self
        """
        # NOTE(review): prec() is defined twice in this class; this first
        # definition is shadowed by the (identical) one a few lines below.
        return self._prec
    def base_ring(self):
        r""" Return the base ring of the coefficients (set in __init__).
        """
        return self._base_ring
    def space(self):
        r""" Return the ambient space of self.
        """
        return self._space
    def coeffs(self):
        r""" Return the coefficients of self.
        """
        return self._coeffs
    def prec(self):
        r""" Return the precision of self.
        """
        return self._prec
    def weight(self):
        r""" Return the weight of ambient space.
        """
        return self._space._weight
    def character(self):
        r""" Return the character of ambient space.
        """
        return self._space._character
    def multiplier(self):
        r""" Return the multiplier of ambient space.
        """
        return self._space._multiplier
    def principal_part(self):
        r""" Return the principal part of self.

        A dictionary with keys '+' and '-' mapping (cusp,index) pairs to
        coefficients (see __init__).
        """
        return self._principal_part
    def is_holomorphic(self):
        r"""
        Return True if self is a member of a space of holomorphic forms, otherwise False.
        """
        return self._space._holomorphic
    def is_weak(self):
        r"""
        Return True if self is a weak form, i.e. has pole(s) at oo, otherwise False.
        """
        return self._space._weak
    def is_harmonic(self):
        r"""
        Return True if self is a harmonic weak maassform, i.e. has pole(s) at oo, otherwise False.
        """
        return self._space._harmonic
    def C(self,a=None,b=None,c=None):
        r""" Return coefficient nr. n at cusp nr. i of component r of self.
        C(n)     = Coeff n at cusp 0
        C(i,n)   = Coeff n at cusp i
        C(r,i,n) = Coeff n at cusp i and component r

        Dispatch on how many positional arguments were supplied; missing
        component/cusp indices default to 0.  Returns None (rather than
        raising) if the requested index is not present.
        """
        if(c<>None):
            r=a; i=b; n=c
        elif(b<>None):
            r=0; i=a; n=b
        elif(a<>None):
            r=0; i=0; n=a
        else:
            raise ValueError,"Need to supply a valid index for the coefficient!"
        C = self._coeffs
        if(not C.has_key(r)):
            return None
        if(not C[r].has_key(i)):
            return None
        if(not C[r][i].has_key(n)):
            return None
        return C[r][i][n]
    def my_zzcmp(self,a,b):
        r"""
        Compare a and b.

        Integer comparison (through ZZ) with the special string key '-0'
        ("minus zero", used for the negative part of the constant term)
        sorting strictly below 0.  Returns -1/0/1 as in the Python 2 ``cmp``
        protocol; used as sort comparator for coefficient indices.
        """
        if(a=='-0' and ZZ(b)==0):
            return -1
        if(b=='-0' and ZZ(a)==0):
            return 1
        return cmp(ZZ(a),ZZ(b))
    def list_coefficients_old(self,nmax,norm=False,plus=True,minus=True,cusp=None):
        r"""
        List coefficients of self.

        Prints (to stdout) the coefficients with |n+alpha(j)| <= nmax for each
        component r and cusp j, optionally restricted to one cusp and/or to
        the holomorphic ('+', plus) or non-holomorphic ('-', minus) part.
        If ``norm`` is True the coefficients are scaled by the first
        "large enough" positive resp. negative coefficient.
        Superseded by :meth:`list_coefficients`, which returns a string.
        """
        have_minus=self._principal_part.get('-')<>{}
        for r in self._coeffs.keys():
            print ""
            for j in self._coeffs[r].keys():
                if cusp<>None and j<>cusp:
                    continue
                print ""
                l=self._coeffs[r][j].keys()
                l.sort(cmp=self.my_zzcmp)
                #c0=self._coeffs[r][j][0]
                #c1=self._coeffs[r][j][1]
                ## Scaling factor for positive coefficients
                # If we have set the constant term then C(r)(0) is in fact related to the non-holomorphic part and not the holomorphic one
                a0_neg=False
                c_neg_norm=1
                c_norm=1
                if self._principal_part.get('+',{}).has_key((j,0)):
                    if not self._principal_part.get('-',{}).has_key((j,0)):
                        a0_neg=True
                print "a0_neg=",a0_neg
                if norm:
                    # find the first usable positive-index coefficient to scale by
                    for norm_index in range(0,len(l)):
                        if not (norm_index in l):
                            continue
                        if norm_index==0 and a0_neg:
                            continue
                        nal = norm_index + self._space.alpha(j)[0]
                        if nal < 0:
                            continue
                        c_norm=self._coeffs[r][j][norm_index]#/mpmath.sqrt(abs(nal))
                        if abs(c_norm)>mpmath.eps() and abs(c_norm)>0.0:
                            print "For normalization we use:"
                            print "c0=c[",j,norm_index,"]=",c_norm
                            print "c0**4=",c_norm**4 #mpmath.power(c_norm,4)
                            print "c0**-4=",c_norm**-4 #mpmath.power(c_norm,-4)
                            break
                    if self._verbose>1:
                        print "c_norm=",c_norm
                    # If the first non-zero coefficients is too small we don't want to normalize
                    for norm_index in range(0,len(l)):
                        nal = -norm_index + self._space.alpha(j)[0]
                        print "nal(",norm_index,')=',nal
                        if(not (-norm_index in l)):
                            continue
                        if nal>0:
                            continue
                        if nal==0 and not a0_neg:
                            continue #cm1=self._principal_part['-'][(j,0)]
                        cm1=self._coeffs[r][j][-norm_index]
                        if abs(cm1)>mpmath.eps() and abs(cm1)>0.0:
                            if abs(nal)>0:
                                c_neg_norm=cm1*self._base_ring(nal).abs().sqrt()
                            else:
                                c_neg_norm=cm1
                            print "c-[",j,",",-norm_index,"]=",cm1
                            break
                    if self._verbose>1:
                        print "cm_neg_norm=",c_neg_norm
                ## Scaling factor for negative coefficients
                #cm1=self._coeffs[r][j][-1]
                #if(abs(cm1)>mpmath.eps() and cm1<>0):
                for n in l:
                    #if(is_int(n)):
                    al = self._space.alpha(j)[0]
                    nal = ZZ(n) + al
                    st_nal=sci_pretty_print(nal,3)
                    #elif(n=='-0'):
                    #    nal=0
                    if(abs(nal)>nmax):
                        continue
                    if(plus == False and nal>=0):
                        continue
                    if(minus== False and nal<0):
                        continue
                    # NOTE(review): the branch below tests al<>0, not nal<>0,
                    # so for a cusp with alpha(j)==0 *every* index n takes the
                    # c_minus/c_plus "constant term" path -- confirm intended.
                    if al<>0:
                        if(norm and abs(c_norm)>mpmath.eps() and nal>0): # and n<>norm_index):
                            c=self._coeffs[r][j][n]/c_norm
                        elif(norm and nal<0 and abs(c_neg_norm)>mpmath.eps()):
                            #c=self._coeffs[r][j][n]*mpmath.sqrt(abs(nal))/c_neg_norm
                            c=self._coeffs[r][j][n]*sqrt(abs(nal))/c_neg_norm
                        else:
                            c=self._coeffs[r][j][n]
                        if(len(self._coeffs.keys())>1):
                            print "C[",r,",",j,",",n,"]=",c
                        else:
                            print "C[",j,",",n,":"+st_nal+"]=",c
                    else:
                        if(self._verbose>1):
                            print "j,n=",j,n,nal,a0_neg
                        if(a0_neg):
                            c_minus=self._coeffs[r][j][n]
                            c_plus=self._principal_part['+'].get((j,0),0)
                        else:
                            c_minus=self._principal_part['-'].get((j,0),0)
                            c_plus=self._coeffs[r][j][n]
                        if len(self._coeffs.keys())>1:
                            if have_minus:
                                print "C[",r,",",j,",-",n,"]=",c_minus
                            print "C[",r,",",j,",+",n,"]=",c_plus
                        else:
                            if have_minus:
                                print "C[",j,",-",n,"]=",c_minus
                            print "C[",j,",+",n,"]=",c_plus
def list_coefficients(self,N,cusp="all",norm=False,print_part=['+','-'],component=-1,out_prec=53):
r"""
List coefficients of self.
INPUT:
`N` -- integer, number of coefficients to list
`cusp` -- integer, list only coefficients for this cusp
`norm` -- Bool, print normalized expansions
`print_part` -- '+','-' or '+,-' (default)
`component` -- integer, print only this component (applies to vector-valued forms)
"""
have_minus=self._principal_part.get('-')<>{}
###
s=""
if component<0:
for r in self._coeffs.keys():
s+=self.list_coefficients(N=N,cusp=cusp,norm=norm,print_part=print_part,component=r,out_prec=out_prec)
return s
if cusp=="all":
for j in self._coeffs[component].keys():
s+=self.list_coefficients(N=N,cusp=j,norm=norm,print_part=print_part,component=component,out_prec=out_prec)
assert isinstance(cusp,(int,Integer))
C = self._coeffs[component][cusp]
l = C.keys()
l_plus = []; l_minus=[]
for n in l:
if self.my_zzcmp(n,0)>0:
l_plus.append(n)
else:
l_minus.append(n)
for (j,n) in self.principal_part()['+'].keys():
if j==cusp:
l_plus.append(n); C[n]=self.principal_part()['+'][(j,n)]
for (j,n) in self.principal_part()['-'].keys():
if j==cusp:
l_minus.append(n); C[n]=self.principal_part()['-'][(j,n)]
l_plus.sort(cmp=self.my_zzcmp)
l_minus.sort(cmp=self.my_zzcmp)
scaling_factors=[]
RF = RealField(self._prec)
new_prec = self._prec
if '+' in print_part: ## Print the positive part:
if norm:
norm_plus = self.principal_part()['+'].get((cusp,0),F.C(cusp,0))
if hasattr(norm_plus,"prec"):
new_prec = norm_plus.prec()
if abs(norm_plus) < RR(2.0)**(-self.prec()/2.0):
try:
for n in l_plus:
if n>0:
norm_plus = F.C(cusp,n)
if abs(norm_plus) > RR(2.0)**(-self.prec()/2.0):
raise StopIteration()
raise ArithmeticError,"Could not find non-zero positive coefficient to normalize with!"
except StopIteration:
pass
else:
norm_plus = 1
if hasattr(norm_plus,"prec"):
new_prec = norm_plus.prec()
if out_prec>new_prec:
new_prec = out_prec
RF = RealField(new_prec)
CF = ComplexField(new_prec)
norm_plus = CF(1)
al = self._space.alpha(cusp)[0]
for n in l_plus:
nal = n + al
c = C[n]
if isinstance(c,(int,mpf)):
c = CF(c.real,c.imag)
else:
c = CF(c.real(),c.imag())
c = c/norm_plus
s+="C^{{+}}_{{ {0} }} ({1}) = {2} ".format(cusp,nal,c)
if '-' in print_part:
## For the negative coefficients we scale with c_(-1) n^(1-k)
if norm:
norm_minus = F.C(cusp,-1) #self.principal_part()['-'].get((cusp,0),F.C(cusp,0))
if abs(norm_minus) < RF(2.0)**(-self.prec()/2.0):
try:
for n in l_minus:
if n>0:
norm_minus = F.C(cusp,n)
if abs(norm_minus) > RF(2.0)**(-self.prec()/2.0):
raise StopIteration()
raise ArithmeticError,"Could not find non-zero negative coefficient to normalize with!"
except StopIteration:
pass
else:
norm_minus = CF(1)
for n in l_minus:
nal = n + al
c = C[n]
if isinstance(c,(int,mpf)):
c = CF(c.real,c.imag)
else:
c = CF(c.real(),c.imag())
if nal>0:
nn = abs(nal)**(1-k)
c = c/norm_minus/nn
s+="C^{{-}}_{{ {0} }} ({1}) = {2} ".format(cusp,n,c)
return s
    def print_table_of_coeffs(self,nmax,norm=False,plus=True,minus=True,cusp=None,table_format="table"):
        r"""
        Print a Latex table of coefficients.

        Builds (and returns) one LaTeX tabular per vector component with one
        column per cusp and one row per coefficient index |n| <= nmax; the
        n=0 row is split into a^{-}(0) and a^{+}(0) rows where applicable.
        If ``norm`` is True, coefficients are scaled by the first usable
        positive resp. negative coefficient at each cusp.  Diagnostic output
        is printed to stdout as a side effect.
        """
        zl=1E-8 ### smaller coefficients than this are treated as zero
        th="\\begin{"+table_format+"}[h]"
        res=""
        ph="\\hphantom{$-$}"
        prec = self.space().prec()
        eps = 2.0*10.0**(-prec*ln(2.0)/ln(10.0))
        for r in self._coeffs.keys():
            ## Make one table per vector component
            tbl=" $n$ "
            cols="|l"
            for j in self._coeffs[r].keys():
                if(cusp<>None and j<>cusp):
                    continue
                tbl+= "& $p="+latex(self._space._group.cusps()[j])+"$ "
                cols+="|l"
            tbl+="\\\\ \n"
            tbl+="\hline \\noalign{\smallskip} \n"
            rh="\\begin{tabular}{"+cols+"|} \n"
            res=res+th+rh
            # Use the union of indices for all cusps as index set.
            Nlist=[]
            for j in self._coeffs[r].keys():
                if(cusp<>None and j<>cusp):
                    continue
                l=self._coeffs[r][j].keys()
                for n in l:
                    al = self._space.alpha(j)[0]
                    nal = ZZ(n) + al
                    if( ((not plus) and nal>0) or ((not minus) and nal<0)):
                        continue
                    if(abs(n)>nmax):
                        continue
                    if(Nlist.count(n)==0):
                        Nlist.append(n)
            Nlist.sort(cmp=self.my_zzcmp)
            ### First get the scaling factors
            # NOTE(review): the loops below reuse ``l`` from the *last* cusp
            # of the Nlist loop above, and c_norm/norm_index may be left over
            # from a previous cusp iteration if no candidate is found --
            # verify before relying on the normalized values.
            pos_factor=dict()
            neg_factor=dict()
            a0_neg=dict()
            for j in self._coeffs[r].keys():
                if(cusp<>None and j<>cusp):
                    continue
                # # Scaling factor for positive coefficients
                # If we have set the constant term then C(r)(0) is in fact related to the non-holomorphic part and not the holomorphic one
                a0_neg[j]=False
                if(self._principal_part['+'].has_key((j,0))):
                    if(not self._principal_part['-'].has_key((j,0))):
                        a0_neg[j]=True
                for norm_index in range(0,max(Nlist)):
                    nal = norm_index + self._space.alpha(j)[0]
                    if( (not (norm_index in l)) or (norm_index==0 and a0_neg[j]) or (nal <0)):
                        continue
                    c_norm=self._coeffs[r][j][norm_index]#/mpmath.sqrt(abs(nal))
                    if(abs(c_norm)>eps and abs(c_norm)>0.0):
                        print "For normalization we use:"
                        print "c0=c[",j,norm_index,"]=",c_norm
                        print "c0**4=",c_norm**4
                        print "c0**-4=",c_norm**-4
                        break
                pos_norm_i=norm_index
                pos_factor[j]=(pos_norm_i,c_norm)
                c_neg_norm=1 #mpmath.mpf(1)
                ## Scaling factor for negative coefficients
                # If the first non-zero coefficients is too small we don't want to normalize
                for norm_index in range(0,max(Nlist)):
                    nal = -norm_index + self._space.alpha(j)[0]
                    # print "nal=",nal
                    if(not (-norm_index in l)):
                        continue
                    if(nal>=0):
                        continue
                    if(norm_index==0 and not a0_neg[j]):
                        continue #cm1=self._principal_part['-'][(j,0)]
                    cm1=self._coeffs[r][j][-norm_index]
                    if(abs(cm1)>eps and abs(cm1)>0.0):
                        if(abs(nal)>0):
                            if hasattr(nal,"ae"):
                                c_neg_norm=cm1*mpmath.sqrt(abs(nal))
                            else:
                                c_neg_norm=cm1*sqrt(abs(nal))
                        else:
                            c_neg_norm=cm1
                        print "c-[",j,",",-norm_index,"]=",cm1
                        # print "c_neg_norm=",c_neg_norm
                        break
                neg_norm_i=-norm_index
                print "cm_neg_norm=",c_neg_norm
                neg_factor[j]=(neg_norm_i,c_neg_norm)
            # Now we can finally make the table
            for n in Nlist:
                if(n<>0):
                    row=" $"+str(n)+"$"
                    for j in self._coeffs[r].keys():
                        if(cusp<>None and j<>cusp):
                            continue
                        al = self._space.alpha(j)[0]
                        nal = ZZ(n) + al
                        if(plus == False and nal>=0):
                            continue
                        if(minus== False and nal<0):
                            continue
                        if(norm and abs(c_norm)>eps and nal>0): # and n<>norm_index):
                            c=self._coeffs[r][j][n]/pos_factor[j][1]
                        elif(norm and nal<0 and abs(c_neg_norm)>eps):
                            if hasattr(nal,"ae"):
                                c=self._coeffs[r][j][n]*mpmath.sqrt(abs(nal))/neg_factor[j][1]
                            else:
                                c=self._coeffs[r][j][n]*sqrt(abs(nal))/neg_factor[j][1]
                        else:
                            c=self._coeffs[r][j][n]
                        s = norm_sci_pretty_print(c,16,latex_pow=True,zero_lim=zl)
                        # pad positive entries so columns line up with minus signs
                        if(s.lstrip()[0]<>"-"):
                            row+="& "+ph+"$"+s+"$ "
                        else:
                            row+="& $"+s+"$ "
                    row+="\\\\ \n "
                else:
                    ## Have to distinguish between alpha(j)=0 and <>0
                    ## Add two rows of c(0)
                    print "a0neg=",a0_neg
                    row=""
                    if(minus):
                        row+=" $a^{-}(0)$ "
                        for j in self._coeffs[r].keys():
                            if(cusp<>None and j<>cusp):
                                continue
                            al = self._space.alpha(j)[0]
                            nal = ZZ(n) + al
                            if(nal<>0):
                                row+=" & "
                            else:
                                if(a0_neg[j]):
                                    c_minus=self._coeffs[r][j][n]
                                else:
                                    c_minus=self._principal_part['-'][(j,0)]
                                s = norm_sci_pretty_print(c_minus,16,latex_pow=True,zero_lim=zl)
                                if(s.lstrip()[0]<>"-"):
                                    row+="& "+ph+"$"+s+"$ "
                                else:
                                    row+="& $"+s+"$ "
                                #print "c_minus(",j,")=",c_minus
                                #print "s=",s
                                #row+=s
                        row+="\\\\ \n "
                        row+="$a^{+}(0)$ "
                    else:
                        row+="$0$ "
                    for j in self._coeffs[r].keys():
                        if(cusp<>None and j<>cusp):
                            continue
                        al = self._space.alpha(j)[0]
                        nal = ZZ(n) + al
                        if(nal<>0):
                            c_plus = self._coeffs[r][j][n]
                        else:
                            if(a0_neg[j]):
                                c_plus=self._principal_part['+'][(j,0)]
                            else:
                                c_plus=self._coeffs[r][j][n]
                        if(norm):
                            c_plus=c_plus/pos_factor[j][1]
                        print "c_plus=",c_plus
                        ## We now add two rows
                        s=norm_sci_pretty_print(c_plus,16,latex_pow=True,zero_lim=zl)
                        if(s.lstrip()[0]<>"-"):
                            row+="& "+ph+"$"+s+"$ "
                        else:
                            row+="& $"+s+"$ "
                    row+="\\\\ \n"
                tbl+=row
            tbl+="\end{tabular} \end{"+table_format+"}"
            res+=tbl
        return res
    def print_as_q_exp(self,nmax,base_ring=QQ,eps=1E-12):
        r"""
        Print self as q-expansions with coefficients truncated to nearest integer (if error is less than prec)
        INPUT:
        - `nmax` -- number of terms in the q-expansion
        - `base_ring` -- Base ring for q-series (QQ, the string 'QQi' for QQ(i), or any ring accepting the coefficients)
        - `eps` -- precision for rational approximation (NOTE: overwritten below from the space's precision)

        Output is printed to stdout; the normalizing constants c_j and their
        arguments are printed at the end.
        """
        qexps=list()
        ## We try with rationals...
        x=ZZ['x'].gen()
        QQi=QQ.extension(x**2+1,'i')
        if base_ring=='QQi':
            base_ring=QQi
        S=PowerSeriesRing(base_ring,'q')
        coeff_f_minus=dict()
        coeff_f_plus=dict()
        # NOTE(review): the eps argument is unconditionally replaced here.
        eps = 2.0*10.0**(-self.space().prec()*ln(2.0)/ln(10.0))
        print "Principal part:"+self.print_principal_part()
        const=dict()
        for r in self._coeffs.keys():
            coeff_f_minus[r]=dict()
            coeff_f_plus[r]=dict()
            const[r]=dict()
            for j in self._coeffs[r].keys():
                coeff_f_minus[r][j]=list()
                coeff_f_plus[r][j]=list()
                #print "c0=",c0
                # c0 = normalizing coefficient at this cusp (1 at cusp 0)
                if(j<>0):
                    c0=self._coeffs[r][j][0]
                    n0=0
                    if(abs(c0)==0):
                        c0=self._coeffs[r][j][1]
                        n0=1
                else:
                    c0=1
                    n0=0
                if base_ring==QQ:
                    cr = rational_approximation(c0**4,eps)
                else:
                    c0=base_ring(c0)
                    cr=base_ring(c0**4)
                # argument of c0 as a rational multiple of pi; the hasattr
                # tests distinguish mpmath ("ae"), Sage complex ("argument")
                # and real inputs.
                if hasattr(c0,"ae"):
                    ar = rational_approximation(mpmath.arg(c0)/mpmath.mp.pi(),eps)
                elif hasattr(c0,"argument"):
                    ar = rational_approximation(c0.argument()/c0.base_ring().pi(),eps)
                else:
                    if c0>0:
                        ar = 0
                    else:
                        ar = RR.pi()
                const[r][j]=[cr,ar]
                #print "c0=",c0
                # First f_j^-
                for n in range(0,nmax):
                    al=-n+self._space.alpha(j)[0]
                    if al<eps and self.is_holomorphic():
                        continue
                    if base_ring==QQ:
                        c=rational_approximation(self._coeffs[r][j][-n]/c0,eps)
                    else:
                        c=base_ring(self._coeffs[r][j][-n]/c0)
                    coeff_f_minus[r][j].append(c)
                    #print "f[",j,",",n,"]^-=",c
                # Then f_j^+
                for n in range(0,nmax):
                    al=n+self._space.alpha(j)[0]
                    if al<-eps:
                        continue
                    tmp=self._coeffs[r][j][n]/c0
                    print "tmp=",tmp,type(tmp)
                    if isinstance(tmp,int):
                        rtmp=tmp
                        itmp=0
                    else:
                        rtmp=tmp.real()
                        if hasattr(tmp,"complex_embedding"):
                            itmp=tmp.complex_embedding(self.space().prec()).imag()
                        else:
                            itmp=tmp.imag()
                    # NOTE(review): for base_ring==QQ the first branch computes
                    # c but the *else* of the second if then overwrites it with
                    # base_ring(tmp) (the two ifs are not an elif chain), and
                    # for QQi the name ``i`` is not bound here -- both look
                    # like latent bugs; confirm before use.
                    if base_ring==QQ:
                        c=rational_approximation(rtmp,eps)
                    if base_ring==QQi:
                        cx=rational_approximation(rtmp,eps)
                        cy=rational_approximation(itmp,eps)
                        c=cx+i*cy
                    else:
                        c=base_ring(tmp)
                    if base_ring==QQ:
                        coeff_f_plus[r][j].append(c.real())
                    else:
                        coeff_f_plus[r][j].append(c)
                    #print "f^+[",j,",",n,"]^+=",c
                if len(self._coeffs.keys())>1:
                    if(coeff_f_minus[r][j].count(0)==len(coeff_f_minus[r][j])):
                        fm="0"
                    else:
                        fm=str(coeff_f_minus[r][j])
                    fp0 = str(S(coeff_f_plus[r][j]).add_bigoh(nmax))
                    al=self._space.alpha(j)[0]
                    if(al<>0):
                        fp="q^{"+str(rational_approximation(al))+"}("+fp0+")"
                    else:
                        fp=fp0
                    print "f[",r,",",j,"]^+=",fp
                else:
                    if(coeff_f_minus[r][j].count(0)==len(coeff_f_minus[r][j])):
                        fm="0"
                    else:
                        fm=str(coeff_f_minus[r][j])
                    if not self.is_holomorphic():
                        print "f[",j,"]^-=",fm
                    al=self._space.alpha(j)[0]
                    fp0 = str(S(coeff_f_plus[r][j]).add_bigoh(nmax))
                    if(al<>0):
                        fp="q^{"+str(rational_approximation(al))+"}("+fp0+")"
                    else:
                        fp=fp0
                    print "f[",j,"]^+=",fp
        for r in self._coeffs.keys():
            for j in self._coeffs[r].keys():
                if const[r][j][0]<>0 and const[r][j][1]<>0:
                    print "c[",r,j,"]^4= %s arg = %s pi" % (const[r][j][0],const[r][j][1])
    def print_principal_part(self):
        r""" Print the principal part of self as q-series.

        Returns a string.  Only the holomorphic ('+') part is typeset as a
        q-series; a non-empty '-' part is indicated by a placeholder string.
        """
        s=""
        sp=""
        for (r,n) in self._principal_part['+']:
            a=self._principal_part['+'][(r,n)]
            if(a<>0):
                # exponent of q: index n shifted by the cusp parameter alpha(r)
                x=QQ(n+RR(self._space.alpha(r)[0]))
                if(a<>1):
                    if(a>0 and len(sp)>0):
                        ast="+"+str(a)
                    else:
                        ast=str(a)
                    if(x<>0):
                        sp=sp+ast+"q^{"+str(x)+"}"
                    else:
                        sp=sp+ast
                else:
                    if(x<>0):
                        sp=sp+"q^{"+str(x)+"}"
                    else:
                        sp=sp+"1"
        s=s+sp+""
        # NOTE(review): the placeholder is appended once per '-' entry.
        for (r,n) in self._principal_part['-']:
            s+=' + non-holomorphic principal part...(todo: print better)'
        return s
    def xi_k_inverse_G(self,prec=53,pp_in=None):
        r"""
        Compute xi_k^-1(g) for self.

        NOTE(review): the second half of this method (from ``if(pp_in)`` on)
        appears to be copied from ``xi_k_inverse_basis`` and references the
        names G, BB, cb and setc which are undefined in this scope -- it will
        raise NameError if reached.  The method needs a review/rewrite before
        being used with ``pp_in`` set.
        """
        M = self._space
        pp = [M.xi_k_inverse_pp(self)]
        H = HarmonicWeakMaassForms(M)
        F = H.get_element(pp,prec)
        eps = 2.0*10.0**(-self.space().prec()*ln(2.0)/ln(10.0))
        if(pp_in):
            p = dict()
            for (r,n) in pp_in.keys():
                p[(r,n)]=pp_in[(r,n)]
            pp.append(p)
        ## If G is in the + space we set some c's too...
        sc=dict()
        for j in range(1,4):
            if self._verbose > 1:
                print "C(0",j,")=",G.C(0,j)
            if(abs(G.C(0,j))<eps):
                sc[(0,-j)]=0
        # We have to guess how to get rid of any holomorphic forms...
        for n in range(1,len(BB)+1):
            if(cb[n]==len(BB)):
                sc[(0,n)]=0
                break
        setc.append(sc)
        F = H.get_element(pp,SetC=setc,prec=prec)
        return F
class HalfIntegralWeightFormElement(AutomorphicFormElement):
    r"""
    A member of the space of half-integral weight modular forms forms (with theta multiplier as default).
    """
    def __init__(self,M,C=None,prec=53,principal_part=None):
        r"""
        Initialize an automorphic form.
        INPUT:
        -''M'' -- Space of automorphic forms
        -''k'' -- Weight.
        -''C''-- Fourier coefficients (a dict, or a magma form object exposing Coefficients())
        -''prec'' -- integer, precision (if given by construction, default None)
        -''principal_part -- principal part in dictionary
        '+' : principal part of c^{+}
        '-' : principal part of c^{-}
        EXAMPLES:
        """
        # Magma forms are detected duck-typed via 'get_magma_attribute'.
        if(hasattr(C,'get_magma_attribute')):
            ## We can initialize a half integral weight form from a magma form (q-expansion)
            self._magma_form=C
            coeff = dict()
            coeff[0] = C.Coefficients()
        else:
            self._magma_form=None
            coeff = C
        # _class_name must be set before the base __init__ (used by __add__).
        self._class_name = "HalfIntegralWeightFormElement"
        AutomorphicFormElement.__init__(self,M,C=coeff,prec=prec,principal_part=principal_part)
class HarmonicWeakMaassFormElement(AutomorphicFormElement):
    r"""
    Create an Harmonic Weak Maass form.
    """
    def __init__(self,M,C=None,prec=None,principal_part=None):
        r"""
        See ``HarmonicWeakMaassFormElement`` for full documentation.
        Initialize an harmonic weak Maass form.
        INPUT:
        -''M'' -- Space of automorphic forms
        -''k'' -- Weight.
        -''C''-- Fourier coefficients
        -''prec'' -- integer, precision (if given by construction, default None)
        EXAMPLES::
        """
        #print "typeM=",type(M)
        # _class_name must be set before the base __init__ (used by __add__).
        self._class_name ="HarmonicWeakMaassFormElement"
        AutomorphicFormElement.__init__(self,M,C,prec=prec,principal_part=principal_part)
class HarmonicWeakMaassForms(AutomorphicFormSpace):
    r"""
    Space of Harmonic weak Maass forms.
    """
    def __init__(self,G,weight=0,multiplier=None,holomorphic=False,weak=True,cuspidal=False,dprec=53,verbose=0,**kwds):
        r""" Initialize the space of automorphic forms.

        ``G`` may be a MySubgroup instance, an integer level N (-> Gamma0(N)),
        a subgroup of SL2Z, or another space of automorphic functions, in
        which case this constructs the \xi_k-dual space of weight 2-k.
        """
        if(isinstance(G,MySubgroup)):
            self._group=G
            self._from_group=G._G
        else:
            if is_int(G):
                self._group=MySubgroup(Gamma0(G))
                self._from_group=Gamma0(G)
            elif( hasattr(G,'is_subgroup') and G.is_subgroup(SL2Z)):
                self._group=MySubgroup(G)
                self._from_group=G
            elif( hasattr(G,'_is_space_of_automorphic_functions')):
                # We can use the inverse of the \xi_k operator to map M_{k,rho} to H_{2-k,\bar{rho}} and
                self._group=G._group
                self._from_group=G._from_group
                self._verbose = G._verbose
                weight=QQ(2)-QQ(G.weight())
                x=G.character()
                # NOTE(review): the 'character' computed below is never passed
                # on -- the AutomorphicFormSpace.__init__ call that used it is
                # commented out at the bottom; confirm this is intentional.
                if x.is_trivial():
                    character=trivial_character(self._group.level())
                elif(isinstance(x,sage.modular.dirichlet.DirichletCharacter)):
                    if(x.order()<=2):
                        character=x
                    else:
                        # the conjugate character
                        character=x.parent()[0]/x
                elif(isinstance(x,type(trivial))): # if x is a function
                    character = x
                else:
                    raise ValueError, "Unknown character! x:%s" % x
            else:
                raise ValueError, "Could not initialize space from G:%s" % G
        # We need a level divisible by 4
        if multiplier==None:
            if is_int(weight):
                multiplier=TrivialMultiplier(self._group)
            else:
                if(self._group.level() % 4 <>0):
                    raise ValueError," Need level divisible by 4. Got:%s " % self._group.level()
                #if ( int(2*weight) % 4 == 1):
                multiplier=ThetaMultiplier(self._group,weight=weight)
                #else:
                #    multiplier=ThetaMultiplier(self._group,dual=True)
        self._class_name ="HarmonicWeakMaassForms"
        if self._from_group:
            GG = self._from_group
        else:
            GG = self._group
        #AutomorphicFormSpace.__init__(self,GG,weight=weight,multiplier=multiplier,character=character,holomorphic=holomorphic,weak=weak,cuspidal=cuspidal,dprec=dprec,verbose=verbose)
        AutomorphicFormSpace.__init__(self,GG,weight=weight,multiplier=multiplier,holomorphic=holomorphic,weak=weak,cuspidal=cuspidal,dprec=dprec,verbose=verbose,**kwds)
def modular_forms_subspace(self,cuspidal_subspace=False):
r"""
Construct the modular forms (i.e. holomorphic and non-weak) subspace of self.
"""
if(not is_int(self.weight()) and is_int(2*self.weight())):
if(self._from_group):
G = self._from_group
else:
G = self._group
M=HalfIntegralWeightForms(G,weight=self.weight(),character=self._character,multiplier=self._multiplier,holomorphic=True,weak=False,cuspidal=cuspidal_subspace,dprec=self._dprec,verbose=self._verbose)
else:
M=AutomorphicFormspace(G,weight=self.weight(),character=self._character,multiplier=self._multiplier,holomorphic=True,weak=False,cuspidal=cuspidal_subspace,dprec=self._dprec,verbose=self._verbose)
return M
    def get_element(self,principal_part=None,prec=53,dbase_prec=None,ndig=10,SetC=None,SetY=None,SetM=None,**kwds):
        r"""
        Get an element of the space of HarmonicWeakMaassForms.
        INPUT:
        - ''principal_part'' -- list of principal parts
        PP[c,m]=a if the principal at cusp c contains a*q^m
        - ''ndig'' -- integer (default 10): the number of requested digits
        - ''dbase_prec'' -- integer (default None): if set, use this number of digits for precision in all mpmath calculations
        - ''SetC'' -- dictionary containing fourier coefficients to keep fixed (and their values)
        of the form SetC[n][i]=c_i(n)

        OUTPUT: a HarmonicWeakMaassFormElement, or a list of them when
        several sets of SetC conditions are given.
        NOTE(review): ndig and dbase_prec are currently not forwarded to
        _get_element; confirm whether they should be.
        """
        pp = principal_part
        if not isinstance(pp,list):
            pp = [pp]
        C = AutomorphicFormSpace._get_element(self,principal_part=principal_part,SetC=SetC,SetY=SetY,SetM=SetM,get_c=True,**kwds)
        res=list()
        print "principal_part=",pp
        if not isinstance(SetC,list):
            numfuns=1
        else:
            numfuns = max(1,len(SetC))
        if len(C)>0:
            # NOTE(review): pp[i] assumes as many principal parts as SetC
            # entries; this raises IndexError when numfuns>len(pp).
            for i in range(numfuns):
                F=HarmonicWeakMaassFormElement(self,C[i],prec=prec,principal_part=pp[i])
                res.append(F)
        # unwrap a single result for convenience
        if len(res)==1:
            res=res[0]
        return res
def set_norm(self,P=None,C=None):
r"""
Set normalization for computing Fourier coefficients.
-''P'' -- principal part = list of dictionaries
-''C'' -- a list of dictionaries of coefficients in the form SetC[k][(i,n)]=c
(if only one dictionary is supplied we use the same for all)
"""
N=dict()
if isinstance(P,list):
N['comp_dim']=len(P)
else:
N['comp_dim']=1
P=[P]
if N['comp_dim']==0:
raise ValueError,"Need to specify at least one set of conditions!"
if self._verbose > 0:
print "comp_dim:=",N['comp_dim']
print "PP=",P
N['SetCs']=dict()
N['SetCs_neg']=dict()
N['cuspidal']=self._cuspidal
N['weak']=self._weak
nc=self._group.ncusps()
eps = 2.0*10.0**(-self.prec()*ln(2.0)/ln(10.0))
if C<>None and len(C)>0:
C1=dict()
if isinstance(C,dict):
C1[0]=[C]
for i in range(1,N['comp_dim']):
C1.append(C)
elif isinstance(C,list):
C1 = copy(C)
if len(C1)<>N['comp_dim']:
raise ValueError,"Need the same length of coefficients to set as number of principal parts!"
else:
raise ValueError,"Need the same length of coefficients to set as number of principal parts!"
else:
C1=list()
for j in range(N['comp_dim']):
C1.append({})
if self._verbose > 1:
print "SetC=",C1
# Set coefficients (e.g. in +-space or similar)
if C1<>None:
for j in range(N['comp_dim']):
N['SetCs'][j]=copy(C1[j]) # dict()
else:
for j in range(N['comp_dim']):
N['SetCs'][j]=dict()
# # Impose further restrictions by cuspidality or non-weakness
for icusp in range(nc):
al=self.alpha(icusp)[0] #
if self._verbose > 1:
print "alpha(",icusp,")=",al
if (al<-eps and not N['weak']) or (al<=eps and N['cuspidal']):
for j in range(N['comp_dim']):
N['SetCs'][j][(icusp,0)]=0
## Set coefficients given by the principal parts
## (if applicable, i.e. only the 0th-coefficient)
for j in range(N['comp_dim']):
N['SetCs_neg'][j]=dict()
if(P[j].has_key("-")):
for (r,n) in P[j]['-']:
N['SetCs_neg'][j][(r,n)]=P[j]['-'][(r,n)]
if P[j].has_key("+"):
for (r,n) in P[j]['+']:
if n==0: #if H.alpha(r)[0]==0:
if not N['SetCs'][j].has_key((r,n)):
N['SetCs'][j][(r,n)]=P[j]['+'][(r,n)]
return N
class HolomorphicModularForms(AutomorphicFormSpace):
r"""
Space of Holomorphic modular forms.
EXAMPLES:
sage: S=HolomorphicModularForms(Gamma0(1),12,prec=203)
sage: F=S.get_element(SetM=50)
sage: F.print_as_q_exp(10,prec=1E-12)
Principal part:
f[ 0 ]^+= q - 24*q^2 + 252*q^3 - 1472*q^4 + 4830*q^5 - 6048*q^6 - 16744*q^7 + 84480*q^8 - 113643*q^9 + O(q^10)
"""
def __init__(self,G,weight=None,multiplier=None,weak=False,cuspidal=True,dprec=25,verbose=0,**kwds):
r""" Initialize the space of automorphic forms.
"""
self._verbose=verbose
if( hasattr(G,'_is_space_of_automorphic_functions')):
# Look at the holomorphic space on the group of G with same weight
# and multiplier as default
self._group=G._group
self._from_group=G._from_group
self._verbose = G._verbose
if weight==None:
weight=G.weight()
if multiplier==None:
multiplier=G.multiplier()
else:
if(isinstance(G,MySubgroup)):
self._group=G
self._from_group=G._G
else:
if(is_int(G)):
self._from_group=Gamma0(G)
self._group=MySubgroup(self._from_group)
elif( hasattr(G,'is_subgroup') and G.is_subgroup(SL2Z)):
self._group=MySubgroup(G)
self._from_group=G
#AutomorphicFormSpace.__init__(self,GG,weight=weight,multiplier=multiplier,character=character,holomorphic=holomorphic,weak=weak,cuspidal=cuspidal,dprec=dprec,verbose=verbose)
AutomorphicFormSpace.__init__(self,self._group,weight=weight,multiplier=multiplier,holomorphic=True,weak=weak,cuspidal=cuspidal,dprec=dprec,verbose=verbose,**kwds)
def get_element(self,principal_part=None,ndig=10,prec=53,dbase_prec=None,SetC=None,SetY=None,SetM=None,do_mpmath=False,get_mat=False):
if SetC==None:
if self.is_cuspidal():
SetC=[{(0,0):0,(0,1):1}]
else:
SetC=[{(0,0):1,(0,1):0}]
if principal_part<>None:
pp = principal_part
else:
pp=list()
for i in range(len(SetC)):
pp.append({'+':{},'-':{}})
if not isinstance(pp,list):
pp0=[pp]
else:
pp0=pp
C = AutomorphicFormSpace._get_element(self,principal_part=pp,SetC=SetC,SetY=SetY,SetM=SetM,do_mpmath=do_mpmath,get_mat=get_mat,get_c=True)
res=list()
if len(C)>0:
for i in range(len(C)):
F= HolomorphicModularFormElement(self,C[i],prec=prec,principal_part=pp0[i])
res.append(F)
if len(res)>1:
return res
else:
return res[0]
class HolomorphicModularFormElement(AutomorphicFormElement):
r"""
Create an Harmonic Weak Maass form.
"""
def __init__(self,M,C=None,prec=None,principal_part=None):
r"""
See ``HarmonicWeakMaassFormElement`` for full documentation.
Initialize an harmonic weak Maass form.
INPUT:
-''M'' -- Space of automorphic forms
-''k'' -- Weight.
-''C''-- Fourier coefficients
-''prec'' -- integer, precision (if given by construction, default None)
EXAMPLES::
"""
#print "typeM=",type(M)
self._class_name ="HolomorphicModularFormElement"
AutomorphicFormElement.__init__(self,M,C,prec=prec,principal_part=principal_part)
def list_coefficients(self,nmax,norm=False,plus=True,minus=True,cusp=None):
r"""
List coefficients of self.
"""
for r in self._coeffs.keys():
print ""
for j in self._coeffs[r].keys():
if cusp<>None and j<>cusp:
continue
print ""
l=self._coeffs[r][j].keys()
l.sort(cmp=self.my_zzcmp)
for n in l:
al = self._space.alpha(j)[0]
if al<>0:
nal = ZZ(n) + al
st_nal=sci_pretty_print(nal,3)
else:
nal=str(n)
c=self._coeffs[r][j][n]
print "C[",j,",",nal,"]=",c
### Construction routines for specific types of forms
def WeakModularForm(G,weight=0,principal_part="q^-1",**kwds):
    r"""
    Construct a weakly holomorphic modular form on G.

    INPUT:
    - `G` -- group (or level)
    - `weight` -- weight (default 0)
    - `principal_part` -- principal part given as a (vector of) q-series with
      coefficients in QQ (default "q^-1")
    - `kwds` -- extra keywords, passed on to the HolomorphicModularForms space

    NOTE: If a vector of principal series is given the entries correspond to
    the cusps of G (in the same order).
    """
    space = HolomorphicModularForms(G,weight=weight,weak=True,**kwds)
    plus_part = extract_princial_part(space,principal_part)
    return space.get_element(principal_part={'+':plus_part})
def HarmonicWeakMaassForm(G,weight=0,principal_part="q^-1",verbose=0,**kwds):
    r"""
    Construct a harmonic weak Maass form on G.

    INPUT:
    - `G` -- group (or level)
    - `weight` -- weight (default 0)
    - `principal_part` -- principal part given as a (vector of) q-series with
      coefficients in QQ (default "q^-1")
    - `verbose` -- verbosity level for the space
    - `kwds` -- extra keywords, passed on to get_element (not to the space)

    NOTE: If a vector of principal series is given the entries correspond to
    the cusps of G (in the same order).
    """
    space = HarmonicWeakMaassForms(G,weight=weight,weak=True,verbose=verbose)
    prin = extract_princial_part(space,principal_part)
    return space.get_element(principal_part=prin,**kwds)
def extract_princial_part(M,principal_part):
    r"""
    Compute the principal part in the format we want for the constructors.

    INPUT:
    -''M'' -- a space of automorphic forms (needs group() and alpha())
    -''principal_part'' -- a dictionary with keys '+'/'-', a string to be
      evaluated as a Laurent polynomial in q, a Laurent polynomial, or a
      list of these (one entry per cusp).

    OUTPUT: dictionary {'+': {(cusp,n): c}, '-': {(cusp,n): c}}
    """
    ### Interpret the principal part .
    # 'q' must be bound here: string principal parts are evaluated below
    # with eval() in this scope.
    LP = LaurentPolynomialRing(QQ,name='q')
    q = LP.gens()[0]
    YP = PolynomialRing(QQ,name='y')
    y = YP.gens()[0]
    ppdict={'-':{},'+':{}}
    ## We start by setting the constant terms to zero
    ## (unless they are explicitly overriden in the given principal part)
    for j in range(M.group().ncusps()):
        if M.alpha(j)[0]<=0:
            ppdict['+'][(j,0)]=0
            ppdict['-'][(j,0)]=0
    if isinstance(principal_part,dict):
        for r,k in principal_part.get('+',{}):
            ppdict['+'][(r,k)]=principal_part['+'][(r,k)]
        # NOTE(review): this replaces the whole '-' dictionary, discarding the
        # zero constant terms set above -- confirm this is intended.
        ppdict['-']=principal_part.get('-',{})
        #print "dict:",ppdict
        return ppdict
    if not isinstance(principal_part,list):
        principal_part=[principal_part]
    for j in range(len(principal_part)):
        pp = principal_part[j]
        if isinstance(pp,str):
            print "pp0=",pp
            # # We need to convert to a Laurent polynomial.
            # NOTE(review): eval() on a caller-supplied string -- do not pass
            # untrusted input to this function.
            pp = eval(pp)
        elif not hasattr(pp,"add_bigoh") and pp.base_ring()==QQ:
            # NOTE(review): this condition looks inverted -- it raises for
            # objects WITH base ring QQ but without add_bigoh, and calling
            # pp.base_ring() may itself fail for arbitrary pp. Verify.
            raise ValueError,"Did not get principal part of correct type! Got:{0}".format(principal_part)
        print "pp=",pp
        ck=0
        # Record each monomial c*q^k of the principal part at cusp j.
        # NOTE(review): assumes pp.exponents() yields tuples (k[0] used).
        for k in pp.exponents():
            ppdict['+'][(j,k[0])]=pp.coefficients()[ck]
            ck+=1
    #print "ppdict=",ppdict
    return ppdict
def shifts(FQM):
    r"""
    Return the dictionary {x: FQM.Q(x)} for all x in FQM.list(),
    i.e. the values of the quadratic form Q on the elements of FQM.
    """
    return dict([(x, FQM.Q(x)) for x in FQM.list()])
def set_norm_harmonic_weak_maass_forms(H,P=None,C=None):
r"""
-''H'' -- space of automorphic forms
-''P'' -- principal parts = list of dictionaries
-''C'' -- a list of dictionaries of coefficients in the form SetC[k][(i,n)]=c
(if only one dictionary is supplied we use the same for all)
"""
N=dict()
N['comp_dim']=len(P)
if(N['comp_dim']==0):
raise ValueError,"Need to specify at least one set of conditions!"
if H._verbose > 0:
print "comp_dim:=",N['comp_dim']
print "PP=",P
N['SetCs']=dict()
N['SetCs_neg']=dict()
N['cuspidal']=H._cuspidal
N['weak']=H._weak
nc=H._group.ncusps()
eps = 2.0*10.0**(-H.prec()*ln(2.0)/ln(10.0))
if C<>None and len(C)>0:
C1=dict()
if(isinstance(C,dict)):
C1[0]=[C]
for i in range(1,N['comp_dim']):
C1.append(C)
elif(isinstance(C,list)):
C1 = copy(C)
if(len(C1)<>N['comp_dim']):
raise ValueError,"Need the same length of coefficients to set as number of principal parts!"
else:
raise ValueError,"Need the same length of coefficients to set as number of principal parts!"
else:
C1=list()
for j in range(N['comp_dim']):
C1.append({})
if H._verbose > 1:
print "SetC=",C1
# Set coefficients (e.g. in +-space or similar)
if(C1<>None):
for j in range(N['comp_dim']):
N['SetCs'][j]=copy(C1[j]) # dict()
else:
for j in range(N['comp_dim']):
N['SetCs'][j]=dict()
## Set coefficients given by the principal parts
for j in range(N['comp_dim']):
N['SetCs_neg'][j]=dict()
if(P[j].has_key("-")):
for (r,n) in P[j]['-']:
N['SetCs_neg'][j][(r,n)]=P[j]['-'][(r,n)]
if P[j].has_key("+"):
for (r,n) in P[j]['+']:
if H.alpha(r)[0]==0:
N['SetCs'][j][(r,n)]=P[j]['+'][(r,n)]
## Impose further restrictions by cuspidality or non-weakness
for icusp in range(nc):
al=H.alpha(icusp)[0]
if H._verbose > 1:
print "alpha(",icusp,")=",al
if (al<-eps and not N['weak']) or (al<=eps and N['cuspidal']):
for j in range(N['comp_dim']):
N['SetCs'][j][(icusp,0)]=0
if al<=eps:
for j in range(N['comp_dim']):
if(P[j]['+'].has_key((icusp,0))):
## We only have to set the zeroth coefficients if we don't use it as a variable for a^{-}
if(P[j].has_key('-') and P[j]['-'].has_key((icusp,0))):
N['SetCs'][j][(icusp,0)]=P[j]['+'][(icusp,0)]
#else:
# N['SetCs'][j][(icusp,0)]=0
# remove coefficients that are already set using the principal part
# and set them using the supplied coefficients otherwise
#for i in range(len(PP)):
# for (r,n) in PP[i].keys():
# N['SetCs'][i][(r,n)]=PP[i]
#print "N end=",N
return N
def set_norm_harmonic_weak_maass_forms2(H,P,C=None):
r"""
-''H'' -- space of automorphic forms
-''P'' -- principal parts
-''C'' -- a dictionary of set coefficients in the form SetC[(i,n)]=c
"""
N=dict()
N['comp_dim']=1
N['SetCs']=dict()
N['cuspidal']=H._cuspidal
N['weak']=H._weak
nc=H._group.ncusps()
eps = 2.0*10.0**(-H.prec()*ln(2.0)/ln(10.0))
if(C<>None and len(C.keys())>0):
if(is_int(C.keys()[0])):
SetC=C
else:
SetC=dict()
SetC[0]=C
else:
SetC=C
if H._verbose > 1:
print "SetC=",SetC
if(C<>None and len(SetC.keys())>0):
N['comp_dim']=len(SetC.keys())
#print "N=",N
#print "SetC.keys=",SetC.keys()
for j in range(N['comp_dim']):
N['SetCs'][j]=dict()
if(P.has_key((0,0)) and H._holo):
#print "holo"
for j in range(N['comp_dim']):
N['SetCs'][j][(0,0)]=0
if(N['cuspidal']):
for icusp in range(nc):
v=H.alpha(icusp)[1]
if(v==1):
for j in range(N['comp_dim']):
N['SetCs'][j][(icusp,0)]=0
if(not N['weak']):
for icusp in range(nc):
al=H.alpha(icusp)[0]
if al<-eps:
for j in range(comp_dim):
SetCs[j][(icusp,0)]=0
#print "N=",N
if(SetC<>None):
for i in SetC.keys():
for (r,n) in SetC[i].keys():
if(P.has_key((r,n))):
N['SetCs'][i][(r,n)]=P[(r,n)]
else:
N['SetCs'][i][(r,n)]=SetC[i][(r,n)]
#print "N=",N
return N
def solve_system_for_harmonic_weak_Maass_waveforms(W,N):
    r"""
    Solve the linear system to obtain the Fourier coefficients of Maass forms
    INPUT:
    - ``W`` -- (system) dictionary
    - ``W['Ms']`` -- M start
    - ``W['Mf']`` -- M stop
    - ``W['nc']`` -- number of cusps
    - ``W['space']`` -- space of automorphic forms
    - ``W['V']`` -- matrix of size ((Ms-Mf+1)*nc)**2
    - ``W['RHS']`` -- right hand side (for inhomogeneous system) matrix of size ((Ms-Mf+1)*nc)*(dim)
    - ``N`` -- normalisation (dictionary, output from the set_norm_for_maass function)
    - ``N['SetCs']`` -- Which coefficients are set and their values
    - ``N['comp_dim']``-- How large is the assumed dimension of the solution space
    - ``N['num_set']`` -- Number of coefficients which are set
    OUTPUT:
    - ``C`` -- Fourier coefficients
    EXAMPLES::

    sage: G=MySubgroup(Gamma0(1))
    sage: mpmath.mp.dps=20
    sage: R=mpmath.mpf(9.533695261353557554344235235928770323821256395107251982375790464135348991298347781769255509975435366)
    sage: Y=mpmath.mpf(0.5)
    sage: W=setup_matrix_for_Maass_waveforms(G,R,Y,12,22)
    sage: N=set_norm_maass(1)
    sage: C=solve_system_for_Maass_waveforms(W,N)
    sage: C[0][2]*C[0][3]-C[0][6]
    mpc(real='-1.8055426724989656270259e-14', imag='1.6658248366482944572967e-19')

    If M is too large and the precision is not high enough the matrix might be numerically singular
    W=setup_matrix_for_Maass_waveforms(G,R,Y,20,40)
    sage: C=solve_system_for_Maass_waveforms(W,N)
    Traceback (most recent call last)
    ...
    ZeroDivisionError: Need higher precision! Use > 23 digits!

    Increasing the precision helps
    sage: mpmath.mp.dps=25
    sage: R=mpmath.mpf(9.533695261353557554344235235928770323821256395107251982375790464135348991298347781769255509975435366)
    sage: C=solve_system_for_Maass_waveforms(W,N)
    sage: C[0][2]*C[0][3]-C[0][6]
    mpc(real='3.780824715556976438911480324e-25', imag='2.114746048869188750991752872e-99')
    """
    # Unpack the system description.
    V=W['V']
    Ms=W['Ms']
    Mf=W['Mf']
    nc=W.get('nc',1)
    PP=W.get('PP',[])
    H = W.get('space',None)
    if not H:
        raise TypeError,"Need a space together with our W!"
    verbose = H._verbose
    alphas=W['alphas']
    Ml=W['Ml'] #Mf-Ms+1
    variable_a_plus=W['var_a+']
    variable_a_minus=W['var_a-']
    if(V.ncols()<>Ml*nc or V.nrows()<>Ml*nc):
        raise Exception," Wrong dimension of input matrix!"
    # we have to assume that all normalizations use the same coefficients
    maxit=1000
    SetCs=N['SetCs']
    SetCs_neg=N.get('SetCs_neg',{})
    CF = MPComplexField(H.prec())
    zero = CF(0)
    comp_dim=N['comp_dim']
    use_sym=0
    # SetClist maps a component index to {flat_index: value} where the flat
    # index of coefficient (r,n) is r*Ml+n.
    SetClist=dict()
    for j in range(0,comp_dim):
        SetClist[j]=dict()
    if len(PP)>0 and ((comp_dim<>len(SetCs.keys()) and comp_dim<>len(PP))):
        print "comp_dim=",comp_dim
        print "SetC=",SetCs
        print "PP=",PP
        raise ValueError," Inconsistent normalization SetCs:%s" % SetCs
    num_set=0
    for j in range(0,comp_dim):
        # # First we treat set values of coefficients not corresponsing to the principal part
        for (r,n) in SetCs[j].keys():
            nr = r*Ml+n
            if nr>=0 or not H.is_holomorphic():
                SetClist[j][nr]=SetCs[j][(r,n)]
            elif PP[j]['+'].has_key((r,n)) and PP[j]['-'].has_key((r,n)):
                SetClist[j][nr]=0
        if verbose>0:
            print "SetClist_pos=",SetClist
            print "var_a+=",variable_a_plus[j]
            print "var_a-=",variable_a_minus[j]
        ## Then we check the zeroth coefficients
        for r in range(nc):
            if(alphas[r][1]==1):
                if( (not variable_a_plus[j][r]) and (not variable_a_minus[j][r])):
                    nr = r*Ml
                    if(SetCs_neg.get(j,{}).has_key((r,0))):
                        SetClist[j][nr]=CF(SetCs_neg[j][(r,0)])
    num_set=len(SetClist[0].keys())
    if verbose>0:
        print "SetClist_tot=",SetClist
    # Choose the mpmath context matching the matrix entry type.
    t=V[0,0]
    if(isinstance(t,float)):
        mpmath_ctx=mpmath.fp
    else:
        mpmath_ctx=mpmath.mp
    if verbose>0:
        print "mpmath_ctx=",mpmath_ctx
    #use_symmetry=False
    MS = MatrixSpace(CF,int(Ml*nc-num_set),int(comp_dim))
    RHS = Matrix_complex_dense(MS,0,True,True)
    # We allow for either a variation of principal parts or of set coefficients
    if(W.has_key('RHS')):
        l=W['RHS'].ncols()
        if(l>1 and l<>comp_dim):
            raise ValueError,"Incorrect number of right hand sides!"
    MS2 = MatrixSpace(CF,int(Ml*nc-num_set),int(Ml*nc-num_set))
    LHS = Matrix_complex_dense(MS2,0,True,True)
    #LHS=mpmath_ctx.matrix(int(Ml*nc-num_set),int(Ml*nc-num_set))
    roffs=0
    if verbose>0:
        print "Ml=",Ml
        print "num_set=",num_set
        print "SetCs=",SetCs
        print "SetClist=",SetClist
        #print "Valslist=",Valslist
        print "V.rows=",V.nrows()
        print "V.cols=",V.ncols()
        print "LHS.rows=",LHS.nrows()
        print "LHS.cols=",LHS.ncols()
        print "RHS.rows=",RHS.nrows()
        print "RHS.cols=",RHS.ncols()
        print "use_sym=",use_sym
    # Build the reduced system: rows/columns corresponding to set
    # coefficients are skipped (roffs/coffs track the offsets) and their
    # contributions are moved to the right hand side.
    for r in range(V.nrows()):
        cr=r+Ms
        if(SetClist[0].keys().count(r+Ms)>0):
            roffs=roffs+1
            continue
        for fn_j in range(comp_dim):
            if(W.has_key('RHS') and W['RHS'].ncols()>fn_j):
                RHS[r-roffs,fn_j]=CF(-W['RHS'][r,fn_j])
            elif(W.has_key('RHS')):
                RHS[r-roffs,fn_j]=CF(-W['RHS'][r,0])
            else:
                RHS[r-roffs,fn_j]=zero
            for c in SetClist[fn_j].keys():
                v=CF(SetClist[fn_j][c])
                tmp=v*V[r,c-Ms]
                RHS[r-roffs,fn_j]=RHS[r-roffs,fn_j]-tmp
        coffs=0
        for k in range(V.ncols()):
            if(SetClist[0].keys().count(k+Ms)>0):
                coffs=coffs+1
                continue
            try:
                LHS[r-roffs,k-coffs]=V[r,k]
            except IndexError:
                print "r,k=",r,k
                print "V.rows=",V.nrows()
                print "V.cols=",V.ncols()
                print "roffs,coffs=",roffs,coffs
                print "r-roffs,k-coffs=",r-roffs,k-coffs
                print "LHS.rows=",LHS.nrows()
                print "LHS.cols=",LHS.ncols()
                raise IndexError,"Matrix / coefficients is set up wrong!"
            #print "LHS[",r,k,"]=",LHS[r-roffs,k-coffs]
    #return LHS,RHS
    smin=smallest_inf_norm(LHS)
    if verbose>0:
        print "sminfn=",smin
    # NOTE(review): dps0 is computed but never used below.
    dps0=CF.prec()
    done=False
    i=1
    # Try the QR decomposition, raising the working precision on failure.
    while (not done and i<=maxit):
        try:
            Q,R = LHS.qr_decomposition()
            #A, p = mpmath_ctx.LU_decomp(LHS)
            done=True
        except ZeroDivisionError:
            #t=int(mpmath_ctx.ceil(-mpmath_ctx.log10(smallest_inf_norm(LHS))))
            t=int(ceil(-log_b(smallest_inf_norm(LHS),10)))
            dps=t+5*i; i=i+1
            if verbose>-1:
                print "raising number of digits to:",dps
            LHS.set_prec(dps)
            # raise ZeroDivisionError,"Need higher precision! Use > %s digits!" % t
    if(i>=maxit):
        raise ZeroDivisionError,"Can not raise precision enough to solve system! Should need > %s digits! and %s digits was not enough!" % (t,dps)
    # Back-substitute into X[fn_j][0][cusp][key]; set coefficients are
    # inserted from SetClist, solved ones from TMP (with roffs skipped).
    X=dict()
    for fn_j in range(comp_dim):
        X[fn_j] = dict()
        X[fn_j][0] = dict()
        v = RHS.column(fn_j)
        if verbose>0:
            print "len(B)=",len(v)
        #print "RHS=",v
        #b = mpmath_ctx.L_solve(A, RHS.column(fn_j), p)
        TMP = LHS.solve(v) #mpmath_ctx.U_solve(A, b)
        roffs=0
        res = (LHS*TMP-v).norm()
        if verbose>0:
            print "res(",fn_j,")=",res
        #res = mpmath_ctx.norm(mpmath_ctx.residual(LHS, TMP, RHS.column(fn_j)))
        #print "res(",fn_j,")=",res
        for i in range(0,nc):
            X[fn_j][0][i]=dict()
        for i in range(nc):
            roffs2=0
            for n in range(Ml):
                nn=i*Ml+n+Ms
                key=n+Ms
                #if(i==1):
                # print n,key
                if(SetClist[fn_j].keys().count(nn)>0):
                    if verbose>1:
                        print "We have set ",nn
                    roffs=roffs+1
                    X[fn_j][0][i][key]=SetClist[fn_j][nn]
                    if verbose>0:
                        print "X[",fn_j,",",i,",",key,"]=",SetClist[fn_j][nn]
                        print "nn=",nn
                    continue
                try:
                    #X[fn_j][0][i][n-roffs2+Ms]=TMP[nn-Ms-roffs,0]
                    X[fn_j][0][i][key]=TMP[nn-Ms-roffs]
                except IndexError:
                    print "n*Mli-roffs=",n,'+',Ml,'*',i,'-',roffs,"=",n+Ml*i-roffs
    ## We also insert the principal part if it is applicable
    #mpmath.mp.dps=dpold
    # return x
    return X
def smallest_inf_norm(V):
    r"""
    Computes the smallest of the supremum norms of the columns of a matrix.
    INPUT:
    - ``V`` -- matrix (real/complex); may expose ncols()/nrows() methods
      (Sage style) or cols/rows attributes (mpmath style)
    OUTPUT:
    - ``t`` -- minimum of supremum norms of the columns of V
      (None for a matrix with no columns)
    EXAMPLE::
    sage: A=mpmath.matrix([['0.1','0.3','1.0'],['7.1','5.5','4.8'],['3.2','4.4','5.6']])
    sage: smallest_inf_norm(A)
    mpf('5.5')
    """
    # BUGFIX: the old code initialized the minimum to 100, which silently
    # capped the result whenever every column norm exceeded 100.
    minc=None
    try:
        nc = V.ncols(); nr=V.nrows()
    except AttributeError:
        # mpmath matrices expose dimensions as attributes
        nc = V.cols; nr=V.rows
    for j in range(nc):
        maxr=0
        for k in range(nr):
            t=abs(V[k,j])
            if t>maxr:
                maxr=t
        if minc is None or maxr<minc:
            minc=maxr
    return minc
def smallest_inf_norm_mpmath(V):
    r"""
    Computes the smallest of the supremum norms of the columns of a matrix.
    INPUT:
    - ``V`` -- matrix (real/complex) with mpmath-style cols/rows attributes
    OUTPUT:
    - ``t`` -- minimum of supremum norms of the columns of V
      (None for a matrix with no columns)
    EXAMPLE::
    sage: A=mpmath.matrix([['0.1','0.3','1.0'],['7.1','5.5','4.8'],['3.2','4.4','5.6']])
    sage: smallest_inf_norm(A)
    mpf('5.5')
    """
    # BUGFIX: the old code initialized the minimum to 100, which silently
    # capped the result whenever every column norm exceeded 100.
    minc=None
    for j in range(V.cols):
        maxr=0
        for k in range(V.rows):
            t=abs(V[k,j])
            if t>maxr:
                maxr=t
        if minc is None or maxr<minc:
            minc=maxr
    return minc
def solve_system_for_harmonic_weak_Maass_waveforms_mpmath(W,N):
    r"""
    Solve the linear system to obtain the Fourier coefficients of Maass forms
    INPUT:
    - ``W`` -- (system) dictionary
    - ``W['Ms']`` -- M start
    - ``W['Mf']`` -- M stop
    - ``W['nc']`` -- number of cusps
    - ``W['V']`` -- matrix of size ((Ms-Mf+1)*nc)**2
    - ``W['space']`` -- space of automorphic forms
    - ``W['RHS']`` -- right hand side (for inhomogeneous system) matrix of size ((Ms-Mf+1)*nc)*(dim)
    - ``N`` -- normalisation (dictionary, output from the set_norm_for_maass function)
    - ``N['SetCs']`` -- Which coefficients are set and their values
    - ``N['comp_dim']``-- How large is the assumed dimension of the solution space
    - ``N['num_set']`` -- Number of coefficients which are set
    OUTPUT:
    - ``C`` -- Fourier coefficients
    EXAMPLES::

    sage: G=MySubgroup(Gamma0(1))
    sage: mpmath.mp.dps=20
    sage: R=mpmath.mpf(9.533695261353557554344235235928770323821256395107251982375790464135348991298347781769255509975435366)
    sage: Y=mpmath.mpf(0.5)
    sage: W=setup_matrix_for_Maass_waveforms(G,R,Y,12,22)
    sage: N=set_norm_maass(1)
    sage: C=solve_system_for_Maass_waveforms(W,N)
    sage: C[0][2]*C[0][3]-C[0][6]
    mpc(real='-1.8055426724989656270259e-14', imag='1.6658248366482944572967e-19')

    If M is too large and the precision is not high enough the matrix might be numerically singular
    W=setup_matrix_for_Maass_waveforms(G,R,Y,20,40)
    sage: C=solve_system_for_Maass_waveforms(W,N)
    Traceback (most recent call last)
    ...
    ZeroDivisionError: Need higher precision! Use > 23 digits!

    Increasing the precision helps
    sage: mpmath.mp.dps=25
    sage: R=mpmath.mpf(9.533695261353557554344235235928770323821256395107251982375790464135348991298347781769255509975435366)
    sage: C=solve_system_for_Maass_waveforms(W,N)
    sage: C[0][2]*C[0][3]-C[0][6]
    mpc(real='3.780824715556976438911480324e-25', imag='2.114746048869188750991752872e-99')
    """
    # Unpack the system description (mpmath matrices: cols/rows attributes).
    V=W['V']
    Ms=W['Ms']
    Mf=W['Mf']
    nc=W['nc']
    PP=W.get('PP',[])
    alphas=W['alphas']
    H=W.get('space',None)
    if not H:
        raise ValueError," Need a space in W!"
    verbose =H._verbose
    Ml=W['Ml'] #Mf-Ms+1
    variable_a_plus=W['var_a+']
    variable_a_minus=W['var_a-']
    if(V.cols<>Ml*nc or V.rows<>Ml*nc):
        raise Exception," Wrong dimension of input matrix!"
    # we have to assume that all normalizations use the same coefficients
    SetCs=N['SetCs']
    SetCs_neg=N['SetCs_neg']
    zero=mpmath.mp.mpf(0)
    #Vals=N['Vals']
    ### We have to determine whether we have two "constant" terms
    #if(Ms<0 and Ms+Mf<>0):
    #two_terms=1
    #else:
    # two_terms=0
    #print "Using two constant terms!:",two_terms
    comp_dim=N['comp_dim']
    use_sym=0
    #if(len(SetCs.keys())>0):
    # num_set=len(SetCs[0])
    #else:
    # num_set=0
    #if(len(SetCs_neg.keys())>0):
    # num_set=num_set+len(SetCs_neg[0])
    ## converse the dictionary to a list
    # SetClist maps a component index to {flat_index: value} where the flat
    # index of coefficient (r,n) is r*Ml+n.
    SetClist=dict()
    for j in range(0,comp_dim):
        SetClist[j]=dict()
    if(comp_dim<>len(SetCs.keys()) and comp_dim<>len(PP)):
        print "comp_dim=",comp_dim
        print "SetC=",SetCs
        print "PP=",PP
        raise ValueError," Inconsistent normalization SetCs:%s" % SetCs
    num_set=0
    for j in range(0,comp_dim):
        # # First we treat set values of coefficients not corresponsing to the principal part
        for (r,n) in SetCs[j].keys():
            # NOTE(review): the inner loop re-binds j, shadowing the outer
            # loop variable -- compare with the non-mpmath version, which has
            # no such nested loop. Verify this is intended.
            for j in range(comp_dim):
                #if(two_terms and n>=0):
                # nr = r*Ml+n+1
                #else:
                nr = r*Ml+n
                SetClist[j][nr]=SetCs[j][(r,n)]
        if verbose>0:
            print "SetClist_pos=",SetClist
        #if(not two_terms):
        #continue
        #for (r,n) in SetCs_neg[j].keys():
        # for j in range(comp_dim):
        # nr = r*Ml+n
        # SetClist[j][nr]=SetCs_neg[j][(r,n)]
        ## Then we check the zeroth coefficients
        # NOTE(review): this loop computes nr but the assignment that would
        # use it is commented out, so it currently has no effect.
        for r in range(nc):
            if(alphas[r][1]==1):
                if( (not variable_a_plus[r]) and (not variable_a_minus[r])):
                    nr = r*Ml
                    #if(SetCs_neg[j].has_key((r,0))):
                    #
                    #SetClist[j][nr]=zero
    num_set=len(SetClist[0].keys())
    if verbose>0:
        print "SetClist_tot=",SetClist
    # Choose the mpmath context matching the matrix entry type.
    t=V[0,0]
    if(isinstance(t,float)):
        mpmath_ctx=mpmath.fp
    else:
        mpmath_ctx=mpmath.mp
    if verbose>0:
        print "mpmath_ctx=",mpmath_ctx
    #use_symmetry=False
    RHS=mpmath_ctx.matrix(int(Ml*nc-num_set),int(comp_dim))
    # We allow for either a variation of principal parts or of set coefficients
    #
    if(W.has_key('RHS')):
        l=W['RHS'].cols
        if(l>1 and l<>comp_dim):
            raise ValueError,"Incorrect number of right hand sides!"
    LHS=mpmath_ctx.matrix(int(Ml*nc-num_set),int(Ml*nc-num_set))
    roffs=0
    if verbose>0:
        print "Ml=",Ml
        print "num_set=",num_set
        print "SetCs=",SetCs
        print "SetClist=",SetClist
        #print "Valslist=",Valslist
        print "V.rows=",V.rows
        print "V.cols=",V.cols
        print "LHS.rows=",LHS.rows
        print "LHS.cols=",LHS.cols
        print "RHS.rows=",RHS.rows
        print "RHS.cols=",RHS.cols
        print "use_sym=",use_sym
    # Build the reduced system: rows/columns of set coefficients are skipped
    # (roffs/coffs track the offsets), their contributions go to the RHS.
    for r in range(V.rows):
        cr=r+Ms
        if(SetClist[0].keys().count(r+Ms)>0):
            roffs=roffs+1
            continue
        for fn_j in range(comp_dim):
            if(W.has_key('RHS') and W['RHS'].cols>fn_j):
                RHS[r-roffs,fn_j]=-W['RHS'][r,fn_j]
            elif(W.has_key('RHS')):
                RHS[r-roffs,fn_j]=-W['RHS'][r,0]
            for c in SetClist[fn_j].keys():
                v=SetClist[fn_j][c]
                # Coerce the set value into the working context.
                if(mpmath_ctx==mpmath.mp):
                    tmp=mpmath_ctx.mpmathify(v)
                elif(isinstance(v,float)):
                    tmp=mpmath_ctx.mpf(v)
                else:
                    tmp=mpmath_ctx.mpc(v)
                tmp=tmp*V[r,c-Ms]
                RHS[r-roffs,fn_j]=RHS[r-roffs,fn_j]-tmp
        coffs=0
        for k in range(V.cols):
            if(SetClist[0].keys().count(k+Ms)>0):
                coffs=coffs+1
                continue
            try:
                LHS[r-roffs,k-coffs]=V[r,k]
            except IndexError:
                print "r,k=",r,k
                print "V.rows=",V.rows
                print "V.cols=",V.cols
                print "roffs,coffs=",roffs,coffs
                print "r-roffs,k-coffs=",r-roffs,k-coffs
                print "LHS.rows=",LHS.rows
                print "LHS.cols=",LHS.cols
                return
            #print "LHS[",r,k,"]=",LHS[r-roffs,k-coffs]
    #return LHS
    smin=smallest_inf_norm_mpmath(LHS)
    if verbose>0:
        print "sminfn=",smin
    if(smin<>0):
        t=int(mpmath_ctx.ceil(-mpmath_ctx.log10(smin)))
    else:
        raise ValueError,"Something wrong with normalization. Got min norm=0!"
    # Raise the working precision if necessary, restored at the end.
    dpold=mpmath.mp.dps
    mpmath.mp.dps=max(dpold,t+5)
    maxit=100;i=0
    done=False
    if verbose>0:
        print "using number of digits:",mpmath.mp.dps
    # LU-decompose, raising precision on numerical singularity.
    while (not done and i<=maxit):
        try:
            A, p = mpmath_ctx.LU_decomp(LHS)
            done=True
        except ZeroDivisionError:
            mpmath.mp.dps=mpmath.mp.dps+5*i; i=i+1
            print "raising number of digits to:",mpmath.mp.dps
            # raise ZeroDivisionError,"Need higher precision! Use > %s digits!" % t
    if(i>=maxit):
        raise ZeroDivisionError,"Can not raise precision enough to solve system! Should need > %s digits! and %s digits was not enough!" % (t,mpmath.mp.dps)
    #try:
    # A, p = mpmath_ctx.LU_decomp(LHS)
    #except ZeroDivisionError:
    # t=int(mpmath_ctx.ceil(-mpmath_ctx.log10(smallest_inf_norm(LHS))))
    # raise ZeroDivisionError,"Need higher precision! Use > %s digits!" % t
    #return A
    #for k in range(RHS.rows):
    # print "RHS(",k,")=",RHS[k,0]
    # Back-substitute into X[fn_j][0][cusp][key]; set coefficients come from
    # SetClist, solved ones from TMP (with roffs skipped).
    X=dict()
    for fn_j in range(comp_dim):
        X[fn_j] = dict() #mpmath.matrix(int(Ml),int(1))
        X[fn_j][0] = dict() #mpmath.matrix(int(Ml),int(1))
        if verbose>0:
            print "len(B)=",len(RHS.column(fn_j))
        b = mpmath_ctx.L_solve(A, RHS.column(fn_j), p)
        TMP = mpmath_ctx.U_solve(A, b)
        roffs=0
        res = mpmath_ctx.norm(mpmath_ctx.residual(LHS, TMP, RHS.column(fn_j)))
        #print "res(",fn_j,")=",res
        for i in range(0,nc):
            X[fn_j][0][i]=dict()
        #for n in range(Ml):
        # if(SetClist[fn_j].keys().count(i*Ml+n+Ms)>0):
        # roffs=roffs+1
        # #print "X[",fn_j,",",n,",Vals[fn_j][n]
        # X[fn_j][0][n+Ms]=SetClist[fn_j][i*Ml+n+Ms]
        # continue
        # X[fn_j][0][n+Ms]=TMP[n-roffs,0]
        for i in range(nc):
            roffs2=0
            for n in range(Ml):
                nn=i*Ml+n+Ms
                ## get the key of the coefficient we set
                #if(two_terms):
                # if(n+Ms==0):
                # key='-0' ## the constant 'negative' term
                # elif(n+Ms>=1):
                # key=n-1+Ms ## the constant 'positive' term
                # else:
                # key=n+Ms
                #else:
                key=n+Ms
                #if(i==1):
                # print n,key
                if(SetClist[fn_j].keys().count(nn)>0):
                    if verbose>1:
                        print "We have set ",nn
                    roffs=roffs+1
                    X[fn_j][0][i][key]=SetClist[fn_j][nn]
                    if verbose>1:
                        print "X[",fn_j,",",i,",",key,"]=",SetClist[fn_j][nn]
                        print "nn=",nn
                    continue
                #if(two_terms and n+Ms==1):
                # X[fn_j][0][i]['-0']=TMP[nn-Ms-roffs,0]
                #if(two_terms and n+Ms==1):
                # roffs2=roffs2+1
                try:
                    #X[fn_j][0][i][n-roffs2+Ms]=TMP[nn-Ms-roffs,0]
                    X[fn_j][0][i][key]=TMP[nn-Ms-roffs,0]
                except IndexError:
                    print "n*Mli-roffs=",n,'+',Ml,'*',i,'-',roffs,"=",n+Ml*i-roffs
    ## We also insert the principal part if it is applicable
    mpmath.mp.dps=dpold
    # return x
    return X
def is_int(q):
    r"""
    Find out if the rational number q is an integer.
    INPUT:
    -''q'' -- integer/rational/real
    OUTPUT:
    - logical -- True if q is an integer otherwise False
    EXAMPLES::

    sage: is_int(1)
    True
    sage: is_int(float(1.0))
    True
    sage: is_int(RR(1.0))
    True
    sage: is_int(6/3)
    True
    sage: is_int(6/4)
    False
    sage: is_int(1.5)
    False
    sage: is_int(Gamma0(1))
    False
    """
    # Exact integer types are integers by definition.
    if isinstance(q,sage.rings.integer.Integer) or isinstance(q,int):
        return True
    # A rational is integral when its denominator is 1; otherwise the
    # floor/ceil test below settles it.
    if isinstance(q,sage.rings.rational.Rational) and q.denominator()==1:
        return True
    if isinstance(q,tuple):
        return False
    # Generic test: q is integral iff floor(q) == ceil(q).
    try:
        if floor(q)==ceil(q):
            return True
    except:
        pass
    return False
def rational_approximation(x,eps=1E-12):
    r""" Computes an approximation to x in QQ(i).
    INPUT:
    -''x'' -- real or complex number
    -''eps'' -- desired precisionin the approximation
    OUTPUT:
    -''y'' -- rational approximation with error eps
    If |x-[x]|<eps we return [x], the nearest integer to x otherwise |x-ncf(x)| < eps where the ncf(x) is a continued fraction approximation.
    """
    # Complex input: approximate real and imaginary parts separately.
    if hasattr(x,"imag"):
        if hasattr(x,"ae"):
            # mpmath numbers: real/imag are attributes, not methods.
            xr = x.real; xi=x.imag
        else:
            # print "x=",x,type(x)
            xr = real(x); xi=imag(x)
        if xi<>0:
            xr=rational_approximation(xr,eps)
            xi=rational_approximation(xi,eps)
            return xr+I*xi
        # Purely real: continue with the real part.
        x = xr
    if isinstance(x,mpf):
        # Convert an mpmath float to a Sage real of the same precision.
        x = RealField(mpmath.mp.prec)(x)
        #return rational_approximation(x,eps)
    # NOTE(review): ZZ is a ring object, not a class -- passing it to
    # isinstance may raise TypeError; verify on the Sage version in use.
    if isinstance(x,(int,Integer,ZZ)):
        return QQ(x)
    if hasattr(x,"parent"):
        if x.parent() == QQ:
            return x
    if not isinstance(x,(float,type(RR(1)))):
        raise ValueError,"Can not find rational approximation to x:{0} (of type {1}) with eps={2}".format(x,type(x),eps)
    n = nearest_integer(x)
    prec=x.parent().prec()
    # NOTE(review): for eps<1 this gives ceil of a negative number, i.e. a
    # non-positive "digit count" -- possibly intended to be -log10(eps)?
    dprec=ceil(eps*ln(2.0)/ln(10.0))
    RF = RealField(prec)
    if abs(x-n)<eps:
        return QQ(n)
    else:
        # xr is in [0,1]
        xr = RealField(prec)(x)-RealField(prec)(n)
        # Note: ncf[0]=0
        ncf = nearest_integer_continued_fraction(xr,dprec)
        ## Get approximation from sequence:
        #print "ncf=",ncf
        # Take successively longer prefixes of the continued fraction until
        # the approximation error drops below eps.
        for j in range(len(ncf)):
            y = real_from_nearest_integer_continued_fraction(ncf[0:j+1])
            err=RR(abs(y-xr))
            #print "y=",y
            #print "err=",err,type(err)
            #print "eps=",eps,type(eps)
            #print "cmp:",(err<eps)
            if err<RR(eps):
                #print "Do quit!"
                break
        return QQ(y+n)
def real_from_nearest_integer_continued_fraction(ncf):
    r"""
    Take a list of coefficients of a nearest-integer continued fraction and
    return the corresponding (rational) point, evaluated from the innermost
    term outwards: ncf[0] - 1/(ncf[1] - 1/(ncf[2] - ...)).
    """
    acc = 0
    for a in reversed(ncf[1:]):
        acc = QQ(-1/(acc+a))
    return QQ(acc+ncf[0])
## needed for pickling
import __main__
# Register the main classes in the __main__ namespace so that instances
# pickled from an interactive (Sage/IPython) session can be unpickled
# when this module is imported.
__main__.AutomorphicFormSpace=AutomorphicFormSpace
__main__.HarmonicWeakMaassForms=HarmonicWeakMaassForms
__main__.AutomorphicFormElement=AutomorphicFormElement
__main__.HalfIntegralWeightForms=HalfIntegralWeightForms
#__main__.
#__main__.
### temporary routines for testing purposes
def _test_Vs(W1,W2):
    # Debug helper: compare the coefficient matrices of two systems W1, W2
    # (dictionaries as produced by setup_matrix_* routines) entry by entry
    # and print every pair of entries differing by more than 0.1.
    Ms=W1['Ms']  # NOTE(review): Ms/Mf are read from W1 only; unused below
    Mf=W1['Mf']
    nc=W1['nc']
    Ml1=W1['Mf']-W1['Ms']+1
    Ml2=W2['Mf']-W2['Ms']+1
    # NOTE(review): r,k range over range(Mf); if Ms<0 one might expect
    # range(Ms,Mf) here -- confirm against the matrix layout.
    for i in range(nc):
        for j in range(nc):
            for r in range(Mf):
                for k in range(Mf):
                    a=W1['V'][r+i*Ml1-W1['Ms'],k+j*Ml1-W1['Ms']]
                    b=W2['V'][r+i*Ml2-W2['Ms'],k+j*Ml2-W2['Ms']]
                    t=a-b
                    if(abs(t)>0.1):
                        print i,j,':',r,k,':',a,b,'diff:',t
                        print "ia=",r+i*Ml1-W1['Ms'],':',k,'+',j,'*',Ml1,'-',W1['Ms'],'=',k+j*Ml1-W1['Ms']
                        print "ib=",r+i*Ml2-W2['Ms'],k+j*Ml2-W2['Ms']
    return
def _test_lin_comb(F,G,x0,x1,N=100):
r"""
Test if the coefficients of F+xG are integral for x0<=x <=x1
"""
prec=F.space().prec()
RF=RealField(prec)
h=RF(x1-x0)/RF(N)
ermin=1.0
for j in range(N):
x = x0+j*h
P = F._lin_comb(G,1,x)
c0 = P._coeffs[0][0][0]
er_loc=0
c = dict()
for k in range(1,9):
c[k]=RR((P._coeffs[0][0][k]/c0).real)
## remember we might have rational numbers.
# hopefully they only have 2 or 3 in the denominator...
er = abs(nearest_integer(c[k]*RF(6))-c[k]*RF(6))
if abs(c[k]) > 0.01:
if(er>er_loc):
er_loc=er
if er_loc<0.1:
print "err loc=",er_loc
for k in range(1,9):
er = abs(nearest_integer(c[k])-c[k])
if abs(c[k]) > 0.01:
print x,k,c[k],er
if er_loc<ermin:
ermin=er_loc
xmin=x
Pmin=P
print "xmin=",xmin
print "ermin=",ermin
return Pmin
def norm_sci_pretty_print(c,nd=0,es='e',latex_pow=False,zero_lim=0):
    r"""
    Pretty-print a (complex) number c in scientific notation, treating
    values of modulus below zero_lim as zero.

    INPUT:
    -''c'' -- number (real or complex)
    -''nd'' -- number of digits
    -''es'' -- exponent separator (default 'e')
    -''latex_pow'' -- use LaTeX powers of 10 (default False)
    -''zero_lim'' -- threshold below which components are printed as 0
    """
    if is_int(c):
        return str(c)
    if abs(c)<zero_lim:
        return "0"
    # BUGFIX: the body referred to the undefined name 'cc' instead of the
    # parameter 'c', so this always raised NameError.
    if hasattr(c,"ae"):
        # mpmath numbers: real/imag are attributes, not methods.
        x=c.real; y=c.imag
    else:
        x=c.real(); y=c.imag()
    if abs(x)>1E-5:
        sx = sci_pretty_print(x,nd,'',latex_pow)
    elif abs(x)>zero_lim:
        sx = sci_pretty_print(x,2,'',latex_pow)
    else:
        sx=""
    # print x,y
    if y>0 and sx!="":
        p="+"
    else:
        p=""
    if abs(y)>1E-5:
        sy=p+sci_pretty_print(y,nd,'',latex_pow)+"i"
    elif abs(y)>zero_lim:
        sy=p+sci_pretty_print(y,2,'',latex_pow)+"i"
    else:
        sy=""
    ## un-scientify numbers between 0.1 and 1, i.e. with exponent -01
    if sx.find("10^{-01}")>0:
        ss=sx.replace("\cdot 10^{-01}","")
        sx=ss.replace(".","")
        if x<0:
            sx="-0."+sx.replace("-","")
        else:
            sx="0."+sx
    if sy.find("10^{-01}")>0:
        ss=sy.replace("\cdot 10^{-01}","")
        sy=ss.replace(".","")
        if y<0:
            sy="-0."+sy.replace("-","")
        else:
            sy="0."+sy
    s=sx+sy
    ss=s.replace("\cdot 10^{00}","")
    #print c,x,y,"::",ss
    return ss
def sci_pretty_print(s,nd=0,es='e',latex_pow=False):
    r""" Take a string representation of a number and returns it in scientific notation to desired number of digits.

    INPUT:
    -''s'' -- number or string representation of a number (real or complex;
              complex parts are handled by recursion on each part)
    -''nd'' -- number of digits to keep (0 = keep all)
    -''es'' -- exponent separator for the non-LaTeX form (default 'e')
    -''latex_pow'' -- if True format the exponent as \cdot 10^{..}
    """
    # remove leading sign #
    #x=mpmath.mp.mpf(s)
    #if(abs(x)<1):
    # raise NotImplementedError," Only implemented for |x|>1!! got x=%s" %x
    if(not isinstance(s,str)):
        s=str(s)
    s=s.replace("(","")
    s=s.replace(")","")
    s=s.strip()
    if(s.count("I")+s.count("i")+s.count("j")>0):
        # Get a default complex notation
        s=s.replace("*","")
        s=s.replace("I","i")
        s=s.replace("j","i")
        ## We have to find imaginary and real parts
        l=s.split("+")
        if len(l)>1:
            (s1,s2)=l
        else:
            (s1,s2)=s.split("-")
            s2="-"+s2
        if(s1.count("i")>0): # put imaginary part in standard form
            ims=s1.strip("i").lstrip(); res=s2
        else:
            ims=s2.strip("i").lstrip(); res=s1
        if(ims==""): ims="1"
        # Recurse on the real and imaginary parts separately.
        sres=sci_pretty_print(res,nd,es,latex_pow)
        sims=sci_pretty_print(ims,nd,es,latex_pow)
        if sres=="0": sres=""
        if sims=="0":
            sims=""
        else:
            sims=sims+"i"
        if sims.count("-")>0:
            return sres+" "+sims.replace(" -"," - ")
        elif sims<>"" and sres<>"":
            return sres+" + "+sims
        elif sres<>"":
            return sres
        elif sims<>"":
            return sims
        else:
            raise ValueError,"Could not find pretty print for s=%s " %s
    # --- real case from here on ---
    s=s.strip()
    if len(s.replace(".","").strip("0"))==0:
        return "0"
    if s.count(".")==0:
        s=s+".0"
    if s[0]=='-':
        # Handle the sign by recursing on the absolute value.
        ss=s.strip("-")
        ss=sci_pretty_print(ss,nd,es,latex_pow)
        return "-"+ss
    l=s.split(".")
    if len(l)>1:
        (sint,sdigs)=l
    elif len(s)<nd:
        return s
    elif len(l)>0:
        sint=l[0]
        sdigs=""
    else:
        raise ValueError," Can not do pretty print for s=%s" %s
    # Split off an already-present exponent (e.g. "1.2e-05").
    if sdigs.count("e")>0:
        l=sdigs.split("e")
        sdigs=l[0]
        ex=int(l[1])
    else:
        ex=0
    if len(sint)==1 and sint=="0":
        # Numbers below 1: the exponent comes from the leading zeros.
        # find the number of leading zeros
        sss=sdigs.lstrip("0")
        nz=len(sdigs)-len(sss)+1
        if nz<10:
            ex="-0"+str(nz)
        else:
            ex=str(-nz)
        # Fix correct rounding
        rest=sss[nd:len(sss)]
        ix=nd-1
        if len(rest)>0:
            if int(rest) < 5*10**(len(rest)-1):
                # print " round < : since "+rest+"<"+str(5*10**(len(rest)-1))
                d=int(sss[ix])
            elif int(rest) > 5*10**(len(rest)-1):
                # print " round > : since "+rest+">"+str(5*10**(len(rest)-1))
                d=int(sss[ix])+1
            else:
                # if we have an exact half we round randomly
                # NOTE(review): uses random() here but random.getrandbits in
                # the branch below -- only one of these can be correct for a
                # given import of 'random'; verify which name is in scope.
                d=int(sss[ix])+int(random())
            if d<10:
                ssdigs=sss[0:ix]+str(d) # We account for the leading digit too
            else:
                ssdigs=sss[0:ix-1]+str(int(sss[ix-1])+1)+str(d-10) # We account for the leading digit too
        # NOTE(review): if len(rest)==0 (fewer than nd significant digits),
        # ssdigs is never assigned and the return below raises NameError.
        if latex_pow:
            return ssdigs[0]+"."+ssdigs[1:nd]+"\cdot 10^{"+ex+"}"
        else:
            return ssdigs[0]+"."+ssdigs[1:nd]+es+ex
    # Numbers >= 1: the exponent is the number of integer digits - 1.
    ex=int(ex)+len(sint)-1
    if abs(ex)<10:
        ex="0"+str(ex)
    else:
        ex=str(ex)
    #ssdigs=sint[1:len(sint)]+sdigs
    # cut away to nd digits
    if nd>0:
        #ssdigs=sdigs[0:nd-1] # We acount the leading digit too
        # Try to do correct rounding
        rest=sdigs[nd-len(sint):len(sdigs)]
        #print "sdigs=",sdigs," nd=",nd
        #print "rest=",rest
        ix=nd-len(sint)-1
        if len(rest)>0:
            if int(rest) < 5*10**(len(rest)-1):
                # print " round < : since "+rest+"<"+str(5*10**(len(rest)-1))
                d=int(sdigs[ix])
            elif int(rest) > 5*10**(len(rest)-1):
                # print " round > : since "+rest+">"+str(5*10**(len(rest)-1))
                d=int(sdigs[ix])+1
            else:
                # if we have an exact half we round randomly
                random.seed()
                d=int(sdigs[ix])+int(random.getrandbits(1))
            if d<10:
                ssdigs=sdigs[0:ix]+str(d) # We account for the leading digit too
            else:
                if ix>0:
                    ssdigs=sdigs[0:ix-1]+str(int(sdigs[ix-1])+1)+str(d-10) # We account for the leading digit too
                else:
                    ssdigs=str(d-10) # We account for the leading digit too
                    # Carry propagated into the integer part.
                    if len(sint)==1:
                        sint=str(int(sint)+1)
                    else:
                        ll=len(sint); stmp=sint;
                        sint=stmp[1:ll-1]+str(int(stmp[ll-1])+1)
        else:
            ssdigs=sdigs[0:ix]
        #print "rest=",rest,len(rest)
        ssdigs=sint[1:len(sint)]+ssdigs
    else:
        ssdigs=sint[1:len(sint)]+sdigs
    # print sint[0]+"."+ssdigs+" \cdot 10^{"+ex+"}"
    if latex_pow:
        res=sint[0]+"."+ssdigs+" \cdot 10^{"+ex+"}"
        return res
    else:
        return sint[0]+"."+ssdigs+es+ex
def _set_character(character):
    r"""
    Normalize a ``character`` argument into a DirichletCharacter (or None).

    NOTE(review): this function looks like it was meant to be a *method*:
    it assigns ``self._character`` and reads ``self._group`` although it has
    no ``self`` parameter, so the integer branch raises NameError as written.
    TODO confirm intended owner class before use.
    """
    if isinstance(character,str):
        if ["","trivial"].count(character)==0:
            # NOTE(review): ``NotImplemented`` is a constant, not an exception
            # class (should be NotImplementedError), and ``multiplier`` is an
            # undefined name here — this branch cannot raise cleanly as written.
            raise NotImplemented,"Incorrect character! Got: %s" %multiplier
        # the empty / "trivial" strings mean "no character"
        character = None
    elif isinstance(character,sage.modular.dirichlet.DirichletCharacter) or isinstance(character,function):
        # already a character (or a callable standing in for one) — accept as-is
        pass
    elif is_int(character):
        # interpret an integer as an index into the Dirichlet group of the level
        self._character=DirichletGroup(self._group.level()).list()[character]
    else:
        raise TypeError," Got an unknown character : %s " % character
    return character
### Functions for error estimates
def c_Ax(A, x):
    """Factor c(A, x) appearing in the upper bound for the incomplete
    Gamma function Gamma(A, x): x^(A-1) for A<1, A*x^(A-1) for A>1, 1 for A=1."""
    if A == 1:
        return 1
    tail = x ** (A - 1)
    if A > 1:
        return A * tail
    return tail
def incgamma_upper_bound(A, x):
    """Upper bound for the incomplete Gamma function Gamma(A, x),
    namely exp(-x) * c_Ax(A, x)."""
    damping = exp(-x)
    return damping * c_Ax(A, x)
def sum_of_gamma_bound(M, alpha, A, X):
    """Bound for the tail sum Sum_{n>=M+1} n^alpha * Gamma(A, n*X).

    INPUT:
    - ``M``     -- truncation point (must satisfy M >= alpha/X)
    - ``alpha`` -- exponent of n in the summand
    - ``A``     -- first argument of the incomplete Gamma function
    - ``X``     -- positive scaling of the Gamma argument

    OUTPUT: the bound (a real number), or the pair ``(-1, M_min)`` if ``M``
    is too small, where ``M_min`` is the minimal admissible truncation point.

    Raises ValueError for non-positive ``X`` (the bound is meaningless there;
    X == 0 previously fell through to a ZeroDivisionError at ``alpha/X``).
    """
    if X <= 0:
        raise ValueError("Can not bound this sum for X={0}".format(X))
    if M < alpha / X:
        # The estimate is only valid for M >= alpha/X: tell the caller the
        # minimal admissible M instead of returning a bogus bound.
        print("Need to increase M to {0}".format(ceil(alpha / X)))
        return -1, ceil(alpha / X)
    f = exp(-X * M) * X ** -alpha
    if A < 1:
        return f * c_Ax(alpha + A - 1, X * M)
    elif A == 1:
        # extra factor 1/X in the A == 1 case
        return f / X * c_Ax(alpha + 1, X * M)
    else:
        return f * A * c_Ax(alpha + A - 1, X * M)
def error_bound_minus(k, M, Y):
    """
    Bound the truncated non-holomorphic part of a harmonic weak Maass form
    of weight ``k``, truncated at ``M`` terms, at height ``Y``.

    If ``Y >= 1`` it is interpreted as a *level* and replaced by the height
    sqrt(3)/2/Y of the corresponding fundamental domain corner.
    """
    if Y >= 1:
        height = sqrt(3.0) / 2 / Y
    else:
        height = Y
    fourpi = RR.pi() * RR(4)
    c1 = max(1.0 - k, 1.0)
    c2 = max(1.5 * k + 0.75, 1.0)
    f1 = 6.0 / fourpi ** 2 * height ** (-k - 1)
    f2 = M ** (0.5 * k + 0.75)
    f3 = exp(-RR.pi() * 2 * height * M)
    return c1 * c2 * f1 * f2 * f3
| Python |
# -*- coding: utf-8 -*-
#*****************************************************************************
# Copyright (C) 2010 Fredrik Strömberg <stroemberg@mathematik.tu-darmstadt.de>,
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
r"""
This file implements:
- Vector-valued harmonic weak Maass forms for the above Weil representation.
AUTHORS:
- Fredrik Strömberg
EXAMPLES::
# Construct the vector-valued Harmonic weak maass form corresponding to the holomorphic modular form of weight 2 on Gamma0(11)
sage: WR = WeilRepDiscriminantForm(11,dual=True);WR
Dual of Weil representation of the discriminant form given by ZZ/22ZZ with quadratic form Q(x)=11*x**2 mod 1.
sage: M=VVHarmonicWeakMaassForms(WR,0.5,20);M
Space of Vector-Valued harmonic weak Maass forms on Modular Group SL(2,Z) of weight 1/2 and values in CC[ZZ/22ZZ].
Representation is Dual of Weil representation of the discriminant form given by ZZ/22ZZ with quadratic form Q(x)=11*x**2 mod 1.
## Vector-valued weakly holomorphic modular form who is a generating function of the partition function.
sage: H=VVHarmonicWeakMaassForms(-6,-1/2,holomorphic=True)
sage: PP={(1/12,0):1,(5/12,0):-1}
sage: F=H.get_element(PP,maxC=10)
"""
import mpmath as mpmath
# ugly fix to support pickling of mpmath matrices
import mpmath.matrices.matrices
mpmath.matrices.matrices.matrix = mpmath.matrix
import random
import tempfile,os
from sage.all import Parent,SageObject,Integer,Rational,SL2Z,QQ,ZZ,CC,RR,Newform,sign
from sage.rings.complex_mpc import MPComplexField,MPComplexNumber
from mysubgroup import *
from automorphic_forms import *
from weil_rep_simple import *
from vv_harmonic_weak_maass_forms_alg import *
from psage.modules.vector_complex_dense import *
from psage.matrix.matrix_complex_dense import *
mp1=mpmath.mpf(1)
mp2=mpmath.mpf(2)
mp4=mpmath.mpf(4)
mppi=mpmath.mp.pi()
mpsqrtpi=mpmath.sqrt(mpmath.mp.pi())
class VVHarmonicWeakMaassForms(AutomorphicFormSpace):
r"""
Space of vector-valued harmonic weak Maass forms for the Weil representation of a finite quadratic module.
"""
    def __init__(self,WM,k=QQ(1)/QQ(2),holomorphic=False,dprec=15,sym=True,dr=None,verbose=0,**kwds):
        r"""
        Create a space of vector-valued harmonic weak Maass forms for the Weil representation of a finite quadratic module.
        INPUT:
        -''WM'' -- Weil representation (or other finite dimensional representation)
        -''k'' -- real
        -''holomorphic'' -- logical (default False), restrict to (weakly) holomorphic forms
        -''dprec''-- integer (default 15)
        -''sym'' -- logical (default True)
        -''dr'' -- logical (default None) = True if we force usage of dual representation and False if we force standard rep.
        -''verbose'' -- integer (default 0), verbosity level
        EXAMPLES::
        sage: WM=WeilRepDiscriminantForm(1)
        sage: M=VVHarmonicWeakMaassForms(WM,0.5,20);M
        Space of Vector-Valued harmonic weak Maass forms on Modular Group SL(2,Z) of weight 1/2 and dimension 0.
        Representation is Weil representation of the discriminant form given by ZZ/2ZZ with quadratic form Q(x)=1*x**2 mod 1.
        A ValueError is raised if we try to construct a space consisting of the zero function alone
        sage: WM=WeilRepDiscriminantForm(1,dual=True)
        sage: M=VVHarmonicWeakMaassForms(WM,0.5,20)
        ....
        ValueError: Space only contains the zero function! Change weight (1/2) or representation (dual rep.)
        """
        # If WR is an integer we construct the corr. Weil rep.
        #print "t=",type(kwds)
        # NOTE(review): if WM is none of the three accepted types, self.WM is
        # never assigned and the next line raises AttributeError — no explicit
        # type check is performed here.
        if is_int(WM):
            N=WM
            if dr:
                WR=WeilRepDiscriminantForm(N,dual=True)
            else:
                WR=WeilRepDiscriminantForm(N)
            #print "k=",k,type(k)
            self.WM=WeilRepMultiplier(WR,weight=QQ(k))
        elif isinstance(WM,WeilRepDiscriminantForm):
            self.WM=WeilRepMultiplier(WM,weight=k)
        elif isinstance(WM,WeilRepMultiplier):
            self.WM=WM
        self.group=MySubgroup(self.WM.group())
        self.weight=k
        # rational approximation of the weight (used in messages and pickling)
        self._weight_rat=QQ(RR(k))
        self.dprec=dprec
        # working precisions in bits: ~3.32 bits per decimal digit
        self._pullback_prec = ceil(dprec*3.32)
        self._setupV_prec = ceil(dprec*3.32)
        self._verbose=verbose
        # NOTE(review): self._harmonic is only set in the non-holomorphic
        # branch; code reading it for holomorphic spaces would AttributeError.
        if not holomorphic:
            self._harmonic = True
            self._weak_holomorphic=False
        else:
            self._weak_holomorphic=True
        #self.prec=prec
        #self.WM=WM
        self.WR=self.WM.WR
        N=self.WM.WR.N
        # if we want to force dual representation and try to use
        # a Weil representation WM which is not dual we change WM
        multiplier = WeilRepMultiplier(self.WR,weight=QQ(k))
        AutomorphicFormSpace.__init__(self,self.group,weight=self.weight,multiplier=multiplier,holomorphic=holomorphic,weak=True,cuspidal=False,dprec=self.dprec,verbose=verbose)
        self._is_dual_rep=self.WR._is_dual_rep
        if not sym:
            self.D=self.WR.D # index set
            self.sym_type=0
            self.D_as_int=range(0,len(self.WR.D))
        else: # use symmetry
            self.sym_type=self.get_sym_type()
            # for odd symmetry the components 0 and N are identically zero,
            # so the effective index set shrinks to 1..N-1
            if(self.sym_type==-1):
                Dstart=int(1); Dfinish=int(N-1)
            else:
                Dstart=int(0); Dfinish=int(N)
            self.D=list()
            for j in range(Dstart,Dfinish+1):
                self.D.append(self.WR.D[j])
            self.D_as_int=range(Dstart,Dfinish+1)
        self.dim=len(self.D)
        if(len(self.D)==0):
            if(self.WR.is_dual()):
                rep='dual rep.'
            else:
                rep='reg. rep.'
            raise ValueError,"Space only contains the zero function! Change weight (%s) or representation (%s)" %(self._weight_rat,rep)
        # cache of computed elements of this space
        self.members=list()
    def __reduce__(self):
        r""" Used for pickling.
        EXAMPLES::
        sage: M=VVHarmonicWeakMaassForms(WR,0.5,100)
        sage: save(M,"M.sobj")
        """
        # NOTE(review): these positional arguments do not line up with
        # __init__(WM, k, holomorphic, dprec, sym, dr, ...): dprec lands in
        # the ``holomorphic`` slot, sym_type in ``dprec`` and _is_dual_rep in
        # ``sym``.  Unpickling therefore rebuilds a *different* space —
        # TODO confirm intended argument order before relying on pickling.
        return(VVHarmonicWeakMaassForms,(self.WR,self.weight,self.dprec,self.sym_type,self._is_dual_rep))
def _cmp_(self,other):
r""" Compare self to other"""
if(not isinstance(other,VVHarmonicWeakMaassForms)):
return False
eq=(self.WR == other.WR) and (self.weight_rat==other.weight_rat)
eq = eq and (self.prec==other.prec) and (self.sym_type==other.sym_type)
eq = eq and (self._is_dual_rep==other._is_dual_rep)
return eq
def _repr_(self):
r""" Return string representation of self.
EXAMPLES::
sage: WR=WeilRepDiscriminantForm(1,dual=False)
sage: M=VVHarmonicWeakMaassForms(WR,0.5,20);M
Space of Vector-Valued harmonic weak Maass forms on Modular Group SL(2,Z) of weight 1/2 and dimension 1.
Representation is Weil representation of the discriminant form given by ZZ/2ZZ with quadratic form Q(x)=1*x**2 mod 1.
"""
s="Space of Vector-Valued harmonic weak Maass forms"
s+=" on "+str(self.WR.group)+" of weight "+str(self._weight_rat)+" "
s+=" and values in CC[ZZ/"+str(2*self.WR.N)+"ZZ]."
s+="\nRepresentation is "+str(self.WR)
return s
    def _latex_(self):
        r""" Return LaTeX string representation of self.
        EXAMPLES::
        sage: WR=WeilRepDiscriminantForm(1,dual=False)
        sage: M=VVHarmonicWeakMaassForms(WR,0.5,20)
        """
        # weight as a fraction p/q for \frac{p}{q}
        p=self._weight_rat.numer()
        q=self._weight_rat.denom()
        # ``old`` is a verbatim marker which is stripped from the
        # representation's own latex output below.  Note this line also
        # rebinds ``s`` — harmless, as ``s`` is reassigned right after.
        old=s="\\begin{verbatim}\\end{verbatim}"
        new=""
        ## s="\\text{Space of Vector-Valued harmonic weak Maass forms on }"
        ## s+=latex(self.WR.group)+" \\text{ of weight } \\frac{"+str(p)+"}{"+str(q)+"}"
        ## s+="\\text{and values in } \\mathbb{C}\\left[\\mathbb{Z}/"+latex(2*self.WR.N)+"\\mathbb{Z}\\right]\\text{.}"
        ## s+="$ \\text{ The representation is }"+latex(self.WR)+"\\text{.}"
        s="\\begin{verbatim}\\end{verbatim}"
        s+=" Space of Vector-Valued harmonic weak Maass forms on $"
        s+=latex(self.WR.group)+"$ of weight $\\frac{"+str(p)+"}{"+str(q)+"}$"
        s+="and values in $\\mathbb{C}\\left[\\mathbb{Z}/"+latex(2*self.WR.N)+"\\mathbb{Z}\\right]$. "
        s+="The representation is "+self.WR._latex_().replace(old,new)+"."
        return s
def an_element(self):
pp = self.smallest_pp()
F = self.get_element(pp,maxC=10)
return F
    def get_element(self,PP=None,prec=None,maxD=None,maxC=None,cusp_form=False,ef=True,mp=1E-8,M0_set=None,Y_set=None,SetCs={},use_mpmath=False,constant_term="pp"):
        r"""
        Get an element of the space of harmonic weak Maass form
        with specified principal part.
        INPUT:
        -''PP'' -- principal part
        -''prec'' -- number of digits desired
        -''maxD'' -- number of discriminants desired (use without prec, will override any value of prec and set it automatically)
        -''maxC'' -- number of coefficients desired for each component (use without prec, will set prec automatically)
        -''cusp_form'' -- True if we force c(h,n)=0 with n+q(h)=0.
        -``use_mpmath`` -- use mpmath package for multiprecision instead of mpc/mpfr
        -``constant_term`` -- set to "pp" if the constant term is included in the principal part (default) and otherwise to "var" if constant terms are treated as variables.
        EXAMPLES::
        sage: WR=WeilRepDiscriminantForm(1,dual=False)
        sage: M=VVHarmonicWeakMaassForms(WR,0.5,20)
        sage: PP={(1/2,-1):1}
        sage: C=M.get_element(PP,12)
        sage: C[0][0][1]
        mpc(real='53503.999999999985', imag='-5.0861876730364994e-12')
        sage: abs(C[0][0][1]-mpmath.mpf(53504))
        mpf('1.5415172456347141e-11')
        """
        # There are three permitted formats for the principal part:
        # P = dict or list of dicts of the form
        # i)  {(r/2N,m):c}
        # ii) {(r,m):c}
        # iii){D:c}
        # if the principal part contains
        # i)  c*e(m+Q(r/2N))
        # ii) c*e(m+Q(r))
        #iii) c*e(D/4N)
        minprec=mp
        #M0_set=None
        if PP==None:
            PP=self.smallest_pp() ## Use the smallest discriminant possible
        if(prec == None and maxD==None and maxC==None):
            raise ValueError,"Need either desired precision, number of discriminants or number of coefficients!"
        elif maxD<>None:
            # Need the correct M0 to use for getting this many coefficients.
            prec = None
            M0_set=ceil(maxD/len(self.D_as_int)) # should be approximately good
            for m in range(1,maxD*len(self.D_as_int)+1):
                min_d=maxD
                ## We need to make sure that all discriminants below maxD are accounted for
                for r in self.D_as_int:
                    D=self.D_from_rn((r,m))
                    if(D < min_d):
                        min_d=D
                if(min_d >= maxD):
                    M0_set=m
                    break
        elif(maxC<>None):
            prec=None
            M0_set=maxC
        # Normalize the principal part(s) into a list of {(r,m):c} dicts
        if(not isinstance(PP,(dict,list))):
            raise TypeError,"Need principal part in form of dictionary! Got:%s" % PP
        if isinstance(PP,dict):
            Ptmp = [PP]
        else:
            Ptmp = PP
        type='vector'
        P0 = list()
        for p in Ptmp:
            d={}
            for t in p:
                if isinstance(t,tuple):
                    (r,m) = t
                if isinstance(t,(int,Integer)):
                    # format iii): a discriminant D; translate it to an (r,m) pair
                    if not self.is_Heegner_disc(t):
                        raise ValueError,"Need discriminant satisfying Heegner condition, i.e. square mod %s. Got:%s" %(self.WR.level,t)
                    (r,m) = self.rn_from_D(t)
                d[(r,m)]=p[t]
                # Also check that principal part adheres to the symmetry if present
                if self.sym_type<>0:
                    minus_r = self.WR.negative_element(r)
                    if p.has_key((minus_r,m)):
                        # NOTE(review): ``sym_type`` here is a bare name — it
                        # should almost certainly be ``self.sym_type``; as
                        # written this line raises NameError when reached.
                        if p[(minus_r,m)]<>sym_type*p[(r,m)]:
                            raise ValueError,"Need symmetric principal part! Got:{0}".format(PP)
            P0.append(d)
        P=P0[0] ### More than one principal part is not implemented yet...
        if self._verbose > 0:
            print "P=",P
            print "M0_set=",M0_set
        F=VVHarmonicWeakMaassFormElement(self,P)
        if(self._verbose>0):
            sys.stdout.flush()
        # Decide truncation point M and height Y: either from an explicit
        # M0_set (derived above from maxD/maxC) or from the desired precision.
        if(M0_set<>None and M0_set <>0):
            if self._verbose > 0:
                print "M0_set=",M0_set
            mpmath.mp.dps = 53 ## Do the estimate in low precision
            Y=mpmath.mpf(0.75); M=M0_set
            [er1,er2]=F.get_error_estimates(Y,M0_set)
            prec=max(er1,er2)
        elif(prec<>None):
            [Y,M]=self.get_Y_and_M(P,self.weight,prec)
        else:
            # NOTE(review): "deicide" is a typo for "decide" in this runtime
            # message (kept verbatim here).
            raise ValueError,"Could not deicide number of coefficients to compute from input!"
        if(minprec<>None and prec>minprec):
            prec=minprec
            [Y,M]=self.get_Y_and_M(P,self.weight,minprec)
        Q=M+50
        # Override the height if the caller supplied one inside (0, sqrt(3)/2)
        if(Y_set<>None):
            if(Y_set>0 and Y_set < 0.866):
                Y=Y_set
        if(self._verbose > 1):
            print "prec=",prec
            print "Y=",Y
            print "M=",M
            print "Q=",Q
        # Raise the working precision for the linear-algebra phase
        dold=mpmath.mp.dps
        mpmath.mp.dps=max(self._dprec,prec+10)
        if self._verbose > 0:
            print "using ",mpmath.mp.dps," digits!"
            print "P=",P
            print "ef=",ef
        RF = RealField(self._prec)
        # Build the linear system W for the coefficients
        if(ef):
            if use_mpmath:
                Y = mpmath.mp.mpf(Y)
                W=vv_harmonic_wmwf_setupV_ef(self,P,Y,M,Q,mpmath.mp.mpf(self.weight))
            else:
                Y = RF(Y)
                mpmath.mp.dps = self._setupV_prec
                W=vv_harmonic_wmwf_setupV_mpc2(self,P,Y,M,Q)
                mpmath.mp.dps = self._prec
        else:
            W=vv_harmonic_wmwf_setupV(self,P,Y,M,Q,self.weight,self.sym_type,verbose=self._verbose)
        W['space']=self
        W['PP']=P
        # Best-effort: dump the system to the first writable scratch directory
        # (StopIteration is used to break out once a directory works).
        s="tmpWN"+str(self.WR.N)+"-"
        dirv=["/local/stroemberg/tmp/","/tmp","."]
        try:
            for dirs in dirv:
                try:
                    st=os.stat(dirs)
                    [f,tmpfilename] = tempfile.mkstemp(prefix=s,dir=dirs,suffix='.sobj')
                    os.close(f)
                    raise StopIteration()
                except OSError:
                    continue
        except StopIteration:
            pass
        if self._verbose > 0:
            print "tmpfilename=",tmpfilename
        try:
            save(W,tmpfilename)
        except MemoryError:
            print "Could not save to file!"
            pass
        #if(PP.has_key((0,0))):
        #    N=self.set_norm(P=PP,C=SetCs)
        #else:
        N=self.set_norm(P=PP,C=SetCs)
        if self._verbose>0:
            print "N = ",N
            print "V.parent=",W['V'].parent()
        #return (W,N)
        # Solve the system for the Fourier coefficients
        if use_mpmath:
            C=solve_system_for_vv_harmonic_weak_Maass_waveforms(W,N,deb=False)
        else:
            C=solve_system_for_vv_harmonic_weak_Maass_waveforms_new(self,W,N)
        if self._verbose > 0:
            print "C001=",C[0][0][1]
        D=self.D
        CC=dict()
        # we need to shift the indices of C to the (semi-)correct indices
        # i.e. for effficiency we use integers instead of rationals
        #return C
        F=list()
        for j in C.keys():
            CC[j]=dict()
            for k in C[j].keys():
                #print "k=",k
                CC[j][self.D_as_int[k]]=C[j][k]
            F.append(VVHarmonicWeakMaassFormElement(self,P,CC[j],prec))
        # restore the caller's mpmath precision
        mpmath.mp.dps=dold
        if(len(F)==1):
            return F[0]
        else:
            return F
        #return P
        # nw we have a
    def smallest_pp(self,sgn=-1,n=1):
        r""" Returns the smallest valid principal part.
        INPUT:
        -''sgn'' -- integer, if we want a negative or a positive power of Q in the principal part
        (technically speaking it is not really the principal part ifthe power is > 0)
        -''n'' -- integer. seek the n-th smallest discriminant
        OUTPUT: a one-entry dict {D: 1} with the n-th smallest admissible discriminant.
        """
        res=list()
        # rough number of m-values needed to produce n distinct discriminants
        sz=ceil(QQ(n)/QQ(len(self.D)))+1
        if self._is_dual_rep:
            m_start=0
        else:
            m_start=min(sgn,0)
        if(n>1):
            m_stop=sz
        else:
            m_stop=m_start+1
        rmin=1; xmin=self.D_as_int[0]
        if self._verbose > 0:
            print "m_start=",m_start
        # collect candidate discriminants D = (m + q(r)) * level
        for m in range(m_start,m_stop):
            #m=m*sgn
            for x in self.WR.Qv:
                y=QQ(x+m)
                D=ZZ(y*self.WR.level)
                if self._verbose > 0:
                    print m,self.WR.Qv.index(x),D,y
                if(D==0 and sgn<>0): # don't count the zero unless we want zero
                    continue
                if(res.count(D)==0):
                    res.append(D)
                # track the component index realizing the minimal |y| (n==1 only)
                if(abs(y)<xmin and n==1):
                    j = self.WR.Qv.index(x)
                    if( j in self.D_as_int):
                        rmin=j
                        xmin=x
            if(len(res)>=n):
                break
        # NOTE(review): both branches below sort identically (reverse=True);
        # presumably the sgn != -1 case was meant to sort ascending — verify.
        if(sgn==-1):
            res.sort(reverse=True)
        else:
            res.sort(reverse=True)
        if self._verbose > 0:
            print res
        # make unique
        return {res[n-1]:1}
def next_heegner_disc(self,n=0,sgn=-1,fd=False):
r""" Returns the smallest (in absolute value) discriminant greater than n which satisfies the Heegner condition,
i.e. which appears as an index of Fourier coefficients of forms in M.
INPUT:
-'n' -- integer (default 0), look at discriminants |D| > n
-'sgn' -- integer (default -1), look at negative discriminants with sign = sgn
-'fd'' -- logical(default False), if True, look at fundamental discriminats
"""
for D in range(n+1,2*n+100):
DD=D*sgn
if(self.is_Heegner_disc(DD)):
return DD
raise ArithmeticError," COuld not find any Heegner discriminat > %s !" %n
def is_Heegner_disc(self,D):
r""" Returns true is \pm D is appears as an index of a Fourier coefficients of M,
i.e. in the simplest case if it is a square mod 4N
INPUT:
-''D'' -- integer
"""
Dr = D % self.WR.level
if self.WR.is_dual():
Dr = -Dr
if Dr in self.WR.Qv_times_level:
return True
else:
return False
    def get_Y_and_M(self,PP,weight,prec,Yin=None):
        r"""
        Find a good Y and M for computing coefficents with precison 10^-prec
        INPUT:
        -''PP''     -- principal part dict {(r,m):c} or {D:c}
        -''weight'' -- weight (unused here; get_M uses self.weight)
        -''prec''   -- desired number of correct digits
        -''Yin''    -- optional fixed height; otherwise min(minimal height, 0.5)
        OUTPUT: [Y0, M0]
        """
        # generalized_level
        if(Yin<>None):
            Y0=Yin
        else:
            Y0=min(self.group.minimal_height(),0.5)
        # NOTE(review): Cmax=1 below is immediately overwritten — dead assignment.
        Cmax=1
        Kmax=0
        Cmax=max(PP.values())
        # Kmax = largest |m + q(r)| occurring in the principal part
        for t in PP.keys():
            if isinstance(t,tuple):
                (c,l) = t
            elif isinstance(t,(int,Integer)):
                (c,l)=self.rn_from_D(t)
            else:
                raise ValueError,"Incorrect principal part: t={0}".format(t)
            # the component may be given as an element of D or as its index
            if c in self.WR.D:
                tmp=l+self.WR.Qv[self.WR.D.index(c)]
            elif c in range(len(self.WR.Qv)):
                tmp=l+self.WR.Qv[c]
            else:
                raise ValueError,"Incorrect principal part: c,l={0},{1}".format(c,l)
            if self._verbose>0:
                print "tmp=",tmp
            if abs(tmp)>Kmax:
                Kmax=abs(tmp)
        #x
        # then get corresponding M
        #print "Kmax=",Kmax
        #print "Cmax=",Cmax
        M0=self.get_M(Y0,Kmax,Cmax,prec)
        return [Y0,M0]
    def get_M(self,Y,K0,K1,prec):
        r""" Computes truncation point for Harmonic Maass waveform.
        INPUT:
        -''Y''    -- height at which the expansion is evaluated
        -''K0''   -- largest |m+q(r)| in the principal part
        -''K1''   -- largest coefficient of the principal part
        -''prec'' -- desired number of correct digits
        OUTPUT: the smallest m with both truncation error estimates < 10^-prec.
        """
        # # Use low precision
        dold=mpmath.mp.dps
        # mpmath.mp.dps=int(mpmath.ceil(abs(mpmath.log10(eps))))+5
        mpmath.mp.dps=max(dold,prec)+5
        twopi=2*mpmath.pi()
        twopiy=twopi*mpmath.mpf(Y)
        # an extra two for the accumulation of errors
        eps=mpmath.mpf(10)**mpmath.mpf(-prec)
        # smallest admissible truncation point for the error formulas
        # NOTE(review): this can be a float, which range() only tolerates in
        # Python 2 — verify before porting.
        minm=max(10,abs(int(1-self.weight)+1)/(2*mpmath.pi()*Y))
        #print "K0=",K0
        #print "K1=",K1
        [Cp0,Cp1]=self.get_Cp(K0)
        Cm=self.get_Cm(K0,K1)
        #print "Cp0,Cp1,Cm=",mppr(Cp0),mppr(Cp1),mppr(Cm)
        fak=len(self.WR.D)
        # scan m upwards until both error estimates drop below eps;
        # StopIteration is used to break out with the successful m
        try:
            for m in range(minm,minm+10000):
                errest1=fak*self.err_est_vv_hwmf_pos(Y,m,Cp0,Cp1)
                errest2=fak*self.err_est_vv_hwmf_neg(Y,m,Cm)
                #print "er1+(",m,")=",mppr(errest1)
                #print "er2-(",m,")=",mppr(errest2)
                if(max(abs(errest1),abs(errest2))<eps):
                    raise StopIteration()
            raise ArithmeticError,"Could not find M<%s such that error bound in truncation is <%s! and Y,K0,K1=%s,%s,%s \n err+=%s \n err-=%s" %(m,eps,mppr(Y),K0,K1,mppr(errest1),mppr(errest2))
        except StopIteration:
            if(self._verbose > 2):
                print "er +=",errest1
                print "er -=",errest2
                print "m=",m
                print "Y=",Y
        # restore the caller's precision before returning
        mpmath.mp.dps=dold
        return m
def get_sym_type(self):
r"""
Calculate the symmetry type (even/odd) for the combination
of representation and weight.
"""
t=self.weight-0.5
if(not is_int(t)):
raise ValueError, "Need half-integral value of weight! Got k=%s, k-1/2=%s" %(self.weight,t)
ti=Integer(float(t))
if is_odd(ti):
sym_type=-1
else:
sym_type=1
if self._is_dual_rep:
sym_type=-sym_type
return sym_type
def get_Cp(self,K0):
r"""
Set constants for the error estimates.
"""
#if(self.weight>=1.5):
# raise ValueError," Error bounds only accurate for k<1.5! got k=%s" % self.weight
mp2=mpmath.mpf(2)
twominusk=mp2-self.weight
tmp=mpmath.mpf(len(self.WR.D))
tmp0=mpmath.sqrt(tmp)+mpmath.mpf(1)
tmp1=mpmath.pi()*mpmath.mpf(4)
Cp1=tmp1*mpmath.sqrt(abs(K0))
tmp1=mpmath.power(tmp1,twominusk)
tmp2=mpmath.besseli(1-self.weight,1.0)
tmp3=mpmath.zeta(twominusk)
if(K0==0):
tmp4=1
else:
tmp4=mpmath.power(K0,1-self.weight)
Cp0=tmp0*tmp1*tmp2*tmp3*tmp4
return [Cp0,Cp1]
def get_Cm(self,K0,K1):
r""" Constant in error bound for negative part.
"""
#if(self.weight>=1.5):
# raise ValueError," Error bounds only accurate for k<1.5! got k=%s" % self.weight
twominusk=mp2-self.weight
tmp=mpmath.mpf(len(self.WR.D))
tmp1=mppi*mp2
tmp1=mpmath.power(tmp1,twominusk)
tmp3=mpmath.zeta(twominusk)
if(K0==0):
tmp4=1
else:
tmp4=mpmath.power(K0,1-self.weight)
g1=mpmath.gamma(1-self.weight)
g2=mpmath.gamma(2-self.weight)
Cm=mp2/g1+mp4*tmp1/g1/g2*tmp*tmp3*tmp4
return Cm
def err_est_vv_hwmf_pos(self,Y,m,Cp0,Cp1):
r""" Error estimate. See paper...
"""
#if(self.weight>=1.5):
# raise ValueError," Error bounds only accurate for k<1.5! got k=%s" % weight
twopiY=mpmath.pi()*mp2*Y
fourpiY=mp2*twopiY
tmp=mpmath.mpf(len(self.WR.D))
etmp1=mpmath.sqrt(mpmath.mpf(m))-Cp1/fourpiY
etmp2=mpmath.exp(-twopiY*etmp1**2)
etmp3=mp2+mpsqrtpi*Cp1/mp2/mpmath.sqrt(twopiY)
etmp4=Cp0/twopiY
err_pos=tmp*etmp4*etmp3*etmp2
return err_pos
def err_est_vv_hwmf_neg(self,Y,m,Cm):
r""" Errorbound for negative part.
"""
#print "Cm=",Cm
#if(self.weight>=1.5):
# raise ValueError," Error bounds only accurate for k<1.5! got k=%s" % weight
etmp1=abs(mpmath.mpf(1-self.weight))
twopiY=mp2*Y*mpmath.pi()
tmp=mpmath.mpf(len(self.WR.D))
if(self.weight>0):
etmp2=mp2*mpmath.mpf(m-1)*twopiY
etmp2=mpmath.power(etmp2,-self.weight)
etmp3=mpmath.exp(-twopiY*mpmath.mpf(m))
etmp4=mp1/(mp1-mpmath.exp(-twopiY))
err_neg=Cm*tmp*etmp1*etmp2*etmp3*etmp4
else:
etmp2=mpmath.power(mp2,-self.weight)
etmp3=mpmath.power(twopiY,-self.weight-mp1)
etmp4=mpmath.power(mpmath.mpf(m),-self.weight)
etmp5=mpmath.exp(-twopiY*mpmath.mpf(m))
err_neg=Cm*tmp*etmp1**2*etmp2*etmp3*etmp4*etmp5
return err_neg
#def set_norm_vv_harmonic_weak_maass_forms(WR,cusp_form=True,holomorphic=True,SetCs=None):
def set_norm(self,P={},C={},c_t="pp"):
r"""
Set the normalization dictionary corresponding to self and computation of a form
with principal part P and set fourier coefficients in C
"""
N=dict()
if isinstance(P,list):
Pl=P
else:
Pl=[P]
if isinstance(C,list):
Cl=C
else:
Cl=[C]
if len(Pl)>0:
N['comp_dim']=len(Pl)
if len(Cl)>0:
if len(Cl)<>len(Pl):
raise ValueError,"Need same number of principal parts and coefficients to set!"
keys = Cl[0].keys()
for j in range(1,N['comp_dim']):
if Cl[j].keys()<>keys:
raise ValueError,"Need to set the same coefficients! (or call the method more than once)"
else:
Cl=[]
for j in range(N['comp_dim']):
Cl.append(C)
N['Vals']=list()
N['Vals']=list()
N['SetCs']=list()
for i in range(N['comp_dim']):
N['Vals'].append({})
N['Vals'].append({})
N['SetCs'].append([])
for j in range(len(self.WR.D)):
a=self.WR.D[j]
x=self.WR.Qv[j]
#N['Vals'][i][(0,j)]=dict()
if x==0:
if c_t=="pp":
#N['comp_dim']=N['comp_dim']+1
N['SetCs'][i].append((j,0))
if Pl[i].has_key((0,j)):
N['Vals'][i][(j,0)]=Pl[i][(j,0)]
else:
N['Vals'][i][(j,0)]=0 #P[(0,0)]
elif x<0 and self._holomorphic:
N['SetCs'][i].append((j,0))
N['Vals'][i][(j,0)]=0 #P[(0,0)]
for (r,n) in Cl[i].keys():
if(N['SetCs'][i].count((r,n))==0):
N['SetCs'][i].append((r,n))
N['Vals'][i][(r,n)]=Cl[i][(r,n)]
return N
def rn_from_D(self,D):
r""" Find the pair(s) (r,n) s.t. +-D/4N= n +- q(r) for D in D
INPUT:
-''D'' -- integer or list of integers
OUTPUT:
-''t'' -- tuple (r,n) or list of tuples
"""
if(isinstance(D,list)):
lout=list()
for DD in D:
t=self._one_rn_from_D(DD)
if(t<>None):
lout.append(t)
return lout
else:
return self._one_rn_from_D(D)
def _one_rn_from_D(self,D):
r""" Find the (r,n) s.t. +-D/4N= n +- q(r)
"""
Dv=QQ(D)/QQ(self.WR.level)
sig=1
if self.WR._is_dual_rep:
sig=-1
for r in self.WR.D:
x=self.WR.Q(r)
if(is_int(Dv-x)):
rr=self.WR.D.index(r)
n=sig*int(Dv-x)
return (rr,n)
return None
def D_from_rn(self,t):
r""" Find the D s.t. +-D/4N= n +- q(r)
"""
if(isinstance(t,list)):
lout=list()
for (r,n) in t:
D=self._one_D_from_rn((r,n))
if(D<>None):
lout.append(D)
return lout
else:
return self._one_D_from_rn(t)
def _one_D_from_rn(self,t):
r""" Find the D s.t. +-D/4N= n +- q(r)
"""
#print "t=",t,type(t)
if(not isinstance(t,tuple)):
raise TypeError,"Need a tuple of integers! Got:%s" % t
(r,n)=t
sig=1
#print "r=",r
if(r in self.WR.D):
x=self.WR.Q(r)
elif(r in self.WR.D_as_integers):
x=self.WR.Q(self.WR.D[r])
else:
raise TypeError,"Need (r,n) in proper format forcoefficients! I.e. n integer and r in D or integer!"
#print "x=",x
if self.WR._is_dual_rep:
sig=-1
D=sig*self.WR.level*(n+sig*x)
return D
def dist_from_int(x):
    r"""
    Return [d, m] where m is a nearest integer to x and d = |x - m|.
    Exact half-integer (and integer) ties are broken by a random bit.
    """
    below=floor(x); above=ceil(x)
    d_below=abs(below-x); d_above=abs(above-x)
    if d_below<d_above:
        return [d_below,below]
    if d_above<d_below:
        return [d_above,above]
    # tie: x is an integer or a half-integer — choose randomly
    random.seed()
    if random.getrandbits(1)==0:
        return [d_below,below]
    return [d_above,above]
def norm_sci_pretty_print(c,nd=0,es='e',latex_pow=False):
    r"""
    Pretty-print a complex number ``c`` as "re+imi" in scientific notation
    with ``nd`` digits; parts smaller than 1e-5 in absolute value are shown
    with only 2 digits.
    """
    re_part=c.real; im_part=c.imag
    re_digits = 2 if abs(re_part)<1E-5 else nd
    out = sci_pretty_print(re_part,re_digits,es,latex_pow)
    # a negative imaginary part brings its own sign from sci_pretty_print
    if im_part>0:
        out += "+"
    im_digits = 2 if abs(im_part)<1E-5 else nd
    out += sci_pretty_print(im_part,im_digits,es,latex_pow)+"i"
    return out
def sci_pretty_print(s,nd=0,es='e',latex_pow=False):
    r""" Take a string representation of a number and returns it in scientific notation to desired number of digits.

    INPUT:
    -''s''         -- number or string representation of a number (real or complex; I/i/j accepted)
    -''nd''        -- number of significant digits to keep
    -''es''        -- exponent separator for plain output (default 'e')
    -''latex_pow'' -- if True, format the exponent as LaTeX "10^{..}"
    Complex input is handled by splitting into real/imaginary parts and
    recursing on each.
    """
    # remove leading sign #
    #x=mpmath.mp.mpf(s)
    #if(abs(x)<1):
    #    raise NotImplementedError," Only implemented for |x|>1!! got x=%s" %x
    if(not isinstance(s,str)):
        s=str(s)
    s=s.replace("(","")
    s=s.replace(")","")
    s=s.strip()
    if(s.count("I")+s.count("i")+s.count("j")>0):
        # Get a default complex notation
        s=s.replace("*","")
        s=s.replace("I","i")
        s=s.replace("j","i")
        ## We have to find imaginary and real parts
        l=s.split("+")
        if(len(l)>1):
            (s1,s2)=l
        else:
            # no '+': the parts are separated by '-'; re-attach the sign
            (s1,s2)=s.split("-")
            s2="-"+s2
        if(s1.count("i")>0): # put imaginary part in standard form
            ims=s1.strip("i").lstrip(); res=s2
        else:
            ims=s2.strip("i").lstrip(); res=s1
        if(ims==""): ims="1"
        # recurse on the real and imaginary parts separately
        sres=sci_pretty_print(res,nd,es,latex_pow)
        sims=sci_pretty_print(ims,nd,es,latex_pow)
        if(sres=="0"): sres=""
        if(sims=="0"):
            sims=""
        else:
            sims=sims+"i"
        if(sims.count("-")>0):
            return sres+" "+sims.replace(" -"," - ")
        elif(sims<>"" and sres<>""):
            return sres+" + "+sims
        elif(sres<>""):
            return sres
        elif(sims<>""):
            return sims
        else:
            raise ValueError,"Could not find pretty print for s=%s " %s
    # ---- real case from here on ----
    s=s.strip()
    if( len(s.replace(".","").strip("0"))==0):
        # nothing but zeros (and a possible decimal point)
        return "0"
    if(s.count(".")==0):
        s=s+".0"
    if(s[0]=='-'):
        # handle the sign by recursing on the absolute value
        ss=s.strip("-")
        ss=sci_pretty_print(ss,nd,es,latex_pow)
        return "-"+ss
    l=s.split(".")
    if(len(l)>1):
        (sint,sdigs)=l
    elif(len(s)<nd):
        return s
    elif(len(l)>0):
        sint=l[0]
        sdigs=""
    else:
        raise ValueError," Can not do pretty print for s=%s" %s
    # an exponent may already be present in the fractional part ("...e<k>")
    if(sdigs.count("e")>0):
        l=sdigs.split("e")
        sdigs=l[0]
        ex=int(l[1])
    else:
        ex=0
    if(len(sint)==1 and sint=="0"):
        # |x| < 1: normalize 0.00..d... to d.…*10^{-nz}
        # find the number of leading zeros
        sss=sdigs.lstrip("0")
        nz=len(sdigs)-len(sss)+1
        if(nz<10):
            ex="-0"+str(nz)
        else:
            ex=str(-nz)
        # Fix correct rounding
        rest=sss[nd:len(sss)]
        ix=nd-1
        if(len(rest)>0):
            # compare the cut-off tail against 500...0 to decide rounding
            if(int(rest) < 5*10**(len(rest)-1)):
                d=int(sss[ix])
            elif(int(rest) > 5*10**(len(rest)-1)):
                d=int(sss[ix])+1
            else:
                # if we have an exact half we round randomly
                random.seed()
                d=int(sss[ix])+int(random.getrandbits(1))
            if(d<10):
                ssdigs=sss[0:ix]+str(d) # We account for the leading digit too
            else:
                # rounding overflowed the last kept digit: carry one left
                ssdigs=sss[0:ix-1]+str(int(sss[ix-1])+1)+str(d-10) # We account for the leading digit too
        # NOTE(review): if len(rest)==0 (i.e. nd >= len(sss)) ``ssdigs`` is
        # never assigned and the returns below raise NameError.
        # NOTE(review): another copy of this function renders the LaTeX
        # exponent as " \cdot 10^{..}" without ``es`` — the two disagree.
        if(latex_pow):
            return ssdigs[0]+"."+ssdigs[1:nd]+es+"10^{"+ex+"}"
        else:
            return ssdigs[0]+"."+ssdigs[1:nd]+es+ex
    # |x| >= 1: the exponent is (existing exponent) + (digits before the point) - 1
    ex=int(ex)+len(sint)-1
    if(abs(ex)<10):
        ex="0"+str(ex)
    else:
        ex=str(ex)
    #ssdigs=sint[1:len(sint)]+sdigs
    # cut away to nd digits
    if(nd>0):
        #ssdigs=sdigs[0:nd-1] # We acount the leading digit too
        # Try to do correct rounding
        rest=sdigs[nd-len(sint):len(sdigs)]
        #print "sdigs=",sdigs," nd=",nd
        #print "rest=",rest
        ix=nd-len(sint)-1
        if(len(rest)>0):
            if(int(rest) < 5*10**(len(rest)-1)):
                # print " round < : since "+rest+"<"+str(5*10**(len(rest)-1))
                d=int(sdigs[ix])
            elif(int(rest) > 5*10**(len(rest)-1)):
                # print " round > : since "+rest+">"+str(5*10**(len(rest)-1))
                d=int(sdigs[ix])+1
            else:
                # if we have an exact half we round randomly
                random.seed()
                d=int(sdigs[ix])+int(random.getrandbits(1))
            if(d<10):
                ssdigs=sdigs[0:ix]+str(d) # We account for the leading digit too
            else:
                # carry propagation after rounding overflow
                if(ix>0):
                    ssdigs=sdigs[0:ix-1]+str(int(sdigs[ix-1])+1)+str(d-10) # We account for the leading digit too
                else:
                    ssdigs=str(d-10) # We account for the leading digit too
                    # the carry spills into the integer part
                    if(len(sint)==1):
                        sint=str(int(sint)+1)
                    else:
                        # NOTE(review): this drops the first character of sint
                        # (e.g. "12" becomes "3", not "13") — looks like an
                        # off-by-one in the slice; verify against intended output.
                        ll=len(sint); stmp=sint;
                        sint=stmp[1:ll-1]+str(int(stmp[ll-1])+1)
        else:
            ssdigs=sdigs[0:ix]
        #print "rest=",rest,len(rest)
        ssdigs=sint[1:len(sint)]+ssdigs
    else:
        ssdigs=sint[1:len(sint)]+sdigs
    # print sint[0]+"."+ssdigs+" \cdot 10^{"+ex+"}"
    if(latex_pow):
        res=sint[0]+"."+ssdigs+" \cdot 10^{"+ex+"}"
        return res
    else:
        return sint[0]+"."+ssdigs+es+ex
## def dist_from_int(x):
## r""" Compute distance from x to the nearest integer.
## """
## d1=abs(x-mpmath.mp.floor(x))
## d2=abs(x-mpmath.mp.ceil(x))
## if(d1<d2):
## y=mpmath.mp.floor(x)
## d=d1
## else:
## y=mpmath.mp.ceil(x)
## d=d2
## return RR(d),y
## def is_int(q):
## r"""
## Find out if the rational number q is an integer.
## """
## try:
## if(isinstance(q,sage.rings.integer.Integer) or isinstance(q,int)):
## return True
## if(isinstance(q,sage.rings.rational.Rational)):
## n=q.denominator()
## if(n==1):
## return True
## if(floor(q)==ceil(q)):
## return True
## except TypeError:
## pass
## return False
def solve_system_for_vv_harmonic_weak_Maass_waveforms(W,N=None,deb=False,gr=False,cn=False):
r"""
Solve the linear system to obtain the Fourier coefficients of Maass forms
INPUT:
- ``W`` -- (system) dictionary
- ``W['Ms']`` -- M start
- ``W['Mf']`` -- M stop
- ``W['nc']`` -- number of cusps
- ``W['V']`` -- matrix of size ((Ms-Mf+1)*nc)**2
- ``W['RHS']`` -- right hand side (for inhomogeneous system) matrix of size ((Ms-Mf+1)*nc)*(dim)
- ``N`` -- normalisation (dictionary, output from the set_norm_for_maass function, default None)
- if N=None we assume that the solution is uniquely determined by the prinicpal part (in the right hand side)
- ``N['SetCs']`` -- Which coefficients are set
- ``N['Vals'] `` -- To which values are these coefficients set
- ``N['comp_dim']``-- How large is the assumed dimension of the solution space
- ``N['num_set']`` -- Number of coefficients which are set
- ``deb`` -- print debugging information (default False)
- ''gr'' -- only return the reduced matrix and right hand side. do not perform the solving .
- ''cn'' -- logical (default False) set to True to compute the max norm of V^-1
OUTPUT:
- ``C`` -- Fourier coefficients
EXAMPLES::
sage: G=MySubgroup(Gamma0(1))
sage: mpmath.mp.dps=20
sage: R=mpmath.mpf(9.533695261353557554344235235928770323821256395107251982375790464135348991298347781769255509975435366)
sage: Y=mpmath.mpf(0.5)
sage: W=setup_matrix_for_Maass_waveforms(G,R,Y,12,22)
sage: N=set_norm_maass(1)
sage: C=solve_system_for_Maass_waveforms(W,N)
sage: C[0][2]*C[0][3]-C[0][6]
mpc(real='-1.8055426724989656270259e-14', imag='1.6658248366482944572967e-19')
If M is too large and the precision is not high enough the matrix might be numerically singular
W=setup_matrix_for_Maass_waveforms(G,R,Y,20,40)
sage: C=solve_system_for_Maass_waveforms(W,N)
Traceback (most recent call last)
...
ZeroDivisionError: Need higher precision! Use > 23 digits!
Increasing the precision helps
sage: mpmath.mp.dps=25
sage: R=mpmath.mpf(9.533695261353557554344235235928770323821256395107251982375790464135348991298347781769255509975435366)
sage: C=solve_system_for_Maass_waveforms(W,N)
sage: C[0][2]*C[0][3]-C[0][6]
mpc(real='3.780824715556976438911480324e-25', imag='2.114746048869188750991752872e-99')
"""
V=W['V']
Ms=W['Ms']
Mf=W['Mf']
WR=W['WR']
#nc=W['nc']
Ml=Mf-Ms+1
get_reduced_matrix=gr
comp_norm=cn
if(W['sym_type']==1):
setD=range(0,WR.N+1) # 0,1,...,N
elif(W['sym_type']==-1):
setD=range(1,WR.N) # 1,2,...,N-1 (since -0=0 and -N=N)
nc=len(setD)
if(V.cols<>Ml*nc or V.rows<>Ml*nc):
raise Exception," Wrong dimension of input matrix!"
if(N==None):
SetCs=[]
Vals=dict();Vals[0]=dict()
for b in WR.D:
if(WR.Q(b)==0):
Vals[0][b]=dict()
SetCs.append((b,0))
Vals[0][b][0]=0
comp_dim=1
else:
SetCs=N['SetCs']
Vals=N['Vals']
comp_dim=N['comp_dim']
#for a in Vals.keys():
# Vals[a]=mpmath.mp.mpc(Vals[a])
if(Ms<0):
use_sym=0
else:
use_sym=1
if(use_sym==1 and len(SetCs)>0):
num_set=len(SetCs)-1
else:
num_set=len(SetCs)
t=V[0,0]
if(isinstance(t,float)):
mpmath_ctx=mpmath.fp
else:
mpmath_ctx=mpmath.mp
#print "mpmath_ctx=",mpmath_ctx
#use_symmetry=False
RHS=mpmath_ctx.matrix(int(Ml*nc-num_set),int(comp_dim))
if(W.has_key('RHS')):
if(W['RHS'].cols<>comp_dim):
raise ValueError,"Incorrect number of right hand sides!"
LHS=mpmath_ctx.matrix(int(Ml*nc-num_set),int(Ml*nc-num_set))
roffs=0
if(deb):
print "Ml=",Ml
print "num_set=",num_set
print "SetCs=",SetCs
print "Vals=",Vals
print "V.rows=",V.rows
print "V.cols=",V.cols
print "LHS.rows=",LHS.rows
print "LHS.cols=",LHS.cols
print "RHS.rows=",RHS.rows
print "RHS.cols=",RHS.cols
print "use_sym=",use_sym
if(V.rows <> nc*Ml):
raise ArithmeticError," Matrix does not have correct size!"
if( len(SetCs)>0):
for a in range(nc):
for n in range(Ms,Mf+1):
r=a*Ml+n-Ms
if(SetCs.count((a,n))>0):
print " set row a,n=",a,n
roffs=roffs+1
continue
for fn_j in range(comp_dim):
if(W.has_key('RHS')):
RHS[r-roffs,fn_j]=-W['RHS'][r,fn_j]
for (i,cset) in SetCs:
v=Vals[fn_j][i][cset]
if(mpmath_ctx==mpmath.mp):
tmp=mpmath_ctx.mpmathify(v)
elif(isinstance(v,float)):
tmp=mpmath_ctx.mpf(v)
else:
tmp=mpmath_ctx.mpc(v)
tmp=tmp*V[r,i*Ml+cset-Ms]
RHS[r-roffs,fn_j]=RHS[r-roffs,fn_j]-tmp
coffs=0
for b in range(nc):
for l in range(Ms,Mf+1):
k=b*Ml+l-Ms
if(SetCs.count((b,l))>0):
#print " set col b,l=",b,l
coffs=coffs+1
continue
LHS[r-roffs,k-coffs]=V[r,k]
#print "LHS[",r,k,"]=",LHS[r-roffs,k-coffs]
else:
LHS=V
RHS=-W['RHS']
if(get_reduced_matrix):
return [LHS,RHS]
dpold=mpmath.mp.dps
maxit=100;i=0
done=False
while (not done and i<=maxit):
try:
A, p = mpmath_ctx.LU_decomp(LHS)
done=True
except ZeroDivisionError:
try:
sinf=smallest_inf_norm_mpmath(LHS)
t=int(mpmath_ctx.ceil(-mpmath_ctx.log10(sinf)))
except ValueError:
print "Warning: Got smallest inf. norm=",sinf
t = mpmath.mp.dps+10
mpmath.mp.dps=t+5*i; i=i+1
print "raising number of digits to:",mpmath.mp.dps
# raise ZeroDivisionError,"Need higher precision! Use > %s digits!" % t
if(i>=maxit):
raise ZeroDivisionError,"Can not raise precision enough to solve system! Should need > %s digits! and %s digits was not enough!" % (t,mpmath.mp.dps)
# Use the LU-decomposition to compute the inf- norm of A^-1
# Note that Ax=LUx=y and we get columns of A^-1 by solving for y1=(1,0,...), y2=(0,1,0,...) etc.
if(comp_norm):
max_norm=0
for j in range(LHS.rows):
y=mpmath_ctx.matrix(LHS.rows,int(1)); y[j,0]=1
b = mpmath_ctx.L_solve(A,y, p)
TMP = mpmath_ctx.U_solve(A, b)
tmpnorm=max(map(abs,TMP))
if(tmpnorm>max_norm):
max_norm=tmpnorm
print "max norm of V^-1=",max_norm
mpmath.mp.dps=dpold
X=dict()
for fn_j in range(comp_dim):
X[fn_j] = dict() #mpmath.matrix(int(Ml),int(1))
print "len(B)=",len(RHS.column(fn_j))
b = mpmath_ctx.L_solve(A, RHS.column(fn_j), p)
#return b
TMP = mpmath_ctx.U_solve(A, b)
roffs=0
res = mpmath_ctx.norm(mpmath_ctx.residual(LHS, TMP, RHS.column(fn_j)))
print "res(",fn_j,")=",res
for ai in range(nc):
X[fn_j][ai]=dict()
for n in range(Ms,Mf+1):
ni=Ml*ai+n-Ms
if(SetCs.count((ai,n))>0):
roffs=roffs+1
# print "X[",fn_j,",",n,",Vals[fn_j][n]
# print "X[",fn_j,",",n,",Vals[fn_j][n]
X[fn_j][ai][n]=mpmath_ctx.mpc(Vals[fn_j][ai][n])
continue
#print "roffs,n,ni=",roffs,n,ni-roffs
#print "TMP=",TMP[ni-roffs,0]
X[fn_j][ai][n]=TMP[ni-roffs,0]
# return x
return X
def solve_system_for_vv_harmonic_weak_Maass_waveforms_new(H,W,N=None,gr=False,cn=False):
r"""
Solve the linear system to obtain the Fourier coefficients of Maass forms
INPUT:
- ``H`` -- Space of vector-valued modular forms
- ``W`` -- (system) dictionary
- ``W['Ms']`` -- M start
- ``W['Mf']`` -- M stop
- ``W['nc']`` -- number of cusps
- ``W['V']`` -- matrix of size ((Ms-Mf+1)*nc)**2
- ``W['RHS']`` -- right hand side (for inhomogeneous system) matrix of size ((Ms-Mf+1)*nc)*(dim)
- ``N`` -- normalisation (dictionary, output from the set_norm_for_maass function, default None)
- if N=None we assume that the solution is uniquely determined by the prinicpal part (in the right hand side)
- ``N['SetCs']`` -- Which coefficients are set
- ``N['Vals'] `` -- To which values are these coefficients set
- ``N['comp_dim']``-- How large is the assumed dimension of the solution space
- ``N['num_set']`` -- Number of coefficients which are set
- ''gr'' -- only return the reduced matrix and right hand side. do not perform the solving .
- ''cn'' -- logical (default False) set to True to compute the max norm of V^-1
OUTPUT:
- ``C`` -- Fourier coefficients
EXAMPLES::
sage: G=MySubgroup(Gamma0(1))
sage: mpmath.mp.dps=20
sage: R=mpmath.mpf(9.533695261353557554344235235928770323821256395107251982375790464135348991298347781769255509975435366)
sage: Y=mpmath.mpf(0.5)
sage: W=setup_matrix_for_Maass_waveforms(G,R,Y,12,22)
sage: N=set_norm_maass(1)
sage: C=solve_system_for_Maass_waveforms(W,N)
sage: C[0][2]*C[0][3]-C[0][6]
mpc(real='-1.8055426724989656270259e-14', imag='1.6658248366482944572967e-19')
If M is too large and the precision is not high enough the matrix might be numerically singular
W=setup_matrix_for_Maass_waveforms(G,R,Y,20,40)
sage: C=solve_system_for_Maass_waveforms(W,N)
Traceback (most recent call last)
...
ZeroDivisionError: Need higher precision! Use > 23 digits!
Increasing the precision helps
sage: mpmath.mp.dps=25
sage: R=mpmath.mpf(9.533695261353557554344235235928770323821256395107251982375790464135348991298347781769255509975435366)
sage: C=solve_system_for_Maass_waveforms(W,N)
sage: C[0][2]*C[0][3]-C[0][6]
mpc(real='3.780824715556976438911480324e-25', imag='2.114746048869188750991752872e-99')
"""
V=W['V']
Ms=W['Ms']
Mf=W['Mf']
WR=W['WR']
#nc=W['nc']
Ml=Mf-Ms+1
get_reduced_matrix=gr
verbose = H._verbose
comp_norm=cn
if W['sym_type']==1:
setD=range(0,WR.N+1) # 0,1,...,N
elif W['sym_type']==-1:
setD=range(1,WR.N) # 1,2,...,N-1 (since -0=0 and -N=N)
nc=len(setD)
if V.ncols()<>Ml*nc or V.nrows()<>Ml*nc:
raise Exception," Wrong dimension of input matrix!"
if N==None:
SetCs=[[]]
Vals=[{}]
for b in WR.D:
if(WR.Q(b)==0):
SetCs[0].append((b,0))
Vals[0][(b,0)]=0
comp_dim=1
else:
SetCs=N['SetCs']
Vals=N['Vals']
comp_dim=N['comp_dim']
#for a in Vals.keys():
# Vals[a]=mpmath.mp.mpc(Vals[a])
if Ms<0:
use_sym=0
else:
use_sym=1
if verbose>0:
print "Before SetCs=",SetCs
if use_sym==1:
if len(SetCs[0])>0:
num_set=0 #
for (r,m) in SetCs[0]:
if r in H.D_as_int:
num_set = num_set + 1
else:
num_set=len(SetCs[0])
t=V[0,0]
CF=MPComplexField(H._prec)
MS = MatrixSpace(CF,int(Ml*nc-num_set),int(comp_dim))
RHS=Matrix_complex_dense(MS,0,True,True)
#if W.has_key('RHS'):
# if(W['RHS'].ncols()<>comp_dim):
# raise ValueError,"Incorrect number of right hand sides!"
MS = MatrixSpace(CF,int(Ml*nc-num_set),int(Ml*nc-num_set))
LHS=Matrix_complex_dense(MS,0,True,True)
if verbose>0:
print "Ml=",Ml
print "num_set=",num_set
print "SetCs=",SetCs
print "Vals=",Vals
print "V.rows=",V.nrows()
print "V.cols=",V.ncols()
print "LHS.rows=",LHS.nrows()
print "LHS.cols=",LHS.ncols()
print "RHS.rows=",RHS.nrows()
print "RHS.cols=",RHS.ncols()
print "use_sym=",use_sym
print "N=",N
num_rhs=0
if(W.has_key('RHS')):
num_rhs=W['RHS'].ncols()
if num_rhs<>1 and num_rhs<>comp_dim:
raise ValueError,"Need same number of right hand sides (or just one) as the number of set coefficients!"
if V.nrows() <> nc*Ml:
raise ArithmeticError," Matrix does not have correct size!"
roffs=0
for a in range(nc):
for n in range(Ms,Mf+1):
r=a*Ml+n-Ms
if SetCs[0].count((a,n))>0 and a in setD:
#print " set row a,n=",a,n
roffs=roffs+1
continue
for fn_j in range(comp_dim):
if(W.has_key('RHS')):
#ztmp=CF(ztmp.real(),ztmp.imag())
if num_rhs==comp_dim:
rhs_j =fn_j
else:
rhs_j = 0
RHS[r-roffs,fn_j]=-W['RHS'][r,rhs_j]
for (i,cset) in SetCs[fn_j]:
if i in setD:
v=Vals[fn_j][(i,cset)]
# print "r,k=",r,i*Ml+cset-Ms
tmp = CF(v)*V[r,i*Ml+cset-Ms]
RHS[r-roffs,fn_j]=RHS[r-roffs,fn_j]- tmp
coffs=0
for b in range(nc):
for l in range(Ms,Mf+1):
k=b*Ml+l-Ms
if SetCs[0].count((b,l))>0 and b in setD:
coffs=coffs+1
continue
LHS[r-roffs,k-coffs]=V[r,k]
#print "LHS[",r,k,"]=",LHS[r-roffs,k-coffs]
#else:
# LHS = V
# RHS = -W['RHS']
if get_reduced_matrix:
return [LHS,RHS]
maxit=100;i=0
done=False
dps0=CF.prec()
while (not done and i<=maxit):
try:
Q,R = LHS.qr_decomposition()
done=True
except ZeroDivisionError:
t=int(ceil(-log_b(smallest_inf_norm(LHS),10)))
dps=t+5*i; i=i+1
print "raising number of digits to:",dps
LHS.set_prec(dps)
# raise ZeroDivisionError,"Need higher precision! Use > %s digits!" % t
if i>=maxit:
raise ZeroDivisionError,"Can not raise precision enough to solve system! Should need > %s digits! and %s digits was not enough!" % (t,dps)
if comp_norm:
max_norm=LHS.norm()
for j in range(LHS.rows):
#y=mpmath_ctx.matrix(LHS.rows,int(1)); y[j,0]=1
y = Vector_complex_dense(vector(F,LHS.rows).parent(),0)
y[j]=1
TMP = RHS.solve(b) #pmath_ctx.U_solve(A, b)
tmpnorm=max(map(abs,TMP))
if(tmpnorm>max_norm):
max_norm=tmpnorm
print "max norm of V^-1=",max_norm
X=dict()
for fn_j in range(comp_dim):
X[fn_j] = dict() #mpmath.matrix(int(Ml),int(1))
#b = mpmath_ctx.L_solve(A, RHS.column(fn_j), p)
v = RHS.column(fn_j)
print "len(B)=",len(v)
TMP = LHS.solve(v)
#TMP = mpmath_ctx.U_solve(A, b)
roffs=0
#res = mpmath_ctx.norm(mpmath_ctx.residual(LHS, TMP, RHS.column(fn_j)))
res = (LHS*TMP-v).norm()
print "res(",fn_j,")=",res
for ai in range(nc):
X[fn_j][ai]=dict()
for n in range(Ms,Mf+1):
ni=Ml*ai+n-Ms
if SetCs[fn_j].count((ai,n))>0 and ai in setD:
roffs=roffs+1
X[fn_j][ai][n]=CF(Vals[fn_j][(ai,n)])
continue
if verbose>0:
print "ni-roffs=",ni-roffs
X[fn_j][ai][n]=TMP[ni-roffs]
return X
def vv_harmonic_wmwf_phase2_tst1(M,PP,C,Ns,Is=None,prec=20,Yin=None):
try:
CC=vv_harmonic_wmwf_phase2_1(M,PP,C,Ns,Is,prec,Yin)
return CC
except KeyboardInterrupt:
print "Stopping!"
def vv_harmonic_wmwf_phase2_tst2(M,PP,C,Ns,Is=None,prec=20,Yin=None,do_save=False):
try:
CC=vv_harmonic_wmwf_phase2_2(M,PP,C,Ns,Is,prec,Yin,do_save)
return CC
except KeyboardInterrupt:
print "Stopping!"
def vv_harmonic_wmwf_phase2_1(M,PP,C,Ns,Is=None,prec=20,Yin=None):
r"""
Phase 2 for vector-valued harmonic weak Maass forms.
"""
WR=M.WR;
kappa=M.weight
D=WR.D
Dsym=M.D # the symmetrized index set
if(len(Dsym)<>len(C.keys())):
raise ValueError,"Got incompatible coefficient vector! indices=%s" % C.keys()
#we only use symmetrized values
if(Is==None):
Is=[0,len(D)]
N=WR.N
t=-1
sym_type=mpmath.mpf(M.sym_type)
verbose=M._verbose
#ndig=12
eps=mpmath.power(mpmath.mpf(10),mpmath.mpf(-prec))
if(verbose>0):
print "eps=",eps
print "Yin=",Yin
betai=dict();mbeta=dict(); mbetai=dict(); mm=dict(); mmm=dict()
mptwo=mpmath.mp.mpf(2); mpfour=mpmath.mp.mpf(4)
twopi=mptwo*mpmath.pi(); twopii=mpmath.mp.mpc(0,twopi)
fourpi=mptwo*twopi; mp0=mpmath.mpf(0)
weight=mpmath.mp.mpf(kappa); weight_over_two=weight/mpmath.mp.mpf(2)
K0=0; K1=0
for (beta,m) in PP:
#if( (not beta in D) or (not 1-beta in D)):
if( (not beta in D) and (not 1-beta in D)):
raise Exception,"Need beta=%s in D=%s" %(beta,D)
betai[beta]=D.index(beta)
mbeta[beta]=1-beta
mbetai[beta]=D.index(1-beta)
mm[(beta,m)]=(m+WR.Qv[betai[beta]])
mmm[(beta,m)]=mpmath.mp.mpf(mm[(beta,m)])
if(verbose>0):
print "beta,m=",beta,m
print "mm=",mm[(beta,m)]
#print "-beta=",minus_beta
#print "mm=",mm[(beta,m)]
if mm[(beta,m)]>t:
t=mm
if abs(mm[(beta,m)])<K0:
K0=abs(mm[(beta,m)])
if abs(PP[(beta,m)])>K1:
K1=abs(PP[(beta,m)])
# One way to specify the principal part
# is to only specify half and let the rest be decided
# by the symmetry. If we specify the rest it must match
if PP.has_key((mbeta[beta],m)) and PP.has_key((beta,m)):
test=abs(PP[(beta,m)]-sym_type*PP[(mbeta[beta],m)])
if test>0: # and not test.ae(mp0):
raise ValueError,"The principal part has not correct symmetry: type=%s, PP=%s" %(sym_type,PP)
else:
pass
abD=len(WR.D)
if(Yin==None):
Y0=mpmath.mp.mpf(0.5)
else:
Y0=mpmath.mp.mpf(Yin)
kint=mpmath.mp.mpf(1-weight)
sym_type=M.get_sym_type()
NA=Ns[0]; NB=Ns[1]
if(sym_type==1):
Dstart=int(0); Dfinish=int(WR.N) # 0,1,...,N
elif(sym_type==-1):
Dstart=int(1); Dfinish=int(WR.N-1) # 1,2,...,N-1 (since -0=0 and -N=N)
IA=int(max(Is[0],Dstart)); IB=int(min(Is[1],Dfinish))
Ms=int(min(C[Dstart].keys())); Mf=int(max(C[Dstart].keys())); Ml=int(Mf-Ms+1)
#Ms=int(min(C[D[Dstart]].keys())); Mf=int(max(C[D[Dstart]].keys())); Ml=int(Mf-Ms+1)
if(verbose>0):
print "Ms,Mf,Ml=",Ms,Mf,Ml
K1=K1*2*N
NAA=NA; IAA=IA
numys=2
# have Y=mpmath.mp.mpf(Y_in)
# have to find suitable Q for the given Y
if(verbose>0):
print "dps=",mpmath.mp.dps
ctmp=dict(); ctmp_neg=dict()
Cout=dict()
for bi in range(IA,IB+1):
Cout[bi]=dict()
stw=str(weight)[0:5]
Qadd=0; Yfak=mpmath.mpf(0.95); Yvold=list(range(2))
Xm=dict();Xpb=dict();Ypb=dict(); Cv=dict()
Q=dict(); Qs=dict();Qf=dict(); QQ=dict()
for yloop in range(1000):
Yv=[Y0,Yfak*Y0]
for i in range(2):
Q[i]=M.get_M(Yv[i],K0,K1,prec)+Qadd
Qs[i]=1-Q[i]; Qf[i]=Q[i]; QQ[i]=mpmath.mp.mpf(1)/mpmath.mp.mpf(2*Q[i])
if(verbose>0):
print "Yv[",i,"]=[",mppr(Yv[0]),",",mppr(Yv[1]),"]"
print "Q(Y)[",i,"]=",Q[i]
#print "1/2Q[",i,"]=",mppr(QQ[i])
# Recall that the first Y-value is always the larger
if(verbose>1):
print "Yvold=",mppr(Yvold[0]),",",mppr(Yvold[1]),"]"
if(Yv[0].ae(Yvold[1])):
if(verbose>1):
print "do not evaluate for Yv=",Yv[0]
[Xm[0],Xpb[0],Ypb[0],Cv[0]]=[Xm[1],Xpb[1],Ypb[1],Cv[1]]
[Xm[1],Xpb[1],Ypb[1],Cv[1]]=pullback_pts_weil_rep(WR,Q[1],Yv[1],weight,Dstart,Dfinish)
else:
for i in range(2):
[Xm[i],Xpb[i],Ypb[i],Cv[i]]=pullback_pts_weil_rep(WR,Q[i],Yv[i],weight,Dstart,Dfinish)
Yvold=Yv; Zipb=dict()
for yj in range(0,numys):
Zipb[yj]=dict()
for j in range(Qs[yj],Qf[yj]+1):
Zipb[yj][j]=mpmath.mp.mpc(-Ypb[yj][j],Xpb[yj][j])
gamma_fak=dict()
for yj in range(0,numys):
for bi in range(IA,IB+1):
for l in range(Ms,Mf+1):
lr=mpmath.mp.mpf(l+WR.Qv[bi])
if(lr<0):
lrtwo=lr*mptwo
for j in range(Qs[yj],Qf[yj]+1):
gamma_fak[yj,bi,l,j]=mpmath.gammainc(kint,abs(lrtwo)*Ypb[yj][j])*mpmath.mp.exp(-lr*Ypb[yj][j])
else:
for j in range(Qs[yj],Qf[yj]+1):
gamma_fak[yj,bi,l,j]=mpmath.mp.exp(-lr*Ypb[yj][j])
if(verbose>0):
print "Got pullback points!"
print "dps=",mpmath.mp.dps
print "NAA=",NAA
# If we want to do negative coefficients too we save time by computing simultaneously
do_neg=True
try:
for n in range(NAA,NB+1):
if(verbose>0):
print "n=",n
for ai in range(IAA,IB+1):
if(verbose>0):
print "ai=",ai
mai=-ai % abD
nr=mpmath.mp.mpf(n+WR.Qv[ai])
nrtwo=mp2*nr
nri=mpmath.mp.mpc(0,nr)
if(do_neg):
nrm=mpmath.mp.mpf(-n+WR.Qv[ai])
nrmi=mpmath.mp.mpc(0,nrm)
nrmtwo=mp2*nrm
for yj in range(0,numys):
Y=Yv[yj]*twopi
summa=mp0;
summa_neg=mp0
#print "IA,IB=",IA,IB
fak=dict()
for j in range(Qs[yj],Qf[yj]+1):
fak[j]=mpmath.mp.exp(-nri*Xm[yj][j])
if(do_neg):
fak_neg=dict()
for j in range(Qs[yj],Qf[yj]+1):
fak_neg[j]=mpmath.mp.exp(-nrmi*Xm[yj][j])
for bi in range(IA,IB+1):
mbi=-bi % abD
#print "mbi=",mbi
for l in range(Ms,Mf+1):
if(C[bi][l]==0 or abs(C[bi][l]).ae(mp0)):
if(verbose>0):
print "Skip coeff ",bi,l
#continue
lr=mpmath.mp.mpf(l+WR.Qv[bi])
ilr=mpmath.mp.mpc(0,lr)
Vtmp=mp0;
Vtmp_neg=mp0
for j in range(Qs[yj],Qf[yj]+1):
if(mbi<>bi):
ch=Cv[yj][j][ai,bi]+sym_type*Cv[yj][j][ai,mbi]
else:
ch=Cv[yj][j][ai,bi]
if(ch==0 or ch.ae(mp0)):
continue
tmp=(ch*mpmath.exp(ilr*Xpb[yj][j]))*gamma_fak[yj,bi,l,j]
Vtmp=Vtmp+tmp*fak[j]
if(do_neg):
Vtmp_neg=Vtmp_neg+tmp*fak_neg[j]
summa=summa+Vtmp*C[bi][l]
if(do_neg):
summa_neg=summa_neg+Vtmp_neg*C[bi][l]
if(verbose>1):
print "summa(",yj,")=",summa
if(do_neg):
print "summa_neg(",yj,")=",summa_neg
wsumma=mp0;
wsumma_neg=mp0
for (beta,m) in PP:
app=mpmath.mp.mpf(PP[(beta,m)])
lr=mpmath.mp.mpf(m+WR.Qv[betai[beta]])
tmpsumma=mp0;
tmpsumma_neg=mp0
for j in range(Qs[yj],Qf[yj]+1):
if(betai[beta] <> mbetai[beta]):
ch=Cv[yj][j][ai,betai[beta]]+sym_type*Cv[yj][j][ai,mbetai[beta]]
else:
ch=Cv[yj][j][ai,betai[beta]]
if(ch==0 or ch.ae(mp0)):
continue
tmp=ch*mpmath.exp(lr*Zipb[yj][j])
tmpsumma=tmpsumma+tmp*fak[j]
if(do_neg):
tmpsumma_neg=tmpsumma_neg+tmp*fak_neg[j]
wsumma=wsumma+app*tmpsumma
if(do_neg):
wsumma_neg=wsumma_neg+app*tmpsumma_neg
if(verbose>1):
print "wsumma(",yj,")=",wsumma
if(do_neg):
print "wsumma_neg(",yj,")=",wsumma_neg
sumtmp=(summa+wsumma)*QQ[yj]
if(PP.has_key((D[ai],n))>0):
sum_tmp=sum_tmp-PP[(D[ai],n)]*mpmath.mp.exp(-nr*Y)
lhs=mpmath.mp.exp(nr*Y)
if(verbose>0):
print "exp(2pinY)=",mppr(lhs)
ctmp[yj]=sumtmp*lhs
if(do_neg):
sumtmp_neg=(summa_neg+wsumma_neg)*QQ[yj]
if(PP.has_key((D[ai],-n))>0):
sumtmp_neg=sumtmp_neg-PP[(D[ai],-n)]*mpmath.mp.exp(-nrm*Y)
lhs=mpmath.gammainc(kint,abs(nrmtwo)*Y)*mpmath.mp.exp(-nrm*Y)
if(verbose>0):
print "Gamma(1-k,4pinY)=",mppr(lhs)
ctmp_neg[yj]=sumtmp_neg/lhs
# end for yj
if(verbose>-1):
print "C1[",n,ai,"]=",ctmp[0].real
print "C2[",n,ai,"]=",ctmp[1].real
if(do_neg):
print "C1[",-n,ai,"]=",ctmp_neg[0].real
print "C2[",-n,ai,"]=",ctmp_neg[1].real
if(do_neg):
err_pos=abs(ctmp[1]-ctmp[0])
err_neg=abs(ctmp_neg[1]-ctmp_neg[0])
err=err_pos # max(err_pos,err_neg)
if(verbose>-1):
print "err_pos=",mppr(err_pos)
print "err_neg=",mppr(err_neg)
else:
err=abs(ctmp[1]-ctmp[0])
if(verbose>-1):
print "err=",mppr(err)
if(verbose>0):
if(C.keys().count(ai)>0):
if(C[ai].keys().count(n)>0):
print "Cin(",ai,n,")=",C[ai][n].real
if(C[ai].keys().count(-n)>0):
print "Cin(",ai,-n,")=",C[ai][-n].real
sys.stdout.flush()
if(err>eps):
# Have to modify
if(verbose>0):
print " Need to decrease Y!"
Y0=Yv[0]*Yfak
#Qadd=Qadd+10
#Yv[0]=Y0; Yv[1]=Y0*mpmath.mp.mpf(0.95)
NAA=n; IAA=ai
raise StopIteration()
else:
Cout[ai][n]=ctmp[1]
if(do_neg):
Cout[ai][-n]=ctmp_neg[1]
if(verbose>0):
print "OK! av=",(ctmp[1]+ctmp[0])/mpmath.mpf(2)
# If we are in the range of the used C's we update
#if(C.keys().count(ai)>0):
# if(C[ai].keys().count(n)>0):
# C[ai][n]=ctmp[1]
# if(do_neg and C[ai].keys().count(-n)>0):
# C[ai][-n]=ctmp_neg[1]
#continue
# end for ai
#print "n at end=",n
# end for n
return Cout
except StopIteration:
if(verbose>0):
print "Iteration stopped!"
continue
raise StopIteration()
class VVHarmonicWeakMaassFormElement(AutomorphicFormElement):
r"""
A harmonic weak Maass form.
"""
    def __init__(self,M,principal_part=None,C=None,prec=None,Lv=None):
        r"""
        Initialize a harmonic weak Maass form element.
        INPUT:
        -''M'' -- space of harmonic weak Maass forms (or a classical newform,
                  in which case initialization is delegated to _init_from_newform_)
        -''principal_part''-- principal part (dictionary {(beta,m): coefficient})
        -''C''-- Fourier coefficients (dictionary indexed by component)
        -''prec'' -- integer, precision (if given by construction, default None)
        -''Lv'' -- dictionary of L-values (default None)
        EXAMPLES::
        sage: WR=WeilRepDiscriminantForm(11,dual=True)
        sage: M=VVHarmonicWeakMaassForms(WR,0.5,100)
        sage: PP={(7/22,0):1}
        sage: F=M.get_element(PP,12);F
        Element of Space of Vector-Valued harmonic weak Maass forms on Modular Group SL(2,Z) of weight 1/2 and dimension 10.
        Representation is Dual of Weil representation of the discriminant form given by ZZ/22ZZ with quadratic form Q(x)=11*x**2 mod 1. with principal part: q^-5/44
        """
        # If M is actually a newform, build the element from it instead.
        if(isinstance(M,type(Newforms(1,12)[0]))):
            self._init_from_newform_(M,principal_part,C,prec)
            return
        if(C <> None):
            if(M.dim <> len(C.keys())):
                raise ValueError,"Coefficient vector of wrong format! Got length=%s" % len(C)
        #if(not isinstance(M
        ## We inherit symmetry from the space
        self._sym_type = M.sym_type
        self._set_sym_map(M)
        self._class_name = "VVHarmonicWeakMaassFormElement"
        AutomorphicFormElement.__init__(self,M,C=C,prec=prec,principal_part=principal_part)
        #self.space=M
        #self.principal_part=PP
        #self.coeffs=C
        #self.prec=prec
        self._verbose = self._space._verbose
        self.maxdigs=prec # the number of digits needed to be displayed to print all digits of the coefficients
        if(Lv<>None):
            self._Lv=Lv
        else:
            self._Lv=dict()
        # Cached error-estimate constants; see get_error_estimates().
        self.Cp0=0; self.Cp1=0; self.Cm=0
        ## We also find the space corresponding to this space via inverse of xi_k and Shimura corr.
        # t is integral exactly when the weight is half-integral.
        t=QQ(RR(M.weight))-QQ(1)/QQ(2)
        if(is_int(t)):
            # k = 3 - 2*kappa is the integral weight of the corresponding newforms.
            k=QQ(3)-QQ(2)*QQ(RR(M.weight))
            if self._verbose > 0:
                print "k=",k
            self.shim_corr=Newforms(self._space.WR.N,k,names='a') #.new_subspace()
        else:
            self.shim_corr=None
def _set_sym_map(self,M):
r"""
controls which components are equivalent (or zero) under symmetry.
"""
self._sym_map = {}
N = M.WR.N
self._sym_reps = range(2*N)
if self._sym_type not in [1,-1]:
return
else:
if self._sym_type == 1:
self._sym_reps = range(N+1)
for i in self._sym_reps:
self._sym_map[i]=i
if i<>N and i<>0:
self._sym_map[2*N-i]=i
else:
self._sym_reps = range(1,N)
for i in self._sym_reps:
self._sym_map[i]=i
self._sym_map[2*N-i]=i
self._sym_map[0]=None # Gets mapped to zero
self._sym_map[N]=None # Gets mapped to zero
    def sym_type(self):
        r"""
        Return the symmetry type (+1 or -1) of self.
        """
        return self._sym_type
    def sym_map(self):
        r"""
        Return the symmetry map (dictionary: component index -> representative,
        or None for components which vanish by symmetry), as set by _set_sym_map.
        """
        return self._sym_map
    def sym_reps(self):
        r"""
        Return the list of representative component indices under symmetry.
        """
        return self._sym_reps
    def __reduce__(self):
        r"""
        Support pickling: reconstruct self from its space, principal part,
        coefficients, precision and stored L-values.
        """
        #return(HarmonicWeakMaassFormElement,(self.space,self.principal_part.items(),self._coeffs,self.prec))
        return(VVHarmonicWeakMaassFormElement,(self._space,self._principal_part,self._coeffs,self.prec,self._Lv))
def _repr_(self):
r""" Return string representation of self.
EXAMPLES:
sage: WR=WeilRepDiscriminantForm(11,dual=True)
sage: M=VVHarmonicWeakMaassForms(WR,0.5,100)
sage: PP={(7/22,0):1}
sage: F=M.get_element(PP,12);F
Element of Space of Vector-Valued harmonic weak Maass forms on Modular Group SL(2,Z) of weight 1/2 and dimension 10.
Representation is Dual of Weil representation of the discriminant form given by ZZ/22ZZ with quadratic form Q(x)=11*x**2 mod 1. with principal part: q^-5/44
"""
s="Element of "+str(self._space)+" with principal part: "
WR=self._space.WR
sp=""
for (b,m) in self._principal_part:
a=self._principal_part[(b,m)]
if(a<>0):
x=QQ(m+WR.Qv[WR.D.index(b)])
if(a<>1):
if(a>0 and len(sp)>0):
ast="+"+str(a)
else:
ast=str(a)
sp=sp+ast+"q^{"+str(x)+"}"
else:
sp=sp+"q^{"+str(x)+"}"
s=s+sp
return s
def _latex_(self):
r""" Return LaTeX string representation of self.
EXAMPLES:
sage: WR=WeilRepDiscriminantForm(11,dual=True)
sage: M=VVHarmonicWeakMaassForms(WR,0.5,100)
sage: PP={(7/22,0):1}
sage: F=M.get_element(PP,12);F
Element of Space of Vector-Valued harmonic weak Maass forms on Modular Group SL(2,Z) of weight 1/2 and dimension 10.
Representation is Dual of Weil representation of the discriminant form given by ZZ/22ZZ with quadratic form Q(x)=11*x**2 mod 1. with principal part: q^-5/44
"""
old=s="\\begin{verbatim}\\end{verbatim}"
new=""
s="\\begin{verbatim}\\end{verbatim}"
s+="Element of "+self._space._latex_().replace(old,new)+" With principal part "
WR=self._space.WR
# If we have more than one non-zero element in the principal part we have to
# addd a + between terms
sp=""
for (b,m) in self._principal_part:
a=self._principal_part[(b,m)]
if(a<>0):
x=QQ(m+WR.Qv[WR.D.index(b)])
if(a<>1):
if(a>0 and len(sp)>0):
ast="+"+str(a)
else:
ast=str(a)
sp=sp+ast+"q^{"+str(x)+"}"
else:
sp=sp+"q^{"+str(x)+"}"
s=s+sp+"$."
return s
    def _init_from_newform_(self,g,PP,C,prec):
        r""" Init a function from a newform (without computing Fourier coefficients).

        The sign of the functional equation of ``g`` decides whether the dual
        Weil representation is used, and a suitable principal part with a
        non-square discriminant is chosen (to avoid theta series).
        """
        k=g.weight()
        N=g.level()
        # Sign of the functional equation, from the Atkin-Lehner eigenvalue.
        if(k % 4==0):
            ep=g.atkin_lehner_eigenvalue()
        else:
            ep=-g.atkin_lehner_eigenvalue()
        # NOTE(review): ``mp2`` is presumably a module-level mpf(2) constant
        # defined elsewhere in this file -- TODO confirm.
        kappa=mpmath.mpf(3-k)/mp2
        if(ep==-1):
            WR=WeilRepDiscriminantForm(N,False)
        elif(ep==1):
            WR=WeilRepDiscriminantForm(N,True)
        else:
            raise ValueError," Sign of functional equation must be 1 or -1! Got:%s"%ep
        #print "kappa=",kappa
        #print "WR=",WR
        M=VVHarmonicWeakMaassForms(WR,kappa,prec)
        #print "M=",M
        self._space=M
        self.prec=prec
        self.coeff=dict()
        # We want a Harmonic weak maass form corresponding to the form g
        # that means we need to avoid any other cuspforms as well as
        # theta series...
        # If there are no oldforms we are happy
        if dimension_new_cusp_forms(N,k)==dimension_cusp_forms(N,k):
            # and to avoid theta series we need to avoid square discriminants
            # in the principal part
            if M._is_dual_rep:
                nset=[0,-1]
            else:
                nset=[-1]
            # Pick the first (r,n) whose discriminant is not a square;
            # StopIteration is used to break out of the double loop.
            try:
                for n in nset:
                    for r in WR.D_as_integers:
                        D=M.D_from_rn((r,n))
                        if(not is_square(D)):
                            PP={(WR.D[r],n):1}
                            self._principal_part=PP
                            #print "PP=",PP,"is ok!"
                            raise StopIteration()
            except StopIteration:
                pass
        # NOTE(review): if no suitable principal part is found above,
        # self._principal_part is left unset.
        #if(C<>None and C>0):
def get_principal_part(self,str=False,disc=False):
r""" Return principal part of self.
INPUT:
-disc -- logical (default False) if True, return the principal part as discriminants
EXAMPLES:
sage: WR=WeilRepDiscriminantForm(11,dual=True)
sage: M=VVHarmonicWeakMaassForms(WR,0.5,100)
sage: PP={(7/22,0):1}
sage: F=M.get_element(PP,12);F
Element of Space of Vector-Valued harmonic weak Maass forms on Modular Group SL(2,Z) of weight 1/2 and dimension 10.
Representation is Dual of Weil representation of the discriminant form given by ZZ/22ZZ with quadratic form Q(x)=11*x**2 mod 1. with principal part: q^-5/44 """
if(not disc and not str):
return self._principal_part
L=list()
for (r,n) in self._principal_part:
D=self._space.D_from_rn((r,n))
L.append((D,self._principal_part[(r,n)]))
if(disc):
return L
if(str):
if(len(L)==1 and L[0][1]==1):
return L[0][0]
else:
return L
    def pairing(self,g,t=1):
        r""" Compute the bilinear pairing {g,self} for a holomorphic form g

        Currently this solves a linear system relating the coefficients of the
        scalar form ``g`` to those of its inverse-Shimura lift (with parameter
        ``t``) and returns the dictionary of lifted coefficients indexed by
        discriminant t*j^2.
        """
        # Have to make test of g!!!
        #raise NotImplementedError," Need a proper class of holomorphic vector-valued forms!"
        # # We need to obtain the coefficients of the inverse Shimura rep. first
        # # if we want to apply this to a scalar holomorphic form g
        # First locate the maximum discriminant we need
        Dmax=0
        PP=self._principal_part
        for (r,n) in PP:
            D=self._space.D_from_rn((r,n))
            if(abs(D)>abs(Dmax)):
                Dmax=D
        sig=sign(Dmax)
        # NOTE(review): Dmax is overwritten with a hard-coded 10 here, so the
        # computed maximum above only contributes its sign -- development leftover?
        Dmax=10 # abs(Dmax)
        print "Dmax=",sig*Dmax
        #t=1 # if this doesn't work we have to choose another t
        # I also assume that g and G have trivial characters
        # Solve syst*X = rhs where syst encodes the Shimura-lift relation
        # a(n) = sum_{d|n} chi(d) d^(k-1) b(n/d).
        syst=matrix(ZZ,Dmax,Dmax)
        rhs=matrix(ZZ,Dmax,1)
        k=Integer(g.weight()/Integer(2))
        for n in range(1,Dmax+1):
            rhs[n-1,0]=g[n]
            for d in range(1,Dmax+1):
                if(n % d <>0):
                    continue
                ## I use the character d -> (4N / d)
                #chi=(kronecker(-1,d)**k)*kronecker(t,d)*kronecker(d,F.space.WR.level)
                chi=kronecker(t,d) #*kronecker(d,F.space.WR.level)
                am=chi*d**(k-1)
                #print "am[",n,d,"]=",am
                syst[n-1,n/d-1]=am
        X=syst.solve_right(rhs)
        C=dict()
        for j in range(1,Dmax+1):
            C[t*j**2]=X[j-1,0]
            print "C[",t*j**2,"]=",X[j-1,0]
        return C
        # NOTE(review): everything below this point is unreachable (dead code
        # after ``return C``); it looks like the originally intended pairing
        # sum_{(r,n)} PP[(r,n)]*g.coeff((r,n)).
        summa=0
        PP=self._principal_part
        for (r,n) in PP:
            summa=summa+PP[(r,n)]*g.coeff((r,n))
        return summa
    def compute_coefficients(self,nrange,irange=None,prec=10,ef=True,Qadd=0):
        r""" Compute a list of coeficients.
        INPUT:
        - nrange -- range of integers (coefficient indices wanted)
        - irange -- range of integers (component indices, default: all)
        - prec -- integer, requested number of digits
        - ef -- logical; if True use the phase2_2_ef routine, else phase2_2
        - Qadd -- integer (take more sample points along the horocycle)
        OUTPUT:
        Coefficients C(i,n) for i in irange and n in nrange (stored in self._coeffs).
        EXAMPLES:
        sage: WR=WeilRepDiscriminantForm(11,dual=True)
        sage: M=VVHarmonicWeakMaassForms(WR,0.5,100)
        """
        # we first need an initial set of coefficients
        C=self._coeffs; P=self._principal_part; M=self._space
        WR=M.WR; weight=M.weight
        if(self.prec>= prec or len(C)>0):
            # presumable we already have good coefficients
            pass
        else:
            # Need initial set first: set up and solve the truncated linear system.
            print "Computing initial set of coefficients!"
            self.prec=prec
            [Y,M0]=self._space.get_Y_and_M(P,weight,prec)
            Q=M0+10
            W=vv_harmonic_wmwf_setupV(WR,P,Y,M0,Q,weight,self._space.sym_type,verbose=self._space._verbose)
            # A constant term (0,0) in the principal part forces the cusp-form normalisation.
            if(P.has_key((0,0))):
                N=set_norm_vv_harmonic_weak_maass_forms(WR,cusp_form=True,holomorphic=self._holomorphic)
            else:
                N=set_norm_vv_harmonic_weak_maass_forms(WR,cusp_form=False,holomorphic=self._holomorphic)
            C=solve_system_for_vv_harmonic_weak_Maass_waveforms(W,N,verbose=self._verbose)
        # endif
        # check if we have all coefficients we wanted
        maxc=max(C[C.keys()[0]].keys())
        if(maxc>=max(nrange)):
            print "Have all we need!"
            pass
        else:
            # we do not have all coefficients we need
            print "Need to compute more!!"
            Ns=nrange # [maxc,max(nrange)]
            if(irange<>None):
                Is=irange
            else:
                Is=[min(M.D_as_int),max(M.D_as_int)]
            # Try to find good Y
            # Recall that the error in the negative part is usually smaller than in the positive part
            M_minus=abs(min(self._coeffs[self._coeffs.keys()[0]]))
            M_plus=abs(max(self._coeffs[self._coeffs.keys()[0]]))
            # Assume we computed these coefficients at (almost) the highest horocycle
            Y0=mpmath.sqrt(3)/mpmath.mpf(2)*mpmath.mpf(0.995)
            [err_minus,err_plus]=self.get_error_estimates(Y0,M_minus,M_plus)
            kint=mpmath.mp.mpf(1-self._space.weight)
            print "original:"
            print "err-=",err_minus
            print "err+=",err_plus
            # Decrease Y geometrically until the estimated truncation errors
            # (positive and negative part) are below the target precision.
            Y0=mpmath.mpf(0.5)
            Yin=Y0
            for j in range(5000):
                Y=Y0*mpmath.power(mpmath.mpf(0.99),j)
                t=mpmath.pi()*2*Y*abs(Ns[0])
                tmp1=mpmath.exp(t)
                err1=err_plus*tmp1
                #print "err+=",err1
                tmp2=mpmath.gammainc(kint,2*t)
                err2=err_plus*mpmath.exp(-t)/tmp2
                #print "err-=",err2
                if(max(err1,err2)<mpmath.power(10,-prec)):
                    Yin=Y
                    break
            #t=max(1.0,abs(mpmath.log10(prec)-mpmath.log10(self.prec)))
            #Yin=t/mpmath.mpf(Ns[0]+Ns[1])*mpmath.mpf(2.0) ## This should be good on average
            #Yin=Yin*mpmath.mpf(0.2)
            print "err=",max(err1,err2)
            print "Yin=",Yin
            sys.stdout.flush()
            #Qadd=40
            # Run phase 2; a keyboard interrupt keeps whatever was computed so far.
            try:
                if(ef):
                    CC=vv_harmonic_wmwf_phase2_2_ef(self,Ns,Is,prec,Yin,Qadd_in=Qadd)
                else:
                    CC=vv_harmonic_wmwf_phase2_2(M,P,C,Ns,Is,prec,Yin)
                for x in CC.keys():
                    C[x]=CC[x]
            except KeyboardInterrupt:
                print "Manually stopping..."
    def get_error_estimates(self,Y,M1,M2=None):
        r""" Compute the constants needed to make error estimates.

        INPUT:
        - ``Y``  -- height of the horocycle
        - ``M1`` -- truncation point for the negative part
        - ``M2`` -- truncation point for the positive part (default: same as M1)
        OUTPUT:
        - ``[er1,er2]`` -- estimated truncation errors for the negative and
          positive parts of the Fourier expansion, respectively.
        """
        # First K0 and K1
        Mminus=M1
        if(M2==None):
            Mplus=M1
        else:
            Mplus=M2
        # Use cached constants if they have been computed before.
        if(self.Cp0<>0 and self.Cp1<>0 and self.Cm<>0):
            Cp0=self.Cp0; Cp1=self.Cp1; Cm=self.Cm
        else:
            PP=self.principal_part()
            # Cmax = largest principal-part coefficient,
            # Kmax = largest |exponent| in the principal part.
            Cmax=max(PP.values());Kmax=0
            for t in PP.keys():
                # Keys may be index pairs (c,l) or discriminants.
                if isinstance(t,tuple):
                    (c,l) = t
                elif isinstance(t,(int,Integer)):
                    (c,l)=self._space.rn_from_D(t)
                else:
                    raise ValueError,"Incorrect principal part: t={0}".format(t)
                # c may be a discriminant-form element or an integer index.
                if c in self._space.WR.D:
                    tmp=l+self._space.WR.Qv[self._space.WR.D.index(c)]
                elif c in range(len(self._space.WR.Qv)):
                    tmp=l+self._space.WR.Qv[c]
                else:
                    raise ValueError,"Incorrect principal part: c,l={0},{1}".format(c,l)
                if(abs(tmp)>Kmax):
                    Kmax=abs(tmp)
            [Cp0,Cp1]=self._space.get_Cp(Cmax)
            Cm=self._space.get_Cm(Kmax,Cmax)
            # Cache for subsequent calls.
            self.Cp0=Cp0; self.Cp1=Cp1; self.Cm=Cm
        fak=len(self._space.WR.D)
        #print "Cp0,Cp1,Cm=",Cp0,Cp1,Cm
        #print "fak=",fak
        er1=fak*self._space.err_est_vv_hwmf_neg(Y,Mminus,Cm)
        er2=fak*self._space.err_est_vv_hwmf_pos(Y,Mplus,Cp0,Cp1)
        return [er1,er2]
def get_coefficient(self,L,n=None):
r""" Return a coefficient or a list of coeficients.
EXAMPLES:
sage: WR=WeilRepDiscriminantForm(11,dual=True)
sage: M=VVHarmonicWeakMaassForms(WR,0.5,100)
sage: F=M.get_element({(7/22,0):1},12)
"""
if(isinstance(L,list)):
l=list()
for t in L:
if(isinstance(t,tuple)):
tt=t
elif(is_int(t)):
tt=self._space.rn_from_D(t)
if(tt<>None):
c=self.get_one_coefficient(tt[0],tt[1])
l.append(c)
return l
elif(is_int(n)):
return self.get_one_coefficient(L,n)
elif(is_int(L)):
tt=self._space.rn_from_D(L)
if(tt<>None):
return self.get_one_coefficient(tt[0],tt[1])
else:
raise ValueError,"Incorrect keys for coefficents: L,n=%s, %s" %(L,n)
    def C(self,r,n=None):
        r"""
        Return coefficient c(r,n); alias to get_coefficient.
        """
        return self.get_coefficient(r,n)
def get_one_coefficient(self,r,n):
r""" Return coefficient c(r,n) if it exists
"""
c=None
if( not r in self._space.WR.D and not r in self._space.WR.D_as_integers):
raise ValueError,"Component r=%s is not valid!" % r
# We also see if the coefficient can be provided via symmetry
# If r is in D we swap it to its index
if( r in self._space.WR.D ):
rr=self._space.WR.D.index(r)
else:
rr=r
minus_rr=(len(self._space.WR.D)-rr) % len(self._space.WR.D)
#print "rr=",rr
#print "-rr=",minus_rr
if self._space._is_dual_rep:
if rr == minus_rr:
return 0
if self._coeffs.has_key(rr):
if self._coeffs[rr].has_key(n):
c=self._coeffs[rr][n]
elif self._coeffs.has_key(minus_rr):
if self._coeffs[minus_rr].has_key(n):
c=self._coeffs[minus_rr][n]
return c
def add_coefficients_from_file(self,file=None,overwrite=False,nmax=None):
r"""
Add a set of coefficients from a file of the format:
r n C(r,n)
"""
C=dict()
f=open(file,'r')
i=0
md=mpmath.mp.dps
mpold=mpmath.mp.dps
for line in f:
i=i+1
if(nmax<>None and i>nmax):
break
#return line
l=line.strip().split()
if(len(l)<2):
continue
#print "l=",l
r=int(l[0])
n=int(l[1])
#print "r=",r,"n=",n,self._coeffs.keys(),self._coeffs.keys().count(r)
if(self._coeffs.keys().count(r)==0):
continue
cs=join(l[2:len(l)])
#print cs
## see if the string is given in arprec format: 10^a x b
if(cs.count("^")==1 and cs.count("x")==1):
s=cs.split("^")[1]
a=s.split("x")
cs=a[1]+"E"+a[0]
cs=cs.replace(" ","")
if(len(cs)>md):
md=len(cs)
##mpmath.mp.dps=md
#if(r==1 and n==79):
# print "l=",l
# print "c(",r,n,")=",cs
c=mpmath.mpf(cs)
#mpmath.mp.dps=mpold
#print "c(",r,n,")=",c
C[(r,n)]=c
#return C
self.add_coefficients(C,overwrite)
    def add_coefficients(self,L,overwrite=False):
        r""" Add one or more coefficients to self.

        INPUT:
        -''L'' -- dictionary of pairs of indices and coefficients; keys may be
                  discriminants (integers) or pairs (r,n)
        -''overwrite'' -- logical, set to True if we want to overwrite present coefficients
        """
        if(not isinstance(L,dict)):
            raise ValueError,"Call with dictionary as argument!"
        for p in L.keys():
            c=mpmath.mpmathify(L[p])
            #print "c=",c
            # keep track of the largest number of digits needed so far
            cd=ceil(mpmath.log10(abs(c)))
            if(cd>self.maxdigs):
                self.maxdigs=cd
            #print "p=",p
            if(is_int(p)):
                (r,n)=self._space.rn_from_D(p)
            elif(isinstance(p,tuple)):
                (r,n)=p
            if(self._coeffs.has_key(r)):
                if(self._coeffs[r].has_key(n)):
                    c_old=self._coeffs[r][n]
                    ## Try to determine (heuristically) if the new coefficient is really better
                    # NOTE(review): d1, d2 are computed but never used below -- the
                    # heuristic comparison appears to be unfinished.
                    d1=dist_from_int(c)[0]
                    d2=dist_from_int(c_old)[0]
                    if(overwrite):
                        self._coeffs[r][n]=c
                else:
                    self._coeffs[r][n]=c
            else:
                # see if it is a possible index at all
                if(not r in self._space.WR.D_as_integers):
                    raise ValueError,"Key %s corr to (r,n)=(%s,%s) is invalid for the current space!" %(p,r,n)
                elif(not r in self._space.WR.D_as_int):
                    # r is a valid index for the full space but not for the
                    # symmetrized one; check consistency with the symmetry
                    if(self._space.sym_type==-1 and (r==0 or r==self._space.WR.N)):
                        # Should be 0 by symmetry
                        if(abs(c)>10**(1-self.prec)):
                            raise ValueError,"Coefficient should be zero by symmetry. Got c(%s,%s)=%s!" %(r,n,c)
                        else:
                            self._coeffs[r][n]=0
                    else:
                        # is equal to +- c(-r,n)
                        # NOTE(review): this line uses self.WR while the rest of the
                        # class uses self._space.WR -- confirm self.WR exists here.
                        mr=2*self.WR.N-r
                        if(self._coeffs.has_key(mr)):
                            if(self._coeffs[mr].has_key(n)):
                                c_old=self._coeffs[mr][n]
                                if( abs(c-self._space.WR.sym_type*c_old)> 10**(1-self.prec)):
                                    st="Might add an erronous coefficient! Got c(%s,%s)=%s. " % (r,n,c)
                                    st+="From previous coefficients should have %s" % (self._space.sym_type*c_old)
                                    raise ValueError,st
                                if(overwrite):
                                    self._coeffs[mr][n]=c
                else:
                    raise ValueError,"Coefficient should be zero by symmetry. Got c(%s,%s)=%s!" %(r,n,c)
def list_coefficients(self,format='components',fd=True,pos=True,neg=True,printimag=False,norm_neg=True,nmin=0,nmax=0,latex=False,nd=0,Lv=False,prime=False):
r""" List all coefficients C^{+}(Delta} corresponding to fundamental discriminants up to Dmax.
INPUT:
-``format`` -- string.
== components (default) means that we list coefficients component-wise
== disc menas that we list according to discriminant
-``fd`` -- logical (default True) if set to True only prints coefficients given by fundamental discriminants
-``max`` -- integer (default 0) the largest coefficient (either max n or max D). If 0 we list all coefficients we got
-``pos`` -- logical (default True) if set to true prints positive coefficients
-``neg`` -- logical (default True) if set to true prints negative coefficients
-``norm_neg`` -- logical( default True) if we want to normalize the negative coefficients by dividing c(D) with some non-zero c(j) and sqrt(D)
-``printimag`` -- logical (default False) if set to True prints imaginary parts (which otherwise are assumed zero)
-``max`` -- integer (default 0) if >0 denotes the maximum index of coefficients to print
-''Lvals'' -- logical (default False) if True add values from self._Lv in a third column
-''prime'' -- only list coefficients of discriminant relatively prime to the level
"""
M=self._space; WR=M.WR
C=self._coeffs
if format[0]=="C" or format[0]=="c":
self._list_coefficients_by_components(fd,pos,neg,printimag,norm_neg,nmin,nmax,latex,nd,Lv,prime)
else:
self._list_coefficients_by_discriminant(fd,pos,neg,printimag,norm_neg,nmin,nmax,latex,nd,Lv,prime)
    def _list_coefficients_by_components(self,fd=True,pos=True,neg=True,printimag=False,norm_neg=True,nmin=0,nmax=0,latex=False,nd=0,Lvals=False,prime=False):
        r""" List coefficients component-wise, i.e. as C[r][n].
        INPUT:
        -``fd`` -- logical (default True) if set to True only prints coefficients given by fundamental discriminants
        -``pos`` -- logical (default True) if set to True prints positive coefficients
        -``neg`` -- logical (default True) if set to True prints negative coefficients
        -``printimag`` -- logical (default False) if set to True prints imaginary parts (which otherwise are assumed zero)
        -``norm_neg`` -- logical (default True) if we want to normalize the negative coefficients by dividing c(D) with some non-zero c(j) and sqrt(D)
        -``nmin``/``nmax`` -- integers (default 0) bounds on |n| if >0
        -''Lvals'' -- logical (default False) if True add values from self._Lv in a third column
        -''prime'' -- only list coefficients of discriminant relatively prime to the level
        """
        # sig = -1 for the dual Weil representation (flips the sign of D)
        sig=1
        if(self._space.WR.is_dual()):
            sig=-1
        # column widths for aligned output
        maxi=max(self._coeffs.keys())
        w1=len(str(maxi))
        w2=max(map(len,str(self._space.WR.D).split()))
        maxn=max(self._coeffs[self._coeffs.keys()[0]].keys())
        w3=len(str(maxn))+1
        C=self._coeffs
        mp0=mpmath.mpf(0)
        mpold=mpmath.mp.dps
        N=self._space.WR.N
        # temporarily raise mpmath's precision so all stored digits print
        if(mpmath.mp.dps < self.maxdigs):
            mpmath.mp.dps=self.maxdigs
        if(norm_neg):
            # find the first non-vanishing coefficient with negative discriminant
            # to normalize the negative coefficients with
            cnorm=0
            tnorm=(0,0)
            for j in range(1,100):
                t=self._space.rn_from_D(-j*sig)
                if(t==None):
                    continue
                if(t[1]+self._space.WR.Qv[t[0]]>=0):
                    continue
                c1=self.get_coefficient(t[0],t[1])
                if c1==None:
                    continue
                if abs(c1)>self._prec:
                    cnorm=c1
                    tnorm=t
                    print "c1=c(",tnorm,"),",cnorm
                    break
        for r in C.keys():
            for n in range(min(C[r].keys()),max(C[r].keys())+1):
                if(nmin>0 and abs(n)<nmin):
                    continue
                if(nmax>0 and abs(n)>nmax):
                    continue
                # nn = n + q(r) is the "true" (rational) index of the coefficient
                nn=n+self._space.WR.Qv[r]
                if(not neg and nn<0):
                    continue
                if(not pos and nn>=0):
                    continue
                D=self._space.D_from_rn((r,n))
                if(fd):
                    if(fd and not is_fundamental_discriminant(D) and D<>1):
                        continue
                if(prime and gcd(D,N)>1):
                    continue
                c=self.get_coefficient(r,n)
                cs=""
                if(c<>0 and c<>None):
                    if(nn>=0): ss="+"
                    if(nn<0):
                        ss="-"
                        if(norm_neg):
                            #print "r,n=",r,n
                            #print "cnorm=",cnorm
                            #print "tnorm=",tnorm
                            # normalize: divide by the reference coefficient and
                            # multiply by sqrt(|D|)
                            D=self._space.D_from_rn((r,n))
                            if( ((r,n)<> tnorm) and cnorm<>0):
                                c=c/cnorm*mpmath.sqrt(mpmath.mpf(abs(D)))
                    if(c.real()>=0): cs=" "
                    if(printimag==False):
                        if(nd>0):
                            cs=str(c.real()).strip()
                            cs=sci_pretty_print(cs,nd,latex_pow=latex)
                        else:
                            cs=str(c.real())
                    else:
                        cs=cs+str(c)
                    # NOTE(review): DD is not defined in this method (only D) --
                    # with Lvals=True this line raises a NameError; DD was
                    # probably meant to be D.  Compare the sibling method
                    # _list_coefficients_by_discriminant.
                    if(Lvals and self._Lv.keys().count(DD)==1):
                        ls="\t"+str(self._Lv[DD])
                    else:
                        if(latex):
                            ls="\\\\ \n"
                        else:
                            ls=""
                    if(latex):
                        D=self._space.WR.D[r]
                        # NOTE(review): the \frac markup is produced when is_int(D)
                        # holds -- presumably the condition was meant to be
                        # "not is_int(D)"; confirm before relying on the latex output.
                        if(is_int(D)):
                            p=numerator(D); q=denominator(D)
                            sr="\\frac{"+str(p)+"}{"+str(q)+"}"
                        else:
                            sr=str(D)
                        ss=""
                        print "$C"+ss+"("+sr.ljust(w1)+","+str(n).ljust(w3)+")$ & $"+cs+"$"+ls
                    else:
                        print "C^"+ss+"["+str(r).ljust(w1)+"]["+str(n).ljust(w3)+"] = "+cs+ls
        # restore mpmath's precision
        mpmath.mp.dps=mpold
    def _list_coefficients_by_discriminant(self,fd=True,pos=True,neg=True,printimag=False,norm_neg=True,dmin=0,dmax=0,latex=False,nd=0,Lvals=False,prime=False):
        r""" List coefficients indexed by discriminant, i.e. as C(D).
        INPUT:
        -``fd`` -- logical (default True) if set to True only prints coefficients given by fundamental discriminants
        -``pos`` -- logical (default True) if set to True prints positive coefficients
        -``neg`` -- logical (default True) if set to True prints negative coefficients
        -``printimag`` -- logical (default False) if set to True prints imaginary parts (which otherwise are assumed zero)
        -``norm_neg`` -- logical (default True) if we want to normalize the negative coefficients by dividing c(D) with some non-zero c(j) and sqrt(D)
        -``dmin``/``dmax`` -- integers (default 0) bounds on |D| if >0
        -''latex'' logical (default False) set to true if you want a latex table
        -''Lvals'' logical (default False) if True add values from self._Lv in a third column
        -''prime'' -- only list coefficients of discriminant relatively prime to the level
        """
        # sig = -1 for the dual Weil representation (flips the sign of D)
        sig=1
        S="$"
        if(self._space.WR.is_dual()):
            sig=-1
        # maximal discriminant covered by the stored Fourier indices
        maxn=max(self._coeffs[self._coeffs.keys()[0]].keys())
        maxD=self._space.WR.level*(maxn+1)
        N=self._space.WR.N
        # column width for aligned output
        if(dmax>0):
            w1=len(str(dmax))+1
        else:
            w1=len(str(maxD))+1
        w2=max(map(len,str(self._space.WR.D).split()))
        w3=len(str(maxn))+1
        mp0=mpmath.mpf(0)
        mpold=mpmath.mp.dps
        # temporarily raise mpmath's precision so all stored digits print
        if(mpmath.mp.dps < self.maxdigs):
            mpmath.mp.dps=self.maxdigs
        if(norm_neg and neg):
            # find the first non-vanishing coefficient with negative discriminant
            # to normalize the negative coefficients with
            cnorm=0
            tnorm=(0,0)
            for j in range(1,100):
                t=self._space.rn_from_D(-j*sig)
                if(t==None):
                    continue
                if(t[1]+self._space.WR.Qv[t[0]]>=0):
                    continue
                c1=self.get_coefficient(t[0],t[1])
                if(c1 == None):
                    continue
                #print "c1 =",c1
                # If the first coefficient is zero to the precision we assume we shouldn't divide by it
                if(abs(c1)>mpmath.power(10,-self.prec)):
                    cnorm=c1*mpmath.sqrt(j)
                    tnorm=t
                    print "c1=c(",tnorm,")=c(",-j*sig,")=",cnorm
                    break
        # run through positive discriminants first, then negative ones
        for sn in [1,-1]:
            for D in range(1,maxD):
                #print "D=",D
                if(dmin>0 and abs(D)<dmin):
                    continue
                if(dmax>0 and abs(D)>dmax):
                    continue
                DD=sig*D*sn
                # print "D=",D,is_fundamental_discriminant(D)
                if(fd and not is_fundamental_discriminant(DD) and DD<>1):
                    # print "D=",D,is_fundamental_discriminant(D)
                    continue
                if(prime and gcd(D,N)>1):
                    continue
                t=self._space.rn_from_D(DD)
                if(t == None):
                    continue
                else:
                    (r,n)=t
                #print " DD=",DD,t
                # nn = n + q(r) is the "true" (rational) index of the coefficient
                nn=n+self._space.WR.Qv[r]
                if(not pos and nn>=0):
                    continue
                if(not neg and nn<0):
                    continue
                c=self.get_coefficient(r,n)
                cs=""
                erms="";erm=10
                if(c<>0 and c<>None):
                    if(nn>=0): ss="+"
                    if(nn<0):
                        ss="-"
                        if(norm_neg):
                            # normalize: divide by the reference coefficient and
                            # multiply by sqrt(|D|); then measure the distance to
                            # the nearest integer
                            if( ((r,n)<> tnorm) and cnorm<>0):
                                c=c/cnorm*mpmath.sqrt(mpmath.mpf(abs(D)))
                                x=c.real(); x1=floor(x); x2=ceil(x); er1=abs(x1-x); er2=abs(x2-x)
                                erm=min(er1,er2);erms=sci_pretty_print(erm,2,latex_pow=latex)
                    if(erm<0.001):
                        # close enough to an integer: print the integer itself
                        if(er1<er2):
                            cs=str(x1)
                        else:
                            cs=str(x2)
                    elif(printimag==False):
                        if(nd>0):
                            cs=str(c.real()).strip()
                            cs=sci_pretty_print(cs,nd,latex_pow=latex)
                        else:
                            cs=str(c.real())
                    else:
                        if(nd>0):
                            cs=str(c).strip()
                            cs=sci_pretty_print(cs,nd,latex_pow=latex)
                        else:
                            cs=str(c)
                    # pad positive values so columns line up with negative ones
                    if(c.real()>=0 and latex):
                        cs="\hphantom{-}"+cs
                    elif(c.real()>=0):
                        cs=" "+cs
                    if(latex):
                        O=" & "
                        if(Lvals and self._Lv.keys().count(DD)==1):
                            ls="&"+S+sci_pretty_print(self._Lv[DD],nd,latex_pow=latex)+S
                        else:
                            ls=""
                        if(len(erms)==0):
                            s= S+str(DD).center(w1)+S+"&"+S+cs+S+ls+"\\\\"
                        else:
                            s= S+str(DD).center(w1)+S+"&"+S+cs+S+ls+O+S+erms+S+"\\\\"
                    else:
                        if(Lvals and self._Lv.keys().count(DD)==1):
                            ls="\t"+sci_pretty_print(self._Lv[DD],nd)
                        else:
                            ls=""
                        if(len(erms)==0):
                            s= "C^"+ss+"["+str(DD).center(w1)+"] = "+cs+ls
                        else:
                            s= "C^"+ss+"["+str(DD).center(w1)+"] = "+cs+ls+" "+erms+"\n"
                    # s=s+str(self._space.WR.D[r]).ljust(w2)+","+str(n).ljust(w3)+"] = "+cs
                    print s
        # restore mpmath's precision
        mpmath.mp.dps=mpold
def add_lderiv_vals(self,file=None):
r""" Reads values of the corresponding central derivatives: L'(G,chi_D,3/2-k) ""
"""
Lv=dict()
f=open(file,'r')
for line in f:
#print "line=",line
#print "split=",line.split(" ")
l=line.rsplit(" ",1)
if(len(l)==1):
l=line.rsplit("\t",1)
D=l[0].strip(); x=l[1].strip()
Lv[int(D)]=float(x)
self._Lv=Lv
    def find_vanishing_lderivs(self,do_print=True,latex=True,nd=50):
        r""" Returns a list with all discriminants for which it is likely that L'(G,chi_D,3/2-k)=0.

        INPUT:
        -''do_print'' -- logical. NOTE(review): currently unused; output is always printed.
        -''latex''    -- logical. Format the printed tables as latex if True.
        -''nd''       -- integer. Number of digits used in the printed values.

        OUTPUT: list of discriminants D with |L'-value| < 1e-10.
        """
        res=list()
        # S and O are the field/separator strings for the printed table
        if(latex):
            S=" $ "; O=" & "
        else:
            S=" "; O=" "
        if(len(self._Lv.keys())==0):
            return res
        # process discriminants in decreasing order
        L=self._Lv.keys(); L.sort(); L.reverse()
        s=""; sc=""
        ## increase mpmath.mp.dps to print all relevant digits
        mpold=mpmath.mp.dps
        mpmath.mp.dps=self.maxdigs
        for DD in L:
            x=self._Lv[DD]
            if(abs(x)<1E-10):
                #res.append((DD,x))
                res.append(DD)
                s=s+S+str(DD)+S+O+S+sci_pretty_print(self._Lv[DD],nd,latex_pow=latex)+S+"\\\\ \n"
                c=self.get_coefficient(DD)
                if(c<>None):
                    # round the coefficient to the nearest integer and record
                    # the rounding error as a quality measure
                    x=c.real(); x1=floor(x); x2=ceil(x); er1=abs(x1-x); er2=abs(x2-x)
                    erm=min(er1,er2)
                    print "erm(",DD,")=",erm
                    erms=sci_pretty_print(erm,2,latex_pow=latex)
                    if(er1<er2):
                        xi=x1;
                    else:
                        xi=x2
                    #sc=sc+S+str(DD)+S+"\t"+O+S+sci_pretty_print(c.real,nd,latex_pow=latex)+"\\ \n"
                    sc=sc+S+str(DD)+S+O+S+str(xi)+S+O+S+erms+S+"\\\\ \n"
                else:
                    sc=sc+S+str(DD)+S+O+S+" "+S+O+S+" "+S+"\\\\ \n"
        print s
        print sc
        # restore mpmath's precision
        mpmath.mp.dps=mpold
        return res
## def make_table(F,dmin,dmax):
## r"""
## """
## for D in range(dmin,dmax):
## c=F.one_coefficent(D)
## ci=int(c)
def smallest_inf_norm_mpmath(V):
    r"""
    Computes the smallest of the supremum norms of the columns of a matrix.
    INPUT:
    - ``V`` -- matrix (real/complex)
    OUTPUT:
    - ``t`` -- minimum of supremum norms of the columns of V
    EXAMPLE::
    sage: A=mpmath.matrix([['0.1','0.3','1.0'],['7.1','5.5','4.8'],['3.2','4.4','5.6']])
    sage: smallest_inf_norm(A)
    mpf('5.5')
    """
    best = mpmath.mpf(100)
    best_col = 0
    for col in range(V.cols):
        # supremum norm of this column
        col_max = mpmath.mpf(0)
        for row in range(V.rows):
            entry = abs(V[row, col])
            if entry > col_max:
                col_max = entry
        if col_max < best:
            best = col_max
            best_col = col
    return best
def matrix_norm(V,nt='max'):
    r"""
    Compute a norm of the matrix V.
    INPUT:
    - ``V`` -- matrix (real/complex)
    -''nt'' -- string (default 'max'): 'max' for the largest entry in absolute
               value, 'inf'/'Inf' for the maximum row sum, 'L1'/'l1' for the
               maximum column sum.
    OUTPUT:
    - ``(t, ix)`` -- the norm t and, for nt='max', the index (row,col) where it
      is attained (for the other norms ix stays (0,0)).
    EXAMPLE::
    sage: A=mpmath.matrix([['0.1','0.3','1.0'],['7.1','5.5','4.8'],['3.2','4.4','5.6']])
    sage: smallest_inf_norm(A)
    mpf('5.5')
    """
    norm_val = 0
    where = (0, 0)
    if nt == 'max':
        # largest absolute value of any entry
        for col in range(V.cols):
            for row in range(V.rows):
                a = abs(V[row, col])
                if a > norm_val:
                    norm_val = a
                    where = (row, col)
    elif nt in ('inf', 'Inf'):
        # maximum row sum
        for row in range(V.rows):
            row_sum = 0
            for col in range(V.cols):
                row_sum = row_sum + abs(V[row, col])
            if row_sum > norm_val:
                norm_val = row_sum
    elif nt in ('L1', 'l1'):
        # maximum column sum
        for col in range(V.cols):
            col_sum = 0
            for row in range(V.rows):
                col_sum = col_sum + abs(V[row, col])
            if col_sum > norm_val:
                norm_val = col_sum
    else:
        raise NotImplementedError
    return norm_val, where
def is_int(q):
    r"""
    Find out if the rational number q is an integer.

    INPUT:
    -''q'' -- integer/rational/real
    OUTPUT:
    - logical -- True if q is an integer otherwise False

    EXAMPLES::

        sage: is_int(1)
        True
        sage: is_int(float(1.0))
        True
        sage: is_int(RR(1.0))
        True
        sage: is_int(6/3)
        True
        sage: is_int(6/4)
        False
        sage: is_int(1.5)
        False
        sage: is_int(Gamma0(1))
        False
    """
    # check the plain Python int first so the common case short-circuits
    if(isinstance(q,int) or isinstance(q,Integer)):
        return True
    if(isinstance(q,Rational)):
        # a rational is integral iff its (reduced) denominator is 1
        n=q.denominator()
        if(n==1):
            return True
    if(isinstance(q,tuple)):
        return False
    try:
        # generic test: q is integral iff floor(q)==ceil(q)
        if(floor(q)==ceil(q)):
            return True
    except Exception:
        # floor/ceil undefined for q (e.g. a group element) => not an integer.
        # (The original used a bare except, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        pass
    return False
def get_list_of_forms(Nmin,Nmax,compute=False):
    r""" Compute a whole list of various forms.

    INPUT:
    -''Nmin'',''Nmax'' -- integers. Range of levels N to treat (Nmax exclusive).
    -''compute'' -- logical. If True, compute and save forms whose files do not
                    exist yet; if False, only load previously saved forms.

    OUTPUT: dict {N: form element} for the levels where a form was obtained.
    Interrupting with Ctrl-C returns the forms collected so far.
    """
    FL=dict()
    try:
        for N in range(Nmin,Nmax):
            print "N=",N
            M=VVHarmonicWeakMaassForms(int(N),-0.5,75,dr=False,verbose=1)
            print "minPP=",M.smallest_pp()
            print "Compute form on ",M
            #s="FN"+str(N)+"-DR-D"+str(F.get_principal_part(str=True))+".sobj"
            # file name encodes the level and the smallest principal part
            s="FN"+str(N)+"-DR-D"+str(M.smallest_pp().keys()[0])+".sobj"
            print "trying :",s
            try:
                F=load(s)
            except IOError:
                # the file did not exist
                if(compute):
                    F=M.get_element(maxD=500)
                    save(F,s)
                else:
                    continue
            FL[N]=F
    except KeyboardInterrupt:
        # allow the user to stop a long run and keep what was found so far
        pass
    return FL
def check_relevant_forms(L):
r"""
Filter out those forms which might correspond to newforms on
Gamma0(N)
"""
L2=list()
for F in L.values():
#needed_ev=(
S=F.shim_corr
#print "S=",S
ok_ev=0
for g in S:
if(g.atkin_lehner_eigenvalue()==-1):
ok_ev=ok_ev+1
if(ok_ev>0):
print "Number of ok forms on ",F.space.WR.N," :",ok_ev
F.list_coefficents('D',fd=True,neg=False,nmin=0,nmax=1000,latex=False,nd=50,prime=True)
L2.append(F)
return L2
#__main__.VVHarmonicWeakMaassForms=VVHarmonicWeakMaassForms
#__main__.WeilRepDiscriminantForm=WeilRepDiscriminantForm
#__main__.HarmonicWeakMaassFormElement=HarmonicWeakMaassFormElement
| Python |
from inc_gamma import incgamma_int,incgamma_hint,pochammer
from mysubgroups_alg import SL2Z_elt,factor_matrix_in_sl2z,ncf_to_SL2Z_element
from permutation_alg import MyPermutation,MyPermutationIterator
from mysubgroup import MySubgroup,HeckeTriangleGroup,nearest_integer_continued_fraction
from maass_forms import MaassWaveForms,EisensteinSeries,scattering_determinant_Hecke_triangle
from lpkbessel import besselk_dp
from hilbert_modular_group_alg import Hn
from weil_rep_simple import WeilRepDiscriminantForm
#from eisenstein_series import *
#from pullback_algorithms import *
#from maass_forms_alg import *
#from maass_forms_phase2 import *
#from automorphic_forms_alg import *
from multiplier_systems import MultiplierSystem,TrivialMultiplier,ThetaMultiplier,EtaMultiplier,TestMultiplier,MultiplierByGenerator,InducedRepresentationMultiplier,WeilRepMultiplier,EtaQuotientMultiplier
from automorphic_forms import AutomorphicFormSpace,HalfIntegralWeightForms,HarmonicWeakMaassForms,HolomorphicModularForms,WeakModularForm,HarmonicWeakMaassForm
from poincare_series import PoincareSeries
#from poincare_series_alg import *
#from vv_harmonic_weak_maass_forms_alg import *
#from plot_dom import draw_fundamental_domain
from vv_harmonic_weak_maass_forms import VVHarmonicWeakMaassForms
| Python |
from prime import primes_of_bounded_norm
from misc import F, a
| Python |
from sage.rings.all import NumberField, QQ
# Construct the real quadratic field F = Q[x]/(x^2 - x - 1) with generator a,
# i.e. the field generated by a root of the golden-ratio polynomial.
R = QQ['x']
x = R.gen()
F = NumberField(x*x - x- 1, 'a')
a = F.gen()
| Python |
import sqrt5
| Python |
from modform import *
from matrix import *
from modules import *
from rings import *
from function_fields import FunctionField
from ellff import ellff_EllipticCurve
import number_fields
from zfunctions import *
| Python |
#################################################################################
#
# (c) Copyright 2010 Fredrik Stroemberg
#
# This file is part of PSAGE
#
# PSAGE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PSAGE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
from sage.rings.complex_mpc import MPComplexField
from sage.all import ZZ,MatrixSpace,Matrix
from linalg_complex_dense import *
from matrix_complex_dense import *
def test_eigenvalues(prec=100,nmax=10,dimmax=10):
    r"""
    Test the eigenvalue computations for some random matrices.

    INPUT:
    - ``prec``   -- integer (default 100). Bits of precision for the complex field.
    - ``nmax``   -- integer (default 10). Number of random matrices to test.
    - ``dimmax`` -- integer (default 10). Exclusive upper bound for the random dimension.

    Raises an AssertionError if the eigenvalues computed by A.eigenvalues()
    differ from the known ones by more than 100*A.eps().
    """
    F = MPComplexField(prec)
    # one random dimension is drawn and reused for all nmax test matrices
    dim = ZZ.random_element(2,dimmax)
    M = MatrixSpace(F,dim)
    for n in range(nmax):
        # A is constructed with known eigenvalue list l (see random_matrix_eigenvalues)
        A,U,l=random_matrix_eigenvalues(F,dim)
        ev = A.eigenvalues()
        # compare the two eigenvalue lists entrywise after sorting
        ev.sort(); l.sort()
        test = max([abs(ev[j]-l[j]) for j in range(len(ev))])
        assert test < A.eps()*100
##
## Helper functions
##
def random_matrix_eigenvalues(F,n):
    r"""
    Give a random matrix together with its eigenvalues.

    INPUT:
    - ``F`` -- field (e.g. an MPComplexField)
    - ``n`` -- integer. Dimension of the matrix.

    OUTPUT: tuple (A,U,l) where
    - ``A`` -- an n x n normal matrix over F with prescribed eigenvalues,
    - ``U`` -- the unitary matrix used in the conjugation A = U*D*U^*,
    - ``l`` -- list of the eigenvalues of A.
    """
    l=list()
    M = MatrixSpace(F,n)
    U = Matrix(F,n)
    # D is the diagonal matrix holding the randomly chosen eigenvalues
    D = Matrix(F,n)
    for i in xrange(n):
        x = F.random_element()
        l.append(x)
        D[i,i]=x
    # now we need a unitary matrix:
    # make a random choice of vectors and use Gram-Schmidt to orthogonolize
    U = random_unitary_matrix(F,n)
    UT = U.transpose().conjugate()
    # A = U D U^* is normal with eigenvalues exactly l
    A = U*D*UT
    # NOTE(review): my_abscmp is not defined in this file -- presumably it
    # orders by absolute value; confirm where it is imported from.
    l.sort(cmp=my_abscmp)
    return A,U,l
| Python |
#
from matrix_complex_dense import Matrix_complex_dense
| Python |
# -*- coding: utf-8 -*-
r"""
Selberg Zeta function and transfer operator for Hecke triangle groups.
This file contains the classes SelbergZeta and TransferOperator and methods to compute the values of the Selberg Z-function on Hecke triangle groups using the transfer operator method.
For an explanation of the algorithm see F. Str\"omberg, 'Computation of Selberg zeta functions on Hecke triangle groups', Arxiv:...
AUTHORS:
- Fredrik Stroemberg (2012-05-15)
NOTE: If used with the implicit error bounds (i.e. iterate until the desired precision is reached) the methods might take *very* long time.
EXAMPLES::
sage: Z=SelbergZeta(3,verbose=0,working_prec=249,delta=1e-7)
sage: st8=Z.make_table_phi(prec=249,M0=150,N=2,outprec=66)
"""
#*****************************************************************************
# Copyright (C) 2012 Fredrik Strömberg <fredrik314@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
import mpmath as mpmath
from sage.all import Parent,RR,ZZ,QQ,is_even,matrix,zeta,is_odd,is_even,ceil,log_b,log,gamma,tan,cos,sin,latex,CyclotomicField
from selberg_z_alg import *
from psage.rings import mpc_pochammer
from sage.rings.complex_mpc import MPComplexField
from sage.rings.complex_field import ComplexField
from psage.matrix import Matrix_complex_dense
from sage.all import MatrixSpace,sign,binomial
from psage.modform.maass.maass_forms import scattering_determinant_sl2z,scattering_determinant_Hecke_triangle
from sage.misc.sage_timeit import sage_timeit
from sage.parallel.decorate import *
def SelbergZ(q,verbose=0,working_prec=103,digits=5,delta=1e-5,**kwds):
    r"""
    Convenience constructor: forward all arguments to :class:`SelbergZeta`.
    """
    return SelbergZeta(q, verbose, working_prec, digits, delta, **kwds)
class SelbergZeta(Parent):
r"""
The Selberg zeta function of a Fuchsian group.
So far, only implemented for Hecke triangle groups.
EXAMPLE::
sage: F=MPComplexNumbers(103)
sage: R=F(9.5336952613535575543442352359287703238212563951072519823757904641353489912983477817692555099754353664930447678582858545070606)
sage: s=R+F(0.5)
"""
    def __init__(self,q,verbose=0,working_prec=103,digits=5,delta=1e-5,**kwds):
        r""" Initalize the Selberg zeta function for the Hecke triangle group G_q.
        INPUT::
        - `q` -- integer (parameter for the Hecke triangle group)
        - `working_prec` -- integer. The number of bits for initial working precision. This might need to be increased to reach desired precision.
        - `digits` -- integer. The number of digits we want the values to be accurate to.
        - `delta` -- positive real number. The maximum distance of eigenvalues treated as the same eigenvalue.
        - `verbose` -- integer. Set the verbosity of self.
        """
        self._q=q
        # the transfer operator for G_q; its Fredholm determinant gives Z(s)
        T=TransferOperator(q,verbose=verbose,prec=working_prec)
        self._transfer_operator=T
        # safety bound on iterations (not used directly in value())
        self._MAXIT=10000
        self._verbose=verbose
        self._working_prec=working_prec
        self._delta=delta
        # target absolute accuracy corresponding to `digits`
        self._eps = 10.0**(-digits)
        #if self._eps < 2.0**(1-prec):
        #    print "Not high enough precision to obtain eps={0}".format(self._eps)
        # delta cannot be finer than the working precision allows
        if self._delta < 2.0**(1-working_prec):
            print "Not high enough precision to obtain delta={0}".format(self._delta)
            self._delta = 2.0**(3-working_prec)
        # number of decimal digits corresponding to eps
        self._deltad=ceil(abs(log_b(self._eps,10)))
        #self._epsd=digits #ceil(abs(log_b(self._eps,10)))
def __repr__(self):
r""" Return string representation of self. """
s="Selberg Zeta function of the Hecke triange group G_%s."%(self._q)
return s
def __call__(self,s,**kwds):
r"""
Compute the value of Z(s).
INPUT:
- `s` -- complex number
- `M0` -- integer : the starting value of the rank of the approximation.
- `Nh` -- integer: the starting value of the differens in ranks of the approximations used.
- `get_digits` -- integer: set to 0 if you just want to run the algorithm once and se which precision you get.
Otherwise set to D if you want to keep iterating until D digits of precision is estimated.
- `get_err` -- integer : set to 0 if you just want to return the value and no error estimates in the case of get_digits>0
"""
return self.value(s,**kwds)
    def value(self,s,N=0,Nh=0,sym=1,checks=1,get_digits=0, get_eps=0,get_err=1,ret_param=0,prec=0,approx=0,A1_in=None,A2_in=None,get_evs=0,verbose=0):
        r"""
        Compute the value of Z(s).
        INPUT::
        - `s` -- complex number
        - `N` -- integer (default: computed). The starting value of the rank of the approximation.
        - `Nh` -- integer (default 3). The starting value of the difference in ranks of the approximations used.
        - `get_digits` -- integer (default 1). Possible values
            0 -- if you just want to run the algorithm once and see which precision you get.
            D -- integer >0. If you want to keep iterating until D digits of precision is estimated.
        - get_eps -- real. Alternative to get_digits. Specify the precision you want.
        - `get_err` -- integer : set to 0 if you just want to return the value and no error estimates in the case of get_digits>0
        - `checks` -- integer. Possible values:
          - 1. Return the tuple:
               z,eps
          - 2. Return the tuple:
               z,k,eps,delta
          - >=3: Return the tuple:
               z,k,eps,delta,err
          Here:
              - z =Z(s)
              - k = number of eigenvalues used
              - eps = abs. value of smallest used eigenvalue
              - delta = max relative error in the used eigenvalues
              - err = |phi_Z(s)-phi_E(s)|
                where phi_Z is the scattering determinant, computed using Z
                and phi_E is the same, but computed using either the explicit
                formula, if q=3, or the Eisenstein series otherwise.
        - `ret_param` -- integer. Set to 1 if you want to return the N and precision used in a second tuple.
        -`get_evs` -- return a list of verified eigenvalues of the transfer operator of self.
        """
        # l = difference in rank between the two approximations compared below
        l=3
        self.set_prec(s,prec)
        verbose=max(self._verbose,verbose)
        current_prec=self._working_prec
        CCF = ComplexField(current_prec)
        CF = MPComplexField(current_prec)
        # coerce s into the MPComplexField of the current working precision,
        # keeping the original value in s0 so we can re-coerce after raising precision
        s0 = CCF(s.real(),s.imag())
        s = CF(s0.real(),s0.imag())
        s0=s
        if get_digits>0 or get_eps>0:
            if get_eps==0:
                eps=10.0**(-get_digits)
            else:
                eps=get_eps
            if verbose>0:
                print "eps=",eps
        else:
            # eps=1.0 together with the break below means: run exactly once
            eps = 1.0
        delta_eps = 1
        err = 1
        # starting rank of the matrix approximation
        if N>0:
            M00 = N
        else:
            M00 = ceil(abs(s.imag())*1.5)
        if Nh>0:
            Nh0=Nh
        else:
            Nh0=10
        M=M00
        err_old = 1
        M_new=0
        if verbose>0:
            print "l=",l
        if A1_in<>None or A2_in<>None:
            # only used to compare types of the user-supplied approximations
            A_tmp = self._transfer_operator.matrix_approximation(s,1)
        else:
            A_tmp = 0
        # main refinement loop: increase rank M and/or precision until the error
        # estimate falls below eps
        for mj in range(1000):
            #self._working_prec=current_prec
            #s_hp = MPComplexField(self._working_prec)(s.real(),s.imag())
            # A1 is the rank-M approximation (possibly split by symmetry)
            if A1_in<>None and type(A1_in)==type(A_tmp):
                A1 = A1_in
            elif approx==3:
                A1=self._transfer_operator._approximation3(s,M)
            else:
                if sym==1:
                    A1plus=self._transfer_operator.matrix_approximation(s,M,sym=sym,eps=1)
                    A1minus=self._transfer_operator.matrix_approximation(s,M,sym=sym,eps=-1)
                else:
                    A1=self._transfer_operator.matrix_approximation(s,M,sym=sym)
            # A2 is the rank-(M+l) approximation used to validate the eigenvalues of A1
            if A2_in<>None and type(A2_in)==type(A_tmp):
                A2 = A2_in
            elif approx==3:
                A2=self._transfer_operator._approximation3(s,M+l)
            else:
                if sym==1:
                    A2plus=self._transfer_operator.matrix_approximation(s,M+l,sym=sym,eps=1)
                    A2minus=self._transfer_operator.matrix_approximation(s,M+l,sym=sym,eps=-1)
                else:
                    A2=self._transfer_operator.matrix_approximation(s,M+l,sym=sym)
            if sym==1:
                # combine the eigenvalues of the even and odd parts
                ev1plus=A1plus.eigenvalues(sorted=1)
                ev1minus=A1minus.eigenvalues(sorted=1)
                ev2plus=A2plus.eigenvalues(sorted=1)
                ev2minus=A2minus.eigenvalues(sorted=1)
                ev1 = ev1plus; ev1.extend(ev1minus)
                ev2 = ev2plus; ev2.extend(ev2minus)
            else:
                ev1=A1.eigenvalues(sorted=1)
                ev2=A2.eigenvalues(sorted=1)
            if verbose>2:
                print "ev1=",ev1
                print "ev2=",ev2
            # keep only eigenvalues which agree between the two ranks to within
            # self._delta; these are considered "verified"
            if checks>=1:
                evs,delta_eps = get_close_values(ev1,ev2,self._delta,ret_err=1)
            else:
                evs = get_close_values(ev1,ev2,self._delta,ret_err=0,verbose=verbose)
            if verbose>1:
                print "evs=",evs
            if verbose>0:
                print "len(evs)=",len(evs)
            # z = det(1-L) = prod (1 - lambda_j) over the verified eigenvalues
            z=CF(1)
            for d in evs:
                z*=(1-d)
            if verbose>0:
                print "M=",M
            # We know that the "true" eigenvalues decrease to 0
            # Hence the error can be estimated by the smallest
            # "true" eigenvalue
            if len(evs)>0:
                err = min(map(abs,evs))
            else:
                err=1
            if verbose>0:
                print "Error estimate={0}".format(err)
                print "Desired eps={0}".format(eps)
                print "det(1-L)=",z
                ## Also print the quotient
                k=self._transfer_operator.det_1_minus_K(s,prec=current_prec)
                print "det(1-L)/det(1-K)=",z/k
            if get_evs==1:
                return evs
            if err<eps or (get_digits==0 and get_eps==0):
                if verbose>0:
                    print "exit loop: Error={0} less than eps={1}".format(err,eps)
                break
            ## We now have to decide if we need to raise precision
            ## To choose a precision and appriximation size is the most difficult part of the algorithm
            inc_prec=0; inc_appr=0
            ## The first time we always increase the approximation
            ## so we can get a better estimate of the error
            if err_old==1:
                inc_appr=1; err_old = err
                M_old = M
            elif M_new>0:
                ## If we have already tried to increase the approximation
                ## to an estimated 'good' value without success
                ## we try to increase the precision
                inc_prec=1
                if verbose>0:
                    print "inc_prec=",inc_prec
            else:
                # model the error as err ~ C * r^M and solve for the rank which
                # would reach eps
                r_test = exp(log(err_old/err)/(M_old-M))
                C_test = err / r_test**M
                if verbose>0:
                    print "r_test=",r_test
                    print "C_test=",C_test
                if r_test<1:
                    M_new = ceil(abs(log(C_test/eps)/log(r_test)))
                    if M_new > 1.5*M:
                        M_new = M
                    if verbose>0:
                        print "M_new=",M_new
                    if M_new > M:
                        Nh0=M_new-M
                        inc_appr=0
                else:
                    inc_prec=1
            # too few verified eigenvalues => precision is the bottleneck
            if len(evs)<self._deltad:
                inc_prec=1
            #if (mj % 2) == 1: ## Every 2 times we also increase
            #    inc_appr=1
            if inc_appr==0 and inc_prec==0:
                inc_appr=1; Nh0=5
            if inc_appr==1:
                if verbose>0:
                    print "Adding to M! M={0} Mplus={1}".format(M,Nh0)
                M += Nh0
            if inc_prec==1:
                current_prec+=20
                CF = MPComplexField(current_prec)
                if verbose>0:
                    print "raising number of bits to {0}".format(current_prec)
                s = CF(s0)
            if verbose>0:
                print "j={0}".format(mj)
        if verbose>0:
            if mj>=999:
                print "warning: error={0}".format(err)
            print "det(1-L)=",z
        # divide out the K-contribution: Z(s)-related value is det(1-L)/det(1-K)
        k=self._transfer_operator.det_1_minus_K(s,prec=current_prec)
        if verbose>0:
            print "det(1-K)=",k
            print "get_err=",get_err
            print "prec=",prec
            print "current_prec=",current_prec
            if hasattr(s,"prec"):
                print "s.prec()=",s.prec()
        z = z/k
        if checks==1:
            if ret_param==0:
                return z,len(evs),float(err),float(delta_eps)
            else:
                return (z,len(evs),float(err),float(delta_eps)),(M,current_prec)
        elif checks>=2: #get_err>0:
            #if get_err==1: # Use error estimate from |lambda_K|
            #    return z,float(err)
            #else:
            ## Use scattering determinant
            if s.real()<>0.5:
                if verbose>0:
                    print "computing L(1-s) with M=",M
                z2 =self.value(1-s,N=M,Nh=Nh,sym=sym,checks=0,get_digits=get_digits,get_eps=get_eps,get_err=0,prec=prec,approx=approx)[0]
            else:
                # on the critical line Z(1-s) = conj(Z(s))
                z2 = z.conjugate()
            p = self._psi(s,prec=prec)
            ph = z2/z/p
            if verbose>0:
                print "psi=",p
                print "ph=",ph
            if self._q in [3,4,6]:
                p1=scattering_determinant_Hecke_triangle(s,self._q,prec=current_prec)
            else:
                # choose a precision for the Eisenstein-series computation that
                # matches the estimated accuracy of z
                ndig = max(16,ceil(abs(log_b(err,10))))
                if get_digits>0:
                    ndig = min(get_digits,ndig)
                if get_eps>0:
                    ndig = min(ndig,ceil(abs(log_b(get_eps,10))))
                pprec=dps_to_prec(ndig)
                if verbose>0:
                    print "Compute Eisen with prec=",pprec
                p1=scattering_determinant_Hecke_triangle(s,self._q,prec=pprec,use_eisenstein=1)
            if verbose>0:
                print "phi=",p1
            er = abs(p1-ph)
            if verbose>0:
                print "er=",er
            lev = len(evs)
            err = float(RR(err))
            delta_eps = float(RR(delta_eps))
            er = float(RR(er))
            if ret_param==0:
                return (z,lev,err,delta_eps,er)
            else:
                return (z,lev,err,delta_eps,er),(M,current_prec)
        elif get_err==0 and get_digits>0:
            if ret_param==0:
                return z
            else:
                # NOTE(review): lev and er are not defined on this code path
                # (they are only set in the checks>=2 branch) -- reaching this
                # return with ret_param=1 raises a NameError.
                return (z,lev,err,delta_eps,er),(M,current_prec)
        else:
            if ret_param==0:
                return z,float(err)
            else:
                return (z,float(err)),(M,current_prec)
def scattering_determinant(self,s,N=0,Nh=0,sym=1,checks=1,get_digits=0, get_err=1,ret_param=0,prec=0,approx=0,outprec=0,verbose=0):
#prec=0,M0_start=0,Nh_start=0,do_once=0,checks=0,outprec=0):
r"""
Use the functional equation of Z(s) to compute the scattering determinant.
If checks=1 we return the tuple:
[phi,k,eps,delta]
where phi=phi(s)
- k = number of eigenvalues used
- eps = abs. value of smallest used eigenvalue
- delta = max relative error in the used eigenvalues
"""
old_prec = self._working_prec
if self._working_prec<prec:
self._working_prec=prec
CF = ComplexField(self._working_prec)
s = CF(s)
l = self.value(s,N=N,Nh=Nh,sym=sym,checks=1,get_digits=get_digits, get_err=get_err,ret_param=ret_param,prec=prec,approx=approx,verbose=verbose)
if ret_param==1:
M,pprec = l[1]
l = l[0]
if verbose>0:
print "M,pprec=",M,pprec
if verbose>0:
print "self.value=",l
p = self._psi(s,prec=self._working_prec)
if verbose>0:
print "self._psi={0} with prec={1}".format(p,p.prec())
if checks>=1:
z=l[0]; K=l[1];er=l[2]; delta=l[3]
elif get_err<>0:
z = l[0]
else:
z=l
if s.real()<>0.5:
if verbose>0:
print "Computing L(1-s) with N=",N
z2 = self.value(CF(1)-s,N=N,Nh=Nh,sym=sym,checks=checks,get_digits=get_digits, get_err=ge_err,ret_param=0,prec=prec,approx=approx,verbose=verbose)
#self.value(1-s,M0=M0_start,Mh=Mh_start,checks=0)
else:
z2 = z.conjugate()
if verbose>0:
print "working_prec=",self._working_prec
if z.prec()<>self._working_prec:
z = CF(z.real(),z.imag())
if z2.prec()<>self._working_prec:
z2 = CF(z2.real(),z2.imag())
ph = z2/z/p
ph = CF(ph.real(),ph.imag())
self._working_prec = old_prec
if checks==0:
if ret_param==0:
return ph
else:
return ph,(M,pprec)
else:
if self._q in [3,4,6]:
p1=scattering_determinant_Hecke_triangle(s,self._q,prec=prec)
else:
p1=0
if verbose>0:
print "p1=",p1
RF = RealField(3)
er1=RF(abs(p1-ph))
ndig = max(16,ceil(abs(log_b(er1,10))))
if get_digits>0:
ndig = min(get_digits,ndig)
eisen_prec=dps_to_prec(ndig)
if eisen_prec<53:
eisen_prec=53 ## We want to use at least double precision
if verbose>0:
print "Need {0} digits.".format(ndig)
print "Compute Eisen with prec=",eisen_prec
p2 = scattering_determinant_Hecke_triangle(s,self._q,prec=eisen_prec,use_eisenstein=1)
if verbose>0:
print "p2=",p2
er2=RF(abs(p2-ph))
er =RF(er)
delta = RF(delta)
if outprec>0:
CF = ComplexField(outprec)
ph = CF(ph.real(),ph.imag())
if ret_param==0:
return ph,K,er,delta,er1,er2
else:
return (ph,K,er,delta,er1,er2),(M,pprec)
def set_prec(self,s=None,prec=0):
    r"""
    Set the working precision of self.

    INPUT:
    - `s` -- object whose precision (if any) should be adopted.
    - `prec` -- integer. Explicit precision in bits; if 0 the precision of
      ``s`` is used when available, otherwise the current working precision
      is kept (e.g. when ``s`` is symbolic such as 1 + I).
    """
    new_prec = prec
    if new_prec == 0:
        new_prec = s.prec() if hasattr(s, "prec") else self._working_prec
    self._working_prec = new_prec
def Theta(self,t,branch=0):
    r"""
    Return the argument of Z(1/2+it).

    INPUT:
    - `t` -- real number.
    - `branch` -- 0: use the principal branch; otherwise use a branch cut
      along the positive real axis.
    """
    self.set_prec(t)
    CF = ComplexField(self._working_prec)
    RF = CF.base_ring()
    point = CF(0.5,t)
    val = scattering_determinant_Hecke_triangle(point,self._q,use_eisenstein=0)
    val = val*self._psi(point)
    angle = val.argument()
    # With a cut along the positive axis, arguments in (-pi,0) are shifted
    # up by 2*pi so that the result lies in [0,2*pi).
    if branch != 0 and val.imag() < 0:
        angle = angle + RF(2)*RF.pi()
    return angle
def RealZ(self,t,M0,Mh=3,do_once=1,checks=0,branch=0):
    r"""
    Compute the rotated (nominally real) Selberg zeta function at 1/2+it.

    NOTE(review): this definition is shadowed by a later method also named
    ``RealZ`` in this class (defined further down with a different
    signature), so this version is unreachable on instances -- confirm and
    remove one of the two.
    """
    self.set_prec(t)
    CF = ComplexField(self._working_prec)
    s = CF(0.5,t)
    z=self.value(s,M0_start=M0,Mh_start=Mh,do_once=do_once,checks=checks)
    th = self.Theta(t,branch)
    # Rotate Z(1/2+it) by half the phase Theta(t).
    z = z*CF(0,0.5*th).exp()
    return z
def values_in_range(self,t0=5,t1=10,N=100,M0=0,prec=0):
    r"""
    Compute the values of Z(1/2+it) for t in a specific range.
    INPUT:
    - `t0`, `t1` -- real numbers. Endpoints of the interval.
    - `N` -- integer. Number of sample points.
    - `M0` -- integer. Initial approximation size (reset to 0 below).
    - `prec` -- integer. Working precision in bits.

    NOTE(review): two apparent problems below -- confirm:
    - ``set_prec(prec,None)`` passes ``prec`` as the ``s`` argument and
      ``None`` as the precision, which sets self._working_prec to None;
    - ``znew,M1 = self.RealZ(t,M0,...)`` unpacks two values, but RealZ
      returns a single value (or a (value,err) pair only if get_err>=1).
    """
    if prec>0: self.set_prec(prec,None)
    CF = ComplexField(self._working_prec)
    RF = CF.base_ring()
    t0 = RF(t0); t1=RF(t1)
    h = (t1-t0)/RF(N)    # step size
    M0 = 0
    zold = CF(1)
    res=[]
    branch = 0
    for i in range(N):
        t = t0+h*i
        znew,M1 = self.RealZ(t,M0,branch=branch)
        res.append(znew)
        if i==0:
            zold=znew
            continue
        # The commented-out code below was presumably an experimental
        # branch-tracking heuristic near zeros of Z -- left as found.
        # if abs(znew)<0.5*h:
        #     ## We are close to a zero and keep the current branch
        #     branch = 0
        # if abs(zold)<0.5*h:
        #     ## We might have passed through a zero and if so use same branch
        #     slope = (znew-zold).real()/h
        # if abs(zold-znew)>h:
        zold = znew
    return res
def make_table_phi(self,prec=0,N=50,ls=1,lf=10,target_dig=0,get_times=0,outprec=63,verbose=0):
    r"""
    Produce a LaTeX table of values and error estimates of self.
    INPUT:
    - ``prec`` -- integer. Set the working precision.
    - `N` -- integer. The approximation size to use.
    - `ls` -- integer. First row (height t) of the table.
    - `lf` -- integer. Last row (height t) of the table.
    - `target_dig` -- integer. If >0 we run the algorithm until target_dig (decimal) digits of precision is found
    - `get_times` -- integer. If set, also time each row computation.
    - `outprec` -- integer (the precision to use for the output)
    OUTPUT:
    A string.
    """
    st=""
    precold = self._working_prec
    if prec>0:
        self._working_prec=prec
    # The argument tuples match the (commented-out) @parallel decorator on
    # make_table_row, which would be called once per tuple.
    row_args=[]
    rows={}
    for l in range(ls,lf+1):
        row_args.append((l,prec,N,target_dig,get_times,outprec,verbose))
        #stl = self.make_table_row(l,prec=prec,N=N,target_dig=target_dig,get_times=get_times,outprec=outprec,verbose=verbose)
    # NOTE(review): with the @parallel decorator disabled, make_table_row is
    # called once with the whole list (it unpacks only the first tuple) --
    # confirm this is intended for the serial code path.
    rows = sorted(list(self.make_table_row(row_args)))
    st=""
    print "rows=",rows
    for row in rows:
        if verbose>0:
            print "row=",row
        if len(row[1])>1:
            l,stl = row[1]
        else:
            # NOTE(review): in this branch ``stl`` is never assigned, so the
            # uses below would raise NameError on the first row -- confirm.
            l = row[1]
        if verbose>0:
            print "l=",l
            print "stl=",stl
        st+=stl
    self._working_prec=precold
    return st
# @fork(verbose=True)
#@parallel(p_iter='fork',ncpus=4)
def make_table_row(self,l,prec=0,N=50,target_dig=0,get_times=0,outprec=63,verbose=0):
    r"""
    Make one row of the table.

    INPUT:
    - `l` -- real number (the height t of s=1/2+it), or -- when called in
      parallel style -- a list of argument tuples of which only the first
      is unpacked below.
    - remaining parameters as in ``make_table_phi``.

    OUTPUT:
    - tuple (l, LaTeX string for one table row).
    """
    CF = ComplexField(self._working_prec)
    # Support being called with a list of argument tuples (parallel-style).
    if isinstance(l,list) and isinstance(l[0],tuple):
        ll,prec,N,target_dig,get_times,outprec,verbose=l[0]
        l = ll
    #if verbose>0:
    print "l=",l
    s = CF(0.5,l)
    if verbose>0:
        print "Making table row for s={0} with N={1} and prec={2}".format(s,N,self._working_prec)
    if target_dig==0:
        ll = self.scattering_determinant(s,N=N,checks=3,outprec=outprec,verbose=verbose-1)
    else:
        # With a digit target we also retrieve the parameters (M,prec)
        # actually used and enlarge N / working precision accordingly.
        ll = self.scattering_determinant(s,N=N,checks=3,get_digits=target_dig,outprec=outprec,ret_param=1,verbose=verbose-1)
        M,pprec = ll[1]
        ll = ll[0]
        if M>N:
            N = M
        if verbose>0:
            print "Increased: Got M ={0} and pprec= {1}.".format(M,pprec)
        if pprec>self._working_prec:
            self._working_prec=pprec
        pprec = latex(pprec)
    if get_times:
        if verbose>0:
            print "ROW: {0}".format(ll)
            print "We are now recomputing in order to get the time."
        # sage_timeit needs the names in a globals dict; inject them there.
        globals_dict = globals()
        globals_dict['self']=self
        globals_dict['s1']=ComplexField(self._working_prec)(0.5,l)
        globals_dict['N']=N
        globals_dict['outprec']=outprec
        stime = sage_timeit('self.scattering_determinant(s1,N=N,checks=0,outprec=outprec,verbose=0)',
        globals_dict=globals_dict,seconds=True,number=1,repeat=1)
    else:
        stime=""
    z,K,er,delta,er1,er2=map(latex,ll)
    # For q in [3,4,6] the explicit scattering determinant is available, so
    # the row carries an extra comparison column (er1).
    if target_dig==0:
        if self._q in [3,4,6]:
            stl="${0}$ & ${1}$ & ${2}$ & ${3}$ & ${4}$ & ${5}$ & ${6}$\\\\ \n".format(l,z,er1,er2,K,er,delta)
        else:
            stl="${0}$ & ${1}$ & ${2}$ & ${3}$ & ${4}$ & ${5}$ & ${6}$\\\\ \n".format(l,z,er2,K,er,delta)
    else:
        if self._q in [3,4,6]:
            stl="${0}$ & ${1}$ & ${2}$ & ${3}$ & ${4}$ & ${5}$ & ${6}$ & ${7}$ & ${8}$ & ${9}$\\\\ \n".format(l,z,er1,er2,K,er,delta,M,pprec,stime)
        else:
            stl="${0}$ & ${1}$ & ${2}$ & ${3}$ & ${4}$ & ${5}$ & ${6}$ & ${7}$ & ${8}$ \\\\ \n".format(l,z,er2,K,er,delta,M,pprec,stime)
    if verbose>0:
        print "stl=",stl
    return l,stl
def make_table_phi2(self,s,prec_list=[53,83,166,249],N1=10,N2=70,outprec=63,time=1,verbose=0):
    r"""
    Produce a LaTeX table of values and error estimates of self for precision and N in ranges and fixed s.
    INPUT:
    - `s` -- complex number. The fixed argument.
    - `prec_list` -- list of integers. Working precisions to tabulate.
    - `N1`, `N2` -- integers. Range of approximation sizes (step 10).
    - `outprec` -- integer (the precision to use for the output)
    - `time` -- integer. If 1, time each computation with sage_timeit.
    OUTPUT:
    A string.
    """
    st=" s = {0} \n".format(s)
    precold = self._working_prec
    for p in prec_list:
        if p <2:
            continue
        self._working_prec = p
        s1 = ComplexField(p)(s.real(),s.imag())
        for N in range(N1,N2+1,10):
            if verbose>0:
                print "Calculate for prec={0} and N={1}".format(p,N)
            # Only print the precision in the first row of each precision group.
            if N == N1:
                p0 = p
            else:
                p0 =""
            ll = self.scattering_determinant(s1,N=N,checks=3,outprec=outprec)
            z,K,er,delta,er1,er2=map(latex,ll)
            if time==1:
                ## Do a timeit for the proces without checks
                globals_dict = globals()
                globals_dict['self']=self
                globals_dict['s1']=s1
                globals_dict['N']=N
                globals_dict['outprec']=outprec
                stime = sage_timeit('self.scattering_determinant(s1,N=N,checks=0,outprec=outprec,verbose=0)',
                globals_dict=globals_dict,seconds=True)
                #{'self':self,'s1':s1,'N':N,'outprec':outprec},seconds=True)
            else:
                stime = ""
            if self._q in [3,4,6]:
                st+="${0}$ & ${1}$ & ${2}$ & ${3}$ & ${4}$ & ${5}$ & ${6}$ & ${7}$ \\\\ \n".format(p0,N,z,er1,K,er,delta,stime)
            else:
                st+="${0}$ & ${1}$ & ${2}$ & ${3}$ & ${4}$ & ${5}$ & ${6}$ & ${7}$ \\\\ \n".format(p0,N,z,er2,K,er,delta,stime)
    self._working_prec=precold
    return st
def test_functional_equation(self,s,prec=0,N=0,Nh=0,do_once=0):
r"""
Test the functional equation of self for s--> 1 - s
"""
lhs = self.scattering_determinant(s,prec,N,Nh)
phi = scattering_determinant_Hecke_triangle(self._q,s,prec)
if self._verbose>0:
print "Z(s)=",z
print "phi(Zeta)(s)=",lhs
print "phi(s)=",phi
return abs(lhs-phi)
def _psi(self,s,prec=0):
    r""" Compute the factor Psi(s)=Psi(1/2)Z(1-s)/Z(s) in the Functional equation for the Selberg zeta function (note that the factor Phi(1/2)=1 or -1) see e.g. Hejhal STF vol. 2: p. 499-500.
    INPUT::
    - `s` -- complex number
    - `prec` -- integer (working precision)
    OUTPUT::
    - `Psi(s)=+-1*Z(1-s)/Z(s)`
    """
    # Precision: prefer the precision of s, then the explicit argument,
    # then the cached working precision.
    if hasattr(s,"prec"):
        prec = s.prec()
    elif prec>0:
        prec = prec
    else:
        prec = self._working_prec
    mpmath.mp.prec = prec
    CF = ComplexField(prec)
    # Shift coordinates to ss = s - 1/2; sigma and T are its real and
    # imaginary parts (the contour below runs 0 -> iT -> sigma+iT).
    ss=CF(s.real()-0.5,s.imag()); sigma=ss.real(); T=ss.imag()
    s1=CF(1.5-s.real(),-s.imag()); s2=CF(0.5+s.real(),s.imag())
    # Gamma factor Gamma(3/2-s)/Gamma(1/2+s).
    fak1=mpmath.mp.gamma(s1)/mpmath.mp.gamma(s2)
    mpi = mpmath.mp.mpc(0,1)
    mp1 = mpmath.mp.mpf(1)
    mppi = mpmath.mp.pi
    mppii = mpmath.mp.pi*mpi
    twopi = mppi*mpmath.mp.mpf(2)
    twopii = mppi*mpi*mpmath.mp.mpf(2)
    # A = pi*(q-2)/q -- the hyperbolic area factor of G_q.
    A=mpmath.mp.mpf(self._q-2)/mpmath.mp.mpf(self._q)*mppi
    # H1: contribution of the identity/hyperbolic term, as a contour
    # integral of z*tan(pi*z) along 0 -> iT -> sigma+iT.
    f1 = lambda y: y*mpmath.mp.tan(mppii*y)
    IH1=-mpmath.mp.quad(f1,[0,T])
    f2 = lambda x: mpmath.mp.mpc(x,T)*mpmath.mp.tan(mppi*mpmath.mp.mpc(x,T))
    IH2=mpmath.mp.quad(f2,[0,sigma])
    H1=-A*(IH1+IH2)
    # E1: elliptic contribution, contour integral of 1/cos(pi*z).
    f3 = lambda y : mpmath.mp.cos(mppi*mpi*y)**-1
    IE1=mpi*mpmath.mp.quad(f3,[0,T])
    f4 = lambda x: mpmath.mp.cos(mppi*mpmath.mp.mpc(x,T))**-1
    IE2= mpmath.mp.quad(f4,[0,sigma])
    m=self._q
    E1=mppi*(IE1+IE2)/mpmath.mp.mpf(2)
    if self._verbose>2:
        print "prec=",prec
        print "mpmath.mp.dps=",mpmath.mp.dps
        print "mpmath.mp.prec=",mpmath.mp.prec
        print "IH1=",IH1
        print "IH2=",IH2
        print "IE1=",IE1
        print "IE2=",IE2
        print "E1=",E1
    # Remaining elliptic terms, one for each k/m with 1<=k<m.
    for k in range(1,m): #from 1 to m-1 do:
        km = mpmath.mp.mpf(k)/mpmath.mp.mpf(m)
        g1 = lambda t: mpmath.mp.exp(twopi*km*t) / (mp1+mpmath.mp.exp(twopi*t ))+mpmath.mp.exp(-twopi*km*t)/(mp1+mpmath.mp.exp(-twopi*t))
        IE11 = mpi*mpmath.mp.quad(g1,[0,T])
        g2 = lambda x: mpmath.mp.exp(-twopii*km*mpmath.mp.mpc(x,T)) / (mp1+mpmath.mp.exp(-twopii*mpmath.mp.mpc(x,T) ))+mpmath.mp.exp(twopii*km*mpmath.mp.mpc(x,T))/(mp1+mpmath.mp.exp(twopii*mpmath.mp.mpc(x,T)))
        IE12 = mpmath.mp.quad(g2,[0,sigma])
        E1=E1+mppi*(IE11+IE12)/mpmath.mp.mpf(m)/mpmath.mp.sin(mppi*km)
        if self._verbose>2:
            print "E1[",k,"]=",E1
    # P1 = (1-2s)*ln(2); parabolic/cusp contribution.
    P1=CF(1-2*s.real(),-2*s.imag())*mpmath.mp.ln(mpmath.mp.mpf(2))
    P=fak1*mpmath.mp.exp(H1+E1+P1)
    if self._verbose>2:
        print "P1=",P1
        print "E1=",E1
        print "H1=",H1
    return ComplexField(prec)(P.real,P.imag)
def RealZ(self,t,M0=0,Mh=3,do_once=0,branch=0,prec=0,tol=0.01,get_err=0):
    r""" Compute the rotated Selberg zeta function, i.e. Z(1/2+it)*exp(-iArgZ(1/2+it)).

    INPUT:
    - `t` -- real number.
    - `M0`, `Mh`, `do_once` -- integers passed on to ``self.value``.
    - `branch` -- integer. Branch choice for Theta (see ``Theta``).
    - `prec` -- integer. Working precision in bits.
    - `tol` -- real. Warn if the imaginary part of the result exceeds tol.
    - `get_err` -- integer. If >=1 also return the error estimate.

    NOTE(review): this redefines (and therefore replaces) the earlier
    ``RealZ`` method above, which has a different signature -- confirm and
    remove one of the two.
    """
    if self._verbose>0:
        print "RealZ at t=",t
    # Only real t makes sense on the critical line s=1/2+it.
    if hasattr(t,"imag"):
        if t.imag()<>0:
            raise ValueError,"Call with real argument only! Got:{0}".format(t)
        t = t.real()
    self.set_prec(t,prec)
    CF = ComplexField(self._working_prec)
    s =CF(0.5,t)
    z = self.value(s,M0_start=M0,Mh_start=Mh,do_once=do_once,get_err=get_err)
    th= self.Theta(t,branch)
    if get_err>=1:
        er = z[1]
        z = z[0]
    # Rotate by half the scattering phase; the result should be real.
    res = z*CF(0,0.5*th).exp()
    if abs(res.imag())>tol:
        print "Warning: function not real:{0}".format(res)
    if get_err==0:
        return res
    else:
        return res,er
def theta(self,t,branch=0):
    r"""
    The argument of Z(1/2+it).
    INPUT:
    - `t` -- real number
    - `branch` -- choice of branch cut (0: principal branch; otherwise a
      cut along the positive real axis)

    NOTE: this duplicates ``Theta`` above and now mirrors its computation.
    """
    # BUGFIX: this method was missing the ``self`` parameter, called the
    # misspelled ``scattering_determinant_hecke_triangle`` with swapped
    # arguments, passed the undefined name ``q`` to self._psi, and used
    # the undefined function ``arg``.
    prec = t.prec()
    CF = ComplexField(prec)
    s = CF(0.5,t)
    z = scattering_determinant_Hecke_triangle(s,self._q)*self._psi(s,prec=prec)
    ar = z.argument()
    if branch!=0: # Do not use principal branch
        # Use branch cut at positive axis instead
        if z.imag()<0:
            ar = ar+CF(2)*CF.base_ring().pi()
    return ar
def values_on_interval(t1,t2,step_size=0.05,M0=0,tol=1E-4,filename=None):
    r"""
    Computes the Selberg zeta function along an interval
    and writes the values into a file

    NOTE(review): this method appears unfinished/dead code -- confirm:
    - it is missing the ``self`` parameter although ``self`` is used below;
    - ``ns`` is referenced before assignment whenever step_size != 0;
    - the file is opened without a write mode yet written to, and
      ``fp.write`` is given a tuple instead of a string;
    - ``self.Realz`` (lower-case z) does not exist (``RealZ`` does);
    - ``h``, ``oldsign`` and ``sign`` are used without being defined here.
    """
    CF = ComplexField(self._working_prec)
    RF = RealField(self._working_prec)
    if filename==None:
        filename='selberg_zeta-q{0}-{1}--{2}.txt'.format(self._q,t1,t2)
    if M0==0:
        M0=20
    if t2<t1:
        raise ValueError,"Need t2 > t1!"
    if step_size==0:
        if ns==0:
            raise ValueError,"Need either number of steps or stepsize!"
        step_size = RF(t2-t1)/RF(ns)
    if ns==0:
        ns = ceil(RF(t2-t1)/step_size)
    fp = open(filename)
    z_old = CF(1)
    zl=[]; tl=[]
    for i in range(ns):
        t = t1+i*step_size
        # Choose the branch according to the sign of the previous value.
        if z_old.real()>=0:
            branch=0
        else:
            branch=1
        tl.append(t)
        z,M = self.Realz(t,M0_start=M0,branch=branch,return_M0=1)
        zl.append(z)
        fp.write((t,z))
        M0=M
        if i==0:
            continue
        if i==1:
            if z.real()>0:
                branch = 0
            else:
                branch = 1
            continue
        ### Determine (experimentally) the branch to use
        if abs(z)<0.5*h: # We are probably close to or just passed a zero
            # Linear extrapolation through the two previous points to see
            # whether we passed through a zero (sign change).
            slope = (zl[i]-zl[i-2])/(t[i]-t[i-2])
            const = zl[i-1] - slope*tl[i-1]
            app_val = slope*t+ const
            if app_val.real()*z.real()<0:
                oldsign = -1
            z = sign(oldsign)*z
class TransferOperator(Parent):
r"""
Transfer Operator for the geodesic flow on the Hecke triangle group G_q.
"""
def __init__(self,q,prec=53,verbose=0):
    r""" Initializes the transfer operator for the geodesic flow on G_q.

    INPUT:
    - `q` -- integer. Index of the Hecke triangle group G_q.
    - `prec` -- integer. Working precision in bits.
    - `verbose` -- integer. Verbosity level.
    """
    self._q=q
    self._is_set=False       # set to True by _setup_transfer_operator
    self._prec=prec
    #self._dps=mpmath.mp.dps
    self._verbose=verbose
    self._R=0                # cached floating-point value of R (see self.R())
    self._lambdaq=0          # exact lambda_q (set by _setup_transfer_operator)
    self._lambdaq_r=0        # cached real approximation of lambda_q
    self._setup_transfer_operator()
    self._MAXIT=100000       # iteration cap; presumably for series summation -- TODO confirm use
def __repr__(self):
    r""" Return a string representation of self. """
    return "Transfer operator of Hecke triangle group G_{0}\n".format(self._q)
def _setup_transfer_operator(self,prec=0):
    r"""
    Setup the necessary data for the transfer operator.

    Computes and caches on self: the structure constant h, the dimension,
    the transition-index matrix N_ij, lambda_q (exactly via a cyclotomic
    field and numerically), R and the number of Markov intervals.

    INPUT:
    - `prec` -- integer. If >0, raise the working precision to prec bits.
    """
    #ctx=self._ctx
    # Nothing to do if already set up at sufficient precision.
    if self._is_set and (prec==0 or prec>0 and prec<self._prec):
        return
    if prec>0:
        self._prec=prec
    q=self._q
    RF = RealField(self._prec)
    mp1=RF(1); mp2=RF(2); mp4=RF(4)
    qr=RF(q)
    # lambda_q = 2*cos(pi/q)
    llambda=mp2*(RF.pi()/qr).cos()
    dim=0
    if is_even(q):
        # Even q: h=(q-2)/2 and the operator has 2h components.
        #R=mp1
        h=ZZ(QQ(q-2)/QQ(2))
        dim=ZZ(2*h)
        # print "dim=",dim
        NIJ=matrix(ZZ,dim)
        NIJ[0,h-1]=2
        for i in range(1,h):
            NIJ[i,i-1]=1
            NIJ[i,h-1]=2
        for i in range(h,2*h):
            NIJ[i,h-1]=1
        NIJ[2*h-1,h-1]=1
        # Fill the remaining entries by the antisymmetry
        # N[j,dim-1-i] = -N[dim-1-j,i].
        for j in range(dim):
            for i in range(h):
                #print "lhs=",j,dim-1-i,"rhs=",dim-1-j,i
                NIJ[j,dim-1-i]=-NIJ[dim-1-j,i]
    elif q==3:
        # Modular group case: R=(sqrt(5)-1)/2, dim=2 and explicit N_ij.
        R=(RF(5).sqrt()-mp1)/mp2
        dim=ZZ(2)
        h=ZZ(0)
        NIJ=matrix([[3,-2],[2,-3]])
    elif q>=5:
        # Odd q>=5: h=(q-3)/2 and dim=4h+2.
        #R=llambda/mp2-mp1
        #R+=mp1/mp2*((mp2-llambda**2)+mp4).sqrt()
        h=ZZ(QQ(q-3)/QQ(2))
        dim=ZZ(4*h+2)
        NIJ=matrix(ZZ,dim)
        NIJ[0,2*h-1]=2
        NIJ[0,2*h]=3
        NIJ[1,2*h]=2
        if self._verbose>0:
            print "h=",h
            print "dim=",dim
        for i in range(2,2*h+2):
            NIJ[i,i-2]=1
            NIJ[i,2*h]=2
        for i in range(2*h+2,4*h+1):
            NIJ[i,2*h-1]=1
            NIJ[i,2*h]=2
        for i in range(4*h+1,4*h+2):
            if self._verbose>1:
                print i,2*h-1,2*h
            NIJ[i,2*h-1]=1
            NIJ[i,2*h]=2
        # Antisymmetric completion as in the even case.
        for j in range(0,dim):
            for i in range(0,2*h+1):
                #print "lhs=",j,dim-i,"rhs=",dim-j,i
                NIJ[j,dim-1-i]=-NIJ[dim-1-j,i]
    self._is_set=True
    self._h=h
    self._lambdaq_r=llambda
    self._R=self.R(prec=prec)
    # Exact lambda_q = z + z^-1 for z a primitive 2q-th root of unity,
    # normalized to the positive real embedding.
    if self._q<>3:
        K = CyclotomicField(2*self._q)
        z = K.gens()[0]
        llambda = z+z.conjugate()
        if self._verbose>0:
            print "lambda=",llambda
            print "RR(lambda)=",llambda.complex_embedding()
            print "RR(z)=",z.complex_embedding()
        if not llambda.is_real_positive():
            if not (-llambda.is_real_positive()):
                raise ArithmeticError,"Could not get lambda!"
            else:
                llambda = -llambda
    else:
        llambda=1
    if self._verbose>0:
        print "lambda=",llambda
    self._lambdaq=llambda
    self._dim=dim
    self._Nij=NIJ
    # Number of intervals in (half of) the Markov partition.
    if is_odd(self._q):
        self._numi = 2*self._h+1
    else:
        self._numi = self._h
def R(self,prec=0,format='float',verbose=0):
    r"""
    Return the constant R associated with G_q (used for the Markov partition).

    INPUT:
    - `prec` -- integer. Bits of precision (0 => use self._prec).
    - `format` -- 'float' for a floating-point value, 'alg' for an algebraic value.
    - `verbose` -- integer.

    NOTE(review): the 'alg' branch looks unfinished -- confirm:
    ``self._R_alg`` is never initialized in __init__ (AttributeError on
    first use), ``z^2`` is bitwise xor outside the Sage preparser,
    ``llambda`` is undefined (presumably ``lambdaq``) and nothing is
    returned from that branch.
    """
    if prec>0:
        RF = RealField(prec)
    else:
        prec = self._prec
        RF = RealField(self._prec)
    if format=='float':
        # Return the cached value if it is at sufficient precision.
        if self._R<>0 and self._prec>=prec:
            return RF(self._R)
        if is_even(self._q):
            self._R=RF(1)
        elif self._q==3:
            self._R=RF(RF(5).sqrt()-1)/RF(2)
        else:
            # R is the positive root of x^2+(2-lambda)x+1 shifted, i.e.
            # R = sqrt((2-lambda)^2+4)/2 + lambda/2 - 1.
            lambdaq=self.lambdaq_r(prec)
            self._R = RF((2-lambdaq)**2+4).sqrt()/RF(2) + lambdaq/RF(2)-RF(1)
            if verbose>0:
                print "R1=",RF((2-lambdaq)**2+4).sqrt()/RF(2) + lambdaq/RF(2)-RF(1)
                print "R2=",-RF((2-lambdaq)**2+4).sqrt()/RF(2) + lambdaq/RF(2)-RF(1)
        return self._R
    elif format=='alg':
        if self._R_alg<>0:
            return self._R_alg
        if is_even(self._q):
            self._R_alg = 1
        #elif self._q==3:
        #
        #    self._R_alg=(ZZ(5).sqrt()-1)/RF(2)
        else:
            lambdaq=self.lambdaq()
            F = lambdaq.parent()
            z = F['z'].gens()[0]
            f = z^2+(2-llambda)*z+1
            print "f=",f
            #self._R = RF((2-lambdaq)**2+4).sqrt()/RF(2) + lambdaq/RF(2)-RF(1)
def lambdaq(self):
    r""" Return lambda_q exactly (a cyclotomic number, or 1 when q=3). """
    return self._lambdaq
def lambdaq_r(self,prec=0):
    r""" Return a real floating-point approximation of lambda_q to ``prec`` bits. """
    # Recompute only when a higher precision is requested or no cached value exists.
    if prec>self._prec or self._lambdaq_r==0:
        if hasattr(self._lambdaq,"complex_embedding"):
            self._lambdaq_r=self._lambdaq.complex_embedding(prec).real()
        else:
            self._lambdaq_r=RealField(prec)(self._lambdaq)
    return self._lambdaq_r
def numi(self):
    r""" Return the number of intervals in (half of) the Markov partition. """
    return self._numi
def dim(self):
    r""" Return the dimension (number of components) of the transfer operator. """
    return self._dim
def h(self):
    r""" Return the structure constant h (set by _setup_transfer_operator). """
    return self._h
def Nij(self):
    r""" Return the matrix of transition indices N_ij. """
    return self._Nij
def trace(self,s,eps=1e-10,imax=10000):
    r"""
    Compute the trace of self at s.

    INPUT:
    - `s` -- complex number.
    - `eps` -- real. Stop summing when the last term is below eps (and n>10).
    - `imax` -- integer. Maximum number of terms per diagonal entry.
    """
    trace = 0
    # Each non-zero diagonal entry m of N_ij contributes a sum of traces of
    # pi_s(ST^n) over n with |n| >= |m| and the same sign as m.
    for m in self._Nij.diagonal():
        if m==0:
            continue
        if m<0:
            sg=-1
        else:
            sg=1
        tmptrace = 0
        for n in range(abs(m),imax):
            tmp = self.trace_ns(n*sg,s)
            tmptrace+=tmp
            # Terms decay; stop once they are negligible (but take at
            # least 10 of them).
            if abs(tmp)<eps and n>10:
                break
        if self._verbose>0:
            print "number of iterations: {0}".format(n)
            print "Trace[{0}]={1}".format(m,tmptrace)
            print "Lasterr=",abs(tmp)
        trace+=tmptrace
    return trace
def trace_ns(self,n,s):
    r"""
    Return the trace of pi_s(ST^n), i.e. N(ST^n)^-s/(1-N(ST^n)^-1),
    where N denotes the norm of the hyperbolic element ST^n.
    (This docstring previously sat, misplaced, on ``norm_n`` below.)
    """
    prec = s.prec()
    nn = self.norm_n(n,prec)**-1
    one = nn.parent()(1)
    return nn**s/(one-nn)
# @cached_method
def norm_n(self,n,prec=None):
    r"""
    Return the norm N(ST^n) of the hyperbolic element ST^n, i.e. the
    larger root (>1) of x^2-(n^2*lambda^2-2)x+1=0.
    """
    if prec<>None:
        l = self.lambdaq_r(prec)
    else:
        l = self.lambdaq_r()
    nl = n*l
    two = l.parent()(2)
    # The two roots are (a +- b)/2 with a=(n*l)^2-2, b=|n*l|*sqrt((n*l)^2-4).
    a = nl**2-two
    b = abs(nl)*(nl**2-two**2).sqrt()
    x = (a+b)/two
    if x<1:
        if self._verbose>0:
            print "n=",n
            print "x=",x
        # Switch to the other root: (a-b)/2 = x - b.
        x = x - b
    assert x>1
    return x
def get_markov_partition(self,itype='alg',prec=0,iformat='ie'):
    r"""
    Get the Markov partition (or rather half of it)

    INPUT:
    - `itype` -- 'float' for floating-point end points; any other value
      keeps the exact (algebraic) values.
    - `prec` -- integer. Bits of precision when itype='float'.
    - format -- set to 'ie' if you want a list of end points x_i
    set to 'is' if you want a list of [x_i,x_{i+1}]
    set to 'cr' if you want a list of [c_i,r_i]
    set to 'ar' if you want dictionaries (alphas,rhos) of centers/radii.

    NOTE(review): the 'is' format is documented but unreachable -- the test
    ``if iformat=='is'`` below sits inside the ``elif iformat=='cr'``
    branch and can never be true; confirm.
    """
    ## First get the algebraic version of lambda
    llambda_2 = self.lambdaq()/QQ(2)
    iphi=[llambda_2]
    numi = self.numi()
    if self._verbose>1:
        print "num int=",numi
        print "lambda/2=",llambda_2
        if hasattr(llambda_2,"complex_embedding"):
            print "lambda/2=",llambda_2.complex_embedding()
    # The partition points are the forward orbit of lambda/2 under f_q.
    y = llambda_2
    for i in range(numi):
        if self._verbose>1:
            print "y=",y
        y = self.fq(y)
        iphi.append(y)
    if self._verbose>1:
        print "iphi=",iphi
    if itype=='float':
        res = []
        if prec==0:
            prec = self._prec
        RF = RealField(self._prec)
        for x in iphi:
            if hasattr(x,"complex_embedding"):
                x = x.complex_embedding(prec).real()
            elif hasattr(x,"real"):
                x = x.real()
            else:
                x = RF(x)
            res.append(x)
        res.sort()
    else:
        # Python 2 style sort with an external comparison helper.
        iphi.sort(cmp=my_alg_sort)
        res = iphi
    if iformat=='ie':
        return res
    elif iformat=='cr':
        res2 = []
        for i in range(len(res)-1):
            x1=res[i]; x2=res[i+1]
            if iformat=='is':
                res2.append((x1,x2))
            else:
                # Center/radius representation of [x1,x2].
                c = (x1+x2)/2
                r = abs(x2-x1)/2
                res2.append((c,r))
        return res2
    elif iformat=='ar':
        # Dictionaries of centers (alphas) and radii (rhos); the mirrored
        # intervals occupy the upper half of the index range.
        alphas={}; rhos={}
        i = 0
        res.reverse()
        for i in range(len(res)-1):
            x1=res[i]; x2=res[i+1]
            c = (x1+x2)/2
            r = abs(x2-x1)/2
            alphas[i]=-c
            alphas[i+self._dim/2]=c
            rhos[i]=r
            rhos[i+self._dim/2]=r
            i+=1
        return alphas,rhos
def get_contracting_discs(self,iformat='ar',verbose=0):
    r"""
    Compute intervals of contraction as in Lemma 4.4. of [MMS2012]

    INPUT:
    - `iformat` -- 'ar' to return dictionaries (alphas,rhos) of centers and
      radii; anything else returns the raw dictionary of intervals.
    - `verbose` -- integer.

    REFERENCES:
    [MMS2012] Mayer, M\"uhlenbruch, Str\"omberg, 'The Transfer Operator for the Hecke Triangle Groups', Discrete Contin. Dyn. Syst, Vol. 32, No. 7, 2012.
    """
    intervals={}
    if self._q==3:
        intervals={1:[-1,QQ(1)/QQ(2)],2:[-QQ(1)/QQ(2),1]}
    elif self._q==4:
        intervals={1:[-1,self.lambdaq_r()/4],2:[-self.lambdaq_r()/4,1]}
    elif is_odd(self._q):
        dim_2=ZZ(self._dim).divide_knowing_divisible_by(ZZ(2))
        llambdaq_4=self.lambdaq_r()/4
        # Odd-index intervals: left endpoints from continued fractions of
        # the form [-1; (-1)^i, -2, (-1)^h].
        for i in range(self._h+1):
            l2ip1=[-1]
            for j in range(i):
                l2ip1.append(-1)
            l2ip1.append(-2)
            for j in range(self._h):
                l2ip1.append(-1)
            if verbose>0:
                print "cf[2*{0}+1]={1}".format(i,l2ip1)
            x0 = self.cont_frac_to_pt(l2ip1)
            intervals[2*i+1]=[x0,llambdaq_4]
            if verbose>0:
                print "intervals[{0}]={1}".format(2*i+1,intervals[2*i+1])
        # Even-index intervals: endpoints from [-1; (-1)^i].
        for i in range(1,self._h+1):
            l2i=[-1]
            for j in range(i):
                l2i.append(-1)
            x0 = self.cont_frac_to_pt(l2i)
            intervals[2*i]=[x0,llambdaq_4]
            if verbose>0:
                print "intervals[{0}]={1}".format(2*i,intervals[2*i])
        # The second half of the intervals are mirror images of the first.
        for i in range(1,self._h+1):
            x1 = intervals[2*i][0]; x2 = intervals[2*i][1]
            if verbose>1:
                print "x1,x2[{0}]={1}".format(2*i,(x1,x2))
            intervals[2*i+dim_2]=[-x2,-x1]
        for i in range(self._h+1):
            x1 = intervals[2*i+1][0]; x2 = intervals[2*i+1][1]
            if verbose>1:
                print "x1,x2[{0}]={1}".format(2*i+1,(x1,x2))
            intervals[2*i+1+dim_2]=[-x2,-x1]
    else:
        raise NotImplementedError
    if verbose>1:
        print "intervals=",intervals
        print "intervals.keys()=",intervals.keys()
    if iformat=='ar':
        # Convert intervals to center/radius dictionaries (0-based keys).
        alphas={}
        rhos={}
        for i in range(1,len(intervals)+1):
            x1 = intervals[i][0];x2 = intervals[i][1]
            if verbose>1:
                print "x1,x2[{0}]={1}".format(i,(x1,x2))
            c = (x1+x2)/2
            r = abs(x1-x2)/2
            alphas[i-1]=c
            rhos[i-1]=r
        return alphas,rhos
    return intervals
def cont_frac_to_pt(self,cf=[],prec=0,verbose=0):
    r"""
    Compute the point corresponding to a nearest lambda continued fraction.
    INPUT:
    - `cf` list. A nearest continued fraction expansion given as a list with format:
    cf = [a_0,a_1,...] where a_0 is the integer part and a_j are integers.
    (NOTE: mutable default argument; ``cf`` is only read, so this is
    harmless, but consider using None.)
    - `prec` -- integer. The bits of precision of the returned value.
    - `verbose` -- integer. Set the verbosity.
    OUTPUT:
    - real number of precision prec.
    """
    if prec==0:
        prec = self._prec
    RF = RealField(prec)
    frac_part = cf[1:]; n = len(frac_part)
    x = RF(0)
    verbose = max(verbose,self._verbose)
    # Evaluate the continued fraction from the innermost map outwards:
    # x -> ST^{a_j}(x) for j = n-1, ..., 0.
    for j in range(n):
        x1 = self.STn(x,frac_part[n-j-1])
        if verbose>1:
            print "ST^{0}({1})={2}".format(frac_part[n-j-1],x,x1)
        x = x1
    # Finally add the integer part a_0 * lambda.
    x = x+self.lambdaq_r(prec)*RF(cf[0])
    return x
def nearest_lambda_code(self,x,N=0):
    r"""
    Compute the nearest lambda continued fraction of x up to N symbols if N>0.

    INPUT:
    - `x` -- real number, or a Rational (in which case the full, finite
      expansion is computed exactly).
    - `N` -- integer. Maximum number of symbols (0 => no explicit limit).

    OUTPUT:
    - list [a_0,a_1,...] of integers.
    """
    if hasattr(x,"prec"):
        prec = x.prec()
        lambdaq_r = self.lambdaq_r(prec)
    elif isinstance(x,Rational):
        # Exact arithmetic: disable the symbol limit and use exact lambda.
        prec = 0
        N = -1
        lambdaq = self.lambdaq()
        lambdaq_r = self.lambdaq_r()
    else:
        prec = 53
        lambdaq_r = self.lambdaq_r(prec)
        RF = RealField(prec)
    # BUGFIX: previously called with the undefined name ``z``.
    a0 = self.nearest_lambda_mult(x)
    res = [a0]
    # Remove the integer part a0*lambda (exactly in the rational case).
    if prec>0:
        x = x - a0*lambdaq_r
    else:
        x = x - a0*lambdaq
    j = 0
    while x!=0:
        # BUGFIX: previously iterated the undefined name ``y``; the loop
        # variable is ``x``.
        x1,n = self.fq(x,retn=1)
        x = x1
        res.append(n)
        if j>=N and N>0:
            break
        j+=1
    return res
def STn(self,x,n,prec=0):
    r"""
    Evaluate the map ST^n_q at x, i.e. return -1/(x + n*lambda_q).

    INPUT:
    - `x` -- real number.
    - `n` -- integer.
    - `prec` -- integer. Precision in bits (0 => use self._prec).
    """
    working_prec = self._prec if prec == 0 else prec
    RF = RealField(working_prec)
    denominator = RF(x + n*self.lambdaq_r(working_prec))
    return RF(-1)/denominator
def STn_fixed_pts(self,n,ret="f"):
    r"""
    Compute fixed points of
    ST^n: x -> -1/(x+n)
    i.e. the roots of x^2 + n*x + 1 = 0.

    INPUT:
    - `n` -- integer.
    - `ret` -- 'f' to convert the fixed points to floating point.

    OUTPUT:
    - pair (contracting, expanding) fixed point, or a single value when
      they coincide.

    NOTE(review): returns None implicitly when x1 != x2 but neither
    classification below applies; also QQ(n*n-4).sqrt() is only rational
    for special n -- TODO confirm the intended domain of n.
    """
    x1 = (-n+QQ(n*n-4).sqrt())/QQ(2)
    x2 = (-n-QQ(n*n-4).sqrt())/QQ(2)
    if ret=="f":
        x1 = RR(x1); x2 = RR(x2)
    # |x+n|^2 > 1 means |d/dx (-1/(x+n))| < 1, i.e. the point is contracting.
    t1 = abs(x1+n)**2
    t2 = abs(x2+n)**2
    if t1>1 and t2<1: ## contracting fixed point
        return x1,x2
    if t1<1 and t2>1: ## contracting fixed point
        return x2,x1
    if x1==x2:
        return x1
def fq(self,y,retn=0):
    r""" Compute f_q(x)=-1/x - n*lambda_q where n = nearest lambda multiple to -1/x.

    INPUT:
    - `y` -- real (or exact) number.
    - `retn` -- integer. If 1, also return the multiple n.
    """
    if y==0:
        return 0
    if abs(y) > self.lambdaq_r()/2:
        # y lies outside the fundamental interval: reduce it first.
        n = self.nearest_lambda_mult(y)
        # NOTE(review): this subtracts n rather than n*lambda_q; it is only
        # equivalent when lambda_q = 1 (q=3) -- TODO confirm whether
        # ``y - n*self.lambdaq_r()`` was intended here.
        y = y - n
    else:
        z = -y**-1
        n = self.nearest_lambda_mult(z)
        y = z - n*self.lambdaq_r()
        if self._verbose>1:
            print "-1/x={0}".format(z)
            print "-1/x-{0}*lambda={1}".format(n,y)
    if retn==1:
        return y,n
    return y
def nearest_lambda_mult(self,x):
    r""" Compute the nearest lambda multiple, i.e. the integer n minimizing |x - n*lambda_q|. """
    if hasattr(x,"prec"):
        prec = x.prec()
    else:
        prec = 53
    RF = RealField(prec)
    # Coerce exact (cyclotomic) input to a real number first.
    if hasattr(x,"complex_embedding"):
        if x<>x.conjugate():
            raise ArithmeticError,"Call only with real argument!"
        x = x.complex_embedding().real()
    elif hasattr(x,"real"):
        x = x.real()
    else:
        x = RF(x)
    # For q=3 lambda=1 so no rescaling is needed.
    if self._q<>3:
        x = x.real()/self.lambdaq_r(prec)
    # round(x) computed as floor(x + 1/2).
    y = x+RF(0.5)
    if self._verbose>1:
        print "x/lambda+1/2={0}".format(y)
    ## Need my own defined floor function
    # my_floor is an external helper (presumably handles boundary cases of
    # the builtin floor -- TODO confirm).
    return my_floor(float(y))
def matrix_approximation(self,s,M,sym=1,it=1,eps=1,alpha_in={},rhos_in={}):
r"""
Compute the approximation to self at s by a finite rank (matrix) operator acting on the Banach space B of vector-valued holomorphic functions on a product of discs D x ... x D with D centered at 0.
INPUT:
- `s` -- complex number.
- `M` -- integer. The size of the the finite rank approximation. (The point of truncation of the Power series representating functions in B).
"""
if hasattr(s,"prec"):
prec = s.prec()
else:
prec = 53
CF = MPComplexField(prec)
RF = CF.base()
self._setup_transfer_operator(prec)
dim=self._dim
B = self._Nij
ll = self.lambdaq_r(prec) #CF._base(self._lambdaq)
Nmax = max(map(abs,self._Nij.list()))
if self._verbose>0:
print "Nmax = ",Nmax
print "M=",M
print "dim=",dim
s = CF(s.real(),s.imag())
if sym==0:
## We use unsymmetrized operator
MS=MatrixSpace(CF,dim*(M+1))
A=Matrix_complex_dense(MS,0)
trop_approximation(A,self._Nij,M,self._q, dim, Nmax, s,ll,verbose=verbose)
else:
## We use symmetrized operator
sym_dim=ZZ(dim).divide_knowing_divisible_by(ZZ(2))
if self._verbose>0:
print "sym_dim=",sym_dim
MS=MatrixSpace(CF,sym_dim*(M+1))
A=Matrix_complex_dense(MS,0)
## The question is now which intervals to use.
if it==0:
alphas=[RF(0) for x in range(self._dim)]
rhos=[RF(1) for x in range(self._dim)]
elif it==1: ## Use discs from the Markov partition
alphas,rhos=self.get_markov_partition(itype='float',iformat='ar')
else:
alphas,rhos=self.get_contracting_discs(iformat='ar')
if alpha_in or rhos_in:
if isinstance(alphas_in,dict):
for j in alphas_in.keys():
alphas[j-1]=RF(alphas_in[j])
elif isinstance(alphas_in,list):
for j in len(alphas_in):
alphas[j]=RF(alphas_in[j])
else:
raise ValueError,"Got intervals in wrong format! alphas={0}".format(alphas_in)
if isinstance(rhos_in,dict):
for j in rhos_in.keys():
rhos[j-1]=RF(rhos_in[j])
elif isinstance(rhos_in,list):
for j in len(rhos_in):
rhos[j]=RF(rhos_in[j])
else:
raise ValueError,"Got intervals in wrong format! rhos={0}".format(rhos_in)
if self._verbose>1:
print "alphas=",alphas
print "rhos=",rhos
trop_approximation(A,self._Nij,M,self._q, dim, Nmax, s,ll,verbose=self._verbose,approx_type=3,eps=eps,alphas=alphas,rhos=rhos)
return A
def get_eigenvalues(self,s,N=0,h=3,sym=1,delta=1e-7):
    r"""
    Return the 'verified' eigenvalues of the matrix approximation of self.

    Eigenvalues are computed for approximations of sizes N and N+h, and only
    those that agree between the two sizes (to within ``delta``) are kept.

    INPUT:
    - `s` -- complex number.
    - `N` -- integer. Base approximation size.
    - `h` -- integer. Size increment for the comparison approximation.
    - `sym` -- integer. If 1, compute both symmetry sectors (eps=+1/-1).
    - `delta` -- real. Matching tolerance.
    """
    evs=[]
    if sym==1:
        # Both symmetry sectors at both sizes; the spectra are merged
        # before comparison.
        A1plus=self.matrix_approximation(s,N,sym=sym,eps=1)
        A1minus=self.matrix_approximation(s,N,sym=sym,eps=-1)
        A2plus=self.matrix_approximation(s,N+h,sym=sym,eps=1)
        A2minus=self.matrix_approximation(s,N+h,sym=sym,eps=-1)
        ev1p = A1plus.eigenvalues(sorted=1)
        ev1m = A1minus.eigenvalues(sorted=1)
        ev2p = A2plus.eigenvalues(sorted=1)
        ev2m = A2minus.eigenvalues(sorted=1)
        ev1 = ev1p; ev1.extend(ev1m)
        ev2 = ev2p; ev2.extend(ev2m)
    else:
        A1=self.matrix_approximation(s,N,sym=sym)
        A2=self.matrix_approximation(s,N+h,sym=sym)
        ev1=A1.eigenvalues(sorted=1)
        ev2=A2.eigenvalues(sorted=1)
    # Keep only eigenvalues that appear (within delta) in both lists.
    evs = get_close_values(ev1,ev2,delta,ret_err=0,verbose=self._verbose)
    return evs
def get_approximation_Zagier_type(self,s,M,eps=1):
    r"""
    Compute a finite-rank approximation (A,B) of the transfer operator at s
    of 'Zagier type', sampling at points x_j = -sin((2j-1)*pi/(4M))^2
    (presumably Chebyshev-type nodes) and using Hurwitz zeta values --
    TODO confirm against the reference for this construction.

    INPUT:
    - `s` -- complex number (with a parent carrying a precision).
    - `M` -- integer. Number of sample points per component.
    - `eps` -- integer, +1 or -1. Symmetrization sign.

    OUTPUT:
    - pair of matrices (A,B).
    """
    prec = s.parent().prec()
    CF = MPComplexField(prec)
    CCF = ComplexField(prec)
    RF = RealField(prec)
    dim=self._dim
    MS = MatrixSpace(CF,dim/2*M)
    A = Matrix_complex_dense(MS,0)
    B = Matrix_complex_dense(MS,0)
    xj = {}
    if self.lambdaq()<>1:
        l = self.lambdaq_r(prec)
    else:
        l = 1
    mpmath.mp.prec = prec
    # Sample points x_j = -sin((2j-1)*pi/(4M))^2 for j=1..M.
    for i in range(M):
        arg = RF(2*(i+1)-1)*RF.pi()/RF(4*M)
        tmp = arg.sin()**2
        #xj[i] = (tmp-RF(0.5))*RF(l)
        xj[i] = -tmp
        #*RF(l)/RF(2)
    #return xj
    twos = RF(2)*s
    for i in range(dim/2):
        # NOTE(review): ``pos`` is computed but never used, and the
        # condition i > dim/2-1 cannot hold inside this loop -- confirm.
        pos=1
        if i > dim/2-1:
            pos = -1
        for j in range(dim/2):
            n = abs(self._Nij[i,j])
            # Use the Hurwitz zeta only for the middle components.
            do_zeta=1
            if j < dim/2-1 or j > dim/2+1:
                do_zeta = 0
            for r in range(M):
                zarg = CCF(twos.real()+r,twos.imag())
                ri = i*M+r
                for k in range(M):
                    kj = j*M+k
                    if l<>1:
                        xj_l = xj[k]/l
                        f = l**-zarg
                    else:
                        f=1
                        xj_l = xj[k]
                    if do_zeta==1:
                        # Symmetrized Hurwitz zeta values zeta(2s+r, +-x+n).
                        z1 = mpmath.mp.zeta(zarg,xj_l+n)*(-1)**r
                        z2 = mpmath.mp.zeta(zarg,-xj_l+n)
                        z = z1+eps*z2
                        z = z*f
                    else:
                        # Single power terms (+-x+n*l)^(-2s-r).
                        z1=((-1)**r)*(xj[k]+n*l)**-zarg
                        z2=(-xj[k]+n*l)**-zarg
                        z = z1+eps*z2
                    # NOTE(review): ``z.real``/``z.imag`` without parentheses
                    # is correct for mpmath numbers but would be a bound
                    # method for Sage complex numbers (the do_zeta==0 path)
                    # -- confirm.
                    B[ri,kj]=CF(z.real,z.imag)
                    A[ri,kj]=xj[k]**(r) #+eps*(-xj[k])**(k)
    return A,B
def _approximation3(self,s,M,dprec=0,one_i=0,check=0,alphas_in={},rhos_in={},verbose=0):
    r"""
    Compute the approximation to L_s up to order M Using Taylor expansions centered at different discs
    INPUT:
    - `s` -- complex number
    - `M` -- integer
    - `dprec` -- integer
    - `verbose` -- integer
    - `one_i` -- if =1 we use one common interval (i.e. the same as approximation2)
    - `check` -- include extra checks
    - `alphas_in` -- a list of centers of intervals
    - `rhos_in` -- a list of radii of intervals
    NOTE: Not cythonized. This is a very slow algorithm!
    (NOTE: alphas_in/rhos_in are mutable default arguments; only read here.)
    """
    if hasattr(s,"prec"):
        prec = s.prec()
    else:
        prec = 53
    CF = MPComplexField(prec)
    CFF = ComplexField(prec)
    RF = CF.base()
    llambda=self.lambdaq_r(prec)
    maxprec=max(prec,dprec)
    verbose = max(verbose,self._verbose)
    # Choose the discs (centers alphas, radii rhos) for the expansions.
    if one_i==1:
        alphas=[RF(0) for x in range(self._dim)]
        #rhos=[llambda/RF(2) for x in range(self._dim)]
        rhos=[RF(1) for x in range(self._dim)]
    elif one_i==-1:
        alphas=[RF(0) for x in range(self._dim)]
        rhos=[llambda/RF(2) for x in range(self._dim)]
    else:
        alphas,rhos=self.get_markov_partition(itype='float',iformat='ar')
    # Explicit overrides (dicts with 1-based keys).
    if alphas_in<>{}:
        alphas={}
        for j in alphas_in.keys():
            alphas[j-1]=RF(alphas_in[j])
    if rhos_in<>{}:
        rhos={}
        for j in rhos_in.keys():
            rhos[j-1]=RF(rhos_in[j])
    ## Checking that the order is ok, i.e. that the
    if verbose>0:
        print "alphas=",alphas
        print "rhos=",rhos
    if dprec>maxprec:
        print "Warning: Can not get more than %s bits of precision!" %(CF.prec())
        print "Please redefine the Transfer operator!"
    twos=CF(2)*CF(s.real(),s.imag())
    dim=self._dim
    MS = MatrixSpace(CF,dim*(M+1))
    A=Matrix_complex_dense(MS,0) #CF,dim*(M+1))
    Z=dict();nr=dict()
    mpmath.mp.prec=prec
    for i in range(dim):
        Z[i]=dict()
    # Precompute (Hurwitz) zeta values Z[i][l] = zeta(2s+l, alpha_i/lambda+1)
    # for every shift l that occurs below.
    for l in range(0,2*M+2):
        nr[l]=RF(l)
        zarg = CFF(twos.real()+nr[l],twos.imag())
        if one_i==1:
            for i in range(dim):
                # Riemann zeta (global Sage function) in the common-disc case.
                z = zeta(zarg)
                Z[i][l]=CF(z.real(),z.imag())
        else:
            for i in range(dim):
                if alphas[i]<>0:
                    z = mpmath.mp.zeta(zarg,alphas[i]/llambda+1)
                else:
                    z = mpmath.mp.zeta(zarg,1)
                Z[i][l]=CF(z.real,z.imag)
    ## Note: alphas[i]<0 if i<dim/2 and >0 if i>dim/2
    ## and alphas[dim/2+i]=-alphas[i] for 0<=i<dim/2
    # we only need ctx.mpf(l) another time for 0<=l<=M
    # NOTE(review): the following loop has no effect (``lr`` is unused) -- confirm.
    for l in range(M+1,2*M+1):
        lr=CF(l)
    if verbose>0:
        print "dim=",dim
    #llambda=CF(self._lambdaq_r.real(),self._lambdaq_r.imag())
    # Assemble the matrix entry by entry: (n,k) index the Taylor
    # coefficients, (i,j) the components.
    for n in range(M+1):
        #pow=nr[n]+twos
        fakn=RF.factorial(n)**-1
        for k in range(M+1):
            sg=1
            if is_odd(n+k):
                sg=-1
            for i in range(dim):
                for j in range(dim):
                    B=RF(self._Nij[i,j])
                    ai = RF(alphas[i])*RF(sign(B))
                    aj = RF(alphas[j])*RF(sign(B))
                    ri = rhos[i]; rj=rhos[j]
                    abB=B.abs()
                    if verbose>1 and n==0 and i==0:
                        print "B,|B=",B,abB
                    if B==0:
                        AA=CFF(0)
                    else:
                        summa = CFF(0)
                        # NOTE: this inner ``l`` shadows the precomputation
                        # index above (harmless; that loop is finished).
                        for l in range(k+1):
                            poc=mpc_pochammer(twos+l,n)
                            tmp = poc*CF(binomial(k,l))
                            tmp = tmp*ai**(k-l)
                            zarg = CF(twos.real()+l+n,twos.imag())
                            if 2*j < dim -2 or 2*j>dim:
                                #z = mpmath.mp.zeta(twos+l+n,aj+llambda*abB)
                                z = (aj+llambda*abB)**-zarg
                            else:
                                # Look up the precomputed zeta value for the
                                # mirrored component index jj.
                                if aj<0:
                                    jj = ZZ(j).mod(dim/2)
                                else:
                                    jj = dim/2+ZZ(j).mod(dim/2)
                                z = Z[jj][l+n]
                                if aj==0:
                                    ztmp=CF(1,0)
                                    for a in range(2,abB):
                                        ztmp = ztmp + CF(a,0)**(-zarg)
                                else:
                                    ztmp=CF(0)
                                    for a in range(1,abB):
                                        ztmp += CF(aj/llambda+RF(a),0)**(-zarg)
                                        if verbose>1:
                                            print "ztmp+[{0}]={1}".format(a,CF(aj/llambda+RF(a),0)**(-zarg))
                                            print "aj/lambda+a=",CF(aj/llambda+RF(a))
                                # Subtract the leading terms to shift the
                                # Hurwitz zeta to start at aj/lambda+|B|.
                                z = z - ztmp
                            if check==1: ## Extra check that we computed the Hurwitz zeta func. correctly
                                z1 = mpmath.mp.zeta(zarg,aj/llambda+abB)
                                z1 = CF(z1.real,z1.imag)
                                if verbose>1:
                                    print "diff=",abs(z-z1)
                                if abs(z-z1)>1e-10:
                                    print "z0=Z[",jj,"][",l+n,"]=",Z[jj][l+n]
                                    print "z,z1=",z,z1
                                    raise ArithmeticError,"Problem with H-Z!"
                            z = CF(z.real(),z.imag())
                            ## test for lambda=1 z = z*llambda**(-zarg)
                            tmp = tmp*z
                            summa+=tmp
                        AA=summa
                    # Scale by the radii and 1/n! normalization.
                    fak=fakn*(ri**-k*rj**n) #*RF.factorial(k).sqrt()/RF.factorial(n).sqrt() #*ri**-k)*(rj**n/RF.factorial(n))
                    if verbose>2:
                        print "fak[{0}][{1}]={2}".format(n,k,fak)
                    AA = AA *fak
                    AA = CF(AA.real(),AA.imag())
                    if B>0 and sg<0:
                        A[i*(M+1)+n,j*(M+1)+k]=-AA
                    else:
                        A[i*(M+1)+n,j*(M+1)+k]=AA
    return A
    def _sym_approximation(self,s,M,eps=1,dprec=0,interval_type=1,check=0,alphas_in={},rhos_in={},verbose=0):
        r"""
        Compute the symmetrized approximation to the transfer operator L_s up
        to order M, using Taylor expansions centered at different discs.

        INPUT:
        - `s` -- complex number
        - `M` -- integer, truncation order of the Taylor expansions
        - `eps` -- integer +1 or -1, symmetry sign
        - `dprec` -- integer, desired precision in bits (capped by prec of s)
        - `interval_type` -- 0: one interval centered at 0 for all functions;
          1: use the Markov partition; 2: use the contracting discs;
          anything else: use alphas_in/rhos_in
        - `check` -- if 1, include extra checks (unused in this routine)
        - `alphas_in` -- a dict of centers of intervals (overrides the above)
        - `rhos_in` -- a dict of radii of intervals (overrides the above)
        - `verbose` -- integer verbosity level

        OUTPUT: a sym_dim*(M+1) x sym_dim*(M+1) complex matrix approximating
        the symmetrized transfer operator.

        NOTE: Not cythonized. This is a very slow algorithm!
        NOTE: alphas_in/rhos_in default to mutable dicts; they are only read,
        never mutated, so this is safe here.
        """
        # Working precision: taken from s when available, else double precision.
        if hasattr(s,"prec"):
            prec = s.prec()
        else:
            prec = 53
        assert eps in [1,-1]
        CF = MPComplexField(prec)
        CFF = ComplexField(prec)
        RF = CF.base()
        llambda=self.lambdaq_r(prec)
        maxprec=max(prec,dprec)
        verbose = max(verbose,self._verbose)
        alphas={};rhos={}
        if interval_type==0:
            ### Use one interval centered at zero for all functions
            alphas=[RF(0) for x in range(self._dim)]
            rhos=[llambda/RF(2) for x in range(self._dim)]
            #rhos=[RF(1) for x in range(self._dim)]
        elif interval_type==1:
            ## Use the markov partition.
            alphas,rhos=self.get_markov_partition(itype='float',iformat='ar')
        elif interval_type==2:
            alphas,rhos=self.get_contracting_discs(iformat='ar')
            # Use the contracting discs
        elif alphas_in=={} or rhos_in=={}:
            # Unknown interval_type and no explicit intervals supplied.
            raise ValueError,"Must have valid format for intervals! Got alphas={0} and rhos={1}".format(alphas,rhos)
        ## If explicit intervals were supplied they override the choice above.
        if alphas_in<>{}:
            alphas={}
            for j in alphas_in.keys():
                alphas[j]=RF(alphas_in[j])
        if rhos_in<>{}:
            rhos={}
            for j in rhos_in.keys():
                rhos[j]=RF(rhos_in[j])
        # Normalize to plain lists indexed 0..dim-1.
        if isinstance(alphas,dict):
            alphas=list(alphas.values())
        if isinstance(rhos,dict):
            rhos=list(rhos.values())
        dim=self._dim
        ### Check that alphas and rhos have the correct format:
        ### one center/radius per component, first half non-positive centers,
        ### second half non-negative centers.
        if len(alphas)<>dim or len(rhos)<>dim:
            raise ValueError,"Must have valid format for intervals! Got alphas={0} and rhos={1}".format(alphas,rhos)
        for j in range(dim):
            if j<dim/2 and alphas[j]>0 or j>=dim/2 and alphas[j]<0:
                raise ValueError,"Must have valid format for intervals! Got alphas={0} and rhos={1}".format(alphas,rhos)
        if verbose>0:
            print "alphas=",alphas
            print "rhos=",rhos
        if dprec>maxprec:
            print "Warning: Can not get more than %s bits of precision!" %(CF.prec())
            print "Please redefine the Transfer operator!"
        twos=CF(2)*CF(s.real(),s.imag())
        # The symmetrization halves the dimension (dim must be even).
        sym_dim=ZZ(dim).divide_knowing_divisible_by(ZZ(2))
        MS = MatrixSpace(CF,sym_dim*(M+1))
        A=Matrix_complex_dense(MS,0)
        Z=dict();nr=dict()
        B={}; AA={}
        mpmath.mp.prec=prec
        # Precompute the Hurwitz zeta values Z[i][j][l][0/1] (positive and
        # negative branch) for all exponents 2s+l, 0<=l<=2M+1.
        for i in range(dim):
            Z[i]=dict()
            for j in range(dim):
                Z[i][j]=dict()
        lpow={}
        for l in range(0,2*M+2):
            nr[l]=RF(l)
            zarg = CFF(twos.real()+nr[l],twos.imag())
            lpow[l]=llambda**-zarg   # lambda**-(2s+l), reused in the main loop
            for i in range(sym_dim):
                for j in range(sym_dim):
                    Z[i][j][l]={}
                    B[0] = RF(self._Nij[i,j])          # positive branch offset
                    B[1] = RF(self._Nij[i,dim-1-j])    # negative branch offset
                    assert B[0]>=0
                    assert B[1]<=0
                    if 2*j < dim -2 or 2*j>dim:
                        # Off the middle components only a single power is needed.
                        z = (alphas[i]/llambda+B[0])**-zarg
                        Z[i][j][l][0]=CF(z.real(),z.imag())
                        z = (-alphas[i]/llambda-B[1])**-zarg
                        Z[i][j][l][1]=CF(z.real(),z.imag())
                    else:
                        # Middle components: full Hurwitz zeta via mpmath.
                        z = mpmath.mp.zeta(zarg,alphas[i]/llambda+B[0])
                        Z[i][j][l][0]=CF(z.real,z.imag)
                        z = mpmath.mp.zeta(zarg,-alphas[i]/llambda-B[1])
                        Z[i][j][l][1]=CF(z.real,z.imag)
                    #print i,j,l,0,CC(Z[i][j][l][0].real(),Z[i][j][l][0].imag())
                    #print i,j,l,1,CC(Z[i][j][l][1].real(),Z[i][j][l][1].imag())
        # we only need ctx.mpf(l) another time for 0<=l<=M
        # NOTE(review): this loop only assigns the unused local `lr` and
        # appears to be vestigial -- confirm before removing.
        for l in range(M+1,2*M+1):
            lr=CF(l)
        if verbose>0:
            print "dim=",dim
        # Main assembly loop over matrix blocks (n,k) and components (i,j).
        for n in range(M+1):
            fakn=RF.factorial(n)**-1
            for k in range(M+1):
                sg=1
                if is_odd(n+k):
                    sg=-1
                for i in range(sym_dim):
                    for j in range(sym_dim):
                        ni = i*(M+1)+n; kj = j*(M+1)+k
                        ri = rhos[i]; rj=rhos[j]
                        ## Do the positive and negative term separately
                        B[0] = RF(self._Nij[i,j])
                        B[1] = RF(self._Nij[i,dim-1-j])
                        for ii in range(2):
                            if ii==0:
                                ai = RF(alphas[i]); aj = RF(alphas[j])
                            else:
                                ai = -RF(alphas[i]); aj = RF(alphas[j])
                            if B[ii]==0:
                                AA[ii]=CFF(0)
                            else:
                                # Binomial sum of Pochhammer * zeta * powers.
                                summa=CFF(0)
                                for l in range(k+1):
                                    poc=mpc_pochammer(twos+l,n)
                                    tmp = poc*CF(binomial(k,l))
                                    z = Z[i][j][l+n][ii]
                                    #if ni==0 and kj==32:
                                    #    print "z[",i,j,l+n,ii,"]=",z
                                    tmp = tmp*z*lpow[l+n]*aj**(k-l)
                                    summa+=tmp
                                    #if ni==0 and kj==32:
                                    #    print "tmp[",i,j,l+n,ii,"]=",tmp*fakn
                                if B[ii]>0:
                                    AA[ii]=summa*sg
                                else:
                                    AA[ii]=summa
                            #if ni==0 and kj==32:
                            #    print
                            if verbose>2:
                                # NOTE(review): `fak` is only assigned below (after
                                # this ii-loop); on the first (n,k,i,j) iteration this
                                # print raises NameError when verbose>2 -- confirm.
                                print "fak[{0}][{1}]={2}".format(n,k,fak)
                            #AA[ii] = AA[ii] *fak
                            AA[ii] = CF(AA[ii].real(),AA[ii].imag())
                        fak=fakn*(rj**-k*ri**n) #*RF.factorial(k).sqrt
                        #if ni==0 and kj==32:
                        #    print "AA[0]=",AA[0]
                        #    print "AA[1]=",AA[1]
                        # Combine branches with the symmetry sign eps.
                        A[ni,kj] = AA[0]+eps*AA[1]*(-1)**k
                        #if ni==0 and kj==32:
                        #    print "A[0,32]=",A[ni,kj]
                        #    print "fak=",fak
                        A[ni,kj] = A[ni,kj]*fak
        return A
def det_1_minus_K(self,s,prec=0):
r"""
Compute the Fredholm determinant det(1-K_s) of the auxiliary operator K_s.
INPUT:
- `s` -- complex number
- `prec` -- integer (default: 0) precision in bits. If prec=0 the precision is determined by the argument s.
ALGORITHM:
We know that each eigenvalue of K_s is of the form mu_n = L**(-2s-2n) where L is explicitly given.
Hence det(1-K_s)=Prod(1-L**(-2s-2n)) where the product is truncated when the relative error is <2**-prec.
"""
if hasattr(s,"prec"):
if prec==0 or prec<s.prec():
prec = s.prec()
else:
if prec==0:
prec = 53
self._prec=prec
self._setup_transfer_operator(prec)
RF=RealField(prec)
CF=ComplexField(prec)
eps = RF(2)**RF(-prec)
ss = CF(s.real(),s.imag())
mp1=RF(1);mp2=RF(2);mp4=RF(4)
twos=mp2*ss
llambda=self.lambdaq_r(prec)
if self._q==3:
L=mp2+self._R
elif self._q==4:
L=mp2.sqrt()+mp1
elif(is_even(self._q)):
L=(mp2+llambda)/(mp4-llambda*llambda).sqrt()
else:
L=(mp2+self._R*llambda)/(mp2-llambda)
l=dict()
K=mp1
for n in range(self._MAXIT):
pow=-twos-mp2*RF(n)
#print "pow[",n,"]=",pow
l[n]=(L**pow)
K=K*(mp1-l[n])
if n>1:
err=max(abs(l[n]),abs(l[n-1]))*abs(K)
#print n,l[n],err
if err<eps:
break
if n > self._MAXIT-2:
raise ArithmeticError,"Could not obtain approximation in %s iteraations. Raise value of self._MAXIT?"
#ctx.dps=old_prec
return K
def Gauss_approximation(self,s,M):
r"""
Try the approximation (for the Gauss map) as suggested by Zagier in 'New points of view on Selberg Zeta function' 2002.
"""
RF = s.parent()
CF = MPComplexField(RF.prec())
xj={}
mpmath.mp.prec = RF.prec()
for j in range(1,M+1):
arg = RF(2*j-1)*RF.pi()/RF(4*M)
tmp = arg.sin()**2
xj[j] = tmp
MS = MatrixSpace(CF,M)
A = Matrix_complex_dense(MS,0)
B = Matrix_complex_dense(MS,0)
twos=s*2
for k in range(1,M+1):
for j in range(1,M+1):
A[k-1,j-1]=xj[j]**(k-1)
#z = mpmath.mp.zeta(xj[j]+1,twos+k-1)
z = mpmath.mp.zeta(twos+k-1,xj[j]+1)
B[k-1,j-1]=CF(z.real,z.imag)
C = A.inverse()*B
return C
def eigenvalues_Gauss(self,s,M,h=3,delta=1e-7):
r"""
Get approximations to the eigenvalues of the Gauss transfer operator
"""
A1 = self._approximation_GaussZ_one_mat(s,M)
A2 = self._approximation_GaussZ_one_mat(s,M+h)
ev1 = A1.eigenvalues(sorted=1)
ev2 = A2.eigenvalues(sorted=1)
if self._verbose>0:
print "ev1=",ev1
print "min(ev1-1)=",min(map(lambda x:abs(x-1),ev1))
print "ev2=",ev2
print "min(ev2-1)=",min(map(lambda x:abs(x-1),ev2))
evs = get_close_values(ev1,ev2,delta,ret_err=0,verbose=self._verbose)
return evs
    def get_close_values(l1,l2,delta,test='rel',ret_err=0,verbose=0):
        r"""
        Return those numbers in list nr. 2 which are closer than ``delta``
        to some number in list nr. 1 (each element of l2 is used at most once).

        INPUT:
        - ``l1`` -- list nr. 1
        - ``l2`` -- list nr. 2
        - ``delta`` -- desired error
        - ``test`` -- either 'rel' or 'abs': compare relative or absolute error
        - ``ret_err`` -- if 1, also return the largest error among the matches
        - ``verbose`` -- verbosity level

        NOTE: all elements of l1 and l2 must supply a ``prec()`` method
        (e.g. Sage real/complex numbers); the smaller of the two precisions
        determines the epsilon below which l1-elements are skipped.
        """
        res=[]; used=[]
        assert isinstance(l1,list) and isinstance(l2,list)
        assert hasattr(l1[0],"prec") and hasattr(l2[0],"prec")
        prec1 = l1[0].prec();prec2 = l2[0].prec()
        prec = min(prec1,prec2)
        eps = 2.0**(1-prec)
        # used[j]==1 marks l2[j] as already matched.
        for i in range(len(l2)):
            used.append(0)
        if ret_err==1:
            delta_eps = 0
        for i in range(len(l1)):
            e1=l1[i]
            # Skip numerically-zero elements (relative error is meaningless).
            if abs(e1)<eps:
                continue
            for j in range(len(l2)):
                if used[j]==1:
                    continue
                if test=='rel':
                    err=abs(l1[i]-l2[j])/(abs(l1[i])+abs(l2[j]))
                elif test=='abs':
                    err=abs(l1[i]-l2[j])
                else:
                    raise ValueError,"test must be one of 'rel' or 'abs'!"
                if err<delta:
                    if verbose>0:
                        print "(rel)err=",err
                        print "l1[{0}]={1}".format(i,l1[i])
                        print "l2=[{0}]={1}".format(j,l2[j])
                    if ret_err==1:
                        # Track the worst accepted error.
                        if err > delta_eps:
                            delta_eps = err
                    used[j]=1
                    res.append(l2[j])
        if ret_err==1:
            return res,delta_eps
        else:
            return res
def dps_to_prec(dps):
""" Convert number of digits to bits of precision. """
return int(RR(dps*log(10)/log(2)))
def prec_to_dps(prec):
""" Convert bits of precision to number of digits. """
return int(RR(prec*log(2)/log(10)))
def my_alg_sort(x,y):
r""" Sort numbers in a number field."""
if hasattr(x,"complex_embedding"):
xx = x.complex_embedding().real()
else:
xx = x.real()
if hasattr(y,"complex_embedding"):
yy = y.complex_embedding().real()
else:
yy = y.real()
return cmp(xx,yy)
| Python |
from selberg_z import SelbergZeta,TransferOperator
| Python |
def userpass():
    """Interactively prompt for MongoDB credentials; return (username, password)."""
    import getpass
    name = raw_input('Mongodb username: ')
    secret = getpass.getpass()
    return name, secret
| Python |
#################################################################################
#
# (c) Copyright 2010 William Stein
#
# This file is part of PSAGE
#
# PSAGE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PSAGE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
"""
The converter module defines code for converting between Sage objects
and MongoDB documents.
DBConverter is a class whose methods are for converting native Sage
objects into MongoDB documents (JSON-like dictionaries), and
conversely. In addition to the DBConverter, this module defines two
objects: db_converter and to_db. The first, db_converter, is simply
an instance of the DBConverter class. The second, to_db is the method
of the DBConverter class that converts a native Sage object to a
document. Conversion in the other direction, from document to Sage
object, must be done by explicitly calling a method of db_converter.
This is because no type codes are stored in the database document, in
order to not tie the database too tightly to Sage.
"""
class DBConverter:
    """Convert between native Sage objects and MongoDB documents (see module docstring)."""

    def from_dirichlet_character(self, e):
        """Encode a Dirichlet character as a JSON-like document."""
        G = e.parent()
        doc = {
            'modulus': int(e.modulus()),
            'order': int(e.order()),
            'even': e.is_even(),
            'element': [int(a) for a in e.element()],
            'zeta_order': int(G.base_ring().zeta_order()),
        }
        return doc

    def to_dirichlet_character(self, character):
        """Rebuild a Dirichlet character from a document produced by from_dirichlet_character."""
        if character['order'] == 1:
            from sage.all import trivial_character
            return trivial_character(character['modulus'])
        from sage.all import DirichletGroup, CyclotomicField, QQ
        from sage.modular.dirichlet import DirichletCharacter
        n = character['zeta_order']
        # Order-2 characters live over QQ; everything else over a cyclotomic field.
        base = QQ if n == 2 else CyclotomicField(n)
        G = DirichletGroup(character['modulus'], base, zeta_order=n)
        vec = G.an_element().element().parent()(character['element'])
        return DirichletCharacter(G, vec)

    def to_db(self, x):
        """Convert a supported Sage/Python object to a document or primitive."""
        from sage.modular.dirichlet import DirichletCharacter
        from sage.all import Integer
        if isinstance(x, DirichletCharacter):
            return self.from_dirichlet_character(x)
        if isinstance(x, (Integer, int, long)):
            return int(x)
        if x is None:
            return x
        raise NotImplementedError
# Module-level singleton converter, plus a convenience alias for the
# Sage-object -> MongoDB-document direction.
db_converter = DBConverter()
to_db = db_converter.to_db
| Python |
#################################################################################
#
# (c) Copyright 2010 William Stein
#
# This file is part of PSAGE
#
# PSAGE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PSAGE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
"""
This module implements a compressed key to Python object store using MongoDB.
This module defines one class ObjectDB, which we instantiate using a
MongoDB database db. The resulting instance then works somewhat like
a dictionary, except that objects are pickled and stored on disk in
MongoDB.
"""
class ObjectDB:
    """Dictionary-like store of pickled Python objects backed by MongoDB GridFS."""

    def __init__(self, db):
        from gridfs import GridFS
        self.gridfs = GridFS(db)

    def __setitem__(self, key, obj):
        self.save(obj, key)

    def __getitem__(self, key):
        return self.load(key)

    def __delitem__(self, key):
        from pymongo.objectid import ObjectId
        if isinstance(key, ObjectId):
            file_id = key
        else:
            # Translate a filename key to the ObjectId of its latest version.
            file_id = self.gridfs.get_last_version(key)._id
        self.gridfs.delete(file_id)

    def __repr__(self):
        return "Key-value database"

    def keys(self):
        """Return list of filenames of objects in the gridfs store."""
        return self.gridfs.list()

    def object_ids(self):
        """Return list of id's of objects in the gridfs store which do not
        have filenames."""
        cursor = self.gridfs._GridFS__files.find({'filename':{'$exists':False}},['_id'])
        return [doc['_id'] for doc in cursor]

    def has_key(self, key):
        return self.gridfs.exists(filename=key)

    def save(self, obj, key=None, compress=None):
        """Pickle obj into the grid file system.

        When key is None the file is stored under a MongoDB-assigned
        ObjectId, which is returned; otherwise it is stored under key and
        key is returned.
        """
        from sage.all import dumps
        payload = dumps(obj, compress=compress)
        if key is None:
            # Store by MongoDB assigned _id only, and return that id.
            return self.gridfs.put(payload)
        self.gridfs.put(payload, filename=key)
        return key

    def load(self, key, compress=True):
        """Unpickle and return the object stored under key (filename or ObjectId)."""
        from pymongo.objectid import ObjectId
        from sage.all import loads
        if isinstance(key, ObjectId):
            raw = self.gridfs.get(key).read()
        else:
            raw = self.gridfs.get_last_version(key).read()
        return loads(raw, compress=compress)
| Python |
"""
Queries
This file contains code that carries out interesting queries
regarding the database of elliptic curves.
"""
def counts_collection(address='localhost:29000'):
    """Authenticate against the research database at ``address`` and return
    its ellcurves collection."""
    from psage.lmfdb.auth import userpass
    from pymongo import Connection
    from pymongo.connection import DuplicateKeyError
    user, password = userpass()
    research = Connection(address).research
    research.authenticate(user, password)
    return research.ellcurves
def create_counts_table(levels, address, verbose=0):
"""
QUESTION: What proportion of curves in the database have
squarefree conductor, as a function of the conductor?
To answer, make another table ellcurves.counts with documents:
{'_id':N, 'c':number_of_isogeny_classes_of_curves_of_conductor_N, 'ss':True}
where 'ss':True is set only if N is squarefree.
Once we have this table, the rest should be relatively easy.
"""
db_counts = get_ellcurves(address).counts
from sage.all import is_squarefree
i = 0
for N in levels:
N = int(N)
c = ellcurves.find({'level':N, 'number':1}).count()
doc = {'_id':N, 'c':c}
if is_squarefree(N):
doc['ss'] = True
try:
db_counts.insert(doc, safe=True)
except DuplicateKeyError:
if verbose and i%verbose == 0:
print '[%s]'%N,
else:
if verbose and i%verbose == 0:
print N,
i += 1
import sys; sys.stdout.flush()
def counts_intlist(Nmax, address='localhost:29000'):
    """
    Return an IntList v such that for N<=Nmax, we have v[N] = # of
    isogeny classes of curves of conductor N in the database.

    INPUT:
    - Nmax -- integer bound on the conductor
    - address -- MongoDB address passed to get_ellcurves.
      BUGFIX: 'address' was referenced in the body but was not a
      parameter, so the function always raised NameError; it is now a
      parameter with the same default used elsewhere in this module.
    """
    Nmax = int(Nmax)
    db_counts = get_ellcurves(address).counts
    from sage.all import stats
    v = stats.IntList(Nmax + 1)
    query = {'_id':{'$lte':Nmax}, 'c':{'$exists':True}}
    for c in db_counts.find(query):
        v[c['_id']] = c['c']
    return v
def create_disc_counts_table(levels, address, verbose=0):
    """
    Stub for building the table ellcurves.disc_counts (counts keyed on
    discriminants), intended to parallel create_counts_table.

    NOTE(review): not implemented yet -- the body only fetches the (unused)
    disc_counts collection.  The original docstring here was a copy-paste
    of create_counts_table's.
    """
    db_counts = get_ellcurves(address).disc_counts
    # TODO
| Python |
#################################################################################
#
# (c) Copyright 2011 William Stein
#
# This file is part of PSAGE
#
# PSAGE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PSAGE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
from sage.all import mwrank_EllipticCurve
from psage.lmfdb.auth import userpass
def selmer2(a_invariants, max_time=None):
if max_time is None:
E = mwrank_EllipticCurve(a_invariants)
return int(E.selmer_rank())
else:
try:
from sage.all import fork # not in official sage.
except ImportError:
raise ImportError, "You need to apply the patch from http://trac.sagemath.org/sage_trac/ticket/9631"
@fork(timeout=max_time)
def f():
E = mwrank_EllipticCurve(a_invariants)
return int(E.selmer_rank())
return f()
import sage.parallel.ncpus
def populate_db(address, level_min, level_max,
ncpus=sage.parallel.ncpus.ncpus(),
max_time=None):
"""
Compute and insert into the database the 2-selmer ranks of all the
curves in a give range of levels, for which 2-selmer ranks aren't
already known.
"""
user, password = userpass()
import math, random
from sage.all import prime_range, parallel, pari
level_min = int(level_min); level_max = int(level_max)
s = int(math.ceil((level_max - level_min)/float(ncpus)))
blocks = [(level_min+i*s, min(level_max,level_min+(i+1)*s)) for i in range(ncpus)]
@parallel(ncpus)
def f(l_min, l_max):
from pymongo import Connection
C = Connection(address).research
C.authenticate(user, password)
C = C.ellcurves
for v in C.find({'level':{'$gte':level_min, '$lt':level_max},
'sel2':{'$exists':False}}):
sel2 = selmer2(eval(v['weq']), max_time)
C.update({'_id':v['_id']}, {'$set':{'sel2':sel2}})
for ans in f(blocks):
print ans
"""
EXAMPLE QUERIES:
from pymongo import Connection
db = Connection(port=int(29000)).research
e = db.ellcurves
v = e.find({'level':{'$lt':100r}, 'sel2':{'$exists':True}})
sage: v.count()
7
"""
| Python |
#################################################################################
#
# (c) Copyright 2011 William Stein
#
# This file is part of PSAGE
#
# PSAGE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PSAGE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
import sage.parallel.ncpus
from psage.lmfdb.auth import userpass
def populate_db(address, level_min, level_max, pmax=100,
ncpus=sage.parallel.ncpus.ncpus()):
"""
Compute and insert into the MongoDB database with the given
address the Fourier coefficients a_p for p up to pmax for the optimal
elliptic curves of the given range of levels (top level not
included), using the given number of threads.
Only curves with ap not yet set are affected by this function.
"""
user, password = userpass()
import math, random
from sage.all import prime_range, parallel, pari
level_min = int(level_min); level_max = int(level_max)
P = prime_range(pmax)
s = int(math.ceil((level_max - level_min)/float(ncpus)))
blocks = [(level_min+i*s, min(level_max,level_min+(i+1)*s)) for i in range(ncpus)]
@parallel(ncpus)
def f(l_min, l_max):
from pymongo import Connection
C = Connection(address).research
C.authenticate(user, password)
C = C.ellcurves
for v in C.find({'level':{'$gte':level_min, '$lt':level_max},
'number':1,
'ap':{'$exists':False}}):
E = pari('ellinit(%s,1)'%v['weq'])
ap = dict([(str(p),int(E.ellap(p))) for p in P])
C.update({'_id':v['_id']}, {'$set':{'ap':ap}})
for ans in f(blocks):
print ans
"""
EXAMPLE QUERIES:
from pymongo import Connection
db = Connection(port=int(29000)).research
e = db.ellcurves
v = e.find({'level':{'$lt':100r}, 'ap.2':-2r}, )
sage: v.count()
7
sage: v = e.find({'level':{'$lt':100r}, 'ap.2':-2r}, ['level', 'weq'])
sage: v.next()
{u'weq': u'[0,-1,1,-10,-20]', u'_id': ObjectId('4c9258841e8b55611895b170'), u'level': 11}
sage: v.next()
{u'weq': u'[0,0,1,-1,0]', u'_id': ObjectId('4c9258841e8b55611895b1bc'), u'level': 37}
sage: v = e.find({'level':{'$lt':100r}, 'ap.2':{'$mod':[int(2),int(0)]}}, ['level', 'weq', 'ap.2', 'ap.3'])
sage: v.next()
{u'ap': {u'3': -1, u'2': -2}, u'weq': u'[0,-1,1,-10,-20]', u'_id': ObjectId('4c9258841e8b55611895b170'), u'level': 11}
"""
| Python |
#################################################################################
#
# (c) Copyright 2011 William Stein
#
# This file is part of PSAGE
#
# PSAGE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PSAGE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
def str_to_apdict(s, labels):
    """Parse a whitespace-separated string of a_p values into a dictionary
    mapping labels[i] to the integer at position i, skipping entries whose
    value is the unknown marker '?'."""
    result = {}
    for idx, tok in enumerate(s.split()):
        if tok != '?':
            result[labels[idx]] = int(tok)
    return result
def labeled_primes_of_bounded_norm(F, B):
    """
    Return (labels, primes): the primes of F of norm up to B, ordered by
    residue characteristic (not norm), together with a parallel list of
    string labels '<characteristic><letter code>' (e.g. '2a', '5a', '5b').
    """
    from sage.databases.cremona import cremona_letter_code
    from psage.modform.hilbert.sqrt5.sqrt5 import primes_of_bounded_norm
    primes = primes_of_bounded_norm(F, B)
    labels = []
    prev_char = 1
    idx = 0
    for p in primes:
        char = p.smallest_integer() # residue characteristic
        if char == prev_char:
            # Another prime above the same rational prime: advance the letter.
            idx += 1
        else:
            prev_char = char
            idx = 0
        labels.append('%s%s'%(char,cremona_letter_code(idx)))
    return labels, primes
def import_table(address, aplists_txt_filename, max_level=None):
    """
    Import a text table of eigenvalues, using upsert to avoid
    replication of data.

    INPUT:
    - address -- MongoDB address of the research database
    - aplists_txt_filename -- tab-separated file with columns
      Nlevel, level, iso_class, ap-list ('#' lines are comments)
    - max_level -- if given, stop at the first row with Nlevel > max_level

    OUTPUT: the ellcurves_sqrt5 collection that was updated.
    """
    from psage.lmfdb.auth import userpass
    user, password = userpass()
    from sage.databases.cremona import cremona_letter_code
    from psage.modform.hilbert.sqrt5.sqrt5 import F
    labels, primes = labeled_primes_of_bounded_norm(F, 100)
    from pymongo import Connection
    C = Connection(address).research
    if not C.authenticate(user, password):
        raise RuntimeError, "failed to authenticate"
    e = C.ellcurves_sqrt5
    for X in open(aplists_txt_filename).read().splitlines():
        if X.startswith('#'):
            continue
        Nlevel, level, iso_class, ap = X.split('\t')
        ap = str_to_apdict(ap, labels)
        Nlevel = int(Nlevel)
        iso_class = cremona_letter_code(int(iso_class))
        v = {'level':level, 'iso_class':iso_class,
             'number':1, 'Nlevel':Nlevel, 'ap':ap}
        if max_level and Nlevel > max_level: break
        print v
        # Upsert keyed on everything except the ap data itself.
        spec = dict(v)
        del spec['ap']
        e.update(spec, v, upsert=True, safe=True)
    return e
def aplist(E, B=100):
    """
    Compute aplist for an elliptic curve E over Q(sqrt(5)), as a
    string->number dictionary.

    INPUT:
    - E -- an elliptic curve
    - B -- a positive integer (default: 100)

    OUTPUT:
    - dictionary mapping strings (labeled primes) to Python ints,
      with keys the primes P with norm up to B such that the
      norm of the conductor is coprime to the characteristic of P.
    """
    # NOTE: the unused 'canonical_gen' import from the original was removed.
    v = {}
    from psage.modform.hilbert.sqrt5.sqrt5 import F
    labels, primes = labeled_primes_of_bounded_norm(F, B)
    from sage.all import ZZ
    N = E.conductor()
    try:
        N = ZZ(N.norm())
    except (AttributeError, TypeError):
        # The conductor may already be a rational integer with no .norm().
        # BUGFIX: was a bare 'except:' which also hid unrelated errors.
        N = ZZ(N)
    for i in range(len(primes)):
        p = primes[i]
        k = p.residue_field()
        # Only primes of good reduction (characteristic coprime to N).
        if N.gcd(k.cardinality()) == 1:
            v[labels[i]] = int(k.cardinality() + 1 - E.change_ring(k).cardinality())
    return v
| Python |
#################################################################################
#
# (c) Copyright 2011 William Stein
#
# This file is part of PSAGE
#
# PSAGE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PSAGE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
def ellcurves_sqrt5(address='localhost:29000', username=None, password=None):
from sage.databases.cremona import cremona_letter_code
from psage.modform.hilbert.sqrt5.sqrt5 import F
from aplists import labeled_primes_of_bounded_norm
labels, primes = labeled_primes_of_bounded_norm(F, 100)
from pymongo import Connection
C = Connection(address).research
if username is None or password is None:
from psage.lmfdb.auth import userpass
username, password = userpass()
if not C.authenticate(username, password):
raise RuntimeError, "failed to authenticate"
return C.ellcurves_sqrt5
def find_isogeneous_curves(ellcurves_sqrt5, E):
    """
    INPUT:
    - ellcurves_sqrt5 -- MongoDB collection
    - E -- an elliptic curve over Q(sqrt(5))

    OUTPUT:
    - cursor iterating over entries in the collection that have the same
      level and the same good a_p, for p of norm up to 100.
    """
    from aplists import aplist
    from psage.modform.hilbert.sqrt5.tables import canonical_gen
    query = dict(('ap.%s'%p, a) for p, a in aplist(E, 100).items())
    # Level stored as the canonical generator string, whitespace stripped.
    query['level'] = str(canonical_gen(E.conductor())).replace(' ','')
    return ellcurves_sqrt5.find(query)
| Python |
#################################################################################
#
# (c) Copyright 2011 William Stein
#
# This file is part of PSAGE
#
# PSAGE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PSAGE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
from sage.all import EllipticCurve
from psage.modform.hilbert.sqrt5.sqrt5 import F
def r_an(ainvs, eps=1e-4):
    """
    Return the analytic rank of the elliptic curve over Q(sqrt(5)) with the
    given a-invariants: the number of leading Taylor coefficients of the
    L-series at s=1 that are smaller than eps in absolute value.
    """
    curve = EllipticCurve(F, ainvs)
    dok = curve.lseries().dokchitser(30)
    series = dok.taylor_series(1)
    order = 0
    while abs(series[order]) < eps:
        order += 1
    # Sanity check: the parity of the order of vanishing must match the
    # sign of the functional equation.
    expected_parity = 0 if dok.eps == 1 else 1
    assert order%2 == expected_parity
    return order
def r_alg(a_invs):
    """Return the Mordell-Weil rank of the curve over Q(sqrt(5)) with the given a-invariants."""
    curve = EllipticCurve(F, a_invs)
    return curve.rank()
from sage.all import fork
# Run in a forked subprocess so a crash or hang in the rank computation
# cannot take down the caller.
@fork
def rank(a_invs):
    """
    Return the rank of the curve with the given a-invariants: the algebraic
    rank when the 2-descent computation succeeds, otherwise the analytic
    rank as a fallback.
    """
    try:
        return r_alg(a_invs)
    except ValueError:
        return r_an(a_invs)
def compute_conjectural_ranks(level_norms, address='localhost:29000'):
    """
    For each level norm in the input list, compute the
    *conjectural* rank of the elliptic curve, and put
    that data in a field "r?" in the database.

    This uses Simon 2-descent if it works, and otherwise
    uses Dokchitser.  This should not be considered
    super-reliable!
    """
    from sage.all import sage_eval
    import util
    C = util.ellcurves_sqrt5(address)
    for N in level_norms:
        # Only curves with a Weierstrass equation and no "r?" field yet.
        for E in C.find({'Nlevel':int(N), 'r?':{'$exists':False}, 'weq':{'$exists':True}}):
            print E
            # The stored equations use 'a' for the generator of Q(sqrt(5)).
            weq = sage_eval(E['weq'], {'a':F.gen()})
            E['r?'] = int(rank(weq))
            print E
            C.update({'_id':E['_id']}, E, safe=True)
| Python |
#################################################################################
#
# (c) Copyright 2011 William Stein
#
# This file is part of PSAGE
#
# PSAGE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PSAGE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
def import_table(address, table_filename, max_level=None):
    """
    Import a text table of weq's, using upsert to avoid
    replication of data.  Format is like this (whitespace separated:
    Nlevel, level, iso_class, then the a_p values, with the Weierstrass
    equation last; '#' lines are comments)::

        31 5*a-2 0 -3 2 -2 2 4 -4 4 -4 -2 -2 ? ? -6 -6 12 -4 6 -2 -8 0 0 16 10 -6 [1,a+1,a,a,0]
        31 5*a-3 0 -3 2 -2 2 -4 4 -4 4 -2 -2 ? ? -6 -6 -4 12 -2 6 0 -8 16 0 -6 10 [1,-a-1,a,0,0]
        36 6 0 ? ? -4 10 2 2 0 0 0 0 -8 -8 2 2 -10 -10 2 2 12 12 0 0 10 10 [a,a-1,a,-1,-a+1]
    """
    from psage.modform.hilbert.sqrt5.sqrt5 import F
    from sage.databases.cremona import cremona_letter_code
    from aplists import labeled_primes_of_bounded_norm, str_to_apdict
    labels, primes = labeled_primes_of_bounded_norm(F, 100)
    from psage.lmfdb.auth import userpass
    user, password = userpass()
    from pymongo import Connection
    C = Connection(address).research
    if not C.authenticate(user, password):
        raise RuntimeError, "failed to authenticate"
    e = C.ellcurves_sqrt5
    for X in open(table_filename).read().splitlines():
        if X.startswith('#'):
            continue
        z = X.split()
        # First three columns and the trailing weq; everything between is ap data.
        Nlevel = z[0]; level = z[1]; iso_class = z[2]; weq = z[-1]
        ap = ' '.join(z[3:-1])
        ap = str_to_apdict(ap, labels)
        Nlevel = int(Nlevel)
        iso_class = cremona_letter_code(int(iso_class))
        v = {'level':level, 'iso_class':iso_class,
             'number':1, 'Nlevel':Nlevel, 'ap':ap,
             'weq':weq}
        if max_level and Nlevel > max_level: break
        print v
        # Upsert keyed on everything except the Weierstrass equation.
        spec = dict(v)
        del spec['weq']
        e.update(spec, v, upsert=True, safe=True)
| Python |
# Elliptic curves over Q(sqrt(5))
| Python |
# elliptic curves
| Python |
#################################################################################
#
# (c) Copyright 2011 William Stein
#
# This file is part of PSAGE
#
# PSAGE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PSAGE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
import sage.parallel.ncpus
from psage.lmfdb.auth import userpass
def populate_db(address, level_min, level_max, num_zeros=100,
ncpus=sage.parallel.ncpus.ncpus()):
"""
Compute and insert into the MongoDB database with the given
address the imaginary parts of the first num_zeros zeros of the
L-series of each optimal elliptic curve in the given level range.
Only curves with L0s not yet set are affected by this function.
The key on the database is "L0s".
"""
user, password = userpass()
import math, random
from sage.all import parallel, EllipticCurve
from sage.libs.lcalc.lcalc_Lfunction import Lfunction_from_elliptic_curve
level_min = int(level_min); level_max = int(level_max)
s = int(math.ceil((level_max - level_min)/float(ncpus)))
blocks = [(level_min+i*s, min(level_max,level_min+(i+1)*s)) for i in range(ncpus)]
@parallel(ncpus)
def f(l_min, l_max):
from pymongo import Connection
C = Connection(address).research
C.authenticate(user, password)
C = C.ellcurves
for v in C.find({'level':{'$gte':level_min, '$lt':level_max},
'number':1,
'L0s':{'$exists':False}}):
L = Lfunction_from_elliptic_curve(EllipticCurve(eval(v['weq'])), 10**5)
z = L.find_zeros_via_N(num_zeros)
L0s = dict([(str(i),float(z[i])) for i in range(len(z))])
C.update({'_id':v['_id']}, {'$set':{'L0s':L0s}})
for ans in f(blocks):
print ans
"""
EXAMPLE QUERIES:
from pymongo import Connection
db = Connection(port=int(29000)).research
e = db.ellcurves
v = e.find({'level':{'$lt':100r}, 'L0s':{'$exists':True}}, )
v.count()
7
This counts the number of optimal curves for which the 0-th zero (the
first one) is >1 and the number for which it is < 1.
sage: v = e.find({'level':{'$lt':100r}, 'L0s.0':{'$gt':int(1)}}, )
sage: v.count()
75
sage: v = e.find({'level':{'$lt':100r}, 'L0s.0':{'$lt':int(1)}}, )
sage: v.count()
17
"""
| Python |
#################################################################################
#
# (c) Copyright 2010 William Stein
#
# This file is part of PSAGE
#
# PSAGE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PSAGE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
from mfdb import MFDB
def reload():
    """Development helper: re-import every module of this package in place.

    Shadows the builtin name, so fetch the real reload out of __builtins__
    before using it on each submodule.
    """
    builtin_reload = __builtins__['reload']
    import mfdb
    builtin_reload(mfdb)
    import collection
    builtin_reload(collection)
    import converter
    builtin_reload(converter)
    import newforms
    builtin_reload(newforms)
    import objectdb
    builtin_reload(objectdb)
| Python |
#################################################################################
#
# (c) Copyright 2010 William Stein
#
# This file is part of PSAGE
#
# PSAGE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PSAGE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
"""
This module implements a simple key:value store using SQLite3 and
cPickle, and other useful tools built on top of it.
"""
import cPickle, sqlite3, zlib
# A key:value store
class SQLiteKeyValueStore:
    """
    A persistent key:value store backed by a single SQLite3 file.

    Keys and values are arbitrary picklable Python objects, serialized
    with cPickle protocol 2.  Values may optionally be zlib-compressed
    (recorded per-row in the 'compressed' column); keys are never
    compressed, so lookups work regardless of the compress setting in
    effect when a value was written.
    """
    def __init__(self, file, compress=False):
        """
        Create or open the SQLite3-based key:value database stored in the given file.
        INPUTS:
            - file -- string; the name of a file.
            - compress -- bool (default: False); if True, by default compress all
              pickled values using zlib
        You do not have to be consistent with the compress option.  The database will still
        work if you switch back and forth between compress=True and compress=False.
        """
        self._db = sqlite3.connect(file)
        self._cursor = self._db.cursor()
        self._file = file
        self._compress = compress
        try:
            # Probe sqlite_master; .next() raises StopIteration when the
            # database has no tables yet (i.e., it is brand new).
            self._cursor.execute("select * from sqlite_master").next()
        except StopIteration:
            # This exception will occur if the database is brand new (has no tables yet)
            try:
                # 'compressed' records per row whether the value blob is zlib-compressed.
                self._cursor.execute("CREATE TABLE cache (key BLOB, value BLOB, compressed INTEGER, UNIQUE(key))")
                self._cursor.execute("CREATE INDEX cache_idx ON cache(key)")
                self._db.commit()
            except sqlite3.OperationalError:
                pass # failure could happen if another process maybe created
                # and initialized the database at the same time. That's fine.
    def __del__(self):
        """Called when the database is freed to close the connection."""
        self._db.close()
    def __repr__(self):
        """String representation of the database."""
        return "SQLite3-based key:value database stored in '%s'"%self._file
    def has_key(self, key):
        """Returns True if database has the given key."""
        # Keys are stored pickled-but-uncompressed, so _dumps(key) with the
        # default compress=False always matches what was written.
        return self._cursor.execute( "SELECT count(*) FROM cache WHERE key=?", (self._dumps(key),) ).next()[0] > 0
    def __getitem__(self, key):
        """Return item in the database with given key, or raise KeyError."""
        s = self._cursor.execute( "SELECT value,compressed FROM cache WHERE key=?", (self._dumps(key),) )
        try:
            v = s.next()
            # v[1] is the per-row compressed flag stored by __setitem__.
            return self._loads(str(v[0]), bool(v[1]))
        except StopIteration:
            raise KeyError, str(key)
    def __setitem__(self, key, value):
        """Sets an item in the database. Call commit to make this permanent."""
        # INSERT OR REPLACE relies on the UNIQUE(key) constraint to upsert.
        self._cursor.execute("INSERT OR REPLACE INTO cache VALUES(?, ?, ?)", (
            self._dumps(key), self._dumps(value, self._compress), self._compress))
    def __delitem__(self, key):
        """Removes an item from the database. Call commit to make this permanent."""
        self._cursor.execute("DELETE FROM cache WHERE key=?", (self._dumps(key),) )
    def _dumps(self, x, compress=False):
        """Converts a Python object to a binary string (pickle protocol 2,
        optionally zlib-compressed) that can be stored in the database."""
        s = cPickle.dumps(x,2)
        if compress:
            s = zlib.compress(s)
        return sqlite3.Binary(s)
    def _loads(self, x, compress=False):
        """Used internally to turn a pickled (optionally compressed) blob
        from the database back into a Python object."""
        if compress:
            x = zlib.decompress(x)
        return cPickle.loads(x)
    def keys(self):
        """Return list of keys in the database.

        Keys are never compressed, so they are unpickled with the default
        compress=False.  Row order is whatever SQLite returns (no ORDER BY).
        """
        return [self._loads(str(x[0])) for x in self._cursor.execute( "SELECT key FROM cache" )]
    def commit(self):
        """Write assignments made to the database to disk."""
        self._db.commit()
def test_sqlite_keyval_1():
    """A straightforward round-trip test of SQLiteKeyValueStore.

    BUGFIX: os is now imported (and the temp filename created) before the
    try block; previously `import os` only happened late inside the try, so
    any assertion failure made the finally clause die with a NameError on
    `os`, masking the real failure.
    """
    import os, tempfile
    file = tempfile.mktemp()
    try:
        for compress in [False, True]:
            db = SQLiteKeyValueStore(file, compress)
            db[2] = 3
            db[10] = {1:5, '17a':[2,5]}
            assert db.keys() == [2,10]
            assert db[10] == {1:5, '17a':[2,5]}
            assert db[2] == 3
            db.commit()
            db[5] = 18 # intentionally NOT committed -- must not persist
            # Re-open with the opposite compress flag: existing data must
            # still read correctly, and the uncommitted key must be gone.
            db = SQLiteKeyValueStore(file, not compress)
            assert db.keys() == [2,10]
            assert db[10] == {1:5, '17a':[2,5]}
            assert db[2] == 3
            assert db.has_key(2)
            assert not db.has_key(3)
            del db
            os.unlink(file)   # start the next iteration from a fresh file
    finally:
        if os.path.exists(file):
            os.unlink(file)
# A SQLite cached function decorator
class sqlite_cached_function:
    """
    Decorator that persistently caches a function's results in SQLite.

    Use this like so::

        @sqlite_cached_function('/tmp/foo.sqlite', compress=True)
        def f(n,k=5):
            return n+k

    Then whenever you call f, the values are cached in the sqlite
    database /tmp/foo.sqlite.  This will persist across different
    sessions, of course.  Moreover, f.db is the underlying
    SQLiteKeyValueStore and f.keys() is a list of all keys computed
    so far (normalized by ArgumentFixer).
    """
    def __init__(self, file, compress=False):
        # One key:value store shared by every function this instance decorates.
        self.db = SQLiteKeyValueStore(file, compress=compress)
    def __call__(self, f):
        """Return decorated version of f."""
        from sage.misc.function_mangling import ArgumentFixer
        # ArgumentFixer canonicalizes (args, kwds) so equivalent calls
        # (e.g. f(2) and f(a=2)) map to the same cache key.
        A = ArgumentFixer(f)
        def g(*args, **kwds):
            k = A.fix_to_named(*args, **kwds)
            # EAFP: a cache hit returns immediately.
            try:
                return self.db[k]
            except KeyError: pass
            # Cache miss: compute, store, and commit right away so the value
            # survives interpreter exit and is visible to other processes.
            x = self.db[k] = f(*args, **kwds)
            self.db.commit()
            return x
        def keys():
            # All cache keys computed so far (ArgumentFixer-normalized).
            return self.db.keys()
        g.keys = keys
        g.db = self.db
        return g
def test_sqlite_cached_function_1():
    """Basic caching: results are correct on both cold and warm calls.

    BUGFIX: the temp filename is now created before the try block;
    previously `file = tempfile.mktemp()` was inside the try, so a failure
    there made the finally clause raise NameError on `file`.
    """
    import os, tempfile
    file = tempfile.mktemp()
    try:
        @sqlite_cached_function(file)
        def f(a, b=10):
            return a + b
        assert f(2) == 12
        assert f(2,4) == 6
        # Repeat: the answers now come from the cache and must be unchanged.
        assert f(2) == 12
        assert f(2,4) == 6
    finally:
        os.unlink(file)
def test_sqlite_cached_function_2():
    """Cached values persist to disk and are reused across decorator instances.

    BUGFIXES: (1) the temp filename is created before the try block so the
    finally clause cannot NameError on `file`; (2) the second definition of
    f was missing its `return a + b`, so an unexpected cache miss would have
    returned None instead of failing with a clear slow-path assertion.
    """
    import os, tempfile
    from sage.all import sleep, walltime
    file = tempfile.mktemp()
    try:
        @sqlite_cached_function(file, compress=True)
        def f(a, b=10):
            sleep(1)
            return a + b
        f(2)
        f(2,b=4)
        t = walltime()
        assert f(2) == 12
        assert f(b=4,a=2) == 6
        assert walltime() - t < 1, "should be fast!"
        # Make new cached function, which will now use the disk cache first.
        @sqlite_cached_function(file, compress=True)
        def f(a, b=10):
            sleep(1)
            return a + b
        t = walltime()
        assert f(2) == 12
        assert f(b=4,a=2) == 6
        assert walltime() - t < 1, "should be fast!"
    finally:
        os.unlink(file)
def test_sqlite_cached_function_3():
    """Concurrency stress test for the SQLite-backed function cache."""
    import tempfile
    db_file = tempfile.mktemp()
    try:
        from sage.all import parallel, sleep
        # Spawn 10 worker processes that all simultaneously create and write
        # to the same cache file, repeatedly.  This checks that concurrent
        # initialization and updates are handled robustly.
        @parallel(10)
        def worker(a, b=10):
            @sqlite_cached_function(db_file)
            def g(a, b):
                sleep(.5)
                return a + b
            return g(a, b)
        for outcome in worker(range(1,30)):
            # outcome is ((args, kwds), result); each result must equal a+10.
            assert outcome[1] == outcome[0][0][0] + 10
    finally:
        import os; os.unlink(db_file)
| Python |
#################################################################################
#
# (c) Copyright 2010 William Stein
#
# This file is part of PSAGE
#
# PSAGE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PSAGE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
"""
This module implements the ModularFormsDataBase (MFDB) class.
The MFDB class represents a connection to an MongoDB modular forms
database running on a local or remote server. It is instantianted
with a hostname and port, and just makes a MongoDB connection with
that port, then grabs a reference to the mfdb database there.
The newforms method returns a Python object that can be used to query
the database about classical GL2 newforms over QQ, and compute new
data about such newforms.
The backup method backs up the whole mfdb database.
"""
class MFDB:
def __init__(self, host='localhost', port=29000):
# Open conection to the MongoDB
from pymongo import Connection
self.connection = Connection(host, port)
self.db = self.connection.mfdb
self.port = port
self.host = host
from objectdb import ObjectDB
self.objectdb = ObjectDB(self.db)
def __repr__(self):
return "Modular Forms Database\n%s"%self.connection
def newforms(self):
"""Returns object that can be used for querying about GL2
newforms over QQ and populating the database with them."""
from newforms import NewformCollection
return NewformCollection(self.db.newforms, self)
def backup(self, outdir=None):
"""Dump the whole database to outdir. If outdir is None,
dumps to backup/year-month-day-hour-minute."""
import os
if outdir is None:
import time
outdir = os.path.join('backup',time.strftime('%Y%m%d-%H%M'))
cmd = 'time mongodump -h %s:%s -d mfdb -o "%s"'%(
self.host, self.port, outdir)
print cmd
os.system(cmd)
| Python |
#################################################################################
#
# (c) Copyright 2010 William Stein
#
# This file is part of PSAGE
#
# PSAGE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PSAGE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
"""
This module defines a light wrapper around a MongoDB collection in the
MFDB database.
"""
class Collection:
def __init__(self, collection, db):
self.collection = collection
self.db = db
def backup(self, outdir=None):
"""Dump this collection to outdir. If outdir is None,
dumps to backup/year-month-day-hour-minute."""
import os
if outdir is None:
import time
outdir = os.path.join('backup',time.strftime('%Y%m%d-%H%M'))
cmd = 'time mongodump -c "%s" -h %s:%s -d mfdb -o "%s"'%(
self.collection.name, self.db.host, self.db.port, outdir)
print cmd
os.system(cmd)
def find(self, *args, **kwds):
"""Perform a query on the collection. See the help for self.collection.find."""
return self.collection.find(*args, **kwds)
| Python |
#################################################################################
#
# (c) Copyright 2010 William Stein
#
# This file is part of PSAGE
#
# PSAGE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PSAGE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
"""
This module implement a class that represents the collection of
newforms.
The code in this module defines classes both for working with the
collection of all newforms in the database, and for populating this
collection with new data about newforms. The NewformCollection
class is instantiated by the main MFDB database object.
The classes for populating the newforms table further or in
turn instantiated by the NewformCollection object.
The newform collection has documents that contain data about newforms.
It also has a subcollection 'counts', which records the number of
newforms with given level, weight, and character. """
from collection import Collection
class NewformCollection(Collection):
    """
    The collection of newform documents, together with a 'counts'
    subcollection recording how many newforms each (level, weight,
    character) space contains.
    """
    def __repr__(self):
        return "Collection of newforms"
    def count(self):
        """Return number of newforms in the newforms collection."""
        return self.find().count()
    def spaces(self, key=0):
        """Return sorted (by either level (key=0), weight (key=1), etc.)
        list of triples
             (level, weight, character.order(), count)
        for which all count newforms in the corresponding space are known."""
        key = int(key)
        C = self.collection.counts
        # Query C for the 4-tuples, as described in the docstring above.
        Q = [(x['level'], x['weight'], x['character']['order'], x['count']) for x in
             C.find({},['level','weight','character.order','count'])]
        # Sort with a key function; the old cmp-based form produces the same
        # (stable) order but is slower and was removed in Python 3.
        Q.sort(key=lambda t: t[key])
        return Q
    def normalize(self, level, weight, character):
        """
        Return normalized level, weight, character, and a MongoDB
        document representing them.  Normalized means the level and
        weight are Python ints, and the character is a Sage Dirichlet
        character (in particular, it is not None).
        """
        from converter import to_db
        level = to_db(level)
        weight = to_db(weight)
        if character is None:
            # No character given means the trivial character of that level.
            from sage.all import trivial_character
            character = trivial_character(level)
        e = to_db(character)
        return level, weight, character, {'level':level, 'weight':weight, 'character':e}
    def populate_newform_eigenvalue_field(self):
        """Return object that organizes populating the eigenvalue field
        property of newform documents."""
        return PopulateNewformEigenvalueField(self)
    def populate_newforms(self):
        """Return object that organizes populating the newform
        documents.  Get this object if you want to add new newforms to
        the database."""
        return PopulateNewforms(self)
from populate import Populate
class PopulateNewforms(Populate):
def __repr__(self):
return "Populate Newforms"
def count(self):
"""Return number of newforms in the database (with degree field set)."""
return self.collection.newforms.find({'degree':{'$exists':True}}).count()
def populate(self, level, weight=2, character=None, verbose=True):
nc = self.collection # newform collection
level, weight, character, D = nc.normalize(level, weight, character)
if verbose: print D
# Check the counts subcollection
if nc.collection.counts.find(D).count() > 0:
# Don't bother
if verbose: print "Skipping since counts subcollection already has an entry."
return
from psage.modform.rational.newforms import degrees
degs = degrees(level, weight, character)
for num, d in enumerate(degs):
deg = int(d)
# Update the document for the given newform
query = dict(D)
query['number'] = num
nc.collection.update(query, {'$set':{'degree':deg}},
upsert=True, safe=True)
D['count'] = len(degs)
nc.collection.counts.insert(D, safe=True)
def populate_all_characters(self, level, weight, verbose=True):
from sage.all import DirichletGroup
G = DirichletGroup(level)
B = G.galois_orbits()
B = [character[0].minimize_base_ring() for character in B]
for character in B:
self.populate(level, weight, character, verbose)
def populate_quadratic_characters(self, level, weight, verbose=True):
from sage.all import DirichletGroup, QQ
G = DirichletGroup(level,QQ)
B = G.galois_orbits()
B = [character[0].minimize_base_ring() for character in B
if character[0].order()==2]
for character in B:
self.populate(level, weight, character, verbose)
class PopulateNewformEigenvalueField(Populate):
    """
    Populates the 'eigenvalue_field' key of newform documents: the Hecke
    eigenvalue field of each newform, stored as a defining polynomial string.
    """
    def __repr__(self):
        return "Populating newform Hecke eigenvalue fields"
    def count(self):
        """Return number of newforms with eigenvalue field computed."""
        return self.collection.collection.find({'eigenvalue_field':{'$exists':True}}).count()
    def populate_one(self, verbose=True):
        """
        Compute Hecke eigenvalues for one unknown level,weight,character.
        If all data is known, raise a ValueError.
        """
        # Find any one newform document still lacking an eigenvalue field.
        A = self.collection.collection.find_one({'eigenvalue_field':{'$exists':False}})
        if A is None:
            raise ValueError, "All Hecke eigenvalue fields are currently known."
        from converter import db_converter
        self.populate(A['level'], A['weight'],
                      db_converter.to_dirichlet_character(A['character']),
                      verbose=verbose)
    def populate(self, level, weight, character=None, verbose=True):
        """
        Compute and store the Hecke eigenvalue fields of every newform in
        the given space, skipping spaces with no known newforms or whose
        eigenvalue fields are already all recorded.
        """
        nc = self.collection
        level, weight, character, D = nc.normalize(level, weight, character)
        if verbose: print D
        # The counts subcollection tells us whether (and how many) newforms
        # in this space are known at all.
        C = nc.collection.counts.find(D)
        if C.count() == 0:
            if verbose: print "Skipping -- no newforms known yet (counts=0)"
            return
        cnt = C.next()['count']
        if cnt == 0:
            # There are no newforms, so don't do any further work.
            if verbose: print "No newforms"
            return
        # Now check to see if all of the eigenvalue fields are known
        # by doing a query for all forms of the given level, weight, and
        # character for which the eigenvalue_field key is set.
        E = dict(D)
        E['eigenvalue_field'] = {'$exists':True}
        if nc.collection.find(E).count() == cnt:
            if verbose: print "All eigenvalue fields already known"
            return
        from psage.modform.rational.newforms import eigenvalue_fields
        from sage.all import set_random_seed, QQ
        # Fix the random seed so recomputed fields are reproducible.
        set_random_seed(0)
        fields = eigenvalue_fields(level, weight, character)
        for num, K in enumerate(fields):
            # Update the document for the given newform
            query = dict(D)
            query['number'] = num
            if K == QQ:
                # Normalized representation of the rational field.
                f = 'x-1'
            else:
                # Store the defining polynomial with whitespace stripped.
                f = str(K.defining_polynomial()).replace(' ','')
            if verbose:
                print f
            nc.collection.update(query, {'$set':{'eigenvalue_field':f}}, safe=True)
| Python |
#################################################################################
#
# (c) Copyright 2010 William Stein
#
# This file is part of PSAGE
#
# PSAGE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PSAGE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
"""
This module implements an object that is used to coordinate populating
the database.
This is an abstract base class for other classes, e.g., for populating
the database with newforms.
"""
class Populate:
    """
    Abstract base class coordinating population of a database collection.

    Subclasses must implement count() (number of documents already
    populated) and populate_one(verbose=...) (populate one more unit).
    """
    def __init__(self, collection):
        # The collection wrapper being populated (must provide count()).
        self.collection = collection
    def percent_done(self):
        """Return the percentage (0-100) of documents already populated.

        An empty collection is reported as 100% done (BUGFIX: previously
        raised ZeroDivisionError)."""
        total = self.collection.count()
        if total == 0:
            return 100.0
        return 100*float(self.count()) / total
    def populate_all(self, verbose=True):
        """Call populate_one repeatedly until everything is populated."""
        while True:
            if self.count() == self.collection.count():
                break
            d = self.percent_done()
            if verbose: print("Percent done: %.2f%%"%d)
            self.populate_one(verbose=verbose)
| Python |
"""
(c) Copyright 2009-2010 Salman Baig and Chris Hall
This file is part of ELLFF
ELLFF is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ELLFF is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
#######################
# ELLFF DATABASE CODE #
#######################
import sage.databases.db
from sage.rings.all import PolynomialRing, GF
class jCurveDatabase(sage.databases.db.Database):
    """ZODB-backed database of euler tables for the versal j-curve,
    keyed as self[q][n] -> euler table over F_{q^n}."""
    def __init__(self, read_only=True):
        """
        Initialize the database.
        INPUT:
            - ``read_only`` - bool (default: True), if True, then
              the database is read_only and changes cannot be committed to
              disk.
        """
        sage.databases.db.Database.__init__(self, name='jcurve_euler_tables', read_only=read_only)
    # may not need __getitem__
    """
    def __getitem__(self, key):
        If key=q is an integer, return all data about FF_q(t) in the database.
        If key=(q,n) is a pair of integers, return corresponding euler table,
        if it is in the database.
        INPUT:
            - ``key`` - int or list of two ints
        OUTPUT: dict (if key is an int) or list (if key is a list)
        if isinstance(key, list) and len(key) > 1:
            return sage.databases.db.Database.__getitem__(self, key[0])[key[1]]
        return sage.databases.db.Database.__getitem__(self, key)
    """
    def __repr__(self):
        """
        String representation of this database.  OUTPUT: str
        """
        return 'Database of euler tables for the versal j-curve'
    # TODO: which q are in db, which n are in db given q
    def _update_(self, q, n, table, force=False, verbose=False):
        r"""
        Upate the database for self over $\mathbb{F}_{q^n}$, forcing
        overwrite if so desired.  If the table already exists and is
        forced to be overwritten, the two tables are compared for
        equality.  If they are not equal, the old table is replaced
        with the new one.
        INPUT:
            - q -- an integer the size of F_q
            - n -- the degree of the extension of F_q
            - force -- boolean that forces overwrite
        NOTE(review): the `and force` below means that when the table
        exists and force is False, control falls into the else branch and
        the table is overwritten anyway -- force is effectively ignored.
        _save_euler_table documents that it "merely writes over" existing
        tables, so this behavior is left unchanged; compare with
        LocalEulerDatabase._update_, which only overwrites when force=True.
        """
        if self.read_only:
            raise RuntimeError, 'The database must not be read_only.'
        if self.has_key(q):
            if self[q].has_key(n) and force:
                if verbose:
                    print 'Already have this table; forcing overwrite'
                if not self[q][n] == table:
                    print 'Tables mismatch; replacing preexisting table with new given one'
                self[q][n] = table
                self.changed(q)
                # self[q][n] = self[q][n] # so database knows that self[q][n] changed
            else:
                # Table absent (or force not given): write it unconditionally.
                self[q][n] = table
                self.changed(q)
                # self[q][n] = self[q][n] # so database knows that self[q][n] changed
        else:
            # First table for this q: create the per-q dict.
            self[q] = {}
            self[q][n] = table
            self.changed(q)
            # self[q][n] = self[q][n] # so database knows that self[q][n] changed
        self.commit()
_jdb = None
def jCurveEulerTables(read_only=False):
    r"""
    Return the global database of euler factors for the versal j-curve,
    creating it on first use (module-level singleton).

    NOTE: ``read_only`` is only honored on the very first call; later
    calls return the cached instance unchanged.
    """
    global _jdb
    # `is None` identity test replaces the redundant !=None / ==None pair.
    if _jdb is None:
        _jdb = jCurveDatabase(read_only)
    return _jdb
class LocalEulerDatabase(sage.databases.db.Database):
def __init__(self, read_only=True):
"""
Initialize the database.
INPUT:
- ``read_only`` - bool (default: True), if True, then
the database is read_only and changes cannot be committed to
disk.
"""
sage.databases.db.Database.__init__(self, name='local_euler_tables', read_only=read_only)
def __repr__(self):
"""
String representation of this database. OUTPUT: str
"""
return 'Database of euler tables for user curves'
# TODO: which q are in db, which n are in db given q, which ainvs are in db
def _update_(self, ainvs, q, n, table, force=False, verbose=False):
r"""
Upate the database for self over $\mathbb{F}_{q^n}$, forcing
overwrite if so desired. If the table already exists and is
forced to be overwritten, the two tables are compared for
equality. If they are not equal, the old table is replaced
with the new one.
INPUT:
- q -- an integer the size of F_q
- n -- the degree of the extension of F_q
- force -- boolean that forces overwrite
"""
if self.read_only:
raise RuntimeError, 'The database must not be read_only.'
if self.has_key(ainvs):
if self[ainvs].has_key(q):
if self[ainvs][q].has_key(n):
if verbose:
print 'Already have this table;',
if force:
if verbose:
print 'forcing overwrite'
if not self[ainvs][q][n] == table:
print 'Tables mismath; replacing preexisting table with new given one'
self[ainvs][q][n] = table
self.changed(ainvs)
else:
self[ainvs][q][n] = table
self.changed(ainvs)
else:
self[ainvs][q] = {}
self[ainvs][q][n] = table
self.changed(ainvs)
else:
self[ainvs] = {}
self[ainvs][q] = {}
self[ainvs][q][n] = table
self.changed(ainvs)
self.commit()
_ldb = None
def LocalEulerTables(read_only=False):
    r"""
    Return the global database of euler factors for `user` curves,
    creating it on first use (module-level singleton).

    NOTE: ``read_only`` is only honored on the very first call; later
    calls return the cached instance unchanged.
    """
    global _ldb
    # `is None` identity test replaces the redundant !=None / ==None pair.
    if _ldb is None:
        _ldb = LocalEulerDatabase(read_only)
    return _ldb
def _save_euler_table(self, n, verbose=False):
r"""
Save the euler table for self over the degree n extension of
$\mathbb{F}_q$ to disk. This is currently implemented with
sage.database.db, which uses ZODB. If self is the versal j-curve,
it stores the table in the database
SAGE_ROOT/data/jcurve_euler_tables .
Otherwise, the tables are stored in the `user` table
SAGE_ROOT/data/local_euler_tables .
It currently doesn't check if the table already is stored; it
merely writes over it in that case. This should eventually be
implemented using MongoDB.
INPUT:
- n -- the degree of the extension of F_q
EXAMPLES::
sage: import psage
sage: K.<t> = psage.FunctionField(GF(11))
sage: E = psage.ellff_EllipticCurve(K,[0,0,0,-27*t/(t-1728),54*t/(t-1728)])
sage: E._build_euler_table(1)
sage: E._euler_table(1)
[0, 0, 4, -6, 3, 5, 1, -2, 4, -2, 3, 1]
sage: E._build_euler_table(2)
sage: E._build_euler_table(3)
sage: E._euler_table(1)
[0, 0, 4, -6, 3, 5, 1, -2, 4, -2, 3, 1]
sage: E._save_euler_table(1)
sage: E._save_euler_table(2)
sage: E._save_euler_table(3)
"""
import os
SAGE_ROOT = os.environ['SAGE_ROOT']
K = self.K
R = self.R
t = K.gens()[0]
p = self.p
d = self.d
q = self.q
R2 = PolynomialRing(GF(q), 's')
a1n = R2(0)
a1d = R2(1)
a2n = R2(0)
a2d = R2(1)
a3n = R2(0)
a3d = R2(1)
a4n = R2(self.a4.numerator().coeffs())
a4d = R2(self.a4.denominator().coeffs())
a6n = R2(self.a6.numerator().coeffs())
a6d = R2(self.a6.denominator().coeffs())
ainvs = [0, 0, 0, self.a4, self.a6]
ainvs_pairs = ((a1n, a1d), (a2n, a2d), (a3n, a3d), (a4n, a4d), (a6n, a6d))
# recognize if self is j-curve and use special repository
if ainvs == [0,0,0,-27*t*(t-1728)**3,54*t*(t-1728)**5]:
if verbose:
print 'j-curve recognized; saving euler table to database'
if not os.path.exists(SAGE_ROOT + '/data/jcurve_euler_tables/jcurve_euler_tables'):
print 'Database does not exist; starting a new one'
if not os.path.exists(SAGE_ROOT + '/data/jcurve_euler_tables'):
os.makedirs(SAGE_ROOT + '/data/jcurve_euler_tables/')
filedb = open(SAGE_ROOT + '/data/jcurve_euler_tables/jcurve_euler_tables', "wb")
filedb.close()
euler_db = jCurveEulerTables(read_only = False)
euler_db._update_(q, n, self._euler_table(n))
if verbose:
print euler_db.as_dict()
euler_db.commit()
# work with user's repository of euler tables
else:
if not os.path.exists(SAGE_ROOT + '/data/local_euler_tables/local_euler_tables'):
print 'Database does not exist; creating a new one'
if not os.path.exists(SAGE_ROOT + '/data/local_euler_tables'):
os.makedirs(SAGE_ROOT + '/data/jcurve_euler_tables/')
filedb = open(SAGE_ROOT + '/data/local_euler_tables/local_euler_tables', "wb")
filedb.close()
local_euler_db = LocalEulerTables(read_only = False)
local_euler_db._update_(ainvs_pairs, q, n, self._euler_table(n))
if verbose:
print local_euler_db.as_dict()
local_euler_db.commit()
def _load_euler_table(self, n, force=False, verbose=False):
r"""
Load the euler table for self over the degree n extension of
$\mathbb{F}_q$ to disk. If self is the versal j-curve, the table
is pulled from
SAGE_ROOT/data/jcurve_euler_tables .
Otherwise, the table is pulled from the `user` table
SAGE_ROOT/data/local_euler_tables .
This should eventually be implemented using MongoDB.
It currently doesn't check if the key exist. If the key doesn't
exist, a RuntimeError is raised by sage.database.db. This
RuntimeError should be sufficient, so key checking may not be
necessary.
INPUT:
- n -- the degree of the extension of F_q
- force -- boolean that overwrites self's euler table with
one from database
EXAMPLES::
sage: import psage
sage: K.<t> = psage.FunctionField(GF(11))
sage: E = psage.ellff_EllipticCurve(K,[0,0,0,-27*t/(t-1728),54*t/(t-1728)])
sage: E._euler_table(1)
Traceback (most recent call last):
...
RuntimeError: table is empty
sage: E._load_euler_table(1)
sage: E._euler_table(1)
[0, 0, 4, -6, 3, 5, 1, -2, 4, -2, 3, 1]
"""
import os
SAGE_ROOT = os.environ['SAGE_ROOT']
K = self.K
R = self.R
t = K.gens()[0]
p = self.p
d = self.d
q = self.q
R2 = PolynomialRing(GF(q), 's')
s = R2.gens()[0]
a1n = R2(0)
a1d = R2(1)
a2n = R2(0)
a2d = R2(1)
a3n = R2(0)
a3d = R2(1)
a4n = R2(self.a4.numerator().coeffs())
a4d = R2(self.a4.denominator().coeffs())
a6n = R2(self.a6.numerator().coeffs())
a6d = R2(self.a6.denominator().coeffs())
ainvs = [0, 0, 0, self.a4, self.a6]
ainvs_pairs = ((a1n, a1d), (a2n, a2d), (a3n, a3d), (a4n, a4d), (a6n, a6d))
# recognize if self is j-curve and use special repository
if ainvs == [0,0,0,-27*t*(t-1728)**3,54*t*(t-1728)**5]:
if verbose:
print 'j-curve recognized; saving euler table to database'
if not os.path.exists(SAGE_ROOT + '/data/jcurve_euler_tables/jcurve_euler_tables'):
print 'Database does not exist; cannot load from it'
else:
euler_db = jCurveEulerTables()
# check that keys exist?
self._set_euler_table(n, euler_db[q][n], force)
# work with user's repository of euler tables
else:
if not os.path.exists(SAGE_ROOT + '/data/local_euler_tables/local_euler_tables'):
print 'Database does not exist; cannot load from it'
else:
local_euler_db = LocalEulerTables()
# check that keys exist?
self._set_euler_table(n, local_euler_db[ainvs_pairs][q][n], force)
| Python |
from ellff import ellff_EllipticCurve
| Python |
#################################################################################
#
# (c) Copyright 2010 William Stein
#
# This file is part of PSAGE
#
# PSAGE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PSAGE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
# Build-environment sanity checks and global flags shared by every extension.
import os, sys

# PSAGE relies on 64-bit arithmetic throughout; bail out early on 32-bit hosts.
if sys.maxint != 2**63 - 1:
    print "*"*70
    print "The PSAGE library only works on 64-bit computers. Terminating build."
    print "*"*70
    sys.exit(1)

import build_system

SAGE_ROOT = os.environ['SAGE_ROOT']
SAGE_LOCAL = os.environ['SAGE_LOCAL']

# Header search paths: Sage's own source tree plus its installed include dirs.
INCLUDES = ['%s/%s/'%(SAGE_ROOT,x) for x in
            ('devel/sage/sage/ext', 'devel/sage', 'devel/sage/sage/gsl')] \
         + ['%s/%s/'%(SAGE_LOCAL,x) for x in
             ('include/csage', 'include', 'include/python')]

# 'sage -ba'-style flag: force recompilation of every Cython extension.
if '-ba' in sys.argv:
    print "Rebuilding all Cython extensions."
    sys.argv.remove('-ba')
    FORCE = True
else:
    FORCE = False
def Extension(*args, **kwds):
    """Wrap build_system.Extension, injecting the project-wide defaults.

    Ensures every extension gets the common INCLUDES search path, the
    global FORCE rebuild flag, a '-w' compile flag, and links csage first.

    Accepts the same arguments as build_system.Extension and returns the
    constructed extension object.
    """
    # 'in' test instead of the deprecated dict.has_key() (removed in Py3).
    if 'include_dirs' not in kwds:
        kwds['include_dirs'] = INCLUDES
    else:
        kwds['include_dirs'] += INCLUDES
    # setdefault() replaces the explicit has_key/assign dance.
    kwds.setdefault('force', FORCE)
    # Disable warnings when running GCC step -- cython has already parsed the code and
    # generated any warnings; the GCC ones are noise.
    kwds.setdefault('extra_compile_args', []).append('-w')
    E = build_system.Extension(*args, **kwds)
    # csage must come first on the link line.
    E.libraries = ['csage'] + E.libraries
    return E
# Location of numpy's C headers inside the Sage install, for extensions
# that work with numpy arrays at the C level.
numpy_include_dirs = [os.path.join(SAGE_LOCAL,
                                   'lib/python/site-packages/numpy/core/include')]

# Core PSAGE extension modules, always built.
ext_modules = [
    Extension("psage.ellff.ellff",
              ["psage/ellff/ellff.pyx",
               "psage/ellff/ell.cpp",
               "psage/ellff/ell_surface.cpp",
               "psage/ellff/euler.cpp",
               "psage/ellff/helper.cpp",
               "psage/ellff/jacobi.cpp",
               "psage/ellff/lzz_pEExtra.cpp",
               "psage/ellff/lzz_pEratX.cpp"],
              language = 'c++'),
    Extension("psage.function_fields.function_field_element",
              ["psage/function_fields/function_field_element.pyx"]),
    Extension("psage.modform.jacobiforms.jacobiformd1nn_fourierexpansion_cython",
              ["psage/modform/jacobiforms/jacobiformd1nn_fourierexpansion_cython.pyx"]),
    Extension("psage.modform.siegel.fastmult",
              ["psage/modform/siegel/fastmult.pyx"]),
    Extension('psage.modform.maass.mysubgroups_alg',
              ['psage/modform/maass/mysubgroups_alg.pyx'],
              libraries = ['m','gmp','mpfr','mpc']),
    Extension('psage.modform.maass.maass_forms_alg',
              ['psage/modform/maass/maass_forms_alg.pyx'],
              libraries = ['m','gmp','mpfr','mpc'],
              include_dirs = numpy_include_dirs),
    Extension("psage.modform.rational.modular_symbol_map",
              ["psage/modform/rational/modular_symbol_map.pyx"]),
    Extension("psage.modform.rational.padic_elliptic_lseries_fast",
              ["psage/modform/rational/padic_elliptic_lseries_fast.pyx"]),
    Extension("psage.modform.hilbert.sqrt5.sqrt5_fast",
              ["psage/modform/hilbert/sqrt5/sqrt5_fast.pyx"],
              libraries = ['ntl', 'gmp'],
              language = 'c++'),
    Extension("psage.ellcurve.lseries.sqrt5",
              ["psage/ellcurve/lseries/sqrt5.pyx"],
              libraries = ['ntl', 'gmp'],
              language = 'c++'),
    Extension("psage.ellcurve.lseries.helper",
              ["psage/ellcurve/lseries/helper.pyx"]),
    Extension('psage.ellcurve.galrep.wrapper',
              sources = ['psage/ellcurve/galrep/wrapper.pyx', 'psage/ellcurve/galrep/galrep.c'],
              libraries = ['gmp']),
    Extension('psage.ellcurve.minmodel.sqrt5',
              sources = ['psage/ellcurve/minmodel/sqrt5.pyx'],
              libraries = ['gmp']),
    Extension('psage.rh.mazur_stein.game',
              sources = ['psage/rh/mazur_stein/game.pyx']),
    Extension('psage.rh.mazur_stein.book_cython',
              sources = ['psage/rh/mazur_stein/book_cython.pyx']),
    Extension("psage.ellcurve.lseries.fast_twist",
              ["psage/ellcurve/lseries/fast_twist.pyx"],
              libraries = ['gsl']),
    Extension("psage.ellcurve.lseries.aplist_sqrt5",
              ["psage/ellcurve/lseries/aplist_sqrt5.pyx"],
              language = 'c++'),
    Extension("psage.number_fields.sqrt5.prime",
              ["psage/number_fields/sqrt5/prime.pyx"],
              libraries = ['pari']),
    # special_fast links directly against Sage's bundled FLINT sources.
    Extension("psage.modform.rational.special_fast",
              ["psage/modform/rational/special_fast.pyx", SAGE_ROOT + "/devel/sage/sage/libs/flint/fmpq_poly.c"],
              libraries = ['gmp', 'flint'],
              language = 'c++',
              include_dirs = [SAGE_LOCAL + '/include/FLINT/', SAGE_ROOT + '/devel/sage/sage/libs/flint/'],
              extra_compile_args = ['-std=c99']),
    Extension("psage.ellcurve.xxx.rankbound",
              sources = [ 'psage/ellcurve/xxx/rankbound.pyx',
                          'psage/ellcurve/xxx/rankbound_.cc',
                          'psage/ellcurve/xxx/mathlib.cc',
                          'psage/libs/smalljac/wrapper_g1.c'],
              libraries = ['gmp', 'm'],
              include_dirs = ['psage/libs/smalljac/'],
              language = 'c'
              )
]
## Fredrik Stroemberg: my additional modules.
# These require the optional mpc and mpmath Sage packages; only built when
# both are installed.
from sage.misc.package import is_package_installed
if is_package_installed('mpc') and is_package_installed('mpmath'):
    my_extensions = [
        Extension('psage.modform.maass.inc_gamma',
                  ['psage/modform/maass/inc_gamma.pyx'],
                  libraries = ['m','gmp','mpfr','mpc']),
        # NOTE(review): mysubgroups_alg is also listed in ext_modules above --
        # confirm the duplicate entry is intentional.
        Extension('psage.modform.maass.mysubgroups_alg',
                  ['psage/modform/maass/mysubgroups_alg.pyx'],
                  libraries = ['m','gmp','mpfr','mpc']),
        Extension('psage.modform.maass.permutation_alg',
                  ['psage/modform/maass/permutation_alg.pyx'],
                  libraries = ['m','gmp','mpfr','mpc']),
        Extension('psage.modform.maass.pullback_algorithms',
                  ['psage/modform/maass/pullback_algorithms.pyx'],
                  libraries = ['m','gmp','mpfr','mpc'],
                  include_dirs = numpy_include_dirs),
        Extension('psage.modform.maass.automorphic_forms_alg',
                  ['psage/modform/maass/automorphic_forms_alg.pyx'],
                  libraries = ['m','gmp','mpfr','mpc'],
                  include_dirs = numpy_include_dirs),
        Extension('psage.modform.maass.hilbert_modular_group_alg',
                  ['psage/modform/maass/hilbert_modular_group_alg.pyx'],
                  libraries = ['m','gmp','mpfr','mpc'],
                  include_dirs = numpy_include_dirs),
        Extension('psage.zfunctions.selberg_z_alg',
                  ['psage/zfunctions/selberg_z_alg.pyx'],
                  libraries = ['m','gmp','mpfr','mpc'],
                  include_dirs = numpy_include_dirs),
        Extension('psage.modform.maass.vv_harmonic_weak_maass_forms_alg',
                  ['psage/modform/maass/vv_harmonic_weak_maass_forms_alg.pyx'],
                  libraries = ['m','gmp','mpfr','mpc'],
                  include_dirs = numpy_include_dirs),
        Extension('psage.rings.mpc_extras',
                  sources = ['psage/rings/mpc_extras.pyx'],
                  libraries = ['m','gmp','mpfr','mpc']),
        #Extension('psage.modform.maass.maass_forms_alg',
        #          sources=['psage/modform/maass/maass_forms_alg.pyx'],
        #          libraries = ['m','gmp','mpfr','mpc'],
        #          include_dirs = numpy_include_dirs),
        Extension('psage.modform.maass.maass_forms_phase2',
                  sources=['psage/modform/maass/maass_forms_phase2.pyx'],
                  libraries = ['m','gmp','mpfr','mpc'],
                  include_dirs = numpy_include_dirs),
        Extension('psage.modform.maass.lpkbessel',
                  ['psage/modform/maass/lpkbessel.pyx']),
        Extension('psage.modules.vector_complex_dense',
                  sources = ['psage/modules/vector_complex_dense.pyx'],
                  libraries = ['m','gmp','mpfr','mpc']),
        Extension('psage.modules.vector_real_mpfr_dense',
                  sources = ['psage/modules/vector_real_mpfr_dense.pyx'],
                  libraries = ['m','gmp','mpfr','mpc']),
        Extension('psage.modules.weil_module_alg',
                  sources = ['psage/modules/weil_module_alg.pyx'],
                  libraries = ['m','gmp','mpfr','mpc']),
        Extension('psage.matrix.matrix_complex_dense',
                  sources = ['psage/matrix/matrix_complex_dense.pyx'],
                  libraries = ['m','gmp','mpfr','mpc']),
        Extension('psage.matrix.linalg_complex_dense',
                  sources = ['psage/matrix/linalg_complex_dense.pyx'],
                  libraries = ['m','gmp','mpfr','mpc']),
        Extension('psage.modform.maass.poincare_series_alg',
                  ['psage/modform/maass/poincare_series_alg.pyx'],
                  libraries = ['m','gmp','mpfr','mpc']),
        Extension('psage.modform.maass.poincare_series_alg_vv',
                  ['psage/modform/maass/poincare_series_alg_vv.pyx'],
                  libraries = ['m','gmp','mpfr','mpc']),
        Extension('psage.modform.maass.eisenstein_series',
                  ['psage/modform/maass/eisenstein_series.pyx'],
                  libraries = ['m','gmp','mpfr','mpc'],
                  include_dirs = numpy_include_dirs)]
    ext_modules.extend(my_extensions)

# smalljac gets one wrapper per genus, each combining the Cython wrapper
# with the corresponding generated C source.
for g in [1, 2]:
    e = Extension('psage.libs.smalljac.wrapper%s'%g,
                  sources = ['psage/libs/smalljac/wrapper%s.pyx'%g,
                             'psage/libs/smalljac/wrapper_g%s.c'%g],
                  libraries = ['gmp', 'm'])
    ext_modules.append(e)
# I just had a long chat with Robert Bradshaw (a Cython dev), and he
# told me the following functionality -- turning an Extension with
# Cython code into one without -- along with proper dependency
# checking, is now included in the latest development version of
# Cython (Nov 2, 2010). It's supposed to be a rewrite he did of the
# code in the Sage library. Hence once that gets released, we should
# switch to using it here.
build_system.cythonize(ext_modules)

# Package metadata and the full list of pure-Python packages to install.
build_system.setup(
    name = 'psage',
    version = "2011.01.06",
    description = "PSAGE: Software for Arithmetic Geometry",
    author = 'William Stein',
    author_email = 'wstein@gmail.com',
    url = 'http://purple.sagemath.org',
    license = 'GPL v2+',
    packages = ['psage',
                'psage.ellcurve',
                'psage.ellcurve.lseries',
                'psage.ellff',
                'psage.function_fields',
                'psage.lmfdb',
                'psage.lmfdb.ellcurves',
                'psage.lmfdb.ellcurves.sqrt5',
                'psage.matrix',
                'psage.modform',
                'psage.modules',
                'psage.modform.fourier_expansion_framework',
                'psage.modform.fourier_expansion_framework.gradedexpansions',
                'psage.modform.fourier_expansion_framework.modularforms',
                'psage.modform.fourier_expansion_framework.monoidpowerseries',
                'psage.modform.hilbert',
                'psage.modform.hilbert.sqrt5',
                'psage.modform.rational',
                'psage.modform.siegel',
                'psage.modform.jacobiforms',
                'psage.modform.maass',
                'psage.number_fields',
                'psage.number_fields.sqrt5',
                'psage.rh',
                'psage.rh.mazur_stein',
                'psage.zfunctions',
                ],
    platforms = ['any'],
    download_url = 'NA',
    ext_modules = ext_modules
)
| Python |
#!/usr/bin/env python
"""CGI endpoint emitting a JS snippet that measures client/server clock skew."""
import time

now = time.time()
http_date = time.strftime('%a, %e %b %Y %T GMT', time.gmtime(now))

# Uncacheable response so the client always gets a fresh server timestamp.
print('Content-Type: text/javascript')
print('Cache-Control: no-cache')
print('Date: ' + http_date)
print('Expires: ' + http_date)
print('')
print('var timeskew = new Date().getTime() - ' + str(now*1000) + ';')
| Python |
'''
Module which prompts the user for translations and saves them.

TODO: implement

@author: Rodrigo Damazio
'''


class Translator(object):
    '''
    Interactive translation prompter for a single target language.

    Currently a stub: Translate only echoes the pending string names.
    '''

    def __init__(self, language):
        '''
        Constructor

        @param language: the language code this translator works on
        '''
        self._language = language

    def Translate(self, string_names):
        # TODO: prompt the user for each name and persist the answers.
        print(string_names)
'''
Module which brings history information about files from Mercurial.
@author: Rodrigo Damazio
'''
import re
import subprocess
REVISION_REGEX = re.compile(r'(?P<hash>[0-9a-f]{12}):.*')
def _GetOutputLines(args):
'''
Runs an external process and returns its output as a list of lines.
@param args: the arguments to run
'''
process = subprocess.Popen(args,
stdout=subprocess.PIPE,
universal_newlines = True,
shell = False)
output = process.communicate()[0]
return output.splitlines()
def FillMercurialRevisions(filename, parsed_file):
    '''
    Fills the revs attribute of all strings in the given parsed file with
    a list of revisions that touched the lines corresponding to that string.

    @param filename: the name of the file to get history for
    @param parsed_file: the parsed file to modify
    @raise ValueError: if 'hg annotate' produces an unparseable line
    '''
    # Take output of hg annotate to get revision of each line
    output_lines = _GetOutputLines(['hg', 'annotate', '-c', filename])
    # Create a map of line -> revision (key is list index, line 0 doesn't exist)
    line_revs = ['dummy']
    for line in output_lines:
        rev_match = REVISION_REGEX.match(line)
        if not rev_match:
            # BUG FIX: 'raise <string>' is illegal in Python >= 2.6; raise a
            # real exception type instead of a bare string.
            raise ValueError('Unexpected line of output from hg: %s' % line)
        line_revs.append(rev_match.group('hash'))
    # 'entry' was previously named 'str', shadowing the builtin.
    for entry in parsed_file.itervalues():
        # Revisions that touched this string's line span (inclusive).
        revs = [line_revs[line_number]
                for line_number in range(entry['startLine'], entry['endLine'] + 1)]
        # Merge with any revisions that were already there
        # (for explict revision specification)
        if 'revs' in entry:
            revs += entry['revs']
        # Assign the revisions to the string
        entry['revs'] = frozenset(revs)
def DoesRevisionSuperceed(filename, rev1, rev2):
    '''
    Tells whether a revision superceeds another.

    This essentially means that the older revision is an ancestor of the
    newer one; two identical revisions also superceed each other.

    @param rev1: the revision that may be superceeding the other
    @param rev2: the revision that may be superceeded
    @return: True if rev1 superceeds rev2 or they're the same
    '''
    if rev1 == rev2:
        return True
    # TODO: Add filename
    ancestor_args = ['hg', 'log', '-r', 'ancestors(%s)' % rev1,
                     '--template', '{node|short}\n', filename]
    return rev2 in _GetOutputLines(ancestor_args)
def NewestRevision(filename, rev1, rev2):
    '''
    Returns which of two revisions is closest to the head of the repository.

    If neither revision is an ancestor of the other, either one may be
    returned.

    @param rev1: the first revision
    @param rev2: the second revision
    '''
    return rev1 if DoesRevisionSuperceed(filename, rev1, rev2) else rev2
'''
Module which parses a string XML file.
@author: Rodrigo Damazio
'''
from xml.parsers.expat import ParserCreate
import re
#import xml.etree.ElementTree as ET
class StringsParser(object):
    '''
    Parser for string XML files.

    This object is not thread-safe and should be used for parsing a single
    file at a time, only.  Parse() returns a dict mapping each string name
    to a dict with 'value', 'startLine', 'endLine' and optionally 'revs' /
    'keepParent' entries.
    '''

    # Matches the KEEP_PARENT annotation that may appear inside comments:
    #   <!-- KEEP_PARENT name="bla" -->
    #   <!-- KEEP_PARENT name="bla" rev="123456789012" -->
    # Such an annotation means the string is explicitly inherited from the
    # master file (optionally up to the given revision).
    _KEEP_PARENT_REGEX = re.compile(r'\s*KEEP_PARENT\s+'
                                    r'name\s*=\s*[\'"]?(?P<name>[a-z0-9_]+)[\'"]?'
                                    r'(?:\s+rev=[\'"]?(?P<rev>[0-9a-f]{12})[\'"]?)?\s*',
                                    re.MULTILINE | re.DOTALL)

    def Parse(self, file):
        '''
        Parses the given file and returns the name -> info dictionary
        described on the class.

        @param file: the name of the file to parse
        '''
        self._Reset()
        # Unfortunately expat is the only parser that will give us line numbers.
        parser = ParserCreate()
        parser.StartElementHandler = self._StartElementHandler
        parser.EndElementHandler = self._EndElementHandler
        parser.CharacterDataHandler = self._CharacterDataHandler
        parser.CommentHandler = self._CommentHandler
        self._xml_parser = parser
        file_obj = open(file)
        parser.ParseFile(file_obj)
        file_obj.close()
        return self._all_strings

    def _Reset(self):
        # Clear all per-file parsing state.
        self._cur_entry = None
        self._cur_name = None
        self._cur_value = None
        self._all_strings = {}

    def _StartElementHandler(self, name, attrs):
        # Only named <string> elements are of interest.
        if name != 'string':
            return
        if 'name' not in attrs:
            return
        assert not self._cur_entry
        assert not self._cur_name
        entry = {'startLine': self._xml_parser.CurrentLineNumber}
        if 'rev' in attrs:
            entry['revs'] = [attrs['rev']]
        self._cur_entry = entry
        self._cur_name = attrs['name']
        self._cur_value = ''

    def _EndElementHandler(self, name):
        if name != 'string':
            return
        assert self._cur_entry
        assert self._cur_name
        entry = self._cur_entry
        entry['value'] = self._cur_value
        entry['endLine'] = self._xml_parser.CurrentLineNumber
        self._all_strings[self._cur_name] = entry
        self._cur_entry = self._cur_name = self._cur_value = None

    def _CharacterDataHandler(self, data):
        # Accumulate text only while inside a <string> element.
        if self._cur_entry:
            self._cur_value += data

    def _CommentHandler(self, data):
        match = self._KEEP_PARENT_REGEX.match(data)
        if not match:
            return
        line = self._xml_parser.CurrentLineNumber
        info = {
            'keepParent': True,
            'startLine': line,
            'endLine': line,
        }
        rev = match.group('rev')
        if rev:
            info['revs'] = [rev]
        self._all_strings[match.group('name')] = info
#!/usr/bin/python
'''
Entry point for My Tracks i18n tool.
@author: Rodrigo Damazio
'''
import mytracks.files
import mytracks.translate
import mytracks.validate
import sys
def Usage():
    """Print command-line help and terminate with exit status 1."""
    print('Usage: %s <command> [<language> ...]\n' % sys.argv[0])
    print('Commands are:')
    print(' cleanup')
    print(' translate')
    print(' validate')
    sys.exit(1)
def Translate(languages):
    '''
    Asks the user to interactively translate any missing or oudated strings from
    the files for the given languages.

    @param languages: the languages to translate
    '''
    validator = mytracks.validate.Validator(languages)
    validator.Validate()
    missing = validator.missing_in_lang()
    outdated = validator.outdated_in_lang()
    for lang in languages:
        # Everything either absent or stale needs the user's attention.
        pending = missing[lang] + outdated[lang]
        if not pending:
            continue
        translator = mytracks.translate.Translator(lang)
        translator.Translate(pending)
def Validate(languages):
    '''
    Computes and displays errors in the string files for the given languages.

    @param languages: the languages to compute for
    @return: the number of problems found (0 when all files are OK)
    '''
    validator = mytracks.validate.Validator(languages)
    validator.Validate()
    error_count = 0
    if validator.valid():
        print('All files OK')
        return error_count
    for lang, missing in validator.missing_in_master().iteritems():
        print('Missing in master, present in %s: %s:' % (lang, str(missing)))
        error_count += len(missing)
    for lang, missing in validator.missing_in_lang().iteritems():
        print('Missing in %s, present in master: %s:' % (lang, str(missing)))
        error_count += len(missing)
    for lang, outdated in validator.outdated_in_lang().iteritems():
        print('Outdated in %s: %s:' % (lang, str(outdated)))
        error_count += len(outdated)
    return error_count
if __name__ == '__main__':
    argv = sys.argv
    argc = len(argv)
    if argc < 2:
        Usage()
    languages = mytracks.files.GetAllLanguageFiles()
    # One or more language codes may follow the command, per Usage():
    # BUG FIX: this used to be 'argc == 3', silently ignoring the filter
    # whenever more than one language was given.
    if argc >= 3:
        langs = set(argv[2:])
        if not langs.issubset(languages):
            # BUG FIX: 'raise <string>' is illegal in Python >= 2.6.
            raise SystemExit('Language(s) not found')
        # Filter just to the languages specified (the 'en' master is always kept)
        languages = dict((lang, lang_file)
                         for lang, lang_file in languages.iteritems()
                         if lang in langs or lang == 'en')
    cmd = argv[1]
    # BUG FIX: error_count is initialized before dispatch so the summary
    # below neither raises NameError (translate) nor clobbers the count
    # computed by validate.
    error_count = 0
    if cmd == 'translate':
        Translate(languages)
    elif cmd == 'validate':
        error_count = Validate(languages)
    else:
        Usage()
    print('%d errors found.' % error_count)
| Python |
'''
Module which compares languague files to the master file and detects
issues.
@author: Rodrigo Damazio
'''
import os
from mytracks.parser import StringsParser
import mytracks.history
class Validator(object):
    """Compares each language's strings.xml against the English master and
    records missing and outdated keys, using Mercurial history to decide
    staleness."""

    def __init__(self, languages):
        '''
        Builds a strings file validator.

        Params:
        @param languages: a dictionary mapping each language to its corresponding directory
        '''
        self._langs = {}
        self._master = None
        self._language_paths = languages
        parser = StringsParser()
        # Parse every language file and annotate each string with the hg
        # revisions that touched its lines.  'en' is the master reference.
        for lang, lang_dir in languages.iteritems():
            filename = os.path.join(lang_dir, 'strings.xml')
            parsed_file = parser.Parse(filename)
            mytracks.history.FillMercurialRevisions(filename, parsed_file)
            if lang == 'en':
                self._master = parsed_file
            else:
                self._langs[lang] = parsed_file
        self._Reset()

    def Validate(self):
        '''
        Computes whether all the data in the files for the given languages is valid.
        '''
        self._Reset()
        self._ValidateMissingKeys()
        self._ValidateOutdatedKeys()

    def valid(self):
        # True when the last Validate() run found no problems at all.
        return (len(self._missing_in_master) == 0 and
                len(self._missing_in_lang) == 0 and
                len(self._outdated_in_lang) == 0)

    def missing_in_master(self):
        return self._missing_in_master

    def missing_in_lang(self):
        return self._missing_in_lang

    def outdated_in_lang(self):
        return self._outdated_in_lang

    def _Reset(self):
        # These are maps from language to string name list
        self._missing_in_master = {}
        self._missing_in_lang = {}
        self._outdated_in_lang = {}

    def _ValidateMissingKeys(self):
        '''
        Computes whether there are missing keys on either side.
        '''
        master_keys = frozenset(self._master.iterkeys())
        for lang, file in self._langs.iteritems():
            keys = frozenset(file.iterkeys())
            missing_in_master = keys - master_keys
            missing_in_lang = master_keys - keys
            if len(missing_in_master) > 0:
                self._missing_in_master[lang] = missing_in_master
            if len(missing_in_lang) > 0:
                self._missing_in_lang[lang] = missing_in_lang

    def _ValidateOutdatedKeys(self):
        '''
        Computes whether any of the language keys are outdated with relation to the
        master keys.
        '''
        for lang, file in self._langs.iteritems():
            outdated = []
            for key, str in file.iteritems():
                # Get all revisions that touched master and language files for this
                # string.
                master_str = self._master[key]
                master_revs = master_str['revs']
                lang_revs = str['revs']
                if not master_revs or not lang_revs:
                    print 'WARNING: No revision for %s in %s' % (key, lang)
                    continue
                master_file = os.path.join(self._language_paths['en'], 'strings.xml')
                lang_file = os.path.join(self._language_paths[lang], 'strings.xml')
                # Assume that the repository has a single head (TODO: check that),
                # and as such there is always one revision which superceeds all others.
                master_rev = reduce(
                    lambda r1, r2: mytracks.history.NewestRevision(master_file, r1, r2),
                    master_revs)
                lang_rev = reduce(
                    lambda r1, r2: mytracks.history.NewestRevision(lang_file, r1, r2),
                    lang_revs)
                # If the master version is newer than the lang version
                # NOTE(review): DoesRevisionSuperceed returns True for equal
                # revisions too, so a key edited in the same changeset in
                # both files is flagged as outdated -- confirm intended.
                if mytracks.history.DoesRevisionSuperceed(lang_file, master_rev, lang_rev):
                    outdated.append(key)
            if len(outdated) > 0:
                self._outdated_in_lang[lang] = outdated
| Python |
'''
Module for dealing with resource files (but not their contents).
@author: Rodrigo Damazio
'''
import os.path
from glob import glob
import re
# Resource directory inside the project root, the master 'values' directory,
# and the glob mask matching per-language 'values-<code>' directories.
MYTRACKS_RES_DIR = 'MyTracks/res'
ANDROID_MASTER_VALUES = 'values'
ANDROID_VALUES_MASK = 'values-*'
def GetMyTracksDir():
    '''
    Returns the directory in which the MyTracks directory is located.

    Walks upward from the current working directory until a directory
    containing MYTRACKS_RES_DIR is found.

    @raise RuntimeError: when no enclosing My Tracks project is found
    '''
    path = os.getcwd()
    while not os.path.isdir(os.path.join(path, MYTRACKS_RES_DIR)):
        # Go up one level
        parent = os.path.split(path)[0]
        # BUG FIX: 'raise <string>' is illegal in Python >= 2.6.  Testing
        # parent == path (instead of path == '/') also terminates correctly
        # on Windows drive roots such as 'C:\\'.
        if parent == path:
            raise RuntimeError('Not in My Tracks project')
        path = parent
    return path
def GetAllLanguageFiles():
    '''
    Returns a mapping from all found languages to their respective directories.

    The master 'values' directory is reported under the key 'en'.

    @raise RuntimeError: if no translations or no master directory is found
    '''
    mytracks_path = GetMyTracksDir()
    res_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_VALUES_MASK)
    language_dirs = glob(res_dir)
    master_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_MASTER_VALUES)
    # BUG FIX: both failure paths used to 'raise <string>', which is illegal
    # in Python >= 2.6; raise a real exception type instead.
    if len(language_dirs) == 0:
        raise RuntimeError('No languages found!')
    if not os.path.isdir(master_dir):
        raise RuntimeError('Couldn\'t find master file')
    # Extract the language code from each 'values-<code>' directory name.
    # (loop variable renamed from 'dir', which shadowed the builtin)
    language_tuples = [(re.findall(r'.*values-([A-Za-z-]+)', lang_dir)[0], lang_dir)
                       for lang_dir in language_dirs]
    language_tuples.append(('en', master_dir))
    return dict(language_tuples)
| Python |
# Twisted .tac application definition: builds the chat service for the
# franticfallacy world and exposes it under the magic name 'application'
# that twistd looks for.
from twisted.application import internet, service
from franticfallacy import communications, engine, data, config

# Load the game world from disk once at startup.
world = data.loadWorld(config.worldpath)

chatFac = communications.ChatFactory()
chatFac.motd = config.motd
chatFac.world = world

chatService = service.MultiService()
internet.TCPServer(config.port, chatFac).setServiceParent(chatService)

application = service.Application("Chatserver")
chatService.setServiceParent(application)
#ANSI colors
# SGR foreground color codes 30..37.
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(30, 38)
# SGR intensity codes: 1 = bold, 22 = normal weight.
BOLD, NORMAL = 1, 22
def ansi(code):
    """Return the ANSI escape sequence for SGR *code* (e.g. 31 or '32;1')."""
    return '\x1B[%sm' % code

def colstr(code, text, finish=0):
    """Wrap *text* in ANSI code *code*, then restore *finish* (0 = reset).

    The middle parameter was renamed from 'str', which shadowed the builtin.

    @param code: SGR code applied before the text
    @param text: the text to colorize
    @param finish: SGR code emitted after the text
    """
    return '%s%s%s' % (ansi(code), text, ansi(finish))
#message types
# Enumerates the kinds of messages takeMessage()/message() can carry, from
# system notices through chat/emotes to in-game combat output.
MSG_SYSTEM, MSG_SYS_INFO, MSG_SYS_EVENT, MSG_SYS_ERR, MSG_CHAT, MSG_EMOTE, MSG_GAME_INFO, MSG_GAME_EVENT, MSG_GAME_ERR, MSG_GAME_ATTACK = range(0, 10)
import os
import pprint
from franticfallacy import engine
from franticfallacy.datafile import grammar
class Loader(object):
    """Recursively instantiates game objects from parsed datafile entities,
    driven by a LoadRuleSet."""
    # Object being populated and the rule set describing how to populate it.
    target = None
    rules = None

    def __init__(self, object, ruleset):
        # NOTE: parameter 'object' shadows the builtin of the same name.
        self.target = object
        self.rules = ruleset

    def takeResults(self, results):
        # For each parsed entity: look up its rule, instantiate the target
        # class, store it, apply the attribute rules, then recurse into the
        # entity's children with the nested rule set.
        for entity in results:
            print '%s loading %s' % (self, entity)
            entattrs = entity.attrs.copy()
            # '_key' is the entity type, '_value' holds [name, ...].
            type = entattrs.pop('_key',None)
            name = entattrs.pop('_value', [])[0]
            print type
            rule = self.rules.entTypes[type]
            obj = rule.targClass()
            # Put the new object into the container named by the rule set.
            store = getattr(self.target, self.rules.store[type])
            store[ name ] = obj
            if rule.named:
                setattr(obj, rule.named, name)
            # Each remaining attribute dispatches to a loader method by name
            # (first tuple element), with the rest as extra arguments.
            loader = Loader(obj, rule)
            for a in entattrs:
                func = getattr(loader, rule.attrRules[a][0])
                func(entattrs[a], *rule.attrRules[a][1:])
            loader.takeResults(entity.children)

    def loadAttrCollection(self, value, attr, type):
        # Coerce the whole value list into 'type'; fall back to coercing just
        # the first element when the constructor rejects the list.
        try:
            setattr( self.target, attr, type(value) )
        except TypeError:
            setattr( self.target, attr, type(value[0]) )

    def loadAttr(self, value, attr, type):
        # Scalar attribute: coerce only the first value.
        setattr( self.target, attr, type(value[0]) )

    def addDictItem(self, item, attr):
        # item is [key] or [key, value]; a bare key means a True flag.
        # NOTE: local 'dict' shadows the builtin of the same name.
        dict = getattr(self.target, attr)
        key = item[0]
        try:
            value = item[1]
        except IndexError:
            value = True
        dict.update({key: value})

    def __repr__(self):
        return '<Loader with target %s and rules %s>' % (self.target, self.rules)
class LoadRuleSet(object):
    """Declarative description of how to load one entity type.

    Attributes:
      targClass -- class instantiated for each entity of this type
      attrRules -- attribute name -> (loader method name, *extra args)
      entTypes  -- child entity type -> nested LoadRuleSet
      store     -- entity type -> container attribute name on the target
      named     -- attribute that receives the entity name, or False
    """
    targClass = None
    named = False

    def __init__(self):
        # BUG FIX: attrRules and entTypes used to be mutable *class*
        # attributes, so in-place mutations such as
        # abilityRules.entTypes['usestatus'] = ... leaked into every other
        # rule set sharing the same dict.  Each instance now owns its
        # mappings (store already did).
        self.attrRules = {}
        self.entTypes = {}
        self.store = {}
# --- Static rule sets mapping datafile entities onto engine classes. ---
# See Loader / LoadRuleSet above for how these are interpreted.

# Rules for status entities -> engine.Status.
statusRules = LoadRuleSet()
statusRules.targClass = engine.Status
statusRules.attrRules = {
    'duration':('loadAttr','duration', str),
    'attrmult':('addDictItem','attrmult'),
    'attradd':('addDictItem','attradd'),
    'flag':('addDictItem','statusflags')
}
statusRules.named = 'name'

# Rules for ability entities -> engine.Ability.
abilityRules = LoadRuleSet()
abilityRules.targClass = engine.Ability
abilityRules.attrRules = {
    'sortorder':('loadAttr','sortorder', int),
    'usable':('loadAttrCollection','usable', set),
    'target':('loadAttrCollection','targetmode', set),
    'mpcost':('loadAttr','mpcost', int),
    'attackattr':('loadAttr','attackattr', str),
    'attackpower':('loadAttr','attackpower', int),
    'attacktype':('loadAttr','attacktype', str),
    'usestring':('loadAttr','usestring', str),
    'defendattr':('loadAttr','defendattr', str),
    'attackflag':('addDictItem','attackflags')
}
# Abilities may nest 'usestatus' children, loaded with the status rules.
abilityRules.entTypes['usestatus'] = statusRules
abilityRules.store['usestatus'] = 'usestatus'

# Top level of a world file: a sequence of ability entities.
worldFileRules = LoadRuleSet()
worldFileRules.entTypes['ability'] = abilityRules
worldFileRules.store['ability'] = 'abilities'
def loadWorld(dir):
    # Builds a World by parsing every known datafile under 'dir'.
    # NOTE: parameter 'dir' shadows the builtin of the same name.
    world = engine.World()
    #files = os.listdir(dir)
    files = ['core.ff']  # currently hard-coded to the core datafile only
    #print files
    for fn in files:
        try:
            print 'loading ' + fn + '...'
            f = open( os.path.join(dir, fn) )
            name = os.path.splitext(fn)[0]
            worldFile = engine.WorldFile(world, name)
            world.worldFiles[name] = worldFile
            loader = Loader(worldFile, worldFileRules)
            results = grammar.parse_file(f)
            loader.takeResults(results)
        except IOError:
            # Best effort: a missing/unreadable datafile is skipped silently.
            pass
    #now that all objects are loaded, go through and change all usestring 'spellcast' to preset message, ability names to references, etc
    return world
def loadWorldFile(worldfile, fileobj):
    # Parses 'fileobj' and dumps its entities for inspection.
    # NOTE(review): appears unfinished -- 'name' is computed but never used
    # and nothing is attached to 'worldfile'; the Loader path in loadWorld()
    # is what actually populates the world.
    worldstuff = grammar.parse_file(fileobj)
    pprint.pprint(worldstuff)
    for entity in worldstuff:
        print '\tloading %s...' % entity
        etype = entity['_key']
        name = entity['_value'][0]
from twisted.internet import defer, reactor
import random, copy, time
import operator
rng = random.SystemRandom()
from formatting import *
class EngineError(Exception):
    """Base class for all game-engine errors."""
    pass

class AbilityError(EngineError):
    """Base class for errors raised while using an ability."""
    pass

class NotReadyError(AbilityError):
    # The actor's turn timer has not elapsed yet.
    pass

class DisallowedPlaceError(AbilityError):
    # The ability cannot be used in the current location.
    pass

class NoMpError(AbilityError):
    # The actor lacks the mp the ability costs.
    pass

class NoTargetError(AbilityError):
    # No creature matches the requested target.
    pass

class DeadTargetError(AbilityError):
    # The requested target is dead.
    pass

class NotLeaderError(EngineError):
    # A group-leader-only command was attempted by a non-leader.
    pass

class JoinSelfError(EngineError):
    # A player tried to join their own group.
    pass

class GroupFullError(EngineError):
    # The group has no room for another member.
    pass
class Status(object):
    """A (de)buff applied to a creature.

    Carries additive and multiplicative stat modifiers plus free-form
    behavior flags; duration stays 'perm' until a loader overrides it.
    """

    def __init__(self):
        self.name = 'status'
        self.duration = 'perm'
        # Stat modifier and flag mappings start empty; loaders fill them in.
        for mapping in ('attrmult', 'attradd', 'statusflags'):
            setattr(self, mapping, {})
class Ability(object):
    """A usable action (attack, spell, guard, ...) a creature can perform.

    Instances start out inert; loaders or module-level setup fill in the
    attack/targeting attributes.  Abilities order themselves by sortorder.
    """

    def __init__(self):
        # Menu position and where/how the ability may be used.
        self.sortorder = 0
        self.usable = set()
        self.targetmode = set()
        # Cost and offensive parameters.
        self.mpcost = 0
        self.attackattr = None
        self.attackpower = 0
        self.attacktype = None
        self.attackflags = {}
        # Presentation strings.
        self.hitverb = 'hits'
        self.hitstring = '%(doer)s %(verb)s %(targ)s for %(dam)s!'
        self.usestring = ''
        # Defense lookup and status side effects.
        self.defendattr = None
        self.targetstatus = {}
        self.usestatus = {}

    def __cmp__(self, other):
        # Python 2 ordering shim: abilities sort by their sortorder.
        return cmp(self.sortorder, other.sortorder)

    def acquireTargets(self, doer, arg):
        """Resolve *arg* into the list of creatures this ability affects."""
        mode = self.targetmode
        # Room-wide ability: hits everything present, argument ignored.
        if 'all' in mode:
            return doer.location.creatures
        # Forced self-targeting.
        #if arg.lower() == 'self' or 'self' in self.targetmode:
        if 'self' in mode:
            return [doer]
        # 'all' keyword on a group ability: own party for defensive use,
        # the opposition for offensive use.
        if arg.lower() == 'all' and 'group' in mode: #doesn't account for lifedeath flag
            if 'def' in mode:
                return [c for c in doer.party if c.hp>0]
            if 'off' in mode:
                return [c for c in doer.location.creatures
                        if c not in doer.party and c.hp>0]
            raise NoTargetError
        # Otherwise look up a single creature by name.
        try:
            return [doer.location.namemap[arg.lower()]]
        except KeyError:
            raise NoTargetError

    def finalDamage(self, basedmg, targ, targstat):
        """Scale damage by the target's resistance/defense and attack flags."""
        mitigation = targ.resist(self.attacktype) / targ.getstat(self.defendattr)
        flags = self.attackflags
        if 'const' in flags:
            # Fixed-power attack, still subject to mitigation.
            dmg = self.attackpower * mitigation
        elif 'multcur' in flags:
            # Fraction of the target's *current* stat.
            dmg = targ.getstat(targstat) / (1/self.attackpower * mitigation)
        elif 'multmax' in flags:
            # Fraction of the target's *maximum* stat.
            dmg = targ.getstat('max'+targstat) / (1/self.attackpower * mitigation)
        else:
            dmg = basedmg * mitigation
        return int(dmg)
# Built-in abilities available in fights.

# Plain physical attack: str vs pdef, power 1, damage type 'hit'.
hitability = Ability()
hitability.usable.add('fight')
hitability.attackattr, hitability.defendattr, hitability.attackpower, hitability.attacktype = 'str', 'pdef', 1, 'hit'
hitability.sortorder = 0

# Guard: doubles physical defense for one turn via a temporary status.
guardability = Ability()
guardability.usable.add('fight')
guardability.usestatus = Status()
guardability.usestatus.name = 'guard'
guardability.usestatus.duration = 'turn'
guardability.usestatus.attrmult['pdef'] = 2
guardability.sortorder = 2
class Creature(object):
name = 'Creature'
hpmax, hp = 200, 200
mpmax, mp = 0, 0
str, mag, pdef, mdef, speed = 10, 10, 10, 10, 10
hitverb = 'hits'
location = None
readyTimer = None
ready = True
def __init__(self):
self.inventory = []
self.status = {}
self.abilities = {}
self.group = set()
@property
def delay(self):
return max(0, 15 - (self.speed / 2))
def getstat(self, statname):
stat = getattr(self, statname, None)
for status in self.status.values():
if statname in status.attradd:
stat += status.attradd[statname]
if statname in status.attrmult:
stat *= status.attrmult[statname]
return stat
def resist(self, damtype):
resist = 1.0
for status in self.status.values():
try:
resist *= status.statusflags['resist'][damtype]
except KeyError:
pass
return resist
@property
def colorname(self):
if self.hp <= 0:
return colstr( RED, self.name )
elif self.ready:
return colstr( GREEN, self.name )
return self.name
@property
def party(self): #just an alias for group in base class, trickier with players
return self.group
def stopReadyTimer(self):
if self.readyTimer is not None:
if self.readyTimer.active():
self.readyTimer.cancel()
self.readyTimer = None
def turnReady(self): #called by readyTimer when turn is ready
self.ready = True #implemented by subclasses too
#Player sends its owner a line indicating readiness to receive a command
#Monster calls its ai method
self.stopReadyTimer()
def die(self):
self.location.takeMessage(MSG_GAME_ATTACK, "%s dies!" % self.name)
self.ready = False
self.stopReadyTimer()
def takeMessage(self, type, message, speaker=None):
pass
def doCommand(self, line, subtable=None):
command, _, arg = line.partition(' ')
#self.location.takeMessage(MSG_SYSTEM, 'debug: %s doing command \'%s\' with arg \'%s\'' % (self.name, command, arg))
atable = self.abilities
if subtable is not None:
atable = subtable
if subtable is None and command in self.location.commands:
try:
self.location.doCommand(self, command, arg)
except NotLeaderError:
self.takeMessage(MSG_GAME_ERR, 'Only the group leader can do that.')
elif subtable is not None and command == '':
self.takeMessage(MSG_GAME_INFO, self.abilstring(atable) )
elif command in atable:
#self.takeMessage(MSG_SYSTEM, '%s in %s' % (command, atable))
try:
if atable[command].__class__ is Ability:
self.location.doAbility(self, atable[command], arg)
else:
self.doCommand(arg, atable[command])
except NotReadyError:
self.takeMessage(MSG_GAME_ERR, 'You\'re not yet ready to act!')
except DisallowedPlaceError:
self.takeMessage(MSG_GAME_ERR, 'Can\'t use \'%s\' here.' % command)
except NoMpError:
self.takeMessage(MSG_GAME_ERR, 'Not enough mp.')
except NoTargetError:
self.takeMessage(MSG_GAME_ERR, 'No such target \'%s\' here.' % arg)
except DeadTargetError:
self.takeMessage(MSG_GAME_ERR, 'Can\'t target dead guy.')
else:
self.takeMessage(MSG_GAME_ERR, 'Invalid action \'%s\'.' % command)
def prompt(self, full=0):
if full>0:
self.location.prompt(self)
self.takeMessage( MSG_GAME_INFO, self.statstring())
if full>0 and self.ready:
abilstring = self.abilstring()
if len(abilstring) > 0:
self.takeMessage( MSG_GAME_INFO, abilstring)
    def statstring(self):
        """Return a '[ hp mp time ]' summary; time shows (dead), READY or a countdown."""
        parts = {'hp': colstr(RED, '%d/%dhp ' % (self.hp, self.hpmax)), 'mp': '', 'time': ''}
        if self.mpmax > 0:
            parts['mp'] = colstr( CYAN, '%d/%dmp ' % (self.mp, self.mpmax), YELLOW)
        if self.hp <= 0:
            parts['time'] = colstr( RED, '(dead)' )
        elif self.ready:
            # '%d;%d' builds a combined color;bold ANSI code string
            parts['time'] = colstr( '%d;%d' % (GREEN, BOLD), 'READY')
        elif self.readyTimer is not None:
            # readyTimer.getTime() is the absolute firing time of the twisted timer
            parts['time'] = colstr( GREEN, '-%.2f' % (self.readyTimer.getTime() - time.time()) )
        return '[ %(hp)s%(mp)s%(time)s ]' % parts
    def abilstring(self, subtable = None):
        """Return '{ a | b | ... }' of abilities usable in the current place, or ''."""
        atable = self.abilities
        if subtable is not None:
            atable = subtable
        # NOTE(review): itemgetter(1) sorts the command *name strings* by
        # their second character; sorting by atable[abil].sortorder looks
        # intended -- confirm before changing.
        abils = [abil for abil in sorted(atable,key=operator.itemgetter(1)) if self.location.allowsAbility(atable[abil])]
        if len(abils) > 0:
            return '{ %s }' % ' | '.join(abils)
        return ''
    def copyOther(self, other):
        """Copy stats, location and (deep-copied) inventory/status from *other*.

        Note: the abilities table is shared by reference, not copied.
        """
        self.hpmax, self.hp = other.hpmax, other.hp
        self.mpmax, self.mp = other.mpmax, other.mp
        self.str, self.mag, self.pdef, self.mdef, self.speed = other.str, other.mag, other.pdef, other.mdef, other.speed
        self.location = other.location
        self.hitverb = other.hitverb
        self.inventory = copy.deepcopy(other.inventory)
        self.status = copy.deepcopy(other.status)
        self.abilities = other.abilities
        # NOTE(review): dead local (or a misindented class attribute?) --
        # nothing reads it; confirm and remove.
        hitverb = 'hits'
class Player(Creature):
    """A human-controlled creature bound to a network connection."""
    owner = None #communications.ChatProtocol
    leader = None #another Player; None when leading or solo
    cmdmode = '.'  # prefix marking a line as a game command (vs. chat)
    def __init__(self, owner):
        super(Player,self).__init__()
        self.owner = owner
        self.name = owner.nickname
        self.gameState = {}
    def takeMessage(self, type, message, speaker=None):
        """Forward game messages to the owning connection."""
        #do ignore lists etc
        #do other game data substitutions
        self.owner.message(type, message, speaker)
    def doCommand(self, line, subtable=None):
        """Route raw input through the cmdmode prefix convention.

        With cmdmode set: a cmdmode-prefixed line is a game command,
        ','-prefixed is an emote, anything else is chat.  With cmdmode
        cleared the roles of '.'-prefixed and bare lines swap.  An empty
        line just re-prompts.
        """
        if subtable is not None:
            super(Player,self).doCommand(line, subtable)
        elif line == '':
            self.prompt(1)
        elif (self.cmdmode==''):
            if line.startswith('.'):
                super(Player,self).doCommand('say ' + line[1:])
            elif line.startswith(','):
                super(Player,self).doCommand('emote ' + line[1:])
            else:
                super(Player,self).doCommand(line)
        else:
            if line.startswith(self.cmdmode):
                super(Player,self).doCommand(line[len(self.cmdmode):])
            elif line.startswith(','):
                super(Player,self).doCommand('emote ' + line[1:])
            else:
                super(Player,self).doCommand('say ' + line)
    @property
    def party(self):
        """The group this player belongs to (the leader's when following)."""
        if self.leader is None:
            return self.group
        else:
            return self.leader.group
    @property
    def groupLeader(self):
        """The leading Player of this player's group (self when leading)."""
        if self.leader is None:
            return self
        else:
            return self.leader
    def joinGroup(self, target):
        """Join *target*'s group, following its leader if target itself follows.

        Raises JoinSelfError or GroupFullError; leaves any current group first.
        """
        if target is self:
            raise JoinSelfError
        if target.leader is not None:
            target = target.leader
        if len(target.group) >= 6:
            raise GroupFullError
        if self.leader is not None:
            self.leaveGroup()
        self.takeMessage(MSG_GAME_EVENT, 'You join %s\'s group.' % target.name )
        for mate in target.group:
            mate.takeMessage( MSG_GAME_EVENT, '%s joined %s group.' % (self.name, (mate is target) and 'your' or 'the') )
        self.leader = target
        self.leader.group.add(self)
        self.group = None  # followers have no group of their own
    def leaveGroup(self):
        """Leave the current group, or disband it when leading one."""
        if self.leader is not None:
            self.leader.group.remove(self)
            self.takeMessage(MSG_GAME_EVENT, 'You leave %s\'s group.' % self.leader.name )
            for mate in self.leader.group:
                mate.takeMessage( MSG_GAME_EVENT, '%s left %s group.' % (self.name, (mate is self.leader) and 'your' or 'the') )
        if self.group is not None and len(self.group) > 1:
            # we were leading: disband everyone else
            self.takeMessage(MSG_GAME_EVENT, 'You disband the group.')
            for mate in self.group.copy():
                if mate is not self:
                    mate.takeMessage(MSG_GAME_EVENT, '%s disbanded the group.' % self.name )
                    mate.leaveGroup()
        # back to a fresh solo group
        self.leader = None
        self.group = set()
        self.group.add(self)
    def turnReady(self):
        super(Player,self).turnReady()
        self.location.takeMessage(MSG_GAME_EVENT, '%s is ready to act.' % self.name, self)
        self.prompt(1)
    def quit(self):
        """Remove the player from the game world (called on disconnect)."""
        self.leaveGroup()
        self.location.loseCreature(self)
        #save character file
class Monster(Creature):
    """An AI-driven creature; each turn it picks a random allowed ability."""
    def __init__(self):
        super(Monster, self).__init__()
    def takeMessage(self, type, message, speaker=None):
        # Monsters ignore chat; game errors are dumped to stdout for debugging.
        if type == MSG_GAME_ERR:
            print 'Monster got error: %s' % message
    def turnReady(self):
        super(Monster,self).turnReady()
        #generalized ai here later
        #self.location.takeMessage( MSG_GAME_EVENT, 'debug: %s is ready.' % self.name )
        #self.doCommand('hit kand')
        # Keep trying until an ability actually fires (a successful
        # Fight.doAbility clears self.ready).
        while self.ready:
            #pick ability
            # Walk a random path down the (possibly nested) ability table
            # until we land on a real Ability.
            abil = ('',self.abilities)
            cmd = []
            try:
                while True:
                    abil = rng.choice( [ (k,v) for (k,v) in abil[1].items() if self.location.allowsAbility(v) ] )
                    cmd.append(abil[0])
            except AttributeError: #'str' object has no attribute 'items', when we hit a real ability and not a group
                pass
            #pick target
            if 'self' in abil[1].targetmode or 'all' in abil[1].targetmode:
                pass #no target parameter required
            elif 'def' in abil[1].targetmode:
                cmd.append( rng.choice( [ c.name for c in self.group if c.hp > 0] ) ) #doesn't account for lifedeath flag
            elif 'off' in abil[1].targetmode:
                cmd.append( rng.choice( [ c.name for c in self.location.creatures - self.group if c.hp > 0] ) ) #doesn't account for lifedeath flag
            else:
                pass
            #self.ready = False #break infinite loops
            self.doCommand( ' '.join(cmd) )
            if self.ready:
                # NOTE(review): if the chosen command keeps failing, this
                # loop never terminates -- confirm intended safeguard.
                print 'Monster still ready after: %s' % cmd
class Role(Creature):
    """A character-class template; copied onto a Player via World.assignRole."""
    pass
class Place(object):
    """Base location: holds creatures, routes their commands, resolves abilities."""
    def __init__(self):
        self.creatures = set()
        self.namemap = {}  # lower-cased name -> creature, for targeting
        self.commands = {'say': self.command_say, 'emote': self.command_emote, 'me': self.command_emote, 'look': self.command_look, 's': self.command_status, 'status': self.command_status, 'help': self.command_help, '?': self.command_help}
    def takeCreature(self, creature):
        """Move *creature* here, removing it from its previous location."""
        self.creatures.add(creature)
        self.namemap[creature.name.lower()] = creature
        if creature.location:
            creature.location.loseCreature(creature)
        creature.location = self
    def loseCreature(self, creature):
        """Remove *creature* from this place."""
        self.creatures.remove(creature)
        del self.namemap[creature.name.lower()]
    def takeMessage(self, type, message, speaker=None):
        """Broadcast a message to every creature here."""
        for creature in self.creatures:
            creature.takeMessage(type, message, speaker)
    def doCommand(self, doer, command, arg):
        """Dispatch a place-level command on behalf of *doer*."""
        #if doer allowed command &c here
        if command in self.commands:
            self.commands[command](doer, arg)
    def command_say(self, speaker, message):
        self.takeMessage(MSG_CHAT, message, speaker)
    def command_emote(self, speaker, message):
        self.takeMessage(MSG_EMOTE, message, speaker)
    def command_look(self, looker, arg): #implemented by subclasses
        looker.takeMessage(MSG_GAME_INFO, 'You see nothing.')
    def command_status(self, checker, arg):
        """Show a status line for every member of the checker's group."""
        group = checker.party
        checker.takeMessage(MSG_GAME_INFO, '[ Status of %s\'s group: ]' % checker.groupLeader.name)
        maxnamelen = max([len(p.name) for p in group])
        for player in group:
            checker.takeMessage(MSG_GAME_INFO, '[ %*s: ]%s' % ( maxnamelen, player.name, player.statstring() ))
    def command_help(self, player, arg):
        player.takeMessage( MSG_GAME_INFO , 'Available game commands: %s' % ' '.join(self.commands) )
        player.takeMessage( MSG_GAME_INFO , 'Your available abilities: %s' % player.abilstring() )
        player.takeMessage( MSG_GAME_INFO , 'For server commands see \'/help\'')
    def prompt(self, creature): #implemented by subclasses
        pass
    def allowsAbility(self, ability):
        """Whether *ability* may be used here; the base place allows nothing."""
        return False
    def doAbility(self, doer, ability, arg):
        """Resolve one ability use: cost, targeting, damage and statuses.

        Returns True on success so subclasses (Fight) can restart timers.
        Raises DisallowedPlaceError / NoMpError / NoTargetError /
        DeadTargetError as appropriate.
        """
        if not self.allowsAbility(ability):
            raise DisallowedPlaceError
        if doer.mp < ability.mpcost:
            raise NoMpError
        usevars = {'doer': doer.name, 'arg': arg, 'mpcost': colstr(YELLOW, '(%d)' % ability.mpcost) }
        if ability.attackpower:
            # offensive / healing path
            aflags = ability.attackflags
            if ability.attacktype == 'hit':
                # physical hits pick up extra flags from attacker statuses
                for status in doer.status.values():
                    if 'hitflags' in status.statusflags:
                        aflags = aflags | status.statusflags['hitflags']
            power = ability.attackpower * doer.getstat(ability.attackattr)
            targets = ability.acquireTargets(doer, arg)
            if len(targets) > 1:
                # multi-target: pay once up front and split the power
                doer.mp -= ability.mpcost
                power /= len(targets)
                if len(ability.usestring) > 0:
                    self.takeMessage(MSG_GAME_ATTACK, ability.usestring % usevars)
            for target in targets:
                basedmg = rng.gauss(power * 100, power * 18)
                # NOTE(review): raising here aborts mid-loop after earlier
                # targets were already hit (only matters for multi-target).
                if target.hp <= 0 and 'lifedeath' not in ability.attackflags:
                    raise DeadTargetError
                if len(targets) == 1:
                    # single-target: pay and announce per target
                    doer.mp -= ability.mpcost
                    if len(ability.usestring) > 0:
                        usevars['targ'] = target.name
                        self.takeMessage(MSG_GAME_ATTACK, ability.usestring % usevars)
                #reflect
                #evasion
                #cover
                damage = ability.finalDamage(basedmg, target, 'hp')
                # negative damage (healing) is shown in green
                hitvars = {'doer': doer.name, 'targ': target.name, 'verb': ability.hitverb, 'dam': colstr( damage < 0 and GREEN or RED , abs(damage) ) }
                if ability.attacktype == 'hit':
                    hitvars['verb'] = doer.hitverb
                self.takeMessage(MSG_GAME_ATTACK, ability.hitstring % hitvars)
                target.hp -= damage
                #lifedeath flag handling
                target.hp = min(target.hp, target.hpmax)
                if target.hp <= 0:
                    target.die()
                else:
                    if ability.targetstatus is not None:
                        # NOTE(review): targetstatus is a Status object;
                        # dict.update() needs a mapping -- confirm whether
                        # target.status[tstatus.name] = tstatus was intended.
                        tstatus = copy.deepcopy(ability.targetstatus)
                        target.status.update(tstatus)
                #stun, steal, clearbuffs, cleardebuffs
                #counter
                if target is not doer:
                    target.prompt()
        else:
            # utility path: no attack roll, just cost + message
            doer.mp -= ability.mpcost
            if len(ability.usestring) > 0:
                self.takeMessage(MSG_GAME_ATTACK, ability.usestring % usevars)
        # 'turn'-duration statuses (e.g. guard) expire when the doer acts
        for turnstatus in [s for s in doer.status if doer.status[s].duration == 'turn']:
            del doer.status[turnstatus]
        if ability.usestatus is not None:
            # NOTE(review): same Status-vs-mapping concern as targetstatus above.
            ustatus = copy.deepcopy(ability.usestatus)
            doer.status.update(ustatus)
        return True
class Zone(Place):
    """An explorable overworld area: players wander, group up and trigger fights."""
    def __init__(self, world, file, name):
        super(Zone, self).__init__()
        self.world = world
        self.file = file            # owning WorldFile (param shadows the builtin; kept for compat)
        self.name = name
        self.fightprob = 0          # chance per 'explore' of spawning a Fight
        self.discoveries = {}
        self.lookmsg = []           # LookMessage candidates; last matching wins
        self.exit = {}
        self.fightprofiles = {}
        self.flags = set()
        self.commands.update({'explore': self.command_explore, 'travel': self.command_travel, 'join': self.command_join, 'leave': self.command_leave, 'heal': self.command_heal})
    def prompt(self, creature):
        """Show group membership when the creature is in a party."""
        group = creature.party
        if len(group) > 1:
            creature.takeMessage( MSG_GAME_INFO , '[%s\'s group: %s]' % (creature.groupLeader.name, ', '.join([c.name for c in group]) ))
    def allowsAbility(self, ability):
        """Abilities tagged 'zone' are usable; a sub-group is usable when any member is."""
        try:
            if 'zone' in ability.usable:
                return True
        except AttributeError:
            # ability is a dict sub-group, not an Ability
            if len( [a for a in ability if self.allowsAbility(ability[a])] ) > 0:
                return True
        return False
    def command_look(self, looker, arg):
        msg = 'You see nothing.'
        for m in self.lookmsg:
            if True: #evaluate m.req
                msg = m.msg
        looker.takeMessage(MSG_GAME_INFO, msg)
        others = [c.name for c in self.creatures if c is not looker]
        if len(others) == 0:
            others = ['No one.']
        looker.takeMessage(MSG_GAME_INFO, 'Here: %s' % ', '.join(others) )
        #show discoveries, exits
    def command_join(self, joiner, arg):
        """Join the group of the named player here."""
        try:
            joiner.joinGroup(self.namemap[arg.lower()])
        except KeyError:
            joiner.takeMessage(MSG_GAME_ERR, 'They\'re not here.')
        except JoinSelfError:
            joiner.takeMessage(MSG_GAME_ERR, 'You can\'t follow yourself, you\'d just go in circles!')
        except GroupFullError:
            joiner.takeMessage(MSG_GAME_ERR, 'Their group is full.')
    def command_leave(self, leaver, arg):
        """Leave (or disband) the current group."""
        if leaver.leader is None and len(leaver.group) <= 1:
            # BUGFIX: was 'joiner.takeMessage' -- a NameError in this method.
            leaver.takeMessage(MSG_GAME_ERR, 'You\'re not in a group.')
        else:
            leaver.leaveGroup()
    def command_explore(self, player, arg):
        """Look for discoveries and possibly trigger a random fight (leader only)."""
        if player.leader is not None:
            raise NotLeaderError
        possibleDiscoveries = []
        # NOTE(review): self.discoveries is a dict, so this iterates its
        # *keys*; d.p only works if the keys are Discovery objects -- confirm.
        for d in self.discoveries:
            if rng.random() <= d.p: #and player's explore count is high enough
                #evaluate reqs, then if they pass:
                possibleDiscoveries.append(d)
        #if there are any possibleDiscoveries, then we want the player to discover rng.choice(possibleDiscoveries)
        if rng.random() <= self.fightprob:
            fight = Fight(self)
            mgroup = set()
            spells = ( {'white':{'heal':self.world.worldFiles['core'].abilities['heal']}} , {'black':{'fire':self.world.worldFiles['core'].abilities['fire']}} )
            nplayers = len(player.group)
            # spawn between nplayers and 2.5x nplayers monsters
            for i in range( 0,rng.randint(nplayers, int(nplayers * 2.5)) ):
                monster = self.world.newMonster('core', 'beast')
                monster.name = 'Beast%d' % (i+1)
                magic = random.choice(spells)
                # BUGFIX: copyOther shares the template's ability dict by
                # reference; copy it before updating so spells added here do
                # not leak into the 'beast' template (and every later beast).
                monster.abilities = dict(monster.abilities)
                monster.abilities.update(magic)
                monster.mp,monster.mpmax = 20,20
                mgroup.add(monster)
                monster.group = mgroup
            fight.takeGroup( '%s\'s group' % player.name, player.group )
            fight.takeGroup( 'Monsters', mgroup )
            fight.start()
    def command_travel(self, player, arg):
        pass
    def command_heal(self, player, arg): #temporary testing command
        player.hp, player.mp = player.hpmax, player.mpmax
        player.takeMessage(MSG_GAME_EVENT, 'The Goddess mends your wounds and cleanses your spirit.')
        player.prompt()
class LookMessage(object):
    """One candidate 'look' description for a zone.

    req is a requirement expression; it is not evaluated anywhere visible
    yet (command_look treats every message as matching).
    """

    def __init__(self, msg):
        self.msg = msg
        self.req = None
class Discovery(object):
    """Something that can be found while exploring a zone.

    p is the per-explore discovery probability; explore, req and consq are
    placeholders for requirement/consequence handling -- TODO confirm
    their exact semantics against command_explore.
    """

    def __init__(self):
        self.explore = 0
        self.p = 1
        self.lookmsg = []
        self.req = None
        self.consq = None
class Fight(Place):
    """A battle: named groups fight until at most one has live members."""
    def __init__(self, zone):
        super(Fight, self).__init__()
        self.zone = zone    # Zone survivors are returned to
        self.groups = {}    # group name -> set of creatures
        self.dead = set()
    def prompt(self, creature):
        # List every group with state-colored member names.
        for gname in self.groups:
            creature.takeMessage( MSG_GAME_INFO , '[%s: %s]' % (gname, ', '.join([c.colorname for c in self.groups[gname]]) ))
    def allowsAbility(self, ability):
        """Abilities tagged 'fight' are usable; a sub-group is usable when any member is."""
        try:
            if 'fight' in ability.usable:
                return True
        except AttributeError:
            # ability is a dict sub-group, not an Ability
            if len( [a for a in ability if self.allowsAbility(ability[a])] ) > 0:
                return True
        return False
    def doAbility(self, doer, ability, arg):
        """Combat ability use: enforces readiness and restarts the turn timer."""
        if not self.allowsAbility(ability):
            raise DisallowedPlaceError
        if doer.ready:
            if super(Fight,self).doAbility(doer, ability, arg):
                doer.ready = False
                doer.stopReadyTimer()
                doer.readyTimer = reactor.callLater(doer.getstat('delay'), doer.turnReady)
                doer.prompt()
            self.checkForEnd()
        else:
            raise NotReadyError
    def takeGroup(self, name, group):
        """Register a whole group of creatures in the fight under *name*."""
        self.groups[name] = group
        for creature in group:
            self.takeCreature(creature)
    def start(self):
        # Everyone starts un-ready with a randomized initial delay.
        self.takeMessage( MSG_GAME_EVENT , 'A fight starts!')
        for creature in self.creatures:
            creature.ready = False
            creature.readyTimer = reactor.callLater(creature.getstat('delay') / (rng.random() + 1), creature.turnReady)
        for creature in self.creatures:
            creature.prompt(1)
    def checkForEnd(self):
        """End the fight when fewer than two groups still have live members."""
        winners = [ g for g in self.groups if len([c for c in self.groups[g] if c.hp>0]) ]
        if len(winners) < 2:
            for w in winners:
                self.takeMessage( MSG_GAME_EVENT , 'Victorious: %s!' % w)
            #dole out exp
            self.end()
    def end(self):
        # Stop all timers and return the Players to the zone at >= 1 hp.
        for creature in self.creatures:
            creature.stopReadyTimer()
        # list comp materializes first, so takeCreature may mutate self.creatures
        for player in [c for c in self.creatures if c.__class__ is Player]:
            self.zone.takeCreature(player)
            player.hp = max(player.hp, 1)
            player.ready = True
            player.doCommand('%slook' % player.cmdmode)
            player.prompt()
class World(object):
def __init__(self):
self.worldFiles = {}
self.defaultZone = None
self.roles = {}
def newPlayer(self, owner):
char = Player(owner)
char.group.add(char)
return char
def newMonster(self, file, name):
monster = Monster()
monster.copyOther(self.worldFiles[file].creatures[name])
return monster
def assignRole(self, char, rolename):
try:
char.copyOther(self.roles[rolename])
return True
except KeyError:
return False
def roleList(self):
return '{ %s }' % ' | '.join(self.roles)
class WorldFile(object):
    """Container for the content defined by one world data file."""

    def __init__(self, world, name):
        self.world = world
        self.name = name
        # name -> object registries for everything this file defines
        self.zones = {}
        self.creatures = {}
        self.abilities = {}
        self.status = {}
class PlayerState(object):
    """Per-player persistent progress (not yet wired up anywhere visible)."""

    def __init__(self):
        self.owner = None
        self.zone = None
        self.discovered = {}  # presumably zone -> discoveries found; confirm
        self.explore = 0      # explore counter
from twisted.internet import protocol, defer, reactor
from twisted.internet.task import LoopingCall
from twisted.protocols import basic
import re
import time
from franticfallacy.formatting import *
# Pre-2.4 Python compatibility: fall back to the old 'sets' module.
try:
    set
except NameError:
    from sets import Set as set
# Login state machine positions for ChatProtocol.state.
CONSTATE_NAME, CONSTATE_PWD, CONSTATE_ROLE, CONSTATE_MAIN = range(0,4)
# Idle timeout in seconds -- appears unused in this file; confirm.
TIMEOUTTIME = 1800
class ChatProtocol(basic.LineReceiver):
    """One connected client: login state machine, then command routing."""
    state = CONSTATE_NAME
    # 0: waiting for nickname
    # 1: waiting for password
    # 2: logged in
    new = 0             # account-creation scaffolding; not used yet
    nickname = None
    character = None    # the engine Player, once a name is accepted
    def connectionMade(self):
        #welcome and prompt for name or new
        self.message( MSG_SYSTEM , self.factory.motd )
        #self.message( MSG_SYS_EVENT , 'Login: (\'new\' for new)' )
        self.message( MSG_SYS_EVENT , 'Enter name:' )
    def connectionLost(self, reason):
        # BUGFIX: guard against clients that disconnect before successfully
        # registering a nickname or entering the world; the old code
        # unconditionally unregistered and called character.quit(), which
        # crashed on early disconnects (nickname None / never registered,
        # character None or with no location yet).
        if self.nickname is not None and self.factory.nickname_map.get(self.nickname.lower()) is self:
            self.factory.unregister(self.nickname, self)
        if self.character is not None and self.character.location is not None:
            self.character.quit()
    def lineReceived(self, line):
        """Advance the login state machine, or route commands once logged in."""
        if self.state == CONSTATE_NAME: #receiving name
            #if line == 'new':
            #else:
            self.nickname = line
            # printable, no spaces, 1-15 chars, not a reserved word
            if re.match(r'\A[\w!"$-.:-@[-`{-~]{1,15}\Z', self.nickname) is None or self.nickname.lower() in ('new', 'all', 'self'):
                self.message( MSG_SYS_ERR , 'Invalid name.' )
            elif self.factory.register(self.nickname, self):
                self.character = self.factory.world.newPlayer(self)
                self.message( MSG_SYS_INFO , 'Choose a character role: %s' % self.factory.world.roleList() )
                self.state = CONSTATE_ROLE
            else:
                self.message( MSG_SYS_ERR , 'Name in use.' )
        elif self.state == CONSTATE_PWD: #receiving password
            # BUGFIX: assigned to a local 'state' instead of self.state,
            # which left the protocol stuck in the password state.
            self.state = CONSTATE_NAME #not yet implemented, so get out
        elif self.state == CONSTATE_ROLE: #choosing character role
            if self.factory.world.assignRole(self.character, line):
                self.message( MSG_SYS_INFO , 'Welcome!' )
                self.state = CONSTATE_MAIN
                self.factory.world.defaultZone.takeCreature(self.character)
                self.character.doCommand('%slook' % self.character.cmdmode)
            else:
                self.message( MSG_SYS_ERR , 'Invalid role. Choose: %s' % self.factory.world.roleList() )
        elif self.state == CONSTATE_MAIN: #main logged in
            if line.startswith('/'): #server command
                command, _, arg = line[1:].partition(' ')
                func = self.commands.get(command, None)
                if func is None:
                    self.message( MSG_SYS_ERR , 'Invalid command \'%s\'' % command )
                else:
                    func(self, arg)
            else: #game command, send to character
                self.character.doCommand(line)
    def message(self, type, line, creature = None):
        """Colorize *line* according to its message type and send it."""
        if type == MSG_SYSTEM:
            line = colstr( MAGENTA, line )
        elif type == MSG_SYS_INFO:
            line = colstr( YELLOW, line )
        elif type == MSG_SYS_EVENT:
            line = colstr( GREEN, line )
        elif type == MSG_SYS_ERR:
            line = colstr( RED, line )
        elif type == MSG_CHAT:
            line = '%s %s' % (colstr( creature is self.character and CYAN or YELLOW, '<%s>' % creature.name), line)
        elif type == MSG_EMOTE:
            line = colstr( YELLOW, '<%s %s>' % (creature.name, line))
        elif type == MSG_GAME_INFO:
            line = colstr( YELLOW, line )
        elif type == MSG_GAME_EVENT:
            line = colstr( GREEN, line )
            if creature is self.character:
                # NOTE(review): crude second-person rewrite -- replaces every
                # 'is' substring, not just the verb.
                line = colstr( BOLD, line.replace(creature.name, 'You').replace('is', 'are') )
        elif type == MSG_GAME_ERR:
            line = colstr( RED, line )
        self.sendLine(line)
    def command_quit(self, arguments):
        self.message( MSG_SYSTEM , 'Bye!' )
        self.transport.loseConnection()
    def command_time(self, arguments):
        self.message( MSG_SYS_INFO , 'System time: %s' % time.asctime() )
    def command_cmdmode(self, arguments):
        """Set or clear the prefix that marks game commands."""
        if arguments:
            self.character.cmdmode = arguments
            self.message( MSG_SYS_EVENT , 'Set game command prefix to \'%s\'.' % arguments )
        else:
            self.character.cmdmode = ''
            self.message( MSG_SYS_EVENT , 'Cleared game command prefix.')
    def command_help(self, arguments):
        self.message( MSG_SYS_INFO , 'Available server commands: %s' % ' '.join(self.commands) )
        self.message( MSG_SYS_INFO , 'For game commands see \'%shelp\'' % self.character.cmdmode)
    # server-command dispatch table ('/quit', '/time', ...)
    commands = {'quit': command_quit, 'time': command_time, 'cmdmode': command_cmdmode, 'help': command_help, '?': command_help}
class ChatFactory(protocol.ServerFactory):
    """Server factory: tracks connected clients and owns the shared World."""
    protocol = ChatProtocol

    def __init__(self):
        self.active_clients = set()
        self.illegal_names = set()
        self.nickname_map = {}   # lower-cased nickname -> protocol
        self.world = None
        self.motd = ''

    def register(self, nickname, listener):
        """Claim *nickname* for *listener*; returns False when already taken."""
        key = nickname.lower()
        if key in self.nickname_map:
            return False
        self.active_clients.add(listener)
        self.nickname_map[key] = listener
        self.sysMessage(MSG_SYSTEM, '%s connected.' % nickname)
        return True

    def unregister(self, nickname, listener):
        """Release *nickname* and drop *listener* from the broadcast set."""
        self.active_clients.remove(listener)
        del self.nickname_map[nickname.lower()]
        self.sysMessage(MSG_SYSTEM, '%s disconnected.' % nickname)

    def sysMessage(self, type, message):
        """Broadcast a server-level message to every connected client."""
        for client in self.active_clients:
            client.message(type, message)
import pyparsing as p

# --- terminals -------------------------------------------------------------
single_quoted_string = p.QuotedString("'", '\\')
double_quoted_string = p.QuotedString('"', '\\')
numeral = p.Word(p.nums)
# optionally-signed decimal number, e.g. -3, 2., 4.5
number = p.Combine(p.Optional(p.Literal('-')) + numeral +
    p.Optional(p.Literal('.') + p.Optional(numeral)))
dee = p.Literal('d').suppress()
dice_roll = numeral + dee + numeral                     # e.g. 2d6
dice_rolls = p.delimitedList(dice_roll | numeral, '+')  # e.g. 2d6+1
identifier = p.Word(p.alphas + '_')
_value = (single_quoted_string | double_quoted_string |
    dice_rolls ^ number | identifier)
comma = p.Literal(',').suppress()
# comma-separated series; a single value with a trailing comma also counts
series = p.Group(_value + (p.OneOrMore(comma + _value) | comma))
value = series | _value
colon = p.Literal(':').suppress()
lbrack = p.Literal('[').suppress()
rbrack = p.Literal(']').suppress()

def _mark_place(s, l, tok):
    # Parse action: tag each block with its source string and offset so
    # later structural errors can point at the right location.
    tok[0] = tok[0], s, l
    return tok

# '[key: v1 : v2 ...]' block; leading whitespace on a line carries nesting depth
block = p.Group(lbrack +
    identifier.setResultsName('key') +
    p.Group(p.ZeroOrMore(colon + value)).setResultsName('value') +
    rbrack).setResultsName('block').setParseAction(_mark_place)
leading_indent = p.Optional(p.White(' \t')).setResultsName('leading_indent')
line = leading_indent + p.Group(
    block + p.ZeroOrMore(~p.LineEnd() + block)).setResultsName('blocks')

def _fail_hard(s, loc, expr, err):
    # Fail action: any '[' left unconsumed means a malformed block.
    raise p.ParseFatalException(s, loc,
        'parse error before last left bracket')

# A whole file: any number of block-lines, then no stray '[' may remain.
# NOTE: this name shadows the builtin 'file' within this module.
file = (p.ZeroOrMore(
    p.SkipTo((leading_indent + lbrack).leaveWhitespace()).suppress() +
    p.Group(line)) +
    (~p.SkipTo(lbrack)).setFailAction(_fail_hard))
class Result(object):
    """A parsed block: a name/value pair plus keyword attributes and children."""

    def __init__(self, name, value, **attrs):
        self.name = name
        self.value = value
        self.attrs = attrs
        self.children = []

    def add_child(self, _child=None, **attrs):
        """Append and return a child Result (built from **attrs when not given)."""
        child = Result(**attrs) if _child is None else _child
        self.children.append(child)
        return child

    def __repr__(self):
        return '<Result %s:%r %r with %d children>' % (
            self.name, self.value, self.attrs, len(self.children))

    def __getitem__(self, key):
        return self.attrs[key]

    def __setitem__(self, key, value):
        self.attrs[key] = value

    def get(self, key, default=None):
        """Dict-style attribute lookup with a fallback."""
        return self.attrs.get(key, default)
def parse_file(filename):
    """Parse *filename* into a list of root Result trees.

    Leading indentation expresses nesting: a deeper-indented line becomes a
    child of the previous block.  A closed block with no children and no
    attributes collapses into a plain attribute on its parent.
    """
    ret = []
    stack = []  # (indent level, Result) for the currently-open blocks
    def handle_dedent(target_level):
        # Close every open block at or deeper than target_level, attaching
        # each to its parent (or to ret at the root).
        level = None
        while stack and target_level <= stack[-1][0]:
            level, result = stack.pop()
            if stack:
                if result.children or result.attrs:
                    stack[-1][1].add_child(result)
                else:
                    # leaf block: store as parent[key] = value instead
                    stack[-1][1][result.name] = result.value
            else:
                ret.append(result)
        # _s/_l close over the current block's source position (set in the
        # loop below before any call that can reach this raise)
        if level is not None and level != target_level:
            raise p.ParseException(_s, _l,
                'unindent does not match any outer indentation level')
    level = last_level = -1
    last_count = 0
    for tok in file.parseFile(filename):
        last_level, level = level, len(tok.leading_indent)
        for block, _s, _l in tok.blocks:
            if last_level == -1 and level > 0:
                raise p.ParseException(_s, _l,
                    'indent with no root block')
            elif level > last_level and last_count > 1:
                # several blocks shared the previous line; parent is ambiguous
                raise p.ParseException(_s, _l, 'ambiguous indent')
            result = Result(block.key, block.value.asList())
            handle_dedent(level)
            stack.append((level, result))
        last_count = len(tok.blocks)
    handle_dedent(0)
    return ret
| Python |
# Server configuration.
port = 4002                    # TCP port the chat/game server listens on
motd = 'Frantic Fallacy 2a1'   # banner sent to clients on connect
worldpath = 'data/world'       # directory scanned by data.loadWorld
playerpath = 'data/player'     # player-save directory; appears unused here -- confirm
# Twisted .tac application file: builds the world, wires up the chat
# factory and exposes 'application' for twistd to run.
from twisted.application import internet, service
from franticfallacy import communications, engine, data, config
world = data.loadWorld(config.worldpath)
chatFac = communications.ChatFactory()
chatFac.motd = config.motd
chatFac.world = world
chatService = service.MultiService()
internet.TCPServer(config.port, chatFac).setServiceParent(chatService)
# twistd looks for a module-level name 'application'
application = service.Application("Chatserver")
chatService.setServiceParent(application)
#ANSI colors
# SGR foreground color codes 30-37 plus the bold/normal attribute codes.
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(30, 38)
BOLD, NORMAL = 1, 22
def ansi(code):
    """Return the ANSI escape sequence for SGR *code* (an int or 'a;b' string)."""
    return '\x1B[%sm' % (code,)

def colstr(code, str, finish = 0):
    """Wrap *str* in ANSI codes: start with *code*, end with *finish* (0 = reset)."""
    pieces = (ansi(code), str, ansi(finish))
    return '%s%s%s' % pieces
#message types
# Channel identifiers passed to takeMessage/message; they select styling only.
MSG_SYSTEM, MSG_SYS_INFO, MSG_SYS_EVENT, MSG_SYS_ERR, MSG_CHAT, MSG_EMOTE, MSG_GAME_INFO, MSG_GAME_EVENT, MSG_GAME_ERR, MSG_GAME_ATTACK = range(0, 10)
import copy
import engine
import os
def loadWorld(dir):
    """Build and return the game World.

    Scans *dir* for world data files (parsing is still a stub), then wires
    up the hard-coded 'core' content: the limbo zone, the basic abilities
    (hit/guard/heal/fire), the 'beast' monster and the 'tester' role.
    """
    world = engine.World()
    files = os.listdir(dir)
    #print files
    for fn in files:
        try:
            f = open( os.path.join(dir, fn) )
        except IOError:
            continue  # unreadable entry (e.g. a subdirectory): skip it
        try:
            name = os.path.splitext(fn)[0]
            # BUGFIX: WorldFile.__init__ takes (world, name); the world
            # argument was missing, which raised TypeError on every file.
            worldFile = engine.WorldFile(world, name)
            world.worldFiles[name] = worldFile
            loadWorldFile(worldFile, f)
        finally:
            f.close()  # BUGFIX: the handle was never closed
    # hard-coded core content (note: overwrites any loaded 'core' file)
    world.worldFiles['core'] = engine.WorldFile(world, 'core')
    limbo = engine.Zone(world, world.worldFiles['core'], 'limbo')
    limbo.lookmsg.append(engine.LookMessage('You are floating in a void.'))
    limbo.fightprob = 1
    world.worldFiles['core'].zones['limbo'] = limbo
    world.defaultZone = limbo
    # basic physical attack
    hitability = engine.Ability()
    #target single other
    hitability.usable.add('fight')
    hitability.attackattr, hitability.defendattr, hitability.attackpower, hitability.attacktype = 'str', 'pdef', 1, 'hit'
    hitability.sortorder = 0
    world.worldFiles['core'].abilities['hit'] = hitability
    # guard: doubles pdef until the guarding creature next acts
    guardability = engine.Ability()
    #target self
    guardability.usable.add('fight')
    guardability.usestatus = engine.Status()
    guardability.usestatus.name = 'guard'
    guardability.usestatus.duration = 'turn'
    guardability.usestatus.attrmult['pdef'] = 2
    guardability.sortorder = 2
    guardability.usestring = '%(doer)s guards.'
    world.worldFiles['core'].abilities['guard'] = guardability
    heal = engine.Ability()
    #target other, 'all' hits friendlies
    heal.usable.add('fight')
    heal.usable.add('zone')
    heal.attackattr, heal.defendattr, heal.attackpower, heal.attacktype = 'mag', 'mdef', 2.5, 'heal'
    heal.hitverb = 'heals'
    heal.usestring = '%(doer)s casts heal-- %(mpcost)s'
    heal.mpcost = 2
    heal.sortorder = 0
    world.worldFiles['core'].abilities['heal'] = heal
    fire = engine.Ability()
    #target other, 'all' hits hostiles
    fire.usable.add('fight')
    fire.attackattr, fire.defendattr, fire.attackpower, fire.attacktype = 'mag', 'mdef', 1, 'fire'
    fire.hitverb = 'burn'
    fire.hitstring = "%(targ)s is %(verb)sed for %(dam)s!"
    fire.usestring = '%(doer)s casts fire-- %(mpcost)s'
    fire.mpcost = 2
    fire.sortorder = 0
    world.worldFiles['core'].abilities['fire'] = fire
    # 'alive' status: heals affect the bearer as negative damage
    alive = engine.Status()
    alive.name = 'alive'
    alive.statusflags['resist'] = {'heal': -1}
    beast = engine.Monster()
    # BUGFIX: was 'beast.magdef = 5' -- an attribute nothing reads; the
    # defensive stat used by abilities is 'mdef'.
    beast.mdef = 5
    beast.hitverb = 'claws'
    for abil in ['hit']:
        beast.abilities[abil] = world.worldFiles['core'].abilities[abil]
    beast.status['alive'] = copy.copy(alive)
    world.worldFiles['core'].creatures['beast'] = beast
    tester = engine.Role()
    tester.hp, tester.hpmax = 600,600
    tester.mp, tester.mpmax = 10,10
    tester.str = 14
    for abil in ['hit', 'guard']:
        tester.abilities[abil] = world.worldFiles['core'].abilities[abil]
    tester.abilities['white'] = {'heal': world.worldFiles['core'].abilities['heal']}
    tester.abilities['black'] = {'fire': world.worldFiles['core'].abilities['fire']}
    tester.status['alive'] = copy.copy(alive)
    world.worldFiles['core'].creatures['role_tester'] = tester
    world.roles['tester'] = tester
    return world
def loadWorldFile(worldfile, fileobj):
    """Populate *worldfile* from an open data file.  Not yet implemented."""
    pass
from twisted.internet import defer, reactor
import random, copy, time
import operator
# Shared OS-entropy RNG used for all game randomness.
rng = random.SystemRandom()
from formatting import *
class EngineError(Exception):
    """Root of all game-engine errors."""

class AbilityError(EngineError):
    """Base for errors raised while attempting to use an ability."""

class NotReadyError(AbilityError):
    """The actor's turn timer has not elapsed yet."""

class DisallowedPlaceError(AbilityError):
    """The ability cannot be used in the current place."""

class NoMpError(AbilityError):
    """The actor lacks the mp to pay the ability's cost."""

class NoTargetError(AbilityError):
    """No creature matching the requested target name is present."""

class DeadTargetError(AbilityError):
    """The target is dead and the ability cannot affect the dead."""

class NotLeaderError(EngineError):
    """Only a group leader may perform this action."""

class JoinSelfError(EngineError):
    """A player tried to join their own group."""

class GroupFullError(EngineError):
    """The target group already has the maximum number of members."""
class Status(object):
    """A (de)buff attached to a creature: stat modifiers plus free-form flags."""

    def __init__(self):
        self.name = 'status'
        self.duration = 'perm'  # 'perm', or 'turn' to expire when the bearer acts
        self.attrmult = {}      # stat name -> multiplier
        self.attradd = {}       # stat name -> additive bonus
        self.statusflags = {}   # e.g. {'resist': {damtype: mult}}, {'hitflags': set}
class Ability(object):
    """One usable action: cost, attack parameters, messages and status effects."""

    def __init__(self):
        # ordering / availability
        self.sortorder = 0
        self.usable = set()      # places allowing this: 'fight', 'zone'
        self.targetmode = set()
        self.mpcost = 0
        # offensive parameters
        self.attackattr = None   # attacker stat name, e.g. 'str'
        self.attackpower = 0     # 0 means a non-attack (utility) ability
        self.attacktype = None
        self.attackflags = set()
        self.defendattr = None   # defender stat name, e.g. 'pdef'
        # messages
        self.hitverb = 'hits'
        self.hitstring = '%(doer)s %(verb)s %(targ)s for %(dam)s!'
        self.usestring = ''
        # status effects applied on use / on hit
        self.targetstatus = None
        self.usestatus = None

    def __cmp__(self, other):
        # Python 2 ordering: abilities sort by their sortorder.
        return cmp(self.sortorder, other.sortorder)

    def acquireTargets(self, doer, arg):
        """Resolve *arg* to a target list (single-target prototype).

        Raises NoTargetError when no creature by that name is present.
        """
        wanted = arg.lower()
        if wanted == 'self':
            return [doer]
        if wanted in doer.location.namemap:
            return [doer.location.namemap[wanted]]
        raise NoTargetError

    def finalDamage(self, basedmg, targ, targstat):
        """Apply the target's defenses to *basedmg* and return integer damage."""
        defense = targ.resist(self.attacktype) / targ.getstat(self.defendattr)
        if 'const' in self.attackflags:
            return int(self.attackpower * defense)
        if 'multcur' in self.attackflags:
            return int(targ.getstat(targstat) / (1/self.attackpower * defense))
        if 'multmax' in self.attackflags:
            return int(targ.getstat('max'+targstat) / (1/self.attackpower * defense))
        return int(basedmg * defense)
# Prototype ability instances.  NOTE(review): data.loadWorld builds its own
# copies of these; nothing visible reads these module-level ones -- confirm.
hitability = Ability()
hitability.usable.add('fight')
hitability.attackattr, hitability.defendattr, hitability.attackpower, hitability.attacktype = 'str', 'pdef', 1, 'hit'
hitability.sortorder = 0
# guard: doubles pdef until the bearer next acts ('turn' duration)
guardability = Ability()
guardability.usable.add('fight')
guardability.usestatus = Status()
guardability.usestatus.name = 'guard'
guardability.usestatus.duration = 'turn'
guardability.usestatus.attrmult['pdef'] = 2
guardability.sortorder = 2
class Creature(object):
    """Base class for anything that lives in a Place and can act."""
    # Class-level defaults; instances usually override via copyOther/roles.
    name = 'Creature'
    hpmax, hp = 200, 200
    mpmax, mp = 0, 0
    # note: 'str' shadows the builtin at class scope
    str, mag, pdef, mdef, speed = 10, 10, 10, 10, 10
    hitverb = 'hits'
    location = None    # the Place this creature is in
    readyTimer = None  # twisted IDelayedCall for the next turn, if any
    ready = True       # whether the creature may act right now
    def __init__(self):
        self.inventory = []
        self.status = {}     # status name -> Status
        self.abilities = {}  # command -> Ability, or nested dict sub-group
        self.group = set()
    @property
    def delay(self):
        """Seconds between turns; faster creatures wait less (floored at 0)."""
        return max(0, 15 - (self.speed / 2))
    def getstat(self, statname):
        """Base stat adjusted by active statuses (additive, then multiplicative)."""
        stat = getattr(self, statname, None)
        for status in self.status.values():
            if statname in status.attradd:
                stat += status.attradd[statname]
            if statname in status.attrmult:
                stat *= status.attrmult[statname]
        return stat
    def resist(self, damtype):
        """Combined resistance multiplier against *damtype* from all statuses."""
        resist = 1.0
        for status in self.status.values():
            try:
                resist *= status.statusflags['resist'][damtype]
            except KeyError:
                pass
        return resist
    @property
    def colorname(self):
        """Name colored by state: red when dead, green when ready to act."""
        if self.hp <= 0:
            return colstr( RED, self.name )
        elif self.ready:
            return colstr( GREEN, self.name )
        return self.name
    def stopReadyTimer(self):
        """Cancel and clear any pending turn timer."""
        if self.readyTimer is not None:
            if self.readyTimer.active():
                self.readyTimer.cancel()
            self.readyTimer = None
    def turnReady(self): #called by readyTimer when turn is ready
        self.ready = True #implemented by subclasses too
        #Player sends its owner a line indicating readiness to receive a command
        #Monster calls its ai method
        self.stopReadyTimer()
    def die(self):
        """Announce death to the room and stop this creature from acting."""
        self.location.takeMessage(MSG_GAME_ATTACK, "%s dies!" % self.name)
        self.ready = False
        self.stopReadyTimer()
    def takeMessage(self, type, message, speaker=None):
        """Receive a message; base creatures ignore everything (hook for subclasses)."""
        pass
    def doCommand(self, line, subtable=None):
        """Parse '<command> [arg]' and dispatch it for this creature.

        With *subtable* set, lookup is restricted to that nested ability
        group.  Ability errors are reported back via takeMessage.
        """
        command, _, arg = line.partition(' ')
        #self.location.takeMessage(MSG_SYSTEM, 'debug: %s doing command \'%s\' with arg \'%s\'' % (self.name, command, arg))
        atable = self.abilities
        if subtable is not None:
            atable = subtable
        if subtable is None and command in self.location.commands:
            # place-level commands (say, look, status...) win at top level
            try:
                self.location.doCommand(self, command, arg)
            except NotLeaderError:
                self.takeMessage(MSG_GAME_ERR, 'Only the group leader can do that.')
        elif subtable is not None and command == '':
            # bare sub-group name: list the abilities it contains
            self.takeMessage(MSG_GAME_INFO, self.abilstring(atable) )
        elif command in atable:
            #self.takeMessage(MSG_SYSTEM, '%s in %s' % (command, atable))
            try:
                if atable[command].__class__ is Ability:
                    self.location.doAbility(self, atable[command], arg)
                else:
                    # nested ability group: recurse with the rest of the line
                    self.doCommand(arg, atable[command])
            except NotReadyError:
                self.takeMessage(MSG_GAME_ERR, 'You\'re not yet ready to act!')
            except DisallowedPlaceError:
                self.takeMessage(MSG_GAME_ERR, 'Can\'t use \'%s\' here.' % command)
            except NoMpError:
                self.takeMessage(MSG_GAME_ERR, 'Not enough mp.')
            except NoTargetError:
                self.takeMessage(MSG_GAME_ERR, 'No such target \'%s\' here.' % arg)
            except DeadTargetError:
                self.takeMessage(MSG_GAME_ERR, 'Can\'t target dead guy.')
        else:
            self.takeMessage(MSG_GAME_ERR, 'Invalid action \'%s\'.' % command)
    def prompt(self, full=0):
        """Send the status line; full>0 also sends location info and abilities."""
        if full>0:
            self.location.prompt(self)
        self.takeMessage( MSG_GAME_INFO, self.statstring())
        if full>0 and self.ready:
            abilstring = self.abilstring()
            if len(abilstring) > 0:
                self.takeMessage( MSG_GAME_INFO, abilstring)
    def statstring(self):
        """Return a '[ hp mp time ]' summary; time shows (dead), READY or a countdown."""
        parts = {'hp': colstr(RED, '%d/%dhp ' % (self.hp, self.hpmax)), 'mp': '', 'time': ''}
        if self.mpmax > 0:
            parts['mp'] = colstr( CYAN, '%d/%dmp ' % (self.mp, self.mpmax), YELLOW)
        if self.hp <= 0:
            parts['time'] = colstr( RED, '(dead)' )
        elif self.ready:
            # '%d;%d' builds a combined color;bold ANSI code string
            parts['time'] = colstr( '%d;%d' % (GREEN, BOLD), 'READY')
        elif self.readyTimer is not None:
            # readyTimer.getTime() is the absolute firing time of the twisted timer
            parts['time'] = colstr( GREEN, '-%.2f' % (self.readyTimer.getTime() - time.time()) )
        return '[ %(hp)s%(mp)s%(time)s ]' % parts
    def abilstring(self, subtable = None):
        """Return '{ a | b | ... }' of abilities usable in the current place, or ''."""
        atable = self.abilities
        if subtable is not None:
            atable = subtable
        # NOTE(review): itemgetter(1) sorts the command *name strings* by
        # their second character; sorting by atable[abil].sortorder looks
        # intended -- confirm before changing.
        abils = [abil for abil in sorted(atable,key=operator.itemgetter(1)) if self.location.allowsAbility(atable[abil])]
        if len(abils) > 0:
            return '{ %s }' % ' | '.join(abils)
        return ''
    def copyOther(self, other):
        """Copy stats, location and (deep-copied) inventory/status from *other*.

        Note: the abilities table is shared by reference, not copied.
        """
        self.hpmax, self.hp = other.hpmax, other.hp
        self.mpmax, self.mp = other.mpmax, other.mp
        self.str, self.mag, self.pdef, self.mdef, self.speed = other.str, other.mag, other.pdef, other.mdef, other.speed
        self.location = other.location
        self.hitverb = other.hitverb
        self.inventory = copy.deepcopy(other.inventory)
        self.status = copy.deepcopy(other.status)
        self.abilities = other.abilities
        # NOTE(review): dead local (or a misindented class attribute?) --
        # nothing reads it; confirm and remove.
        hitverb = 'hits'
class Player(Creature):
    """A human-controlled creature.

    Relays game messages to the owning connection object and translates raw
    input lines into game commands according to ``cmdmode``.  Also carries
    the party/grouping mechanics (leader/group).
    """
    owner = None #communications.ChatProtocol
    leader = None #another Player
    cmdmode = '.'  # prefix that marks input as a game command ('' = commands by default)
    def __init__(self, owner):
        super(Player,self).__init__()
        self.owner = owner
        self.name = owner.nickname
        self.gameState = {}
    def takeMessage(self, type, message, speaker=None):
        """Forward a game message to the owning connection."""
        #do ignore lists etc
        #do other game data substitutions
        self.owner.message(type, message, speaker)
    def doCommand(self, line, subtable=None):
        """Interpret a raw input line.

        With a prefix set, only prefixed lines are game commands (',' still
        emotes; anything else is chat).  With the prefix cleared, the escape
        direction flips: bare lines are commands and '.' escapes to chat.
        """
        if subtable is not None:
            super(Player,self).doCommand(line, subtable)
        elif line == '':
            self.prompt(1)
        elif (self.cmdmode==''):
            if line.startswith('.'):
                super(Player,self).doCommand('say ' + line[1:])
            elif line.startswith(','):
                super(Player,self).doCommand('emote ' + line[1:])
            else:
                super(Player,self).doCommand(line)
        else:
            if line.startswith(self.cmdmode):
                super(Player,self).doCommand(line[len(self.cmdmode):])
            elif line.startswith(','):
                super(Player,self).doCommand('emote ' + line[1:])
            else:
                super(Player,self).doCommand('say ' + line)
    @property
    def party(self):
        # The whole group this player belongs to (own group when leading).
        if self.leader is None:
            return self.group
        else:
            return self.leader.group
    @property
    def groupLeader(self):
        # The group's leader; self when ungrouped or leading.
        if self.leader is None:
            return self
        else:
            return self.leader
    def joinGroup(self, target):
        """Join target's group (redirected to its leader when target follows
        someone).  Raises JoinSelfError / GroupFullError; leaves any current
        group first.  A follower's own ``group`` is set to None.
        """
        if target is self:
            raise JoinSelfError
        if target.leader is not None:
            target = target.leader
        if len(target.group) >= 6:
            raise GroupFullError
        if self.leader is not None:
            self.leaveGroup()
        self.takeMessage(MSG_GAME_EVENT, 'You join %s\'s group.' % target.name )
        for mate in target.group:
            mate.takeMessage( MSG_GAME_EVENT, '%s joined %s group.' % (self.name, (mate is target) and 'your' or 'the') )
        self.leader = target
        self.leader.group.add(self)
        self.group = None
    def leaveGroup(self):
        """Leave the current group; when leading one, disband it.

        Always ends with this player alone in a fresh group of one.
        """
        if self.leader is not None:
            self.leader.group.remove(self)
            self.takeMessage(MSG_GAME_EVENT, 'You leave %s\'s group.' % self.leader.name )
            for mate in self.leader.group:
                mate.takeMessage( MSG_GAME_EVENT, '%s left %s group.' % (self.name, (mate is self.leader) and 'your' or 'the') )
        if self.group is not None and len(self.group) > 1:
            self.takeMessage(MSG_GAME_EVENT, 'You disband the group.')
            # Iterate a copy: each mate.leaveGroup() mutates the set.
            for mate in self.group.copy():
                if mate is not self:
                    mate.takeMessage(MSG_GAME_EVENT, '%s disbanded the group.' % self.name )
                    mate.leaveGroup()
        self.leader = None
        self.group = set()
        self.group.add(self)
    def turnReady(self):
        """Announce readiness to the room and re-show the full prompt."""
        super(Player,self).turnReady()
        self.location.takeMessage(MSG_GAME_EVENT, '%s is ready to act.' % self.name, self)
        self.prompt(1)
    def quit(self):
        """Remove the player from grouping and from the world on disconnect."""
        self.leaveGroup()
        self.location.loseCreature(self)
        #save character file
class Monster(Creature):
    """An AI-controlled creature.

    Current AI is trivially random: while ready, pick any ability the
    location allows and aim it at a random living Player.
    """
    def __init__(self):
        super(Monster, self).__init__()
    def turnReady(self):
        super(Monster,self).turnReady()
        #generalized ai here later
        #self.location.takeMessage( MSG_GAME_EVENT, 'debug: %s is ready.' % self.name )
        # NOTE(review): rng.choice raises IndexError on an empty list, and if
        # doCommand fails without clearing self.ready this loops forever --
        # assumes a fight always offers >=1 usable ability and live target.
        while self.ready:
            abil = rng.choice( [ key for key in self.abilities.keys() if self.location.allowsAbility(self.abilities[key]) ] )
            targ = rng.choice( [ creature.name for creature in self.location.creatures if creature.__class__ is Player and creature.hp>0 ] )
            #self.location.takeMessage( MSG_SYSTEM, 'debug: monster abil \'%s\' targ \'%s\' command \'%s\'' % (abil, targ, '%s %s' % (abil, targ)) )
            self.doCommand('%s %s' % (abil, targ))
class Role(Creature):
    """A creature template players copy their starting stats from."""
    pass
class Place(object):
    """Base class for anywhere creatures can be.

    Tracks occupants, routes chat/look/status commands, and resolves
    ability use in doAbility.  Subclasses (Zone, Fight) override
    allowsAbility / prompt / command_look.
    """
    def __init__(self):
        self.creatures = set()
        self.namemap = {}  # lowercase name -> creature, for target lookup
        self.commands = {'say': self.command_say, 'emote': self.command_emote, 'me': self.command_emote, 'look': self.command_look, 's': self.command_status, 'status': self.command_status, 'help': self.command_help, '?': self.command_help}
    def takeCreature(self, creature):
        """Move a creature here, removing it from its previous location."""
        self.creatures.add(creature)
        self.namemap[creature.name.lower()] = creature
        if creature.location:
            creature.location.loseCreature(creature)
        creature.location = self
    def loseCreature(self, creature):
        """Remove a creature from this place."""
        self.creatures.remove(creature)
        del self.namemap[creature.name.lower()]
    def takeMessage(self, type, message, speaker=None):
        """Broadcast a message to every creature here."""
        for creature in self.creatures:
            creature.takeMessage(type, message, speaker)
    def doCommand(self, doer, command, arg):
        """Dispatch a place-level command; unknown commands are ignored."""
        #if doer allowed command &c here
        if command in self.commands:
            self.commands[command](doer, arg)
    def command_say(self, speaker, message):
        self.takeMessage(MSG_CHAT, message, speaker)
    def command_emote(self, speaker, message):
        self.takeMessage(MSG_EMOTE, message, speaker)
    def command_look(self, looker, arg): #implemented by subclasses
        looker.takeMessage(MSG_GAME_INFO, 'You see nothing.')
    def command_status(self, checker, arg):
        """Show a status line for every member of the checker's group."""
        group = checker.party
        checker.takeMessage(MSG_GAME_INFO, '[ Status of %s\'s group: ]' % checker.groupLeader.name)
        maxnamelen = max([len(p.name) for p in group])
        for player in group:
            checker.takeMessage(MSG_GAME_INFO, '[ %*s: ]%s' % ( maxnamelen, player.name, player.statstring() ))
    def command_help(self, player, arg):
        player.takeMessage( MSG_GAME_INFO , 'Available game commands: %s' % ' '.join(self.commands) )
        player.takeMessage( MSG_GAME_INFO , 'Your available abilities: %s' % player.abilstring() )
        player.takeMessage( MSG_GAME_INFO , 'For server commands see \'/help\'')
    def prompt(self, creature): #implemented by subclasses
        pass
    def allowsAbility(self, ability):
        # Base places allow nothing; Zone and Fight override this.
        return False
    def doAbility(self, doer, ability, arg):
        """Resolve one use of an ability by doer.

        Raises DisallowedPlaceError when the place forbids the ability,
        NoMpError when doer cannot afford it, and DeadTargetError for dead
        targets of abilities lacking the 'lifedeath' flag.  Returns True
        on success.
        """
        if not self.allowsAbility(ability):
            raise DisallowedPlaceError
        if doer.mp < ability.mpcost:
            raise NoMpError
        usevars = {'doer': doer.name, 'arg': arg, 'mpcost': colstr(YELLOW, '(%d)' % ability.mpcost) }
        if ability.attackpower:
            aflags = ability.attackflags
            if ability.attacktype == 'hit':
                # Physical hits pick up extra flags from active statuses.
                for status in doer.status.values():
                    if 'hitflags' in status.statusflags:
                        aflags = aflags | status.statusflags['hitflags']
            targets = ability.acquireTargets(doer, arg)
            # Multi-target: pay mp and announce once, up front.
            if len(targets) > 1:
                doer.mp -= ability.mpcost
                if len(ability.usestring) > 0:
                    self.takeMessage(MSG_GAME_ATTACK, ability.usestring % usevars)
            for target in targets:
                power = ability.attackpower * doer.getstat(ability.attackattr)
                basedmg = rng.gauss(power * 100, power * 18)
                if target.hp <= 0 and 'lifedeath' not in ability.attackflags:
                    raise DeadTargetError
                # Single-target: pay and announce here so %(targ)s resolves.
                if len(targets) == 1:
                    doer.mp -= ability.mpcost
                    if len(ability.usestring) > 0:
                        usevars['targ'] = target.name
                        self.takeMessage(MSG_GAME_ATTACK, ability.usestring % usevars)
                #reflect
                #evasion
                #cover
                damage = ability.finalDamage(basedmg, target, 'hp')
                hitvars = {'doer': doer.name, 'targ': target.name, 'verb': ability.hitverb, 'dam': colstr( damage < 0 and GREEN or RED , abs(damage) ) }
                if ability.attacktype == 'hit':
                    hitvars['verb'] = doer.hitverb
                self.takeMessage(MSG_GAME_ATTACK, ability.hitstring % hitvars)
                target.hp -= damage
                #lifedeath flag handling
                target.hp = min(target.hp, target.hpmax)
                if target.hp <= 0:
                    target.die()
                else:
                    if ability.targetstatus is not None:
                        tstatus = copy.deepcopy(ability.targetstatus)
                        target.status[tstatus.name] = tstatus
                #stun, steal, clearbuffs, cleardebuffs
                #counter
                if target is not doer:
                    target.prompt()
        else:
            # Non-attack ability: just pay mp and announce the use.
            doer.mp -= ability.mpcost
            if len(ability.usestring) > 0:
                self.takeMessage(MSG_GAME_ATTACK, ability.usestring % usevars)
        # One-turn statuses expire after acting; then apply any self-status.
        for turnstatus in [s for s in doer.status if doer.status[s].duration == 'turn']:
            del doer.status[turnstatus]
        if ability.usestatus is not None:
            ustatus = copy.deepcopy(ability.usestatus)
            doer.status[ustatus.name] = ustatus
        return True
class Zone(Place):
    """An explorable region of the world.

    Adds field commands: explore (random fights/discoveries), travel,
    join/leave (grouping) and heal (testing aid).
    """
    def __init__(self, world, file, name):
        super(Zone, self).__init__()
        self.world = world
        self.file = file  # WorldFile this zone was loaded from
        self.name = name
        self.fightprob = 0  # chance of a random fight per explore
        self.discoveries = {}
        self.lookmsg = []  # candidate LookMessages for 'look'
        self.exit = {}
        self.fightprofiles = {}
        self.flags = set()
        self.commands.update({'explore': self.command_explore, 'travel': self.command_travel, 'join': self.command_join, 'leave': self.command_leave, 'heal': self.command_heal})
    def prompt(self, creature):
        """Show the member list of the creature's party, when grouped."""
        group = creature.party
        if len(group) > 1:
            creature.takeMessage( MSG_GAME_INFO , '[%s\'s group: %s]' % (creature.groupLeader.name, ', '.join([c.name for c in group]) ))
    def allowsAbility(self, ability):
        """True for abilities usable in the field; for ability subtables,
        true when any contained ability is usable."""
        try:
            if 'zone' in ability.usable:
                return True
        except AttributeError:
            # 'ability' is a subtable (dict of abilities), not an Ability.
            if len( [a for a in ability if self.allowsAbility(ability[a])] ) > 0:
                return True
        return False
    def command_look(self, looker, arg):
        """Describe the zone and list the other creatures present."""
        msg = 'You see nothing.'
        for m in self.lookmsg:
            if True: #evaluate m.req
                msg = m.msg
        looker.takeMessage(MSG_GAME_INFO, msg)
        others = [c.name for c in self.creatures if c is not looker]
        if len(others) == 0:
            others = ['No one.']
        looker.takeMessage(MSG_GAME_INFO, 'Here: %s' % ', '.join(others) )
        #show discoveries, exits
    def command_join(self, joiner, arg):
        """Join the group of the named player in this zone."""
        try:
            joiner.joinGroup(self.namemap[arg.lower()])
        except KeyError:
            joiner.takeMessage(MSG_GAME_ERR, 'They\'re not here.')
        except JoinSelfError:
            joiner.takeMessage(MSG_GAME_ERR, 'You can\'t follow yourself, you\'d just go in circles!')
        except GroupFullError:
            joiner.takeMessage(MSG_GAME_ERR, 'Their group is full.')
    def command_leave(self, leaver, arg):
        """Leave the current group (or disband it when leading)."""
        if leaver.leader is None and len(leaver.group) <= 1:
            # BUG FIX: this message previously used the undefined name
            # 'joiner', raising NameError for ungrouped players.
            leaver.takeMessage(MSG_GAME_ERR, 'You\'re not in a group.')
        else:
            leaver.leaveGroup()
    def command_explore(self, player, arg):
        """Explore the zone: may yield discoveries and/or start a random
        fight.  Only group leaders may explore (NotLeaderError otherwise)."""
        if player.leader is not None:
            raise NotLeaderError
        possibleDiscoveries = []
        # NOTE(review): iterating self.discoveries yields its KEYS, and d.p
        # is then read from them -- confirm keys are Discovery objects.
        for d in self.discoveries:
            if rng.random() <= d.p: #and player's explore count is high enough
                #evaluate reqs, then if they pass:
                possibleDiscoveries.append(d)
        #if there are any possibleDiscoveries, then we want the player to discover rng.choice(possibleDiscoveries)
        if rng.random() <= self.fightprob:
            fight = Fight(self)
            mgroup = set()
            nplayers = len(player.group)
            # Spawn between nplayers and nplayers*2.5 monsters.
            for i in range( 0,rng.randint(nplayers, int(nplayers * 2.5)) ):
                monster = self.world.newMonster('core', 'beast')
                monster.name = 'Beast%d' % (i+1)
                mgroup.add(monster)
                monster.group = mgroup
            fight.takeGroup( '%s\'s group' % player.name, player.group )
            fight.takeGroup( 'Monsters', mgroup )
            fight.start()
    def command_travel(self, player, arg):
        # Not yet implemented.
        pass
    def command_heal(self, player, arg): #temporary testing command
        """Fully restore hp/mp (testing aid)."""
        player.hp, player.mp = player.hpmax, player.mpmax
        player.takeMessage(MSG_GAME_EVENT, 'The Goddess mends your wounds and cleanses your spirit.')
        player.prompt()
class LookMessage(object):
    """A conditional description line for a zone's 'look' output."""
    def __init__(self, msg):
        # Text shown when the (not yet implemented) requirement passes.
        self.msg = msg
        # Placeholder for a future requirement expression.
        self.req = None
class Discovery(object):
    """Something that can be found while exploring a zone."""
    def __init__(self):
        # Probability of being offered per explore attempt.
        self.p = 1
        # Minimum explore count before it may appear.
        self.explore = 0
        # Extra 'look' lines once discovered.
        self.lookmsg = []
        # Requirement expression and consequence (both unimplemented).
        self.req = None
        self.consq = None
class Fight(Place):
    """A battle instance: named groups of creatures exchanging timed turns."""
    def __init__(self, zone):
        super(Fight, self).__init__()
        self.zone = zone  # zone to return survivors to when the fight ends
        self.groups = {}  # group name -> set of creatures
        self.dead = set()
    def prompt(self, creature):
        """List each side's members."""
        for gname in self.groups:
            creature.takeMessage( MSG_GAME_INFO , '[%s: %s]' % (gname, ', '.join([c.colorname for c in self.groups[gname]]) ))
    def allowsAbility(self, ability):
        """True for abilities usable in a fight; for ability subtables,
        true when any contained ability is usable."""
        try:
            if 'fight' in ability.usable:
                return True
        except AttributeError:
            # 'ability' is a subtable (dict of abilities), not an Ability.
            if len( [a for a in ability if self.allowsAbility(ability[a])] ) > 0:
                return True
        return False
    def doAbility(self, doer, ability, arg):
        """Resolve an ability use, consume the turn and restart the doer's
        ready timer.  Raises NotReadyError between turns."""
        if not self.allowsAbility(ability):
            raise DisallowedPlaceError
        if doer.ready:
            if super(Fight,self).doAbility(doer, ability, arg):
                doer.ready = False
                doer.stopReadyTimer()
                doer.readyTimer = reactor.callLater(doer.getstat('delay'), doer.turnReady)
                doer.prompt()
                self.checkForEnd()
        else:
            raise NotReadyError
    def takeGroup(self, name, group):
        """Enroll a named group of creatures into the fight."""
        self.groups[name] = group
        for creature in group:
            self.takeCreature(creature)
    def start(self):
        """Announce the fight and start everyone's (staggered) ready timers."""
        self.takeMessage( MSG_GAME_EVENT , 'A fight starts!')
        for creature in self.creatures:
            creature.ready = False
            # Divide by a random 1..2 factor so initial turns are staggered.
            creature.readyTimer = reactor.callLater(creature.getstat('delay') / (rng.random() + 1), creature.turnReady)
        for creature in self.creatures:
            creature.prompt(1)
    def checkForEnd(self):
        """End the fight once at most one group still has live members."""
        winners = [ g for g in self.groups if len([c for c in self.groups[g] if c.hp>0]) ]
        if len(winners) < 2:
            for w in winners:
                self.takeMessage( MSG_GAME_EVENT , 'Victorious: %s!' % w)
            #dole out exp
            self.end()
    def end(self):
        """Stop all timers and return surviving players to the zone."""
        for creature in self.creatures:
            creature.stopReadyTimer()
        # Iterate a list copy: takeCreature mutates self.creatures.
        for player in [c for c in self.creatures if c.__class__ is Player]:
            self.zone.takeCreature(player)
            player.hp = max(player.hp, 1)
            player.ready = True
            player.doCommand('%slook' % player.cmdmode)
            player.prompt()
class World(object):
def __init__(self):
self.worldFiles = {}
self.defaultZone = None
self.roles = {}
def newPlayer(self, owner):
char = Player(owner)
char.group.add(char)
return char
def newMonster(self, file, name):
monster = Monster()
monster.copyOther(self.worldFiles[file].creatures[name])
return monster
def assignRole(self, char, rolename):
try:
char.copyOther(self.roles[rolename])
return True
except KeyError:
return False
def roleList(self):
return '{ %s }' % ' | '.join(self.roles)
class WorldFile(object):
    """Parsed contents of one world data file, keyed by identifier."""
    def __init__(self, name):
        self.name = name
        # Definition tables loaded from the file.
        self.zones, self.creatures, self.abilities = {}, {}, {}
class PlayerState(object):
    """Persistent per-player world-progress data."""
    def __init__(self):
        # Explore counter toward new discoveries.
        self.explore = 0
        # Discoveries already found.
        self.discovered = {}
        # Current zone and owning account/connection.
        self.zone = None
        self.owner = None
from twisted.internet import protocol, defer, reactor
from twisted.internet.task import LoopingCall
from twisted.protocols import basic
import re
import time
from formatting import *
try:
set
except NameError:
from sets import Set as set
# Connection login states: name entry, password, role selection, in-game.
CONSTATE_NAME, CONSTATE_PWD, CONSTATE_ROLE, CONSTATE_MAIN = range(0,4)
# Idle timeout in seconds -- NOTE(review): not referenced in this chunk; confirm usage.
TIMEOUTTIME = 1800
class ChatProtocol(basic.LineReceiver):
    """One client connection.

    Walks the login state machine (name -> [password] -> role -> main) and
    afterwards routes each input line either to a /server command or to
    the player's character.
    """
    state = CONSTATE_NAME
    # 0: waiting for nickname
    # 1: waiting for password
    # 2: logged in
    new = 0
    nickname = None
    character = None
    def connectionMade(self):
        #welcome and prompt for name or new
        self.message( MSG_SYSTEM , self.factory.motd )
        #self.message( MSG_SYS_EVENT , 'Login: (\'new\' for new)' )
        self.message( MSG_SYS_EVENT , 'Enter name:' )
    def connectionLost(self, reason):
        """Clean up on disconnect.

        Guards added: a connection may drop before a nickname was entered
        (nothing to unregister, previously an AttributeError) or before
        the character entered the world (quit() would dereference a None
        location).
        """
        if self.nickname is not None:
            self.factory.unregister(self.nickname, self)
        if self.character is not None and self.state == CONSTATE_MAIN:
            self.character.quit()
    def lineReceived(self, line):
        """Handle one line of input according to the connection state."""
        if self.state == CONSTATE_NAME: #receiving name
            #if line == 'new':
            #else:
            self.nickname = line
            if re.match(r'\A[\w!"$-.:-@[-`{-~]{1,15}\Z', self.nickname) is None or self.nickname.lower() in ('new', 'all', 'self'):
                self.message( MSG_SYS_ERR , 'Invalid name.' )
            elif self.factory.register(self.nickname, self):
                self.character = self.factory.world.newPlayer(self)
                self.message( MSG_SYS_INFO , 'Choose a character role: %s' % self.factory.world.roleList() )
                self.state = CONSTATE_ROLE
            else:
                self.message( MSG_SYS_ERR , 'Name in use.' )
        elif self.state == CONSTATE_PWD: #receiving password
            # BUG FIX: was 'state = CONSTATE_NAME', which only bound a local
            # and left the connection stuck in the password state.
            self.state = CONSTATE_NAME #not yet implemented, so get out
        elif self.state == CONSTATE_ROLE: #choosing character role
            if self.factory.world.assignRole(self.character, line):
                self.message( MSG_SYS_INFO , 'Welcome!' )
                self.state = CONSTATE_MAIN
                self.factory.world.defaultZone.takeCreature(self.character)
                self.character.doCommand('%slook' % self.character.cmdmode)
            else:
                self.message( MSG_SYS_ERR , 'Invalid role. Choose: %s' % self.factory.world.roleList() )
        elif self.state == CONSTATE_MAIN: #main logged in
            if line.startswith('/'): #server command
                command, _, arg = line[1:].partition(' ')
                func = self.commands.get(command, None)
                if func is None:
                    self.message( MSG_SYS_ERR , 'Invalid command \'%s\'' % command )
                else:
                    func(self, arg)
            else: #game command, send to character
                self.character.doCommand(line)
    def message(self, type, line, creature = None):
        """Colour-code a message by its type and send it to the client."""
        if type == MSG_SYSTEM:
            line = colstr( MAGENTA, line )
        elif type == MSG_SYS_INFO:
            line = colstr( YELLOW, line )
        elif type == MSG_SYS_EVENT:
            line = colstr( GREEN, line )
        elif type == MSG_SYS_ERR:
            line = colstr( RED, line )
        elif type == MSG_CHAT:
            line = '%s %s' % (colstr( creature is self.character and CYAN or YELLOW, '<%s>' % creature.name), line)
        elif type == MSG_EMOTE:
            line = colstr( YELLOW, '<%s %s>' % (creature.name, line))
        elif type == MSG_GAME_INFO:
            line = colstr( YELLOW, line )
        elif type == MSG_GAME_EVENT:
            line = colstr( GREEN, line )
            if creature is self.character:
                # NOTE(review): naive substring substitution -- also rewrites
                # 'is' inside other words (e.g. 'his'); confirm acceptable.
                line = colstr( BOLD, line.replace(creature.name, 'You').replace('is', 'are') )
        elif type == MSG_GAME_ERR:
            line = colstr( RED, line )
        self.sendLine(line)
    def command_quit(self, arguments):
        """/quit -- disconnect."""
        self.message( MSG_SYSTEM , 'Bye!' )
        self.transport.loseConnection()
    def command_time(self, arguments):
        """/time -- show server time."""
        self.message( MSG_SYS_INFO , 'System time: %s' % time.asctime() )
    def command_cmdmode(self, arguments):
        """/cmdmode [prefix] -- set or clear the game-command prefix."""
        if arguments:
            self.character.cmdmode = arguments
            self.message( MSG_SYS_EVENT , 'Set game command prefix to \'%s\'.' % arguments )
        else:
            self.character.cmdmode = ''
            self.message( MSG_SYS_EVENT , 'Cleared game command prefix.')
    def command_help(self, arguments):
        """/help -- list server commands."""
        self.message( MSG_SYS_INFO , 'Available server commands: %s' % ' '.join(self.commands) )
        self.message( MSG_SYS_INFO , 'For game commands see \'%shelp\'' % self.character.cmdmode)
    # Dispatch table for /server commands (unbound methods, called as f(self, arg)).
    commands = {'quit': command_quit, 'time': command_time, 'cmdmode': command_cmdmode, 'help': command_help, '?': command_help}
class ChatFactory(protocol.ServerFactory):
    """Server factory tracking connected clients by nickname."""
    protocol = ChatProtocol
    def __init__(self):
        self.active_clients = set()
        self.illegal_names = set()
        self.nickname_map = {}  # lowercase nickname -> connection
        self.world = None
        self.motd = ''
    def register(self, nickname, listener):
        """Claim a nickname for a connection; False when already taken."""
        if nickname.lower() in self.nickname_map:
            return False
        self.active_clients.add(listener)
        self.nickname_map[nickname.lower()] = listener
        self.sysMessage(MSG_SYSTEM, '%s connected.' % nickname)
        return True
    def unregister(self, nickname, listener):
        """Release a nickname held by a connection.

        BUG FIX: now safe for clients that never completed registration
        (e.g. disconnected while their chosen name was in use) -- the old
        unconditional remove/del raised KeyError and could even evict the
        legitimate owner's nickname_map entry.
        """
        if nickname is None:
            return
        key = nickname.lower()
        if self.nickname_map.get(key) is not listener:
            return  # this listener never owned the name
        self.active_clients.discard(listener)
        del self.nickname_map[key]
        self.sysMessage(MSG_SYSTEM, '%s disconnected.' % nickname)
    def sysMessage(self, type, message):
        """Broadcast a system message to every connected client."""
        for client in self.active_clients:
            client.message(type, message)
# Server configuration constants -- NOTE(review): consumed elsewhere; confirm usage.
port = 4002  # TCP listen port
motd = 'Frantic Fallacy 2a1'  # banner shown on connect
worldpath = 'data/world'  # world data directory
playerpath = 'data/player'  # saved player data directory
#!/usr/bin/env python
# Copyright (c) 2007, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ---
# Author: Chad Lester
# Design and style contributions by:
# Amit Patel, Bogdan Cocosel, Daniel Dulitz, Eric Tiedemann,
# Eric Veach, Laurence Gonsalves, Matthew Springer
# Code reorganized a bit by Craig Silverstein
"""This module is used to define and parse command line flags.
This module defines a *distributed* flag-definition policy: rather than
an application having to define all flags in or near main(), each python
module defines flags that are useful to it. When one python module
imports another, it gains access to the other's flags. (This is
implemented by having all modules share a common, global registry object
containing all the flag information.)
Flags are defined through the use of one of the DEFINE_xxx functions.
The specific function used determines how the flag is parsed, checked,
and optionally type-converted, when it's seen on the command line.
IMPLEMENTATION: DEFINE_* creates a 'Flag' object and registers it with a
'FlagValues' object (typically the global FlagValues FLAGS, defined
here). The 'FlagValues' object can scan the command line arguments and
pass flag arguments to the corresponding 'Flag' objects for
value-checking and type conversion. The converted flag values are
available as attributes of the 'FlagValues' object.
Code can access the flag through a FlagValues object, for instance
gflags.FLAGS.myflag. Typically, the __main__ module passes the
command line arguments to gflags.FLAGS for parsing.
At bottom, this module calls getopt(), so getopt functionality is
supported, including short- and long-style flags, and the use of -- to
terminate flags.
Methods defined by the flag module will throw 'FlagsError' exceptions.
The exception argument will be a human-readable string.
FLAG TYPES: This is a list of the DEFINE_*'s that you can do. All flags
take a name, default value, help-string, and optional 'short' name
(one-letter name). Some flags have other arguments, which are described
with the flag.
DEFINE_string: takes any input, and interprets it as a string.
DEFINE_bool or
DEFINE_boolean: typically does not take an argument: say --myflag to
set FLAGS.myflag to true, or --nomyflag to set
FLAGS.myflag to false. Alternately, you can say
--myflag=true or --myflag=t or --myflag=1 or
--myflag=false or --myflag=f or --myflag=0
DEFINE_float: takes an input and interprets it as a floating point
number. Takes optional args lower_bound and upper_bound;
if the number specified on the command line is out of
range, it will raise a FlagError.
DEFINE_integer: takes an input and interprets it as an integer. Takes
optional args lower_bound and upper_bound as for floats.
DEFINE_enum: takes a list of strings which represents legal values. If
the command-line value is not in this list, raise a flag
error. Otherwise, assign to FLAGS.flag as a string.
DEFINE_list: Takes a comma-separated list of strings on the commandline.
Stores them in a python list object.
DEFINE_spaceseplist: Takes a space-separated list of strings on the
commandline. Stores them in a python list object.
Example: --myspacesepflag "foo bar baz"
DEFINE_multistring: The same as DEFINE_string, except the flag can be
specified more than once on the commandline. The
result is a python list object (list of strings),
even if the flag is only on the command line once.
DEFINE_multi_int: The same as DEFINE_integer, except the flag can be
specified more than once on the commandline. The
result is a python list object (list of ints), even if
the flag is only on the command line once.
SPECIAL FLAGS: There are a few flags that have special meaning:
--help prints a list of all the flags in a human-readable fashion
--helpshort prints a list of all key flags (see below).
--helpxml prints a list of all flags, in XML format. DO NOT parse
the output of --help and --helpshort. Instead, parse
the output of --helpxml. For more info, see
"OUTPUT FOR --helpxml" below.
--flagfile=foo read flags from file foo.
--undefok=f1,f2 ignore unrecognized option errors for f1,f2.
For boolean flags, you should use --undefok=boolflag, and
--boolflag and --noboolflag will be accepted. Do not use
--undefok=noboolflag.
-- as in getopt(), terminates flag-processing
FLAGS VALIDATORS: If your program:
- requires flag X to be specified
- needs flag Y to match a regular expression
- or requires any more general constraint to be satisfied
then validators are for you!
Each validator represents a constraint over one flag, which is enforced
starting from the initial parsing of the flags and until the program
terminates.
Also, lower_bound and upper_bound for numerical flags are enforced using flag
validators.
Howto:
If you want to enforce a constraint over one flag, use
flags.RegisterValidator(flag_name,
checker,
message='Flag validation failed',
flag_values=FLAGS)
After flag values are initially parsed, and after any change to the specified
flag, method checker(flag_value) will be executed. If constraint is not
satisfied, an IllegalFlagValue exception will be raised. See
RegisterValidator's docstring for a detailed explanation on how to construct
your own checker.
EXAMPLE USAGE:
FLAGS = flags.FLAGS
flags.DEFINE_integer('my_version', 0, 'Version number.')
flags.DEFINE_string('filename', None, 'Input file name', short_name='f')
flags.RegisterValidator('my_version',
lambda value: value % 2 == 0,
message='--my_version must be divisible by 2')
flags.MarkFlagAsRequired('filename')
NOTE ON --flagfile:
Flags may be loaded from text files in addition to being specified on
the commandline.
Any flags you don't feel like typing, throw them in a file, one flag per
line, for instance:
--myflag=myvalue
--nomyboolean_flag
You then specify your file with the special flag '--flagfile=somefile'.
You CAN recursively nest flagfile= tokens OR use multiple files on the
command line. Lines beginning with a single hash '#' or a double slash
'//' are comments in your flagfile.
Any flagfile=<file> will be interpreted as having a relative path from
the current working directory rather than from the place the file was
included from:
myPythonScript.py --flagfile=config/somefile.cfg
If somefile.cfg includes further --flagfile= directives, these will be
referenced relative to the original CWD, not from the directory the
including flagfile was found in!
The caveat applies to people who are including a series of nested files
in a different dir than they are executing out of. Relative path names
are always from CWD, not from the directory of the parent include
flagfile. We do now support '~' expanded directory names.
Absolute path names ALWAYS work!
EXAMPLE USAGE:
import gflags
FLAGS = gflags.FLAGS
# Flag names are globally defined! So in general, we need to be
# careful to pick names that are unlikely to be used by other libraries.
# If there is a conflict, we'll get an error at import time.
gflags.DEFINE_string('name', 'Mr. President', 'your name')
gflags.DEFINE_integer('age', None, 'your age in years', lower_bound=0)
gflags.DEFINE_boolean('debug', False, 'produces debugging output')
gflags.DEFINE_enum('gender', 'male', ['male', 'female'], 'your gender')
def main(argv):
try:
argv = FLAGS(argv) # parse flags
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, sys.argv[0], FLAGS)
sys.exit(1)
if FLAGS.debug: print 'non-flag arguments:', argv
print 'Happy Birthday', FLAGS.name
if FLAGS.age is not None:
print 'You are a %s, who is %d years old' % (FLAGS.gender, FLAGS.age)
if __name__ == '__main__':
main(sys.argv)
KEY FLAGS:
As we already explained, each module gains access to all flags defined
by all the other modules it transitively imports. In the case of
non-trivial scripts, this means a lot of flags ... For documentation
purposes, it is good to identify the flags that are key (i.e., really
important) to a module. Clearly, the concept of "key flag" is a
subjective one. When trying to determine whether a flag is key to a
module or not, assume that you are trying to explain your module to a
potential user: which flags would you really like to mention first?
We'll describe shortly how to declare which flags are key to a module.
For the moment, assume we know the set of key flags for each module.
Then, if you use the app.py module, you can use the --helpshort flag to
print only the help for the flags that are key to the main module, in a
human-readable format.
NOTE: If you need to parse the flag help, do NOT use the output of
--help / --helpshort. That output is meant for human consumption, and
may be changed in the future. Instead, use --helpxml; flags that are
key for the main module are marked there with a <key>yes</key> element.
The set of key flags for a module M is composed of:
1. Flags defined by module M by calling a DEFINE_* function.
2. Flags that module M explicitly declares as key by using the function
DECLARE_key_flag(<flag_name>)
3. Key flags of other modules that M specifies by using the function
ADOPT_module_key_flags(<other_module>)
This is a "bulk" declaration of key flags: each flag that is key for
<other_module> becomes key for the current module too.
Notice that if you do not use the functions described at points 2 and 3
above, then --helpshort prints information only about the flags defined
by the main module of our script. In many cases, this behavior is good
enough. But if you move part of the main module code (together with the
related flags) into a different module, then it is nice to use
DECLARE_key_flag / ADOPT_module_key_flags and make sure --helpshort
lists all relevant flags (otherwise, your code refactoring may confuse
your users).
Note: each of DECLARE_key_flag / ADOPT_module_key_flags has its own
pluses and minuses: DECLARE_key_flag is more targeted and may lead a
more focused --helpshort documentation. ADOPT_module_key_flags is good
for cases when an entire module is considered key to the current script.
Also, it does not require updates to client scripts when a new flag is
added to the module.
EXAMPLE USAGE 2 (WITH KEY FLAGS):
Consider an application that contains the following three files (two
auxiliary modules and a main module):
File libfoo.py:
import gflags
gflags.DEFINE_integer('num_replicas', 3, 'Number of replicas to start')
gflags.DEFINE_boolean('rpc2', True, 'Turn on the usage of RPC2.')
... some code ...
File libbar.py:
import gflags
gflags.DEFINE_string('bar_gfs_path', '/gfs/path',
'Path to the GFS files for libbar.')
gflags.DEFINE_string('email_for_bar_errors', 'bar-team@google.com',
'Email address for bug reports about module libbar.')
gflags.DEFINE_boolean('bar_risky_hack', False,
'Turn on an experimental and buggy optimization.')
... some code ...
File myscript.py:
import gflags
import libfoo
import libbar
gflags.DEFINE_integer('num_iterations', 0, 'Number of iterations.')
# Declare that all flags that are key for libfoo are
# key for this module too.
gflags.ADOPT_module_key_flags(libfoo)
# Declare that the flag --bar_gfs_path (defined in libbar) is key
# for this module.
gflags.DECLARE_key_flag('bar_gfs_path')
... some code ...
When myscript is invoked with the flag --helpshort, the resulting help
message lists information about all the key flags for myscript:
--num_iterations, --num_replicas, --rpc2, and --bar_gfs_path (in
addition to the special flags --help and --helpshort).
Of course, myscript uses all the flags declared by it (in this case,
just --num_iterations) or by any of the modules it transitively imports
(e.g., the modules libfoo, libbar). E.g., it can access the value of
FLAGS.bar_risky_hack, even if --bar_risky_hack is not declared as a key
flag for myscript.
OUTPUT FOR --helpxml:
The --helpxml flag generates output with the following structure:
<?xml version="1.0"?>
<AllFlags>
<program>PROGRAM_BASENAME</program>
<usage>MAIN_MODULE_DOCSTRING</usage>
(<flag>
[<key>yes</key>]
<file>DECLARING_MODULE</file>
<name>FLAG_NAME</name>
<meaning>FLAG_HELP_MESSAGE</meaning>
<default>DEFAULT_FLAG_VALUE</default>
<current>CURRENT_FLAG_VALUE</current>
<type>FLAG_TYPE</type>
[OPTIONAL_ELEMENTS]
</flag>)*
</AllFlags>
Notes:
1. The output is intentionally similar to the output generated by the
C++ command-line flag library. The few differences are due to the
Python flags that do not have a C++ equivalent (at least not yet),
e.g., DEFINE_list.
2. New XML elements may be added in the future.
3. DEFAULT_FLAG_VALUE is in serialized form, i.e., the string you can
pass for this flag on the command-line. E.g., for a flag defined
using DEFINE_list, this field may be foo,bar, not ['foo', 'bar'].
4. CURRENT_FLAG_VALUE is produced using str(). This means that the
string 'false' will be represented in the same way as the boolean
False. Using repr() would have removed this ambiguity and simplified
parsing, but would have broken the compatibility with the C++
command-line flags.
5. OPTIONAL_ELEMENTS describe elements relevant for certain kinds of
flags: lower_bound, upper_bound (for flags that specify bounds),
enum_value (for enum flags), list_separator (for flags that consist of
a list of values, separated by a special token).
6. We do not provide any example here: please use --helpxml instead.
"""
import cgi
import getopt
import os
import re
import string
import sys
import gflags_validators
# Are we running at least python 2.2?
try:
if tuple(sys.version_info[:3]) < (2,2,0):
raise NotImplementedError("requires python 2.2.0 or later")
except AttributeError: # a very old python, that lacks sys.version_info
raise NotImplementedError("requires python 2.2.0 or later")
# If we're not running at least python 2.2.1, define True, False, and bool.
# Thanks, Guido, for the code.
try:
True, False, bool
except NameError:
False = 0
True = 1
def bool(x):
if x:
return True
else:
return False
# Are we running under pychecker?
_RUNNING_PYCHECKER = 'pychecker.python' in sys.modules
def _GetCallingModule():
"""Returns the name of the module that's calling into this module.
We generally use this function to get the name of the module calling a
DEFINE_foo... function.
"""
# Walk down the stack to find the first globals dict that's not ours.
for depth in range(1, sys.getrecursionlimit()):
if not sys._getframe(depth).f_globals is globals():
globals_for_frame = sys._getframe(depth).f_globals
module_name = _GetModuleObjectAndName(globals_for_frame)[1]
if module_name is not None:
return module_name
raise AssertionError("No module was found")
def _GetThisModuleObjectAndName():
"""Returns: (module object, module name) for this module."""
return _GetModuleObjectAndName(globals())
# module exceptions:
class FlagsError(Exception):
"""The base class for all flags errors."""
pass
class DuplicateFlag(FlagsError):
"""Raised if there is a flag naming conflict."""
pass
class DuplicateFlagCannotPropagateNoneToSwig(DuplicateFlag):
"""Special case of DuplicateFlag -- SWIG flag value can't be set to None.
This can be raised when a duplicate flag is created. Even if allow_override is
True, we still abort if the new value is None, because it's currently
impossible to pass None default value back to SWIG. See FlagValues.SetDefault
for details.
"""
pass
# A DuplicateFlagError conveys more information than a
# DuplicateFlag. Since there are external modules that create
# DuplicateFlags, the interface to DuplicateFlag shouldn't change.
class DuplicateFlagError(DuplicateFlag):
def __init__(self, flagname, flag_values):
self.flagname = flagname
message = "The flag '%s' is defined twice." % self.flagname
flags_by_module = flag_values.FlagsByModuleDict()
for module in flags_by_module:
for flag in flags_by_module[module]:
if flag.name == flagname or flag.short_name == flagname:
message = message + " First from " + module + ","
break
message = message + " Second from " + _GetCallingModule()
DuplicateFlag.__init__(self, message)
class IllegalFlagValue(FlagsError):
"""The flag command line argument is illegal."""
pass
class UnrecognizedFlag(FlagsError):
"""Raised if a flag is unrecognized."""
pass
# An UnrecognizedFlagError conveys more information than an UnrecognizedFlag.
# Since there are external modules that create DuplicateFlags, the interface to
# DuplicateFlag shouldn't change. The flagvalue will be assigned the full value
# of the flag and its argument, if any, allowing handling of unrecognized flags
# in an exception handler.
# If flagvalue is the empty string, then this exception is due to a
# reference to a flag that was not already defined.
class UnrecognizedFlagError(UnrecognizedFlag):
def __init__(self, flagname, flagvalue=''):
self.flagname = flagname
self.flagvalue = flagvalue
UnrecognizedFlag.__init__(
self, "Unknown command line flag '%s'" % flagname)
# Global variable used by expvar
_exported_flags = {}
_help_width = 80 # width of help output
def GetHelpWidth():
"""Returns: an integer, the width of help lines that is used in TextWrap."""
return _help_width
def CutCommonSpacePrefix(text):
"""Removes a common space prefix from the lines of a multiline text.
If the first line does not start with a space, it is left as it is and
only in the remaining lines a common space prefix is being searched
for. That means the first line will stay untouched. This is especially
useful to turn doc strings into help texts. This is because some
people prefer to have the doc comment start already after the
apostrophy and then align the following lines while others have the
apostrophies on a seperately line.
The function also drops trailing empty lines and ignores empty lines
following the initial content line while calculating the initial
common whitespace.
Args:
text: text to work on
Returns:
the resulting text
"""
text_lines = text.splitlines()
# Drop trailing empty lines
while text_lines and not text_lines[-1]:
text_lines = text_lines[:-1]
if text_lines:
# We got some content, is the first line starting with a space?
if text_lines[0] and text_lines[0][0].isspace():
text_first_line = []
else:
text_first_line = [text_lines.pop(0)]
# Calculate length of common leading whitesppace (only over content lines)
common_prefix = os.path.commonprefix([line for line in text_lines if line])
space_prefix_len = len(common_prefix) - len(common_prefix.lstrip())
# If we have a common space prefix, drop it from all lines
if space_prefix_len:
for index in xrange(len(text_lines)):
if text_lines[index]:
text_lines[index] = text_lines[index][space_prefix_len:]
return '\n'.join(text_first_line + text_lines)
return ''
def TextWrap(text, length=None, indent='', firstline_indent=None, tabs=' '):
"""Wraps a given text to a maximum line length and returns it.
We turn lines that only contain whitespaces into empty lines. We keep
new lines and tabs (e.g., we do not treat tabs as spaces).
Args:
text: text to wrap
length: maximum length of a line, includes indentation
if this is None then use GetHelpWidth()
indent: indent for all but first line
firstline_indent: indent for first line; if None, fall back to indent
tabs: replacement for tabs
Returns:
wrapped text
Raises:
FlagsError: if indent not shorter than length
FlagsError: if firstline_indent not shorter than length
"""
# Get defaults where callee used None
if length is None:
length = GetHelpWidth()
if indent is None:
indent = ''
if len(indent) >= length:
raise FlagsError('Indent must be shorter than length')
# In line we will be holding the current line which is to be started
# with indent (or firstline_indent if available) and then appended
# with words.
if firstline_indent is None:
firstline_indent = ''
line = indent
else:
line = firstline_indent
if len(firstline_indent) >= length:
raise FlagsError('First iline indent must be shorter than length')
# If the callee does not care about tabs we simply convert them to
# spaces If callee wanted tabs to be single space then we do that
# already here.
if not tabs or tabs == ' ':
text = text.replace('\t', ' ')
else:
tabs_are_whitespace = not tabs.strip()
line_regex = re.compile('([ ]*)(\t*)([^ \t]+)', re.MULTILINE)
# Split the text into lines and the lines with the regex above. The
# resulting lines are collected in result[]. For each split we get the
# spaces, the tabs and the next non white space (e.g. next word).
result = []
for text_line in text.splitlines():
# Store result length so we can find out whether processing the next
# line gave any new content
old_result_len = len(result)
# Process next line with line_regex. For optimization we do an rstrip().
# - process tabs (changes either line or word, see below)
# - process word (first try to squeeze on line, then wrap or force wrap)
# Spaces found on the line are ignored, they get added while wrapping as
# needed.
for spaces, current_tabs, word in line_regex.findall(text_line.rstrip()):
# If tabs weren't converted to spaces, handle them now
if current_tabs:
# If the last thing we added was a space anyway then drop
# it. But let's not get rid of the indentation.
if (((result and line != indent) or
(not result and line != firstline_indent)) and line[-1] == ' '):
line = line[:-1]
# Add the tabs, if that means adding whitespace, just add it at
# the line, the rstrip() code while shorten the line down if
# necessary
if tabs_are_whitespace:
line += tabs * len(current_tabs)
else:
# if not all tab replacement is whitespace we prepend it to the word
word = tabs * len(current_tabs) + word
# Handle the case where word cannot be squeezed onto current last line
if len(line) + len(word) > length and len(indent) + len(word) <= length:
result.append(line.rstrip())
line = indent + word
word = ''
# No space left on line or can we append a space?
if len(line) + 1 >= length:
result.append(line.rstrip())
line = indent
else:
line += ' '
# Add word and shorten it up to allowed line length. Restart next
# line with indent and repeat, or add a space if we're done (word
# finished) This deals with words that caanot fit on one line
# (e.g. indent + word longer than allowed line length).
while len(line) + len(word) >= length:
line += word
result.append(line[:length])
word = line[length:]
line = indent
# Default case, simply append the word and a space
if word:
line += word + ' '
# End of input line. If we have content we finish the line. If the
# current line is just the indent but we had content in during this
# original line then we need to add an emoty line.
if (result and line != indent) or (not result and line != firstline_indent):
result.append(line.rstrip())
elif len(result) == old_result_len:
result.append('')
line = indent
return '\n'.join(result)
def DocToHelp(doc):
"""Takes a __doc__ string and reformats it as help."""
# Get rid of starting and ending white space. Using lstrip() or even
# strip() could drop more than maximum of first line and right space
# of last line.
doc = doc.strip()
# Get rid of all empty lines
whitespace_only_line = re.compile('^[ \t]+$', re.M)
doc = whitespace_only_line.sub('', doc)
# Cut out common space at line beginnings
doc = CutCommonSpacePrefix(doc)
# Just like this module's comment, comments tend to be aligned somehow.
# In other words they all start with the same amount of white space
# 1) keep double new lines
# 2) keep ws after new lines if not empty line
# 3) all other new lines shall be changed to a space
# Solution: Match new lines between non white space and replace with space.
doc = re.sub('(?<=\S)\n(?=\S)', ' ', doc, re.M)
return doc
def _GetModuleObjectAndName(globals_dict):
"""Returns the module that defines a global environment, and its name.
Args:
globals_dict: A dictionary that should correspond to an environment
providing the values of the globals.
Returns:
A pair consisting of (1) module object and (2) module name (a
string). Returns (None, None) if the module could not be
identified.
"""
# The use of .items() (instead of .iteritems()) is NOT a mistake: if
# a parallel thread imports a module while we iterate over
# .iteritems() (not nice, but possible), we get a RuntimeError ...
# Hence, we use the slightly slower but safer .items().
for name, module in sys.modules.items():
if getattr(module, '__dict__', None) is globals_dict:
if name == '__main__':
# Pick a more informative name for the main module.
name = sys.argv[0]
return (module, name)
return (None, None)
def _GetMainModule():
"""Returns the name of the module from which execution started."""
for depth in range(1, sys.getrecursionlimit()):
try:
globals_of_main = sys._getframe(depth).f_globals
except ValueError:
return _GetModuleObjectAndName(globals_of_main)[1]
raise AssertionError("No module was found")
class FlagValues:
"""Registry of 'Flag' objects.
A 'FlagValues' can then scan command line arguments, passing flag
arguments through to the 'Flag' objects that it owns. It also
provides easy access to the flag values. Typically only one
'FlagValues' object is needed by an application: gflags.FLAGS
This class is heavily overloaded:
'Flag' objects are registered via __setitem__:
FLAGS['longname'] = x # register a new flag
The .value attribute of the registered 'Flag' objects can be accessed
as attributes of this 'FlagValues' object, through __getattr__. Both
the long and short name of the original 'Flag' objects can be used to
access its value:
FLAGS.longname # parsed flag value
FLAGS.x # parsed flag value (short name)
Command line arguments are scanned and passed to the registered 'Flag'
objects through the __call__ method. Unparsed arguments, including
argv[0] (e.g. the program name) are returned.
argv = FLAGS(sys.argv) # scan command line arguments
The original registered Flag objects can be retrieved through the use
of the dictionary-like operator, __getitem__:
x = FLAGS['longname'] # access the registered Flag object
The str() operator of a 'FlagValues' object provides help for all of
the registered 'Flag' objects.
"""
def __init__(self):
# Since everything in this class is so heavily overloaded, the only
# way of defining and using fields is to access __dict__ directly.
# Dictionary: flag name (string) -> Flag object.
self.__dict__['__flags'] = {}
# Dictionary: module name (string) -> list of Flag objects that are defined
# by that module.
self.__dict__['__flags_by_module'] = {}
# Dictionary: module name (string) -> list of Flag objects that are
# key for that module.
self.__dict__['__key_flags_by_module'] = {}
# Set if we should use new style gnu_getopt rather than getopt when parsing
# the args. Only possible with Python 2.3+
self.UseGnuGetOpt(False)
def UseGnuGetOpt(self, use_gnu_getopt=True):
self.__dict__['__use_gnu_getopt'] = use_gnu_getopt
def IsGnuGetOpt(self):
return self.__dict__['__use_gnu_getopt']
def FlagDict(self):
return self.__dict__['__flags']
def FlagsByModuleDict(self):
"""Returns the dictionary of module_name -> list of defined flags.
Returns:
A dictionary. Its keys are module names (strings). Its values
are lists of Flag objects.
"""
return self.__dict__['__flags_by_module']
def KeyFlagsByModuleDict(self):
"""Returns the dictionary of module_name -> list of key flags.
Returns:
A dictionary. Its keys are module names (strings). Its values
are lists of Flag objects.
"""
return self.__dict__['__key_flags_by_module']
def _RegisterFlagByModule(self, module_name, flag):
"""Records the module that defines a specific flag.
We keep track of which flag is defined by which module so that we
can later sort the flags by module.
Args:
module_name: A string, the name of a Python module.
flag: A Flag object, a flag that is key to the module.
"""
flags_by_module = self.FlagsByModuleDict()
flags_by_module.setdefault(module_name, []).append(flag)
def _RegisterKeyFlagForModule(self, module_name, flag):
"""Specifies that a flag is a key flag for a module.
Args:
module_name: A string, the name of a Python module.
flag: A Flag object, a flag that is key to the module.
"""
key_flags_by_module = self.KeyFlagsByModuleDict()
# The list of key flags for the module named module_name.
key_flags = key_flags_by_module.setdefault(module_name, [])
# Add flag, but avoid duplicates.
if flag not in key_flags:
key_flags.append(flag)
def _GetFlagsDefinedByModule(self, module):
"""Returns the list of flags defined by a module.
Args:
module: A module object or a module name (a string).
Returns:
A new list of Flag objects. Caller may update this list as he
wishes: none of those changes will affect the internals of this
FlagValue object.
"""
if not isinstance(module, str):
module = module.__name__
return list(self.FlagsByModuleDict().get(module, []))
def _GetKeyFlagsForModule(self, module):
"""Returns the list of key flags for a module.
Args:
module: A module object or a module name (a string)
Returns:
A new list of Flag objects. Caller may update this list as he
wishes: none of those changes will affect the internals of this
FlagValue object.
"""
if not isinstance(module, str):
module = module.__name__
# Any flag is a key flag for the module that defined it. NOTE:
# key_flags is a fresh list: we can update it without affecting the
# internals of this FlagValues object.
key_flags = self._GetFlagsDefinedByModule(module)
# Take into account flags explicitly declared as key for a module.
for flag in self.KeyFlagsByModuleDict().get(module, []):
if flag not in key_flags:
key_flags.append(flag)
return key_flags
def AppendFlagValues(self, flag_values):
"""Appends flags registered in another FlagValues instance.
Args:
flag_values: registry to copy from
"""
for flag_name, flag in flag_values.FlagDict().iteritems():
# Each flags with shortname appears here twice (once under its
# normal name, and again with its short name). To prevent
# problems (DuplicateFlagError) with double flag registration, we
# perform a check to make sure that the entry we're looking at is
# for its normal name.
if flag_name == flag.name:
self[flag_name] = flag
def RemoveFlagValues(self, flag_values):
"""Remove flags that were previously appended from another FlagValues.
Args:
flag_values: registry containing flags to remove.
"""
for flag_name in flag_values.FlagDict():
self.__delattr__(flag_name)
def __setitem__(self, name, flag):
"""Registers a new flag variable."""
fl = self.FlagDict()
if not isinstance(flag, Flag):
raise IllegalFlagValue(flag)
if not isinstance(name, type("")):
raise FlagsError("Flag name must be a string")
if len(name) == 0:
raise FlagsError("Flag name cannot be empty")
# If running under pychecker, duplicate keys are likely to be
# defined. Disable check for duplicate keys when pycheck'ing.
if (fl.has_key(name) and not flag.allow_override and
not fl[name].allow_override and not _RUNNING_PYCHECKER):
raise DuplicateFlagError(name, self)
short_name = flag.short_name
if short_name is not None:
if (fl.has_key(short_name) and not flag.allow_override and
not fl[short_name].allow_override and not _RUNNING_PYCHECKER):
raise DuplicateFlagError(short_name, self)
fl[short_name] = flag
fl[name] = flag
global _exported_flags
_exported_flags[name] = flag
def __getitem__(self, name):
"""Retrieves the Flag object for the flag --name."""
return self.FlagDict()[name]
def __getattr__(self, name):
"""Retrieves the 'value' attribute of the flag --name."""
fl = self.FlagDict()
if not fl.has_key(name):
raise AttributeError(name)
return fl[name].value
def __setattr__(self, name, value):
"""Sets the 'value' attribute of the flag --name."""
fl = self.FlagDict()
fl[name].value = value
self._AssertValidators(fl[name].validators)
return value
def _AssertAllValidators(self):
all_validators = set()
for flag in self.FlagDict().itervalues():
for validator in flag.validators:
all_validators.add(validator)
self._AssertValidators(all_validators)
def _AssertValidators(self, validators):
"""Assert if all validators in the list are satisfied.
Asserts validators in the order they were created.
Args:
validators: Iterable(gflags_validators.Validator), validators to be
verified
Raises:
AttributeError: if validators work with a non-existing flag.
IllegalFlagValue: if validation fails for at least one validator
"""
for validator in sorted(
validators, key=lambda validator: validator.insertion_index):
try:
validator.Verify(self)
except gflags_validators.Error, e:
message = validator.PrintFlagsWithValues(self)
raise IllegalFlagValue('%s: %s' % (message, str(e)))
def _FlagIsRegistered(self, flag_obj):
"""Checks whether a Flag object is registered under some name.
Note: this is non trivial: in addition to its normal name, a flag
may have a short name too. In self.FlagDict(), both the normal and
the short name are mapped to the same flag object. E.g., calling
only "del FLAGS.short_name" is not unregistering the corresponding
Flag object (it is still registered under the longer name).
Args:
flag_obj: A Flag object.
Returns:
A boolean: True iff flag_obj is registered under some name.
"""
flag_dict = self.FlagDict()
# Check whether flag_obj is registered under its long name.
name = flag_obj.name
if flag_dict.get(name, None) == flag_obj:
return True
# Check whether flag_obj is registered under its short name.
short_name = flag_obj.short_name
if (short_name is not None and
flag_dict.get(short_name, None) == flag_obj):
return True
# The flag cannot be registered under any other name, so we do not
# need to do a full search through the values of self.FlagDict().
return False
def __delattr__(self, flag_name):
"""Deletes a previously-defined flag from a flag object.
This method makes sure we can delete a flag by using
del flag_values_object.<flag_name>
E.g.,
flags.DEFINE_integer('foo', 1, 'Integer flag.')
del flags.FLAGS.foo
Args:
flag_name: A string, the name of the flag to be deleted.
Raises:
AttributeError: When there is no registered flag named flag_name.
"""
fl = self.FlagDict()
if flag_name not in fl:
raise AttributeError(flag_name)
flag_obj = fl[flag_name]
del fl[flag_name]
if not self._FlagIsRegistered(flag_obj):
# If the Flag object indicated by flag_name is no longer
# registered (please see the docstring of _FlagIsRegistered), then
# we delete the occurences of the flag object in all our internal
# dictionaries.
self.__RemoveFlagFromDictByModule(self.FlagsByModuleDict(), flag_obj)
self.__RemoveFlagFromDictByModule(self.KeyFlagsByModuleDict(), flag_obj)
def __RemoveFlagFromDictByModule(self, flags_by_module_dict, flag_obj):
"""Removes a flag object from a module -> list of flags dictionary.
Args:
flags_by_module_dict: A dictionary that maps module names to lists of
flags.
flag_obj: A flag object.
"""
for unused_module, flags_in_module in flags_by_module_dict.iteritems():
# while (as opposed to if) takes care of multiple occurences of a
# flag in the list for the same module.
while flag_obj in flags_in_module:
flags_in_module.remove(flag_obj)
def SetDefault(self, name, value):
"""Changes the default value of the named flag object."""
fl = self.FlagDict()
if not fl.has_key(name):
raise AttributeError(name)
fl[name].SetDefault(value)
self._AssertValidators(fl[name].validators)
def __contains__(self, name):
"""Returns True if name is a value (flag) in the dict."""
return name in self.FlagDict()
has_key = __contains__ # a synonym for __contains__()
def __iter__(self):
return self.FlagDict().iterkeys()
def __call__(self, argv):
"""Parses flags from argv; stores parsed flags into this FlagValues object.
All unparsed arguments are returned. Flags are parsed using the GNU
Program Argument Syntax Conventions, using getopt:
http://www.gnu.org/software/libc/manual/html_mono/libc.html#Getopt
Args:
argv: argument list. Can be of any type that may be converted to a list.
Returns:
The list of arguments not parsed as options, including argv[0]
Raises:
FlagsError: on any parsing error
"""
# Support any sequence type that can be converted to a list
argv = list(argv)
shortopts = ""
longopts = []
fl = self.FlagDict()
# This pre parses the argv list for --flagfile=<> options.
argv = argv[:1] + self.ReadFlagsFromFiles(argv[1:], force_gnu=False)
# Correct the argv to support the google style of passing boolean
# parameters. Boolean parameters may be passed by using --mybool,
# --nomybool, --mybool=(true|false|1|0). getopt does not support
# having options that may or may not have a parameter. We replace
# instances of the short form --mybool and --nomybool with their
# full forms: --mybool=(true|false).
original_argv = list(argv) # list() makes a copy
shortest_matches = None
for name, flag in fl.items():
if not flag.boolean:
continue
if shortest_matches is None:
# Determine the smallest allowable prefix for all flag names
shortest_matches = self.ShortestUniquePrefixes(fl)
no_name = 'no' + name
prefix = shortest_matches[name]
no_prefix = shortest_matches[no_name]
# Replace all occurences of this boolean with extended forms
for arg_idx in range(1, len(argv)):
arg = argv[arg_idx]
if arg.find('=') >= 0: continue
if arg.startswith('--'+prefix) and ('--'+name).startswith(arg):
argv[arg_idx] = ('--%s=true' % name)
elif arg.startswith('--'+no_prefix) and ('--'+no_name).startswith(arg):
argv[arg_idx] = ('--%s=false' % name)
# Loop over all of the flags, building up the lists of short options
# and long options that will be passed to getopt. Short options are
# specified as a string of letters, each letter followed by a colon
# if it takes an argument. Long options are stored in an array of
# strings. Each string ends with an '=' if it takes an argument.
for name, flag in fl.items():
longopts.append(name + "=")
if len(name) == 1: # one-letter option: allow short flag type also
shortopts += name
if not flag.boolean:
shortopts += ":"
longopts.append('undefok=')
undefok_flags = []
# In case --undefok is specified, loop to pick up unrecognized
# options one by one.
unrecognized_opts = []
args = argv[1:]
while True:
try:
if self.__dict__['__use_gnu_getopt']:
optlist, unparsed_args = getopt.gnu_getopt(args, shortopts, longopts)
else:
optlist, unparsed_args = getopt.getopt(args, shortopts, longopts)
break
except getopt.GetoptError, e:
if not e.opt or e.opt in fl:
# Not an unrecognized option, reraise the exception as a FlagsError
raise FlagsError(e)
# Remove offender from args and try again
for arg_index in range(len(args)):
if ((args[arg_index] == '--' + e.opt) or
(args[arg_index] == '-' + e.opt) or
(args[arg_index].startswith('--' + e.opt + '='))):
unrecognized_opts.append((e.opt, args[arg_index]))
args = args[0:arg_index] + args[arg_index+1:]
break
else:
# We should have found the option, so we don't expect to get
# here. We could assert, but raising the original exception
# might work better.
raise FlagsError(e)
for name, arg in optlist:
if name == '--undefok':
flag_names = arg.split(',')
undefok_flags.extend(flag_names)
# For boolean flags, if --undefok=boolflag is specified, then we should
# also accept --noboolflag, in addition to --boolflag.
# Since we don't know the type of the undefok'd flag, this will affect
# non-boolean flags as well.
# NOTE: You shouldn't use --undefok=noboolflag, because then we will
# accept --nonoboolflag here. We are choosing not to do the conversion
# from noboolflag -> boolflag because of the ambiguity that flag names
# can start with 'no'.
undefok_flags.extend('no' + name for name in flag_names)
continue
if name.startswith('--'):
# long option
name = name[2:]
short_option = 0
else:
# short option
name = name[1:]
short_option = 1
if fl.has_key(name):
flag = fl[name]
if flag.boolean and short_option: arg = 1
flag.Parse(arg)
# If there were unrecognized options, raise an exception unless
# the options were named via --undefok.
for opt, value in unrecognized_opts:
if opt not in undefok_flags:
raise UnrecognizedFlagError(opt, value)
if unparsed_args:
if self.__dict__['__use_gnu_getopt']:
# if using gnu_getopt just return the program name + remainder of argv.
ret_val = argv[:1] + unparsed_args
else:
# unparsed_args becomes the first non-flag detected by getopt to
# the end of argv. Because argv may have been modified above,
# return original_argv for this region.
ret_val = argv[:1] + original_argv[-len(unparsed_args):]
else:
ret_val = argv[:1]
self._AssertAllValidators()
return ret_val
def Reset(self):
"""Resets the values to the point before FLAGS(argv) was called."""
for f in self.FlagDict().values():
f.Unparse()
def RegisteredFlags(self):
"""Returns: a list of the names and short names of all registered flags."""
return self.FlagDict().keys()
def FlagValuesDict(self):
"""Returns: a dictionary that maps flag names to flag values."""
flag_values = {}
for flag_name in self.RegisteredFlags():
flag = self.FlagDict()[flag_name]
flag_values[flag_name] = flag.value
return flag_values
def __str__(self):
"""Generates a help string for all known flags."""
return self.GetHelp()
def GetHelp(self, prefix=''):
"""Generates a help string for all known flags."""
helplist = []
flags_by_module = self.FlagsByModuleDict()
if flags_by_module:
modules = flags_by_module.keys()
modules.sort()
# Print the help for the main module first, if possible.
main_module = _GetMainModule()
if main_module in modules:
modules.remove(main_module)
modules = [main_module] + modules
for module in modules:
self.__RenderOurModuleFlags(module, helplist)
self.__RenderModuleFlags('gflags',
_SPECIAL_FLAGS.FlagDict().values(),
helplist)
else:
# Just print one long list of flags.
self.__RenderFlagList(
self.FlagDict().values() + _SPECIAL_FLAGS.FlagDict().values(),
helplist, prefix)
return '\n'.join(helplist)
def __RenderModuleFlags(self, module, flags, output_lines, prefix=""):
"""Generates a help string for a given module."""
if not isinstance(module, str):
module = module.__name__
output_lines.append('\n%s%s:' % (prefix, module))
self.__RenderFlagList(flags, output_lines, prefix + " ")
def __RenderOurModuleFlags(self, module, output_lines, prefix=""):
"""Generates a help string for a given module."""
flags = self._GetFlagsDefinedByModule(module)
if flags:
self.__RenderModuleFlags(module, flags, output_lines, prefix)
def __RenderOurModuleKeyFlags(self, module, output_lines, prefix=""):
"""Generates a help string for the key flags of a given module.
Args:
module: A module object or a module name (a string).
output_lines: A list of strings. The generated help message
lines will be appended to this list.
prefix: A string that is prepended to each generated help line.
"""
key_flags = self._GetKeyFlagsForModule(module)
if key_flags:
self.__RenderModuleFlags(module, key_flags, output_lines, prefix)
def ModuleHelp(self, module):
"""Describe the key flags of a module.
Args:
module: A module object or a module name (a string).
Returns:
string describing the key flags of a module.
"""
helplist = []
self.__RenderOurModuleKeyFlags(module, helplist)
return '\n'.join(helplist)
def MainModuleHelp(self):
"""Describe the key flags of the main module.
Returns:
string describing the key flags of a module.
"""
return self.ModuleHelp(_GetMainModule())
def __RenderFlagList(self, flaglist, output_lines, prefix=" "):
fl = self.FlagDict()
special_fl = _SPECIAL_FLAGS.FlagDict()
flaglist = [(flag.name, flag) for flag in flaglist]
flaglist.sort()
flagset = {}
for (name, flag) in flaglist:
# It's possible this flag got deleted or overridden since being
# registered in the per-module flaglist. Check now against the
# canonical source of current flag information, the FlagDict.
if fl.get(name, None) != flag and special_fl.get(name, None) != flag:
# a different flag is using this name now
continue
# only print help once
if flagset.has_key(flag): continue
flagset[flag] = 1
flaghelp = ""
if flag.short_name: flaghelp += "-%s," % flag.short_name
if flag.boolean:
flaghelp += "--[no]%s" % flag.name + ":"
else:
flaghelp += "--%s" % flag.name + ":"
flaghelp += " "
if flag.help:
flaghelp += flag.help
flaghelp = TextWrap(flaghelp, indent=prefix+" ",
firstline_indent=prefix)
if flag.default_as_str:
flaghelp += "\n"
flaghelp += TextWrap("(default: %s)" % flag.default_as_str,
indent=prefix+" ")
if flag.parser.syntactic_help:
flaghelp += "\n"
flaghelp += TextWrap("(%s)" % flag.parser.syntactic_help,
indent=prefix+" ")
output_lines.append(flaghelp)
def get(self, name, default):
"""Returns the value of a flag (if not None) or a default value.
Args:
name: A string, the name of a flag.
default: Default value to use if the flag value is None.
"""
value = self.__getattr__(name)
if value is not None: # Can't do if not value, b/c value might be '0' or ""
return value
else:
return default
def ShortestUniquePrefixes(self, fl):
  """Returns: dictionary; maps flag names to their shortest unique prefix."""
  # Collect every recognized spelling: each flag name, plus the
  # "no<name>" negated form for boolean flags.  Sort them so each
  # name's neighbors are its closest lexical matches.
  names = []
  for name, flag in fl.items():
    names.append(name)
    if flag.boolean:
      names.append('no%s' % name)
  names.sort()

  # For each name, the shortest unique prefix is determined by its
  # successor in sorted order and by the divergence point with its
  # predecessor (cached in prev_idx from the previous iteration).
  shortest_matches = {}
  prev_idx = 0
  last_pos = len(names) - 1
  for pos, curr in enumerate(names):
    successor = names[pos + 1] if pos != last_pos else None
    successor_len = len(successor) if successor is not None else 0
    for curr_idx in range(len(curr)):
      if (successor is None
          or curr_idx >= successor_len
          or curr[curr_idx] != successor[curr_idx]):
        # curr is longer than successor, or they diverge at curr_idx.
        shortest_matches[curr] = curr[:max(prev_idx, curr_idx) + 1]
        prev_idx = curr_idx
        break
    else:
      # curr is a prefix of (or equal to) successor: no prefix of curr
      # is unique, so the whole name is required.
      shortest_matches[curr] = curr
      prev_idx = curr_idx + 1  # successor needs at least one more char
  return shortest_matches
def __IsFlagFileDirective(self, flag_string):
  """Checks whether flag_string contain a --flagfile=<foo> directive."""
  # Non-strings (e.g. None) are never flagfile directives.
  if isinstance(flag_string, type("")):
    if (flag_string == '--flagfile'
        or flag_string == '-flagfile'
        or flag_string.startswith('--flagfile=')
        or flag_string.startswith('-flagfile=')):
      return 1
    return 0
  return 0
def ExtractFilename(self, flagfile_str):
  """Returns filename from a flagfile_str of form -[-]flagfile=filename.

  The cases of --flagfile foo and -flagfile foo shouldn't be hitting
  this function, as they are dealt with in the level above this
  function.
  """
  # Strip whichever directive prefix is present and expand "~".
  for directive in ('--flagfile=', '-flagfile='):
    if flagfile_str.startswith(directive):
      return os.path.expanduser(flagfile_str[len(directive):].strip())
  raise FlagsError('Hit illegal --flagfile type: %s' % flagfile_str)
def __GetFlagFileLines(self, filename, parsed_file_list):
  """Returns the useful (!=comments, etc) lines from a file with flags.

  Args:
    filename: A string, the name of the flag file.
    parsed_file_list: A list of the names of the files we have
      already read.  MUTATED BY THIS FUNCTION.

  Returns:
    List of strings.  See the note below.

  NOTE(springer): This function checks for a nested --flagfile=<foo>
  tag and handles the lower file recursively.  It returns a list of
  all the lines that _could_ contain command flags.  This is
  EVERYTHING except whitespace lines and comments (lines starting
  with '#' or '//').
  """
  line_list = []  # All line from flagfile.
  flag_line_list = []  # Subset of lines w/o comments, blanks, flagfile= tags.
  try:
    file_obj = open(filename, 'r')
  except IOError, e_msg:
    # Best-effort: report the unreadable file and return no flags
    # rather than aborting the whole parse.
    print e_msg
    print 'ERROR:: Unable to open flagfile: %s' % (filename)
    return flag_line_list
  line_list = file_obj.readlines()
  file_obj.close()
  # Record this file so nested includes can detect circular references.
  parsed_file_list.append(filename)
  # This is where we check each line in the file we just read.
  for line in line_list:
    if line.isspace():
      pass
    # Checks for comment (a line that starts with '#').
    elif line.startswith('#') or line.startswith('//'):
      pass
    # Checks for a nested "--flagfile=<bar>" flag in the current file.
    # If we find one, recursively parse down into that file.
    elif self.__IsFlagFileDirective(line):
      sub_filename = self.ExtractFilename(line)
      # We do a little safety check for reparsing a file we've already done.
      if not sub_filename in parsed_file_list:
        included_flags = self.__GetFlagFileLines(sub_filename,
                                                 parsed_file_list)
        flag_line_list.extend(included_flags)
      else:  # Case of hitting a circularly included file.
        print >>sys.stderr, ('Warning: Hit circular flagfile dependency: %s'
                             % sub_filename)
    else:
      # Any line that's not a comment or a nested flagfile should get
      # copied into 2nd position.  This leaves earlier arguements
      # further back in the list, thus giving them higher priority.
      flag_line_list.append(line.strip())
  return flag_line_list
def ReadFlagsFromFiles(self, argv, force_gnu=True):
  """Processes command line args, but also allow args to be read from file.

  Args:
    argv: A list of strings, usually sys.argv[1:], which may contain one or
      more flagfile directives of the form --flagfile="./filename".
      Note that the name of the program (sys.argv[0]) should be omitted.
    force_gnu: If False, --flagfile parsing obeys normal flag semantics.
      If True, --flagfile parsing instead follows gnu_getopt semantics.
      *** WARNING *** force_gnu=False may become the future default!

  Returns:
    A new list which has the original list combined with what we read
    from any flagfile(s).

  References: Global gflags.FLAG class instance.

  This function should be called before the normal FLAGS(argv) call.
  This function scans the input list for a flag that looks like:
  --flagfile=<somefile>.  Then it opens <somefile>, reads all valid key
  and value pairs and inserts them into the input list between the
  first item of the list and any subsequent items in the list.

  Note that your application's flags are still defined the usual way
  using gflags DEFINE_flag() type functions.

  Notes (assuming we're getting a commandline of some sort as our input):
  --> Flags from the command line argv _should_ always take precedence!
  --> A further "--flagfile=<otherfile.cfg>" CAN be nested in a flagfile.
      It will be processed after the parent flag file is done.
  --> For duplicate flags, first one we hit should "win".
  --> In a flagfile, a line beginning with # or // is a comment.
  --> Entirely blank lines _should_ be ignored.
  """
  parsed_file_list = []  # Flagfiles already read; guards against cycles.
  rest_of_args = argv
  new_argv = []
  while rest_of_args:
    current_arg = rest_of_args[0]
    rest_of_args = rest_of_args[1:]
    if self.__IsFlagFileDirective(current_arg):
      # This handles the case of -(-)flagfile foo.  In this case the
      # next arg really is part of this one.
      if current_arg == '--flagfile' or current_arg == '-flagfile':
        if not rest_of_args:
          raise IllegalFlagValue('--flagfile with no argument')
        flag_filename = os.path.expanduser(rest_of_args[0])
        rest_of_args = rest_of_args[1:]
      else:
        # This handles the case of (-)-flagfile=foo.
        flag_filename = self.ExtractFilename(current_arg)
      # Splice the file's flags in at the front of new_argv so that
      # command-line args appended later keep their precedence.
      new_argv[0:0] = self.__GetFlagFileLines(flag_filename, parsed_file_list)
    else:
      new_argv.append(current_arg)
      # Stop parsing after '--', like getopt and gnu_getopt.
      if current_arg == '--':
        break
      # Stop parsing after a non-flag, like getopt.
      if not current_arg.startswith('-'):
        if not force_gnu and not self.__dict__['__use_gnu_getopt']:
          break
  if rest_of_args:
    new_argv.extend(rest_of_args)
  return new_argv
def FlagsIntoString(self):
  """Returns a string with the flags assignments from this FlagValues object.

  This function ignores flags whose value is None.  Each flag
  assignment is separated by a newline.

  NOTE: MUST mirror the behavior of the C++ function
  CommandlineFlagsIntoString from google3/base/commandlineflags.cc.
  """
  assignments = [flag.Serialize() + '\n'
                 for flag in self.FlagDict().values()
                 if flag.value is not None]
  return ''.join(assignments)
def AppendFlagsIntoFile(self, filename):
  """Appends all flags assignments from this FlagInfo object to a file.

  Output will be in the format of a flagfile.

  NOTE: MUST mirror the behavior of the C++ version of
  AppendFlagsIntoFile from google3/base/commandlineflags.cc.

  Args:
    filename: A string, path of the file to append to (created if absent).
  """
  out_file = open(filename, 'a')
  try:
    # try/finally guarantees the file handle is released even if the
    # write raises (the previous version leaked it on error).
    out_file.write(self.FlagsIntoString())
  finally:
    out_file.close()
def WriteHelpInXMLFormat(self, outfile=None):
  """Outputs flag documentation in XML format.

  NOTE: We use element names that are consistent with those used by
  the C++ command-line flag library, from
  google3/base/commandlineflags_reporting.cc.  We also use a few new
  elements (e.g., <key>), but we do not interfere / overlap with
  existing XML elements used by the C++ library.  Please maintain this
  consistency.

  Args:
    outfile: File object we write to.  Default None means sys.stdout.
  """
  outfile = outfile or sys.stdout
  outfile.write('<?xml version=\"1.0\"?>\n')
  outfile.write('<AllFlags>\n')
  indent = '  '
  _WriteSimpleXMLElement(outfile, 'program', os.path.basename(sys.argv[0]),
                         indent)
  usage_doc = sys.modules['__main__'].__doc__
  if not usage_doc:
    usage_doc = '\nUSAGE: %s [flags]\n' % sys.argv[0]
  else:
    # Substitute the program name into the main module's docstring.
    usage_doc = usage_doc.replace('%s', sys.argv[0])
  _WriteSimpleXMLElement(outfile, 'usage', usage_doc, indent)
  # Get list of key flags for the main module.
  key_flags = self._GetKeyFlagsForModule(_GetMainModule())
  # Sort flags by declaring module name and next by flag name.
  flags_by_module = self.FlagsByModuleDict()
  all_module_names = list(flags_by_module.keys())
  all_module_names.sort()
  for module_name in all_module_names:
    flag_list = [(f.name, f) for f in flags_by_module[module_name]]
    flag_list.sort()
    for unused_flag_name, flag in flag_list:
      is_key = flag in key_flags
      flag.WriteInfoInXMLFormat(outfile, module_name,
                                is_key=is_key, indent=indent)
  outfile.write('</AllFlags>\n')
  outfile.flush()
def AddValidator(self, validator):
  """Register new flags validator to be checked.

  Args:
    validator: gflags_validators.Validator
  Raises:
    AttributeError: if validators work with a non-existing flag.
  """
  # Attach the validator to every flag it mentions.
  for name in validator.GetFlagsNames():
    self.FlagDict()[name].validators.append(validator)
# end of FlagValues definition


# The global FlagValues instance.  The DEFINE_* functions register
# their flags here by default, and applications parse argv with it
# via FLAGS(argv).
FLAGS = FlagValues()
def _MakeXMLSafe(s):
  """Escapes <, >, and & from s, and removes XML 1.0-illegal chars."""
  # xml.sax.saxutils.escape escapes exactly '&', '<' and '>', which is
  # the same behavior as the historical cgi.escape(s) call used here;
  # cgi.escape was deprecated and later removed from the standard
  # library, so avoid depending on it.
  from xml.sax.saxutils import escape
  s = escape(s)
  # Remove characters that cannot appear in an XML 1.0 document
  # (http://www.w3.org/TR/REC-xml/#charsets).
  #
  # NOTE: if there are problems with current solution, one may move to
  # XML 1.1, which allows such chars, if they're entity-escaped (&#xHH;).
  s = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f]', '', s)
  return s
def _WriteSimpleXMLElement(outfile, name, value, indent):
  """Writes a simple XML element.

  Args:
    outfile: File object we write the XML element to.
    name: A string, the name of XML element.
    value: A Python object, whose string representation will be used
      as the value of the XML element.
    indent: A string, prepended to each line of generated output.
  """
  if isinstance(value, bool):
    # Display boolean values as the C++ flag library does: no caps.
    value_str = str(value).lower()
  else:
    value_str = str(value)
  outfile.write('%s<%s>%s</%s>\n'
                % (indent, name, _MakeXMLSafe(value_str), name))
class Flag:
  """Information about a command-line flag.

  'Flag' objects define the following fields:
    .name - the name for this flag
    .default - the default value for this flag
    .default_as_str - default value as repr'd string, e.g., "'true'" (or None)
    .value - the most recent parsed value of this flag; set by Parse()
    .help - a help string or None if no help is available
    .short_name - the single letter alias for this flag (or None)
    .boolean - if 'true', this flag does not accept arguments
    .present - true if this flag was parsed from command line flags.
    .parser - an ArgumentParser object
    .serializer - an ArgumentSerializer object
    .allow_override - the flag may be redefined without raising an error

  The only public method of a 'Flag' object is Parse(), but it is
  typically only called by a 'FlagValues' object.  The Parse() method is
  a thin wrapper around the 'ArgumentParser' Parse() method.  The parsed
  value is saved in .value, and the .present attribute is updated.  If
  this flag was already present, a FlagsError is raised.

  Parse() is also called during __init__ to parse the default value and
  initialize the .value attribute.  This enables other python modules to
  safely use flags even if the __main__ module neglects to parse the
  command line arguments.  The .present attribute is cleared after
  __init__ parsing.  If the default value is set to None, then the
  __init__ parsing step is skipped and the .value attribute is
  initialized to None.

  Note: The default value is also presented to the user in the help
  string, so it is important that it be a legal value for this flag.
  """

  def __init__(self, parser, serializer, name, default, help_string,
               short_name=None, boolean=0, allow_override=0):
    self.name = name

    if not help_string:
      help_string = '(no help available)'

    self.help = help_string
    self.short_name = short_name
    self.boolean = boolean
    self.present = 0
    self.parser = parser
    self.serializer = serializer
    self.allow_override = allow_override
    self.value = None
    self.validators = []

    # Parse the default now so .value is usable even before the command
    # line is parsed; SetDefault also clears .present afterwards.
    self.SetDefault(default)

  def __GetParsedValueAsString(self, value):
    # Renders a parsed value the way the help text shows defaults.
    if value is None:
      return None
    if self.serializer:
      return repr(self.serializer.Serialize(value))
    if self.boolean:
      if value:
        return repr('true')
      else:
        return repr('false')
    return repr(str(value))

  def Parse(self, argument):
    try:
      self.value = self.parser.Parse(argument)
    except ValueError as e:  # recast ValueError as IllegalFlagValue
      # NOTE: "except E, e" is Python-2-only syntax; "as" works on both
      # Python 2.6+ and Python 3.
      raise IllegalFlagValue("flag --%s=%s: %s" % (self.name, argument, e))
    self.present += 1

  def Unparse(self):
    # Restore the default value and mark the flag as not seen.
    if self.default is None:
      self.value = None
    else:
      self.Parse(self.default)
    self.present = 0

  def Serialize(self):
    """Returns the command-line argument that would reproduce .value."""
    if self.value is None:
      return ''
    if self.boolean:
      if self.value:
        return "--%s" % self.name
      else:
        return "--no%s" % self.name
    else:
      if not self.serializer:
        raise FlagsError("Serializer not present for flag %s" % self.name)
      return "--%s=%s" % (self.name, self.serializer.Serialize(self.value))

  def SetDefault(self, value):
    """Changes the default value (and current value too) for this Flag."""
    # We can't allow a None override because it may end up not being
    # passed to C++ code when we're overriding C++ flags.  So we
    # cowardly bail out until someone fixes the semantics of trying to
    # pass None to a C++ flag.  See swig_flags.Init() for details on
    # this behavior.
    # TODO(olexiy): Users can directly call this method, bypassing all flags
    # validators (we don't have FlagValues here, so we can not check
    # validators).
    # The simplest solution I see is to make this method private.
    # Another approach would be to store reference to the corresponding
    # FlagValues with each flag, but this seems to be an overkill.
    if value is None and self.allow_override:
      raise DuplicateFlagCannotPropagateNoneToSwig(self.name)

    self.default = value
    self.Unparse()
    self.default_as_str = self.__GetParsedValueAsString(self.value)

  def Type(self):
    """Returns: a string that describes the type of this Flag."""
    # NOTE: we use strings, and not the types.*Type constants because
    # our flags can have more exotic types, e.g., 'comma separated list
    # of strings', 'whitespace separated list of strings', etc.
    return self.parser.Type()

  def WriteInfoInXMLFormat(self, outfile, module_name, is_key=False, indent=''):
    """Writes common info about this flag, in XML format.

    This is information that is relevant to all flags (e.g., name,
    meaning, etc.).  If you defined a flag that has some other pieces of
    info, then please override _WriteCustomInfoInXMLFormat.

    Please do NOT override this method.

    Args:
      outfile: File object we write to.
      module_name: A string, the name of the module that defines this flag.
      is_key: A boolean, True iff this flag is key for main module.
      indent: A string that is prepended to each generated line.
    """
    outfile.write(indent + '<flag>\n')
    inner_indent = indent + '  '
    if is_key:
      _WriteSimpleXMLElement(outfile, 'key', 'yes', inner_indent)
    _WriteSimpleXMLElement(outfile, 'file', module_name, inner_indent)
    # Print flag features that are relevant for all flags.
    _WriteSimpleXMLElement(outfile, 'name', self.name, inner_indent)
    if self.short_name:
      _WriteSimpleXMLElement(outfile, 'short_name', self.short_name,
                             inner_indent)
    if self.help:
      _WriteSimpleXMLElement(outfile, 'meaning', self.help, inner_indent)
    # The default flag value can either be represented as a string like on the
    # command line, or as a Python object.  We serialize this value in the
    # latter case in order to remain consistent.
    if self.serializer and not isinstance(self.default, str):
      default_serialized = self.serializer.Serialize(self.default)
    else:
      default_serialized = self.default
    _WriteSimpleXMLElement(outfile, 'default', default_serialized, inner_indent)
    _WriteSimpleXMLElement(outfile, 'current', self.value, inner_indent)
    _WriteSimpleXMLElement(outfile, 'type', self.Type(), inner_indent)
    # Print extra flag features this flag may have.
    self._WriteCustomInfoInXMLFormat(outfile, inner_indent)
    outfile.write(indent + '</flag>\n')

  def _WriteCustomInfoInXMLFormat(self, outfile, indent):
    """Writes extra info about this flag, in XML format.

    "Extra" means "not already printed by WriteInfoInXMLFormat above."

    Args:
      outfile: File object we write to.
      indent: A string that is prepended to each generated line.
    """
    # Usually, the parser knows the extra details about the flag, so
    # we just forward the call to it.
    self.parser.WriteCustomInfoInXMLFormat(outfile, indent)
# End of Flag definition
class _ArgumentParserCache(type):
  """Metaclass used to cache and share argument parsers among flags."""

  _instances = {}  # Maps (cls, *init_args) -> the shared parser instance.

  def __call__(mcs, *args, **kwargs):
    """Returns an instance of the argument parser cls.

    This method overrides behavior of the __new__ methods in
    all subclasses of ArgumentParser (inclusive).  If an instance
    for mcs with the same set of arguments exists, this instance is
    returned, otherwise a new instance is created.

    If any keyword arguments are defined, or the values in args
    are not hashable, this method always returns a new instance of
    cls.

    Args:
      args: Positional initializer arguments.
      kwargs: Initializer keyword arguments.

    Returns:
      An instance of cls, shared or new.
    """
    if kwargs:
      # Keyword arguments are not part of the cache key, so never share.
      return type.__call__(mcs, *args, **kwargs)
    else:
      instances = mcs._instances
      key = (mcs,) + tuple(args)
      try:
        return instances[key]
      except KeyError:
        # No cache entry for key exists, create a new one.
        return instances.setdefault(key, type.__call__(mcs, *args))
      except TypeError:
        # An object in args cannot be hashed, always return
        # a new instance.
        return type.__call__(mcs, *args)
class ArgumentParser(object):
  """Base class used to parse and convert arguments.

  The Parse() method checks to make sure that the string argument is a
  legal value and convert it to a native type.  If the value cannot be
  converted, it should throw a 'ValueError' exception with a human
  readable explanation of why the value is illegal.

  Subclasses should also define a syntactic_help string which may be
  presented to the user to describe the form of the legal values.

  Argument parser classes must be stateless, since instances are cached
  and shared between flags.  Initializer arguments are allowed, but all
  member variables must be derived from initializer arguments only.
  """
  # Python 2 metaclass hook: parser instances are cached and shared per
  # set of initializer arguments (see _ArgumentParserCache).
  __metaclass__ = _ArgumentParserCache

  syntactic_help = ""

  def Parse(self, argument):
    """Default implementation: always returns its argument unmodified."""
    return argument

  def Type(self):
    # Human-readable type name used in help and XML output.
    return 'string'

  def WriteCustomInfoInXMLFormat(self, outfile, indent):
    # Base parsers have no extra XML info; subclasses may override.
    pass
class ArgumentSerializer:
  """Base class for generating string representations of a flag value."""

  def Serialize(self, value):
    # Default rendering is a plain str() conversion.
    return str(value)
class ListSerializer(ArgumentSerializer):
  """Serializes a sequence by joining its elements with a separator."""

  def __init__(self, list_sep):
    self.list_sep = list_sep

  def Serialize(self, value):
    # Convert each element with str() before joining.
    rendered = [str(item) for item in value]
    return self.list_sep.join(rendered)
# Flags validators
def RegisterValidator(flag_name,
                      checker,
                      message='Flag validation failed',
                      flag_values=FLAGS):
  """Adds a constraint, which will be enforced during program execution.

  The constraint is validated when flags are initially parsed, and after each
  change of the corresponding flag's value.

  Args:
    flag_name: string, name of the flag to be checked.
    checker: method to validate the flag.
      input  - value of the corresponding flag (string, boolean, etc.
        This value will be passed to checker by the library).  See file's
        docstring for examples.
      output - Boolean.
        Must return True if validator constraint is satisfied.
        If constraint is not satisfied, it should either return False or
        raise gflags_validators.Error(desired_error_message).
    message: error text to be shown to the user if checker returns False.
      If checker raises gflags_validators.Error, message from the raised
      Error will be shown.
    flag_values: FlagValues
  Raises:
    AttributeError: if flag_name is not registered as a valid flag name.
  """
  # Wrap the bare checker callable in a SimpleValidator and attach it
  # to the flag via the owning FlagValues object.
  flag_values.AddValidator(gflags_validators.SimpleValidator(flag_name,
                                                             checker,
                                                             message))
def MarkFlagAsRequired(flag_name, flag_values=FLAGS):
  """Ensure that flag is not None during program execution.

  Registers a flag validator, which will follow usual validator
  rules.

  Args:
    flag_name: string, name of the flag
    flag_values: FlagValues
  Raises:
    AttributeError: if flag_name is not registered as a valid flag name.
  """
  def CheckPresent(value):
    # Any non-None value satisfies the "required" constraint.
    return value is not None
  RegisterValidator(flag_name,
                    CheckPresent,
                    message='Flag --%s must be specified.' % flag_name,
                    flag_values=flag_values)
def _RegisterBoundsValidatorIfNeeded(parser, name, flag_values):
  """Enforce lower and upper bounds for numeric flags.

  Args:
    parser: NumericParser (either FloatParser or IntegerParser).  Provides
      lower and upper bounds, and help text to display.
    name: string, name of the flag
    flag_values: FlagValues
  """
  # Only install a validator when the parser actually constrains a bound;
  # unbounded numeric flags need no runtime check.
  if parser.lower_bound is not None or parser.upper_bound is not None:

    def Checker(value):
      # value is None when the flag is unset; that is handled elsewhere.
      if value is not None and parser.IsOutsideBounds(value):
        message = '%s is not %s' % (value, parser.syntactic_help)
        raise gflags_validators.Error(message)
      return True

    RegisterValidator(name,
                      Checker,
                      flag_values=flag_values)
# The DEFINE functions are explained in mode details in the module doc string.
def DEFINE(parser, name, default, help, flag_values=FLAGS, serializer=None,
           **args):
  """Registers a generic Flag object.

  NOTE: in the docstrings of all DEFINE* functions, "registers" is short
  for "creates a new flag and registers it".

  Auxiliary function: clients should use the specialized DEFINE_<type>
  function instead.

  Args:
    parser: ArgumentParser that is used to parse the flag arguments.
    name: A string, the flag name.
    default: The default value of the flag.
    help: A help string.
    flag_values: FlagValues object the flag will be registered with.
    serializer: ArgumentSerializer that serializes the flag value.
    args: Dictionary with extra keyword args that are passes to the
      Flag __init__.
  """
  # Build the Flag and hand registration off to DEFINE_flag.
  DEFINE_flag(Flag(parser, serializer, name, default, help, **args),
              flag_values)
def DEFINE_flag(flag, flag_values=FLAGS):
  """Registers a 'Flag' object with a 'FlagValues' object.

  By default, the global FLAGS 'FlagValue' object is used.

  Typical users will use one of the more specialized DEFINE_xxx
  functions, such as DEFINE_string or DEFINE_integer.  But developers
  who need to create Flag objects themselves should use this function
  to register their flags.
  """
  # copying the reference to flag_values prevents pychecker warnings
  fv = flag_values
  # FlagValues.__setitem__ performs the actual (duplicate-checked)
  # registration under the flag's name.
  fv[flag.name] = flag
  # Tell flag_values who's defining the flag.
  if isinstance(flag_values, FlagValues):
    # Regarding the above isinstance test: some users pass funny
    # values of flag_values (e.g., {}) in order to avoid the flag
    # registration (in the past, there used to be a flag_values ==
    # FLAGS test here) and redefine flags with the same name (e.g.,
    # debug).  To avoid breaking their code, we perform the
    # registration only if flag_values is a real FlagValues object.
    flag_values._RegisterFlagByModule(_GetCallingModule(), flag)
def _InternalDeclareKeyFlags(flag_names,
                             flag_values=FLAGS, key_flag_values=None):
  """Declares a flag as key for the calling module.

  Internal function.  User code should call DECLARE_key_flag or
  ADOPT_module_key_flags instead.

  Args:
    flag_names: A list of strings that are names of already-registered
      Flag objects.
    flag_values: A FlagValues object that the flags listed in
      flag_names have registered with (the value of the flag_values
      argument from the DEFINE_* calls that defined those flags).
      This should almost never need to be overridden.
    key_flag_values: A FlagValues object that (among possibly many
      other things) keeps track of the key flags for each module.
      Default None means "same as flag_values".  This should almost
      never need to be overridden.

  Raises:
    UnrecognizedFlagError: when we refer to a flag that was not
      defined yet.
  """
  key_flag_values = key_flag_values or flag_values

  module = _GetCallingModule()
  for name in flag_names:
    # Refuse to mark unknown flags as key.
    if name not in flag_values:
      raise UnrecognizedFlagError(name)
    key_flag_values._RegisterKeyFlagForModule(
        module, flag_values.FlagDict()[name])
def DECLARE_key_flag(flag_name, flag_values=FLAGS):
  """Declares one flag as key to the current module.

  Key flags are flags that are deemed really important for a module.
  They are important when listing help messages; e.g., if the
  --helpshort command-line flag is used, then only the key flags of the
  main module are listed (instead of all flags, as in the case of
  --help).

  Sample usage:

    flags.DECLARE_key_flag('flag_1')

  Args:
    flag_name: A string, the name of an already declared flag.
      (Redeclaring flags as key, including flags implicitly key
      because they were declared in this module, is a no-op.)
    flag_values: A FlagValues object.  This should almost never
      need to be overridden.
  """
  if flag_name in _SPECIAL_FLAGS:
    # Take care of the special flags, e.g., --flagfile, --undefok.
    # These flags are defined in _SPECIAL_FLAGS, and are treated
    # specially during flag parsing, taking precedence over the
    # user-defined flags.
    _InternalDeclareKeyFlags([flag_name],
                             flag_values=_SPECIAL_FLAGS,
                             key_flag_values=flag_values)
    return
  _InternalDeclareKeyFlags([flag_name], flag_values=flag_values)
def ADOPT_module_key_flags(module, flag_values=FLAGS):
  """Declares that all flags key to a module are key to the current module.

  Args:
    module: A module object.
    flag_values: A FlagValues object.  This should almost never need
      to be overridden.

  Raises:
    FlagsError: When given an argument that is a module name (a
      string), instead of a module object.
  """
  # NOTE(salcianu): an even better test would be if not
  # isinstance(module, types.ModuleType) but I didn't want to import
  # types for such a tiny use.
  if isinstance(module, str):
    raise FlagsError('Received module name %s; expected a module object.'
                     % module)
  _InternalDeclareKeyFlags(
      [f.name for f in flag_values._GetKeyFlagsForModule(module.__name__)],
      flag_values=flag_values)
  # If module is this flag module, take _SPECIAL_FLAGS into account.
  if module == _GetThisModuleObjectAndName()[0]:
    _InternalDeclareKeyFlags(
        # As we associate flags with _GetCallingModule(), the special
        # flags defined in this module are incorrectly registered with
        # a different module.  So, we can't use _GetKeyFlagsForModule.
        # Instead, we take all flags from _SPECIAL_FLAGS (a private
        # FlagValues, where no other module should register flags).
        [f.name for f in _SPECIAL_FLAGS.FlagDict().values()],
        flag_values=_SPECIAL_FLAGS,
        key_flag_values=flag_values)
#
# STRING FLAGS
#
def DEFINE_string(name, default, help, flag_values=FLAGS, **args):
  """Registers a flag whose value can be any string."""
  # Strings need no validation or conversion, so the base parser and
  # serializer suffice.
  DEFINE(ArgumentParser(), name, default, help, flag_values,
         ArgumentSerializer(), **args)
#
# BOOLEAN FLAGS
#
# and the special HELP flags.
class BooleanParser(ArgumentParser):
  """Parser of boolean values."""

  def Convert(self, argument):
    """Converts the argument to a boolean; raise ValueError on errors."""
    # isinstance is the idiomatic type check and, unlike the previous
    # "type(argument) == str", also accepts str subclasses.
    if isinstance(argument, str):
      if argument.lower() in ['true', 't', '1']:
        return True
      elif argument.lower() in ['false', 'f', '0']:
        return False

    bool_argument = bool(argument)
    if argument == bool_argument:
      # The argument is a valid boolean (True, False, 0, or 1), and not just
      # something that always converts to bool (list, string, int, etc.).
      return bool_argument
    raise ValueError('Non-boolean argument to boolean flag', argument)

  def Parse(self, argument):
    val = self.Convert(argument)
    return val

  def Type(self):
    return 'bool'
class BooleanFlag(Flag):
  """Basic boolean flag.

  Boolean flags do not take any arguments, and their value is either
  True (1) or False (0).  The false value is specified on the command
  line by prepending the word 'no' to either the long or the short flag
  name.

  For example, if a Boolean flag was created whose long name was
  'update' and whose short name was 'x', then this flag could be
  explicitly unset through either --noupdate or --nox.
  """

  def __init__(self, name, default, help, short_name=None, **args):
    p = BooleanParser()
    # serializer=None (booleans serialize as --name / --noname) and
    # boolean=1 (the flag takes no argument on the command line).
    Flag.__init__(self, p, None, name, default, help, short_name, 1, **args)
    if not self.help: self.help = "a boolean value"
def DEFINE_boolean(name, default, help, flag_values=FLAGS, **args):
  """Registers a boolean flag.

  Such a boolean flag does not take an argument.  If a user wants to
  specify a false value explicitly, the long option beginning with 'no'
  must be used: i.e. --noflag

  This flag will have a value of None, True or False.  None is possible
  if default=None and the user does not specify the flag on the command
  line.
  """
  DEFINE_flag(BooleanFlag(name, default, help, **args), flag_values)

# Match C++ API to unconfuse C++ people.
DEFINE_bool = DEFINE_boolean
class HelpFlag(BooleanFlag):
  """Special boolean flag that prints full usage information and exits.

  When --help (or -?) appears on the command line, the main module's
  docstring and the help text for all registered flags are printed,
  then SystemExit is raised.  Registered with allow_override=1, so
  other apps can define their own --help flag, replacing this one, if
  they want.
  """

  def __init__(self):
    BooleanFlag.__init__(self, "help", 0, "show this help",
                         short_name="?", allow_override=1)

  def Parse(self, arg):
    if arg:
      doc = sys.modules["__main__"].__doc__
      flags = str(FLAGS)
      # A single parenthesized argument keeps this print valid and
      # identical under both Python 2 and 3.
      print(doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0]))
      if flags:
        print("flags:")
        print(flags)
      sys.exit(1)
class HelpXMLFlag(BooleanFlag):
  """Similar to HelpFlag, but generates output in XML format.

  Dumps machine-readable flag documentation to stdout and raises
  SystemExit when --helpxml appears on the command line.
  """

  def __init__(self):
    BooleanFlag.__init__(self, 'helpxml', False,
                         'like --help, but generates XML output',
                         allow_override=1)

  def Parse(self, arg):
    if arg:
      FLAGS.WriteHelpInXMLFormat(sys.stdout)
      sys.exit(1)
class HelpshortFlag(BooleanFlag):
  """Special boolean flag that prints the main module's usage and exits.

  HelpshortFlag prints usage information for the "main" module only
  (instead of for every module, as --help does) and raises a
  SystemExit exception if it is ever found in the command line
  arguments.  Note this is called with allow_override=1, so other apps
  can define their own --helpshort flag, replacing this one, if they
  want.
  """

  def __init__(self):
    BooleanFlag.__init__(self, "helpshort", 0,
                         "show usage only for this module", allow_override=1)

  def Parse(self, arg):
    if arg:
      doc = sys.modules["__main__"].__doc__
      flags = FLAGS.MainModuleHelp()
      # Single parenthesized argument: same output on Python 2 and 3.
      print(doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0]))
      if flags:
        print("flags:")
        print(flags)
      sys.exit(1)
#
# Numeric parser - base class for Integer and Float parsers
#
class NumericParser(ArgumentParser):
  """Parser of numeric values.

  Parsed value may be bounded to a given upper and lower bound.
  """

  def IsOutsideBounds(self, val):
    # A bound of None means "unbounded" on that side.
    if self.lower_bound is not None and val < self.lower_bound:
      return True
    if self.upper_bound is not None and val > self.upper_bound:
      return True
    return False

  def Parse(self, argument):
    val = self.Convert(argument)
    if self.IsOutsideBounds(val):
      raise ValueError("%s is not %s" % (val, self.syntactic_help))
    return val

  def WriteCustomInfoInXMLFormat(self, outfile, indent):
    # Report whichever bounds are actually set.
    for tag, bound in (('lower_bound', self.lower_bound),
                       ('upper_bound', self.upper_bound)):
      if bound is not None:
        _WriteSimpleXMLElement(outfile, tag, bound, indent)

  def Convert(self, argument):
    """Default implementation: always returns its argument unmodified."""
    return argument
# End of Numeric Parser
#
# FLOAT FLAGS
#
class FloatParser(NumericParser):
  """Parser of floating point values.

  Parsed value may be bounded to a given upper and lower bound.
  """
  number_article = "a"
  number_name = "number"
  syntactic_help = " ".join((number_article, number_name))

  def __init__(self, lower_bound=None, upper_bound=None):
    super(FloatParser, self).__init__()
    self.lower_bound = lower_bound
    self.upper_bound = upper_bound
    # Refine the generic help text to describe the accepted range.
    # NOTE: the elif order matters -- the "== 0" cases must be tested
    # before the generic "is not None" cases to get the nicer wording.
    sh = self.syntactic_help
    if lower_bound is not None and upper_bound is not None:
      sh = ("%s in the range [%s, %s]" % (sh, lower_bound, upper_bound))
    elif lower_bound == 0:
      sh = "a non-negative %s" % self.number_name
    elif upper_bound == 0:
      sh = "a non-positive %s" % self.number_name
    elif upper_bound is not None:
      sh = "%s <= %s" % (self.number_name, upper_bound)
    elif lower_bound is not None:
      sh = "%s >= %s" % (self.number_name, lower_bound)
    self.syntactic_help = sh

  def Convert(self, argument):
    """Converts argument to a float; raises ValueError on errors."""
    return float(argument)

  def Type(self):
    return 'float'
# End of FloatParser
def DEFINE_float(name, default, help, lower_bound=None, upper_bound=None,
                 flag_values=FLAGS, **args):
  """Registers a flag whose value must be a float.

  If lower_bound or upper_bound are set, then this flag must be
  within the given range.
  """
  parser = FloatParser(lower_bound, upper_bound)
  DEFINE(parser, name, default, help, flag_values, ArgumentSerializer(),
         **args)
  # Bounds are also enforced at assignment time via a validator.
  _RegisterBoundsValidatorIfNeeded(parser, name, flag_values=flag_values)
#
# INTEGER FLAGS
#
class IntegerParser(NumericParser):
  """Parser of an integer value.

  Parsed value may be bounded to a given upper and lower bound.
  """
  number_article = "an"
  number_name = "integer"
  syntactic_help = " ".join((number_article, number_name))

  def __init__(self, lower_bound=None, upper_bound=None):
    super(IntegerParser, self).__init__()
    self.lower_bound = lower_bound
    self.upper_bound = upper_bound
    sh = self.syntactic_help
    # Order matters: the most specific phrasings (closed range, then the
    # special bounds 1/-1/0) are checked before the generic one-sided forms.
    if lower_bound is not None and upper_bound is not None:
      sh = ("%s in the range [%s, %s]" % (sh, lower_bound, upper_bound))
    elif lower_bound == 1:
      sh = "a positive %s" % self.number_name
    elif upper_bound == -1:
      sh = "a negative %s" % self.number_name
    elif lower_bound == 0:
      sh = "a non-negative %s" % self.number_name
    elif upper_bound == 0:
      sh = "a non-positive %s" % self.number_name
    elif upper_bound is not None:
      sh = "%s <= %s" % (self.number_name, upper_bound)
    elif lower_bound is not None:
      sh = "%s >= %s" % (self.number_name, lower_bound)
    self.syntactic_help = sh

  def Convert(self, argument):
    """Converts argument to an int (or py2 long); raises ValueError on errors.

    String arguments with a "0x" prefix are parsed as hexadecimal.
    """
    __pychecker__ = 'no-returnvalues'
    if type(argument) == str:
      base = 10
      # Only lowercase "0x" is recognized as a hex prefix here.
      if len(argument) > 2 and argument[0] == "0" and argument[1] == "x":
        base = 16
      try:
        return int(argument, base)
      # ValueError is thrown when argument is a string, and overflows an int.
      except ValueError:
        return long(argument, base)
    else:
      try:
        return int(argument)
      # OverflowError is thrown when argument is numeric, and overflows an int.
      except OverflowError:
        return long(argument)

  def Type(self):
    return 'int'
def DEFINE_integer(name, default, help, lower_bound=None, upper_bound=None,
                   flag_values=FLAGS, **args):
  """Registers a flag whose value must be an integer.

  If lower_bound, or upper_bound are set, then this flag must be
  within the given range.
  """
  parser = IntegerParser(lower_bound, upper_bound)
  DEFINE(parser, name, default, help, flag_values, ArgumentSerializer(),
         **args)
  # Bounds are also enforced at assignment time via a validator.
  _RegisterBoundsValidatorIfNeeded(parser, name, flag_values=flag_values)
#
# ENUM FLAGS
#
class EnumParser(ArgumentParser):
  """Parser of a string enum value (a string value from a given set).

  If enum_values (see below) is not specified, any string is allowed.
  """

  def __init__(self, enum_values=None):
    super(EnumParser, self).__init__()
    self.enum_values = enum_values

  def Parse(self, argument):
    """Return argument unchanged, rejecting values outside enum_values."""
    allowed = self.enum_values
    if not allowed or argument in allowed:
      return argument
    raise ValueError("value should be one of <%s>" % "|".join(allowed))

  def Type(self):
    return 'string enum'
class EnumFlag(Flag):
  """Basic enum flag; its value can be any string from list of enum_values."""

  def __init__(self, name, default, help, enum_values=None,
               short_name=None, **args):
    enum_values = enum_values or []
    Flag.__init__(self, EnumParser(enum_values), ArgumentSerializer(),
                  name, default, help, short_name, **args)
    # Prefix the help with the set of accepted values.
    base_help = self.help or "an enum string"
    self.help = "<%s>: %s" % ("|".join(enum_values), base_help)

  def _WriteCustomInfoInXMLFormat(self, outfile, indent):
    for enum_value in self.parser.enum_values:
      _WriteSimpleXMLElement(outfile, 'enum_value', enum_value, indent)
def DEFINE_enum(name, default, enum_values, help, flag_values=FLAGS,
                **args):
  """Registers a flag whose value can be any string from enum_values."""
  flag = EnumFlag(name, default, help, enum_values, **args)
  DEFINE_flag(flag, flag_values)
#
# LIST FLAGS
#
class BaseListParser(ArgumentParser):
  """Base class for a parser of lists of strings.

  To extend, inherit from this class; from the subclass __init__, call

    BaseListParser.__init__(self, token, name)

  where token is a character used to tokenize, and name is a description
  of the separator.
  """

  def __init__(self, token=None, name=None):
    assert name
    super(BaseListParser, self).__init__()
    self._token = token
    self._name = name
    self.syntactic_help = "a %s separated list" % self._name

  def Parse(self, argument):
    """Split argument on the separator; lists pass through, '' -> []."""
    if isinstance(argument, list):
      return argument
    if argument == '':
      return []
    return [piece.strip() for piece in argument.split(self._token)]

  def Type(self):
    return '%s separated list of strings' % self._name
class ListParser(BaseListParser):
  """Parser for a comma-separated list of strings."""

  def __init__(self):
    BaseListParser.__init__(self, ',', 'comma')

  def WriteCustomInfoInXMLFormat(self, outfile, indent):
    """Also advertise the comma separator in the XML help output."""
    BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent)
    _WriteSimpleXMLElement(outfile, 'list_separator', repr(','), indent)
class WhitespaceSeparatedListParser(BaseListParser):
  """Parser for a whitespace-separated list of strings."""

  def __init__(self):
    BaseListParser.__init__(self, None, 'whitespace')

  def WriteCustomInfoInXMLFormat(self, outfile, indent):
    """Emit one 'list_separator' element per whitespace character, sorted."""
    BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent)
    separators = list(string.whitespace)
    separators.sort()
    # BUG FIX: iterate the sorted list. Previously the loop iterated
    # string.whitespace directly, so the sort above was dead code and the
    # element order depended on the constant's internal ordering.
    for ws_char in separators:
      _WriteSimpleXMLElement(outfile, 'list_separator', repr(ws_char), indent)
def DEFINE_list(name, default, help, flag_values=FLAGS, **args):
  """Registers a flag whose value is a comma-separated list of strings."""
  DEFINE(ListParser(), name, default, help, flag_values,
         ListSerializer(','), **args)
def DEFINE_spaceseplist(name, default, help, flag_values=FLAGS, **args):
  """Registers a flag whose value is a whitespace-separated list of strings.

  Any whitespace can be used as a separator.
  """
  DEFINE(WhitespaceSeparatedListParser(), name, default, help, flag_values,
         ListSerializer(' '), **args)
#
# MULTI FLAGS
#
class MultiFlag(Flag):
  """A flag that can appear multiple time on the command-line.

  The value of such a flag is a list that contains the individual values
  from all the appearances of that flag on the command-line.

  See the __doc__ for Flag for most behavior of this class.  Only
  differences in behavior are described here:

    * The default value may be either a single value or a list of values.
      A single value is interpreted as the [value] singleton list.

    * The value of the flag is always a list, even if the option was
      only supplied once, and even if the default value is a single
      value
  """

  def __init__(self, *args, **kwargs):
    Flag.__init__(self, *args, **kwargs)
    self.help += ';\n repeat this option to specify a list of values'

  def Parse(self, arguments):
    """Parses one or more arguments with the installed parser.

    Args:
      arguments: a single argument or a list of arguments (typically a
        list of default values); a single argument is converted
        internally into a list containing one item.
    """
    if not isinstance(arguments, list):
      # Default value may be a list of values. Most other arguments
      # will not be, so convert them into a single-item list to make
      # processing simpler below.
      arguments = [arguments]
    if self.present:
      # keep a backup reference to list of previously supplied option values
      values = self.value
    else:
      # "erase" the defaults with an empty list
      values = []
    for item in arguments:
      # have Flag superclass parse argument, overwriting self.value reference
      Flag.Parse(self, item) # also increments self.present
      values.append(self.value)
    # put list of option values back in the 'value' attribute
    self.value = values

  def Serialize(self):
    """Serialize all accumulated values as space-separated Flag serializations.

    NOTE: self.value is temporarily rebound to each individual item (the
    'for self.value in multi_value' loop) so that Flag.Serialize(), which
    reads self.value, can serialize one item at a time; the original list
    is restored before returning.
    """
    if not self.serializer:
      raise FlagsError("Serializer not present for flag %s" % self.name)
    if self.value is None:
      return ''
    s = ''
    multi_value = self.value
    for self.value in multi_value:
      if s: s += ' '
      s += Flag.Serialize(self)
    self.value = multi_value
    return s

  def Type(self):
    return 'multi ' + self.parser.Type()
def DEFINE_multi(parser, serializer, name, default, help, flag_values=FLAGS,
                 **args):
  """Registers a generic MultiFlag that parses its args with a given parser.

  Auxiliary function.  Normal users should NOT use it directly.

  Developers who need to create their own 'Parser' classes for options
  which can appear multiple times can call this module function to
  register their flags.
  """
  flag = MultiFlag(parser, serializer, name, default, help, **args)
  DEFINE_flag(flag, flag_values)
def DEFINE_multistring(name, default, help, flag_values=FLAGS, **args):
  """Registers a flag whose value can be a list of any strings.

  Use the flag on the command line multiple times to place multiple
  string values into the list.  The 'default' may be a single string
  (which will be converted into a single-element list) or a list of
  strings.
  """
  DEFINE_multi(ArgumentParser(), ArgumentSerializer(), name, default, help,
               flag_values, **args)
def DEFINE_multi_int(name, default, help, lower_bound=None, upper_bound=None,
                     flag_values=FLAGS, **args):
  """Registers a flag whose value can be a list of arbitrary integers.

  Use the flag on the command line multiple times to place multiple
  integer values into the list.  The 'default' may be a single integer
  (which will be converted into a single-element list) or a list of
  integers.
  """
  DEFINE_multi(IntegerParser(lower_bound, upper_bound), ArgumentSerializer(),
               name, default, help, flag_values, **args)
# Now register the flags that we want to exist in all applications.
# These are all defined with allow_override=1, so user-apps can use
# these flagnames for their own purposes, if they want.
DEFINE_flag(HelpFlag())
DEFINE_flag(HelpshortFlag())
DEFINE_flag(HelpXMLFlag())

# Define special flags here so that help may be generated for them.
# NOTE: Please do NOT use _SPECIAL_FLAGS from outside this module.
# NOTE(review): these appear to be handled separately from ordinary flag
# parsing (they live in their own FlagValues) -- confirm against FlagValues.
_SPECIAL_FLAGS = FlagValues()

DEFINE_string(
    'flagfile', "",
    "Insert flag definitions from the given file into the command line.",
    _SPECIAL_FLAGS)
DEFINE_string(
    'undefok', "",
    "comma-separated list of flag names that it is okay to specify "
    "on the command line even if the program does not define a flag "
    "with that name. IMPORTANT: flags in this list that have "
    "arguments MUST use the --flag=value format.", _SPECIAL_FLAGS)
| Python |
#!/usr/bin/env python
# Copyright (c) 2010, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module to enforce different constraints on flags.
A validator represents an invariant, enforced over one or more flags.
See 'FLAGS VALIDATORS' in flags.py's docstring for a usage manual.
"""
__author__ = 'olexiy@google.com (Olexiy Oryeshko)'
class Error(Exception):
  """Raised when a validator's constraint is not satisfied."""
class Validator(object):
  """Base class for flags validators.

  Users should NOT overload these classes, and use flags.Register...
  methods instead.
  """

  # Monotonically increasing counter used to assign each validator a
  # unique insertion_index.
  validators_count = 0

  def __init__(self, checker, message):
    """Constructor to create all validators.

    Args:
      checker: function to verify the constraint.
        Input of this method varies, see SimpleValidator and
        DictionaryValidator for a detailed description.
      message: string, error message to be shown to the user
    """
    self.checker = checker
    self.message = message
    Validator.validators_count += 1
    # Used to assert validators in the order they were registered.
    self.insertion_index = Validator.validators_count

  def Verify(self, flag_values):
    """Verify that constraint is satisfied.

    flags library calls this method to verify Validator's constraint.

    Args:
      flag_values: flags.FlagValues, containing all flags

    Raises:
      Error: if constraint is not satisfied.
    """
    if not self.checker(self._GetInputToCheckerFunction(flag_values)):
      raise Error(self.message)

  def GetFlagsNames(self):
    """Return the names of the flags checked by this validator.

    Returns:
      [string], names of the flags
    """
    raise NotImplementedError('This method should be overloaded')

  def PrintFlagsWithValues(self, flag_values):
    raise NotImplementedError('This method should be overloaded')

  def _GetInputToCheckerFunction(self, flag_values):
    """Given flag values, construct the input to be given to checker.

    Args:
      flag_values: flags.FlagValues, containing all flags.

    Returns:
      Return type depends on the specific validator.
    """
    raise NotImplementedError('This method should be overloaded')
class SimpleValidator(Validator):
  """Validator behind RegisterValidator() method.

  Validates that a single flag passes its checker function. The checker
  function takes the flag value and returns True (if value looks fine) or,
  if flag value is not valid, either returns False or raises an Exception.
  """

  def __init__(self, flag_name, checker, message):
    """Constructor.

    Args:
      flag_name: string, name of the flag.
      checker: function to verify the validator.
        input - value of the corresponding flag (string, boolean, etc).
        output - Boolean. Must return True if validator constraint is
          satisfied.  If constraint is not satisfied, it should either
          return False or raise Error.
      message: string, error message to be shown to the user if validator's
        condition is not satisfied
    """
    super(SimpleValidator, self).__init__(checker, message)
    self.flag_name = flag_name

  def _GetInputToCheckerFunction(self, flag_values):
    """Return the current value of the single watched flag."""
    return flag_values[self.flag_name].value

  def GetFlagsNames(self):
    return [self.flag_name]

  def PrintFlagsWithValues(self, flag_values):
    return 'flag --%s=%s' % (self.flag_name, flag_values[self.flag_name].value)
class DictionaryValidator(Validator):
  """Validator behind RegisterDictionaryValidator method.

  Validates that flag values pass their common checker function. The checker
  function takes flag values and returns True (if values look fine) or,
  if values are not valid, either returns False or raises an Exception.
  """

  def __init__(self, flag_names, checker, message):
    """Constructor.

    Args:
      flag_names: [string], containing names of the flags used by checker.
      checker: function to verify the validator.
        input - dictionary, with keys() being flag_names, and value for
          each key being the value of the corresponding flag (string,
          boolean, etc).
        output - Boolean. Must return True if validator constraint is
          satisfied.  If constraint is not satisfied, it should either
          return False or raise Error.
      message: string, error message to be shown to the user if validator's
        condition is not satisfied
    """
    super(DictionaryValidator, self).__init__(checker, message)
    self.flag_names = flag_names

  def _GetInputToCheckerFunction(self, flag_values):
    """Map each watched flag name to its current value.

    Args:
      flag_values: flags.FlagValues

    Returns:
      dictionary, with keys() being self.flag_names, and value for each
      key being the value of the corresponding flag (string, boolean, etc).
    """
    return dict((name, flag_values[name].value) for name in self.flag_names)

  def PrintFlagsWithValues(self, flag_values):
    pairs = ['%s=%s' % (name, flag_values[name].value)
             for name in self.flag_names]
    return 'flags ' + ', '.join(pairs)

  def GetFlagsNames(self):
    return self.flag_names
| Python |
#!/usr/bin/python2.5
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parser for Outlook CSV file data."""
__author__ = "dwightguth@google.com (Dwight Guth)"
import csv
import datetime
import StringIO
import model
class Parser(object):
  """Parses CSV export from Outlook into App Engine datastore entities."""

  def __init__(self, tasklist):
    """Creates a new Parser object.

    Args:
      tasklist: the tasklist to put the parsed tasks into.
    """
    self.tasklist = tasklist

  def ParseAndStore(self, csv_data):
    """Parses the provided data and stores the resulting entities.

    Args:
      csv_data: the text of a CSV file to be parsed for todo objects.

    Returns:
      The list of entities created by parsing csv_data.
    """
    reader = csv.DictReader(StringIO.StringIO(csv_data))
    return [self.ParseItem(row) for row in reader]

  def ParseItem(self, item):
    """Parses a single CSV row and stores the resulting entity.

    Args:
      item: a csv row object representing an Outlook todo item.

    Returns:
      The entity created by parsing item.
    """
    task = model.Task()
    if self.tasklist:
      task.parent_entity = self.tasklist
    # A title is mandatory, so fall back to the empty string.
    task.title = item["Subject"] or ""
    if item["Notes"]:
      task.notes = item["Notes"]
    # Outlook exports dates in US month/day/year format.
    if item["Due Date"]:
      task.due = datetime.datetime.strptime(item["Due Date"], "%m/%d/%Y").date()
    if item["Date Completed"]:
      task.completed = datetime.datetime.strptime(item["Date Completed"],
                                                  "%m/%d/%Y")
    if item["Status"]:
      task.status = "completed" if item["Status"] == "Complete" else "needsAction"
    task.put()
    return task
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to encapsulate a single HTTP request.
The classes implement a command pattern, with every
object supporting an execute() method that does the
actual HTTP request.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
# BUG FIX: a missing comma between 'HttpMock' and 'set_user_agent' made
# Python concatenate them into the single name 'HttpMockset_user_agent',
# silently dropping both from the public API.
__all__ = [
    'HttpRequest', 'RequestMockBuilder', 'HttpMock',
    'set_user_agent', 'tunnel_patch'
    ]
import httplib2
import logging
import os

from anyjson import simplejson
from errors import HttpError
from model import JsonModel
class HttpRequest(object):
  """Encapsulates a single HTTP request."""

  def __init__(self, http, postproc, uri,
               method='GET',
               body=None,
               headers=None,
               methodId=None):
    """Constructor for an HttpRequest.

    Args:
      http: httplib2.Http, the transport object to use to make a request
      postproc: callable, called on the HTTP response and content to transform
        it into a data object before returning, or raising an exception
        on an error.
      uri: string, the absolute URI to send the request to
      method: string, the HTTP method to use
      body: string, the request body of the HTTP request
      headers: dict, the HTTP request headers
      methodId: string, a unique identifier for the API method being called.
    """
    self.uri = uri
    self.method = method
    self.body = body
    self.headers = headers or {}
    self.http = http
    self.postproc = postproc

  def execute(self, http=None):
    """Execute the request.

    Args:
      http: httplib2.Http, an http object to be used in place of the
        one the HttpRequest request object was constructed with.

    Returns:
      A deserialized object model of the response body as determined
      by the postproc.

    Raises:
      apiclient.errors.HttpError if the response was not a 2xx.
      httplib2.Error if a transport error has occured.
    """
    transport = self.http if http is None else http
    resp, content = transport.request(self.uri, self.method,
                                      body=self.body,
                                      headers=self.headers)
    if resp.status >= 300:
      raise HttpError(resp, content, self.uri)
    return self.postproc(resp, content)
class HttpRequestMock(object):
  """Mock of HttpRequest.

  Do not construct directly, instead use RequestMockBuilder.
  """

  def __init__(self, resp, content, postproc):
    """Constructor for HttpRequestMock

    Args:
      resp: httplib2.Response, the response to emulate coming from the request
      content: string, the response body
      postproc: callable, the post processing function usually supplied by
        the model class. See model.JsonModel.response() as an example.
    """
    # A missing response is emulated as a plain 200 OK.
    if resp is None:
      resp = httplib2.Response({'status': 200, 'reason': 'OK'})
    self.resp = resp
    self.content = content
    self.postproc = postproc
    if 'reason' in self.resp:
      self.resp.reason = self.resp['reason']

  def execute(self, http=None):
    """Execute the request.

    Same behavior as HttpRequest.execute(), but the response is
    mocked and not really from an HTTP request/response.
    """
    return self.postproc(self.resp, self.content)
class RequestMockBuilder(object):
  """A simple mock of HttpRequest

  Pass in a dictionary to the constructor that maps request methodIds to
  tuples of (httplib2.Response, content) that should be returned when that
  method is called. None may also be passed in for the httplib2.Response, in
  which case a 200 OK response will be generated.

  Example:
    response = '{"data": {"id": "tag:google.c...'
    requestBuilder = RequestMockBuilder(
      {
        'chili.activities.get': (None, response),
      }
    )
    apiclient.discovery.build("buzz", "v1", requestBuilder=requestBuilder)

  Methods that you do not supply a response for will return a
  200 OK with an empty string as the response content. The methodId
  is taken from the rpcName in the discovery document.

  For more details see the project wiki.
  """

  def __init__(self, responses):
    """Constructor for RequestMockBuilder

    The constructed object should be a callable object
    that can replace the class HttpResponse.

    responses - A dictionary that maps methodIds into tuples
                of (httplib2.Response, content). The methodId
                comes from the 'rpcName' field in the discovery
                document.
    """
    self.responses = responses

  def __call__(self, http, postproc, uri, method='GET', body=None,
               headers=None, methodId=None):
    """Implements the callable interface that discovery.build() expects
    of requestBuilder, which is to build an object compatible with
    HttpRequest.execute(). See that method for the description of the
    parameters and the expected response.
    """
    try:
      resp, content = self.responses[methodId]
    except KeyError:
      # Unregistered methods get an empty 200 OK JSON response.
      return HttpRequestMock(None, '{}', JsonModel(False).response)
    return HttpRequestMock(resp, content, postproc)
class HttpMock(object):
  """Mock of httplib2.Http that replays a canned response from a file."""

  def __init__(self, filename, headers=None):
    """
    Args:
      filename: string, absolute filename to read response from
      headers: dict, header to return with response
    """
    if headers is None:
      headers = {'status': '200 OK'}
    # FIX: use open() instead of the Python-2-only file() builtin (removed
    # in Python 3; open() is identical in 2.x), and a context manager so
    # the handle is closed even if read() raises.
    with open(filename, 'r') as f:
      self.data = f.read()
    self.headers = headers

  def request(self, uri,
              method='GET',
              body=None,
              headers=None,
              redirections=1,
              connection_type=None):
    """Return the canned headers and body regardless of the request."""
    return httplib2.Response(self.headers), self.data
class HttpMockSequence(object):
  """Mock of httplib2.Http

  Mocks a sequence of calls to request returning different responses for each
  call. Create an instance initialized with the desired response headers
  and content and then use as if an httplib2.Http instance.

    http = HttpMockSequence([
      ({'status': '401'}, ''),
      ({'status': '200'}, '{"access_token":"1/3w","expires_in":3600}'),
      ({'status': '200'}, 'echo_request_headers'),
      ])
    resp, content = http.request("http://examples.com")

  There are special values you can pass in for content to trigger
  behaviours that are helpful in testing.

  'echo_request_headers' means return the request headers in the response body
  'echo_request_headers_as_json' means return the request headers in
     the response body
  'echo_request_body' means return the request body in the response body
  """

  def __init__(self, iterable):
    """
    Args:
      iterable: iterable, a sequence of pairs of (headers, body)
    """
    self._iterable = iterable

  def request(self, uri,
              method='GET',
              body=None,
              headers=None,
              redirections=1,
              connection_type=None):
    """Pop the next canned (headers, body) pair and return it."""
    resp, content = self._iterable.pop(0)
    # Special content markers echo parts of the request back to the caller.
    if content == 'echo_request_headers':
      return httplib2.Response(resp), headers
    if content == 'echo_request_headers_as_json':
      return httplib2.Response(resp), simplejson.dumps(headers)
    if content == 'echo_request_body':
      return httplib2.Response(resp), body
    return httplib2.Response(resp), content
def set_user_agent(http, user_agent):
  """Set the user-agent on every request.

  Args:
     http - An instance of httplib2.Http
         or something that acts like it.
     user_agent: string, the value for the user-agent header.

  Returns:
     A modified instance of http that was passed in.

  Example:

    h = httplib2.Http()
    h = set_user_agent(h, "my-app-name/6.0")

  Most of the time the user-agent will be set doing auth, this is for the rare
  cases where you are accessing an unauthenticated endpoint.
  """
  request_orig = http.request

  # The closure that will replace 'httplib2.Http.request'.
  def new_request(uri, method='GET', body=None, headers=None,
                  redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                  connection_type=None):
    """Modify the request headers to add the user-agent."""
    if headers is None:
      headers = {}
    # Prepend our agent to any agent the caller already supplied.
    if 'user-agent' in headers:
      headers['user-agent'] = user_agent + ' ' + headers['user-agent']
    else:
      headers['user-agent'] = user_agent
    return request_orig(uri, method, body, headers,
                        redirections, connection_type)

  http.request = new_request
  return http
def tunnel_patch(http):
  """Tunnel PATCH requests over POST.

  Args:
     http - An instance of httplib2.Http
         or something that acts like it.

  Returns:
     A modified instance of http that was passed in.

  Example:

    h = httplib2.Http()
    h = tunnel_patch(h)

  Useful if you are running on a platform that doesn't support PATCH.
  Apply this last if you are using OAuth 1.0, as changing the method
  will result in a different signature.
  """
  request_orig = http.request

  # The closure that will replace 'httplib2.Http.request'.
  def new_request(uri, method='GET', body=None, headers=None,
                  redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                  connection_type=None):
    """Rewrite PATCH requests as POST with an override header."""
    if headers is None:
      headers = {}
    if method == 'PATCH':
      if 'oauth_token' in headers.get('authorization', ''):
        # BUG FIX: 'logging' was used here without being imported anywhere
        # in this module, raising NameError on this path; the import has
        # been added to the module's import block.
        logging.warning(
            'OAuth 1.0 request made with Credentials after tunnel_patch.')
      headers['x-http-method-override'] = "PATCH"
      method = 'POST'
    resp, content = request_orig(uri, method, body, headers,
                                 redirections, connection_type)
    return resp, content

  http.request = new_request
  return http
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Google App Engine
Utilities for making it easier to use OAuth 2.0 on Google App Engine.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import pickle
from google.appengine.ext import db
from client import Credentials
from client import Flow
from client import Storage
class FlowProperty(db.Property):
  """App Engine datastore Property for Flow.

  Utility property that allows easy storage and retrieval of an
  oauth2client Flow, pickled into a db.Blob.
  """

  # Tell what the user type is.
  data_type = Flow

  # For writing to datastore.
  def get_value_for_datastore(self, model_instance):
    flow = super(FlowProperty,
                 self).get_value_for_datastore(model_instance)
    return db.Blob(pickle.dumps(flow))

  # For reading from datastore.
  def make_value_from_datastore(self, value):
    if value is None:
      return None
    return pickle.loads(value)

  def validate(self, value):
    if value is not None and not isinstance(value, Flow):
      # BUG FIX: BadValueError is not a builtin and was never imported;
      # it must be referenced through the imported db module.
      raise db.BadValueError('Property %s must be convertible '
                             'to a FlowThreeLegged instance (%s)' %
                             (self.name, value))
    return super(FlowProperty, self).validate(value)

  def empty(self, value):
    return not value
class CredentialsProperty(db.Property):
  """App Engine datastore Property for Credentials.

  Utility property that allows easy storage and retrieval of
  oauth2client Credentials, pickled into a db.Blob.
  """

  # Tell what the user type is.
  data_type = Credentials

  # For writing to datastore.
  def get_value_for_datastore(self, model_instance):
    cred = super(CredentialsProperty,
                 self).get_value_for_datastore(model_instance)
    return db.Blob(pickle.dumps(cred))

  # For reading from datastore.
  def make_value_from_datastore(self, value):
    if value is None:
      return None
    return pickle.loads(value)

  def validate(self, value):
    if value is not None and not isinstance(value, Credentials):
      # BUG FIX: BadValueError is not a builtin and was never imported;
      # it must be referenced through the imported db module.
      raise db.BadValueError('Property %s must be convertible '
                             'to an Credentials instance (%s)' %
                             (self.name, value))
    return super(CredentialsProperty, self).validate(value)

  def empty(self, value):
    return not value
class StorageByKeyName(Storage):
  """Store and retrieve a single credential to and from
  the App Engine datastore.

  This Storage helper presumes the Credentials
  have been stored as a CredentialsProperty
  on a datastore model class, and that entities
  are stored by key_name.
  """

  def __init__(self, model, key_name, property_name):
    """Constructor for Storage.

    Args:
      model: db.Model, model class
      key_name: string, key name for the entity that has the credentials
      property_name: string, name of the property that is a
        CredentialsProperty
    """
    self._model = model
    self._key_name = key_name
    self._property_name = property_name

  def _entity(self):
    """Fetch (or create) the datastore entity holding the credential."""
    return self._model.get_or_insert(self._key_name)

  def get(self):
    """Retrieve Credential from datastore.

    Returns:
      oauth2client Credentials
    """
    credential = getattr(self._entity(), self._property_name)
    # Wire the credential back to this storage so refreshes are persisted.
    if credential and hasattr(credential, 'set_store'):
      credential.set_store(self.put)
    return credential

  def put(self, credentials):
    """Write a Credentials to the datastore.

    Args:
      credentials: Credentials, the credentials to store.
    """
    entity = self._entity()
    setattr(entity, self._property_name, credentials)
    entity.put()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line tools for authenticating via OAuth 2.0
Do the OAuth 2.0 Web Server dance for a command line application. Stores the
generated credentials in a common file that is used by other example apps in
the same directory.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
__all__ = ['run']
import BaseHTTPServer
import gflags
import logging
import socket
import sys
from optparse import OptionParser
from client import FlowExchangeError
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
FLAGS = gflags.FLAGS

# When True, run() spins up a local web server to catch the OAuth redirect;
# when False (or when no port can be bound) it falls back to the manual
# out-of-band ('oob') copy/paste flow.
gflags.DEFINE_boolean('auth_local_webserver', True,
                      ('Run a local web server to handle redirects during '
                       'OAuth authorization.'))
# Host name the local redirect server binds to and that is embedded in the
# redirect URI sent to the provider.
gflags.DEFINE_string('auth_host_name', 'localhost',
                     ('Host name to use when running a local web server to '
                      'handle redirects during OAuth authorization.'))
# Candidate ports, tried in order until one can be bound.
gflags.DEFINE_multi_int('auth_host_port', [8080, 8090],
                        ('Port to use when running a local web server to '
                         'handle redirects during OAuth authorization.'))
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
  """A server to handle OAuth 2.0 redirects back to localhost.

  Waits for a single request and parses the query parameters
  into query_params and then stops serving.
  """
  # Class-level default; ClientRedirectHandler overwrites this on the
  # serving instance (via self.server.query_params) with the parsed query
  # string of the redirect request.
  query_params = {}
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """Request handler for the OAuth 2.0 redirect back to localhost.

  Handles a single GET, stores its parsed query parameters on the owning
  server's query_params, and renders a short status page.
  """

  def do_GET(self):
    """Handle the redirect GET request.

    Records the query parameters on the server and prints a completion
    message. Success vs. failure cannot be distinguished here.
    """
    self.send_response(200)
    self.send_header("Content-type", "text/html")
    self.end_headers()
    # Everything after the first '?' (or the whole path when absent).
    raw_query = self.path.split('?', 1)[-1]
    self.server.query_params = dict(parse_qsl(raw_query))
    self.wfile.write("<html><head><title>Authentication Status</title></head>")
    self.wfile.write("<body><p>The authentication flow has completed.</p>")
    self.wfile.write("</body></html>")

  def log_message(self, format, *args):
    """Suppress request logging on stdout for command-line use."""
    pass
def run(flow, storage):
  """Core code for a command-line application.

  Steps through the given OAuth 2.0 flow — either via a temporary local
  web server that catches the provider redirect, or via the manual
  out-of-band ('oob') copy/paste dance — then persists the credential.

  Args:
    flow: Flow, an OAuth 2.0 Flow to step through.
    storage: Storage, a Storage to store the credential in.

  Returns:
    Credentials, the obtained credential.
  """
  if FLAGS.auth_local_webserver:
    success = False
    port_number = 0
    # Try each configured port in order; the first one we can bind wins.
    # NOTE(review): plain BaseHTTPServer.HTTPServer is used here, so the
    # ClientRedirectServer subclass defined above is never instantiated —
    # confirm this is intentional.
    for port in FLAGS.auth_host_port:
      port_number = port
      try:
        httpd = BaseHTTPServer.HTTPServer((FLAGS.auth_host_name, port),
                                          ClientRedirectHandler)
      except socket.error, e:
        # Port unavailable; try the next candidate.
        pass
      else:
        success = True
        break
    # If no port could be bound, fall back to the manual 'oob' flow.
    FLAGS.auth_local_webserver = success

  if FLAGS.auth_local_webserver:
    oauth_callback = 'http://%s:%s/' % (FLAGS.auth_host_name, port_number)
  else:
    oauth_callback = 'oob'
  authorize_url = flow.step1_get_authorize_url(oauth_callback)

  print 'Go to the following link in your browser:'
  print authorize_url
  print

  if FLAGS.auth_local_webserver:
    # Block until the browser hits our redirect handler exactly once.
    httpd.handle_request()
    if 'error' in httpd.query_params:
      sys.exit('Authentication request was rejected.')
    if 'code' in httpd.query_params:
      code = httpd.query_params['code']
    # NOTE(review): if the redirect carried neither 'error' nor 'code',
    # 'code' is unbound below and step2_exchange raises NameError — confirm.
  else:
    accepted = 'n'
    while accepted.lower() == 'n':
      accepted = raw_input('Have you authorized me? (y/n) ')
    code = raw_input('What is the verification code? ').strip()

  try:
    credentials = flow.step2_exchange(code)
  except FlowExchangeError:
    sys.exit('The authentication has failed.')

  storage.put(credentials)
  # Keep the stored copy in sync with future token refreshes.
  credentials.set_store(storage.put)
  print "You have successfully authenticated."
  return credentials
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An OAuth 2.0 client
Tools for interacting with OAuth 2.0 protected
resources.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import copy
import datetime
import httplib2
import logging
import urllib
import urlparse
try: # pragma: no cover
import simplejson
except ImportError: # pragma: no cover
try:
# Try to import from django, should work on App Engine
from django.utils import simplejson
except ImportError:
# Should work for Python2.6 and higher.
import json as simplejson
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
class Error(Exception):
  """Root of the exception hierarchy for this module."""
class FlowExchangeError(Error):
  """Raised when exchanging an authorization grant for an access token fails."""
class AccessTokenRefreshError(Error):
  """Raised when refreshing an expired access token fails."""
class AccessTokenCredentialsError(Error):
  """Raised when only an access_token is held, so no refresh is possible."""
def _abstract():
raise NotImplementedError('You need to override this function')
class Credentials(object):
  """Abstract base for all Credentials objects.

  Subclasses must implement authorize(), which instruments an
  httplib2.Http-compatible transport so that every request it issues
  carries the appropriate authorization headers.
  """

  def authorize(self, http):
    """Attach these credentials to *http* and return it.

    Typically implemented by replacing http.request() with a wrapper
    that injects headers and then delegates to the original
    Http.request() method.
    """
    _abstract()
class Flow(object):
  """Common ancestor for every OAuth flow implementation."""
class Storage(object):
  """Abstract interface for persisting a single credential.

  Concrete subclasses decide where the credential lives (file, datastore
  row, ...); this base only fixes the get/put contract.
  """

  def get(self):
    """Retrieve the stored credential.

    Returns:
      apiclient.oauth2client.client.Credentials
    """
    _abstract()

  def put(self, credentials):
    """Persist a credential.

    Args:
      credentials: Credentials, the credentials to store.
    """
    _abstract()
class OAuth2Credentials(Credentials):
  """Credentials object for OAuth 2.0.

  Credentials can be applied to an httplib2.Http object using the
  authorize() method, which then signs each request from that object with
  the OAuth 2.0 access token.

  OAuth2Credentials objects may be safely pickled and unpickled.
  """

  def __init__(self, access_token, client_id, client_secret, refresh_token,
      token_expiry, token_uri, user_agent):
    """Create an instance of OAuth2Credentials.

    This constructor is not usually called by the user; instead
    OAuth2Credentials objects are instantiated by the OAuth2WebServerFlow.

    Args:
      access_token: string, access token.
      client_id: string, client identifier.
      client_secret: string, client secret.
      refresh_token: string, refresh token.
      token_expiry: datetime, when the access_token expires.
      token_uri: string, URI of the token endpoint.
      user_agent: string, The HTTP User-Agent to provide for this
        application.

    Notes:
      store: callable, a callable that when passed a Credential will store
        the credential back to where it came from. This is needed to store
        the latest access_token if it has expired and been refreshed.
    """
    self.access_token = access_token
    self.client_id = client_id
    self.client_secret = client_secret
    self.refresh_token = refresh_token
    self.store = None
    self.token_expiry = token_expiry
    self.token_uri = token_uri
    self.user_agent = user_agent

    # True if the credentials have been revoked or expired and can't be
    # refreshed.
    self._invalid = False

  @property
  def invalid(self):
    """True if the credentials are invalid, such as being revoked."""
    # getattr default keeps instances unpickled from older versions
    # (which lack _invalid) working.
    return getattr(self, '_invalid', False)

  def set_store(self, store):
    """Set the storage for the credential.

    Args:
      store: callable, a callable that when passed a Credential will store
        the credential back to where it came from. This is needed to store
        the latest access_token if it has expired and been refreshed.
    """
    self.store = store

  def __getstate__(self):
    """Trim the state down to something that can be pickled."""
    # The store callable is not picklable; drop it, __setstate__ resets it.
    d = copy.copy(self.__dict__)
    del d['store']
    return d

  def __setstate__(self, state):
    """Reconstitute the state of the object from being pickled."""
    self.__dict__.update(state)
    self.store = None

  def _refresh(self, http_request):
    """Refresh the access_token using the refresh_token.

    Args:
      http_request: callable matching httplib2.Http.request, used to POST
        to the token endpoint.

    Raises:
      AccessTokenRefreshError: if the token endpoint rejects the refresh.
    """
    body = urllib.urlencode({
        'grant_type': 'refresh_token',
        'client_id': self.client_id,
        'client_secret': self.client_secret,
        'refresh_token': self.refresh_token
        })
    headers = {
        'user-agent': self.user_agent,
        'content-type': 'application/x-www-form-urlencoded'
        }

    logging.info('Refreshing access_token')
    resp, content = http_request(
        self.token_uri, method='POST', body=body, headers=headers)
    if resp.status == 200:
      # TODO(jcgregorio) Raise an error if loads fails?
      d = simplejson.loads(content)
      self.access_token = d['access_token']
      # A provider may rotate the refresh token; keep ours if it didn't.
      self.refresh_token = d.get('refresh_token', self.refresh_token)
      if 'expires_in' in d:
        self.token_expiry = datetime.timedelta(
            seconds=int(d['expires_in'])) + datetime.datetime.now()
      else:
        self.token_expiry = None
      # Persist the refreshed token so other processes see it.
      if self.store is not None:
        self.store(self)
    else:
      # An {'error': ...} response body means the token is expired or
      # revoked, so we flag the credentials as such.
      logging.error('Failed to retrieve access token: %s', content)
      error_msg = 'Invalid response %s.' % resp['status']
      try:
        d = simplejson.loads(content)
        if 'error' in d:
          error_msg = d['error']
          self._invalid = True
          if self.store is not None:
            self.store(self)
          else:
            logging.warning('Unable to store refreshed credentials, '
                            'no Storage provided.')
      except Exception:
        # Body was not JSON (or storing failed); keep the generic message.
        # (Was a bare except; narrowed so SystemExit et al. propagate.)
        pass
      raise AccessTokenRefreshError(error_msg)

  def authorize(self, http):
    """Authorize an httplib2.Http instance with these credentials.

    Args:
      http: An instance of httplib2.Http or something that acts like it.

    Returns:
      A modified instance of http that was passed in.

    Example:

      h = httplib2.Http()
      h = credentials.authorize(h)

    You can't create a new OAuth subclass of httplib2.Authentication
    because it never gets passed the absolute URI, which is needed for
    signing. So instead we have to overload 'request' with a closure
    that adds in the Authorization header and then calls the original
    version of 'request()'.
    """
    request_orig = http.request

    # The closure that will replace 'httplib2.Http.request'.
    def new_request(uri, method='GET', body=None, headers=None,
                    redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                    connection_type=None):
      """Modify the request headers to add the appropriate
      Authorization header."""
      if headers is None:  # was 'headers == None'; identity test is correct
        headers = {}
      headers['authorization'] = 'OAuth ' + self.access_token
      if 'user-agent' in headers:
        headers['user-agent'] = self.user_agent + ' ' + headers['user-agent']
      else:
        headers['user-agent'] = self.user_agent
      resp, content = request_orig(uri, method, body, headers,
                                   redirections, connection_type)
      if resp.status == 401:
        # Refresh once and retry the request with the new token.
        logging.info('Refreshing because we got a 401')
        self._refresh(request_orig)
        headers['authorization'] = 'OAuth ' + self.access_token
        return request_orig(uri, method, body, headers,
                            redirections, connection_type)
      else:
        return (resp, content)

    http.request = new_request
    return http
class AccessTokenCredentials(OAuth2Credentials):
  """Credentials for a bare OAuth 2.0 access token.

  Credentials can be applied to an httplib2.Http object using the
  authorize() method, which then signs each request with the OAuth 2.0
  access token. This set of credentials is for the use case where you
  acquired the access_token somewhere else (a JavaScript client, another
  web application, ...) and wish to use it from Python. Because only the
  access_token is present it can not be refreshed and will in time expire.

  AccessTokenCredentials objects may be safely pickled and unpickled.

  Usage:
    credentials = AccessTokenCredentials('<an access token>',
      'my-user-agent/1.0')
    http = httplib2.Http()
    http = credentials.authorize(http)

  Exceptions:
    AccessTokenCredentialsError: raised when the access_token expires or
      is revoked, since no refresh is possible.
  """

  def __init__(self, access_token, user_agent):
    """Create an instance of AccessTokenCredentials.

    This is one of the few Credentials types you should construct
    directly; Credentials objects are usually instantiated by a Flow.

    Args:
      access_token: string, access token.
      user_agent: string, The HTTP User-Agent to provide for this
        application.
    """
    # Everything except the token itself and the user agent is unknown.
    super(AccessTokenCredentials, self).__init__(
        access_token, None, None, None, None, None, user_agent)

  def _refresh(self, http_request):
    """A bare access token can never be refreshed; always fail."""
    raise AccessTokenCredentialsError(
        "The access_token is expired or invalid and can't be refreshed.")
class OAuth2WebServerFlow(Flow):
  """Does the Web Server Flow for OAuth 2.0.

  OAuth2Credentials objects may be safely pickled and unpickled.
  """

  def __init__(self, client_id, client_secret, scope, user_agent,
      auth_uri='https://accounts.google.com/o/oauth2/auth',
      token_uri='https://accounts.google.com/o/oauth2/token',
      **kwargs):
    """Constructor for OAuth2WebServerFlow.

    Args:
      client_id: string, client identifier.
      client_secret: string, client secret.
      scope: string, scope of the credentials being requested.
      user_agent: string, HTTP User-Agent to provide for this application.
      auth_uri: string, URI for the authorization endpoint. For
        convenience defaults to Google's endpoints but any OAuth 2.0
        provider can be used.
      token_uri: string, URI for the token endpoint. For convenience
        defaults to Google's endpoints but any OAuth 2.0 provider can be
        used.
      **kwargs: dict, optional extra query parameters for the OAuth calls.
    """
    self.client_id = client_id
    self.client_secret = client_secret
    self.scope = scope
    self.user_agent = user_agent
    self.auth_uri = auth_uri
    self.token_uri = token_uri
    self.params = kwargs
    # Set by step1_get_authorize_url(); echoed back in step2_exchange().
    self.redirect_uri = None

  def step1_get_authorize_url(self, redirect_uri='oob'):
    """Returns a URI to redirect to the provider.

    Args:
      redirect_uri: string, either the string 'oob' for a non-web-based
        application, or a URI that handles the callback from the
        authorization server.

    If redirect_uri is 'oob' then pass in the generated verification code
    to step2_exchange, otherwise pass in the query parameters received at
    the callback uri to step2_exchange.
    """
    self.redirect_uri = redirect_uri
    query = {
        'response_type': 'code',
        'client_id': self.client_id,
        'redirect_uri': redirect_uri,
        'scope': self.scope,
        }
    query.update(self.params)
    parts = list(urlparse.urlparse(self.auth_uri))
    # Merge any query string already present on auth_uri; index 4 of the
    # urlparse 6-tuple is the query component.
    query.update(dict(parse_qsl(parts[4])))
    parts[4] = urllib.urlencode(query)
    return urlparse.urlunparse(parts)

  def step2_exchange(self, code, http=None):
    """Exchanges an authorization code for OAuth2Credentials.

    Args:
      code: string or dict, either the code as a string, or a dictionary
        of the query parameters to the redirect_uri, which contains
        the code.
      http: httplib2.Http, optional http instance to use to do the fetch.

    Returns:
      An OAuth2Credentials object.

    Raises:
      FlowExchangeError: if the token endpoint rejects the exchange.
    """
    # Accept either the raw code or the redirect's query-parameter dict.
    if not isinstance(code, (str, unicode)):
      code = code['code']

    body = urllib.urlencode({
        'grant_type': 'authorization_code',
        'client_id': self.client_id,
        'client_secret': self.client_secret,
        'code': code,
        'redirect_uri': self.redirect_uri,
        'scope': self.scope
        })
    headers = {
        'user-agent': self.user_agent,
        'content-type': 'application/x-www-form-urlencoded'
        }
    if http is None:
      http = httplib2.Http()
    resp, content = http.request(self.token_uri, method='POST', body=body,
                                 headers=headers)
    if resp.status == 200:
      # TODO(jcgregorio) Raise an error if simplejson.loads fails?
      d = simplejson.loads(content)
      access_token = d['access_token']
      refresh_token = d.get('refresh_token', None)
      token_expiry = None
      if 'expires_in' in d:
        token_expiry = datetime.datetime.now() + datetime.timedelta(
            seconds=int(d['expires_in']))

      # NOTE(review): this logs the complete token response (including the
      # access token) at INFO level; consider redacting in production.
      logging.info('Successfully retrieved access token: %s' % content)
      return OAuth2Credentials(access_token, self.client_id,
                               self.client_secret, refresh_token,
                               token_expiry, self.token_uri,
                               self.user_agent)
    else:
      logging.error('Failed to retrieve access token: %s' % content)
      error_msg = 'Invalid response %s.' % resp['status']
      try:
        d = simplejson.loads(content)
        if 'error' in d:
          error_msg = d['error']
      except Exception:
        # Body was not JSON; keep the generic message.
        # (Was a bare except; narrowed so SystemExit et al. propagate.)
        pass
      raise FlowExchangeError(error_msg)
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for OAuth.
Utilities for making it easier to work with OAuth 2.0
credentials.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import pickle
import threading
from client import Storage as BaseStorage
class Storage(BaseStorage):
  """Store and retrieve a single credential to and from a file.

  The credential is pickled to the file; access is serialized with a
  threading.Lock so concurrent get/put calls from multiple threads do not
  interleave reads and writes.
  """

  def __init__(self, filename):
    """Constructor.

    Args:
      filename: string, path of the file holding the pickled credential.
    """
    self._filename = filename
    self._lock = threading.Lock()

  def get(self):
    """Retrieve the Credential from file.

    Returns:
      apiclient.oauth2client.client.Credentials, or None when the file is
      missing, unreadable, or does not contain a valid pickle.
    """
    self._lock.acquire()
    try:
      f = open(self._filename, 'r')
      try:
        credentials = pickle.loads(f.read())
      finally:
        f.close()
      # Wire the credential back to this storage so refreshes persist.
      credentials.set_store(self.put)
    except Exception:
      # Preserve the original best-effort contract: any failure simply
      # yields no credential. (Was a bare except.)
      credentials = None
    finally:
      # BUG FIX: release the lock even when reading raises, so a failed
      # get() cannot deadlock every later call.
      self._lock.release()
    return credentials

  def put(self, credentials):
    """Write a pickled Credentials to file.

    Args:
      credentials: Credentials, the credentials to store.
    """
    self._lock.acquire()
    try:
      f = open(self._filename, 'w')
      try:
        f.write(pickle.dumps(credentials))
      finally:
        f.close()
    finally:
      # BUG FIX: the original leaked the lock if open()/write() raised,
      # permanently blocking all other get/put calls.
      self._lock.release()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OAuth 2.0 utilities for Django.
Utilities for using OAuth 2.0 in conjunction with
the Django datastore.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import apiclient.oauth2client
import base64
import pickle
from django.db import models
from apiclient.oauth2client.client import Storage as BaseStorage
class CredentialsField(models.Field):
  # Django model field that transparently (de)serializes a Credentials
  # object as a base64-encoded pickle in a VARCHAR column.
  # SubfieldBase makes to_python() run on every assignment, so the
  # attribute always holds a live object rather than its encoding.
  __metaclass__ = models.SubfieldBase

  def db_type(self, connection=None):
    # Stored as an opaque base64 string.
    return 'VARCHAR'

  def to_python(self, value):
    # Empty or None database values mean "no credential".
    if not value:
      return None
    # Already a live Credentials object (e.g. assigned from Python code).
    if isinstance(value, apiclient.oauth2client.client.Credentials):
      return value
    # Otherwise it is the base64-encoded pickle read from the database.
    return pickle.loads(base64.b64decode(value))

  def get_db_prep_value(self, value):
    # NOTE(review): pickling credentials into the DB means any code that
    # can read this column can impersonate the user — confirm access model.
    return base64.b64encode(pickle.dumps(value))
class FlowField(models.Field):
  # Django model field that transparently (de)serializes a Flow object as
  # a base64-encoded pickle in a VARCHAR column.
  __metaclass__ = models.SubfieldBase

  def db_type(self, connection=None):
    return 'VARCHAR'

  def to_python(self, value):
    # NOTE(review): unlike CredentialsField.to_python this only guards
    # against None, so an empty string from the DB falls through to
    # b64decode/pickle.loads and would raise — confirm '' cannot occur.
    if value is None:
      return None
    if isinstance(value, apiclient.oauth2client.client.Flow):
      return value
    return pickle.loads(base64.b64decode(value))

  def get_db_prep_value(self, value):
    return base64.b64encode(pickle.dumps(value))
class Storage(BaseStorage):
  """Store and retrieve a single credential to and from the datastore.

  This Storage helper presumes the Credentials have been stored as a
  CredentialsField on a db model class.
  """

  def __init__(self, model_class, key_name, key_value, property_name):
    """Constructor for Storage.

    Args:
      model_class: db.Model, model class.
      key_name: string, key name for the entity that has the credentials.
      key_value: string, key value for the entity that has the credentials.
      property_name: string, name of the property that is a
        CredentialsField.
    """
    self.model_class = model_class
    self.key_name = key_name
    self.key_value = key_value
    self.property_name = property_name

  def get(self):
    """Retrieve the Credential from the datastore.

    Returns:
      apiclient.oauth2client.Credentials, or None when no matching row
      exists or the row holds no credential.
    """
    credential = None
    query = {self.key_name: self.key_value}
    entities = self.model_class.objects.filter(**query)
    if entities:  # queryset truthiness instead of len(...) > 0
      credential = getattr(entities[0], self.property_name)
      # Wire the credential back to this storage so refreshes persist.
      if credential and hasattr(credential, 'set_store'):
        credential.set_store(self.put)
    return credential

  def put(self, credentials):
    """Write a Credentials to the datastore.

    NOTE(review): this always instantiates a new model object; if a row
    with the same key already exists this may create a duplicate rather
    than update it — confirm against the model's primary-key setup.

    Args:
      credentials: Credentials, the credentials to store.
    """
    args = {self.key_name: self.key_value}
    entity = self.model_class(**args)
    setattr(entity, self.property_name, credentials)
    entity.save()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility module to import a JSON module
Hides all the messy details of exactly where
we get a simplejson module from.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
try: # pragma: no cover
import simplejson
except ImportError: # pragma: no cover
try:
# Try to import from django, should work on App Engine
from django.utils import simplejson
except ImportError:
# Should work for Python2.6 and higher.
import json as simplejson
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for OAuth.
Utilities for making it easier to work with OAuth.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import copy
import httplib2
import logging
import oauth2 as oauth
import urllib
import urlparse
from anyjson import simplejson
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
class Error(Exception):
  """Root of the exception hierarchy for this module."""
class RequestError(Error):
  """An HTTP request to one of the OAuth endpoints failed."""
class MissingParameter(Error):
  """A parameter the discovery document marks as required was not supplied."""
class CredentialsInvalidError(Error):
  """The stored credentials were rejected (e.g. revoked) by the server."""
def _abstract():
raise NotImplementedError('You need to override this function')
def _oauth_uri(name, discovery, params):
  """Build the full URI for one of the OAuth endpoints.

  Looks up the endpoint in the discovery document and appends, as query
  parameters, the subset of params that the endpoint declares.

  Args:
    name: string, which OAuth URI to look up — one of 'request',
      'access', or 'authorize'.
    discovery: dict, portion of the discovery document describing the
      OAuth endpoints.
    params: dict, source of candidate query-parameter values.

  Raises:
    KeyError: if name is not one of the three known endpoints.
  """
  if name not in ['request', 'access', 'authorize']:
    raise KeyError(name)
  endpoint = discovery[name]
  query = {}
  for key in endpoint['parameters'].keys():
    if key in params:
      query[key] = params[key]
  return endpoint['url'] + '?' + urllib.urlencode(query)
class Credentials(object):
  """Abstract base for all Credentials objects.

  Subclasses must implement authorize(), which instruments an
  httplib2.Http-compatible transport so that every request it issues
  carries the appropriate authorization headers.
  """

  def authorize(self, http):
    """Attach these credentials to *http* and return it.

    Typically implemented by replacing http.request() with a wrapper
    that injects headers and then delegates to the original
    Http.request() method.
    """
    _abstract()
class Flow(object):
  """Common ancestor for every OAuth flow implementation."""
class Storage(object):
  """Abstract interface for persisting a single credential.

  Concrete subclasses decide where the credential lives; this base only
  fixes the get/put contract.
  """

  def get(self):
    """Retrieve the stored credential.

    Returns:
      apiclient.oauth.Credentials
    """
    _abstract()

  def put(self, credentials):
    """Persist a credential.

    Args:
      credentials: Credentials, the credentials to store.
    """
    _abstract()
class OAuthCredentials(Credentials):
  """Credentials object for OAuth 1.0a."""

  def __init__(self, consumer, token, user_agent):
    """
    consumer - An instance of oauth.Consumer.
    token - An instance of oauth.Token constructed with
      the access token and secret.
    user_agent - The HTTP User-Agent to provide for this application.
    """
    self.consumer = consumer
    self.token = token
    self.user_agent = user_agent
    # Set via set_store(); used to persist the credential once it is
    # discovered to be invalid.
    self.store = None

    # True if the credentials have been revoked
    self._invalid = False

  @property
  def invalid(self):
    """True if the credentials are invalid, such as being revoked."""
    # getattr default keeps instances unpickled from older versions
    # (which lack _invalid) working.
    return getattr(self, "_invalid", False)

  def set_store(self, store):
    """Set the storage for the credential.

    Args:
      store: callable, a callable that when passed a Credential
        will store the credential back to where it came from.
        This is needed to store the latest access_token if it
        has been revoked.
    """
    self.store = store

  def __getstate__(self):
    """Trim the state down to something that can be pickled."""
    # The store callable is not picklable; drop it, __setstate__ resets it.
    d = copy.copy(self.__dict__)
    del d['store']
    return d

  def __setstate__(self, state):
    """Reconstitute the state of the object from being pickled."""
    self.__dict__.update(state)
    self.store = None

  def authorize(self, http):
    """
    Args:
      http - An instance of httplib2.Http
        or something that acts like it.

    Returns:
      A modified instance of http that was passed in.

    Example:

      h = httplib2.Http()
      h = credentials.authorize(h)

    You can't create a new OAuth subclass of httplib2.Authentication
    because it never gets passed the absolute URI, which is needed for
    signing. So instead we have to overload 'request' with a closure
    that adds in the Authorization header and then calls the original
    version of 'request()'.
    """
    request_orig = http.request
    signer = oauth.SignatureMethod_HMAC_SHA1()

    # The closure that will replace 'httplib2.Http.request'.
    def new_request(uri, method='GET', body=None, headers=None,
                    redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                    connection_type=None):
      """Modify the request headers to add the appropriate
      Authorization header."""
      response_code = 302
      # Redirects are followed manually because each hop must be
      # re-signed: the OAuth 1.0a signature covers the absolute URI.
      http.follow_redirects = False
      while response_code in [301, 302]:
        req = oauth.Request.from_consumer_and_token(
            self.consumer, self.token, http_method=method, http_url=uri)
        req.sign_request(signer, self.consumer, self.token)
        if headers is None:
          headers = {}
        headers.update(req.to_header())
        if 'user-agent' in headers:
          headers['user-agent'] = self.user_agent + ' ' + headers['user-agent']
        else:
          headers['user-agent'] = self.user_agent
        resp, content = request_orig(uri, method, body, headers,
                                     redirections, connection_type)
        response_code = resp.status
        if response_code in [301, 302]:
          uri = resp['location']

      # Update the stored credential if it becomes invalid.
      if response_code == 401:
        logging.info('Access token no longer valid: %s' % content)
        self._invalid = True
        if self.store is not None:
          self.store(self)
        raise CredentialsInvalidError("Credentials are no longer valid.")

      return resp, content

    http.request = new_request
    return http
class FlowThreeLegged(Flow):
  """Does the Three Legged Dance for OAuth 1.0a."""

  def __init__(self, discovery, consumer_key, consumer_secret, user_agent,
      **kwargs):
    """
    discovery - Section of the API discovery document that describes
      the OAuth endpoints.
    consumer_key - OAuth consumer key
    consumer_secret - OAuth consumer secret
    user_agent - The HTTP User-Agent that identifies the application.
    **kwargs - The keyword arguments are all optional and required
      parameters for the OAuth calls.
    """
    self.discovery = discovery
    self.consumer_key = consumer_key
    self.consumer_secret = consumer_secret
    self.user_agent = user_agent
    self.params = kwargs
    # Populated by step1_get_authorize_url(); consumed by step2_exchange().
    self.request_token = {}
    required = {}
    # Collect every non-oauth_* parameter the discovery document marks as
    # required, across all endpoints, and insist the caller supplied each
    # one up front rather than failing mid-dance.
    for uriinfo in discovery.itervalues():
      for name, value in uriinfo['parameters'].iteritems():
        if value['required'] and not name.startswith('oauth_'):
          required[name] = 1
    for key in required.iterkeys():
      if key not in self.params:
        raise MissingParameter('Required parameter %s not supplied' % key)

  def step1_get_authorize_url(self, oauth_callback='oob'):
    """Returns a URI to redirect to the provider.

    oauth_callback - Either the string 'oob' for a non-web-based
      application, or a URI that handles the callback from the
      authorization server.

    If oauth_callback is 'oob' then pass in the
    generated verification code to step2_exchange,
    otherwise pass in the query parameters received
    at the callback uri to step2_exchange.
    """
    consumer = oauth.Consumer(self.consumer_key, self.consumer_secret)
    client = oauth.Client(consumer)
    headers = {
        'user-agent': self.user_agent,
        'content-type': 'application/x-www-form-urlencoded'
    }
    body = urllib.urlencode({'oauth_callback': oauth_callback})
    uri = _oauth_uri('request', self.discovery, self.params)
    # First leg: obtain an unauthorized request token.
    resp, content = client.request(uri, 'POST', headers=headers,
                                   body=body)
    if resp['status'] != '200':
      logging.error('Failed to retrieve temporary authorization: %s', content)
      raise RequestError('Invalid response %s.' % resp['status'])
    self.request_token = dict(parse_qsl(content))
    # Second leg: build the user-facing authorize URL, carrying the
    # request token plus any caller-supplied parameters.
    auth_params = copy.copy(self.params)
    auth_params['oauth_token'] = self.request_token['oauth_token']
    return _oauth_uri('authorize', self.discovery, auth_params)

  def step2_exchange(self, verifier):
    """Exchanges an authorized request token for OAuthCredentials.

    Args:
      verifier: string, dict - either the verifier token, or a dictionary
        of the query parameters to the callback, which contains
        the oauth_verifier.

    Returns:
      The Credentials object.
    """
    # Accept either the raw verifier or the callback's query-param dict.
    if not (isinstance(verifier, str) or isinstance(verifier, unicode)):
      verifier = verifier['oauth_verifier']
    token = oauth.Token(
        self.request_token['oauth_token'],
        self.request_token['oauth_token_secret'])
    token.set_verifier(verifier)
    consumer = oauth.Consumer(self.consumer_key, self.consumer_secret)
    client = oauth.Client(consumer, token)
    headers = {
        'user-agent': self.user_agent,
        'content-type': 'application/x-www-form-urlencoded'
    }
    uri = _oauth_uri('access', self.discovery, self.params)
    # Third leg: trade the verified request token for an access token.
    resp, content = client.request(uri, 'POST', headers=headers)
    if resp['status'] != '200':
      logging.error('Failed to retrieve access token: %s', content)
      raise RequestError('Invalid response %s.' % resp['status'])
    oauth_params = dict(parse_qsl(content))
    token = oauth.Token(
        oauth_params['oauth_token'],
        oauth_params['oauth_token_secret'])
    return OAuthCredentials(consumer, token, self.user_agent)
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for discovery based APIs
A client library for Google's discovery based APIs.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
__all__ = [
'build', 'build_from_document'
]
import httplib2
import logging
import os
import re
import uritemplate
import urllib
import urlparse
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
from http import HttpRequest
from anyjson import simplejson
from model import JsonModel
from errors import UnknownLinkType
from errors import HttpError
from errors import InvalidJsonError
# Matches a single {name} expansion inside a URI Template.
URITEMPLATE = re.compile('{[^}]*}')
# Matches the variable name allowed inside such an expansion.
VARNAME = re.compile('[a-zA-Z0-9_-]+')
# URI Template for fetching a service's discovery document; expanded with
# {api} and {apiVersion} in build().
DISCOVERY_URI = ('https://www.googleapis.com/discovery/v1/apis/'
                 '{api}/{apiVersion}/rest')
# Fallback docstring for generated methods lacking a description.
DEFAULT_METHOD_DOC = 'A description of how to use this function'
# Query parameters that work, but don't appear in discovery
STACK_QUERY_PARAMETERS = ['trace', 'fields', 'pp', 'prettyPrint', 'userIp',
                          'strict']
def key2param(key):
  """Converts key names into parameter names.

  For example, converting "max-results" -> "max_results".
  Any character that is not alphanumeric becomes an underscore, and a
  leading non-alphabetic character is prefixed with 'x' so the result is
  a valid Python identifier.
  """
  chars = []
  if not key[0].isalpha():
    chars.append('x')
  for ch in key:
    chars.append(ch if ch.isalnum() else '_')
  return ''.join(chars)
def build(serviceName, version,
    http=None,
    discoveryServiceUrl=DISCOVERY_URI,
    developerKey=None,
    model=None,
    requestBuilder=HttpRequest):
  """Construct a Resource for interacting with an API.

  Construct a Resource object for interacting with
  an API. The serviceName and version are the
  names from the Discovery service.

  Args:
    serviceName: string, name of the service
    version: string, the version of the service
    http: httplib2.Http, transport used to fetch the discovery document;
      a default Http() instance is created when None.
    discoveryServiceUrl: string, a URI Template that points to
      the location of the discovery service. It should have two
      parameters {api} and {apiVersion} that when filled in
      produce an absolute URI to the discovery document for
      that service.
    developerKey: string, key obtained
      from https://code.google.com/apis/console
    model: apiclient.Model, converts to and from the wire format
    requestBuilder: apiclient.http.HttpRequest, encapsulator for
      an HTTP request

  Returns:
    A Resource object with methods for interacting with
    the service.

  Raises:
    HttpError: if fetching the discovery document fails (status >= 400).
    InvalidJsonError: if the discovery document is not valid JSON.
  """
  params = {
      'api': serviceName,
      'apiVersion': version
      }

  if http is None:
    http = httplib2.Http()
  requested_url = uritemplate.expand(discoveryServiceUrl, params)
  logging.info('URL being requested: %s' % requested_url)
  resp, content = http.request(requested_url)
  # Treat every 4xx/5xx response as an error.  The previous strict '>'
  # comparison let an actual 400 Bad Request fall through to JSON parsing.
  if resp.status >= 400:
    raise HttpError(resp, content, requested_url)
  try:
    service = simplejson.loads(content)
  except ValueError:
    logging.error('Failed to parse as JSON: ' + content)
    raise InvalidJsonError()

  # Optional 'future.json' ships alongside the library and describes
  # capabilities (such as pagination) not yet in the discovery document.
  fn = os.path.join(os.path.dirname(__file__), 'contrib',
      serviceName, 'future.json')
  try:
    # open() instead of the deprecated file() builtin.
    f = open(fn, 'r')
    future = f.read()
    f.close()
  except IOError:
    # Missing future.json is expected for most services.
    future = None

  return build_from_document(content, discoveryServiceUrl, future,
      http, developerKey, model, requestBuilder)
def build_from_document(
    service,
    base,
    future=None,
    http=None,
    developerKey=None,
    model=None,
    requestBuilder=HttpRequest):
  """Create a Resource for interacting with an API.

  Same as `build()`, but constructs the Resource object
  from a discovery document that it is given, as opposed to
  retrieving one over HTTP.

  Args:
    service: string, discovery document
    base: string, base URI for all HTTP requests, usually the discovery URI
    future: string, discovery document with future capabilities
    http: httplib2.Http, An instance of httplib2.Http or something that acts
      like it that HTTP requests will be made through.
    developerKey: string, Key for controlling API usage, generated
      from the API Console.
    model: Model class instance that serializes and
      de-serializes requests and responses.
    requestBuilder: Takes an http request and packages it up to be executed.

  Returns:
    A Resource object with methods for interacting with
    the service.
  """
  service = simplejson.loads(service)
  base = urlparse.urljoin(base, service['basePath'])

  # Parse the optional future-capabilities document; an absent or empty
  # document behaves like an empty dict.
  if future:
    future = simplejson.loads(future)
  else:
    future = {}
  auth_discovery = future.get('auth', {})

  if model is None:
    model = JsonModel('dataWrapper' in service.get('features', []))

  resource = createResource(http, base, model, requestBuilder, developerKey,
                            service, future)

  def auth_method():
    """Discovery information about the authentication the API uses."""
    return auth_discovery

  setattr(resource, 'auth_discovery', auth_method)
  return resource
def _cast(value, schema_type):
"""Convert value to a string based on JSON Schema type.
See http://tools.ietf.org/html/draft-zyp-json-schema-03 for more details on
JSON Schema.
Args:
value: any, the value to convert
schema_type: string, the type that value should be interpreted as
Returns:
A string representation of 'value' based on the schema_type.
"""
if schema_type == 'string':
if type(value) == type('') or type(value) == type(u''):
return value
else:
return str(value)
elif schema_type == 'integer':
return str(int(value))
elif schema_type == 'number':
return str(float(value))
elif schema_type == 'boolean':
return str(bool(value)).lower()
else:
if type(value) == type('') or type(value) == type(u''):
return value
else:
return str(value)
def createResource(http, baseUrl, model, requestBuilder,
                   developerKey, resourceDesc, futureDesc):
  """Build a Resource object from a discovery-document fragment.

  Args:
    http: httplib2.Http-like transport for making requests.
    baseUrl: string, base URI that method paths are joined onto.
    model: Model, serializes/deserializes request and response bodies.
    requestBuilder: callable (apiclient.http.HttpRequest) that wraps an
      HTTP request for later execution.
    developerKey: string or None, API key appended to every request.
    resourceDesc: dict, discovery description of this resource (its
      'methods' and nested 'resources').
    futureDesc: dict or None, 'future' capabilities such as pagination.

  Returns:
    A Resource instance with one generated method per API method and one
    accessor per nested resource.
  """

  class Resource(object):
    """A class for interacting with a resource."""

    def __init__(self):
      self._http = http
      self._baseUrl = baseUrl
      self._model = model
      self._developerKey = developerKey
      self._requestBuilder = requestBuilder

  def createMethod(theclass, methodName, methodDesc, futureDesc):
    # Generate one API method from its discovery description and attach it
    # to theclass under methodName.
    pathUrl = methodDesc['path']
    httpMethod = methodDesc['httpMethod']
    methodId = methodDesc['id']

    if 'parameters' not in methodDesc:
      methodDesc['parameters'] = {}
    # Stack-level query parameters are accepted by every method even
    # though they never appear in the discovery document.
    for name in STACK_QUERY_PARAMETERS:
      methodDesc['parameters'][name] = {
          'type': 'string',
          'location': 'query'
          }

    # Methods that carry a payload get a synthetic required 'body' param.
    if httpMethod in ['PUT', 'POST', 'PATCH']:
      methodDesc['parameters']['body'] = {
          'description': 'The request body.',
          'type': 'object',
          'required': True,
          }

    argmap = {} # Map from method parameter name to query parameter name
    required_params = [] # Required parameters
    repeated_params = [] # Repeated parameters
    pattern_params = {} # Parameters that must match a regex
    query_params = [] # Parameters that will be used in the query string
    path_params = {} # Parameters that will be used in the base URL
    param_type = {} # The type of the parameter
    enum_params = {} # Allowable enumeration values for each parameter

    if 'parameters' in methodDesc:
      for arg, desc in methodDesc['parameters'].iteritems():
        param = key2param(arg)
        argmap[param] = arg
        if desc.get('pattern', ''):
          pattern_params[param] = desc['pattern']
        if desc.get('enum', ''):
          enum_params[param] = desc['enum']
        if desc.get('required', False):
          required_params.append(param)
        if desc.get('repeated', False):
          repeated_params.append(param)
        if desc.get('location') == 'query':
          query_params.append(param)
        if desc.get('location') == 'path':
          path_params[param] = param
        param_type[param] = desc.get('type', 'string')

    # Any variable referenced by the URI template is treated as a path
    # parameter, even if the discovery document also listed it as 'query'.
    for match in URITEMPLATE.finditer(pathUrl):
      for namematch in VARNAME.finditer(match.group(0)):
        name = key2param(namematch.group(0))
        path_params[name] = name
        if name in query_params:
          query_params.remove(name)

    def method(self, **kwargs):
      # Validate kwargs: no unknown names, all required present, pattern
      # and enum constraints respected.
      for name in kwargs.iterkeys():
        if name not in argmap:
          raise TypeError('Got an unexpected keyword argument "%s"' % name)
      for name in required_params:
        if name not in kwargs:
          raise TypeError('Missing required parameter "%s"' % name)
      for name, regex in pattern_params.iteritems():
        if name in kwargs:
          if re.match(regex, kwargs[name]) is None:
            raise TypeError(
                'Parameter "%s" value "%s" does not match the pattern "%s"' %
                (name, kwargs[name], regex))
      for name, enums in enum_params.iteritems():
        if name in kwargs:
          if kwargs[name] not in enums:
            raise TypeError(
                'Parameter "%s" value "%s" is not an allowed value in "%s"' %
                (name, kwargs[name], str(enums)))

      # Split kwargs into path vs query parameters, casting each value to
      # its JSON Schema string form.
      actual_query_params = {}
      actual_path_params = {}
      for key, value in kwargs.iteritems():
        to_type = param_type.get(key, 'string')
        # For repeated parameters we cast each member of the list.
        if key in repeated_params and type(value) == type([]):
          cast_value = [_cast(x, to_type) for x in value]
        else:
          cast_value = _cast(value, to_type)
        if key in query_params:
          actual_query_params[argmap[key]] = cast_value
        if key in path_params:
          actual_path_params[argmap[key]] = cast_value
      body_value = kwargs.get('body', None)

      if self._developerKey:
        actual_query_params['key'] = self._developerKey

      headers = {}
      headers, params, query, body = self._model.request(headers,
          actual_path_params, actual_query_params, body_value)

      # TODO(ade) This exists to fix a bug in V1 of the Buzz discovery
      # document. Base URLs should not contain any path elements. If they do
      # then urlparse.urljoin will strip them out This results in an incorrect
      # URL which returns a 404
      url_result = urlparse.urlsplit(self._baseUrl)
      new_base_url = url_result[0] + '://' + url_result[1]

      expanded_url = uritemplate.expand(pathUrl, params)
      url = urlparse.urljoin(self._baseUrl,
                             url_result[2] + expanded_url + query)

      logging.info('URL being requested: %s' % url)
      # The request is not executed here: callers receive an HttpRequest
      # and decide when to execute it.
      return self._requestBuilder(self._http,
                                  self._model.response,
                                  url,
                                  method=httpMethod,
                                  body=body,
                                  headers=headers,
                                  methodId=methodId)

    # Assemble a docstring for the generated method from the discovery
    # parameter descriptions.
    docs = [methodDesc.get('description', DEFAULT_METHOD_DOC), '\n\n']
    if len(argmap) > 0:
      docs.append('Args:\n')
    for arg in argmap.iterkeys():
      if arg in STACK_QUERY_PARAMETERS:
        continue
      repeated = ''
      if arg in repeated_params:
        repeated = ' (repeated)'
      required = ''
      if arg in required_params:
        required = ' (required)'
      paramdesc = methodDesc['parameters'][argmap[arg]]
      paramdoc = paramdesc.get('description', 'A parameter')
      paramtype = paramdesc.get('type', 'string')
      docs.append('  %s: %s, %s%s%s\n' % (arg, paramtype, paramdoc, required,
                                          repeated))
      enum = paramdesc.get('enum', [])
      enumDesc = paramdesc.get('enumDescriptions', [])
      if enum and enumDesc:
        docs.append('    Allowed values\n')
        for (name, desc) in zip(enum, enumDesc):
          docs.append('      %s - %s\n' % (name, desc))
    setattr(method, '__doc__', ''.join(docs))
    setattr(theclass, methodName, method)

  def createNextMethod(theclass, methodName, methodDesc, futureDesc):
    # Attach a '<method>_next' pagination helper described by futureDesc.
    methodId = methodDesc['id'] + '.next'

    def methodNext(self, previous):
      """Takes the results from the last call and returns an HttpRequest
      for the next set of items in the collection.

      Returns None if there are no more items in the collection.
      """
      if futureDesc['type'] != 'uri':
        raise UnknownLinkType(futureDesc['type'])
      try:
        # Walk the 'location' key path into the previous response to find
        # the next-page URI.
        p = previous
        for key in futureDesc['location']:
          p = p[key]
        url = p
      except (KeyError, TypeError):
        return None
      if self._developerKey:
        parsed = list(urlparse.urlparse(url))
        q = parse_qsl(parsed[4])
        q.append(('key', self._developerKey))
        parsed[4] = urllib.urlencode(q)
        url = urlparse.urlunparse(parsed)
      headers = {}
      headers, params, query, body = self._model.request(headers, {}, {}, None)
      logging.info('URL being requested: %s' % url)
      # NOTE(review): this response is fetched but never used -- the method
      # returns a fresh HttpRequest below.  Confirm the eager request is
      # intentional; it looks like leftover code.
      resp, content = self._http.request(url, method='GET', headers=headers)
      return self._requestBuilder(self._http,
                                  self._model.response,
                                  url,
                                  method='GET',
                                  headers=headers,
                                  methodId=methodId)

    setattr(theclass, methodName, methodNext)

  # Add basic methods to Resource
  if 'methods' in resourceDesc:
    for methodName, methodDesc in resourceDesc['methods'].iteritems():
      if futureDesc:
        future = futureDesc['methods'].get(methodName, {})
      else:
        future = None
      createMethod(Resource, methodName, methodDesc, future)

  # Add in nested resources
  if 'resources' in resourceDesc:

    def createResourceMethod(theclass, methodName, methodDesc, futureDesc):
      # Attach an accessor that lazily builds the nested Resource.
      def methodResource(self):
        return createResource(self._http, self._baseUrl, self._model,
                              self._requestBuilder, self._developerKey,
                              methodDesc, futureDesc)

      setattr(methodResource, '__doc__', 'A collection resource.')
      setattr(methodResource, '__is_resource__', True)
      setattr(theclass, methodName, methodResource)

    for methodName, methodDesc in resourceDesc['resources'].iteritems():
      if futureDesc and 'resources' in futureDesc:
        future = futureDesc['resources'].get(methodName, {})
      else:
        future = {}
      createResourceMethod(Resource, methodName, methodDesc, future)

  # Add <m>_next() methods to Resource
  if futureDesc and 'methods' in futureDesc:
    for methodName, methodDesc in futureDesc['methods'].iteritems():
      if 'next' in methodDesc and methodName in resourceDesc['methods']:
        createNextMethod(Resource, methodName + '_next',
                         resourceDesc['methods'][methodName],
                         methodDesc['next'])

  return Resource()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Google App Engine
Utilities for making it easier to use the
Google API Client for Python on Google App Engine.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import pickle
from google.appengine.ext import db
from apiclient.oauth import OAuthCredentials
from apiclient.oauth import FlowThreeLegged
class FlowThreeLeggedProperty(db.Property):
  """Utility property that allows easy storage and retrieval of an
  apiclient.oauth.FlowThreeLegged by pickling it into a datastore Blob.
  """

  # Tell what the user type is.
  data_type = FlowThreeLegged

  # For writing to datastore: pickle the flow into a Blob.
  def get_value_for_datastore(self, model_instance):
    flow = super(FlowThreeLeggedProperty,
                 self).get_value_for_datastore(model_instance)
    return db.Blob(pickle.dumps(flow))

  # For reading from datastore: unpickle, passing None through.
  def make_value_from_datastore(self, value):
    if value is None:
      return None
    return pickle.loads(value)

  def validate(self, value):
    """Reject values that are neither None nor a FlowThreeLegged."""
    if value is not None and not isinstance(value, FlowThreeLegged):
      # BadValueError lives on the db module; the previous unqualified
      # name raised a NameError whenever validation actually failed.
      raise db.BadValueError('Property %s must be convertible '
                             'to a FlowThreeLegged instance (%s)' %
                             (self.name, value))
    return super(FlowThreeLeggedProperty, self).validate(value)

  def empty(self, value):
    return not value
class OAuthCredentialsProperty(db.Property):
  """Utility property that allows easy storage and retrieval of
  apiclient.oauth.OAuthCredentials by pickling it into a datastore Blob.
  """

  # Tell what the user type is.
  data_type = OAuthCredentials

  # For writing to datastore: pickle the credentials into a Blob.
  def get_value_for_datastore(self, model_instance):
    cred = super(OAuthCredentialsProperty,
                 self).get_value_for_datastore(model_instance)
    return db.Blob(pickle.dumps(cred))

  # For reading from datastore: unpickle, passing None through.
  def make_value_from_datastore(self, value):
    if value is None:
      return None
    return pickle.loads(value)

  def validate(self, value):
    """Reject values that are neither None nor OAuthCredentials."""
    if value is not None and not isinstance(value, OAuthCredentials):
      # BadValueError lives on the db module; the previous unqualified
      # name raised a NameError whenever validation actually failed.
      raise db.BadValueError('Property %s must be convertible '
                             'to an OAuthCredentials instance (%s)' %
                             (self.name, value))
    return super(OAuthCredentialsProperty, self).validate(value)

  def empty(self, value):
    return not value
class StorageByKeyName(object):
  """Store and retrieve a single credential to and from
  the App Engine datastore.

  This Storage helper presumes the Credentials
  have been stored as a CredentialsProperty
  on a datastore model class, and that entities
  are stored by key_name.
  """

  def __init__(self, model, key_name, property_name):
    """Constructor for Storage.

    Args:
      model: db.Model, model class
      key_name: string, key name for the entity that has the credentials
      property_name: string, name of the property that is a CredentialsProperty
    """
    self.model = model
    self.key_name = key_name
    self.property_name = property_name

  def get(self):
    """Retrieve Credential from datastore.

    Returns:
      Credentials
    """
    record = self.model.get_or_insert(self.key_name)
    stored = getattr(record, self.property_name)
    # Re-attach the store callback so future refreshes persist themselves.
    if stored and hasattr(stored, 'set_store'):
      stored.set_store(self.put)
    return stored

  def put(self, credentials):
    """Write a Credentials to the datastore.

    Args:
      credentials: Credentials, the credentials to store.
    """
    record = self.model.get_or_insert(self.key_name)
    setattr(record, self.property_name, credentials)
    record.put()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line tools for authenticating via OAuth 1.0
Do the OAuth 1.0 Three Legged Dance for
a command line application. Stores the generated
credentials in a common file that is used by
other example apps in the same directory.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
__all__ = ["run"]
import BaseHTTPServer
import logging
import socket
import sys
from optparse import OptionParser
from apiclient.oauth import RequestError
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
  """HTTP server that receives the OAuth 1.0 redirect on localhost.

  Serves exactly one request; the handler parses the redirect's query
  string into the query_params attribute before serving stops.
  """
  # Populated by ClientRedirectHandler with the parsed redirect query.
  query_params = {}
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """Request handler for the localhost OAuth redirect.

  Parses the query parameters of the single incoming request into the
  owning server's query_params and then lets serving stop.
  """

  def do_GET(self):
    """Handle a GET request.

    Records the redirect's query parameters on the server and shows a
    completion message.  Note that we can't detect if an error occurred.
    """
    self.send_response(200)
    self.send_header("Content-type", "text/html")
    self.end_headers()
    raw_query = self.path.split('?', 1)[-1]
    self.server.query_params = dict(parse_qsl(raw_query))
    for chunk in ("<html><head><title>Authentication Status</title></head>",
                  "<body><p>The authentication flow has completed.</p>",
                  "</body></html>"):
      self.wfile.write(chunk)

  def log_message(self, format, *args):
    """Do not log messages to stdout while running as command line program."""
    pass
def run(flow, storage):
  """Core code for a command-line application.

  Steps an OAuth 1.0 three-legged flow through authorization, collecting
  the verification code either via a temporary localhost web server or by
  prompting the user, then exchanges it for credentials and stores them.

  Args:
    flow: Flow, an OAuth 1.0 Flow to step through.
    storage: Storage, a Storage to store the credential in.

  Returns:
    Credentials, the obtained credential.

  Raises:
    SystemExit: if the authorization is rejected or step2 of the flow
      fails (RequestError).
  """
  parser = OptionParser()
  parser.add_option("-p", "--no_local_web_server", dest="localhost",
      action="store_false",
      default=True,
      help="Do not run a web server on localhost to handle redirect URIs")
  parser.add_option("-w", "--local_web_server", dest="localhost",
      action="store_true",
      default=True,
      help="Run a web server on localhost to handle redirect URIs")
  (options, args) = parser.parse_args()

  host_name = 'localhost'
  port_numbers = [8080, 8090]
  # Try the first port, fall back to the second; if both are taken fall
  # back to the out-of-band ('oob') flow with manual code entry.
  if options.localhost:
    server_class = BaseHTTPServer.HTTPServer
    try:
      port_number = port_numbers[0]
      httpd = server_class((host_name, port_number), ClientRedirectHandler)
    except socket.error:
      port_number = port_numbers[1]
      try:
        httpd = server_class((host_name, port_number), ClientRedirectHandler)
      except socket.error:
        options.localhost = False
  if options.localhost:
    oauth_callback = 'http://%s:%s/' % (host_name, port_number)
  else:
    oauth_callback = 'oob'
  authorize_url = flow.step1_get_authorize_url(oauth_callback)
  print 'Go to the following link in your browser:'
  print authorize_url
  print
  if options.localhost:
    # Block until the provider redirects the browser back to us.
    httpd.handle_request()
    if 'error' in httpd.query_params:
      sys.exit('Authentication request was rejected.')
    # NOTE(review): if the redirect carries neither 'error' nor
    # 'oauth_verifier', 'code' is left unbound and step2_exchange below
    # raises NameError -- confirm whether that case can occur.
    if 'oauth_verifier' in httpd.query_params:
      code = httpd.query_params['oauth_verifier']
  else:
    # Manual (out-of-band) flow: user pastes the verification code.
    accepted = 'n'
    while accepted.lower() == 'n':
      accepted = raw_input('Have you authorized me? (y/n) ')
    code = raw_input('What is the verification code? ').strip()
  try:
    credentials = flow.step2_exchange(code)
  except RequestError:
    sys.exit('The authentication has failed.')
  storage.put(credentials)
  # Future token refreshes will persist themselves through storage.
  credentials.set_store(storage.put)
  print "You have successfully authenticated."
  return credentials
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for OAuth.
Utilities for making it easier to work with OAuth 1.0 credentials.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import pickle
import threading
from apiclient.oauth import Storage as BaseStorage
class Storage(BaseStorage):
  """Store and retrieve a single credential to and from a file."""

  def __init__(self, filename):
    # The lock serializes concurrent reads and writes of the file.
    self._filename = filename
    self._lock = threading.Lock()

  def get(self):
    """Retrieve Credential from file.

    Returns:
      apiclient.oauth.Credentials, or None when the file is missing or
      cannot be unpickled (best-effort semantics).
    """
    credentials = None
    self._lock.acquire()
    try:
      try:
        f = open(self._filename, 'r')
        try:
          credentials = pickle.loads(f.read())
        finally:
          # Close the file even when unpickling fails.
          f.close()
        credentials.set_store(self.put)
      except Exception:
        # Deliberate best-effort: any failure yields None.  Narrowed from
        # a bare 'except:' so KeyboardInterrupt/SystemExit still propagate.
        credentials = None
    finally:
      # The lock was previously leaked if any of the above raised.
      self._lock.release()
    return credentials

  def put(self, credentials):
    """Write a pickled Credentials to file.

    Args:
      credentials: Credentials, the credentials to store.
    """
    self._lock.acquire()
    try:
      f = open(self._filename, 'w')
      try:
        f.write(pickle.dumps(credentials))
      finally:
        f.close()
    finally:
      # Guarantee release so one failed write cannot deadlock every
      # subsequent get()/put().
      self._lock.release()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import apiclient
import base64
import pickle
from django.db import models
class OAuthCredentialsField(models.Field):
  """Django model field that stores apiclient.oauth.Credentials.

  Values are pickled and base64-encoded into a VARCHAR column.
  """

  __metaclass__ = models.SubfieldBase

  def db_type(self):
    return 'VARCHAR'

  def to_python(self, value):
    """Deserialize a stored value back into a Credentials object."""
    if value is None:
      return None
    if isinstance(value, apiclient.oauth.Credentials):
      return value
    decoded = base64.b64decode(value)
    return pickle.loads(decoded)

  def get_db_prep_value(self, value):
    """Serialize a Credentials object for storage."""
    pickled = pickle.dumps(value)
    return base64.b64encode(pickled)
class FlowThreeLeggedField(models.Field):
  """Django model field that stores an apiclient.oauth.FlowThreeLegged.

  Values are pickled and base64-encoded into a VARCHAR column.
  """

  __metaclass__ = models.SubfieldBase

  def db_type(self):
    return 'VARCHAR'

  def to_python(self, value):
    """Deserialize a stored value back into a FlowThreeLegged object."""
    # Removed leftover debug statement (print "In to_python", value) that
    # wrote to stdout on every field access.
    if value is None:
      return None
    if isinstance(value, apiclient.oauth.FlowThreeLegged):
      return value
    return pickle.loads(base64.b64decode(value))

  def get_db_prep_value(self, value):
    """Serialize a FlowThreeLegged object for storage."""
    return base64.b64encode(pickle.dumps(value))
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model objects for requests and responses.
Each API may support one or more serializations, such
as JSON, Atom, etc. The model classes are responsible
for converting between the wire format and the Python
object representation.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import gflags
import logging
import urllib
from anyjson import simplejson
from errors import HttpError
# Shared google gflags registry for this process.
FLAGS = gflags.FLAGS

# When set on the command line, every request/response handled by the
# models below is written to the log for debugging.
gflags.DEFINE_boolean('dump_request_response', False,
    'Dump all http server requests and responses. '
    )
def _abstract():
raise NotImplementedError('You need to override this function')
class Model(object):
  """Abstract interface for wire-format (de)serialization.

  A Model converts between a wire format such as JSON and the Python
  object representation of HTTP request and response bodies; concrete
  subclasses must implement both directions.
  """

  def request(self, headers, path_params, query_params, body_value):
    """Updates outgoing requests with a serialized body.

    Args:
      headers: dict, request headers
      path_params: dict, parameters that appear in the request path
      query_params: dict, parameters that appear in the query
      body_value: object, the request body as a Python object, which must be
          serializable.

    Returns:
      A tuple of (headers, path_params, query, body) where headers is a
      dict of request headers, path_params is a dict of parameters that
      appear in the request path, query is the query part of the request
      URI, and body is the body serialized in the desired wire format.
    """
    _abstract()

  def response(self, resp, content):
    """Convert the response wire format into a Python object.

    Args:
      resp: httplib2.Response, the HTTP response headers and status
      content: string, the body of the HTTP response

    Returns:
      The body de-serialized as a Python object.

    Raises:
      apiclient.errors.HttpError if a non 2xx response is received.
    """
    _abstract()
class BaseModel(Model):
  """Base model class.

  Subclasses should provide implementations for the "serialize" and
  "deserialize" methods, as well as values for the following class
  attributes.

  Attributes:
    accept: The value to use for the HTTP Accept header.
    content_type: The value to use for the HTTP Content-type header.
    no_content_response: The value to return when deserializing a 204 "No
        Content" response.
    alt_param: The value to supply as the "alt" query parameter for requests.
  """

  accept = None
  content_type = None
  no_content_response = None
  alt_param = None

  def _log_request(self, headers, path_params, query, body):
    """Logs debugging information about the request if requested."""
    if FLAGS.dump_request_response:
      logging.info('--request-start--')
      logging.info('-headers-start-')
      for h, v in headers.iteritems():
        logging.info('%s: %s', h, v)
      logging.info('-headers-end-')
      logging.info('-path-parameters-start-')
      for h, v in path_params.iteritems():
        logging.info('%s: %s', h, v)
      logging.info('-path-parameters-end-')
      logging.info('body: %s', body)
      logging.info('query: %s', query)
      logging.info('--request-end--')

  def request(self, headers, path_params, query_params, body_value):
    """Updates outgoing requests with a serialized body.

    Args:
      headers: dict, request headers
      path_params: dict, parameters that appear in the request path
      query_params: dict, parameters that appear in the query
      body_value: object, the request body as a Python object, which must be
          serializable.

    Returns:
      A tuple of (headers, path_params, query, body) where body is
      serialized in this model's wire format.
    """
    query = self._build_query(query_params)
    headers['accept'] = self.accept
    headers['accept-encoding'] = 'gzip, deflate'
    # Append our identifier to any user-agent the caller already set.
    if 'user-agent' in headers:
      headers['user-agent'] += ' '
    else:
      headers['user-agent'] = ''
    headers['user-agent'] += 'google-api-python-client/1.0'
    if body_value is not None:
      headers['content-type'] = self.content_type
      body_value = self.serialize(body_value)
    self._log_request(headers, path_params, query, body_value)
    return (headers, path_params, query, body_value)

  def _build_query(self, params):
    """Builds a query string.

    Args:
      params: dict, the query parameters

    Returns:
      The query parameters properly encoded into an HTTP URI query string.
    """
    # Work on a copy: the original mutated the caller's dict, leaking the
    # 'alt' parameter back into their query_params.
    params = dict(params)
    params.update({'alt': self.alt_param})
    astuples = []
    for key, value in params.iteritems():
      if type(value) == type([]):
        for x in value:
          # Guard the encode like the scalar branch below does, so
          # non-string list members don't raise AttributeError.
          if getattr(x, 'encode', False) and callable(x.encode):
            x = x.encode('utf-8')
          astuples.append((key, x))
      else:
        if getattr(value, 'encode', False) and callable(value.encode):
          value = value.encode('utf-8')
        astuples.append((key, value))
    return '?' + urllib.urlencode(astuples)

  def _log_response(self, resp, content):
    """Logs debugging information about the response if requested."""
    if FLAGS.dump_request_response:
      logging.info('--response-start--')
      for h, v in resp.iteritems():
        logging.info('%s: %s', h, v)
      if content:
        logging.info(content)
      logging.info('--response-end--')

  def response(self, resp, content):
    """Convert the response wire format into a Python object.

    Args:
      resp: httplib2.Response, the HTTP response headers and status
      content: string, the body of the HTTP response

    Returns:
      The body de-serialized as a Python object.

    Raises:
      apiclient.errors.HttpError if a non 2xx response is received.
    """
    self._log_response(resp, content)
    # Error handling is TBD, for example, do we retry
    # for some operation/error combinations?
    if resp.status < 300:
      if resp.status == 204:
        # A 204: No Content response should be treated differently
        # to all the other success states
        return self.no_content_response
      return self.deserialize(content)
    else:
      logging.debug('Content from bad request was: %s' % content)
      raise HttpError(resp, content)

  def serialize(self, body_value):
    """Perform the actual Python object serialization.

    Args:
      body_value: object, the request body as a Python object.

    Returns:
      string, the body in serialized form.
    """
    _abstract()

  def deserialize(self, content):
    """Perform the actual deserialization from response string to Python
    object.

    Args:
      content: string, the body of the HTTP response

    Returns:
      The body de-serialized as a Python object.
    """
    _abstract()
class JsonModel(BaseModel):
  """Model class for JSON.

  Converts between JSON wire bodies and their Python object
  representation for HTTP requests and responses.
  """
  accept = 'application/json'
  content_type = 'application/json'
  alt_param = 'json'

  def __init__(self, data_wrapper=False):
    """Construct a JsonModel.

    Args:
      data_wrapper: boolean, wrap requests and responses in a data wrapper
    """
    self._data_wrapper = data_wrapper

  def serialize(self, body_value):
    """JSON-encode the body, adding the {'data': ...} wrapper if enabled."""
    needs_wrapping = (self._data_wrapper and
                      isinstance(body_value, dict) and
                      'data' not in body_value)
    if needs_wrapping:
      body_value = {'data': body_value}
    return simplejson.dumps(body_value)

  def deserialize(self, content):
    """JSON-decode the body, unwrapping a top-level 'data' key if present."""
    body = simplejson.loads(content)
    if isinstance(body, dict) and 'data' in body:
      return body['data']
    return body

  @property
  def no_content_response(self):
    return {}
class ProtocolBufferModel(BaseModel):
  """Model class for protocol buffers.

  Serializes and de-serializes the binary protocol buffer sent in the
  HTTP request and response bodies.
  """
  accept = 'application/x-protobuf'
  content_type = 'application/x-protobuf'
  alt_param = 'proto'

  def __init__(self, protocol_buffer):
    """Constructs a ProtocolBufferModel.

    The serialized protocol buffer returned in an HTTP response will be
    de-serialized using the given protocol buffer class.

    Args:
      protocol_buffer: The protocol buffer class used to de-serialize a
          response from the API.
    """
    self._protocol_buffer = protocol_buffer

  def serialize(self, body_value):
    """Serialize a protocol buffer message to its binary wire form."""
    return body_value.SerializeToString()

  def deserialize(self, content):
    """Parse binary wire data into an instance of the configured class."""
    return self._protocol_buffer.FromString(content)

  @property
  def no_content_response(self):
    # An empty message of the configured type stands in for 204 responses.
    return self._protocol_buffer()
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Errors for the library.
All exceptions defined by the library
should be defined in this file.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from anyjson import simplejson
class Error(Exception):
  """Root of the exception hierarchy for this module."""
  pass
class HttpError(Error):
  """HTTP data was invalid or unexpected."""

  def __init__(self, resp, content, uri=None):
    # resp: httplib2.Response (dict-like headers plus status/reason).
    self.resp = resp
    self.content = content
    self.uri = uri

  def _get_reason(self):
    """Calculate the reason for the error from the response content."""
    content_type = self.resp.get('content-type', '')
    if content_type.startswith('application/json'):
      try:
        reason = simplejson.loads(self.content)['error']['message']
      except (ValueError, KeyError):
        # Unparseable or unexpectedly-shaped body: fall back to raw text.
        reason = self.content
    else:
      reason = self.resp.reason
    return reason

  def __repr__(self):
    reason = self._get_reason()
    if self.uri:
      return '<HttpError %s when requesting %s returned "%s">' % (
          self.resp.status, self.uri, reason)
    return '<HttpError %s "%s">' % (self.resp.status, reason)

  __str__ = __repr__
# Raised when a response body advertised as JSON fails to parse.
class InvalidJsonError(Error):
    """The JSON returned could not be parsed."""
    pass
# Raised when a discovery document declares a link type this library does
# not recognize.
class UnknownLinkType(Error):
    """Link type unknown or unexpected."""
    pass
| Python |
#!/usr/bin/python2.5
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Request Handler for Task Queue Tasks."""
__author__ = "dwightguth@google.com (Dwight Guth)"
import logging
from apiclient import discovery
from apiclient.oauth2client import appengine
from apiclient.oauth2client import client
from google.appengine.api import apiproxy_stub_map
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
import httplib2
from common import apiparse
from common import apiupload
import csvparse
import icalparse
import model
def urlfetch_timeout_hook(service, call, request, response):
    """Pre-call hook for the urlfetch API proxy.

    Makes the default fetch deadline 30 seconds instead of 5 for any
    'Fetch' call that does not already carry an explicit deadline.
    """
    if call == 'Fetch' and not request.has_deadline():
        request.set_deadline(30.0)
class DeleteWorker(webapp.RequestHandler):
    """Handler for /worker/delete."""

    def post(self):
        """Handles POST requests for /worker/delete.

        Deletes the Snapshot entity named by the 'id' request parameter
        together with every Task and TaskList entity stored beneath it.
        """
        snapshot = model.Snapshot.gql("WHERE __key__ = KEY('Snapshot', :key)",
                                      key=int(self.request.get("id"))).get()
        # Remove child entities first, then the snapshot itself.
        for entity in model.Task.gql("WHERE ANCESTOR IS :id",
                                     id=snapshot.key()):
            entity.delete()
        for entity in model.TaskList.gql("WHERE ANCESTOR IS :id",
                                         id=snapshot.key()):
            entity.delete()
        snapshot.delete()
class SnapshotWorker(webapp.RequestHandler):
    """Handler for /worker/snapshot."""

    def post(self):
        """Handles POST requests for /worker/snapshot.

        Loads the Snapshot entity named by the 'id' request parameter,
        fetches all task lists and tasks from the Tasks API on behalf of the
        snapshot's owner, stores them as datastore entities under the
        snapshot, and finally marks the snapshot 'completed' or 'error'.
        """
        snapshot = model.Snapshot.gql("WHERE __key__ = KEY('Snapshot', :key)",
                                      key=int(self.request.get("id"))).get()
        user = snapshot.user
        # Per-user OAuth credentials were stored when the user signed in.
        credentials = appengine.StorageByKeyName(
            model.Credentials, user.user_id(), "credentials").get()
        if credentials is None or credentials.invalid == True:
            snapshot.status = "error"
            snapshot.errorMessage = "Must be logged in to create snapshot."
            snapshot.put()
        else:
            try:
                http = httplib2.Http()
                http = credentials.authorize(http)
                service = discovery.build("tasks", "v1", http)
                tasklists = service.tasklists()
                tasklists_list = tasklists.list().execute()
                # Persist the task lists, then walk each list and persist its
                # tasks (including hidden ones) beneath the snapshot.
                parser = apiparse.Parser(model.TaskList, None, snapshot, tasklists.list,
                                         model)
                tasklist_entities = parser.ParseAndStore(tasklists_list)
                for tasklist in tasklist_entities:
                    tasks = service.tasks()
                    tasks_list = tasks.list(tasklist=tasklist.id,
                                            showHidden=True).execute()
                    parser = apiparse.Parser(model.Task,
                                             tasklist,
                                             snapshot,
                                             tasks.list,
                                             model,
                                             tasklist=tasklist.id,
                                             showHidden=True)
                    parser.ParseAndStore(tasks_list)
                snapshot.status = "completed"
                snapshot.put()
            except client.AccessTokenRefreshError, e:
                # The user revoked access; surface a user-visible error.
                snapshot.status = "error"
                snapshot.errorMessage = "OAuth credentials were revoked."
                logging.info(e, exc_info=True)
                snapshot.put()
            except Exception, e:
                snapshot.status = "error"
                snapshot.errorMessage = "Snapshot creation process failed unexpectedly."
                logging.error(e, exc_info=True)
                snapshot.put()
class ImportWorker(webapp.RequestHandler):
"""Handler for /worker/import."""
def post(self):
logging.info(self.request.get("id"))
"""Handles POST requests for /worker/snapshot."""
snapshot = model.Snapshot.gql("WHERE __key__ = KEY('Snapshot', :key)",
key=int(self.request.get("id"))).get()
user = snapshot.user
credentials = appengine.StorageByKeyName(
model.Credentials, user.user_id(), "credentials").get()
if credentials is None or credentials.invalid == True:
snapshot.status = "error"
snapshot.errorMessage = "Must be logged in to create snapshot."
snapshot.put()
else:
try:
http = httplib2.Http()
http = credentials.authorize(http)
service = discovery.build("tasks", "v1", http)
tasklist = model.TaskList(parent=snapshot)
tasklist.title = self.request.get("name")
tasklist.put()
if self.request.get("format") == "ics":
try:
parser = icalparse.Parser(tasklist)
tasks_list = parser.ParseAndStore(self.request.get("file"))
except Exception, e:
snapshot.status = "error"
snapshot.errorMessage = "The iCalendar file was malformed."
logging.info(e, exc_info=True)
snapshot.put()
return
elif self.request.get("format") == "csv":
try:
parser = csvparse.Parser(tasklist)
tasks_list = parser.ParseAndStore(self.request.get("file"))
except Exception, e:
snapshot.status = "error"
snapshot.errorMessage = "The CSV file was malformed."
logging.info(e, exc_info=True)
snapshot.put()
return
else:
tasks_list = []
tasklists = service.tasklists()
uploader = apiupload.Uploader(tasklists.insert)
tasklist_id = uploader.Upload([tasklist])[0]
tasks = service.tasks()
uploader = apiupload.Uploader(tasks.insert, tasklist=tasklist_id,
previous=apiupload.PREVIOUS_ARGUMENT)
uploader.Upload(tasks_list)
snapshot.status = "completed"
snapshot.put()
except client.AccessTokenRefreshError, e:
snapshot.status = "error"
snapshot.errorMessage = "OAuth credentials were revoked."
logging.info(e, exc_info=True)
snapshot.put()
except Exception, e:
snapshot.status = "error"
snapshot.errorMessage = "Snapshot creation process failed unexpectedly."
logging.error(e, exc_info=True)
snapshot.put()
def main():
    """Wire up the worker URL routes and start the WSGI application."""
    # Extend the default urlfetch deadline before any handler runs.
    apiproxy_stub_map.apiproxy.GetPreCallHooks().Append(
        'urlfetch_timeout_hook', urlfetch_timeout_hook, 'urlfetch')
    routes = [
        ("/worker/delete", DeleteWorker),
        ("/worker/import", ImportWorker),
        ("/worker/snapshot", SnapshotWorker),
    ]
    util.run_wsgi_app(webapp.WSGIApplication(routes))


if __name__ == "__main__":
    main()
| Python |
"""
iri2uri
Converts an IRI to a URI.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = []
__version__ = "1.0.0"
__license__ = "MIT"
__history__ = """
"""
import urlparse
# Convert an IRI to a URI following the rules in RFC 3987
#
# The characters we need to encode and escape are defined in the spec:
#
# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
# / %xD0000-DFFFD / %xE1000-EFFFD
# Unicode code-point ranges ('ucschar' and 'iprivate' from RFC 3987) whose
# characters must be percent-encoded when converting an IRI to a URI.
escape_range = [
    (0xA0, 0xD7FF),
    (0xE000, 0xF8FF),
    (0xF900, 0xFDCF),
    (0xFDF0, 0xFFEF),
    (0x10000, 0x1FFFD),
    (0x20000, 0x2FFFD),
    (0x30000, 0x3FFFD),
    (0x40000, 0x4FFFD),
    (0x50000, 0x5FFFD),
    (0x60000, 0x6FFFD),
    (0x70000, 0x7FFFD),
    (0x80000, 0x8FFFD),
    (0x90000, 0x9FFFD),
    (0xA0000, 0xAFFFD),
    (0xB0000, 0xBFFFD),
    (0xC0000, 0xCFFFD),
    (0xD0000, 0xDFFFD),
    (0xE1000, 0xEFFFD),
    (0xF0000, 0xFFFFD),
    (0x100000, 0x10FFFD),
]


def encode(c):
    """Return *c* unchanged, or the %-escaped form of its UTF-8 octets when
    its code point falls inside one of the RFC 3987 escape ranges.

    The ranges are sorted ascending, so the scan can stop as soon as the
    code point is below the current range's lower bound.
    """
    codepoint = ord(c)
    for low, high in escape_range:
        if codepoint < low:
            break
        if low <= codepoint <= high:
            return "".join(["%%%2X" % ord(o) for o in c.encode('utf-8')])
    return c
def iri2uri(uri):
    """Convert an IRI to a URI.

    IRIs must be passed in as unicode strings; do not utf-8 encode the IRI
    before passing it into the function. Already-encoded (byte string)
    input is returned untouched.
    """
    if not isinstance(uri, unicode):
        return uri
    (scheme, authority, path, query, fragment) = urlparse.urlsplit(uri)
    # The host part is converted with IDNA; every other component has each
    # 'ucschar'/'iprivate' character UTF-8 encoded and then %-escaped.
    authority = authority.encode('idna')
    recombined = urlparse.urlunsplit((scheme, authority, path, query, fragment))
    return "".join([encode(c) for c in recombined])
if __name__ == "__main__":
    import unittest

    class Test(unittest.TestCase):

        def test_uris(self):
            """Test that URIs are invariant under the transformation."""
            # Pure-ASCII URIs contain no characters from the RFC 3987 escape
            # ranges, so iri2uri must return them unchanged.
            invariant = [
                u"ftp://ftp.is.co.za/rfc/rfc1808.txt",
                u"http://www.ietf.org/rfc/rfc2396.txt",
                u"ldap://[2001:db8::7]/c=GB?objectClass?one",
                u"mailto:John.Doe@example.com",
                u"news:comp.infosystems.www.servers.unix",
                u"tel:+1-816-555-1212",
                u"telnet://192.0.2.16:80/",
                u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ]
            for uri in invariant:
                self.assertEqual(uri, iri2uri(uri))

        def test_iri(self):
            """ Test that the right type of escaping is done for each part of the URI."""
            # Hostnames go through IDNA; path/query/fragment get %-escaped
            # UTF-8. The conversion must be idempotent, and byte-string
            # input must NOT be treated as an IRI (last assertion).
            self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}"))
            self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}"))
            self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}"))
            self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}"))
            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))
            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")))
            self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8')))

    unittest.main()
| Python |
from __future__ import generators
"""
httplib2
A caching http interface that supports ETags and gzip
to conserve bandwidth.
Requires Python 2.3 or later
Changelog:
2007-08-18, Rick: Modified so it's able to use a socks proxy if needed.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = ["Thomas Broyer (t.broyer@ltgt.net)",
"James Antill",
"Xavier Verges Farrero",
"Jonathan Feinberg",
"Blair Zajac",
"Sam Ruby",
"Louis Nyffenegger"]
__license__ = "MIT"
__version__ = "$Rev$"
import re
import sys
import email
import email.Utils
import email.Message
import email.FeedParser
import StringIO
import gzip
import zlib
import httplib
import urlparse
import base64
import os
import copy
import calendar
import time
import random
import errno
# Remove the deprecated-module warning emitted under Python 2.6.
try:
from hashlib import sha1 as _sha, md5 as _md5
except ImportError:
import sha
import md5
_sha = sha.new
_md5 = md5.new
import hmac
from gettext import gettext as _
import socket
try:
from httplib2 import socks
except ImportError:
socks = None
# Build the appropriate socket wrapper for ssl
try:
    import ssl # python 2.6
    _ssl_wrap_socket = ssl.wrap_socket
except (AttributeError, ImportError):
    # Pre-2.6 fallback: wrap via the legacy socket.ssl API.
    def _ssl_wrap_socket(sock, key_file, cert_file):
        ssl_sock = socket.ssl(sock, key_file, cert_file)
        return httplib.FakeSocket(sock, ssl_sock)

if sys.version_info >= (2,3):
    from iri2uri import iri2uri
else:
    # iri2uri needs unicode machinery not available before 2.3; degrade to
    # a no-op on older interpreters.
    def iri2uri(uri):
        return uri
def has_timeout(timeout): # python 2.6
    """Return True when *timeout* denotes an explicit timeout value.

    Both None and the module-global default sentinel
    (socket._GLOBAL_DEFAULT_TIMEOUT, new in Python 2.6) mean
    "no explicit timeout".
    """
    default_sentinel = getattr(socket, '_GLOBAL_DEFAULT_TIMEOUT', None)
    return timeout is not None and timeout is not default_sentinel
__all__ = ['Http', 'Response', 'ProxyInfo', 'HttpLib2Error',
'RedirectMissingLocation', 'RedirectLimit', 'FailedToDecompressContent',
'UnimplementedDigestAuthOptionError', 'UnimplementedHmacDigestAuthOptionError',
'debuglevel', 'ProxiesUnavailableError']
# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0
# Python 2.3 support
if sys.version_info < (2,4):
    # Minimal stand-in for the 2.4+ builtin: sorts in place and returns the
    # same list object, which is sufficient for how it is used here.
    def sorted(seq):
        seq.sort()
        return seq
# Python 2.3 support
def HTTPResponse__getheaders(self):
    """Return list of (header, value) tuples."""
    if self.msg is None:
        raise httplib.ResponseNotReady()
    return self.msg.items()

# Pre-2.4 httplib lacks HTTPResponse.getheaders(); patch it in so the rest
# of this module can rely on it unconditionally.
if not hasattr(httplib.HTTPResponse, 'getheaders'):
    httplib.HTTPResponse.getheaders = HTTPResponse__getheaders
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception):
    """Root of every exception raised by this module."""
    pass


# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
    """An error that also carries the offending response and its content."""

    def __init__(self, desc, response, content):
        self.response = response
        self.content = content
        HttpLib2Error.__init__(self, desc)


class RedirectMissingLocation(HttpLib2ErrorWithResponse):
    """A redirect response lacked a Location header."""
    pass


class RedirectLimit(HttpLib2ErrorWithResponse):
    """The redirect limit was reached."""
    pass


class FailedToDecompressContent(HttpLib2ErrorWithResponse):
    """A body claiming gzip/deflate encoding could not be decompressed."""
    pass


class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse):
    """The server requested an unsupported Digest-auth option."""
    pass


class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse):
    """The server requested an unsupported HMACDigest-auth option."""
    pass


class MalformedHeader(HttpLib2Error):
    """A header value could not be parsed."""
    pass


class RelativeURIError(HttpLib2Error):
    """An absolute URI was required but a relative one was given."""
    pass


class ServerNotFoundError(HttpLib2Error):
    """Raised when the server cannot be found."""
    pass


class ProxiesUnavailableError(HttpLib2Error):
    """Proxy support was requested but is unavailable."""
    pass
# Open Items:
# -----------
# Proxy support
# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?)
# Pluggable cache storage (supports storing the cache in
# flat files by default. We need a plug-in architecture
# that can support Berkeley DB and Squid)
# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.
# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
DEFAULT_MAX_REDIRECTS = 5
# Which headers are hop-by-hop headers by default
HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']
def _get_end2end_headers(response):
hopbyhop = list(HOP_BY_HOP)
hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')])
return [header for header in response.keys() if header not in hopbyhop]
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")


def parse_uri(uri):
    """Parses a URI using the regex given in Appendix B of RFC 3986.

        (scheme, authority, path, query, fragment) = parse_uri(uri)

    Absent components come back as None.
    """
    g = URI.match(uri).groups()
    # Groups 1/3/6/8 are the inner captures without the ':', '//', '?', '#'
    # delimiters; group 4 is the path.
    return tuple([g[i] for i in (1, 3, 4, 6, 8)])
def urlnorm(uri):
    """Normalize *uri* for cache-key purposes.

    Lower-cases the scheme and authority, defaults an empty path to "/",
    and returns (scheme, authority, request_uri, defrag_uri), where
    request_uri is the path plus any query string and defrag_uri drops the
    fragment.

    Raises:
        RelativeURIError: if the URI has no scheme or no authority.

    Fix: the original lower-cased the scheme twice; the redundant second
    assignment has been removed (no behavior change).
    """
    (scheme, authority, path, query, fragment) = parse_uri(uri)
    if not scheme or not authority:
        raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
    authority = authority.lower()
    scheme = scheme.lower()
    if not path:
        path = "/"
    # Could do syntax based normalization of the URI before
    # computing the digest. See Section 6.2.2 of Std 66.
    request_uri = query and "?".join([path, query]) or path
    defrag_uri = scheme + "://" + authority + request_uri
    return scheme, authority, request_uri, defrag_uri
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(r'^\w+://')
re_slash = re.compile(r'[?/:|]+')


def safename(filename):
    """Return a filename suitable for the cache.

    Strips dangerous and common characters to create a filename we
    can use to store the cache in.
    """
    try:
        # NOTE(review): encoding the *whole* URL string with 'idna' only
        # succeeds for plain-ASCII single-label input; anything else lands
        # in the UnicodeError handler below and is left as-is -- confirm
        # this best-effort behavior is intended.
        if re_url_scheme.match(filename):
            if isinstance(filename,str):
                filename = filename.decode('utf-8')
                filename = filename.encode('idna')
            else:
                filename = filename.encode('idna')
    except UnicodeError:
        pass
    if isinstance(filename,unicode):
        filename=filename.encode('utf-8')
    # The MD5 suffix keeps distinct keys that sanitize to the same string
    # from colliding in the cache directory.
    filemd5 = _md5(filename).hexdigest()
    filename = re_url_scheme.sub("", filename)
    filename = re_slash.sub(",", filename)

    # limit length of filename
    if len(filename)>200:
        filename=filename[:200]
    return ",".join((filename, filemd5))
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
return dict([ (key.lower(), NORMALIZE_SPACE.sub(value, ' ').strip()) for (key, value) in headers.iteritems()])
def _parse_cache_control(headers):
retval = {}
if headers.has_key('cache-control'):
parts = headers['cache-control'].split(',')
parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")]
parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
retval = dict(parts_with_args + parts_wo_args)
return retval
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
# Set to true to turn on, useful for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
# In regex below:
# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$")
WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$")
UNQUOTE_PAIRS = re.compile(r'\\(.)')
def _parse_www_authenticate(headers, headername='www-authenticate'):
"""Returns a dictionary of dictionaries, one dict
per auth_scheme."""
retval = {}
if headers.has_key(headername):
try:
authenticate = headers[headername].strip()
www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
while authenticate:
# Break off the scheme at the beginning of the line
if headername == 'authentication-info':
(auth_scheme, the_rest) = ('digest', authenticate)
else:
(auth_scheme, the_rest) = authenticate.split(" ", 1)
# Now loop over all the key value pairs that come after the scheme,
# being careful not to roll into the next scheme
match = www_auth.search(the_rest)
auth_params = {}
while match:
if match and len(match.groups()) == 3:
(key, value, the_rest) = match.groups()
auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')])
match = www_auth.search(the_rest)
retval[auth_scheme.lower()] = auth_params
authenticate = the_rest.strip()
except ValueError:
raise MalformedHeader("WWW-Authenticate")
return retval
def _entry_disposition(response_headers, request_headers):
    """Determine freshness from the Date, Expires and Cache-Control headers.

    We don't handle the following:

    1. Cache-Control: max-stale
    2. Age: headers are not used in the calculations.

    Not that this algorithm is simpler than you might think
    because we are operating as a private (non-shared) cache.
    This lets us ignore 's-maxage'. We can also ignore
    'proxy-invalidate' since we aren't a proxy.
    We will never return a stale document as
    fresh as a design decision, and thus the non-implementation
    of 'max-stale'. This also lets us safely ignore 'must-revalidate'
    since we operate as if every server has sent 'must-revalidate'.
    Since we are private we get to ignore both 'public' and
    'private' parameters. We also ignore 'no-transform' since
    we don't do any transformations.
    The 'no-store' parameter is handled at a higher level.
    So the only Cache-Control parameters we look at are:

    no-cache
    only-if-cached
    max-age
    min-fresh
    """
    # Possible verdicts: "STALE" (revalidate before use), "TRANSPARENT"
    # (bypass the cache entirely), "FRESH" (cached copy may be served).
    retval = "STALE"
    cc = _parse_cache_control(request_headers)
    cc_response = _parse_cache_control(response_headers)

    if request_headers.has_key('pragma') and request_headers['pragma'].lower().find('no-cache') != -1:
        # HTTP/1.0-style Pragma: no-cache; also rewritten into an HTTP/1.1
        # Cache-Control request directive for downstream consumers.
        retval = "TRANSPARENT"
        if 'cache-control' not in request_headers:
            request_headers['cache-control'] = 'no-cache'
    elif cc.has_key('no-cache'):
        retval = "TRANSPARENT"
    elif cc_response.has_key('no-cache'):
        retval = "STALE"
    elif cc.has_key('only-if-cached'):
        retval = "FRESH"
    elif response_headers.has_key('date'):
        # Freshness lifetime: response max-age, else Expires - Date, else 0.
        # A request max-age overrides it, and min-fresh inflates the
        # current age before the comparison.
        date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date']))
        now = time.time()
        current_age = max(0, now - date)
        if cc_response.has_key('max-age'):
            try:
                freshness_lifetime = int(cc_response['max-age'])
            except ValueError:
                freshness_lifetime = 0
        elif response_headers.has_key('expires'):
            expires = email.Utils.parsedate_tz(response_headers['expires'])
            if None == expires:
                freshness_lifetime = 0
            else:
                freshness_lifetime = max(0, calendar.timegm(expires) - date)
        else:
            freshness_lifetime = 0
        if cc.has_key('max-age'):
            try:
                freshness_lifetime = int(cc['max-age'])
            except ValueError:
                freshness_lifetime = 0
        if cc.has_key('min-fresh'):
            try:
                min_fresh = int(cc['min-fresh'])
            except ValueError:
                min_fresh = 0
            current_age += min_fresh
        if freshness_lifetime > current_age:
            retval = "FRESH"
    return retval
def _decompressContent(response, new_content):
    """Return *new_content* decoded according to the response's
    Content-Encoding header.

    Supports gzip and deflate. On success the content-length header is
    updated and content-encoding is renamed to '-content-encoding' so its
    historical presence is recorded without affecting callers.

    Raises:
        FailedToDecompressContent: if the body cannot be decompressed.
    """
    content = new_content
    try:
        encoding = response.get('content-encoding', None)
        if encoding in ['gzip', 'deflate']:
            if encoding == 'gzip':
                content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
            if encoding == 'deflate':
                content = zlib.decompress(content)
            response['content-length'] = str(len(content))
            # Record the historical presence of the encoding in a way that won't interfere.
            response['-content-encoding'] = response['content-encoding']
            del response['content-encoding']
    except IOError:
        content = ""
        raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content)
    return content
def _updateCache(request_headers, response_headers, content, cache, cachekey):
    """Store (or evict) a response in *cache* under *cachekey*.

    A 'no-store' directive on either side deletes any existing entry.
    Otherwise the end-to-end response headers and the body are serialized
    into one cache blob, with '-varied-<header>' annotations recording the
    request header values named by the response's Vary header.
    """
    if cachekey:
        cc = _parse_cache_control(request_headers)
        cc_response = _parse_cache_control(response_headers)
        if cc.has_key('no-store') or cc_response.has_key('no-store'):
            cache.delete(cachekey)
        else:
            info = email.Message.Message()
            for key, value in response_headers.iteritems():
                if key not in ['status','content-encoding','transfer-encoding']:
                    info[key] = value

            # Add annotations to the cache to indicate what headers
            # are variant for this request.
            vary = response_headers.get('vary', None)
            if vary:
                vary_headers = vary.lower().replace(' ', '').split(',')
                for header in vary_headers:
                    key = '-varied-%s' % header
                    try:
                        info[key] = request_headers[header]
                    except KeyError:
                        pass

            # A 304 merely revalidates the cached entry, so store it as 200.
            status = response_headers.status
            if status == 304:
                status = 200

            status_header = 'status: %d\r\n' % status

            header_str = info.as_string()

            # Normalize any bare CR or LF to CRLF before concatenation.
            header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
            text = "".join([status_header, header_str, content])

            cache.set(cachekey, text)
def _cnonce():
dig = _md5("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])).hexdigest()
return dig[:16]
def _wsse_username_token(cnonce, iso_now, password):
    """Build the WSSE PasswordDigest: Base64(SHA1(cnonce + created + password))."""
    digest = _sha("%s%s%s" % (cnonce, iso_now, password)).digest()
    return base64.b64encode(digest).strip()
# For credentials we need two things, first
# a pool of credentials to try (not necessarily tied to Basic, Digest, etc.)
# Then we also need a list of URIs that have already demanded authentication
# That list is tricky since sub-URIs can take the same auth, or the
# auth scheme may change as you descend the tree.
# So we also need each Auth instance to be able to tell us
# how close to the 'top' it is.
class Authentication(object):
    """Base class describing one authenticated (host, path) scope.

    Subclasses attach credentials to outgoing requests via request() and
    may react to server challenges via response().
    """

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
        self.path = path
        self.host = host
        self.credentials = credentials
        self.http = http

    def depth(self, request_uri):
        """Number of path segments of *request_uri* below this scope's root."""
        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
        return request_uri[len(self.path):].count("/")

    def inscope(self, host, request_uri):
        """True when *request_uri* on *host* falls inside this auth scope."""
        # XXX Should we normalize the request_uri?
        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
        return host == self.host and path.startswith(self.path)

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header. Override this in sub-classes."""
        pass

    def response(self, response, content):
        """Gives us a chance to update with new nonces
        or such returned from the last authorized response.
        Override this in sub-classes if necessary.

        Return True if the request is to be retried, for
        example Digest may return stale=true.
        """
        return False
class BasicAuthentication(Authentication):
    """HTTP Basic auth: base64("user:password") sent on every request."""

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header."""
        token = base64.b64encode("%s:%s" % self.credentials).strip()
        headers['authorization'] = 'Basic ' + token
class DigestAuthentication(Authentication):
    """Only do qop='auth' and MD5, since that
    is all Apache currently implements"""

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        self.challenge = challenge['digest']
        qop = self.challenge.get('qop', 'auth')
        # Only the 'auth' quality-of-protection is supported.
        self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None
        if self.challenge['qop'] is None:
            raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop))
        self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper()
        if self.challenge['algorithm'] != 'MD5':
            raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
        # A1 = username:realm:password, per RFC 2617 section 3.2.2.2.
        self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]])
        # nc is the per-nonce request counter, reset whenever a new nonce
        # is issued by the server.
        self.challenge['nc'] = 1

    def request(self, method, request_uri, headers, content, cnonce = None):
        """Modify the request headers"""
        # H and KD follow the naming of RFC 2617: hash and keyed digest.
        H = lambda x: _md5(x).hexdigest()
        KD = lambda s, d: H("%s:%s" % (s, d))
        A2 = "".join([method, ":", request_uri])
        self.challenge['cnonce'] = cnonce or _cnonce()
        request_digest = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (self.challenge['nonce'],
                    '%08x' % self.challenge['nc'],
                    self.challenge['cnonce'],
                    self.challenge['qop'], H(A2)
                    ))
        headers['Authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % (
                self.credentials[0],
                self.challenge['realm'],
                self.challenge['nonce'],
                request_uri,
                self.challenge['algorithm'],
                request_digest,
                self.challenge['qop'],
                self.challenge['nc'],
                self.challenge['cnonce'],
                )
        self.challenge['nc'] += 1

    def response(self, response, content):
        if not response.has_key('authentication-info'):
            # No Authentication-Info header: check whether the server says
            # our nonce is stale, in which case the request is retried.
            challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {})
            if 'true' == challenge.get('stale'):
                self.challenge['nonce'] = challenge['nonce']
                self.challenge['nc'] = 1
                return True
        else:
            # The server may hand us the next nonce to use proactively.
            updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {})

            if updated_challenge.has_key('nextnonce'):
                self.challenge['nonce'] = updated_challenge['nextnonce']
                self.challenge['nc'] = 1
        return False
class HmacDigestAuthentication(Authentication):
    """Adapted from Robert Sayre's code and DigestAuthentication above."""
    __author__ = "Thomas Broyer (t.broyer@ltgt.net)"

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        self.challenge = challenge['hmacdigest']
        # TODO: self.challenge['domain']
        self.challenge['reason'] = self.challenge.get('reason', 'unauthorized')
        if self.challenge['reason'] not in ['unauthorized', 'integrity']:
            self.challenge['reason'] = 'unauthorized'
        self.challenge['salt'] = self.challenge.get('salt', '')
        if not self.challenge.get('snonce'):
            raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty."))
        self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1')
        if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']:
            raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
        self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1')
        if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']:
            raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." % self.challenge['pw-algorithm']))
        if self.challenge['algorithm'] == 'HMAC-MD5':
            self.hashmod = _md5
        else:
            self.hashmod = _sha
        if self.challenge['pw-algorithm'] == 'MD5':
            self.pwhashmod = _md5
        else:
            self.pwhashmod = _sha
        # NOTE(review): when hashlib is available, _md5/_sha are hash
        # *constructors* and have no .new attribute, so the .new(...) calls
        # below appear to work only with the legacy md5/sha modules --
        # verify before relying on HMACDigest auth.
        self.key = "".join([self.credentials[0], ":",
                    self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(),
                    ":", self.challenge['realm']
                    ])
        self.key = self.pwhashmod.new(self.key).hexdigest().lower()

    def request(self, method, request_uri, headers, content):
        """Modify the request headers"""
        # The digest covers the end-to-end request headers in addition to
        # the method, URI and nonces.
        keys = _get_end2end_headers(headers)
        keylist = "".join(["%s " % k for k in keys])
        headers_val = "".join([headers[k] for k in keys])
        created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime())
        cnonce = _cnonce()
        request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val)
        request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower()
        headers['Authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % (
                self.credentials[0],
                self.challenge['realm'],
                self.challenge['snonce'],
                cnonce,
                request_uri,
                created,
                request_digest,
                keylist,
                )

    def response(self, response, content):
        # Retry when the server signals an integrity failure or staleness.
        challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {})
        if challenge.get('reason') in ['integrity', 'stale']:
            return True
        return False
class WsseAuthentication(Authentication):
    """This is thinly tested and should not be relied upon.
    At this time there isn't any third party server to test against.
    Blogger and TypePad implemented this algorithm at one point
    but Blogger has since switched to Basic over HTTPS and
    TypePad has implemented it wrong, by never issuing a 401
    challenge but instead requiring your client to telepathically know that
    their endpoint is expecting WSSE profile="UsernameToken"."""

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Add the WSSE Authorization and X-WSSE headers to the request."""
        headers['Authorization'] = 'WSSE profile="UsernameToken"'
        created = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
        nonce = _cnonce()
        # Digest = Base64(SHA1(nonce + created + password)).
        digest = _wsse_username_token(nonce, created, self.credentials[1])
        headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % (
                self.credentials[0],
                digest,
                nonce,
                created)
class GoogleLoginAuthentication(Authentication):
    # Obtains an auth token from Google's ClientLogin endpoint at
    # construction time and attaches it as 'GoogleLogin Auth=...'.

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        from urllib import urlencode
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        service = challenge['googlelogin'].get('service', 'xapi')
        # Blogger actually returns the service in the challenge
        # For the rest we guess based on the URI
        if service == 'xapi' and request_uri.find("calendar") > 0:
            service = "cl"
        # No point in guessing Base or Spreadsheet
        #elif request_uri.find("spreadsheets") > 0:
        #    service = "wise"

        auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent'])
        resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'})
        lines = content.split('\n')
        d = dict([tuple(line.split("=", 1)) for line in lines if line])
        if resp.status == 403:
            # Login refused; send an empty token rather than failing here.
            self.Auth = ""
        else:
            self.Auth = d['Auth']

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header."""
        headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
# Maps the (lower-cased) scheme name found in a WWW-Authenticate challenge
# to the Authentication subclass that implements it.
AUTH_SCHEME_CLASSES = {
    "basic": BasicAuthentication,
    "wsse": WsseAuthentication,
    "digest": DigestAuthentication,
    "hmacdigest": HmacDigestAuthentication,
    "googlelogin": GoogleLoginAuthentication
}

# Preference order when a server offers several schemes: keyed digests
# first, plaintext Basic last.
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
class FileCache(object):
    """Uses a local directory as a store for cached files.
    Not really safe to use if multiple threads or processes are going to
    be running on the same cache.
    """
    def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
        """cache is the directory path; safe maps a cache key to a file name."""
        self.cache = cache
        self.safe = safe
        if not os.path.exists(cache):
            os.makedirs(self.cache)

    def get(self, key):
        """Return the cached bytes for key, or None if absent or unreadable."""
        retval = None
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        try:
            # open() instead of the Python-2-only file() builtin; close in
            # a finally so the handle is released even if read() fails.
            f = open(cacheFullPath, "rb")
            try:
                retval = f.read()
            finally:
                f.close()
        except IOError:
            pass
        return retval

    def set(self, key, value):
        """Store value under key, overwriting any previous entry."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        f = open(cacheFullPath, "wb")
        try:
            f.write(value)
        finally:
            f.close()

    def delete(self, key):
        """Remove key's entry if present; silently do nothing otherwise."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        if os.path.exists(cacheFullPath):
            os.remove(cacheFullPath)
class Credentials(object):
    """A simple in-memory store of (domain, name, password) triples."""

    def __init__(self):
        self.credentials = []

    def add(self, name, password, domain=""):
        """Register name/password for domain ("" matches every domain)."""
        self.credentials.append((domain.lower(), name, password))

    def clear(self):
        """Forget every stored credential."""
        self.credentials = []

    def iter(self, domain):
        """Yield (name, password) pairs applicable to *domain*."""
        for stored_domain, name, password in self.credentials:
            if stored_domain == "" or domain == stored_domain:
                yield (name, password)
class KeyCerts(Credentials):
    """Identical to Credentials except that
    name/password are mapped to key/cert.

    Stored triples are therefore (domain, key, cert)."""
    pass
class ProxyInfo(object):
    """Collect information required to use a proxy."""
    def __init__(self, proxy_type, proxy_host, proxy_port, proxy_rdns=None, proxy_user=None, proxy_pass=None):
        """The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX
        constants. For example:

        p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', proxy_port=8000)
        """
        # One attribute per line instead of the original 6-way tuple
        # assignment — identical effect, easier to read.
        self.proxy_type = proxy_type
        self.proxy_host = proxy_host
        self.proxy_port = proxy_port
        self.proxy_rdns = proxy_rdns
        self.proxy_user = proxy_user
        self.proxy_pass = proxy_pass

    def astuple(self):
        """Return the settings in the positional order expected by
        socks.socksocket.setproxy()."""
        return (self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns,
                self.proxy_user, self.proxy_pass)

    def isgood(self):
        """True when both a host and a port have been supplied."""
        # `is not None` rather than the original `!= None` (PEP 8 idiom;
        # also robust against types with odd __eq__).
        return (self.proxy_host is not None) and (self.proxy_port is not None)
class HTTPConnectionWithTimeout(httplib.HTTPConnection):
    """
    HTTPConnection subclass that supports timeouts

    All timeouts are in seconds. If None is passed for timeout then
    Python's default timeout for sockets will be used. See for example
    the docs of socket.setdefaulttimeout():
    http://docs.python.org/library/socket.html#socket.setdefaulttimeout
    """
    def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
        # proxy_info, when set, must expose isgood()/astuple() (ProxyInfo).
        httplib.HTTPConnection.__init__(self, host, port, strict)
        self.timeout = timeout
        self.proxy_info = proxy_info

    def connect(self):
        """Connect to the host and port specified in __init__."""
        # Mostly verbatim from httplib.py.
        if self.proxy_info and socks is None:
            raise ProxiesUnavailableError(
                'Proxy support missing but proxy use was requested!')
        msg = "getaddrinfo returns an empty list"
        # Try each (family, socktype) getaddrinfo offers until one connects;
        # `msg` keeps the last socket.error for the final raise.
        for res in socket.getaddrinfo(self.host, self.port, 0,
                socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                if self.proxy_info and self.proxy_info.isgood():
                    # Tunnel through the configured SOCKS/HTTP proxy.
                    self.sock = socks.socksocket(af, socktype, proto)
                    self.sock.setproxy(*self.proxy_info.astuple())
                else:
                    self.sock = socket.socket(af, socktype, proto)
                    self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                # Different from httplib: support timeouts.
                if has_timeout(self.timeout):
                    self.sock.settimeout(self.timeout)
                # End of difference from httplib.
                if self.debuglevel > 0:
                    print "connect: (%s, %s)" % (self.host, self.port)
                self.sock.connect(sa)
            except socket.error, msg:
                if self.debuglevel > 0:
                    print 'connect fail:', (self.host, self.port)
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            raise socket.error, msg
class HTTPSConnectionWithTimeout(httplib.HTTPSConnection):
    """
    This class allows communication via SSL.

    All timeouts are in seconds. If None is passed for timeout then
    Python's default timeout for sockets will be used. See for example
    the docs of socket.setdefaulttimeout():
    http://docs.python.org/library/socket.html#socket.setdefaulttimeout
    """
    def __init__(self, host, port=None, key_file=None, cert_file=None,
                 strict=None, timeout=None, proxy_info=None):
        # key_file/cert_file enable client-certificate authentication.
        httplib.HTTPSConnection.__init__(self, host, port=port, key_file=key_file,
                cert_file=cert_file, strict=strict)
        self.timeout = timeout
        self.proxy_info = proxy_info

    def connect(self):
        "Connect to a host on a given (SSL) port."
        msg = "getaddrinfo returns an empty list"
        # Same address-iteration strategy as HTTPConnectionWithTimeout, but
        # the raw socket is wrapped in SSL only after a successful connect.
        for family, socktype, proto, canonname, sockaddr in socket.getaddrinfo(
            self.host, self.port, 0, socket.SOCK_STREAM):
            try:
                if self.proxy_info and self.proxy_info.isgood():
                    sock = socks.socksocket(family, socktype, proto)
                    sock.setproxy(*self.proxy_info.astuple())
                else:
                    sock = socket.socket(family, socktype, proto)
                    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

                if has_timeout(self.timeout):
                    sock.settimeout(self.timeout)
                sock.connect((self.host, self.port))
                self.sock =_ssl_wrap_socket(sock, self.key_file, self.cert_file)
                if self.debuglevel > 0:
                    print "connect: (%s, %s)" % (self.host, self.port)
            except socket.error, msg:
                if self.debuglevel > 0:
                    print 'connect fail:', (self.host, self.port)
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            raise socket.error, msg
class Http(object):
    """An HTTP client that handles:
    - all methods
    - caching
    - ETags
    - compression,
    - HTTPS
    - Basic
    - Digest
    - WSSE

    and more.
    """
    def __init__(self, cache=None, timeout=None, proxy_info=None):
        """
        The value of proxy_info is a ProxyInfo instance.

        If 'cache' is a string then it is used as a directory name for
        a disk cache. Otherwise it must be an object that supports the
        same interface as FileCache.

        All timeouts are in seconds. If None is passed for timeout
        then Python's default timeout for sockets will be used. See
        for example the docs of socket.setdefaulttimeout():
        http://docs.python.org/library/socket.html#socket.setdefaulttimeout
        """
        self.proxy_info = proxy_info
        # Map domain name to an httplib connection
        self.connections = {}
        # The location of the cache, for now a directory
        # where cached responses are held.
        if cache and isinstance(cache, str):
            self.cache = FileCache(cache)
        else:
            self.cache = cache
        # Name/password
        self.credentials = Credentials()
        # Key/cert
        self.certificates = KeyCerts()
        # authorization objects
        self.authorizations = []
        # If set to False then no redirects are followed, even safe ones.
        self.follow_redirects = True
        # Which HTTP methods do we apply optimistic concurrency to, i.e.
        # which methods get an "if-match:" etag header added to them.
        self.optimistic_concurrency_methods = ["PUT"]
        # If 'follow_redirects' is True, and this is set to True then
        # all redirects are followed, including unsafe ones.
        self.follow_all_redirects = False
        # If True, cached ETags are not sent as If-None-Match validators.
        self.ignore_etag = False
        # If True, exceptions raised during request() are converted into
        # synthetic 4xx/5xx responses instead of propagating.
        self.force_exception_to_status_code = False
        self.timeout = timeout

    def _auth_from_challenge(self, host, request_uri, headers, response, content):
        """A generator that creates Authorization objects
           that can be applied to requests.
        """
        challenges = _parse_www_authenticate(response, 'www-authenticate')
        # Yield handlers in AUTH_SCHEME_ORDER (strongest scheme first) for
        # every credential registered for this host.
        for cred in self.credentials.iter(host):
            for scheme in AUTH_SCHEME_ORDER:
                if challenges.has_key(scheme):
                    yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)

    def add_credentials(self, name, password, domain=""):
        """Add a name and password that will be used
        any time a request requires authentication."""
        self.credentials.add(name, password, domain)

    def add_certificate(self, key, cert, domain):
        """Add a key and cert that will be used
        any time a request requires authentication."""
        self.certificates.add(key, cert, domain)

    def clear_credentials(self):
        """Remove all the names and passwords
        that are used for authentication"""
        self.credentials.clear()
        self.authorizations = []

    def _conn_request(self, conn, request_uri, method, body, headers):
        """Issue one request on an open connection and read the response.

        Retries once (reconnecting first) if the — possibly stale
        keep-alive — socket fails before a response is received."""
        for i in range(2):
            try:
                conn.request(method, request_uri, body, headers)
            except socket.timeout:
                raise
            except socket.gaierror:
                conn.close()
                raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
            except socket.error, e:
                err = 0
                if hasattr(e, 'args'):
                    err = getattr(e, 'args')[0]
                else:
                    err = e.errno
                if err == errno.ECONNREFUSED: # Connection refused
                    raise
            except httplib.HTTPException:
                # Just because the server closed the connection doesn't apparently mean
                # that the server didn't send a response.
                if conn.sock is None:
                    if i == 0:
                        conn.close()
                        conn.connect()
                        continue
                    else:
                        conn.close()
                        raise
                if i == 0:
                    conn.close()
                    conn.connect()
                    continue
                pass
            try:
                response = conn.getresponse()
            except (socket.error, httplib.HTTPException):
                if i == 0:
                    conn.close()
                    conn.connect()
                    continue
                else:
                    raise
            else:
                content = ""
                if method == "HEAD":
                    response.close()
                else:
                    content = response.read()
                response = Response(response)
                if method != "HEAD":
                    content = _decompressContent(response, content)
            break
        return (response, content)

    def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
        """Do the actual request using the connection object
        and also follow one level of redirects if necessary"""
        # Pick the most specific (deepest-scoped) authorization for this URI.
        auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
        auth = auths and sorted(auths)[0][1] or None
        if auth:
            auth.request(method, request_uri, headers, body)
        (response, content) = self._conn_request(conn, request_uri, method, body, headers)
        if auth:
            if auth.response(response, body):
                # Auth scheme asked for a retry (e.g. stale digest nonce).
                auth.request(method, request_uri, headers, body)
                (response, content) = self._conn_request(conn, request_uri, method, body, headers )
                response._stale_digest = 1
        if response.status == 401:
            # Try each applicable credential/scheme until one is accepted.
            for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
                authorization.request(method, request_uri, headers, body)
                (response, content) = self._conn_request(conn, request_uri, method, body, headers, )
                if response.status != 401:
                    self.authorizations.append(authorization)
                    authorization.response(response, body)
                    break
        if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
            if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
                # Pick out the location header and basically start from the beginning
                # remembering first to strip the ETag header and decrement our 'depth'
                if redirections:
                    if not response.has_key('location') and response.status != 300:
                        raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content)
                    # Fix-up relative redirects (which violate an RFC 2616 MUST)
                    if response.has_key('location'):
                        location = response['location']
                        (scheme, authority, path, query, fragment) = parse_uri(location)
                        if authority == None:
                            response['location'] = urlparse.urljoin(absolute_uri, location)
                    if response.status == 301 and method in ["GET", "HEAD"]:
                        # Permanent redirects are cacheable; remember the target.
                        response['-x-permanent-redirect-url'] = response['location']
                        if not response.has_key('content-location'):
                            response['content-location'] = absolute_uri
                        _updateCache(headers, response, content, self.cache, cachekey)
                    if headers.has_key('if-none-match'):
                        del headers['if-none-match']
                    if headers.has_key('if-modified-since'):
                        del headers['if-modified-since']
                    if response.has_key('location'):
                        location = response['location']
                        old_response = copy.deepcopy(response)
                        if not old_response.has_key('content-location'):
                            old_response['content-location'] = absolute_uri
                        redirect_method = method
                        if response.status == 303:
                            # 303 See Other is always followed with a GET.
                            redirect_method = "GET"
                        (response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1)
                        response.previous = old_response
                else:
                    raise RedirectLimit("Redirected more times than rediection_limit allows.", response, content)
            elif response.status in [200, 203] and method == "GET":
                # Don't cache 206's since we aren't going to handle byte range requests
                if not response.has_key('content-location'):
                    response['content-location'] = absolute_uri
                _updateCache(headers, response, content, self.cache, cachekey)
        return (response, content)

    def _normalize_headers(self, headers):
        return _normalize_headers(headers)

# Need to catch and rebrand some exceptions
# Then need to optionally turn all exceptions into status codes
# including all socket.* and httplib.* exceptions.

    def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
        """ Performs a single HTTP request.

        The 'uri' is the URI of the HTTP resource and can begin
        with either 'http' or 'https'. The value of 'uri' must be an absolute URI.

        The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc.
        There is no restriction on the methods allowed.

        The 'body' is the entity body to be sent with the request. It is a string
        object.

        Any extra headers that are to be sent with the request should be provided in the
        'headers' dictionary.

        The maximum number of redirect to follow before raising an
        exception is 'redirections. The default is 5.

        The return value is a tuple of (response, content), the first
        being and instance of the 'Response' class, the second being
        a string that contains the response entity body.
        """
        try:
            if headers is None:
                headers = {}
            else:
                headers = self._normalize_headers(headers)
            if not headers.has_key('user-agent'):
                headers['user-agent'] = "Python-httplib2/%s" % __version__
            uri = iri2uri(uri)
            (scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
            # http on port 443 is almost certainly meant to be https.
            domain_port = authority.split(":")[0:2]
            if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http':
                scheme = 'https'
                authority = domain_port[0]
            # Reuse one connection per scheme+authority (keep-alive).
            conn_key = scheme+":"+authority
            if conn_key in self.connections:
                conn = self.connections[conn_key]
            else:
                if not connection_type:
                    connection_type = (scheme == 'https') and HTTPSConnectionWithTimeout or HTTPConnectionWithTimeout
                certs = list(self.certificates.iter(authority))
                if scheme == 'https' and certs:
                    conn = self.connections[conn_key] = connection_type(authority, key_file=certs[0][0],
                        cert_file=certs[0][1], timeout=self.timeout, proxy_info=self.proxy_info)
                else:
                    conn = self.connections[conn_key] = connection_type(authority, timeout=self.timeout, proxy_info=self.proxy_info)
                conn.set_debuglevel(debuglevel)
            if 'range' not in headers and 'accept-encoding' not in headers:
                headers['accept-encoding'] = 'gzip, deflate'
            info = email.Message.Message()
            cached_value = None
            if self.cache:
                cachekey = defrag_uri
                cached_value = self.cache.get(cachekey)
                if cached_value:
                    # info = email.message_from_string(cached_value)
                    #
                    # Need to replace the line above with the kludge below
                    # to fix the non-existent bug not fixed in this
                    # bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
                    try:
                        info, content = cached_value.split('\r\n\r\n', 1)
                        feedparser = email.FeedParser.FeedParser()
                        feedparser.feed(info)
                        info = feedparser.close()
                        feedparser._parse = None
                    except IndexError:
                        self.cache.delete(cachekey)
                        cachekey = None
                        cached_value = None
            else:
                cachekey = None
            if method in self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers:
                # http://www.w3.org/1999/04/Editing/
                headers['if-match'] = info['etag']
            if method not in ["GET", "HEAD"] and self.cache and cachekey:
                # RFC 2616 Section 13.10
                self.cache.delete(cachekey)
            # Check the vary header in the cache to see if this request
            # matches what varies in the cache.
            if method in ['GET', 'HEAD'] and 'vary' in info:
                vary = info['vary']
                vary_headers = vary.lower().replace(' ', '').split(',')
                for header in vary_headers:
                    key = '-varied-%s' % header
                    value = info[key]
                    if headers.get(header, None) != value:
                        cached_value = None
                        break
            if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
                if info.has_key('-x-permanent-redirect-url'):
                    # Should cached permanent redirects be counted in our redirection count? For now, yes.
                    if redirections <= 0:
                        raise RedirectLimit("Redirected more times than rediection_limit allows.", {}, "")
                    (response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1)
                    response.previous = Response(info)
                    response.previous.fromcache = True
                else:
                    # Determine our course of action:
                    #   Is the cached entry fresh or stale?
                    #   Has the client requested a non-cached response?
                    #
                    # There seems to be three possible answers:
                    # 1. [FRESH] Return the cache entry w/o doing a GET
                    # 2. [STALE] Do the GET (but add in cache validators if available)
                    # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
                    entry_disposition = _entry_disposition(info, headers)
                    if entry_disposition == "FRESH":
                        if not cached_value:
                            info['status'] = '504'
                            content = ""
                        response = Response(info)
                        if cached_value:
                            response.fromcache = True
                        return (response, content)
                    if entry_disposition == "STALE":
                        if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers:
                            headers['if-none-match'] = info['etag']
                        if info.has_key('last-modified') and not 'last-modified' in headers:
                            headers['if-modified-since'] = info['last-modified']
                    elif entry_disposition == "TRANSPARENT":
                        pass
                    (response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
                if response.status == 304 and method == "GET":
                    # Rewrite the cache entry with the new end-to-end headers
                    # Take all headers that are in response
                    # and overwrite their values in info.
                    # unless they are hop-by-hop, or are listed in the connection header.
                    for key in _get_end2end_headers(response):
                        info[key] = response[key]
                    merged_response = Response(info)
                    if hasattr(response, "_stale_digest"):
                        merged_response._stale_digest = response._stale_digest
                    _updateCache(headers, merged_response, content, self.cache, cachekey)
                    response = merged_response
                    response.status = 200
                    response.fromcache = True
                elif response.status == 200:
                    content = new_content
                else:
                    self.cache.delete(cachekey)
                    content = new_content
            else:
                cc = _parse_cache_control(headers)
                if cc.has_key('only-if-cached'):
                    # Client demanded cache-only and we have no entry: 504
                    # per RFC 2616 sec 14.9.4.
                    info['status'] = '504'
                    response = Response(info)
                    content = ""
                else:
                    (response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
        except Exception, e:
            if self.force_exception_to_status_code:
                if isinstance(e, HttpLib2ErrorWithResponse):
                    response = e.response
                    content = e.content
                    response.status = 500
                    response.reason = str(e)
                elif isinstance(e, socket.timeout):
                    content = "Request Timeout"
                    response = Response( {
                            "content-type": "text/plain",
                            "status": "408",
                            "content-length": len(content)
                            })
                    response.reason = "Request Timeout"
                else:
                    content = str(e)
                    response = Response( {
                            "content-type": "text/plain",
                            "status": "400",
                            "content-length": len(content)
                            })
                    response.reason = "Bad Request"
            else:
                raise
        return (response, content)
class Response(dict):
    """An object more like email.Message than httplib.HTTPResponse."""
    """Is this response from our local cache"""
    fromcache = False
    """HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """
    version = 11
    "Status code returned by server. "
    status = 200
    """Reason phrase returned by server."""
    reason = "Ok"
    # Previous Response in a redirect/permanent-redirect chain, or None.
    previous = None

    def __init__(self, info):
        # info is either an email.Message or
        # an httplib.HTTPResponse object.
        if isinstance(info, httplib.HTTPResponse):
            # Header names are lower-cased so lookups are case-insensitive.
            for key, value in info.getheaders():
                self[key.lower()] = value
            self.status = info.status
            self['status'] = str(self.status)
            self.reason = info.reason
            self.version = info.version
        elif isinstance(info, email.Message.Message):
            for key, value in info.items():
                self[key] = value
            self.status = int(self['status'])
        else:
            # Plain mapping, e.g. a cached header dict.
            for key, value in info.iteritems():
                self[key] = value
            self.status = int(self.get('status', self.status))

    def __getattr__(self, name):
        # `resp.dict` is an alias for the mapping itself (httplib
        # compatibility); any other missing attribute is a real error.
        if name == 'dict':
            return self
        else:
            raise AttributeError, name
| Python |
"""SocksiPy - Python SOCKS module.
Version 1.00
Copyright 2006 Dan-Haim. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of Dan Haim nor the names of his contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This module provides a standard socket-like interface for Python
for tunneling connections through SOCKS proxies.
"""
"""
Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
for use in PyLoris (http://pyloris.sourceforge.net/)
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
mainly to merge bug fixes found in Sourceforge
"""
import socket
if getattr(socket, 'socket', None) is None:
raise ImportError('socket.socket missing, proxy support unusable')
import struct
import sys
# Supported proxy protocols; these constants are part of the public API
# and are passed as the first element of the proxy configuration tuple.
PROXY_TYPE_SOCKS4 = 1
PROXY_TYPE_SOCKS5 = 2
PROXY_TYPE_HTTP = 3

# Module-wide default proxy tuple installed by setdefaultproxy();
# None means "no default proxy".
_defaultproxy = None

# Small hack for Python 2.x
# Shadow the builtin bytes() with a no-op so calls written with the
# Python-3 signature, e.g. bytes(s, 'utf8'), return the (already
# byte-oriented) str unchanged on Python 2.
if sys.version_info[0] <= 2:
    def bytes(obj, enc=None):
        return obj
class ProxyError(Exception):
    """Base class for all proxy-related errors raised by this module.

    `value` is conventionally a (code, message) tuple."""
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return repr(self.value)
class GeneralProxyError(ProxyError):
    """Generic negotiation failure; codes index into _generalerrors.

    __init__/__str__ are inherited from ProxyError — the original
    redefined them verbatim, so behavior is unchanged."""
    pass
class Socks5AuthError(ProxyError):
    """SOCKS5 authentication failure; codes index into _socks5autherrors.

    __init__/__str__ are inherited from ProxyError — the original
    redefined them verbatim, so behavior is unchanged."""
    pass
class Socks5Error(ProxyError):
    """SOCKS5 protocol error; codes index into _socks5errors.

    __init__/__str__ are inherited from ProxyError — the original
    redefined them verbatim, so behavior is unchanged."""
    pass
class Socks4Error(ProxyError):
    """SOCKS4 protocol error; codes index into _socks4errors.

    __init__/__str__ are inherited from ProxyError — the original
    redefined them verbatim, so behavior is unchanged."""
    pass
class HTTPError(ProxyError):
    """HTTP CONNECT tunnel failure; value is (statuscode, reason).

    __init__/__str__ are inherited from ProxyError — the original
    redefined them verbatim, so behavior is unchanged."""
    pass
# Human-readable messages for GeneralProxyError, indexed by error code.
_generalerrors = ("success",
    "invalid data",
    "not connected",
    "not available",
    "bad proxy type",
    "bad input")

# SOCKS5 reply-field messages, indexed by the REP byte (0-8); 9 = fallback.
_socks5errors = ("succeeded",
    "general SOCKS server failure",
    "connection not allowed by ruleset",
    "Network unreachable",
    "Host unreachable",
    "Connection refused",
    "TTL expired",
    "Command not supported",
    "Address type not supported",
    "Unknown error")

# SOCKS5 username/password sub-negotiation outcomes.
_socks5autherrors = ("succeeded",
    "authentication is required",
    "all offered authentication methods were rejected",
    "unknown username or invalid password",
    "unknown error")

# SOCKS4 reply codes 90-93, indexed with an offset of -90; 4 = fallback.
_socks4errors = ("request granted",
    "request rejected or failed",
    ("request rejected because SOCKS server cannot connect to "
     "identd on the client"),
    ("request rejected because the client program and identd"
     " report different user-ids"),
    "unknown error")
def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True,
                    username=None, password=None):
    """setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
    Sets a default proxy which all further socksocket objects will use,
    unless explicitly changed.
    """
    global _defaultproxy
    # Same 6-tuple layout that socksocket.setproxy() stores.
    _defaultproxy = (proxytype, addr, port, rdns, username, password)
class socksocket(socket.socket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET, type=SOCK_STREAM and proto=0.
"""
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM,
proto=0, _sock=None):
socket.socket.__init__(self, family, type, proto, _sock)
if _defaultproxy != None:
self.__proxy = _defaultproxy
else:
self.__proxy = (None, None, None, None, None, None)
self.__proxysockname = None
self.__proxypeername = None
def __decode(self, bytes):
if getattr(bytes, 'decode', False):
try:
bytes = bytes.decode()
except Exception:
pass
return bytes
def __encode(self, bytes):
if getattr(bytes, 'encode', False):
try:
bytes = bytes.encode()
except Exception:
pass
return bytes
def __recvall(self, count):
"""__recvall(count) -> data
Receive EXACTLY the number of bytes requested from the socket.
Blocks until the required number of bytes have been received.
"""
data = bytes("")
while len(data) < count:
d = self.recv(count - len(data))
if not d:
raise GeneralProxyError(
(0, "connection closed unexpectedly"))
data = data + self.__decode(d)
return data
    def sendall(self, bytes):
        # Normalize text to the wire representation via the __encode shim
        # before delegating to the real socket sendall.
        # NOTE(review): the parameter shadows the module-level/builtin
        # `bytes`; kept as-is to preserve the public signature.
        socket.socket.sendall(self, self.__encode(bytes))
    def setproxy(self, proxytype=None, addr=None, port=None, rdns=True,
                 username=None, password=None):
        """setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
        Sets the proxy to be used.
        proxytype - The type of the proxy to be used. Three types
                are supported: PROXY_TYPE_SOCKS4 (including socks4a),
                PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
        addr - The address of the server (IP or DNS).
        port - The port of the server. Defaults to 1080 for SOCKS
                servers and 8080 for HTTP proxy servers.
        rdns - Should DNS queries be performed on the remote side
                (rather than the local side). The default is True.
                Note: This has no effect with SOCKS4 servers.
        username - Username to authenticate with to the server.
                The default is no authentication.
        password - Password to authenticate with to the server.
                Only relevant when username is also provided.
        """
        # Same 6-tuple layout consumed by the __negotiate* helpers and
        # shared with the module-level default set by setdefaultproxy().
        self.__proxy = (proxytype, addr, port, rdns, username, password)
    def __negotiatesocks5(self, destaddr, destport):
        """__negotiatesocks5(self,destaddr,destport)
        Negotiates a connection through a SOCKS5 server.

        On success, records the server-reported bound address/port in
        __proxysockname and the destination in __proxypeername.
        """
        # First we'll send the authentication packages we support.
        if (self.__proxy[4] != None) and (self.__proxy[5] != None):
            # The username/password details were supplied to the
            # setproxy method so we support the USERNAME/PASSWORD
            # authentication (in addition to the standard none).
            self.sendall("\x05\x02\x00\x02")
        else:
            # No username/password were entered, therefore we
            # only support connections with no authentication.
            self.sendall("\x05\x01\x00")
        # We'll receive the server's response to determine which
        # method was selected
        chosenauth = self.__recvall(2)
        if chosenauth[0] != "\x05":
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        # Check the chosen authentication method
        if chosenauth[1] == "\x00":
            # No authentication is required
            pass
        elif chosenauth[1] == "\x02":
            # Okay, we need to perform a basic username/password
            # authentication.
            self.sendall("\x01" + chr(len(self.__proxy[4])) + self.__proxy[4] +
                chr(len(self.__proxy[5])) + self.__proxy[5])
            authstat = self.__recvall(2)
            if authstat[0] != "\x01":
                # Bad response
                self.close()
                raise GeneralProxyError((1, _generalerrors[1]))
            if authstat[1] != "\x00":
                # Authentication failed
                self.close()
                raise Socks5AuthError((3, _socks5autherrors[3]))
            # Authentication succeeded
        else:
            # Reaching here is always bad
            self.close()
            if chosenauth[1] == "\xFF":
                # 0xFF: no acceptable authentication method offered.
                raise Socks5AuthError((2, _socks5autherrors[2]))
            else:
                raise GeneralProxyError((1, _generalerrors[1]))
        # Now we can request the actual connection
        req = "\x05\x01\x00"
        # If the given destination address is an IP address, we'll
        # use the IPv4 address request even if remote resolving was specified.
        try:
            ipaddr = socket.inet_aton(destaddr)
            req = req + "\x01" + ipaddr
        except socket.error:
            # Well it's not an IP number, so it's probably a DNS name.
            if self.__proxy[3] == True:
                # Resolve remotely
                ipaddr = None
                req = req + "\x03" + chr(len(destaddr)) + destaddr
            else:
                # Resolve locally
                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
                req = req + "\x01" + ipaddr
        req = req + self.__decode(struct.pack(">H", destport))
        self.sendall(req)
        # Get the response
        resp = self.__recvall(4)
        if resp[0] != "\x05":
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        elif resp[1] != "\x00":
            # Connection failed
            self.close()
            if ord(resp[1]) <= 8:
                raise Socks5Error((ord(resp[1]), _socks5errors[ord(resp[1])]))
            else:
                raise Socks5Error((9, _socks5errors[9]))
        # Get the bound address/port; resp[3] is the ATYP byte
        # (0x01 = IPv4 address, 0x03 = length-prefixed domain name).
        elif resp[3] == "\x01":
            boundaddr = self.__recvall(4)
        elif resp[3] == "\x03":
            resp = resp + self.recv(1)
            boundaddr = self.__recvall(ord(resp[4]))
        else:
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        boundport = struct.unpack(">H", bytes(self.__recvall(2), 'utf8'))[0]
        self.__proxysockname = boundaddr, boundport
        if ipaddr != None:
            self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
        else:
            self.__proxypeername = (destaddr, destport)
    def getproxysockname(self):
        """getsockname() -> address info
        Returns the bound IP address and port number at the proxy.
        Populated by the __negotiate* helpers after a successful handshake;
        None before connect().
        """
        return self.__proxysockname
    def getproxypeername(self):
        """getproxypeername() -> address info
        Returns the IP and port number of the proxy.
        (Delegates to the raw socket, whose TCP peer is the proxy itself.)
        """
        return socket.socket.getpeername(self)
    def getpeername(self):
        """getpeername() -> address info
        Returns the IP address and port number of the destination
        machine (note: getproxypeername returns the proxy)
        Populated by the __negotiate* helpers; None before connect().
        """
        return self.__proxypeername
def __negotiatesocks4(self, destaddr, destport):
"""__negotiatesocks4(self,destaddr,destport)
Negotiates a connection through a SOCKS4 server.
"""
# Check if the destination address provided is an IP address
rmtrslv = False
try:
ipaddr = socket.inet_aton(destaddr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if self.__proxy[3] == True:
ipaddr = "\x00\x00\x00\x01"
rmtrslv = True
else:
ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
# Construct the request packet
req = "\x04\x01" + self.__decode(struct.pack(">H", destport)) + ipaddr
# The username parameter is considered userid for SOCKS4
if self.__proxy[4] != None:
req = req + self.__proxy[4]
req = req + "\x00"
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if rmtrslv==True:
req = req + destaddr + "\x00"
self.sendall(req)
# Get the response from the server
resp = self.__recvall(8)
if resp[0] != "\x00":
# Bad data
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if resp[1] != "\x5A":
# Server returned an error
self.close()
if ord(resp[1]) in (91,92,93):
self.close()
raise Socks4Error((ord(resp[1]), _socks4errors[ord(resp[1])-90]))
else:
raise Socks4Error((94,_socks4errors[4]))
# Get the bound address/port
self.__proxysockname = (socket.inet_ntoa(resp[4:]),struct.unpack(">H",bytes(resp[2:4],'utf8'))[0])
if rmtrslv != None:
self.__proxypeername = (socket.inet_ntoa(ipaddr),destport)
else:
self.__proxypeername = (destaddr, destport)
    def __negotiatehttp(self, destaddr, destport):
        """__negotiatehttp(self,destaddr,destport)
        Negotiates a connection through an HTTP server (CONNECT tunnel).
        """
        # If we need to resolve locally, we do this now
        if self.__proxy[3] == False:
            addr = socket.gethostbyname(destaddr)
        else:
            addr = destaddr
        self.sendall(("CONNECT %s:%s HTTP/1.1\r\n"
                      "Host: %s\r\n\r\n") % (addr, destport, destaddr))
        # We read the response until we get the string "\r\n\r\n"
        # (one byte at a time so we never read past the header block).
        resp = self.recv(1)
        while resp.find("\r\n\r\n") == -1:
            resp = resp + self.recv(1)
        # We just need the first line to check if the connection
        # was successful
        statusline = resp.splitlines()[0].split(" ", 2)
        if statusline[0] not in ("HTTP/1.0", "HTTP/1.1"):
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        try:
            statuscode = int(statusline[1])
        except ValueError:
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        if statuscode != 200:
            self.close()
            raise HTTPError((statuscode, statusline[2]))
        # HTTP proxies don't report a bound address; use a placeholder.
        self.__proxysockname = ("0.0.0.0", 0)
        self.__proxypeername = (addr, destport)
def connect(self, destpair):
"""connect(self,despair)
Connects to the specified destination through a proxy.
destpar - A tuple of the IP/DNS address and the port number.
(identical to socket's connect).
To select the proxy server use setproxy().
"""
# Do a minimal input check first
# TODO(durin42): seriously? type checking? do we care?
if ((not isinstance(destpair, (list, tuple))) or len(destpair) < 2
or not isinstance(destpair[0], str) or not isinstance(destpair[1], int)):
raise GeneralProxyError((5, _generalerrors[5]))
if self.__proxy[0] == PROXY_TYPE_SOCKS5:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
socket.socket.connect(self,(self.__proxy[1], portnum))
self.__negotiatesocks5(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
socket.socket.connect(self, (self.__proxy[1], portnum))
self.__negotiatesocks4(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_HTTP:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 8080
socket.socket.connect(self, (self.__proxy[1], portnum))
self.__negotiatehttp(destpair[0], destpair[1])
elif self.__proxy[0] == None:
socket.socket.connect(self, (destpair[0], destpair[1]))
else:
raise GeneralProxyError((4, _generalerrors[4]))
| Python |
#!/usr/bin/python2.5
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Tasks Porter configuration file."""
__author__ = "dwightguth@google.com (Dwight Guth)"
from google.appengine.dist import use_library
use_library("django", "1.2")
| Python |
# Early, and incomplete implementation of -04.
#
import re
import urllib
RESERVED = ":/?#[]@!$&'()*+,;="
OPERATOR = "+./;?|!@"
EXPLODE = "*+"
MODIFIER = ":^"
TEMPLATE = re.compile(r"{(?P<operator>[\+\./;\?|!@])?(?P<varlist>[^}]+)}", re.UNICODE)
VAR = re.compile(r"^(?P<varname>[^=\+\*:\^]+)((?P<explode>[\+\*])|(?P<partial>[:\^]-?[0-9]+))?(=(?P<default>.*))?$", re.UNICODE)
def _tostring(varname, value, explode, operator, safe=""):
    """Render a variable for the simple ('' and '+') operators.

    Lists and dicts join their members with ','; the '+' explode modifier
    prefixes each member with the variable name. Scalars are simply
    percent-encoded, leaving the characters in `safe` untouched.
    """
    # isinstance instead of type() comparison: also accepts list subclasses.
    if isinstance(value, list):
        if explode == "+":
            return ",".join([varname + "." + urllib.quote(x, safe) for x in value])
        else:
            return ",".join([urllib.quote(x, safe) for x in value])
    if isinstance(value, dict):
        # Deterministic output: dict iteration order is not guaranteed.
        keys = sorted(value.keys())
        if explode == "+":
            return ",".join([varname + "." + urllib.quote(key, safe) + "," + urllib.quote(value[key], safe) for key in keys])
        else:
            return ",".join([urllib.quote(key, safe) + "," + urllib.quote(value[key], safe) for key in keys])
    else:
        return urllib.quote(value, safe)
def _tostring_path(varname, value, explode, operator, safe=""):
    """Render a variable for the path-style ('/' and '.') operators.

    The operator character itself is the join string when exploding;
    without an explode modifier, members are joined with ','.
    """
    joiner = operator
    # isinstance instead of type() comparison: also accepts subclasses.
    if isinstance(value, list):
        if explode == "+":
            return joiner.join([varname + "." + urllib.quote(x, safe) for x in value])
        elif explode == "*":
            return joiner.join([urllib.quote(x, safe) for x in value])
        else:
            return ",".join([urllib.quote(x, safe) for x in value])
    elif isinstance(value, dict):
        # Deterministic output: dict iteration order is not guaranteed.
        keys = sorted(value.keys())
        if explode == "+":
            return joiner.join([varname + "." + urllib.quote(key, safe) + joiner + urllib.quote(value[key], safe) for key in keys])
        elif explode == "*":
            return joiner.join([urllib.quote(key, safe) + joiner + urllib.quote(value[key], safe) for key in keys])
        else:
            return ",".join([urllib.quote(key, safe) + "," + urllib.quote(value[key], safe) for key in keys])
    else:
        if value:
            return urllib.quote(value, safe)
        else:
            # An empty scalar contributes nothing to a path segment.
            return ""
def _tostring_query(varname, value, explode, operator, safe=""):
    """Render a variable for the form-style (';' and '?') operators.

    '?' joins exploded pairs with '&' and prefixes non-exploded values with
    'name='; ';' uses itself as the joiner. Empty collections render as "".
    """
    joiner = operator
    varprefix = ""
    if operator == "?":
        joiner = "&"
        varprefix = varname + "="
    # isinstance instead of type() comparison: also accepts subclasses.
    if isinstance(value, list):
        if 0 == len(value):
            return ""
        if explode == "+":
            return joiner.join([varname + "=" + urllib.quote(x, safe) for x in value])
        elif explode == "*":
            return joiner.join([urllib.quote(x, safe) for x in value])
        else:
            return varprefix + ",".join([urllib.quote(x, safe) for x in value])
    elif isinstance(value, dict):
        if 0 == len(value):
            return ""
        # Deterministic output: dict iteration order is not guaranteed.
        keys = sorted(value.keys())
        if explode == "+":
            return joiner.join([varname + "." + urllib.quote(key, safe) + "=" + urllib.quote(value[key], safe) for key in keys])
        elif explode == "*":
            return joiner.join([urllib.quote(key, safe) + "=" + urllib.quote(value[key], safe) for key in keys])
        else:
            return varprefix + ",".join([urllib.quote(key, safe) + "," + urllib.quote(value[key], safe) for key in keys])
    else:
        if value:
            return varname + "=" + urllib.quote(value, safe)
        else:
            # Present-but-empty variable: emit the bare name.
            return varname
# Dispatch table: expansion operator -> rendering function.
# NOTE(review): the TEMPLATE regex also admits '|', '!' and '@' operators,
# which have no entry here, so expand() raises KeyError for them.
TOSTRING = {
    "" : _tostring,
    "+": _tostring,
    ";": _tostring_query,
    "?": _tostring_query,
    "/": _tostring_path,
    ".": _tostring_path,
    }
def expand(template, vars):
    """Expand every {...} expression in `template` using the dict `vars`.

    Implements an early (-04) draft of the URI Template spec: each TEMPLATE
    match is replaced by the rendered, percent-encoded variable values.
    """
    def _sub(match):
        groupdict = match.groupdict()
        operator = groupdict.get('operator')
        if operator is None:
            operator = ''
        varlist = groupdict.get('varlist')
        # Only the reserved-string operator '+' leaves RESERVED chars intact.
        safe = "@"
        if operator == '+':
            safe = RESERVED
        # Parse each varspec into (name, explode flag, partial modifier),
        # collecting '=default' values on the side.
        varspecs = varlist.split(",")
        varnames = []
        defaults = {}
        for varspec in varspecs:
            m = VAR.search(varspec)
            groupdict = m.groupdict()
            varname = groupdict.get('varname')
            explode = groupdict.get('explode')
            partial = groupdict.get('partial')
            default = groupdict.get('default')
            if default:
                defaults[varname] = default
            varnames.append((varname, explode, partial))
        retval = []
        # The operator doubles as prefix and joiner, with these exceptions.
        joiner = operator
        prefix = operator
        if operator == "+":
            prefix = ""
            joiner = ","
        if operator == "?":
            joiner = "&"
        if operator == "":
            joiner = ","
        for varname, explode, partial in varnames:
            if varname in vars:
                value = vars[varname]
                #if not value and (type(value) == type({}) or type(value) == type([])) and varname in defaults:
                # Fall back to the default for empty values -- but an empty
                # *string* counts as deliberately supplied and is kept.
                if not value and value != "" and varname in defaults:
                    value = defaults[varname]
            elif varname in defaults:
                value = defaults[varname]
            else:
                # No value and no default: this varspec contributes nothing.
                continue
            retval.append(TOSTRING[operator](varname, value, explode, operator, safe=safe))
        if "".join(retval):
            return prefix + joiner.join(retval)
        else:
            # All variables empty/undefined: drop the whole expression.
            return ""
    return TEMPLATE.sub(_sub, template)
| Python |
#!/usr/bin/python2.5
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main web application handler for Google Tasks Porter."""
__author__ = "dwightguth@google.com (Dwight Guth)"
import logging
import os
import pickle
import urllib
from apiclient import discovery
from apiclient.oauth2client import appengine
from apiclient.oauth2client import client
from google.appengine.api import mail
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp import util
import httplib2
import model
import settings
def _RedirectForOAuth(self, user):
    """Redirects the webapp response to authenticate the user with OAuth2.

    Note: despite the `self` parameter this is a free function; handlers
    pass themselves in so it can drive their redirect.
    """
    flow = client.OAuth2WebServerFlow(
        client_id=settings.CLIENT_ID + ".apps.googleusercontent.com",
        client_secret=settings.CLIENT_SECRET,
        scope="https://www.googleapis.com/auth/tasks",
        user_agent="task-porter/1.0",
        xoauth_displayname="Google Tasks Porter",
        # Remember where to return to after the OAuth dance (see OAuthHandler).
        state=self.request.path_qs)
    callback = self.request.relative_url("/oauth2callback")
    authorize_url = flow.step1_get_authorize_url(callback)
    # Stash the flow so /oauth2callback can finish the exchange.
    memcache.set(user.user_id(), pickle.dumps(flow))
    self.redirect(authorize_url)
def _GetCredentials():
    """Return (user, credentials) for the current request.

    credentials is None when no valid, working credentials are stored.
    """
    user = users.get_current_user()
    credentials = appengine.StorageByKeyName(
        model.Credentials, user.user_id(), "credentials").get()
    # so it turns out that the method that checks if the credentials are okay
    # doesn't give the correct answer unless you try to refresh it. So we do that
    # here in order to make sure that the credentials are valid before being
    # passed to a worker. Obviously if the user revokes the credentials after
    # this point we will continue to get an error, but we can't stop that.
    if credentials and not credentials.invalid:
        try:
            http = httplib2.Http()
            http = credentials.authorize(http)
            service = discovery.build("tasks", "v1", http)
            tasklists = service.tasklists()
            tasklists_list = tasklists.list().execute()
        # BUGFIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt and hid the failure entirely; narrow and log it.
        except Exception:
            logging.exception("Stored credentials failed a test request.")
            credentials = None
    return user, credentials
class MainHandler(webapp.RequestHandler):
    """Handler for /."""

    def get(self):
        """Handles GET requests for /: renders the landing page."""
        credentials = _GetCredentials()[1]
        path = os.path.join(os.path.dirname(__file__), "index.html")
        # The two original branches built identical template values except
        # for the flag, so compute the flag once and render once.
        template_values = {
            "is_authorized": bool(credentials) and not credentials.invalid,
            "msg": self.request.get("msg"),
            "logout_url": users.create_logout_url("/"),
        }
        self.response.out.write(template.render(path, template_values))
class AuthRedirectHandler(webapp.RequestHandler):
    """Handler for /auth."""

    def get(self):
        """Handles GET requests for /auth.

        Sends already-authorized users home; everyone else is bounced into
        the OAuth2 flow.
        """
        user, credentials = _GetCredentials()
        if credentials and not credentials.invalid:
            self.redirect("/")
        else:
            _RedirectForOAuth(self, user)
class ListHandler(webapp.RequestHandler):
    """Handler for /snapshots."""
    def get(self):
        """Handles GET requests for /snapshots: lists export snapshots."""
        user, credentials = _GetCredentials()
        if not credentials or credentials.invalid:
            _RedirectForOAuth(self, user)
        else:
            path = os.path.join(os.path.dirname(__file__), "snapshots.html")
            snapshots = model.Snapshot.gql("WHERE user = :user and type = 'export'",
                                           user=user)
            counts = []
            refresh = False
            # NOTE(review): one TaskList query per snapshot plus one count()
            # per tasklist (N+1 pattern); fine for small N.
            for snapshot in snapshots:
                tasklists = model.TaskList.gql("WHERE ANCESTOR IS :id",
                                               id=snapshot.key())
                task_count = 0
                tasklist_count = 0
                if snapshot.status == "building":
                    # A snapshot is still being built: the page auto-refreshes.
                    refresh = True
                for tasklist in tasklists:
                    task_count += tasklist.tasks.count()
                    tasklist_count += 1
                counts.append((snapshot, task_count, tasklist_count))
            template_values = {"snapshots": counts,
                               "msg": self.request.get("msg"),
                               "refresh": refresh,
                               "logout_url": users.create_logout_url("/snapshots")}
            self.response.out.write(template.render(path, template_values))
class SnapshotHandler(webapp.RequestHandler):
    """Handler for /snapshot."""
    def get(self):
        """Handles GET requests for /snapshot: starts an export job."""
        user, credentials = _GetCredentials()
        if not credentials or credentials.invalid:
            _RedirectForOAuth(self, user)
        else:
            # need to create snapshot outside of task queue because there is no easy
            # way to pass user identity to a task other than through a datastore
            # entity.
            snapshot = model.Snapshot()
            snapshot.type = "export"
            snapshot.user = users.get_current_user()
            snapshot.status = "building"
            snapshot.put()
            # The worker looks the Snapshot entity up by this id.
            taskqueue.add(url="/worker/snapshot",
                          params={"id": snapshot.key().id()})
            self.redirect("/snapshots")
class DeleteHandler(webapp.RequestHandler):
    """Handler for /delete."""
    def get(self):
        """Handles GET requests for /delete: queues deletion of a snapshot."""
        user, credentials = _GetCredentials()
        # Redirect back to whichever page triggered the delete.
        if self.request.get("import"):
            url = "/import"
        else:
            url = "/snapshots"
        if not credentials or credentials.invalid:
            self.redirect(url)
        else:
            if not self.request.get("id"):
                self.redirect(url + "?msg=NO_ID_DELETE")
                return
            # Filtering on user as well as key prevents deleting another
            # user's snapshot by guessing ids.
            snapshot = model.Snapshot.gql("WHERE user = :user "
                                          "AND __key__ = KEY('Snapshot', :key)",
                                          user=user,
                                          key=int(self.request.get("id"))).get()
            if snapshot is None:
                self.redirect(url + "?msg=INVALID_SNAPSHOT")
                return
            if snapshot.status == "building":
                # can't delete until snapshot is done
                self.redirect(url + "?msg=DELETE_BUILDING")
                return
            taskqueue.add(url="/worker/delete",
                          params={"id": snapshot.key().id()})
            self.redirect(url + "?msg=SNAPSHOT_DELETING")
class DownloadHandler(webapp.RequestHandler):
    """Handler for /download."""

    def get(self):
        """Handles GET requests for /download.

        This handler takes the following query parameters:
        id: the internal id serving as key for the snapshot to download.
        format: either "ics", "csv", or "html" depending on what format is
            selected to download.
        """
        user, credentials = _GetCredentials()
        if not credentials or credentials.invalid:
            self.redirect("/snapshots")
        else:
            if not self.request.get("id"):
                self.redirect("/snapshots?msg=NO_ID_EXPORT")
                return
            # Filtering on user as well as key prevents reading another
            # user's snapshot by guessing ids.
            snapshot = model.Snapshot.gql("WHERE user = :user "
                                          "AND __key__ = KEY('Snapshot', :key)",
                                          user=user,
                                          key=int(self.request.get("id"))).get()
            if snapshot is None:
                # BUGFIX: an unknown or foreign id used to raise an
                # AttributeError below; redirect like DeleteHandler does.
                self.redirect("/snapshots?msg=INVALID_SNAPSHOT")
                return
            tasklist_entities = model.TaskList.gql("WHERE ANCESTOR IS :id",
                                                   id=snapshot.key())
            template_values = {"tasklists": list(tasklist_entities),
                               "now": snapshot.timestamp}
            download_format = self.request.get("format")
            if download_format == "ics":
                self.WriteIcsTemplate(template_values)
            elif download_format == "csv":
                self.WriteCsvTemplate(template_values)
            elif download_format == "html":
                self.WriteHtmlTemplate(template_values)
            # Unknown formats fall through and return an empty 200 response,
            # matching the original behavior.

    def WriteIcsTemplate(self, template_values):
        """Renders the snapshot as an iCalendar file attachment."""
        self.response.headers["Content-Type"] = "text/calendar"
        self.response.headers.add_header(
            "Content-Disposition", "attachment; filename=tasks_%s.ics" %
            template_values["now"].strftime("%m-%d-%Y"))
        path = os.path.join(os.path.dirname(__file__), "todo.ics")
        self.response.out.write(template.render(path, template_values))

    def WriteCsvTemplate(self, template_values):
        """Renders the snapshot as a CSV file attachment."""
        self.response.headers["Content-Type"] = "text/csv"
        self.response.headers.add_header(
            "Content-Disposition", "attachment; filename=tasks_%s.csv" %
            template_values["now"].strftime("%m-%d-%Y"))
        path = os.path.join(os.path.dirname(__file__), "todo.csv")
        self.response.out.write(template.render(path, template_values))

    def WriteHtmlTemplate(self, template_values):
        """Renders the snapshot inline as an HTML page."""
        path = os.path.join(os.path.dirname(__file__), "todo.html")
        self.response.out.write(template.render(path, template_values))
class ImportHandler(webapp.RequestHandler):
    """Handler for /import."""
    def get(self):
        """Handles GET requests for /import: lists import snapshots."""
        user, credentials = _GetCredentials()
        if not credentials or credentials.invalid:
            _RedirectForOAuth(self, user)
        else:
            path = os.path.join(os.path.dirname(__file__), "import.html")
            snapshots = model.Snapshot.gql("WHERE user = :user "
                                           "and type = 'import'",
                                           user=user)
            titles = []
            refresh = False
            for snapshot in snapshots:
                if snapshot.status == "completed":
                    # An import produces one tasklist; show its title.
                    title = model.TaskList.gql("WHERE ANCESTOR IS :id",
                                               id=snapshot.key()).get().title
                else:
                    title = ""
                if snapshot.status == "building":
                    # An import is still running: the page auto-refreshes.
                    refresh = True
                titles.append((snapshot, title))
            template_values = {"snapshots": titles,
                               "msg": self.request.get("msg"),
                               "refresh": refresh,
                               "logout_url": users.create_logout_url("/import")}
            self.response.out.write(template.render(path, template_values))
    def post(self):
        """Handles POST requests for /import.
        This handler takes the following query parameters:
        name: The name of the tasklist to create and put the imported tasks into.
        format: either "ics" or "csv" depending on what format to import from.
        The body of the POST request requires the following parameters:
        file: a file reference containing either the ics or csv file to import.
        """
        if (not self.request.get("file") or
            not self.request.get("name") or
            not self.request.get("format")):
            self.redirect("/import?msg=REQUIRED_FIELD")
            return
        snapshot = model.Snapshot()
        snapshot.type = "import"
        snapshot.user = users.get_current_user()
        snapshot.status = "building"
        snapshot.put()
        logging.info(snapshot.key().id())
        try:
            # The uploaded file rides along in the task payload, so oversized
            # uploads surface here as TaskTooLargeError.
            taskqueue.add(url="/worker/import",
                          params={"file": self.request.get("file"),
                                  "name": self.request.get("name"),
                                  "format": self.request.get("format"),
                                  "id": snapshot.key().id()})
        except taskqueue.TaskTooLargeError, e:
            logging.info(e, exc_info=True)
            self.redirect("/import?msg=FILE_TOO_LARGE")
            return
        self.redirect("/import")
class SendMailHandler(webapp.RequestHandler):
    """Handler for /sendmail."""

    def get(self):
        """Handles GET requests for /sendmail: shows the compose form."""
        user, credentials = _GetCredentials()
        if not credentials or credentials.invalid:
            _RedirectForOAuth(self, user)
        else:
            path = os.path.join(os.path.dirname(__file__), "sendmail.html")
            template_values = {"id": self.request.get("id"),
                               "msg": self.request.get("msg"),
                               "logout_url": users.create_logout_url("/sendmail")}
            self.response.out.write(template.render(path, template_values))

    def post(self):
        """Handles POST requests for /sendmail.

        This handler takes the following query parameters:
        id: the internal id serving as key for the snapshot to mail.
        email: the Remember The Milk import email address to send to.
        subject: the name of the task list to create.
        """
        user, credentials = _GetCredentials()
        if not credentials or credentials.invalid:
            _RedirectForOAuth(self, user)
        else:
            if not self.request.get("id"):
                self.redirect("/snapshots?msg=NO_ID_EXPORT")
                return
            if (not self.request.get("email") or
                not self.request.get("subject")):
                self.redirect("/sendmail?id=%s&msg=REQUIRED_FIELD" %
                              urllib.quote_plus(self.request.get("id")))
                return
            # Filtering on user as well as key prevents mailing another
            # user's snapshot by guessing ids.
            snapshot = model.Snapshot.gql("WHERE user = :user "
                                          "AND __key__ = KEY('Snapshot', :key)",
                                          user=user,
                                          key=int(self.request.get("id"))).get()
            if snapshot is None:
                # BUGFIX: an unknown or foreign id used to raise an
                # AttributeError below; redirect like DeleteHandler does.
                self.redirect("/snapshots?msg=INVALID_SNAPSHOT")
                return
            tasklist_entities = model.TaskList.gql("WHERE ANCESTOR IS :id",
                                                   id=snapshot.key())
            template_values = {"tasklists": list(tasklist_entities),
                               "now": snapshot.timestamp}
            email_body = self.GenerateEmailBody(template_values)
            mail.send_mail(sender="noreply@google.com",
                           to=self.request.get("email"),
                           subject=self.request.get("subject"),
                           body=email_body)
            self.redirect("/snapshots")

    def GenerateEmailBody(self, template_values):
        """Renders the plain-text email body from the todo.txt template."""
        path = os.path.join(os.path.dirname(__file__), "todo.txt")
        return template.render(path, template_values)
class OAuthHandler(webapp.RequestHandler):
    """Handler for /oauth2callback."""

    def get(self):
        """Handles GET requests for /oauth2callback.

        Finishes the OAuth2 exchange started by _RedirectForOAuth, stores
        the resulting credentials, and returns to the page recorded in the
        `state` parameter.
        """
        if not self.request.get("code"):
            self.redirect("/")
            return
        user = users.get_current_user()
        # BUGFIX: memcache entries can be evicted, in which case get()
        # returns None and pickle.loads(None) raised a TypeError. Check
        # before unpickling (the flow truthiness test moved up with it).
        pickled_flow = memcache.get(user.user_id())
        if pickled_flow:
            flow = pickle.loads(pickled_flow)
            error = False
            try:
                credentials = flow.step2_exchange(self.request.params)
            except client.FlowExchangeError:
                credentials = None
                error = True
            appengine.StorageByKeyName(
                model.Credentials, user.user_id(), "credentials").put(credentials)
            if error:
                self.redirect("/?msg=ACCOUNT_ERROR")
            else:
                self.redirect(self.request.get("state"))
def main():
    """Registers template helpers, builds the route table, and serves."""
    template.register_template_library("common.customdjango")
    application = webapp.WSGIApplication(
        [
            ("/", MainHandler),
            ("/auth", AuthRedirectHandler),
            ("/delete", DeleteHandler),
            ("/download", DownloadHandler),
            ("/import", ImportHandler),
            ("/oauth2callback", OAuthHandler),
            ("/sendmail", SendMailHandler),
            ("/snapshot", SnapshotHandler),
            ("/snapshots", ListHandler)
        ])
    util.run_wsgi_app(application)

if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/python2.5
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parser for iCalendar file data."""
__author__ = "dwightguth@google.com (Dwight Guth)"
import datetime
import logging
import vobject
import model
class Parser(object):
    """Parses VTODO components into App Engine datastore entities."""

    def __init__(self, tasklist):
        """Creates a new Parser object.

        Args:
          tasklist: the tasklist datastore entity to put the parsed tasks into.
        """
        self.tasklist = tasklist

    def ParseAndStore(self, vcal_data):
        """Parses the provided data and stores the resulting entities.

        Args:
          vcal_data: the text of an ics file to be parsed for todo objects.
        Returns:
          The list of entities created by parsing vcal_data.
        """
        calendar = vobject.readOne(vcal_data)
        return [self.ParseItem(component) for component in calendar.components()]

    def ParseItem(self, item):
        """Parses a single VTODO object and stores the resulting entity.

        Args:
          item: an icalendar object representing a VTODO object.
        Returns:
          The entity created by parsing item.
        """
        logging.info(item)
        task = model.Task()
        if self.tasklist:
            task.parent_entity = self.tasklist
        contents = item.contents
        # The datastore model requires a title, so fall back to "".
        task.title = item.summary.value if "summary" in contents else ""
        if "description" in contents:
            task.notes = item.description.value
        if "due" in contents:
            due = item.due.value
            # DUE may be a date or a datetime; normalize to a date.
            if isinstance(due, datetime.datetime):
                task.due = due.date()
            elif isinstance(due, datetime.date):
                task.due = due
        if "completed" in contents:
            # we don't use the status field because iCalendar doesn't always
            # specify it on completed tasks
            task.status = "completed"
            task.completed = item.completed.value
        else:
            task.status = "needsAction"
        task.put()
        return task
| Python |
#!/usr/bin/python2.5
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Represents the App Engine model of Google Tasks data in the datastore."""
from apiclient.oauth2client import appengine
from google.appengine.ext import db
class Credentials(db.Model):
    """Represents the credentials of a particular user."""

    # OAuth2 credentials blob for the user (entity keyed by user_id).
    credentials = appengine.CredentialsProperty()
class Snapshot(db.Model):
    """The datastore entity for a Snapshot of a user's data."""

    # Owner of the snapshot.
    user = db.UserProperty()
    # Whether the snapshot was produced by an import or an export run.
    type = db.StringProperty(choices=("import", "export"))
    # Creation time; set automatically on first put().
    timestamp = db.DateTimeProperty(auto_now_add=True)
    # Lifecycle state of the background job building this snapshot.
    status = db.StringProperty(choices=("building", "completed", "error"))
    # Human-readable detail when status == "error".
    errorMessage = db.StringProperty()
class TaskList(db.Model):
    """The datastore entity for a list of tasks."""

    # Tasks API tasklist id. NOTE: shadows the builtin `id` in the class
    # body only; kept as-is for compatibility with existing callers.
    id = db.StringProperty()
    title = db.TextProperty() #CATEGORIES/Categories
    selfLink = db.LinkProperty()
class Task(db.Model):
    """The datastore entity for a single task."""

    # Owning task list; reverse query is available as tasklist.tasks.
    parent_entity = db.ReferenceProperty(TaskList, collection_name="tasks")
    id = db.StringProperty() #UID
    selfLink = db.LinkProperty()
    title = db.TextProperty() #SUMMARY/Subject
    notes = db.TextProperty() #DESCRIPTION/Notes
    # Parent task for subtasks; trailing underscore avoids clashing with
    # db.Model's own parent().
    parent_ = db.SelfReferenceProperty(collection_name="children")
    position = db.StringProperty()
    updated = db.DateTimeProperty() #LAST-MODIFIED
    due = db.DateProperty() #DUE/Due Date
    hidden = db.BooleanProperty()
    status = db.StringProperty(choices=("completed",
                                        "needsAction")) #STATUS/Status
    deleted = db.BooleanProperty()
    completed = db.DateTimeProperty() #COMPLETED/Date Completed
# NOTE(review): module-level relationship registries, empty here --
# presumably populated elsewhere; confirm against the code that imports
# this module before relying on their contents.
child_mapping = {}
many_many_mapping = {}
| Python |
"""
Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
import datetime
import struct
import time
import sys
import os
# Placeholders for lazily-imported sibling dateutil modules; filled in on
# first use (see tzrange below, which imports relativedelta on demand).
relativedelta = None
parser = None
rrule = None

__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
           "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz"]

try:
    from dateutil.tzwin import tzwin, tzwinlocal
except (ImportError, OSError):
    # Not on Windows (or the win32 registry is unavailable): export None.
    tzwin, tzwinlocal = None, None

# Shared zero timedelta, returned wherever there is no offset/DST.
ZERO = datetime.timedelta(0)
# Ordinal day number of the Unix epoch (1970-01-01); used to turn naive
# datetimes into POSIX-style timestamps arithmetically.
EPOCHORDINAL = datetime.datetime.utcfromtimestamp(0).toordinal()
class tzutc(datetime.tzinfo):
    """Concrete tzinfo for UTC: zero offset, no daylight saving."""

    def utcoffset(self, dt):
        return ZERO

    def dst(self, dt):
        return ZERO

    def tzname(self, dt):
        return "UTC"

    def __eq__(self, other):
        if isinstance(other, tzutc):
            return True
        # A fixed offset of exactly zero also counts as UTC.
        return isinstance(other, tzoffset) and other._offset == ZERO

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "%s()" % self.__class__.__name__

    __reduce__ = object.__reduce__
class tzoffset(datetime.tzinfo):
    """Fixed-offset timezone: a name plus a constant UTC offset in seconds."""

    def __init__(self, name, offset):
        self._name = name
        self._offset = datetime.timedelta(seconds=offset)

    def utcoffset(self, dt):
        return self._offset

    def dst(self, dt):
        # A fixed offset never observes daylight saving time.
        return ZERO

    def tzname(self, dt):
        return self._name

    def __eq__(self, other):
        # Equality ignores the name; only the offset matters.
        return (isinstance(other, tzoffset) and
                self._offset == other._offset)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        # BUGFIX: repr() call instead of the Python2-only (and long
        # deprecated) backtick syntax, which is a SyntaxError on Python 3.
        return "%s(%s, %s)" % (self.__class__.__name__,
                               repr(self._name),
                               self._offset.days*86400+self._offset.seconds)

    __reduce__ = object.__reduce__
class tzlocal(datetime.tzinfo):
    """tzinfo mirroring the system local timezone via the time module.

    The standard and DST offsets are captured once, at class-definition
    time, from time.timezone / time.altzone.
    """

    _std_offset = datetime.timedelta(seconds=-time.timezone)
    if time.daylight:
        _dst_offset = datetime.timedelta(seconds=-time.altzone)
    else:
        _dst_offset = _std_offset

    def utcoffset(self, dt):
        """Return the local UTC offset in effect at dt (DST-aware)."""
        if self._isdst(dt):
            return self._dst_offset
        else:
            return self._std_offset

    def dst(self, dt):
        """Return the DST adjustment in effect at dt."""
        if self._isdst(dt):
            return self._dst_offset-self._std_offset
        else:
            return ZERO

    def tzname(self, dt):
        return time.tzname[self._isdst(dt)]

    def _isdst(self, dt):
        """Return whether dt falls in local DST.

        We can't use mktime here. It is unstable when deciding if
        the hour near to a change is DST or not: repeated calls of
        time.localtime(time.mktime(dt.timetuple())).tm_isdst around a
        transition can flip between answers. Instead, build the POSIX
        timestamp arithmetically and let localtime() classify it.
        """
        timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
                     + dt.hour * 3600
                     + dt.minute * 60
                     + dt.second)
        return time.localtime(timestamp+time.timezone).tm_isdst

    def __eq__(self, other):
        if not isinstance(other, tzlocal):
            return False
        # BUGFIX: an unreachable `return True` used to follow this return;
        # removed (dead code, behavior unchanged).
        return (self._std_offset == other._std_offset and
                self._dst_offset == other._dst_offset)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "%s()" % self.__class__.__name__

    __reduce__ = object.__reduce__
class _ttinfo(object):
__slots__ = ["offset", "delta", "isdst", "abbr", "isstd", "isgmt"]
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def __repr__(self):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, `value`))
return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
def __eq__(self, other):
if not isinstance(other, _ttinfo):
return False
return (self.offset == other.offset and
self.delta == other.delta and
self.isdst == other.isdst and
self.abbr == other.abbr and
self.isstd == other.isstd and
self.isgmt == other.isgmt)
def __ne__(self, other):
return not self.__eq__(other)
def __getstate__(self):
state = {}
for name in self.__slots__:
state[name] = getattr(self, name, None)
return state
def __setstate__(self, state):
for name in self.__slots__:
if name in state:
setattr(self, name, state[name])
class tzfile(datetime.tzinfo):
# http://www.twinsun.com/tz/tz-link.htm
# ftp://elsie.nci.nih.gov/pub/tz*.tar.gz
def __init__(self, fileobj):
if isinstance(fileobj, basestring):
self._filename = fileobj
fileobj = open(fileobj)
elif hasattr(fileobj, "name"):
self._filename = fileobj.name
else:
self._filename = `fileobj`
# From tzfile(5):
#
# The time zone information files used by tzset(3)
# begin with the magic characters "TZif" to identify
# them as time zone information files, followed by
# sixteen bytes reserved for future use, followed by
# six four-byte values of type long, written in a
# ``standard'' byte order (the high-order byte
# of the value is written first).
if fileobj.read(4) != "TZif":
raise ValueError, "magic not found"
fileobj.read(16)
(
# The number of UTC/local indicators stored in the file.
ttisgmtcnt,
# The number of standard/wall indicators stored in the file.
ttisstdcnt,
# The number of leap seconds for which data is
# stored in the file.
leapcnt,
# The number of "transition times" for which data
# is stored in the file.
timecnt,
# The number of "local time types" for which data
# is stored in the file (must not be zero).
typecnt,
# The number of characters of "time zone
# abbreviation strings" stored in the file.
charcnt,
) = struct.unpack(">6l", fileobj.read(24))
# The above header is followed by tzh_timecnt four-byte
# values of type long, sorted in ascending order.
# These values are written in ``standard'' byte order.
# Each is used as a transition time (as returned by
# time(2)) at which the rules for computing local time
# change.
if timecnt:
self._trans_list = struct.unpack(">%dl" % timecnt,
fileobj.read(timecnt*4))
else:
self._trans_list = []
# Next come tzh_timecnt one-byte values of type unsigned
# char; each one tells which of the different types of
# ``local time'' types described in the file is associated
# with the same-indexed transition time. These values
# serve as indices into an array of ttinfo structures that
# appears next in the file.
if timecnt:
self._trans_idx = struct.unpack(">%dB" % timecnt,
fileobj.read(timecnt))
else:
self._trans_idx = []
# Each ttinfo structure is written as a four-byte value
# for tt_gmtoff of type long, in a standard byte
# order, followed by a one-byte value for tt_isdst
# and a one-byte value for tt_abbrind. In each
# structure, tt_gmtoff gives the number of
# seconds to be added to UTC, tt_isdst tells whether
# tm_isdst should be set by localtime(3), and
# tt_abbrind serves as an index into the array of
# time zone abbreviation characters that follow the
# ttinfo structure(s) in the file.
ttinfo = []
for i in range(typecnt):
ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))
abbr = fileobj.read(charcnt)
# Then there are tzh_leapcnt pairs of four-byte
# values, written in standard byte order; the
# first value of each pair gives the time (as
# returned by time(2)) at which a leap second
# occurs; the second gives the total number of
# leap seconds to be applied after the given time.
# The pairs of values are sorted in ascending order
# by time.
# Not used, for now
if leapcnt:
leap = struct.unpack(">%dl" % (leapcnt*2),
fileobj.read(leapcnt*8))
# Then there are tzh_ttisstdcnt standard/wall
# indicators, each stored as a one-byte value;
# they tell whether the transition times associated
# with local time types were specified as standard
# time or wall clock time, and are used when
# a time zone file is used in handling POSIX-style
# time zone environment variables.
if ttisstdcnt:
isstd = struct.unpack(">%db" % ttisstdcnt,
fileobj.read(ttisstdcnt))
# Finally, there are tzh_ttisgmtcnt UTC/local
# indicators, each stored as a one-byte value;
# they tell whether the transition times associated
# with local time types were specified as UTC or
# local time, and are used when a time zone file
# is used in handling POSIX-style time zone envi-
# ronment variables.
if ttisgmtcnt:
isgmt = struct.unpack(">%db" % ttisgmtcnt,
fileobj.read(ttisgmtcnt))
# ** Everything has been read **
# Build ttinfo list
self._ttinfo_list = []
for i in range(typecnt):
gmtoff, isdst, abbrind = ttinfo[i]
# Round to full-minutes if that's not the case. Python's
# datetime doesn't accept sub-minute timezones. Check
# http://python.org/sf/1447945 for some information.
gmtoff = (gmtoff+30)//60*60
tti = _ttinfo()
tti.offset = gmtoff
tti.delta = datetime.timedelta(seconds=gmtoff)
tti.isdst = isdst
tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
self._ttinfo_list.append(tti)
# Replace ttinfo indexes for ttinfo objects.
trans_idx = []
for idx in self._trans_idx:
trans_idx.append(self._ttinfo_list[idx])
self._trans_idx = tuple(trans_idx)
# Set standard, dst, and before ttinfos. before will be
# used when a given time is before any transitions,
# and will be set to the first non-dst ttinfo, or to
# the first dst, if all of them are dst.
self._ttinfo_std = None
self._ttinfo_dst = None
self._ttinfo_before = None
if self._ttinfo_list:
if not self._trans_list:
self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
else:
for i in range(timecnt-1,-1,-1):
tti = self._trans_idx[i]
if not self._ttinfo_std and not tti.isdst:
self._ttinfo_std = tti
elif not self._ttinfo_dst and tti.isdst:
self._ttinfo_dst = tti
if self._ttinfo_std and self._ttinfo_dst:
break
else:
if self._ttinfo_dst and not self._ttinfo_std:
self._ttinfo_std = self._ttinfo_dst
for tti in self._ttinfo_list:
if not tti.isdst:
self._ttinfo_before = tti
break
else:
self._ttinfo_before = self._ttinfo_list[0]
# Now fix transition times to become relative to wall time.
#
# I'm not sure about this. In my tests, the tz source file
# is setup to wall time, and in the binary file isstd and
# isgmt are off, so it should be in wall time. OTOH, it's
# always in gmt time. Let me know if you have comments
# about this.
laststdoffset = 0
self._trans_list = list(self._trans_list)
for i in range(len(self._trans_list)):
tti = self._trans_idx[i]
if not tti.isdst:
# This is std time.
self._trans_list[i] += tti.offset
laststdoffset = tti.offset
else:
# This is dst time. Convert to std.
self._trans_list[i] += laststdoffset
self._trans_list = tuple(self._trans_list)
def _find_ttinfo(self, dt, laststd=0):
    # Return the _ttinfo record in effect at naive datetime *dt*.
    # With laststd true, return the most recent *standard-time* record
    # at or before dt instead (used by dst() to compute the offset
    # difference).  dt's own tzinfo, if any, is ignored.
    #
    # Naive seconds-since-epoch for dt (no timezone applied).
    timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
                 + dt.hour * 3600
                 + dt.minute * 60
                 + dt.second)
    # Find the index of the first transition strictly after timestamp.
    idx = 0
    for trans in self._trans_list:
        if timestamp < trans:
            break
        idx += 1
    else:
        # Past the last transition (or no transitions at all).
        return self._ttinfo_std
    if idx == 0:
        # Before the first transition.
        return self._ttinfo_before
    if laststd:
        # Walk backwards to the nearest non-DST record.
        while idx > 0:
            tti = self._trans_idx[idx-1]
            if not tti.isdst:
                return tti
            idx -= 1
        else:
            return self._ttinfo_std
    else:
        return self._trans_idx[idx-1]
def utcoffset(self, dt):
    """UTC offset in effect at *dt*, as a datetime.timedelta."""
    if not self._ttinfo_std:
        # No zone data was loaded at all.
        return ZERO
    tti = self._find_ttinfo(dt)
    return tti.delta
def dst(self, dt):
    """DST adjustment in effect at *dt* (ZERO during standard time)."""
    if not self._ttinfo_dst:
        return ZERO
    tti = self._find_ttinfo(dt)
    if not tti.isdst:
        return ZERO
    # The documentation says that utcoffset()-dst() must be constant
    # for every dt, so subtract the offset of the last standard-time
    # record rather than a fixed value.
    #
    # An alternative would be
    #     self._ttinfo_dst.offset - self._ttinfo_std.offset
    # but this class stores historical changes in the dst offset, so
    # that would not be the right way to implement this.
    laststd = self._find_ttinfo(dt, laststd=1)
    return tti.delta - laststd.delta
def tzname(self, dt):
    """Abbreviated zone name at *dt*, or None when no data is loaded."""
    if self._ttinfo_std:
        return self._find_ttinfo(dt).abbr
    return None
def __eq__(self, other):
    """Compare by transition data; the source filename is ignored."""
    if not isinstance(other, tzfile):
        return False
    attrs = ("_trans_list", "_trans_idx", "_ttinfo_list")
    return all(getattr(self, a) == getattr(other, a) for a in attrs)
def __ne__(self, other):
    # Explicit inverse of __eq__ (Python 2 does not derive this).
    equal = self.__eq__(other)
    return not equal
def __repr__(self):
    """Debug representation, e.g. tzfile('/etc/localtime')."""
    # repr() instead of Python 2 backticks, which are deprecated in
    # Python 2 and rejected by Python 3; behavior is identical.
    return "%s(%s)" % (self.__class__.__name__, repr(self._filename))
def __reduce__(self):
    """Support pickling by re-reading the zone file on unpickling.

    Raises ValueError when the file no longer exists, since the
    instance could not be reconstructed faithfully.
    """
    # Call-style raise: valid in Python 2 and required by Python 3
    # (the original "raise ValueError, ..." comma form is py2-only).
    if not os.path.isfile(self._filename):
        raise ValueError("Unpickable %s class" % self.__class__.__name__)
    return (self.__class__, (self._filename,))
class tzrange(datetime.tzinfo):
    """A tzinfo built from explicit std/dst abbreviations, offsets (in
    seconds) and relativedelta-based start/end rules."""

    def __init__(self, stdabbr, stdoffset=None,
                 dstabbr=None, dstoffset=None,
                 start=None, end=None):
        global relativedelta
        if not relativedelta:
            from dateutil import relativedelta
        self._std_abbr = stdabbr
        self._dst_abbr = dstabbr
        # Standard offset defaults to UTC.
        if stdoffset is None:
            self._std_offset = ZERO
        else:
            self._std_offset = datetime.timedelta(seconds=stdoffset)
        # DST offset defaults to std+1h when a dst name was given
        # without an explicit offset.
        if dstoffset is not None:
            self._dst_offset = datetime.timedelta(seconds=dstoffset)
        elif dstabbr and stdoffset is not None:
            self._dst_offset = self._std_offset + datetime.timedelta(hours=+1)
        else:
            self._dst_offset = ZERO
        # Default (US-style) rules: DST from the first Sunday of April
        # at 2AM until the last Sunday of October at 1AM standard time.
        if dstabbr and start is None:
            start = relativedelta.relativedelta(
                hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
        self._start_delta = start
        if dstabbr and end is None:
            end = relativedelta.relativedelta(
                hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
        self._end_delta = end

    def utcoffset(self, dt):
        if not self._isdst(dt):
            return self._std_offset
        return self._dst_offset

    def dst(self, dt):
        if not self._isdst(dt):
            return ZERO
        return self._dst_offset - self._std_offset

    def tzname(self, dt):
        if not self._isdst(dt):
            return self._std_abbr
        return self._dst_abbr

    def _isdst(self, dt):
        # No start rule means this zone never observes DST.
        if not self._start_delta:
            return False
        jan1 = datetime.datetime(dt.year, 1, 1)
        start = jan1 + self._start_delta
        end = jan1 + self._end_delta
        dt = dt.replace(tzinfo=None)
        # When the DST period wraps around new year (southern
        # hemisphere), membership is the union of the two halves.
        if start < end:
            return start <= dt < end
        return dt >= start or dt < end

    def __eq__(self, other):
        if not isinstance(other, tzrange):
            return False
        attrs = ("_std_abbr", "_dst_abbr", "_std_offset", "_dst_offset",
                 "_start_delta", "_end_delta")
        return all(getattr(self, a) == getattr(other, a) for a in attrs)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "%s(...)" % self.__class__.__name__

    __reduce__ = object.__reduce__
class tzstr(tzrange):
    """A tzinfo parsed from a POSIX TZ-environment-style string
    (e.g. 'EST5EDT' or 'EST5EDT,M4.1.0/2,M10.5.0/2')."""

    def __init__(self, s):
        global parser
        if not parser:
            from dateutil import parser
        self._s = s

        res = parser._parsetz(s)
        if res is None:
            # Call-style raise: valid in Python 2, required by Python 3
            # (original used the py2-only "raise ValueError, ..." form).
            raise ValueError("unknown string format")

        # Here we break the compatibility with the TZ variable handling.
        # GMT-3 actually *means* the timezone -3.
        if res.stdabbr in ("GMT", "UTC"):
            res.stdoffset *= -1

        # We must initialize it first, since _delta() needs
        # _std_offset and _dst_offset set. Use False in start/end
        # to avoid building it two times.
        tzrange.__init__(self, res.stdabbr, res.stdoffset,
                         res.dstabbr, res.dstoffset,
                         start=False, end=False)

        if not res.dstabbr:
            self._start_delta = None
            self._end_delta = None
        else:
            self._start_delta = self._delta(res.start)
            if self._start_delta:
                self._end_delta = self._delta(res.end, isend=1)

    def _delta(self, x, isend=0):
        """Convert a parsed onset spec *x* into a relativedelta.

        With isend true, the result is shifted back to standard time
        (see the tzinfo documentation on the DST transition hour).
        """
        kwargs = {}
        if x.month is not None:
            kwargs["month"] = x.month
            if x.weekday is not None:
                kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week)
                if x.week > 0:
                    kwargs["day"] = 1
                else:
                    kwargs["day"] = 31
            elif x.day:
                kwargs["day"] = x.day
        elif x.yday is not None:
            kwargs["yearday"] = x.yday
        elif x.jyday is not None:
            kwargs["nlyearday"] = x.jyday
        if not kwargs:
            # Default is to start on first sunday of april, and end
            # on last sunday of october.
            if not isend:
                kwargs["month"] = 4
                kwargs["day"] = 1
                kwargs["weekday"] = relativedelta.SU(+1)
            else:
                kwargs["month"] = 10
                kwargs["day"] = 31
                kwargs["weekday"] = relativedelta.SU(-1)
        if x.time is not None:
            kwargs["seconds"] = x.time
        else:
            # Default is 2AM.
            kwargs["seconds"] = 7200
        if isend:
            # Convert to standard time, to follow the documented way
            # of working with the extra hour. See the documentation
            # of the tzinfo class.
            delta = self._dst_offset-self._std_offset
            kwargs["seconds"] -= delta.seconds+delta.days*86400
        return relativedelta.relativedelta(**kwargs)

    def __repr__(self):
        # repr() instead of Python 2 backticks (removed in Python 3).
        return "%s(%s)" % (self.__class__.__name__, repr(self._s))
class _tzicalvtzcomp:
def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
tzname=None, rrule=None):
self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom)
self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto)
self.tzoffsetdiff = self.tzoffsetto-self.tzoffsetfrom
self.isdst = isdst
self.tzname = tzname
self.rrule = rrule
class _tzicalvtz(datetime.tzinfo):
def __init__(self, tzid, comps=[]):
self._tzid = tzid
self._comps = comps
self._cachedate = []
self._cachecomp = []
def _find_comp(self, dt):
if len(self._comps) == 1:
return self._comps[0]
dt = dt.replace(tzinfo=None)
try:
return self._cachecomp[self._cachedate.index(dt)]
except ValueError:
pass
lastcomp = None
lastcompdt = None
for comp in self._comps:
if not comp.isdst:
# Handle the extra hour in DST -> STD
compdt = comp.rrule.before(dt-comp.tzoffsetdiff, inc=True)
else:
compdt = comp.rrule.before(dt, inc=True)
if compdt and (not lastcompdt or lastcompdt < compdt):
lastcompdt = compdt
lastcomp = comp
if not lastcomp:
# RFC says nothing about what to do when a given
# time is before the first onset date. We'll look for the
# first standard component, or the first component, if
# none is found.
for comp in self._comps:
if not comp.isdst:
lastcomp = comp
break
else:
lastcomp = comp[0]
self._cachedate.insert(0, dt)
self._cachecomp.insert(0, lastcomp)
if len(self._cachedate) > 10:
self._cachedate.pop()
self._cachecomp.pop()
return lastcomp
def utcoffset(self, dt):
return self._find_comp(dt).tzoffsetto
def dst(self, dt):
comp = self._find_comp(dt)
if comp.isdst:
return comp.tzoffsetdiff
else:
return ZERO
def tzname(self, dt):
return self._find_comp(dt).tzname
def __repr__(self):
return "<tzicalvtz %s>" % `self._tzid`
__reduce__ = object.__reduce__
class tzical:
    """Timezone definitions read from the VTIMEZONE blocks of an
    iCalendar (RFC 2445) file or file-like object.

    All "raise X, msg" statements were converted to the call form,
    which behaves identically on Python 2 and also parses on Python 3.
    """

    def __init__(self, fileobj):
        global rrule
        if not rrule:
            from dateutil import rrule
        if isinstance(fileobj, basestring):
            self._s = fileobj
            fileobj = open(fileobj)
        elif hasattr(fileobj, "name"):
            self._s = fileobj.name
        else:
            # repr() instead of Python 2 backticks (removed in Python 3).
            self._s = repr(fileobj)
        self._vtz = {}
        self._parse_rfc(fileobj.read())

    def keys(self):
        """Return the available TZIDs."""
        return self._vtz.keys()

    def get(self, tzid=None):
        """Return the tzinfo for *tzid*; *tzid* may be omitted when the
        file defines exactly one timezone."""
        if tzid is None:
            keys = self._vtz.keys()
            if len(keys) == 0:
                raise ValueError("no timezones defined")
            elif len(keys) > 1:
                raise ValueError("more than one timezone available")
            tzid = keys[0]
        return self._vtz.get(tzid)

    def _parse_offset(self, s):
        """Parse a UTC offset like '+0200' or '-043056' into seconds."""
        s = s.strip()
        if not s:
            raise ValueError("empty offset")
        if s[0] in ('+', '-'):
            signal = (-1,+1)[s[0]=='+']
            s = s[1:]
        else:
            signal = +1
        if len(s) == 4:
            return (int(s[:2])*3600+int(s[2:])*60)*signal
        elif len(s) == 6:
            return (int(s[:2])*3600+int(s[2:4])*60+int(s[4:]))*signal
        else:
            raise ValueError("invalid offset: "+s)

    def _parse_rfc(self, s):
        """Parse iCalendar text and populate self._vtz."""
        lines = s.splitlines()
        if not lines:
            raise ValueError("empty string")

        # Unfold: a leading space continues the previous line.
        i = 0
        while i < len(lines):
            line = lines[i].rstrip()
            if not line:
                del lines[i]
            elif i > 0 and line[0] == " ":
                lines[i-1] += line[1:]
                del lines[i]
            else:
                i += 1

        tzid = None
        comps = []
        invtz = False
        comptype = None
        for line in lines:
            if not line:
                continue
            name, value = line.split(':', 1)
            parms = name.split(';')
            if not parms:
                raise ValueError("empty property name")
            name = parms[0].upper()
            parms = parms[1:]
            if invtz:
                if name == "BEGIN":
                    if value in ("STANDARD", "DAYLIGHT"):
                        # Process component
                        pass
                    else:
                        raise ValueError("unknown component: "+value)
                    comptype = value
                    founddtstart = False
                    tzoffsetfrom = None
                    tzoffsetto = None
                    rrulelines = []
                    tzname = None
                elif name == "END":
                    if value == "VTIMEZONE":
                        if comptype:
                            raise ValueError(
                                "component not closed: "+comptype)
                        if not tzid:
                            raise ValueError(
                                "mandatory TZID not found")
                        if not comps:
                            raise ValueError(
                                "at least one component is needed")
                        # Process vtimezone
                        self._vtz[tzid] = _tzicalvtz(tzid, comps)
                        invtz = False
                    elif value == comptype:
                        if not founddtstart:
                            raise ValueError(
                                "mandatory DTSTART not found")
                        if tzoffsetfrom is None:
                            raise ValueError(
                                "mandatory TZOFFSETFROM not found")
                        if tzoffsetto is None:
                            # BUGFIX: the message wrongly said
                            # TZOFFSETFROM here (copy/paste slip).
                            raise ValueError(
                                "mandatory TZOFFSETTO not found")
                        # Process component
                        rr = None
                        if rrulelines:
                            rr = rrule.rrulestr("\n".join(rrulelines),
                                                compatible=True,
                                                ignoretz=True,
                                                cache=True)
                        comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto,
                                              (comptype == "DAYLIGHT"),
                                              tzname, rr)
                        comps.append(comp)
                        comptype = None
                    else:
                        raise ValueError(
                            "invalid component end: "+value)
                elif comptype:
                    if name == "DTSTART":
                        rrulelines.append(line)
                        founddtstart = True
                    elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"):
                        rrulelines.append(line)
                    elif name == "TZOFFSETFROM":
                        if parms:
                            raise ValueError(
                                "unsupported %s parm: %s "%(name, parms[0]))
                        tzoffsetfrom = self._parse_offset(value)
                    elif name == "TZOFFSETTO":
                        if parms:
                            raise ValueError(
                                "unsupported TZOFFSETTO parm: "+parms[0])
                        tzoffsetto = self._parse_offset(value)
                    elif name == "TZNAME":
                        if parms:
                            raise ValueError(
                                "unsupported TZNAME parm: "+parms[0])
                        tzname = value
                    elif name == "COMMENT":
                        pass
                    else:
                        raise ValueError("unsupported property: "+name)
                else:
                    if name == "TZID":
                        if parms:
                            raise ValueError(
                                "unsupported TZID parm: "+parms[0])
                        tzid = value
                    elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"):
                        pass
                    else:
                        raise ValueError("unsupported property: "+name)
            elif name == "BEGIN" and value == "VTIMEZONE":
                tzid = None
                comps = []
                invtz = True

    def __repr__(self):
        # repr() instead of Python 2 backticks (removed in Python 3).
        return "%s(%s)" % (self.__class__.__name__, repr(self._s))
# Candidate local-zone files and system zoneinfo directories searched by
# gettz(); Windows has no standard location, so both lists stay empty.
if sys.platform == "win32":
    TZFILES = []
    TZPATHS = []
else:
    TZFILES = ["/etc/localtime", "localtime"]
    TZPATHS = ["/usr/share/zoneinfo", "/usr/lib/zoneinfo", "/etc/zoneinfo"]
def gettz(name=None):
    """Return a tzinfo for *name*, or None when nothing matches.

    Resolution order: with no name, the TZ environment variable and
    then the system's local zone files; otherwise an absolute path,
    the TZPATHS zoneinfo directories, the Windows registry zones, the
    bundled dateutil zoneinfo database, and finally TZ-string parsing
    (plus the GMT/UTC and time.tzname special cases).
    """
    tz = None
    if not name:
        try:
            name = os.environ["TZ"]
        except KeyError:
            pass
    if name is None or name == ":":
        for filepath in TZFILES:
            if not os.path.isabs(filepath):
                filename = filepath
                for path in TZPATHS:
                    filepath = os.path.join(path, filename)
                    if os.path.isfile(filepath):
                        break
                else:
                    continue
            if os.path.isfile(filepath):
                try:
                    tz = tzfile(filepath)
                    break
                except (IOError, OSError, ValueError):
                    pass
        else:
            tz = tzlocal()
    else:
        if name.startswith(":"):
            # BUGFIX: was name[:-1], which stripped the *final*
            # character instead of the leading ":" marker used by the
            # POSIX TZ ':pathname' syntax.
            name = name[1:]
        if os.path.isabs(name):
            if os.path.isfile(name):
                tz = tzfile(name)
            else:
                tz = None
        else:
            for path in TZPATHS:
                filepath = os.path.join(path, name)
                if not os.path.isfile(filepath):
                    # Zone names may use spaces where files use '_'.
                    filepath = filepath.replace(' ','_')
                    if not os.path.isfile(filepath):
                        continue
                try:
                    tz = tzfile(filepath)
                    break
                except (IOError, OSError, ValueError):
                    pass
            else:
                tz = None
                if tzwin:
                    try:
                        tz = tzwin(name)
                    except OSError:
                        pass
                if not tz:
                    from dateutil.zoneinfo import gettz
                    tz = gettz(name)
                if not tz:
                    for c in name:
                        # name must have at least one offset to be a tzstr
                        if c in "0123456789":
                            try:
                                tz = tzstr(name)
                            except ValueError:
                                pass
                            break
                    else:
                        if name in ("GMT", "UTC"):
                            tz = tzutc()
                        elif name in time.tzname:
                            tz = tzlocal()
    return tz
# vim:ts=4:sw=4:et
| Python |
"""
Copyright (c) 2003-2010 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
import itertools
import datetime
import calendar
import thread
import sys
__all__ = ["rrule", "rruleset", "rrulestr",
"YEARLY", "MONTHLY", "WEEKLY", "DAILY",
"HOURLY", "MINUTELY", "SECONDLY",
"MO", "TU", "WE", "TH", "FR", "SA", "SU"]
# Every mask is 7 days longer to handle cross-year weekly periods.
# M*MASK maps day-of-year -> month number; MDAY*MASK -> day of month;
# NMDAY*MASK -> negative (from-month-end) day of month.
M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30+
                 [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7)
M365MASK = list(M366MASK)
# NOTE: this concatenation relies on Python 2's range() returning lists.
M29, M30, M31 = range(1,30), range(1,31), range(1,32)
MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
MDAY365MASK = list(MDAY366MASK)
M29, M30, M31 = range(-29,0), range(-30,0), range(-31,0)
NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
NMDAY365MASK = list(NMDAY366MASK)
# Cumulative day-of-year at each month boundary (leap / non-leap).
M366RANGE = (0,31,60,91,121,152,182,213,244,274,305,335,366)
M365RANGE = (0,31,59,90,120,151,181,212,243,273,304,334,365)
WDAYMASK = [0,1,2,3,4,5,6]*55
# Derive the 365-day masks from the 366-day ones by deleting the Feb 29
# slot (index 59; index 31 in the negative-day mask), then freeze them.
del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
MDAY365MASK = tuple(MDAY365MASK)
M365MASK = tuple(M365MASK)
# Frequency constants, ordered from coarsest (0) to finest (6).
(YEARLY,
 MONTHLY,
 WEEKLY,
 DAILY,
 HOURLY,
 MINUTELY,
 SECONDLY) = range(7)
# Imported on demand.
easter = None
parser = None
class weekday(object):
    """A weekday (MO..SU as 0..6), optionally qualified by an occurrence
    index *n*: e.g. MO(+2) means "the second Monday"."""

    __slots__ = ["weekday", "n"]

    def __init__(self, weekday, n=None):
        if n == 0:
            # Call-style raise: valid in Python 2 and required by
            # Python 3 (original used the py2-only comma form).
            raise ValueError("Can't create weekday with n == 0")
        self.weekday = weekday
        self.n = n

    def __call__(self, n):
        # Supports the MO(+2) construction syntax; reuse self when the
        # index is unchanged.
        if n == self.n:
            return self
        else:
            return self.__class__(self.weekday, n)

    def __eq__(self, other):
        try:
            if self.weekday != other.weekday or self.n != other.n:
                return False
        except AttributeError:
            # Not a weekday-like object.
            return False
        return True

    def __repr__(self):
        s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
        if not self.n:
            return s
        else:
            return "%s(%+d)" % (s, self.n)

# The singleton instances used as rrule arguments (MO, TU(+1), ...).
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
class rrulebase:
    # Shared iteration machinery for rrule and rruleset: optional
    # caching of generated occurrences plus the generic query helpers
    # (count/before/after/between, slicing, membership).  Subclasses
    # supply _iter(), a generator of ascending datetimes.

    def __init__(self, cache=False):
        if cache:
            self._cache = []
            self._cache_lock = thread.allocate_lock()
            # Single shared generator; _iter_cached() replays its output.
            self._cache_gen = self._iter()
            self._cache_complete = False
        else:
            self._cache = None
            self._cache_complete = False
        # Total occurrence count; filled in lazily (see count()).
        self._len = None

    def __iter__(self):
        if self._cache_complete:
            return iter(self._cache)
        elif self._cache is None:
            return self._iter()
        else:
            return self._iter_cached()

    def _iter_cached(self):
        # Yield from the cache, extending it ten items at a time from
        # the shared generator whenever this iterator reaches its end.
        i = 0
        gen = self._cache_gen
        cache = self._cache
        acquire = self._cache_lock.acquire
        release = self._cache_lock.release
        while gen:
            if i == len(cache):
                acquire()
                if self._cache_complete:
                    # NOTE(review): this break (and the one below) exits
                    # with the lock still held — confirm intended.
                    break
                try:
                    for j in range(10):
                        cache.append(gen.next())
                except StopIteration:
                    self._cache_gen = gen = None
                    self._cache_complete = True
                    break
                release()
            yield cache[i]
            i += 1
        # Cache is complete at this point; drain whatever remains.
        while i < self._len:
            yield cache[i]
            i += 1

    def __getitem__(self, item):
        if self._cache_complete:
            return self._cache[item]
        elif isinstance(item, slice):
            if item.step and item.step < 0:
                # A negative step needs the whole sequence anyway.
                return list(iter(self))[item]
            else:
                return list(itertools.islice(self,
                                             item.start or 0,
                                             item.stop or sys.maxint,
                                             item.step or 1))
        elif item >= 0:
            # Advance item+1 times to reach the requested index.
            gen = iter(self)
            try:
                for i in range(item+1):
                    res = gen.next()
            except StopIteration:
                raise IndexError
            return res
        else:
            # Negative index: materialize everything.
            return list(iter(self))[item]

    def __contains__(self, item):
        if self._cache_complete:
            return item in self._cache
        else:
            for i in self:
                if i == item:
                    return True
                elif i > item:
                    # Occurrences are generated in ascending order, so
                    # we can stop early.
                    return False
        return False

    # __len__() introduces a large performance penality.
    def count(self):
        # Number of occurrences; requires one full iteration the first
        # time it is called.
        if self._len is None:
            for x in self: pass
        return self._len

    def before(self, dt, inc=False):
        # Last occurrence before dt (at-or-before when inc is true).
        if self._cache_complete:
            gen = self._cache
        else:
            gen = self
        last = None
        if inc:
            for i in gen:
                if i > dt:
                    break
                last = i
        else:
            for i in gen:
                if i >= dt:
                    break
                last = i
        return last

    def after(self, dt, inc=False):
        # First occurrence after dt (at-or-after when inc is true).
        if self._cache_complete:
            gen = self._cache
        else:
            gen = self
        if inc:
            for i in gen:
                if i >= dt:
                    return i
        else:
            for i in gen:
                if i > dt:
                    return i
        return None

    def between(self, after, before, inc=False):
        # All occurrences strictly between after and before; with inc
        # true, both endpoints are inclusive.
        if self._cache_complete:
            gen = self._cache
        else:
            gen = self
        started = False
        l = []
        if inc:
            for i in gen:
                if i > before:
                    break
                elif not started:
                    if i >= after:
                        started = True
                        l.append(i)
                else:
                    l.append(i)
        else:
            for i in gen:
                if i >= before:
                    break
                elif not started:
                    if i > after:
                        started = True
                        l.append(i)
                else:
                    l.append(i)
        return l
class rrule(rrulebase):
    # A single recurrence rule in the spirit of RFC 2445 (iCalendar),
    # with the dateutil extension byeaster.  __init__ normalizes every
    # by* parameter into tuples; _iter() walks periods of the given
    # frequency, selecting candidate days through the precomputed
    # _iterinfo masks and filtering them against the by* sets.

    def __init__(self, freq, dtstart=None,
                 interval=1, wkst=None, count=None, until=None, bysetpos=None,
                 bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
                 byweekno=None, byweekday=None,
                 byhour=None, byminute=None, bysecond=None,
                 cache=False):
        rrulebase.__init__(self, cache)
        global easter
        if not dtstart:
            dtstart = datetime.datetime.now().replace(microsecond=0)
        elif not isinstance(dtstart, datetime.datetime):
            # Promote a plain date to a datetime at midnight.
            dtstart = datetime.datetime.fromordinal(dtstart.toordinal())
        else:
            dtstart = dtstart.replace(microsecond=0)
        self._dtstart = dtstart
        self._tzinfo = dtstart.tzinfo
        self._freq = freq
        self._interval = interval
        self._count = count
        if until and not isinstance(until, datetime.datetime):
            until = datetime.datetime.fromordinal(until.toordinal())
        self._until = until
        if wkst is None:
            self._wkst = calendar.firstweekday()
        elif type(wkst) is int:
            self._wkst = wkst
        else:
            # A weekday instance (MO..SU).
            self._wkst = wkst.weekday
        if bysetpos is None:
            self._bysetpos = None
        elif type(bysetpos) is int:
            if bysetpos == 0 or not (-366 <= bysetpos <= 366):
                raise ValueError("bysetpos must be between 1 and 366, "
                                 "or between -366 and -1")
            self._bysetpos = (bysetpos,)
        else:
            self._bysetpos = tuple(bysetpos)
            for pos in self._bysetpos:
                if pos == 0 or not (-366 <= pos <= 366):
                    raise ValueError("bysetpos must be between 1 and 366, "
                                     "or between -366 and -1")
        # With no day-selecting parameters at all, derive defaults from
        # dtstart so the rule recurs on its anchor day.
        if not (byweekno or byyearday or bymonthday or
                byweekday is not None or byeaster is not None):
            if freq == YEARLY:
                if not bymonth:
                    bymonth = dtstart.month
                bymonthday = dtstart.day
            elif freq == MONTHLY:
                bymonthday = dtstart.day
            elif freq == WEEKLY:
                byweekday = dtstart.weekday()
        # bymonth
        if not bymonth:
            self._bymonth = None
        elif type(bymonth) is int:
            self._bymonth = (bymonth,)
        else:
            self._bymonth = tuple(bymonth)
        # byyearday
        if not byyearday:
            self._byyearday = None
        elif type(byyearday) is int:
            self._byyearday = (byyearday,)
        else:
            self._byyearday = tuple(byyearday)
        # byeaster
        if byeaster is not None:
            if not easter:
                from dateutil import easter
            if type(byeaster) is int:
                self._byeaster = (byeaster,)
            else:
                self._byeaster = tuple(byeaster)
        else:
            self._byeaster = None
        # bymonthay
        if not bymonthday:
            self._bymonthday = ()
            self._bynmonthday = ()
        elif type(bymonthday) is int:
            if bymonthday < 0:
                self._bynmonthday = (bymonthday,)
                self._bymonthday = ()
            else:
                self._bymonthday = (bymonthday,)
                self._bynmonthday = ()
        else:
            # Split positive and negative (from-month-end) day numbers.
            self._bymonthday = tuple([x for x in bymonthday if x > 0])
            self._bynmonthday = tuple([x for x in bymonthday if x < 0])
        # byweekno
        if byweekno is None:
            self._byweekno = None
        elif type(byweekno) is int:
            self._byweekno = (byweekno,)
        else:
            self._byweekno = tuple(byweekno)
        # byweekday / bynweekday
        if byweekday is None:
            self._byweekday = None
            self._bynweekday = None
        elif type(byweekday) is int:
            self._byweekday = (byweekday,)
            self._bynweekday = None
        elif hasattr(byweekday, "n"):
            if not byweekday.n or freq > MONTHLY:
                # Plain weekday; the occurrence index n only matters at
                # monthly or yearly frequency.
                self._byweekday = (byweekday.weekday,)
                self._bynweekday = None
            else:
                self._bynweekday = ((byweekday.weekday, byweekday.n),)
                self._byweekday = None
        else:
            self._byweekday = []
            self._bynweekday = []
            for wday in byweekday:
                if type(wday) is int:
                    self._byweekday.append(wday)
                elif not wday.n or freq > MONTHLY:
                    self._byweekday.append(wday.weekday)
                else:
                    self._bynweekday.append((wday.weekday, wday.n))
            self._byweekday = tuple(self._byweekday)
            self._bynweekday = tuple(self._bynweekday)
            if not self._byweekday:
                self._byweekday = None
            elif not self._bynweekday:
                self._bynweekday = None
        # byhour
        if byhour is None:
            if freq < HOURLY:
                self._byhour = (dtstart.hour,)
            else:
                self._byhour = None
        elif type(byhour) is int:
            self._byhour = (byhour,)
        else:
            self._byhour = tuple(byhour)
        # byminute
        if byminute is None:
            if freq < MINUTELY:
                self._byminute = (dtstart.minute,)
            else:
                self._byminute = None
        elif type(byminute) is int:
            self._byminute = (byminute,)
        else:
            self._byminute = tuple(byminute)
        # bysecond
        if bysecond is None:
            if freq < SECONDLY:
                self._bysecond = (dtstart.second,)
            else:
                self._bysecond = None
        elif type(bysecond) is int:
            self._bysecond = (bysecond,)
        else:
            self._bysecond = tuple(bysecond)

        # For daily or coarser frequencies, the set of times within a
        # day is fixed; precompute it once.
        if self._freq >= HOURLY:
            self._timeset = None
        else:
            self._timeset = []
            for hour in self._byhour:
                for minute in self._byminute:
                    for second in self._bysecond:
                        self._timeset.append(
                                datetime.time(hour, minute, second,
                                                    tzinfo=self._tzinfo))
            self._timeset.sort()
            self._timeset = tuple(self._timeset)

    def _iter(self):
        # Generator yielding the rule's occurrences in ascending order.
        year, month, day, hour, minute, second, weekday, yearday, _ = \
            self._dtstart.timetuple()

        # Some local variables to speed things up a bit
        freq = self._freq
        interval = self._interval
        wkst = self._wkst
        until = self._until
        bymonth = self._bymonth
        byweekno = self._byweekno
        byyearday = self._byyearday
        byweekday = self._byweekday
        byeaster = self._byeaster
        bymonthday = self._bymonthday
        bynmonthday = self._bynmonthday
        bysetpos = self._bysetpos
        byhour = self._byhour
        byminute = self._byminute
        bysecond = self._bysecond

        ii = _iterinfo(self)
        ii.rebuild(year, month)

        # Candidate-day extractor for one period of this frequency.
        getdayset = {YEARLY:ii.ydayset,
                     MONTHLY:ii.mdayset,
                     WEEKLY:ii.wdayset,
                     DAILY:ii.ddayset,
                     HOURLY:ii.ddayset,
                     MINUTELY:ii.ddayset,
                     SECONDLY:ii.ddayset}[freq]

        if freq < HOURLY:
            timeset = self._timeset
        else:
            # Time-of-day set is rebuilt as hour/minute/second advance.
            gettimeset = {HOURLY:ii.htimeset,
                          MINUTELY:ii.mtimeset,
                          SECONDLY:ii.stimeset}[freq]
            if ((freq >= HOURLY and
                 self._byhour and hour not in self._byhour) or
                (freq >= MINUTELY and
                 self._byminute and minute not in self._byminute) or
                (freq >= SECONDLY and
                 self._bysecond and second not in self._bysecond)):
                timeset = ()
            else:
                timeset = gettimeset(hour, minute, second)

        total = 0
        count = self._count
        while True:
            # Get dayset with the right frequency
            dayset, start, end = getdayset(year, month, day)

            # Do the "hard" work ;-)
            # Blank out candidate days rejected by any by* filter.
            filtered = False
            for i in dayset[start:end]:
                if ((bymonth and ii.mmask[i] not in bymonth) or
                    (byweekno and not ii.wnomask[i]) or
                    (byweekday and ii.wdaymask[i] not in byweekday) or
                    (ii.nwdaymask and not ii.nwdaymask[i]) or
                    (byeaster and not ii.eastermask[i]) or
                    ((bymonthday or bynmonthday) and
                     ii.mdaymask[i] not in bymonthday and
                     ii.nmdaymask[i] not in bynmonthday) or
                    (byyearday and
                     ((i < ii.yearlen and i+1 not in byyearday
                                      and -ii.yearlen+i not in byyearday) or
                      (i >= ii.yearlen and i+1-ii.yearlen not in byyearday
                                       and -ii.nextyearlen+i-ii.yearlen
                                           not in byyearday)))):
                    dayset[i] = None
                    filtered = True

            # Output results
            if bysetpos and timeset:
                # Select only the nth results within this period.
                poslist = []
                for pos in bysetpos:
                    if pos < 0:
                        daypos, timepos = divmod(pos, len(timeset))
                    else:
                        daypos, timepos = divmod(pos-1, len(timeset))
                    try:
                        i = [x for x in dayset[start:end]
                                if x is not None][daypos]
                        time = timeset[timepos]
                    except IndexError:
                        pass
                    else:
                        date = datetime.date.fromordinal(ii.yearordinal+i)
                        res = datetime.datetime.combine(date, time)
                        if res not in poslist:
                            poslist.append(res)
                poslist.sort()
                for res in poslist:
                    if until and res > until:
                        self._len = total
                        return
                    elif res >= self._dtstart:
                        total += 1
                        yield res
                        if count:
                            count -= 1
                            if not count:
                                self._len = total
                                return
            else:
                for i in dayset[start:end]:
                    if i is not None:
                        date = datetime.date.fromordinal(ii.yearordinal+i)
                        for time in timeset:
                            res = datetime.datetime.combine(date, time)
                            if until and res > until:
                                self._len = total
                                return
                            elif res >= self._dtstart:
                                total += 1
                                yield res
                                if count:
                                    count -= 1
                                    if not count:
                                        self._len = total
                                        return

            # Handle frequency and interval
            # Advance to the next period; fixday flags pending
            # day-overflow normalization (done at the bottom).
            fixday = False
            if freq == YEARLY:
                year += interval
                if year > datetime.MAXYEAR:
                    self._len = total
                    return
                ii.rebuild(year, month)
            elif freq == MONTHLY:
                month += interval
                if month > 12:
                    div, mod = divmod(month, 12)
                    month = mod
                    year += div
                    if month == 0:
                        month = 12
                        year -= 1
                    if year > datetime.MAXYEAR:
                        self._len = total
                        return
                ii.rebuild(year, month)
            elif freq == WEEKLY:
                # Jump to the week-start (wkst) of the next interval.
                if wkst > weekday:
                    day += -(weekday+1+(6-wkst))+self._interval*7
                else:
                    day += -(weekday-wkst)+self._interval*7
                weekday = wkst
                fixday = True
            elif freq == DAILY:
                day += interval
                fixday = True
            elif freq == HOURLY:
                if filtered:
                    # Jump to one iteration before next day
                    hour += ((23-hour)//interval)*interval
                while True:
                    hour += interval
                    div, mod = divmod(hour, 24)
                    if div:
                        hour = mod
                        day += div
                        fixday = True
                    if not byhour or hour in byhour:
                        break
                timeset = gettimeset(hour, minute, second)
            elif freq == MINUTELY:
                if filtered:
                    # Jump to one iteration before next day
                    minute += ((1439-(hour*60+minute))//interval)*interval
                while True:
                    minute += interval
                    div, mod = divmod(minute, 60)
                    if div:
                        minute = mod
                        hour += div
                        div, mod = divmod(hour, 24)
                        if div:
                            hour = mod
                            day += div
                            fixday = True
                            filtered = False
                    if ((not byhour or hour in byhour) and
                        (not byminute or minute in byminute)):
                        break
                timeset = gettimeset(hour, minute, second)
            elif freq == SECONDLY:
                if filtered:
                    # Jump to one iteration before next day
                    second += (((86399-(hour*3600+minute*60+second))
                                //interval)*interval)
                while True:
                    second += self._interval
                    div, mod = divmod(second, 60)
                    if div:
                        second = mod
                        minute += div
                        div, mod = divmod(minute, 60)
                        if div:
                            minute = mod
                            hour += div
                            div, mod = divmod(hour, 24)
                            if div:
                                hour = mod
                                day += div
                                fixday = True
                    if ((not byhour or hour in byhour) and
                        (not byminute or minute in byminute) and
                        (not bysecond or second in bysecond)):
                        break
                timeset = gettimeset(hour, minute, second)

            # Normalize an overflowed day-of-month into real calendar
            # months (only possible when day grew past 28).
            if fixday and day > 28:
                daysinmonth = calendar.monthrange(year, month)[1]
                if day > daysinmonth:
                    while day > daysinmonth:
                        day -= daysinmonth
                        month += 1
                        if month == 13:
                            month = 1
                            year += 1
                            if year > datetime.MAXYEAR:
                                self._len = total
                                return
                        daysinmonth = calendar.monthrange(year, month)[1]
                    ii.rebuild(year, month)
class _iterinfo(object):
    # Per-year (and per-month) lookup tables used by rrule._iter():
    # masks mapping day-of-year index to month, day-of-month, weekday,
    # ISO-style week-number membership, nth-weekday and easter hits.
    __slots__ = ["rrule", "lastyear", "lastmonth",
                 "yearlen", "nextyearlen", "yearordinal", "yearweekday",
                 "mmask", "mrange", "mdaymask", "nmdaymask",
                 "wdaymask", "wnomask", "nwdaymask", "eastermask"]

    def __init__(self, rrule):
        for attr in self.__slots__:
            setattr(self, attr, None)
        self.rrule = rrule

    def rebuild(self, year, month):
        # Every mask is 7 days longer to handle cross-year weekly periods.
        rr = self.rrule
        if year != self.lastyear:
            self.yearlen = 365+calendar.isleap(year)
            self.nextyearlen = 365+calendar.isleap(year+1)
            firstyday = datetime.date(year, 1, 1)
            self.yearordinal = firstyday.toordinal()
            self.yearweekday = firstyday.weekday()

            wday = datetime.date(year, 1, 1).weekday()
            if self.yearlen == 365:
                self.mmask = M365MASK
                self.mdaymask = MDAY365MASK
                self.nmdaymask = NMDAY365MASK
                self.wdaymask = WDAYMASK[wday:]
                self.mrange = M365RANGE
            else:
                self.mmask = M366MASK
                self.mdaymask = MDAY366MASK
                self.nmdaymask = NMDAY366MASK
                self.wdaymask = WDAYMASK[wday:]
                self.mrange = M366RANGE

            if not rr._byweekno:
                self.wnomask = None
            else:
                # Mark every day whose week number is in byweekno.
                self.wnomask = [0]*(self.yearlen+7)
                #no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
                no1wkst = firstwkst = (7-self.yearweekday+rr._wkst)%7
                if no1wkst >= 4:
                    no1wkst = 0
                    # Number of days in the year, plus the days we got
                    # from last year.
                    wyearlen = self.yearlen+(self.yearweekday-rr._wkst)%7
                else:
                    # Number of days in the year, minus the days we
                    # left in last year.
                    wyearlen = self.yearlen-no1wkst
                div, mod = divmod(wyearlen, 7)
                numweeks = div+mod//4
                for n in rr._byweekno:
                    if n < 0:
                        n += numweeks+1
                    if not (0 < n <= numweeks):
                        continue
                    if n > 1:
                        i = no1wkst+(n-1)*7
                        if no1wkst != firstwkst:
                            i -= 7-firstwkst
                    else:
                        i = no1wkst
                    for j in range(7):
                        self.wnomask[i] = 1
                        i += 1
                        if self.wdaymask[i] == rr._wkst:
                            break
                if 1 in rr._byweekno:
                    # Check week number 1 of next year as well
                    # TODO: Check -numweeks for next year.
                    i = no1wkst+numweeks*7
                    if no1wkst != firstwkst:
                        i -= 7-firstwkst
                    if i < self.yearlen:
                        # If week starts in next year, we
                        # don't care about it.
                        for j in range(7):
                            self.wnomask[i] = 1
                            i += 1
                            if self.wdaymask[i] == rr._wkst:
                                break
                if no1wkst:
                    # Check last week number of last year as
                    # well. If no1wkst is 0, either the year
                    # started on week start, or week number 1
                    # got days from last year, so there are no
                    # days from last year's last week number in
                    # this year.
                    if -1 not in rr._byweekno:
                        lyearweekday = datetime.date(year-1,1,1).weekday()
                        lno1wkst = (7-lyearweekday+rr._wkst)%7
                        lyearlen = 365+calendar.isleap(year-1)
                        if lno1wkst >= 4:
                            lno1wkst = 0
                            lnumweeks = 52+(lyearlen+
                                           (lyearweekday-rr._wkst)%7)%7//4
                        else:
                            lnumweeks = 52+(self.yearlen-no1wkst)%7//4
                    else:
                        lnumweeks = -1
                    if lnumweeks in rr._byweekno:
                        for i in range(no1wkst):
                            self.wnomask[i] = 1

        # nth-weekday mask depends on the month too (for MONTHLY).
        if (rr._bynweekday and
            (month != self.lastmonth or year != self.lastyear)):
            ranges = []
            if rr._freq == YEARLY:
                if rr._bymonth:
                    for month in rr._bymonth:
                        ranges.append(self.mrange[month-1:month+1])
                else:
                    ranges = [(0, self.yearlen)]
            elif rr._freq == MONTHLY:
                ranges = [self.mrange[month-1:month+1]]
            if ranges:
                # Weekly frequency won't get here, so we may not
                # care about cross-year weekly periods.
                self.nwdaymask = [0]*self.yearlen
                for first, last in ranges:
                    last -= 1
                    for wday, n in rr._bynweekday:
                        if n < 0:
                            i = last+(n+1)*7
                            i -= (self.wdaymask[i]-wday)%7
                        else:
                            i = first+(n-1)*7
                            i += (7-self.wdaymask[i]+wday)%7
                        if first <= i <= last:
                            self.nwdaymask[i] = 1

        if rr._byeaster:
            self.eastermask = [0]*(self.yearlen+7)
            eyday = easter.easter(year).toordinal()-self.yearordinal
            for offset in rr._byeaster:
                self.eastermask[eyday+offset] = 1

        self.lastyear = year
        self.lastmonth = month

    def ydayset(self, year, month, day):
        # YEARLY: every day of the year is a candidate.
        return range(self.yearlen), 0, self.yearlen

    def mdayset(self, year, month, day):
        # MONTHLY: candidates limited to the given month.
        set = [None]*self.yearlen
        start, end = self.mrange[month-1:month+1]
        for i in range(start, end):
            set[i] = i
        return set, start, end

    def wdayset(self, year, month, day):
        # We need to handle cross-year weeks here.
        set = [None]*(self.yearlen+7)
        i = datetime.date(year, month, day).toordinal()-self.yearordinal
        start = i
        for j in range(7):
            set[i] = i
            i += 1
            #if (not (0 <= i < self.yearlen) or
            #    self.wdaymask[i] == self.rrule._wkst):
            # This will cross the year boundary, if necessary.
            if self.wdaymask[i] == self.rrule._wkst:
                break
        return set, start, i

    def ddayset(self, year, month, day):
        # DAILY and finer: a single candidate day.
        set = [None]*self.yearlen
        i = datetime.date(year, month, day).toordinal()-self.yearordinal
        set[i] = i
        return set, i, i+1

    def htimeset(self, hour, minute, second):
        # Times within a fixed hour (HOURLY frequency).
        set = []
        rr = self.rrule
        for minute in rr._byminute:
            for second in rr._bysecond:
                set.append(datetime.time(hour, minute, second,
                                         tzinfo=rr._tzinfo))
        set.sort()
        return set

    def mtimeset(self, hour, minute, second):
        # Times within a fixed hour:minute (MINUTELY frequency).
        set = []
        rr = self.rrule
        for second in rr._bysecond:
            set.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo))
        set.sort()
        return set

    def stimeset(self, hour, minute, second):
        # SECONDLY: exactly one time.
        return (datetime.time(hour, minute, second,
                tzinfo=self.rrule._tzinfo),)
class rruleset(rrulebase):
    # Combines several rrules and fixed dates (minus exclusion rules
    # and dates) into one sorted, duplicate-free occurrence stream.

    class _genitem:
        # One-item lookahead over a generator's .next callable; removes
        # itself from its list when exhausted.  Items compare by their
        # current datetime, so a sorted list of _genitems acts as a
        # simple priority queue.
        def __init__(self, genlist, gen):
            try:
                self.dt = gen()
                genlist.append(self)
            except StopIteration:
                pass
            self.genlist = genlist
            self.gen = gen

        def next(self):
            try:
                self.dt = self.gen()
            except StopIteration:
                self.genlist.remove(self)

        def __cmp__(self, other):
            return cmp(self.dt, other.dt)

    def __init__(self, cache=False):
        rrulebase.__init__(self, cache)
        self._rrule = []
        self._rdate = []
        self._exrule = []
        self._exdate = []

    def rrule(self, rrule):
        # Include every occurrence of the given rrule.
        self._rrule.append(rrule)

    def rdate(self, rdate):
        # Include one fixed datetime.
        self._rdate.append(rdate)

    def exrule(self, exrule):
        # Exclude every occurrence of the given rrule.
        self._exrule.append(exrule)

    def exdate(self, exdate):
        # Exclude one fixed datetime.
        self._exdate.append(exdate)

    def _iter(self):
        # Merge inclusion sources (rlist) against exclusion sources
        # (exlist), each kept as a sorted queue of _genitems.
        rlist = []
        self._rdate.sort()
        self._genitem(rlist, iter(self._rdate).next)
        for gen in [iter(x).next for x in self._rrule]:
            self._genitem(rlist, gen)
        rlist.sort()
        exlist = []
        self._exdate.sort()
        self._genitem(exlist, iter(self._exdate).next)
        for gen in [iter(x).next for x in self._exrule]:
            self._genitem(exlist, gen)
        exlist.sort()
        lastdt = None
        total = 0
        while rlist:
            ritem = rlist[0]
            # Skip duplicates of the previously yielded datetime.
            if not lastdt or lastdt != ritem.dt:
                # Advance exclusions up to the current candidate.
                while exlist and exlist[0] < ritem:
                    exlist[0].next()
                    exlist.sort()
                if not exlist or ritem != exlist[0]:
                    total += 1
                    yield ritem.dt
                lastdt = ritem.dt
            ritem.next()
            rlist.sort()
        self._len = total
class _rrulestr:
    """Parse RFC 2445 (iCalendar) recurrence strings -- a lone RRULE or a
    whole property block with RRULE/RDATE/EXRULE/EXDATE/DTSTART lines --
    into rrule or rruleset objects.  Used via the module-level rrulestr
    singleton.
    """
    # Map RFC frequency names onto the module's frequency constants.
    _freq_map = {"YEARLY": YEARLY,
                 "MONTHLY": MONTHLY,
                 "WEEKLY": WEEKLY,
                 "DAILY": DAILY,
                 "HOURLY": HOURLY,
                 "MINUTELY": MINUTELY,
                 "SECONDLY": SECONDLY}
    _weekday_map = {"MO":0,"TU":1,"WE":2,"TH":3,"FR":4,"SA":5,"SU":6}
    # Each _handle_<PARM> stores a normalized value into the rrule()
    # keyword dict under the lowercased parameter name.
    def _handle_int(self, rrkwargs, name, value, **kwargs):
        rrkwargs[name.lower()] = int(value)
    def _handle_int_list(self, rrkwargs, name, value, **kwargs):
        rrkwargs[name.lower()] = [int(x) for x in value.split(',')]
    _handle_INTERVAL = _handle_int
    _handle_COUNT = _handle_int
    _handle_BYSETPOS = _handle_int_list
    _handle_BYMONTH = _handle_int_list
    _handle_BYMONTHDAY = _handle_int_list
    _handle_BYYEARDAY = _handle_int_list
    _handle_BYEASTER = _handle_int_list
    _handle_BYWEEKNO = _handle_int_list
    _handle_BYHOUR = _handle_int_list
    _handle_BYMINUTE = _handle_int_list
    _handle_BYSECOND = _handle_int_list
    def _handle_FREQ(self, rrkwargs, name, value, **kwargs):
        rrkwargs["freq"] = self._freq_map[value]
    def _handle_UNTIL(self, rrkwargs, name, value, **kwargs):
        global parser
        if not parser:
            from dateutil import parser
        try:
            rrkwargs["until"] = parser.parse(value,
                                             ignoretz=kwargs.get("ignoretz"),
                                             tzinfos=kwargs.get("tzinfos"))
        except ValueError:
            raise ValueError("invalid until date")
    def _handle_WKST(self, rrkwargs, name, value, **kwargs):
        rrkwargs["wkst"] = self._weekday_map[value]
    def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs):
        # (fixed: the catch-all parameter was misspelled "**kwarsg")
        l = []
        for wday in value.split(','):
            # Split an optional signed week-number prefix from the
            # two-letter weekday code, e.g. "+2TU" -> (2, "TU").
            for i in range(len(wday)):
                if wday[i] not in '+-0123456789':
                    break
            n = wday[:i] or None
            w = wday[i:]
            if n: n = int(n)
            l.append(weekdays[self._weekday_map[w]](n))
        rrkwargs["byweekday"] = l
    _handle_BYDAY = _handle_BYWEEKDAY
    def _parse_rfc_rrule(self, line,
                         dtstart=None,
                         cache=False,
                         ignoretz=False,
                         tzinfos=None):
        """Parse a single RRULE line (with or without the leading
        "RRULE:" property name) into an rrule instance."""
        if line.find(':') != -1:
            name, value = line.split(':')
            if name != "RRULE":
                raise ValueError("unknown parameter name")
        else:
            value = line
        rrkwargs = {}
        for pair in value.split(';'):
            name, value = pair.split('=')
            name = name.upper()
            value = value.upper()
            try:
                # Dispatch to the matching _handle_<PARM> method.
                getattr(self, "_handle_"+name)(rrkwargs, name, value,
                                               ignoretz=ignoretz,
                                               tzinfos=tzinfos)
            except AttributeError:
                raise ValueError("unknown parameter '%s'" % name)
            except (KeyError, ValueError):
                raise ValueError("invalid '%s': %s" % (name, value))
        return rrule(dtstart=dtstart, cache=cache, **rrkwargs)
    def _parse_rfc(self, s,
                   dtstart=None,
                   cache=False,
                   unfold=False,
                   forceset=False,
                   compatible=False,
                   ignoretz=False,
                   tzinfos=None):
        """Parse a full property block.  Returns an rruleset when more
        than a lone RRULE is present (or forceset is true), otherwise a
        plain rrule."""
        global parser
        if compatible:
            forceset = True
            unfold = True
        s = s.upper()
        if not s.strip():
            raise ValueError("empty string")
        if unfold:
            # RFC 2445 line folding: a line starting with a space
            # continues the previous line.
            lines = s.splitlines()
            i = 0
            while i < len(lines):
                line = lines[i].rstrip()
                if not line:
                    del lines[i]
                elif i > 0 and line[0] == " ":
                    lines[i-1] += line[1:]
                    del lines[i]
                else:
                    i += 1
        else:
            lines = s.split()
        if (not forceset and len(lines) == 1 and
            (s.find(':') == -1 or s.startswith('RRULE:'))):
            return self._parse_rfc_rrule(lines[0], cache=cache,
                                         dtstart=dtstart, ignoretz=ignoretz,
                                         tzinfos=tzinfos)
        else:
            rrulevals = []
            rdatevals = []
            exrulevals = []
            exdatevals = []
            for line in lines:
                if not line:
                    continue
                if line.find(':') == -1:
                    # A bare line is treated as an RRULE value.
                    name = "RRULE"
                    value = line
                else:
                    name, value = line.split(':', 1)
                    parms = name.split(';')
                    if not parms:
                        raise ValueError("empty property name")
                    name = parms[0]
                    parms = parms[1:]
                if name == "RRULE":
                    for parm in parms:
                        raise ValueError("unsupported RRULE parm: "+parm)
                    rrulevals.append(value)
                elif name == "RDATE":
                    for parm in parms:
                        if parm != "VALUE=DATE-TIME":
                            raise ValueError("unsupported RDATE parm: "+parm)
                    rdatevals.append(value)
                elif name == "EXRULE":
                    for parm in parms:
                        raise ValueError("unsupported EXRULE parm: "+parm)
                    exrulevals.append(value)
                elif name == "EXDATE":
                    for parm in parms:
                        if parm != "VALUE=DATE-TIME":
                            # Bug fix: this message wrongly said "RDATE".
                            raise ValueError("unsupported EXDATE parm: "+parm)
                    exdatevals.append(value)
                elif name == "DTSTART":
                    for parm in parms:
                        raise ValueError("unsupported DTSTART parm: "+parm)
                    if not parser:
                        from dateutil import parser
                    dtstart = parser.parse(value, ignoretz=ignoretz,
                                           tzinfos=tzinfos)
                else:
                    raise ValueError("unsupported property: "+name)
            if (forceset or len(rrulevals) > 1 or
                rdatevals or exrulevals or exdatevals):
                if not parser and (rdatevals or exdatevals):
                    from dateutil import parser
                set = rruleset(cache=cache)
                for value in rrulevals:
                    set.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
                                                    ignoretz=ignoretz,
                                                    tzinfos=tzinfos))
                for value in rdatevals:
                    for datestr in value.split(','):
                        set.rdate(parser.parse(datestr,
                                               ignoretz=ignoretz,
                                               tzinfos=tzinfos))
                for value in exrulevals:
                    set.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
                                                     ignoretz=ignoretz,
                                                     tzinfos=tzinfos))
                for value in exdatevals:
                    for datestr in value.split(','):
                        set.exdate(parser.parse(datestr,
                                                ignoretz=ignoretz,
                                                tzinfos=tzinfos))
                if compatible and dtstart:
                    set.rdate(dtstart)
                return set
            else:
                return self._parse_rfc_rrule(rrulevals[0],
                                             dtstart=dtstart,
                                             cache=cache,
                                             ignoretz=ignoretz,
                                             tzinfos=tzinfos)
    def __call__(self, s, **kwargs):
        return self._parse_rfc(s, **kwargs)
# Public module-level singleton: rrulestr("FREQ=DAILY;...") parses
# RFC 2445 recurrence strings into rrule/rruleset objects.
rrulestr = _rrulestr()
# vim:ts=4:sw=4:et
| Python |
"""
Copyright (c) 2003-2005 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
from dateutil.tz import tzfile
from tarfile import TarFile
import os
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
__all__ = ["setcachesize", "gettz", "rebuild"]
CACHE = []
CACHESIZE = 10
class tzfile(tzfile):
    # Subclass of the imported dateutil.tz.tzfile whose only purpose is
    # picklability: unpickling re-enters gettz() by name, so the cached
    # instance is reused instead of serializing the zone data.
    def __reduce__(self):
        return (gettz, (self._filename,))
def getzoneinfofile():
    """Locate the lexicographically newest zoneinfo*.tar.* archive that
    ships next to this module, or return None when there is none."""
    directory = os.path.dirname(__file__)
    for entry in sorted(os.listdir(directory), reverse=True):
        if entry.startswith("zoneinfo") and ".tar." in entry:
            return os.path.join(directory, entry)
    return None
ZONEINFOFILE = getzoneinfofile()
del getzoneinfofile
def setcachesize(size):
    """Limit the timezone cache to *size* entries, discarding the least
    recently used ones immediately."""
    global CACHESIZE, CACHE
    CACHESIZE = size
    CACHE[size:] = []
def gettz(name):
    """Return a tzinfo for *name* from the bundled zoneinfo tarball.

    Hits are served from a small MRU cache; misses extract the zone file
    from the archive.  Returns None when the archive is missing or the
    name is unknown (the None result is cached too).
    """
    tzinfo = None
    if ZONEINFOFILE:
        for cachedname, tzinfo in CACHE:
            if cachedname == name:
                break
        else:
            # Cache miss (for/else): pull the member out of the archive.
            tf = TarFile.open(ZONEINFOFILE)
            try:
                zonefile = tf.extractfile(name)
            except KeyError:
                tzinfo = None
            else:
                tzinfo = tzfile(zonefile)
            tf.close()
            # Insert at the front (most recently used) and trim.
            CACHE.insert(0, (name, tzinfo))
            del CACHE[CACHESIZE:]
    return tzinfo
def rebuild(filename, tag=None, format="gz"):
    """Rebuild the bundled zoneinfo tarball.

    *filename* is a tarball of Olson timezone source files; each file is
    compiled with the external 'zic' tool into a fresh zoneinfo tree,
    which is then archived as zoneinfo[-tag].tar.<format> next to this
    module, replacing any previously built archives.
    """
    import tempfile, shutil
    tmpdir = tempfile.mkdtemp()
    zonedir = os.path.join(tmpdir, "zoneinfo")
    moduledir = os.path.dirname(__file__)
    # Bug fix: with the default tag=None the old code interpolated the
    # literal "None" and produced "zoneinfoNone.tar.gz"; use an empty
    # suffix instead.
    if tag:
        tag = "-"+tag
    else:
        tag = ""
    targetname = "zoneinfo%s.tar.%s" % (tag, format)
    try:
        tf = TarFile.open(filename)
        for name in tf.getnames():
            # Skip scripts, tab files and leapseconds; compile the rest.
            if not (name.endswith(".sh") or
                    name.endswith(".tab") or
                    name == "leapseconds"):
                tf.extract(name, tmpdir)
                filepath = os.path.join(tmpdir, name)
                # NOTE(review): os.system with interpolated paths; the
                # paths come from our own tempdir, but quoting would be
                # safer if this ever runs on untrusted archives.
                os.system("zic -d %s %s" % (zonedir, filepath))
        tf.close()
        target = os.path.join(moduledir, targetname)
        # Remove any previously built archives before writing the new one.
        for entry in os.listdir(moduledir):
            if entry.startswith("zoneinfo") and ".tar." in entry:
                os.unlink(os.path.join(moduledir, entry))
        tf = TarFile.open(target, "w:%s" % format)
        for entry in os.listdir(zonedir):
            entrypath = os.path.join(zonedir, entry)
            tf.add(entrypath, entry)
        tf.close()
    finally:
        # Always clean up the scratch directory, even on zic failure.
        shutil.rmtree(tmpdir)
| Python |
"""
Copyright (c) 2003-2010 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
import datetime
import calendar
__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
class weekday(object):
    """A weekday specifier, optionally qualified by an occurrence number.

    ``weekday(0)`` is Monday; ``weekday(0, +1)`` means "first Monday" and
    ``weekday(0, -2)`` "second-to-last Monday".
    """
    __slots__ = ["weekday", "n"]
    def __init__(self, weekday, n=None):
        self.weekday = weekday
        self.n = n
    def __call__(self, n):
        # Supports the MO(+2) notation; reuse self when n is unchanged.
        if n == self.n:
            return self
        else:
            return self.__class__(self.weekday, n)
    def __eq__(self, other):
        try:
            if self.weekday != other.weekday or self.n != other.n:
                return False
        except AttributeError:
            return False
        return True
    def __ne__(self, other):
        # Bug fix: __eq__ alone left "!=" using the default comparison on
        # Python 2, so two equal weekday instances still compared unequal.
        return not self.__eq__(other)
    def __hash__(self):
        # Keep instances hashable and consistent with __eq__.
        return hash((self.weekday, self.n))
    def __repr__(self):
        s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
        if not self.n:
            return s
        else:
            return "%s(%+d)" % (s, self.n)
# The seven weekday singletons; weekdays[n] maps 0=Monday .. 6=Sunday.
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
class relativedelta:
    """
    The relativedelta type is based on the specification of the excellent
    work done by M.-A. Lemburg in his mx.DateTime extension. However,
    notice that this type does *NOT* implement the same algorithm as
    his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.

    There are two different ways to build a relativedelta instance. The
    first one is passing it two date/datetime classes:

        relativedelta(datetime1, datetime2)

    And the other way is to use the following keyword arguments:

        year, month, day, hour, minute, second, microsecond:
            Absolute information.

        years, months, weeks, days, hours, minutes, seconds, microseconds:
            Relative information, may be negative.

        weekday:
            One of the weekday instances (MO, TU, etc). These instances may
            receive a parameter N, specifying the Nth weekday, which could
            be positive or negative (like MO(+1) or MO(-2)). Not specifying
            it is the same as specifying +1. You can also use an integer,
            where 0=MO.

        leapdays:
            Will add given days to the date found, if year is a leap
            year, and the date found is post 28 of february.

        yearday, nlyearday:
            Set the yearday or the non-leap year day (jump leap days).
            These are converted to day/month/leapdays information.

    Here is the behavior of operations with relativedelta:

    1) Calculate the absolute year, using the 'year' argument, or the
       original datetime year, if the argument is not present.
    2) Add the relative 'years' argument to the absolute year.
    3) Do steps 1 and 2 for month/months.
    4) Calculate the absolute day, using the 'day' argument, or the
       original datetime day, if the argument is not present. Then,
       subtract from the day until it fits in the year and month
       found after their operations.
    5) Add the relative 'days' argument to the absolute day. Notice
       that the 'weeks' argument is multiplied by 7 and added to
       'days'.
    6) Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds,
       microsecond/microseconds.
    7) If the 'weekday' argument is present, calculate the weekday,
       with the given (wday, nth) tuple. wday is the index of the
       weekday (0-6, 0=Mon), and nth is the number of weeks to add
       forward or backward, depending on its signal. Notice that if
       the calculated date is already Monday, for example, using
       (0, 1) or (0, -1) won't change the day.
    """
    def __init__(self, dt1=None, dt2=None,
                 years=0, months=0, days=0, leapdays=0, weeks=0,
                 hours=0, minutes=0, seconds=0, microseconds=0,
                 year=None, month=None, day=None, weekday=None,
                 yearday=None, nlyearday=None,
                 hour=None, minute=None, second=None, microsecond=None):
        if dt1 and dt2:
            if not isinstance(dt1, datetime.date) or \
               not isinstance(dt2, datetime.date):
                raise TypeError("relativedelta only diffs datetime/date")
            if type(dt1) is not type(dt2):
                # Promote the plain date so both operands are datetimes.
                if not isinstance(dt1, datetime.datetime):
                    dt1 = datetime.datetime.fromordinal(dt1.toordinal())
                elif not isinstance(dt2, datetime.datetime):
                    dt2 = datetime.datetime.fromordinal(dt2.toordinal())
            self.years = 0
            self.months = 0
            self.days = 0
            self.leapdays = 0
            self.hours = 0
            self.minutes = 0
            self.seconds = 0
            self.microseconds = 0
            self.year = None
            self.month = None
            self.day = None
            self.weekday = None
            self.hour = None
            self.minute = None
            self.second = None
            self.microsecond = None
            self._has_time = 0
            # Estimate the month delta, then refine it until dt2 + self
            # does not overshoot dt1; the remainder goes into seconds.
            months = (dt1.year*12+dt1.month)-(dt2.year*12+dt2.month)
            self._set_months(months)
            dtm = self.__radd__(dt2)
            if dt1 < dt2:
                while dt1 > dtm:
                    months += 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            else:
                while dt1 < dtm:
                    months -= 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            delta = dt1 - dtm
            self.seconds = delta.seconds+delta.days*86400
            self.microseconds = delta.microseconds
        else:
            self.years = years
            self.months = months
            self.days = days+weeks*7
            self.leapdays = leapdays
            self.hours = hours
            self.minutes = minutes
            self.seconds = seconds
            self.microseconds = microseconds
            self.year = year
            self.month = month
            self.day = day
            self.hour = hour
            self.minute = minute
            self.second = second
            self.microsecond = microsecond
            if type(weekday) is int:
                self.weekday = weekdays[weekday]
            else:
                self.weekday = weekday
            yday = 0
            if nlyearday:
                yday = nlyearday
            elif yearday:
                yday = yearday
                if yearday > 59:
                    # Past Feb 28: compensate for a possible leap day.
                    self.leapdays = -1
            if yday:
                # Convert a 1-based day-of-year into month/day.
                ydayidx = [31,59,90,120,151,181,212,243,273,304,334,366]
                for idx, ydays in enumerate(ydayidx):
                    if yday <= ydays:
                        self.month = idx+1
                        if idx == 0:
                            self.day = yday
                        else:
                            self.day = yday-ydayidx[idx-1]
                        break
                else:
                    raise ValueError("invalid year day (%d)" % yday)
        self._fix()

    def _fix(self):
        # Normalize each relative field into its canonical range,
        # carrying overflow into the next larger unit (sign-preserving).
        if abs(self.microseconds) > 999999:
            s = self.microseconds//abs(self.microseconds)
            div, mod = divmod(self.microseconds*s, 1000000)
            self.microseconds = mod*s
            self.seconds += div*s
        if abs(self.seconds) > 59:
            s = self.seconds//abs(self.seconds)
            div, mod = divmod(self.seconds*s, 60)
            self.seconds = mod*s
            self.minutes += div*s
        if abs(self.minutes) > 59:
            s = self.minutes//abs(self.minutes)
            div, mod = divmod(self.minutes*s, 60)
            self.minutes = mod*s
            self.hours += div*s
        if abs(self.hours) > 23:
            s = self.hours//abs(self.hours)
            div, mod = divmod(self.hours*s, 24)
            self.hours = mod*s
            self.days += div*s
        if abs(self.months) > 11:
            s = self.months//abs(self.months)
            div, mod = divmod(self.months*s, 12)
            self.months = mod*s
            self.years += div*s
        # _has_time decides whether date + self must be promoted to
        # datetime in __radd__.
        if (self.hours or self.minutes or self.seconds or self.microseconds or
            self.hour is not None or self.minute is not None or
            self.second is not None or self.microsecond is not None):
            self._has_time = 1
        else:
            self._has_time = 0

    def _set_months(self, months):
        # Like _fix() but only for the months/years pair.
        self.months = months
        if abs(self.months) > 11:
            s = self.months//abs(self.months)
            div, mod = divmod(self.months*s, 12)
            self.months = mod*s
            self.years = div*s
        else:
            self.years = 0

    def __radd__(self, other):
        # date/datetime + relativedelta.
        if not isinstance(other, datetime.date):
            raise TypeError("unsupported type for add operation")
        elif self._has_time and not isinstance(other, datetime.datetime):
            other = datetime.datetime.fromordinal(other.toordinal())
        year = (self.year or other.year)+self.years
        month = self.month or other.month
        if self.months:
            assert 1 <= abs(self.months) <= 12
            month += self.months
            if month > 12:
                year += 1
                month -= 12
            elif month < 1:
                year -= 1
                month += 12
        # Clamp the day to the target month's length (e.g. Jan 31 + 1
        # month -> Feb 28/29).
        day = min(calendar.monthrange(year, month)[1],
                  self.day or other.day)
        repl = {"year": year, "month": month, "day": day}
        for attr in ["hour", "minute", "second", "microsecond"]:
            value = getattr(self, attr)
            if value is not None:
                repl[attr] = value
        days = self.days
        if self.leapdays and month > 2 and calendar.isleap(year):
            days += self.leapdays
        ret = (other.replace(**repl)
               + datetime.timedelta(days=days,
                                    hours=self.hours,
                                    minutes=self.minutes,
                                    seconds=self.seconds,
                                    microseconds=self.microseconds))
        if self.weekday:
            # Jump to the nth requested weekday relative to ret.
            weekday, nth = self.weekday.weekday, self.weekday.n or 1
            jumpdays = (abs(nth)-1)*7
            if nth > 0:
                jumpdays += (7-ret.weekday()+weekday)%7
            else:
                jumpdays += (ret.weekday()-weekday)%7
                jumpdays *= -1
            ret += datetime.timedelta(days=jumpdays)
        return ret

    def __rsub__(self, other):
        return self.__neg__().__radd__(other)

    def __add__(self, other):
        if not isinstance(other, relativedelta):
            raise TypeError("unsupported type for add operation")
        return relativedelta(years=other.years+self.years,
                             months=other.months+self.months,
                             days=other.days+self.days,
                             hours=other.hours+self.hours,
                             minutes=other.minutes+self.minutes,
                             seconds=other.seconds+self.seconds,
                             microseconds=other.microseconds+self.microseconds,
                             leapdays=other.leapdays or self.leapdays,
                             year=other.year or self.year,
                             month=other.month or self.month,
                             day=other.day or self.day,
                             weekday=other.weekday or self.weekday,
                             hour=other.hour or self.hour,
                             minute=other.minute or self.minute,
                             second=other.second or self.second,
                             # Bug fix: used to read other.second here.
                             microsecond=other.microsecond or self.microsecond)

    def __sub__(self, other):
        if not isinstance(other, relativedelta):
            raise TypeError("unsupported type for sub operation")
        return relativedelta(years=other.years-self.years,
                             months=other.months-self.months,
                             days=other.days-self.days,
                             hours=other.hours-self.hours,
                             minutes=other.minutes-self.minutes,
                             seconds=other.seconds-self.seconds,
                             microseconds=other.microseconds-self.microseconds,
                             leapdays=other.leapdays or self.leapdays,
                             year=other.year or self.year,
                             month=other.month or self.month,
                             day=other.day or self.day,
                             weekday=other.weekday or self.weekday,
                             hour=other.hour or self.hour,
                             minute=other.minute or self.minute,
                             second=other.second or self.second,
                             # Bug fix: used to read other.second here.
                             microsecond=other.microsecond or self.microsecond)

    def __neg__(self):
        return relativedelta(years=-self.years,
                             months=-self.months,
                             days=-self.days,
                             hours=-self.hours,
                             minutes=-self.minutes,
                             seconds=-self.seconds,
                             microseconds=-self.microseconds,
                             leapdays=self.leapdays,
                             year=self.year,
                             month=self.month,
                             day=self.day,
                             weekday=self.weekday,
                             hour=self.hour,
                             minute=self.minute,
                             second=self.second,
                             microsecond=self.microsecond)

    def __nonzero__(self):
        return not (not self.years and
                    not self.months and
                    not self.days and
                    not self.hours and
                    not self.minutes and
                    not self.seconds and
                    not self.microseconds and
                    not self.leapdays and
                    self.year is None and
                    self.month is None and
                    self.day is None and
                    self.weekday is None and
                    self.hour is None and
                    self.minute is None and
                    self.second is None and
                    self.microsecond is None)
    # Python 3 spells the truth-value hook __bool__.
    __bool__ = __nonzero__

    def __mul__(self, other):
        f = float(other)
        return relativedelta(years=self.years*f,
                             months=self.months*f,
                             days=self.days*f,
                             hours=self.hours*f,
                             minutes=self.minutes*f,
                             seconds=self.seconds*f,
                             microseconds=self.microseconds*f,
                             leapdays=self.leapdays,
                             year=self.year,
                             month=self.month,
                             day=self.day,
                             weekday=self.weekday,
                             hour=self.hour,
                             minute=self.minute,
                             second=self.second,
                             microsecond=self.microsecond)

    def __eq__(self, other):
        if not isinstance(other, relativedelta):
            return False
        if self.weekday or other.weekday:
            if not self.weekday or not other.weekday:
                return False
            if self.weekday.weekday != other.weekday.weekday:
                return False
            # An unqualified weekday is equivalent to n == 1.
            n1, n2 = self.weekday.n, other.weekday.n
            if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
                return False
        return (self.years == other.years and
                self.months == other.months and
                self.days == other.days and
                self.hours == other.hours and
                self.minutes == other.minutes and
                self.seconds == other.seconds and
                # Bug fix: microseconds (relative) was not compared.
                self.microseconds == other.microseconds and
                self.leapdays == other.leapdays and
                self.year == other.year and
                self.month == other.month and
                self.day == other.day and
                self.hour == other.hour and
                self.minute == other.minute and
                self.second == other.second and
                self.microsecond == other.microsecond)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __div__(self, other):
        return self.__mul__(1/float(other))
    # Python 3 spells division __truediv__.
    __truediv__ = __div__

    def __repr__(self):
        l = []
        for attr in ["years", "months", "days", "leapdays",
                     "hours", "minutes", "seconds", "microseconds"]:
            value = getattr(self, attr)
            if value:
                l.append("%s=%+d" % (attr, value))
        for attr in ["year", "month", "day", "weekday",
                     "hour", "minute", "second", "microsecond"]:
            value = getattr(self, attr)
            if value is not None:
                # repr() instead of py2-only backticks.
                l.append("%s=%s" % (attr, repr(value)))
        return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
# vim:ts=4:sw=4:et
| Python |
# This code was originally contributed by Jeffrey Harris.
import datetime
import struct
import _winreg
__author__ = "Jeffrey Harris & Gustavo Niemeyer <gustavo@niemeyer.net>"
__all__ = ["tzwin", "tzwinlocal"]
ONEWEEK = datetime.timedelta(7)
TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
def _settzkeyname():
    # Probe the registry once: NT-family Windows stores timezones under
    # the "Windows NT" key, Win9x under "Windows"; keep whichever opens.
    global TZKEYNAME
    handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
    try:
        _winreg.OpenKey(handle, TZKEYNAMENT).Close()
        TZKEYNAME = TZKEYNAMENT
    except WindowsError:
        TZKEYNAME = TZKEYNAME9X
    handle.Close()
# Resolve TZKEYNAME at import time.
_settzkeyname()
class tzwinbase(datetime.tzinfo):
    """tzinfo class based on win32's timezones available in the registry."""
    # Subclasses must set _stdoffset/_dstoffset (minutes east of UTC,
    # registry Bias values already negated), _stdname/_dstname, and the
    # (month, day-of-week, week-number, hour, minute) transition fields.
    def utcoffset(self, dt):
        if self._isdst(dt):
            return datetime.timedelta(minutes=self._dstoffset)
        else:
            return datetime.timedelta(minutes=self._stdoffset)
    def dst(self, dt):
        # DST correction relative to standard time.
        if self._isdst(dt):
            minutes = self._dstoffset - self._stdoffset
            return datetime.timedelta(minutes=minutes)
        else:
            return datetime.timedelta(0)
    def tzname(self, dt):
        if self._isdst(dt):
            return self._dstname
        else:
            return self._stdname
    def list():
        """Return a list of all time zones known to the system."""
        handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
        tzkey = _winreg.OpenKey(handle, TZKEYNAME)
        result = [_winreg.EnumKey(tzkey, i)
                  for i in range(_winreg.QueryInfoKey(tzkey)[0])]
        tzkey.Close()
        handle.Close()
        return result
    # NOTE: this name shadows the builtin list() inside the class body.
    list = staticmethod(list)
    def display(self):
        return self._display
    def _isdst(self, dt):
        # Compute this year's DST start/end instants from the stored
        # nth-weekday transition rules, then test which side dt is on.
        dston = picknthweekday(dt.year, self._dstmonth, self._dstdayofweek,
                               self._dsthour, self._dstminute,
                               self._dstweeknumber)
        dstoff = picknthweekday(dt.year, self._stdmonth, self._stddayofweek,
                                self._stdhour, self._stdminute,
                                self._stdweeknumber)
        if dston < dstoff:
            # DST interval lies within the calendar year.
            return dston <= dt.replace(tzinfo=None) < dstoff
        else:
            # DST interval wraps across the new year.
            return not dstoff <= dt.replace(tzinfo=None) < dston
class tzwin(tzwinbase):
    """A named Windows timezone loaded from the registry Time Zones key."""
    def __init__(self, name):
        self._name = name
        handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
        tzkey = _winreg.OpenKey(handle, "%s\%s" % (TZKEYNAME, name))
        keydict = valuestodict(tzkey)
        tzkey.Close()
        handle.Close()
        self._stdname = keydict["Std"].encode("iso-8859-1")
        self._dstname = keydict["Dlt"].encode("iso-8859-1")
        self._display = keydict["Display"]
        # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm
        # TZI layout: 3 longs (Bias, StandardBias, DaylightBias) followed
        # by two SYSTEMTIME structs of 8 shorts each.
        tup = struct.unpack("=3l16h", keydict["TZI"])
        self._stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1
        self._dstoffset = self._stdoffset-tup[2] # + DaylightBias * -1
        (self._stdmonth,
         self._stddayofweek, # Sunday = 0
         self._stdweeknumber, # Last = 5
         self._stdhour,
         self._stdminute) = tup[4:9]
        (self._dstmonth,
         self._dstdayofweek, # Sunday = 0
         self._dstweeknumber, # Last = 5
         self._dsthour,
         self._dstminute) = tup[12:17]
    def __repr__(self):
        return "tzwin(%s)" % repr(self._name)
    def __reduce__(self):
        # Pickle by zone name; unpickling re-reads the registry.
        return (self.__class__, (self._name,))
class tzwinlocal(tzwinbase):
    """The machine's current local Windows timezone, read from the registry."""
    def __init__(self):
        handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
        tzlocalkey = _winreg.OpenKey(handle, TZLOCALKEYNAME)
        keydict = valuestodict(tzlocalkey)
        tzlocalkey.Close()
        self._stdname = keydict["StandardName"].encode("iso-8859-1")
        self._dstname = keydict["DaylightName"].encode("iso-8859-1")
        try:
            # The display string lives under the per-zone key named after
            # the standard name; fall back to None when absent.
            # (WindowsError subclasses OSError, so misses land here.)
            tzkey = _winreg.OpenKey(handle, "%s\%s"%(TZKEYNAME, self._stdname))
            _keydict = valuestodict(tzkey)
            self._display = _keydict["Display"]
            tzkey.Close()
        except OSError:
            self._display = None
        handle.Close()
        self._stdoffset = -keydict["Bias"]-keydict["StandardBias"]
        self._dstoffset = self._stdoffset-keydict["DaylightBias"]
        # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm
        # StandardStart/DaylightStart are SYSTEMTIME structs (8 shorts).
        tup = struct.unpack("=8h", keydict["StandardStart"])
        (self._stdmonth,
         self._stddayofweek, # Sunday = 0
         self._stdweeknumber, # Last = 5
         self._stdhour,
         self._stdminute) = tup[1:6]
        tup = struct.unpack("=8h", keydict["DaylightStart"])
        (self._dstmonth,
         self._dstdayofweek, # Sunday = 0
         self._dstweeknumber, # Last = 5
         self._dsthour,
         self._dstminute) = tup[1:6]
    def __reduce__(self):
        # Re-read the local settings on unpickle rather than storing them.
        return (self.__class__, ())
def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
    """Return the nth occurrence of a weekday in a month as a datetime.

    dayofweek == 0 means Sunday (Windows convention); whichweek == 5
    means the last occurrence.  When the requested week number overruns
    the month (e.g. week 5 of a month with only four such weekdays), the
    search falls back one week at a time.
    """
    first = datetime.datetime(year, month, 1, hour, minute)
    # First occurrence of the requested weekday in the month
    # (isoweekday: Mon=1..Sun=7, shifted to the Sunday-based input).
    weekdayone = first.replace(day=((dayofweek-first.isoweekday())%7+1))
    oneweek = datetime.timedelta(days=7)
    # Bug fix: the old loop scanned weeks whichweek..1 and therefore
    # skipped the first occurrence, returning dates one week late; scan
    # whichweek-1..0 instead, as the sibling win32tz implementation does.
    for n in range(whichweek-1, -1, -1):
        dt = weekdayone+n*oneweek
        if dt.month == month:
            return dt
def valuestodict(key):
    """Convert a registry key's values to a dictionary."""
    values = {}
    # QueryInfoKey()[1] is the number of values stored under the key.
    count = _winreg.QueryInfoKey(key)[1]
    for index in range(count):
        # EnumValue returns (name, data, type); keep name -> data.
        name, data = _winreg.EnumValue(key, index)[:2]
        values[name] = data
    return values
| Python |
"""
Copyright (c) 2003-2010 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
__version__ = "1.5"
| Python |
"""
Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
import datetime
__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"]
EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3
def easter(year, method=EASTER_WESTERN):
    """
    This method was ported from the work done by GM Arts,
    on top of the algorithm by Claus Tondering, which was
    based in part on the algorithm of Oudin (1940), as
    quoted in "Explanatory Supplement to the Astronomical
    Almanac", P. Kenneth Seidelmann, editor.

    This algorithm implements three different easter
    calculation methods:

    1 - Original calculation in Julian calendar, valid in
        dates after 326 AD
    2 - Original method, with date converted to Gregorian
        calendar, valid in years 1583 to 4099
    3 - Revised method, in Gregorian calendar, valid in
        years 1583 to 4099 as well

    These methods are represented by the constants:

    EASTER_JULIAN   = 1
    EASTER_ORTHODOX = 2
    EASTER_WESTERN  = 3

    The default method is method 3.

    More about the algorithm may be found at:
    http://users.chariot.net.au/~gmarts/eastalg.htm
    and
    http://www.tondering.dk/claus/calendar.html
    """
    if not (1 <= method <= 3):
        # Parenthesized raise: valid on both Python 2 and 3.
        raise ValueError("invalid method")

    # g - Golden year - 1
    # c - Century
    # h - (23 - Epact) mod 30
    # i - Number of days from March 21 to Paschal Full Moon
    # j - Weekday for PFM (0=Sunday, etc)
    # p - Number of days from March 21 to Sunday on or before PFM
    #     (-6 to 28 methods 1 & 3, to 56 for method 2)
    # e - Extra days to add for method 2 (converting Julian
    #     date to Gregorian date)
    y = year
    g = y % 19
    e = 0
    if method < 3:
        # Old method
        i = (19*g+15)%30
        j = (y+y//4+i)%7
        if method == 2:
            # Extra dates to convert Julian to Gregorian date
            e = 10
            if y > 1600:
                e = e+y//100-16-(y//100-16)//4
    else:
        # New method
        c = y//100
        h = (c-c//4-(8*c+13)//25+19*g+15)%30
        i = h-(h//28)*(1-(h//28)*(29//(h+1))*((21-g)//11))
        j = (y+y//4+i+2-c+c//4)%7

    # p can be from -6 to 56 corresponding to dates 22 March to 23 May
    # (later dates apply to method 2, although 23 May never actually occurs)
    p = i-j+e
    d = 1+(p+27+(p+6)//40)%31
    m = 3+(p+26)//30
    return datetime.date(int(y),int(m),int(d))
| Python |
import _winreg
import struct
import datetime
# Module-level registry handles, opened once at import time.
handle=_winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
# Parent key containing one subkey per installed timezone.
tzparent=_winreg.OpenKey(handle,
    "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Time Zones")
# Number of timezone subkeys under tzparent.
parentsize=_winreg.QueryInfoKey(tzparent)[0]
# Key describing the machine's current local timezone settings.
localkey=_winreg.OpenKey(handle,
    "SYSTEM\\CurrentControlSet\\Control\\TimeZoneInformation")
WEEKS=datetime.timedelta(7)
def list_timezones():
    """Return a list of all time zones known to the system."""
    return [_winreg.EnumKey(tzparent, index) for index in xrange(parentsize)]
class win32tz(datetime.tzinfo):
    """tzinfo class based on win32's timezones available in the registry.
    >>> local = win32tz('Central Standard Time')
    >>> oct1 = datetime.datetime(month=10, year=2004, day=1, tzinfo=local)
    >>> dec1 = datetime.datetime(month=12, year=2004, day=1, tzinfo=local)
    >>> oct1.dst()
    datetime.timedelta(0, 3600)
    >>> dec1.dst()
    datetime.timedelta(0)
    >>> braz = win32tz('E. South America Standard Time')
    >>> braz.dst(oct1)
    datetime.timedelta(0)
    >>> braz.dst(dec1)
    datetime.timedelta(0, 3600)
    """
    def __init__(self, name):
        # All registry access is delegated to win32tz_data.
        self.data=win32tz_data(name)
    def utcoffset(self, dt):
        # Offsets are stored as minutes east of UTC.
        if self._isdst(dt):
            return datetime.timedelta(minutes=self.data.dstoffset)
        else:
            return datetime.timedelta(minutes=self.data.stdoffset)
    def dst(self, dt):
        # DST correction relative to standard time.
        if self._isdst(dt):
            minutes = self.data.dstoffset - self.data.stdoffset
            return datetime.timedelta(minutes=minutes)
        else:
            return datetime.timedelta(0)
    def tzname(self, dt):
        if self._isdst(dt): return self.data.dstname
        else: return self.data.stdname
    def _isdst(self, dt):
        # Compute this year's DST start/end instants from the stored
        # nth-weekday transition rules, then test which side dt is on.
        dat=self.data
        dston = pickNthWeekday(dt.year, dat.dstmonth, dat.dstdayofweek,
                               dat.dsthour, dat.dstminute, dat.dstweeknumber)
        dstoff = pickNthWeekday(dt.year, dat.stdmonth, dat.stddayofweek,
                                dat.stdhour, dat.stdminute, dat.stdweeknumber)
        if dston < dstoff:
            # DST interval lies within the calendar year.
            if dston <= dt.replace(tzinfo=None) < dstoff: return True
            else: return False
        else:
            # DST interval wraps across the new year.
            if dstoff <= dt.replace(tzinfo=None) < dston: return False
            else: return True
    def __repr__(self):
        return "<win32tz - %s>" % self.data.display
def pickNthWeekday(year, month, dayofweek, hour, minute, whichweek):
    """dayofweek == 0 means Sunday, whichweek > 4 means last instance"""
    first = datetime.datetime(year=year, month=month, hour=hour, minute=minute,
                              day=1)
    # First occurrence of the requested weekday in the month
    # (isoweekday: Mon=1..Sun=7, shifted to the Sunday-based input).
    weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7 + 1))
    week = datetime.timedelta(days=7)
    # Walk backwards from the requested week number until the candidate
    # still falls inside the month ("last instance" falls back a week).
    n = whichweek - 1
    while n >= 0:
        candidate = weekdayone + n * week
        if candidate.month == month:
            return candidate
        n -= 1
class win32tz_data(object):
    """Read a registry key for a timezone, expose its contents."""
    # Attributes exposed: display, stdname/dstname, stdoffset/dstoffset
    # (minutes east of UTC, registry Bias negated) and the
    # (month, day-of-week, week-number, hour, minute) transition fields.
    def __init__(self, path):
        """Load path, or if path is empty, load local time."""
        if path:
            keydict=valuesToDict(_winreg.OpenKey(tzparent, path))
            self.display = keydict['Display']
            self.dstname = keydict['Dlt']
            self.stdname = keydict['Std']
            #see http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm
            # TZI layout: 3 longs (Bias, StandardBias, DaylightBias)
            # followed by two SYSTEMTIME structs of 8 shorts each.
            tup = struct.unpack('=3l16h', keydict['TZI'])
            self.stdoffset = -tup[0]-tup[1] #Bias + StandardBias * -1
            self.dstoffset = self.stdoffset - tup[2] # + DaylightBias * -1
            # StandardDate SYSTEMTIME starts at short index 3.
            offset=3
            self.stdmonth = tup[1 + offset]
            self.stddayofweek = tup[2 + offset] #Sunday=0
            self.stdweeknumber = tup[3 + offset] #Last = 5
            self.stdhour = tup[4 + offset]
            self.stdminute = tup[5 + offset]
            # DaylightDate SYSTEMTIME starts at short index 11.
            offset=11
            self.dstmonth = tup[1 + offset]
            self.dstdayofweek = tup[2 + offset] #Sunday=0
            self.dstweeknumber = tup[3 + offset] #Last = 5
            self.dsthour = tup[4 + offset]
            self.dstminute = tup[5 + offset]
        else:
            # Local-time variant: values live directly under localkey,
            # with the display string under the per-zone key.
            keydict=valuesToDict(localkey)
            self.stdname = keydict['StandardName']
            self.dstname = keydict['DaylightName']
            sourcekey=_winreg.OpenKey(tzparent, self.stdname)
            self.display = valuesToDict(sourcekey)['Display']
            self.stdoffset = -keydict['Bias']-keydict['StandardBias']
            self.dstoffset = self.stdoffset - keydict['DaylightBias']
            #see http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm
            tup = struct.unpack('=8h', keydict['StandardStart'])
            offset=0
            self.stdmonth = tup[1 + offset]
            self.stddayofweek = tup[2 + offset] #Sunday=0
            self.stdweeknumber = tup[3 + offset] #Last = 5
            self.stdhour = tup[4 + offset]
            self.stdminute = tup[5 + offset]
            tup = struct.unpack('=8h', keydict['DaylightStart'])
            self.dstmonth = tup[1 + offset]
            self.dstdayofweek = tup[2 + offset] #Sunday=0
            self.dstweeknumber = tup[3 + offset] #Last = 5
            self.dsthour = tup[4 + offset]
            self.dstminute = tup[5 + offset]
def valuesToDict(key):
    """Convert a registry key's values to a dictionary."""
    # Renamed the local so it no longer shadows the builtin dict.
    result = {}
    # QueryInfoKey()[1] is the number of values stored under the key.
    size = _winreg.QueryInfoKey(key)[1]
    for i in xrange(size):
        # EnumValue returns (name, data, type); fetch it once per index
        # instead of twice as the original did.
        name, data = _winreg.EnumValue(key, i)[:2]
        result[name] = data
    return result
def _test():
    # Run the doctests embedded in this module's win32tz class docstring.
    import win32tz, doctest
    doctest.testmod(win32tz, verbose=0)
if __name__ == '__main__':
    _test()
"""Compare VTODOs and VEVENTs in two iCalendar sources."""
from base import Component, getBehavior, newFromBehavior
def getSortKey(component):
    """Build a sort key for a component: UID, then zero-padded SEQUENCE,
    then RECURRENCE-ID (a placeholder when absent), concatenated."""
    uid = component.getChildValue('uid', '')
    # SEQUENCE and RECURRENCE-ID disambiguate several versions of the
    # same UID; pad the sequence so string comparison orders numerically.
    sequence = "%05d" % int(component.getChildValue('sequence', 0))
    recurrence_id = component.getChildValue('recurrence_id', None)
    if recurrence_id is None:
        recurrence_part = '0000-00-00'
    else:
        recurrence_part = recurrence_id.isoformat()
    return uid + sequence + recurrence_part
def sortByUID(components):
    """Return the components as a new list ordered by getSortKey."""
    ordered = list(components)
    ordered.sort(key=getSortKey)
    return ordered
def deleteExtraneous(component, ignore_dtstamp=False):
    """
    Recursively walk the component's children, deleting extraneous details like
    X-VOBJ-ORIGINAL-TZID, and DTSTAMP lines when ignore_dtstamp is True.
    """
    for comp in component.components():
        deleteExtraneous(comp, ignore_dtstamp)
    for line in component.lines():
        # dict.has_key() was removed in Python 3; the `in` operator is
        # equivalent and works on both Python 2 and 3.
        if 'X-VOBJ-ORIGINAL-TZID' in line.params:
            del line.params['X-VOBJ-ORIGINAL-TZID']
    if ignore_dtstamp and hasattr(component, 'dtstamp_list'):
        del component.dtstamp_list
def diff(left, right):
    """
    Take two VCALENDAR components, compare VEVENTs and VTODOs in them,
    return a list of object pairs containing just UID and the bits
    that didn't match, using None for objects that weren't present in one
    version or the other.
    When there are multiple ContentLines in one VEVENT, for instance many
    DESCRIPTION lines, such lines original order is assumed to be
    meaningful. Order is also preserved when comparing (the unlikely case
    of) multiple parameters of the same type in a ContentLine
    """
    def processComponentLists(leftList, rightList):
        # Merge-walk two lists, both assumed sorted by getSortKey, pairing
        # matching components and emitting (comp, None) / (None, comp) for
        # components present on only one side.
        output = []
        rightIndex = 0
        rightListSize = len(rightList)
        for comp in leftList:
            if rightIndex >= rightListSize:
                # right side exhausted: everything left over is left-only
                output.append((comp, None))
            else:
                leftKey = getSortKey(comp)
                rightComp = rightList[rightIndex]
                rightKey = getSortKey(rightComp)
                while leftKey > rightKey:
                    # right-only components sort before the current left one
                    output.append((None, rightComp))
                    rightIndex += 1
                    if rightIndex >= rightListSize:
                        output.append((comp, None))
                        break
                    else:
                        rightComp = rightList[rightIndex]
                        rightKey = getSortKey(rightComp)
                if leftKey < rightKey:
                    output.append((comp, None))
                elif leftKey == rightKey:
                    rightIndex += 1
                    matchResult = processComponentPair(comp, rightComp)
                    if matchResult is not None:
                        output.append(matchResult)
        return output
    def newComponent(name, body):
        # NOTE(review): this helper appears to be unused within diff() and
        # ignores its `body` argument beyond the None check -- candidate for
        # removal; verify no external callers before deleting.
        if body is None:
            return None
        else:
            c = Component(name)
            c.behavior = getBehavior(name)
            c.isNative = True
            return c
    def processComponentPair(leftComp, rightComp):
        """
        Return None if a match, or a pair of components including UIDs and
        any differing children.
        """
        leftChildKeys = leftComp.contents.keys()
        rightChildKeys = rightComp.contents.keys()
        differentContentLines = []
        differentComponents = {}
        for key in leftChildKeys:
            rightList = rightComp.contents.get(key, [])
            if isinstance(leftComp.contents[key][0], Component):
                # recurse into sub-components (e.g. VALARM inside VEVENT)
                compDifference = processComponentLists(leftComp.contents[key],
                                                       rightList)
                if len(compDifference) > 0:
                    differentComponents[key] = compDifference
            elif leftComp.contents[key] != rightList:
                differentContentLines.append((leftComp.contents[key],
                                              rightList))
        for key in rightChildKeys:
            if key not in leftChildKeys:
                # children that exist only on the right side
                if isinstance(rightComp.contents[key][0], Component):
                    differentComponents[key] = ([], rightComp.contents[key])
                else:
                    differentContentLines.append(([], rightComp.contents[key]))
        if len(differentContentLines) == 0 and len(differentComponents) == 0:
            return None
        else:
            left = newFromBehavior(leftComp.name)
            right = newFromBehavior(leftComp.name)
            # add a UID, if one existed, despite the fact that they'll always be
            # the same
            uid = leftComp.getChildValue('uid')
            if uid is not None:
                left.add( 'uid').value = uid
                right.add('uid').value = uid
            for name, childPairList in differentComponents.iteritems():
                leftComponents, rightComponents = zip(*childPairList)
                if len(leftComponents) > 0:
                    # filter out None
                    left.contents[name] = filter(None, leftComponents)
                if len(rightComponents) > 0:
                    # filter out None
                    right.contents[name] = filter(None, rightComponents)
            for leftChildLine, rightChildLine in differentContentLines:
                nonEmpty = leftChildLine or rightChildLine
                name = nonEmpty[0].name
                if leftChildLine is not None:
                    left.contents[name] = leftChildLine
                if rightChildLine is not None:
                    right.contents[name] = rightChildLine
            return left, right
    vevents = processComponentLists(sortByUID(getattr(left, 'vevent_list', [])),
                                    sortByUID(getattr(right, 'vevent_list', [])))
    vtodos = processComponentLists(sortByUID(getattr(left, 'vtodo_list', [])),
                                   sortByUID(getattr(right, 'vtodo_list', [])))
    return vevents + vtodos
def prettyDiff(leftObj, rightObj):
    """Print each differing pair from diff() in a conflict-marker style dump."""
    for left, right in diff(leftObj, rightObj):
        print "<<<<<<<<<<<<<<<"
        if left is not None:
            left.prettyPrint()
        print "==============="
        if right is not None:
            right.prettyPrint()
        print ">>>>>>>>>>>>>>>"
        print
from optparse import OptionParser
import icalendar, base
import os
import codecs
def main():
    """Entry point: parse options, read both ics files, print their diff."""
    options, args = getOptions()
    if args:
        ignore_dtstamp = options.ignore
        ics_file1, ics_file2 = args
        # Use open() instead of the removed file() builtin, and close the
        # handles when parsing is done (the originals were never closed).
        f1 = open(ics_file1)
        try:
            cal1 = base.readOne(f1)
        finally:
            f1.close()
        f2 = open(ics_file2)
        try:
            cal2 = base.readOne(f2)
        finally:
            f2.close()
        deleteExtraneous(cal1, ignore_dtstamp=ignore_dtstamp)
        deleteExtraneous(cal2, ignore_dtstamp=ignore_dtstamp)
        prettyDiff(cal1, cal2)
# Version string reported by OptionParser's --version switch (see getOptions).
version = "0.1"
def getOptions():
    """Parse command-line arguments.

    Returns (options, args) on success, or (False, False) after printing
    usage help when fewer than two file arguments are given.
    """
    ##### Configuration options #####
    usage = "usage: %prog [options] ics_file1 ics_file2"
    parser = OptionParser(usage=usage, version=version)
    parser.set_description("ics_diff will print a comparison of two iCalendar files ")
    parser.add_option("-i", "--ignore-dtstamp", dest="ignore", action="store_true",
                      default=False, help="ignore DTSTAMP lines [default: False]")
    (cmdline_options, args) = parser.parse_args()
    if len(args) < 2:
        print "error: too few arguments given"
        print
        print parser.format_help()
        return False, False
    return cmdline_options, args
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print "Aborted"
"""Behavior (validation, encoding, and transformations) for vobjects."""
import base
#------------------------ Abstract class for behavior --------------------------
class Behavior(object):
    """Abstract class to describe vobject options, requirements and encodings.
    Behaviors are used for root components like VCALENDAR, for subcomponents
    like VEVENT, and for individual lines in components.
    Behavior subclasses are not meant to be instantiated, all methods should
    be classmethods.
    @cvar name:
        The uppercase name of the object described by the class, or a generic
        name if the class defines behavior for many objects.
    @cvar description:
        A brief excerpt from the RFC explaining the function of the component or
        line.
    @cvar versionString:
        The string associated with the component, for instance, 2.0 if there's a
        line like VERSION:2.0, an empty string otherwise.
    @cvar knownChildren:
        A dictionary with uppercased component/property names as keys and a
        tuple (min, max, id) as value, where id is the id used by
        L{registerBehavior}, min and max are the limits on how many of this child
        must occur. None is used to denote no max or no id.
    @cvar quotedPrintable:
        A boolean describing whether the object should be encoded and decoded
        using quoted printable line folding and character escaping.
    @cvar defaultBehavior:
        Behavior to apply to ContentLine children when no behavior is found.
    @cvar hasNative:
        A boolean describing whether the object can be transformed into a more
        Pythonic object.
    @cvar isComponent:
        A boolean, True if the object should be a Component.
    @cvar sortFirst:
        The lower-case list of children which should come first when sorting.
    @cvar allowGroup:
        Whether or not vCard style group prefixes are allowed.
    """
    name = ''
    description = ''
    versionString = ''
    knownChildren = {}
    quotedPrintable = False
    defaultBehavior = None
    hasNative = False
    isComponent = False
    allowGroup = False
    forceUTC = False
    sortFirst = []

    def __init__(self):
        err = "Behavior subclasses are not meant to be instantiated"
        raise base.VObjectError(err)

    @classmethod
    def validate(cls, obj, raiseException=False, complainUnrecognized=False):
        """Check if the object satisfies this behavior's requirements.
        @param obj:
            The L{ContentLine<base.ContentLine>} or
            L{Component<base.Component>} to be validated.
        @param raiseException:
            If True, raise a L{base.ValidateError} on validation failure.
            Otherwise return a boolean.
        @param complainUnrecognized:
            If True, fail to validate if an uncrecognized parameter or child is
            found. Otherwise log the lack of recognition.
        """
        if not cls.allowGroup and obj.group is not None:
            err = str(obj) + " has a group, but this object doesn't support groups"
            raise base.VObjectError(err)
        if isinstance(obj, base.ContentLine):
            return cls.lineValidate(obj, raiseException, complainUnrecognized)
        elif isinstance(obj, base.Component):
            # count every child by (uppercased) name, then enforce the
            # min/max limits declared in knownChildren
            count = {}
            for child in obj.getChildren():
                if not child.validate(raiseException, complainUnrecognized):
                    return False
                name = child.name.upper()
                count[name] = count.get(name, 0) + 1
            for key, val in cls.knownChildren.iteritems():
                if count.get(key, 0) < val[0]:
                    if raiseException:
                        m = "%s components must contain at least %i %s"
                        raise base.ValidateError(m % (cls.name, val[0], key))
                    return False
                if val[1] and count.get(key, 0) > val[1]:
                    if raiseException:
                        m = "%s components cannot contain more than %i %s"
                        raise base.ValidateError(m % (cls.name, val[1], key))
                    return False
            return True
        else:
            err = str(obj) + " is not a Component or Contentline"
            raise base.VObjectError(err)

    @classmethod
    def lineValidate(cls, line, raiseException, complainUnrecognized):
        """Examine a line's parameters and values, return True if valid."""
        return True

    @classmethod
    def decode(cls, line):
        # encoded is used as an int-valued flag throughout the codebase
        if line.encoded: line.encoded = 0

    @classmethod
    def encode(cls, line):
        if not line.encoded: line.encoded = 1

    @classmethod
    def transformToNative(cls, obj):
        """Turn a ContentLine or Component into a Python-native representation.
        If appropriate, turn dates or datetime strings into Python objects.
        Components containing VTIMEZONEs turn into VtimezoneComponents.
        """
        return obj

    @classmethod
    def transformFromNative(cls, obj):
        """Inverse of transformToNative."""
        raise base.NativeError("No transformFromNative defined")

    @classmethod
    def generateImplicitParameters(cls, obj):
        """Generate any required information that don't yet exist."""
        pass

    @classmethod
    def serialize(cls, obj, buf, lineLength, validate=True):
        """Set implicit parameters, do encoding, return unicode string.
        If validate is True, raise VObjectError if the line doesn't validate
        after implicit parameters are generated.
        Default is to call base.defaultSerialize.
        """
        cls.generateImplicitParameters(obj)
        if validate: cls.validate(obj, raiseException=True)
        if obj.isNative:
            transformed = obj.transformFromNative()
            undoTransform = True
        else:
            transformed = obj
            undoTransform = False
        out = base.defaultSerialize(transformed, buf, lineLength)
        # restore the native representation if we converted it above
        if undoTransform: obj.transformToNative()
        return out

    @classmethod
    def valueRepr(cls, line):
        """return the representation of the given content line value"""
        # (a stray " | Python" extraction artifact was removed from this line)
        return line.value
"""vobject module for reading vCard and vCalendar files."""
import copy
import re
import sys
import logging
import StringIO, cStringIO
import string
import exceptions
import codecs
#------------------------------------ Logging ----------------------------------
# Module-level logger; attach a handler only when the embedding application
# hasn't configured logging itself.
logger = logging.getLogger(__name__)
if not logging.getLogger().handlers:
    handler = logging.StreamHandler()
    formatter = logging.Formatter('%(name)s %(levelname)s %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
logger.setLevel(logging.ERROR) # Log errors
DEBUG = False # Don't waste time on debug calls
#----------------------------------- Constants ---------------------------------
# Line-ending and whitespace constants used when folding/unfolding lines.
CR = '\r'
LF = '\n'
CRLF = CR + LF
SPACE = ' '
TAB = '\t'
SPACEORTAB = SPACE + TAB
#-------------------------------- Useful modules -------------------------------
# use doctest, it kills two birds with one stone and docstrings often become
# more readable to boot (see parseLine's docstring).
# use logging, then when debugging we can just set our verbosity.
# use epydoc syntax for documenting code, please document every class and non-
# trivial method (see http://epydoc.sourceforge.net/epytext.html
# and http://epydoc.sourceforge.net/fields.html). Also, please
# follow http://www.python.org/peps/pep-0257.html for docstrings.
#-------------------------------------------------------------------------------
#--------------------------------- Main classes --------------------------------
class VBase(object):
    """Base class for ContentLine and Component.
    @ivar behavior:
        The Behavior class associated with this object, which controls
        validation, transformations, and encoding.
    @ivar parentBehavior:
        The object's parent's behavior, or None if no behaviored parent exists.
    @ivar isNative:
        Boolean describing whether this component is a Native instance.
    @ivar group:
        An optional group prefix, should be used only to indicate sort order in
        vCards, according to RFC2426
    """
    def __init__(self, group=None, *args, **kwds):
        super(VBase, self).__init__(*args, **kwds)
        self.group = group
        self.behavior = None
        self.parentBehavior = None
        self.isNative = False
    def copy(self, copyit):
        # Copy behavior bookkeeping from another VBase instance.
        self.group = copyit.group
        self.behavior = copyit.behavior
        self.parentBehavior = copyit.parentBehavior
        self.isNative = copyit.isNative
    def validate(self, *args, **kwds):
        """Call the behavior's validate method, or return True."""
        if self.behavior:
            return self.behavior.validate(self, *args, **kwds)
        else: return True
    def getChildren(self):
        """Return an iterable containing the contents of the object."""
        return []
    def clearBehavior(self, cascade=True):
        """Set behavior to None. Do for all descendants if cascading."""
        self.behavior=None
        if cascade: self.transformChildrenFromNative()
    def autoBehavior(self, cascade=False):
        """Set behavior if name is in self.parentBehavior.knownChildren.
        If cascade is True, unset behavior and parentBehavior for all
        descendants, then recalculate behavior and parentBehavior.
        """
        parentBehavior = self.parentBehavior
        if parentBehavior is not None:
            knownChildTup = parentBehavior.knownChildren.get(self.name, None)
            if knownChildTup is not None:
                behavior = getBehavior(self.name, knownChildTup[2])
                if behavior is not None:
                    self.setBehavior(behavior, cascade)
                    # decode as soon as a behavior is known
                    if isinstance(self, ContentLine) and self.encoded:
                        self.behavior.decode(self)
            elif isinstance(self, ContentLine):
                # no specific behavior known: fall back to the parent's default
                self.behavior = parentBehavior.defaultBehavior
                if self.encoded and self.behavior:
                    self.behavior.decode(self)
    def setBehavior(self, behavior, cascade=True):
        """Set behavior. If cascade is True, autoBehavior all descendants."""
        self.behavior=behavior
        if cascade:
            for obj in self.getChildren():
                obj.parentBehavior=behavior
                obj.autoBehavior(True)
    def transformToNative(self):
        """Transform this object into a custom VBase subclass.
        transformToNative should always return a representation of this object.
        It may do so by modifying self in place then returning self, or by
        creating a new object.
        """
        if self.isNative or not self.behavior or not self.behavior.hasNative:
            return self
        else:
            try:
                return self.behavior.transformToNative(self)
            except Exception, e:
                # wrap errors in transformation in a ParseError
                lineNumber = getattr(self, 'lineNumber', None)
                if isinstance(e, ParseError):
                    if lineNumber is not None:
                        e.lineNumber = lineNumber
                    raise
                else:
                    msg = "In transformToNative, unhandled exception: %s: %s"
                    msg = msg % (sys.exc_info()[0], sys.exc_info()[1])
                    new_error = ParseError(msg, lineNumber)
                    # re-raise with the original traceback preserved
                    raise ParseError, new_error, sys.exc_info()[2]
    def transformFromNative(self):
        """Return self transformed into a ContentLine or Component if needed.
        May have side effects. If it does, transformFromNative and
        transformToNative MUST have perfectly inverse side effects. Allowing
        such side effects is convenient for objects whose transformations only
        change a few attributes.
        Note that it isn't always possible for transformFromNative to be a
        perfect inverse of transformToNative, in such cases transformFromNative
        should return a new object, not self after modifications.
        """
        if self.isNative and self.behavior and self.behavior.hasNative:
            try:
                return self.behavior.transformFromNative(self)
            except Exception, e:
                # wrap errors in transformation in a NativeError
                lineNumber = getattr(self, 'lineNumber', None)
                if isinstance(e, NativeError):
                    if lineNumber is not None:
                        e.lineNumber = lineNumber
                    raise
                else:
                    msg = "In transformFromNative, unhandled exception: %s: %s"
                    msg = msg % (sys.exc_info()[0], sys.exc_info()[1])
                    new_error = NativeError(msg, lineNumber)
                    # re-raise with the original traceback preserved
                    raise NativeError, new_error, sys.exc_info()[2]
        else: return self
    def transformChildrenToNative(self):
        """Recursively replace children with their native representation."""
        pass
    def transformChildrenFromNative(self, clearBehavior=True):
        """Recursively transform native children to vanilla representations."""
        pass
    def serialize(self, buf=None, lineLength=75, validate=True, behavior=None):
        """Serialize to buf if it exists, otherwise return a string.
        Use self.behavior.serialize if behavior exists.
        """
        if not behavior:
            behavior = self.behavior
        if behavior:
            if DEBUG: logger.debug("serializing %s with behavior" % self.name)
            return behavior.serialize(self, buf, lineLength, validate)
        else:
            if DEBUG: logger.debug("serializing %s without behavior" % self.name)
            return defaultSerialize(self, buf, lineLength)
def ascii(s):
    """Return s as a printable ASCII string, replacing unconvertible
    characters. Won't work for 8-bit ASCII.
    """
    converted = unicode(s)
    return converted.encode('ascii', 'replace')
def toVName(name, stripNum = 0, upper = False):
    """Convert a Python attribute name into an iCalendar-style name.

    Optionally uppercase the result and drop stripNum trailing characters
    first; underscores become dashes.
    """
    result = name.upper() if upper else name
    if stripNum:
        result = result[:-stripNum]
    return result.replace('_', '-')
class ContentLine(VBase):
"""Holds one content line for formats like vCard and vCalendar.
For example::
<SUMMARY{u'param1' : [u'val1'], u'param2' : [u'val2']}Bastille Day Party>
@ivar name:
The uppercased name of the contentline.
@ivar params:
A dictionary of parameters and associated lists of values (the list may
be empty for empty parameters).
@ivar value:
The value of the contentline.
@ivar singletonparams:
A list of parameters for which it's unclear if the string represents the
parameter name or the parameter value. In vCard 2.1, "The value string
can be specified alone in those cases where the value is unambiguous".
This is crazy, but we have to deal with it.
@ivar encoded:
A boolean describing whether the data in the content line is encoded.
Generally, text read from a serialized vCard or vCalendar should be
considered encoded. Data added programmatically should not be encoded.
@ivar lineNumber:
An optional line number associated with the contentline.
"""
def __init__(self, name, params, value, group=None,
encoded=False, isNative=False,
lineNumber = None, *args, **kwds):
"""Take output from parseLine, convert params list to dictionary."""
# group is used as a positional argument to match parseLine's return
super(ContentLine, self).__init__(group, *args, **kwds)
self.name = name.upper()
self.value = value
self.encoded = encoded
self.params = {}
self.singletonparams = []
self.isNative = isNative
self.lineNumber = lineNumber
def updateTable(x):
if len(x) == 1:
self.singletonparams += x
else:
paramlist = self.params.setdefault(x[0].upper(), [])
paramlist.extend(x[1:])
map(updateTable, params)
qp = False
if 'ENCODING' in self.params:
if 'QUOTED-PRINTABLE' in self.params['ENCODING']:
qp = True
self.params['ENCODING'].remove('QUOTED-PRINTABLE')
if 0==len(self.params['ENCODING']):
del self.params['ENCODING']
if 'QUOTED-PRINTABLE' in self.singletonparams:
qp = True
self.singletonparams.remove('QUOTED-PRINTABLE')
if qp:
self.value = str(self.value).decode('quoted-printable')
# self.value should be unicode for iCalendar, but if quoted-printable
# is used, or if the quoted-printable state machine is used, text may be
# encoded
if type(self.value) is str:
charset = 'iso-8859-1'
if 'CHARSET' in self.params:
charsets = self.params.pop('CHARSET')
if charsets:
charset = charsets[0]
self.value = unicode(self.value, charset)
@classmethod
def duplicate(clz, copyit):
newcopy = clz('', {}, '')
newcopy.copy(copyit)
return newcopy
def copy(self, copyit):
super(ContentLine, self).copy(copyit)
self.name = copyit.name
self.value = copy.copy(copyit.value)
self.encoded = self.encoded
self.params = copy.copy(copyit.params)
self.singletonparams = copy.copy(copyit.singletonparams)
self.lineNumber = copyit.lineNumber
def __eq__(self, other):
try:
return (self.name == other.name) and (self.params == other.params) and (self.value == other.value)
except:
return False
def _getAttributeNames(self):
"""Return a list of attributes of the object.
Python 2.6 will add __dir__ to customize what attributes are returned
by dir, for now copy PyCrust so that IPython can accurately do
completion.
"""
keys = self.params.keys()
params = [param + '_param' for param in keys]
params.extend(param + '_paramlist' for param in keys)
return params
def __getattr__(self, name):
"""Make params accessible via self.foo_param or self.foo_paramlist.
Underscores, legal in python variable names, are converted to dashes,
which are legal in IANA tokens.
"""
try:
if name.endswith('_param'):
return self.params[toVName(name, 6, True)][0]
elif name.endswith('_paramlist'):
return self.params[toVName(name, 10, True)]
else:
raise exceptions.AttributeError, name
except KeyError:
raise exceptions.AttributeError, name
def __setattr__(self, name, value):
"""Make params accessible via self.foo_param or self.foo_paramlist.
Underscores, legal in python variable names, are converted to dashes,
which are legal in IANA tokens.
"""
if name.endswith('_param'):
if type(value) == list:
self.params[toVName(name, 6, True)] = value
else:
self.params[toVName(name, 6, True)] = [value]
elif name.endswith('_paramlist'):
if type(value) == list:
self.params[toVName(name, 10, True)] = value
else:
raise VObjectError("Parameter list set to a non-list")
else:
prop = getattr(self.__class__, name, None)
if isinstance(prop, property):
prop.fset(self, value)
else:
object.__setattr__(self, name, value)
def __delattr__(self, name):
try:
if name.endswith('_param'):
del self.params[toVName(name, 6, True)]
elif name.endswith('_paramlist'):
del self.params[toVName(name, 10, True)]
else:
object.__delattr__(self, name)
except KeyError:
raise exceptions.AttributeError, name
def valueRepr( self ):
"""transform the representation of the value according to the behavior,
if any"""
v = self.value
if self.behavior:
v = self.behavior.valueRepr( self )
return ascii( v )
def __str__(self):
return "<"+ascii(self.name)+ascii(self.params)+self.valueRepr()+">"
def __repr__(self):
return self.__str__().replace('\n', '\\n')
def prettyPrint(self, level = 0, tabwidth=3):
pre = ' ' * level * tabwidth
print pre, self.name + ":", self.valueRepr()
if self.params:
lineKeys= self.params.keys()
print pre, "params for ", self.name +':'
for aKey in lineKeys:
print pre + ' ' * tabwidth, aKey, ascii(self.params[aKey])
class Component(VBase):
    """A complex property that can contain multiple ContentLines.
    For our purposes, a component must start with a BEGIN:xxxx line and end with
    END:xxxx, or have a PROFILE:xxx line if a top-level component.
    @ivar contents:
        A dictionary of lists of Component or ContentLine instances. The keys
        are the lowercased names of child ContentLines or Components.
        Note that BEGIN and END ContentLines are not included in contents.
    @ivar name:
        Uppercase string used to represent this Component, i.e VCARD if the
        serialized object starts with BEGIN:VCARD.
    @ivar useBegin:
        A boolean flag determining whether BEGIN: and END: lines should
        be serialized.
    """
    def __init__(self, name=None, *args, **kwds):
        super(Component, self).__init__(*args, **kwds)
        self.contents = {}
        if name:
            self.name=name.upper()
            self.useBegin = True
        else:
            # unnamed components get a name later via setProfile()
            self.name = ''
            self.useBegin = False
        self.autoBehavior()
    @classmethod
    def duplicate(clz, copyit):
        newcopy = clz()
        newcopy.copy(copyit)
        return newcopy
    def copy(self, copyit):
        super(Component, self).copy(copyit)
        # deep copy of contents
        self.contents = {}
        for key, lvalue in copyit.contents.items():
            newvalue = []
            for value in lvalue:
                newitem = value.duplicate(value)
                newvalue.append(newitem)
            self.contents[key] = newvalue
        self.name = copyit.name
        self.useBegin = copyit.useBegin
    def setProfile(self, name):
        """Assign a PROFILE to this unnamed component.
        Used by vCard, not by vCalendar.
        """
        if self.name or self.useBegin:
            if self.name == name: return
            raise VObjectError("This component already has a PROFILE or uses BEGIN.")
        self.name = name.upper()
    def _getAttributeNames(self):
        """Return a list of attributes of the object.
        Python 2.6 will add __dir__ to customize what attributes are returned
        by dir, for now copy PyCrust so that IPython can accurately do
        completion.
        """
        names = self.contents.keys()
        names.extend(name + '_list' for name in self.contents.keys())
        return names
    def __getattr__(self, name):
        """For convenience, make self.contents directly accessible.
        Underscores, legal in python variable names, are converted to dashes,
        which are legal in IANA tokens.
        """
        # if the object is being re-created by pickle, self.contents may not
        # be set, don't get into an infinite loop over the issue
        if name == 'contents':
            return object.__getattribute__(self, name)
        try:
            if name.endswith('_list'):
                return self.contents[toVName(name, 5)]
            else:
                return self.contents[toVName(name)][0]
        except KeyError:
            raise exceptions.AttributeError, name
    # attribute names handled by normal object semantics rather than the
    # contents-dictionary magic in __setattr__/__delattr__
    normal_attributes = ['contents','name','behavior','parentBehavior','group']
    def __setattr__(self, name, value):
        """For convenience, make self.contents directly accessible.
        Underscores, legal in python variable names, are converted to dashes,
        which are legal in IANA tokens.
        """
        if name not in self.normal_attributes and name.lower()==name:
            if type(value) == list:
                if name.endswith('_list'):
                    name = name[:-5]
                self.contents[toVName(name)] = value
            elif name.endswith('_list'):
                raise VObjectError("Component list set to a non-list")
            else:
                self.contents[toVName(name)] = [value]
        else:
            prop = getattr(self.__class__, name, None)
            if isinstance(prop, property):
                prop.fset(self, value)
            else:
                object.__setattr__(self, name, value)
    def __delattr__(self, name):
        try:
            if name not in self.normal_attributes and name.lower()==name:
                if name.endswith('_list'):
                    del self.contents[toVName(name, 5)]
                else:
                    del self.contents[toVName(name)]
            else:
                object.__delattr__(self, name)
        except KeyError:
            raise exceptions.AttributeError, name
    def getChildValue(self, childName, default = None, childNumber = 0):
        """Return a child's value (the first, by default), or None."""
        child = self.contents.get(toVName(childName))
        if child is None:
            return default
        else:
            return child[childNumber].value
    def add(self, objOrName, group = None):
        """Add objOrName to contents, set behavior if it can be inferred.
        If objOrName is a string, create an empty component or line based on
        behavior. If no behavior is found for the object, add a ContentLine.
        group is an optional prefix to the name of the object (see
        RFC 2425).
        """
        if isinstance(objOrName, VBase):
            obj = objOrName
            if self.behavior:
                obj.parentBehavior = self.behavior
                obj.autoBehavior(True)
        else:
            name = objOrName.upper()
            try:
                # look up the child's registered behavior by name and id
                id=self.behavior.knownChildren[name][2]
                behavior = getBehavior(name, id)
                if behavior.isComponent:
                    obj = Component(name)
                else:
                    obj = ContentLine(name, [], '', group)
                obj.parentBehavior = self.behavior
                obj.behavior = behavior
                obj = obj.transformToNative()
            except (KeyError, AttributeError):
                # unknown child or no behavior: fall back to a plain line
                obj = ContentLine(objOrName, [], '', group)
        if obj.behavior is None and self.behavior is not None:
            if isinstance(obj, ContentLine):
                obj.behavior = self.behavior.defaultBehavior
        self.contents.setdefault(obj.name.lower(), []).append(obj)
        return obj
    def remove(self, obj):
        """Remove obj from contents."""
        named = self.contents.get(obj.name.lower())
        if named:
            try:
                named.remove(obj)
                if len(named) == 0:
                    del self.contents[obj.name.lower()]
            except ValueError:
                pass;
    def getChildren(self):
        """Return an iterable of all children."""
        for objList in self.contents.values():
            for obj in objList: yield obj
    def components(self):
        """Return an iterable of all Component children."""
        return (i for i in self.getChildren() if isinstance(i, Component))
    def lines(self):
        """Return an iterable of all ContentLine children."""
        return (i for i in self.getChildren() if isinstance(i, ContentLine))
    def sortChildKeys(self):
        # children named in the behavior's sortFirst come first, the rest
        # follow alphabetically
        try:
            first = [s for s in self.behavior.sortFirst if s in self.contents]
        except:
            first = []
        return first + sorted(k for k in self.contents.keys() if k not in first)
    def getSortedChildren(self):
        return [obj for k in self.sortChildKeys() for obj in self.contents[k]]
    def setBehaviorFromVersionLine(self, versionLine):
        """Set behavior if one matches name, versionLine.value."""
        v=getBehavior(self.name, versionLine.value)
        if v: self.setBehavior(v)
    def transformChildrenToNative(self):
        """Recursively replace children with their native representation."""
        #sort to get dependency order right, like vtimezone before vevent
        for childArray in (self.contents[k] for k in self.sortChildKeys()):
            for i in xrange(len(childArray)):
                childArray[i]=childArray[i].transformToNative()
                childArray[i].transformChildrenToNative()
    def transformChildrenFromNative(self, clearBehavior=True):
        """Recursively transform native children to vanilla representations."""
        for childArray in self.contents.values():
            for i in xrange(len(childArray)):
                childArray[i]=childArray[i].transformFromNative()
                childArray[i].transformChildrenFromNative(clearBehavior)
                if clearBehavior:
                    childArray[i].behavior = None
                    childArray[i].parentBehavior = None
    def __str__(self):
        if self.name:
            return "<" + self.name + "| " + str(self.getSortedChildren()) + ">"
        else:
            return '<' + '*unnamed*' + '| ' + str(self.getSortedChildren()) + '>'
    def __repr__(self):
        return self.__str__()
    def prettyPrint(self, level = 0, tabwidth=3):
        pre = ' ' * level * tabwidth
        print pre, self.name
        if isinstance(self, Component):
            for line in self.getChildren():
                line.prettyPrint(level + 1, tabwidth)
        print
class VObjectError(Exception):
    """Base exception for vobject; may carry the offending input line number."""
    def __init__(self, message, lineNumber=None):
        self.message = message
        if lineNumber is not None:
            self.lineNumber = lineNumber
    def __str__(self):
        if hasattr(self, 'lineNumber'):
            return "At line %s: %s" % (self.lineNumber, self.message)
        return repr(self.message)
class ParseError(VObjectError):
    """Raised when input text cannot be parsed as vCard/vCalendar."""
    pass
class ValidateError(VObjectError):
    """Raised when an object fails a behavior's validation rules."""
    pass
class NativeError(VObjectError):
    """Raised when transforming to or from a native representation fails."""
    pass
#-------------------------- Parsing functions ----------------------------------
# parseLine regular expressions
patterns = {}
# Note that underscore is not legal for names, it's included because
# Lotus Notes uses it
patterns['name'] = r'[a-zA-Z0-9\-_]+'
patterns['safe_char'] = '[^";:,]'
patterns['qsafe_char'] = '[^"]'
# the combined Python string replacement and regex syntax is a little confusing;
# remember that %(foobar)s is replaced with patterns['foobar'], so for instance
# param_value is any number of safe_chars or any number of qsafe_chars surrounded
# by double quotes.
patterns['param_value'] = ' "%(qsafe_char)s * " | %(safe_char)s * ' % patterns
# get a tuple of two elements, one will be empty, the other will have the value
patterns['param_value_grouped'] = """
" ( %(qsafe_char)s * )" | ( %(safe_char)s + )
""" % patterns
# get a parameter and its values, without any saved groups
patterns['param'] = r"""
; (?: %(name)s )                     # parameter name
(?:
    (?: = (?: %(param_value)s ) )?   # 0 or more parameter values, multiple
    (?: , (?: %(param_value)s ) )*   # parameters are comma separated
)*
""" % patterns
# get a parameter, saving groups for name and value (value still needs parsing)
patterns['params_grouped'] = r"""
; ( %(name)s )
(?: =
    (
        (?:   (?: %(param_value)s ) )?   # 0 or more parameter values, multiple
        (?: , (?: %(param_value)s ) )*   # parameters are comma separated
    )
)?
""" % patterns
# get a full content line, break it up into group, name, parameters, and value
patterns['line'] = r"""
^ ((?P<group> %(name)s)\.)?(?P<name> %(name)s) # name group
  (?P<params> (?: %(param)s )* )               # params group (may be empty)
: (?P<value> .* )$                             # value group
""" % patterns
# (a stray, no-op string-literal expression that followed here was removed)
param_values_re = re.compile(patterns['param_value_grouped'], re.VERBOSE)
params_re = re.compile(patterns['params_grouped'], re.VERBOSE)
line_re = re.compile(patterns['line'], re.DOTALL | re.VERBOSE)
begin_re = re.compile('BEGIN', re.IGNORECASE)
def parseParams(string):
    """Parse a content line's parameter section into a list of lists.

    Each inner list holds a parameter name followed by its values (if any).

    >>> parseParams(';ALTREP="http://www.wiz.org"')
    [['ALTREP', 'http://www.wiz.org']]
    >>> parseParams('')
    []
    >>> parseParams(';ALTREP="http://www.wiz.org;;",Blah,Foo;NEXT=Nope;BAR')
    [['ALTREP', 'http://www.wiz.org;;', 'Blah', 'Foo'], ['NEXT', 'Nope'], ['BAR']]
    """
    # renamed from `all`, which shadowed the builtin of the same name
    matches = params_re.findall(string)
    allParameters = []
    for tup in matches:
        paramList = [tup[0]] # tup looks like (name, valuesString)
        for pair in param_values_re.findall(tup[1]):
            # pair looks like ('', value) or (value, '')
            if pair[0] != '':
                paramList.append(pair[0])
            else:
                paramList.append(pair[1])
        allParameters.append(paramList)
    return allParameters
def parseLine(line, lineNumber = None):
    """Break one unfolded content line into (name, params, value, group).

    >>> parseLine("BLAH:")
    ('BLAH', [], '', None)
    >>> parseLine("RDATE:VALUE=DATE:19970304,19970504,19970704,19970904")
    ('RDATE', [], 'VALUE=DATE:19970304,19970504,19970704,19970904', None)
    >>> parseLine('DESCRIPTION;ALTREP="http://www.wiz.org":The Fall 98 Wild Wizards Conference - - Las Vegas, NV, USA')
    ('DESCRIPTION', [['ALTREP', 'http://www.wiz.org']], 'The Fall 98 Wild Wizards Conference - - Las Vegas, NV, USA', None)
    >>> parseLine("EMAIL;PREF;INTERNET:john@nowhere.com")
    ('EMAIL', [['PREF'], ['INTERNET']], 'john@nowhere.com', None)
    >>> parseLine('EMAIL;TYPE="blah",hah;INTERNET="DIGI",DERIDOO:john@nowhere.com')
    ('EMAIL', [['TYPE', 'blah', 'hah'], ['INTERNET', 'DIGI', 'DERIDOO']], 'john@nowhere.com', None)
    >>> parseLine('item1.ADR;type=HOME;type=pref:;;Reeperbahn 116;Hamburg;;20359;')
    ('ADR', [['type', 'HOME'], ['type', 'pref']], ';;Reeperbahn 116;Hamburg;;20359;', 'item1')
    >>> parseLine(":")
    Traceback (most recent call last):
    ...
    ParseError: 'Failed to parse line: :'
    """
    match = line_re.match(line)
    if not match:
        raise ParseError("Failed to parse line: %s" % line, lineNumber)
    # Underscores are replaced with dash to work around Lotus Notes exports.
    name = match.group('name').replace('_','-')
    params = parseParams(match.group('params'))
    return name, params, match.group('value'), match.group('group')
# logical line regular expressions
patterns['lineend'] = r'(?:\r\n|\r|\n|$)'
patterns['wrap'] = r'%(lineend)s [\t ]' % patterns
patterns['logicallines'] = r"""
(
(?: [^\r\n] | %(wrap)s )*
%(lineend)s
)
""" % patterns
patterns['wraporend'] = r'(%(wrap)s | %(lineend)s )' % patterns
wrap_re = re.compile(patterns['wraporend'], re.VERBOSE)
logical_lines_re = re.compile(patterns['logicallines'], re.VERBOSE)
testLines="""
Line 0 text
, Line 0 continued.
Line 1;encoding=quoted-printable:this is an evil=
evil=
format.
Line 2 is a new line, it does not start with whitespace.
"""
def getLogicalLines(fp, allowQP=True, findBegin=False):
    """Iterate through a stream, yielding one logical line at a time.

    Because many applications still use vCard 2.1, we have to deal with the
    quoted-printable encoding for long lines, as well as the vCard 3.0 and
    vCalendar line folding technique, a whitespace character at the start
    of the line.

    Quoted-printable data will be decoded in the Behavior decoding phase.

    Yields (line, lineNumber) pairs.

    >>> import StringIO
    >>> f=StringIO.StringIO(testLines)
    >>> for n, l in enumerate(getLogicalLines(f)):
    ...     print "Line %s: %s" % (n, l[0])
    ...
    Line 0: Line 0 text, Line 0 continued.
    Line 1: Line 1;encoding=quoted-printable:this is an evil=
    evil=
    format.
    Line 2: Line 2 is a new line, it does not start with whitespace.
    """
    if not allowQP:
        # Fast path: no quoted-printable to worry about, so slurp the whole
        # stream and split it with the precompiled regexes.
        bytes = fp.read(-1)
        if len(bytes) > 0:
            if type(bytes[0]) == unicode:
                # Already decoded text; use as-is.
                val = bytes
            elif not findBegin:
                val = bytes.decode('utf-8')
            else:
                # Sniff the encoding: accept the first candidate whose
                # decoded text contains a BEGIN line.
                for encoding in 'utf-8', 'utf-16-LE', 'utf-16-BE', 'iso-8859-1':
                    try:
                        val = bytes.decode(encoding)
                        if begin_re.search(val) is not None:
                            break
                    except UnicodeDecodeError:
                        pass
                else:
                    raise ParseError, 'Could not find BEGIN when trying to determine encoding'
        else:
            val = bytes

        # strip off any UTF8 BOMs which Python's UTF8 decoder leaves
        val = val.lstrip( unicode( codecs.BOM_UTF8, "utf8" ) )
        lineNumber = 1
        for match in logical_lines_re.finditer(val):
            # subn both unfolds the line and counts how many physical
            # lines were folded together, to keep lineNumber accurate.
            line, n = wrap_re.subn('', match.group())
            if line != '':
                yield line, lineNumber
            lineNumber += n
    else:
        # Streaming path: read physical lines one at a time, accumulating
        # each logical line in a fresh buffer.
        quotedPrintable=False
        newbuffer = StringIO.StringIO
        logicalLine = newbuffer()
        lineNumber = 0
        lineStartNumber = 0
        while True:
            line = fp.readline()
            if line == '':
                break
            else:
                line = line.rstrip(CRLF)
                lineNumber += 1
            if line.rstrip() == '':
                # Blank line terminates any pending logical line.
                if logicalLine.pos > 0:
                    yield logicalLine.getvalue(), lineStartNumber
                lineStartNumber = lineNumber
                logicalLine = newbuffer()
                quotedPrintable=False
                continue

            if quotedPrintable and allowQP:
                # Previous line ended with a QP soft break: keep the newline
                # so the Behavior decode phase sees the original data.
                logicalLine.write('\n')
                logicalLine.write(line)
                quotedPrintable=False
            elif line[0] in SPACEORTAB:
                # Folded continuation: drop the leading whitespace char.
                logicalLine.write(line[1:])
            elif logicalLine.pos > 0:
                # New property line: flush the previous logical line.
                yield logicalLine.getvalue(), lineStartNumber
                lineStartNumber = lineNumber
                logicalLine = newbuffer()
                logicalLine.write(line)
            else:
                logicalLine = newbuffer()
                logicalLine.write(line)

            # hack to deal with the fact that vCard 2.1 allows parameters to be
            # encoded without a parameter name. False positives are unlikely, but
            # possible.
            val = logicalLine.getvalue()
            if val[-1]=='=' and val.lower().find('quoted-printable') >= 0:
                quotedPrintable=True

        if logicalLine.pos > 0:
            yield logicalLine.getvalue(), lineStartNumber
def textLineToContentLine(text, n=None):
    """Parse one unfolded text line into a ContentLine, tagged as encoded."""
    parsed = parseLine(text, n)
    return ContentLine(*parsed, **{'encoded': True, 'lineNumber': n})
def dquoteEscape(param):
    """Return param, or "param" if ',' or ';' or ':' is in param."""
    if '"' in param:
        raise VObjectError("Double quotes aren't allowed in parameter values.")
    # Values containing structural characters must be quoted.
    if any(char in param for char in ',;:'):
        return '"' + param + '"'
    return param
def foldOneLine(outbuf, input, lineLength = 75):
    """Write input to outbuf folded at lineLength, CRLF-terminated.

    Folding never splits a multi-byte UTF-8 sequence: before breaking, the
    fold point is stepped back past any UTF-8 continuation bytes.
    """
    if len(input) < lineLength:
        # Optimize for the common unfolded-line case.
        outbuf.write(input)
    else:
        # Look for a valid utf8 boundary and write chunk by chunk.
        start = 0
        written = 0
        while written < len(input):
            # Tentatively break lineLength - 1 chars on from where we are
            # (the leading fold space occupies one column of the next line).
            offset = start + lineLength - 1
            if offset >= len(input):
                line = input[start:]
                outbuf.write(line)
                written = len(input)
            else:
                # Step back while the byte at the break point is a UTF-8
                # continuation byte (0b10xxxxxx), so sequences stay intact.
                # BUG FIX: the original compared the character itself to
                # 0x7F (str vs int -- always true in Python 2, TypeError in
                # Python 3); compare its ordinal instead.
                while (ord(input[offset]) > 0x7F) and ((ord(input[offset]) & 0xC0) == 0x80):
                    offset -= 1
                line = input[start:offset]
                outbuf.write(line)
                outbuf.write("\r\n ")
                written += offset - start
                start = offset
    outbuf.write("\r\n")
def defaultSerialize(obj, buf, lineLength):
    """Encode and fold obj and its children, write to buf or return a string.

    Components are emitted as BEGIN/children/END; ContentLines are encoded
    (via their behavior), assembled, then folded to lineLength.
    """
    outbuf = buf or cStringIO.StringIO()
    if isinstance(obj, Component):
        if obj.group is None:
            groupString = ''
        else:
            groupString = obj.group + '.'
        if obj.useBegin:
            foldOneLine(outbuf, str(groupString + u"BEGIN:" + obj.name), lineLength)
        for child in obj.getSortedChildren():
            #validate is recursive, we only need to validate once
            child.serialize(outbuf, lineLength, validate=False)
        if obj.useBegin:
            foldOneLine(outbuf, str(groupString + u"END:" + obj.name), lineLength)
    elif isinstance(obj, ContentLine):
        startedEncoded = obj.encoded
        # Encode only if we received a decoded line; restore below.
        if obj.behavior and not startedEncoded: obj.behavior.encode(obj)
        s=codecs.getwriter('utf-8')(cStringIO.StringIO()) #unfolded buffer
        if obj.group is not None:
            s.write(obj.group + '.')
        s.write(obj.name.upper())
        for key, paramvals in obj.params.iteritems():
            s.write(';' + key + '=' + ','.join(dquoteEscape(p) for p in paramvals))
        s.write(':' + obj.value)
        # Leave the object in the encoding state we found it in.
        if obj.behavior and not startedEncoded: obj.behavior.decode(obj)
        foldOneLine(outbuf, s.getvalue(), lineLength)
    # When a buffer was supplied, the caller keeps it; otherwise return text.
    return buf or outbuf.getvalue()
# Minimal iCalendar sample used by readComponents' doctest.
testVCalendar="""
BEGIN:VCALENDAR
BEGIN:VEVENT
SUMMARY;blah=hi!:Bastille Day Party
END:VEVENT
END:VCALENDAR"""
class Stack:
    """LIFO stack of Components being assembled during parsing."""

    def __init__(self):
        self.stack = []

    def __len__(self):
        return len(self.stack)

    def top(self):
        """Return the top item, or None when the stack is empty."""
        return self.stack[-1] if self.stack else None

    def topName(self):
        """Return the top item's name, or None when the stack is empty."""
        return self.stack[-1].name if self.stack else None

    def modifyTop(self, item):
        """Add item to the top component, creating one if the stack is empty."""
        top = self.top()
        if top:
            top.add(item)
        else:
            new = Component()
            self.push(new)
            new.add(item) #add sets behavior for item and children

    def push(self, obj):
        self.stack.append(obj)

    def pop(self):
        return self.stack.pop()
def readComponents(streamOrString, validate=False, transform=True,
                   findBegin=True, ignoreUnreadable=False,
                   allowQP=False):
    """Generate one Component at a time from a stream.

    >>> import StringIO
    >>> f = StringIO.StringIO(testVCalendar)
    >>> cal=readComponents(f).next()
    >>> cal
    <VCALENDAR| [<VEVENT| [<SUMMARY{u'BLAH': [u'hi!']}Bastille Day Party>]>]>
    >>> cal.vevent.summary
    <SUMMARY{u'BLAH': [u'hi!']}Bastille Day Party>
    """
    # Accept either a stream or a raw string.
    if isinstance(streamOrString, basestring):
        stream = StringIO.StringIO(streamOrString)
    else:
        stream = streamOrString

    try:
        stack = Stack()
        versionLine = None
        n = 0
        for line, n in getLogicalLines(stream, allowQP, findBegin):
            if ignoreUnreadable:
                # Best-effort mode: log and skip lines that fail to parse.
                try:
                    vline = textLineToContentLine(line, n)
                except VObjectError, e:
                    if e.lineNumber is not None:
                        msg = "Skipped line %(lineNumber)s, message: %(msg)s"
                    else:
                        msg = "Skipped a line, message: %(msg)s"
                    logger.error(msg % {'lineNumber' : e.lineNumber,
                                        'msg' : e.message})
                    continue
            else:
                vline = textLineToContentLine(line, n)
            if vline.name == "VERSION":
                # Remember VERSION so the finished component can pick the
                # right behavior (e.g. vCard 2.1 vs 3.0).
                versionLine = vline
                stack.modifyTop(vline)
            elif vline.name == "BEGIN":
                stack.push(Component(vline.value, group=vline.group))
            elif vline.name == "PROFILE":
                if not stack.top(): stack.push(Component())
                stack.top().setProfile(vline.value)
            elif vline.name == "END":
                if len(stack) == 0:
                    err = "Attempted to end the %s component, \
but it was never opened" % vline.value
                    raise ParseError(err, n)
                if vline.value.upper() == stack.topName(): #START matches END
                    if len(stack) == 1:
                        # Outermost component finished: attach behavior,
                        # optionally validate/transform, and yield it.
                        component=stack.pop()
                        if versionLine is not None:
                            component.setBehaviorFromVersionLine(versionLine)
                        else:
                            behavior = getBehavior(component.name)
                            if behavior:
                                component.setBehavior(behavior)
                        if validate: component.validate(raiseException=True)
                        if transform: component.transformChildrenToNative()
                        yield component #EXIT POINT
                    else: stack.modifyTop(stack.pop())
                else:
                    err = "%s component wasn't closed"
                    raise ParseError(err % stack.topName(), n)
            else: stack.modifyTop(vline) #not a START or END line
        if stack.top():
            if stack.topName() is None:
                logger.warning("Top level component was never named")
            elif stack.top().useBegin:
                raise ParseError("Component %s was never closed" % (stack.topName()), n)
            yield stack.pop()
    except ParseError, e:
        # Record the offending input on the exception for easier debugging.
        e.input = streamOrString
        raise
def readOne(stream, validate=False, transform=True, findBegin=True,
            ignoreUnreadable=False, allowQP=False):
    """Return the first component from stream."""
    components = readComponents(stream, validate, transform, findBegin,
                                ignoreUnreadable, allowQP)
    return components.next()
#--------------------------- version registry ----------------------------------
# Maps upper-cased behavior names to lists of (id, behavior) pairs; the
# first pair in each list is the default used when no id is requested.
__behaviorRegistry={}
def registerBehavior(behavior, name=None, default=False, id=None):
    """Register the given behavior.

    If default is True (or if this is the first version registered with this
    name), the version will be the default if no id is given.
    """
    if not name:
        name = behavior.name.upper()
    if id is None:
        id = behavior.versionString
    # Defaults go to the front of the list; everything else appends.
    versions = __behaviorRegistry.setdefault(name, [])
    if default and versions:
        versions.insert(0, (id, behavior))
    else:
        versions.append((id, behavior))
def getBehavior(name, id=None):
    """Return a matching behavior if it exists, or None.

    If id is None, return the default for name.
    """
    versions = __behaviorRegistry.get(name.upper())
    if versions is None:
        return None
    if id:
        for registeredId, behavior in versions:
            if registeredId == id:
                return behavior
    # No id requested, or the requested id wasn't found: use the default.
    return versions[0][1]
def newFromBehavior(name, id=None):
    """Given a name, return a behaviored ContentLine or Component."""
    name = name.upper()
    behavior = getBehavior(name, id)
    if behavior is None:
        raise VObjectError("No behavior found named %s" % name)
    # Behaviors know whether they describe a container or a single line.
    obj = Component(name) if behavior.isComponent else ContentLine(name, [], '')
    obj.behavior = behavior
    obj.isNative = False
    return obj
#--------------------------- Helper function -----------------------------------
def backslashEscape(s):
    """Backslash-escape '\\', ';' and ',' and normalize newlines to '\\n'.

    Note: the escape literals are written as "\\;" / "\\," -- the original
    "\;" / "\," were invalid escape sequences (same runtime value, but they
    raise warnings on modern Python).
    """
    s = s.replace("\\", "\\\\").replace(";", "\\;").replace(",", "\\,")
    return s.replace("\r\n", "\\n").replace("\n", "\\n").replace("\r", "\\n")
#------------------- Testing and running functions -----------------------------
if __name__ == '__main__':
    # Run the package's doctest suite when executed directly.
    import tests
    tests._test()
| Python |
"""Definitions and behavior for vCard 3.0"""
import behavior
import itertools
from base import VObjectError, NativeError, ValidateError, ParseError, \
VBase, Component, ContentLine, logger, defaultSerialize, \
registerBehavior, backslashEscape, ascii
from icalendar import stringToTextValues
#------------------------ vCard structs ----------------------------------------
class Name(object):
    """Native value for a structured vCard N property.

    Holds family/given/additional/prefix/suffix fields, each of which may
    be a string or a list of strings.
    """
    def __init__(self, family = '', given = '', additional = '', prefix = '',
                 suffix = ''):
        """Each name attribute can be a string or a list of strings."""
        self.family     = family
        self.given      = given
        self.additional = additional
        self.prefix     = prefix
        self.suffix     = suffix

    @staticmethod
    def toString(val):
        """Turn a string or array value into a string."""
        if type(val) in (list, tuple):
            return ' '.join(val)
        return val

    def __str__(self):
        # Render in natural English order, not the N-property field order.
        eng_order = ('prefix', 'given', 'additional', 'family', 'suffix')
        out = ' '.join(self.toString(getattr(self, val)) for val in eng_order)
        return ascii(out)

    def __repr__(self):
        return "<Name: %s>" % self.__str__()

    def __eq__(self, other):
        # Narrowed from a bare except: comparing against an object lacking
        # these attributes raises AttributeError; anything else propagates.
        try:
            return (self.family == other.family and
                    self.given == other.given and
                    self.additional == other.additional and
                    self.prefix == other.prefix and
                    self.suffix == other.suffix)
        except AttributeError:
            return False
class Address(object):
    """Native value for a structured vCard ADR property.

    Holds box/extended/street/city/region/code/country fields, each of
    which may be a string or a list of strings.
    """
    def __init__(self, street = '', city = '', region = '', code = '',
                 country = '', box = '', extended = ''):
        """Each name attribute can be a string or a list of strings."""
        self.box      = box
        self.extended = extended
        self.street   = street
        self.city     = city
        self.region   = region
        self.code     = code
        self.country  = country

    @staticmethod
    def toString(val, join_char='\n'):
        """Turn a string or array value into a string."""
        if type(val) in (list, tuple):
            return join_char.join(val)
        return val

    # Fields rendered one per line, and fields joined onto a single line.
    lines = ('box', 'extended', 'street')
    one_line = ('city', 'region', 'code')

    def __str__(self):
        lines = '\n'.join(self.toString(getattr(self, val)) for val in self.lines if getattr(self, val))
        one_line = tuple(self.toString(getattr(self, val), ' ') for val in self.one_line)
        lines += "\n%s, %s %s" % one_line
        if self.country:
            lines += '\n' + self.toString(self.country)
        return ascii(lines)

    def __repr__(self):
        return "<Address: %s>" % repr(str(self))[1:-1]

    def __eq__(self, other):
        try:
            return (self.box == other.box and
                    self.extended == other.extended and
                    self.street == other.street and
                    self.city == other.city and
                    self.region == other.region and
                    self.code == other.code and
                    self.country == other.country)
        except AttributeError:
            # BUG FIX: the original bare `except:` body was the bare
            # expression `False` with no `return`, so failed comparisons
            # silently returned None.
            return False
#------------------------ Registered Behavior subclasses -----------------------
class VCardTextBehavior(behavior.Behavior):
    """Provide backslash escape encoding/decoding for single valued properties.

    TextBehavior also deals with base64 encoding if the ENCODING parameter is
    explicitly set to BASE64.
    """
    allowGroup = True    # vCard properties may carry an item-group prefix
    base64string = 'B'   # ENCODING= value that selects base64

    @classmethod
    def decode(cls, line):
        """Remove backslash escaping from line.value, or decode base64.

        The content line should contain ENCODING=b for base64 encoding, but
        Apple Addressbook seems to export a singleton parameter of 'BASE64',
        which does not match the 3.0 vCard spec.  If we encounter that, the
        parameter is transformed to ENCODING=b.
        """
        if line.encoded:
            if 'BASE64' in line.singletonparams:
                # Normalize Apple's non-standard BASE64 singleton.
                line.singletonparams.remove('BASE64')
                line.encoding_param = cls.base64string
            encoding = getattr(line, 'encoding_param', None)
            if encoding:
                line.value = line.value.decode('base64')
            else:
                line.value = stringToTextValues(line.value)[0]
            line.encoded=False

    @classmethod
    def encode(cls, line):
        """Backslash escape line.value."""
        if not line.encoded:
            encoding = getattr(line, 'encoding_param', None)
            if encoding and encoding.upper() == cls.base64string:
                # Base64-encode and strip the newlines the codec inserts.
                line.value = line.value.encode('base64').replace('\n', '')
            else:
                line.value = backslashEscape(line.value)
            line.encoded=True
class VCardBehavior(behavior.Behavior):
    # Common base for vCard behaviors: groups allowed, text handling by
    # default for children without a registered behavior.
    allowGroup = True
    defaultBehavior = VCardTextBehavior
class VCard3_0(VCardBehavior):
    """vCard 3.0 behavior."""
    name = 'VCARD'
    description = 'vCard 3.0, defined in rfc2426'
    versionString = '3.0'
    isComponent = True
    # Serialize these children before all others.
    sortFirst = ('version', 'prodid', 'uid')
    knownChildren = {'N':          (1, 1, None),#min, max, behaviorRegistry id
                     'FN':         (1, 1, None),
                     'VERSION':    (1, 1, None),#required, auto-generated
                     'PRODID':     (0, 1, None),
                     'LABEL':      (0, None, None),
                     'UID':        (0, None, None),
                     'ADR':        (0, None, None),
                     'ORG':        (0, None, None),
                     'PHOTO':      (0, None, None),
                     'CATEGORIES': (0, None, None)
                    }

    @classmethod
    def generateImplicitParameters(cls, obj):
        """Add a VERSION line if the vCard doesn't already have one."""
        if not hasattr(obj, 'version'):
            obj.add(ContentLine('VERSION', [], cls.versionString))
# vCard 3.0 is the default behavior for VCARD components.
registerBehavior(VCard3_0, default=True)
class FN(VCardTextBehavior):
    # Behavior for the FN (formatted name) property.
    name = "FN"
    description = 'Formatted name'
registerBehavior(FN)
class Label(VCardTextBehavior):
    # Behavior for the LABEL (formatted address) property.
    name = "Label"
    description = 'Formatted address'
registerBehavior(Label)

# Apple's Address Book expects PHOTO base64 data unwrapped; an effectively
# infinite line length disables folding for that property (see Photo below).
wacky_apple_photo_serialize = True
REALLY_LARGE = 1E50
class Photo(VCardTextBehavior):
    name = "Photo"
    description = 'Photograph'

    @classmethod
    def valueRepr( cls, line ):
        # Avoid dumping raw binary image data into reprs.
        return " (BINARY PHOTO DATA at 0x%s) " % id( line.value )

    @classmethod
    def serialize(cls, obj, buf, lineLength, validate):
        """Apple's Address Book is *really* weird with images, it expects
        base64 data to have very specific whitespace. It seems Address Book
        can handle PHOTO if it's not wrapped, so don't wrap it."""
        if wacky_apple_photo_serialize:
            # Effectively disables folding for this property.
            lineLength = REALLY_LARGE
        VCardTextBehavior.serialize(obj, buf, lineLength, validate)
registerBehavior(Photo)
def toListOrString(string):
    """Split string on text-value separators; unwrap a single result."""
    values = stringToTextValues(string)
    return values[0] if len(values) == 1 else values
def splitFields(string):
    """Return a list of strings or lists from a Name or Address."""
    fields = stringToTextValues(string, listSeparator=';', charList=';')
    return [toListOrString(field) for field in fields]
def toList(stringOrList):
    """Wrap a bare string in a single-element list; lists pass through."""
    return [stringOrList] if isinstance(stringOrList, basestring) else stringOrList
def serializeFields(obj, order=None):
    """Turn an object's fields into a ';' and ',' separated string.

    If order is None, obj should be a list: backslash escape each field and
    return a ';' separated string.  Otherwise, pull each attribute named in
    order off obj, ','-join its (possibly multiple) escaped values, and
    ';'-join the results.
    """
    if order is None:
        escaped = [backslashEscape(val) for val in obj]
    else:
        escaped = []
        for attr in order:
            values = toList(getattr(obj, attr))
            escaped.append(','.join(backslashEscape(val) for val in values))
    return ';'.join(escaped)
# Field order of the structured N property value.
NAME_ORDER = ('family', 'given', 'additional', 'prefix', 'suffix')
class NameBehavior(VCardBehavior):
    """A structured name."""
    hasNative = True

    @staticmethod
    def transformToNative(obj):
        """Turn obj.value into a Name."""
        if obj.isNative: return obj
        obj.isNative = True
        obj.value = Name(**dict(zip(NAME_ORDER, splitFields(obj.value))))
        return obj

    @staticmethod
    def transformFromNative(obj):
        """Replace the Name in obj.value with a string."""
        # Idempotence guard, matching OrgBehavior.transformFromNative:
        # serializing an already-serialized value would mangle it.
        if not obj.isNative: return obj
        obj.isNative = False
        obj.value = serializeFields(obj.value, NAME_ORDER)
        return obj
registerBehavior(NameBehavior, 'N')

# Field order of the structured ADR property value.
ADDRESS_ORDER = ('box', 'extended', 'street', 'city', 'region', 'code',
                 'country')
class AddressBehavior(VCardBehavior):
    """A structured address."""
    hasNative = True

    @staticmethod
    def transformToNative(obj):
        """Turn obj.value into an Address."""
        if obj.isNative: return obj
        obj.isNative = True
        obj.value = Address(**dict(zip(ADDRESS_ORDER, splitFields(obj.value))))
        return obj

    @staticmethod
    def transformFromNative(obj):
        """Replace the Address in obj.value with a string."""
        # Idempotence guard, matching OrgBehavior.transformFromNative:
        # serializing an already-serialized value would mangle it.
        if not obj.isNative: return obj
        obj.isNative = False
        obj.value = serializeFields(obj.value, ADDRESS_ORDER)
        return obj
registerBehavior(AddressBehavior, 'ADR')
class OrgBehavior(VCardBehavior):
    """A list of organization values and sub-organization values."""
    hasNative = True

    @staticmethod
    def transformToNative(obj):
        """Turn obj.value into a list."""
        if obj.isNative: return obj
        obj.isNative = True
        obj.value = splitFields(obj.value)
        return obj

    @staticmethod
    def transformFromNative(obj):
        """Replace the list in obj.value with a string."""
        # Already serialized: nothing to do.
        if not obj.isNative: return obj
        obj.isNative = False
        obj.value = serializeFields(obj.value)
        return obj
registerBehavior(OrgBehavior, 'ORG')
| Python |
"""Translate an ics file's events to a different timezone."""
from optparse import OptionParser
from vobject import icalendar, base
import sys
# PyICU is optional; main() reports its absence at runtime.
# Narrowed from a bare `except:` -- only a failed import should be masked.
try:
    import PyICU
except ImportError:
    PyICU = None
from datetime import datetime
def change_tz(cal, new_timezone, default, utc_only=False, utc_tz=icalendar.utc):
    """Shift DTSTART/DTEND datetimes of every vevent in cal to new_timezone.

    Naive datetimes are first assumed to be in the `default` zone.  When
    utc_only is set, only datetimes already in utc_tz are converted.
    """
    for vevent in getattr(cal, 'vevent_list', []):
        start = getattr(vevent, 'dtstart', None)
        end = getattr(vevent, 'dtend', None)
        for node in (start, end):
            if not node:
                continue
            dt = node.value
            if not isinstance(dt, datetime):
                continue
            if utc_only and dt.tzinfo != utc_tz:
                continue
            if dt.tzinfo is None:
                dt = dt.replace(tzinfo = default)
            node.value = dt.astimezone(new_timezone)
def main():
    """Entry point: parse options, then list timezones or convert a file."""
    options, args = get_options()
    # NOTE(review): when get_options() fails it returns (False, False), so
    # `options.list` below would raise AttributeError -- confirm intent.
    if PyICU is None:
        print "Failure. change_tz requires PyICU, exiting"
    elif options.list:
        # --list: just enumerate the available timezone names.
        for tz_string in PyICU.TimeZone.createEnumeration():
            print tz_string
    elif args:
        utc_only = options.utc
        if utc_only:
            which = "only UTC"
        else:
            which = "all"
        print "Converting %s events" % which
        ics_file = args[0]
        # Optional second argument: target timezone (defaults to local).
        if len(args) > 1:
            timezone = PyICU.ICUtzinfo.getInstance(args[1])
        else:
            timezone = PyICU.ICUtzinfo.default
        print "... Reading %s" % ics_file
        cal = base.readOne(file(ics_file))
        change_tz(cal, timezone, PyICU.ICUtzinfo.default, utc_only)

        # Write the converted calendar alongside the original.
        out_name = ics_file + '.converted'
        print "... Writing %s" % out_name
        out = file(out_name, 'wb')
        cal.serialize(out)
        print "Done"
# Script version reported by OptionParser's --version.
version = "0.1"
def get_options():
    """Parse command-line options; return (options, args).

    On a usage error, prints help and returns (False, False).
    NOTE(review): the (False, False) error return makes main() crash on
    `options.list` when PyICU is installed -- consider sys.exit instead.
    """
    ##### Configuration options #####
    usage = """usage: %prog [options] ics_file [timezone]"""
    parser = OptionParser(usage=usage, version=version)
    parser.set_description("change_tz will convert the timezones in an ics file. ")

    parser.add_option("-u", "--only-utc", dest="utc", action="store_true",
                      default=False, help="Only change UTC events.")
    parser.add_option("-l", "--list", dest="list", action="store_true",
                      default=False, help="List available timezones")

    (cmdline_options, args) = parser.parse_args()
    if not args and not cmdline_options.list:
        print "error: too few arguments given"
        print
        print parser.format_help()
        return False, False

    return cmdline_options, args
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C: exit quietly instead of dumping a traceback.
        print "Aborted"
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.