repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
bdcht/grandalf
|
grandalf/layouts.py
|
Layer._meanvalueattr
|
python
|
def _meanvalueattr(self,v):
sug = self.layout
if not self.prevlayer(): return sug.grx[v].bar
bars = [sug.grx[x].bar for x in self._neighbors(v)]
return sug.grx[v].bar if len(bars)==0 else float(sum(bars))/len(bars)
|
find new position of vertex v according to adjacency in prevlayer.
position is given by the mean value of adjacent positions.
experiments show that meanvalue heuristic performs better than median.
|
train
|
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L211-L220
|
[
"def prevlayer(self):\n return self.lower if self.layout.dirv==+1 else self.upper\n"
] |
class Layer(list):
"""
Layer is where Sugiyama layout organises vertices in hierarchical lists.
The placement of a vertex is done by the Sugiyama class, but it highly relies on
the *ordering* of vertices in each layer to reduce crossings.
This ordering depends on the neighbors found in the upper or lower layers.
Attributes:
layout (SugiyamaLayout): a reference to the sugiyama layout instance that
contains this layer
upper (Layer): a reference to the *upper* layer (rank-1)
lower (Layer): a reference to the *lower* layer (rank+1)
ccount (int) : number of crossings detected in this layer
Methods:
setup (layout): set initial attributes values from provided layout
nextlayer(): returns *next* layer in the current layout's direction parameter.
prevlayer(): returns *previous* layer in the current layout's direction parameter.
order(): compute *optimal* ordering of vertices within the layer.
"""
__r = None
layout = None
upper = None
lower = None
__x = 1.
ccount = None
def __str__(self):
s = '<Layer %d'%self.__r
s += ', len=%d'%len(self)
xc = self.ccount or '?'
s += ', crossings=%s>'%xc
return s
def setup(self,layout):
self.layout = layout
r = layout.layers.index(self)
self.__r=r
if len(self)>1: self.__x = 1./(len(self)-1)
for i,v in enumerate(self):
assert layout.grx[v].rank==r
layout.grx[v].pos = i
layout.grx[v].bar = i*self.__x
if r>0:
self.upper = layout.layers[r-1]
if r<len(layout.layers)-1:
self.lower = layout.layers[r+1]
def nextlayer(self):
return self.lower if self.layout.dirv==-1 else self.upper
def prevlayer(self):
return self.lower if self.layout.dirv==+1 else self.upper
def order(self):
sug = self.layout
sug._edge_inverter()
c = self._cc()
if c>0:
for v in self: sug.grx[v].bar = self._meanvalueattr(v)
# now resort layers l according to bar value:
self.sort(key=lambda x: sug.grx[x].bar)
# reduce & count crossings:
c = self._ordering_reduce_crossings()
# assign new position in layer l:
for i,v in enumerate(self):
sug.grx[v].pos = i
sug.grx[v].bar = i*self.__x
sug._edge_inverter()
self.ccount = c
return c
def _medianindex(self,v):
"""
find new position of vertex v according to adjacency in layer l+dir.
position is given by the median value of adjacent positions.
median heuristic is proven to achieve at most 3 times the minimum
of crossings (while barycenter achieve in theory the order of |V|)
"""
assert self.prevlayer()!=None
N = self._neighbors(v)
g=self.layout.grx
pos = [g[x].pos for x in N]
lp = len(pos)
if lp==0: return []
pos.sort()
pos = pos[::self.layout.dirh]
i,j = divmod(lp-1,2)
return [pos[i]] if j==0 else [pos[i],pos[i+j]]
def _neighbors(self,v):
"""
neighbors refer to upper/lower adjacent nodes.
Note that v.N() provides neighbors of v in the graph, while
this method provides the Vertex and DummyVertex adjacent to v in the
upper or lower layer (depending on layout.dirv state).
"""
assert self.layout.dag
dirv = self.layout.dirv
grxv = self.layout.grx[v]
try: #(cache)
return grxv.nvs[dirv]
except AttributeError:
grxv.nvs={-1:v.N(-1),+1:v.N(+1)}
if grxv.dummy: return grxv.nvs[dirv]
# v is real, v.N are graph neigbors but we need layers neighbors
for d in (-1,+1):
tr=grxv.rank+d
for i,x in enumerate(v.N(d)):
if self.layout.grx[x].rank==tr:continue
e=v.e_with(x)
dum = self.layout.ctrls[e][tr]
grxv.nvs[d][i]=dum
return grxv.nvs[dirv]
def _crossings(self):
"""
counts (inefficently but at least accurately) the number of
crossing edges between layer l and l+dirv.
P[i][j] counts the number of crossings from j-th edge of vertex i.
The total count of crossings is the sum of flattened P:
x = sum(sum(P,[]))
"""
g=self.layout.grx
P=[]
for v in self:
P.append([g[x].pos for x in self._neighbors(v)])
for i,p in enumerate(P):
candidates = sum(P[i+1:],[])
for j,e in enumerate(p):
p[j] = len(filter((lambda nx:nx<e), candidates))
del candidates
return P
def _cc(self):
"""
implementation of the efficient bilayer cross counting by insert-sort
(see Barth & Mutzel paper "Simple and Efficient Bilayer Cross Counting")
"""
g=self.layout.grx
P=[]
for v in self:
P.extend(sorted([g[x].pos for x in self._neighbors(v)]))
# count inversions in P:
s = []
count = 0
for i,p in enumerate(P):
j = bisect(s,p)
if j<i: count += (i-j)
s.insert(j,p)
return count
def _ordering_reduce_crossings(self):
assert self.layout.dag
g = self.layout.grx
N = len(self)
X=0
for i,j in izip(xrange(N-1),xrange(1,N)):
vi = self[i]
vj = self[j]
ni = [g[v].bar for v in self._neighbors(vi)]
Xij=Xji=0
for nj in [g[v].bar for v in self._neighbors(vj)]:
x = len([nx for nx in ni if nx>nj])
Xij += x
Xji += len(ni)-x
if Xji<Xij:
self[i] = vj
self[j] = vi
X += Xji
else:
X += Xij
return X
|
bdcht/grandalf
|
grandalf/layouts.py
|
Layer._medianindex
|
python
|
def _medianindex(self,v):
assert self.prevlayer()!=None
N = self._neighbors(v)
g=self.layout.grx
pos = [g[x].pos for x in N]
lp = len(pos)
if lp==0: return []
pos.sort()
pos = pos[::self.layout.dirh]
i,j = divmod(lp-1,2)
return [pos[i]] if j==0 else [pos[i],pos[i+j]]
|
find new position of vertex v according to adjacency in layer l+dir.
position is given by the median value of adjacent positions.
median heuristic is proven to achieve at most 3 times the minimum
of crossings (while barycenter achieve in theory the order of |V|)
|
train
|
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L222-L238
| null |
class Layer(list):
"""
Layer is where Sugiyama layout organises vertices in hierarchical lists.
The placement of a vertex is done by the Sugiyama class, but it highly relies on
the *ordering* of vertices in each layer to reduce crossings.
This ordering depends on the neighbors found in the upper or lower layers.
Attributes:
layout (SugiyamaLayout): a reference to the sugiyama layout instance that
contains this layer
upper (Layer): a reference to the *upper* layer (rank-1)
lower (Layer): a reference to the *lower* layer (rank+1)
ccount (int) : number of crossings detected in this layer
Methods:
setup (layout): set initial attributes values from provided layout
nextlayer(): returns *next* layer in the current layout's direction parameter.
prevlayer(): returns *previous* layer in the current layout's direction parameter.
order(): compute *optimal* ordering of vertices within the layer.
"""
__r = None
layout = None
upper = None
lower = None
__x = 1.
ccount = None
def __str__(self):
s = '<Layer %d'%self.__r
s += ', len=%d'%len(self)
xc = self.ccount or '?'
s += ', crossings=%s>'%xc
return s
def setup(self,layout):
self.layout = layout
r = layout.layers.index(self)
self.__r=r
if len(self)>1: self.__x = 1./(len(self)-1)
for i,v in enumerate(self):
assert layout.grx[v].rank==r
layout.grx[v].pos = i
layout.grx[v].bar = i*self.__x
if r>0:
self.upper = layout.layers[r-1]
if r<len(layout.layers)-1:
self.lower = layout.layers[r+1]
def nextlayer(self):
return self.lower if self.layout.dirv==-1 else self.upper
def prevlayer(self):
return self.lower if self.layout.dirv==+1 else self.upper
def order(self):
sug = self.layout
sug._edge_inverter()
c = self._cc()
if c>0:
for v in self: sug.grx[v].bar = self._meanvalueattr(v)
# now resort layers l according to bar value:
self.sort(key=lambda x: sug.grx[x].bar)
# reduce & count crossings:
c = self._ordering_reduce_crossings()
# assign new position in layer l:
for i,v in enumerate(self):
sug.grx[v].pos = i
sug.grx[v].bar = i*self.__x
sug._edge_inverter()
self.ccount = c
return c
def _meanvalueattr(self,v):
"""
find new position of vertex v according to adjacency in prevlayer.
position is given by the mean value of adjacent positions.
experiments show that meanvalue heuristic performs better than median.
"""
sug = self.layout
if not self.prevlayer(): return sug.grx[v].bar
bars = [sug.grx[x].bar for x in self._neighbors(v)]
return sug.grx[v].bar if len(bars)==0 else float(sum(bars))/len(bars)
def _neighbors(self,v):
"""
neighbors refer to upper/lower adjacent nodes.
Note that v.N() provides neighbors of v in the graph, while
this method provides the Vertex and DummyVertex adjacent to v in the
upper or lower layer (depending on layout.dirv state).
"""
assert self.layout.dag
dirv = self.layout.dirv
grxv = self.layout.grx[v]
try: #(cache)
return grxv.nvs[dirv]
except AttributeError:
grxv.nvs={-1:v.N(-1),+1:v.N(+1)}
if grxv.dummy: return grxv.nvs[dirv]
# v is real, v.N are graph neigbors but we need layers neighbors
for d in (-1,+1):
tr=grxv.rank+d
for i,x in enumerate(v.N(d)):
if self.layout.grx[x].rank==tr:continue
e=v.e_with(x)
dum = self.layout.ctrls[e][tr]
grxv.nvs[d][i]=dum
return grxv.nvs[dirv]
def _crossings(self):
"""
counts (inefficently but at least accurately) the number of
crossing edges between layer l and l+dirv.
P[i][j] counts the number of crossings from j-th edge of vertex i.
The total count of crossings is the sum of flattened P:
x = sum(sum(P,[]))
"""
g=self.layout.grx
P=[]
for v in self:
P.append([g[x].pos for x in self._neighbors(v)])
for i,p in enumerate(P):
candidates = sum(P[i+1:],[])
for j,e in enumerate(p):
p[j] = len(filter((lambda nx:nx<e), candidates))
del candidates
return P
def _cc(self):
"""
implementation of the efficient bilayer cross counting by insert-sort
(see Barth & Mutzel paper "Simple and Efficient Bilayer Cross Counting")
"""
g=self.layout.grx
P=[]
for v in self:
P.extend(sorted([g[x].pos for x in self._neighbors(v)]))
# count inversions in P:
s = []
count = 0
for i,p in enumerate(P):
j = bisect(s,p)
if j<i: count += (i-j)
s.insert(j,p)
return count
def _ordering_reduce_crossings(self):
assert self.layout.dag
g = self.layout.grx
N = len(self)
X=0
for i,j in izip(xrange(N-1),xrange(1,N)):
vi = self[i]
vj = self[j]
ni = [g[v].bar for v in self._neighbors(vi)]
Xij=Xji=0
for nj in [g[v].bar for v in self._neighbors(vj)]:
x = len([nx for nx in ni if nx>nj])
Xij += x
Xji += len(ni)-x
if Xji<Xij:
self[i] = vj
self[j] = vi
X += Xji
else:
X += Xij
return X
|
bdcht/grandalf
|
grandalf/layouts.py
|
Layer._neighbors
|
python
|
def _neighbors(self,v):
assert self.layout.dag
dirv = self.layout.dirv
grxv = self.layout.grx[v]
try: #(cache)
return grxv.nvs[dirv]
except AttributeError:
grxv.nvs={-1:v.N(-1),+1:v.N(+1)}
if grxv.dummy: return grxv.nvs[dirv]
# v is real, v.N are graph neigbors but we need layers neighbors
for d in (-1,+1):
tr=grxv.rank+d
for i,x in enumerate(v.N(d)):
if self.layout.grx[x].rank==tr:continue
e=v.e_with(x)
dum = self.layout.ctrls[e][tr]
grxv.nvs[d][i]=dum
return grxv.nvs[dirv]
|
neighbors refer to upper/lower adjacent nodes.
Note that v.N() provides neighbors of v in the graph, while
this method provides the Vertex and DummyVertex adjacent to v in the
upper or lower layer (depending on layout.dirv state).
|
train
|
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L240-L263
| null |
class Layer(list):
"""
Layer is where Sugiyama layout organises vertices in hierarchical lists.
The placement of a vertex is done by the Sugiyama class, but it highly relies on
the *ordering* of vertices in each layer to reduce crossings.
This ordering depends on the neighbors found in the upper or lower layers.
Attributes:
layout (SugiyamaLayout): a reference to the sugiyama layout instance that
contains this layer
upper (Layer): a reference to the *upper* layer (rank-1)
lower (Layer): a reference to the *lower* layer (rank+1)
ccount (int) : number of crossings detected in this layer
Methods:
setup (layout): set initial attributes values from provided layout
nextlayer(): returns *next* layer in the current layout's direction parameter.
prevlayer(): returns *previous* layer in the current layout's direction parameter.
order(): compute *optimal* ordering of vertices within the layer.
"""
__r = None
layout = None
upper = None
lower = None
__x = 1.
ccount = None
def __str__(self):
s = '<Layer %d'%self.__r
s += ', len=%d'%len(self)
xc = self.ccount or '?'
s += ', crossings=%s>'%xc
return s
def setup(self,layout):
self.layout = layout
r = layout.layers.index(self)
self.__r=r
if len(self)>1: self.__x = 1./(len(self)-1)
for i,v in enumerate(self):
assert layout.grx[v].rank==r
layout.grx[v].pos = i
layout.grx[v].bar = i*self.__x
if r>0:
self.upper = layout.layers[r-1]
if r<len(layout.layers)-1:
self.lower = layout.layers[r+1]
def nextlayer(self):
return self.lower if self.layout.dirv==-1 else self.upper
def prevlayer(self):
return self.lower if self.layout.dirv==+1 else self.upper
def order(self):
sug = self.layout
sug._edge_inverter()
c = self._cc()
if c>0:
for v in self: sug.grx[v].bar = self._meanvalueattr(v)
# now resort layers l according to bar value:
self.sort(key=lambda x: sug.grx[x].bar)
# reduce & count crossings:
c = self._ordering_reduce_crossings()
# assign new position in layer l:
for i,v in enumerate(self):
sug.grx[v].pos = i
sug.grx[v].bar = i*self.__x
sug._edge_inverter()
self.ccount = c
return c
def _meanvalueattr(self,v):
"""
find new position of vertex v according to adjacency in prevlayer.
position is given by the mean value of adjacent positions.
experiments show that meanvalue heuristic performs better than median.
"""
sug = self.layout
if not self.prevlayer(): return sug.grx[v].bar
bars = [sug.grx[x].bar for x in self._neighbors(v)]
return sug.grx[v].bar if len(bars)==0 else float(sum(bars))/len(bars)
def _medianindex(self,v):
"""
find new position of vertex v according to adjacency in layer l+dir.
position is given by the median value of adjacent positions.
median heuristic is proven to achieve at most 3 times the minimum
of crossings (while barycenter achieve in theory the order of |V|)
"""
assert self.prevlayer()!=None
N = self._neighbors(v)
g=self.layout.grx
pos = [g[x].pos for x in N]
lp = len(pos)
if lp==0: return []
pos.sort()
pos = pos[::self.layout.dirh]
i,j = divmod(lp-1,2)
return [pos[i]] if j==0 else [pos[i],pos[i+j]]
def _crossings(self):
"""
counts (inefficently but at least accurately) the number of
crossing edges between layer l and l+dirv.
P[i][j] counts the number of crossings from j-th edge of vertex i.
The total count of crossings is the sum of flattened P:
x = sum(sum(P,[]))
"""
g=self.layout.grx
P=[]
for v in self:
P.append([g[x].pos for x in self._neighbors(v)])
for i,p in enumerate(P):
candidates = sum(P[i+1:],[])
for j,e in enumerate(p):
p[j] = len(filter((lambda nx:nx<e), candidates))
del candidates
return P
def _cc(self):
"""
implementation of the efficient bilayer cross counting by insert-sort
(see Barth & Mutzel paper "Simple and Efficient Bilayer Cross Counting")
"""
g=self.layout.grx
P=[]
for v in self:
P.extend(sorted([g[x].pos for x in self._neighbors(v)]))
# count inversions in P:
s = []
count = 0
for i,p in enumerate(P):
j = bisect(s,p)
if j<i: count += (i-j)
s.insert(j,p)
return count
def _ordering_reduce_crossings(self):
assert self.layout.dag
g = self.layout.grx
N = len(self)
X=0
for i,j in izip(xrange(N-1),xrange(1,N)):
vi = self[i]
vj = self[j]
ni = [g[v].bar for v in self._neighbors(vi)]
Xij=Xji=0
for nj in [g[v].bar for v in self._neighbors(vj)]:
x = len([nx for nx in ni if nx>nj])
Xij += x
Xji += len(ni)-x
if Xji<Xij:
self[i] = vj
self[j] = vi
X += Xji
else:
X += Xij
return X
|
bdcht/grandalf
|
grandalf/layouts.py
|
Layer._crossings
|
python
|
def _crossings(self):
g=self.layout.grx
P=[]
for v in self:
P.append([g[x].pos for x in self._neighbors(v)])
for i,p in enumerate(P):
candidates = sum(P[i+1:],[])
for j,e in enumerate(p):
p[j] = len(filter((lambda nx:nx<e), candidates))
del candidates
return P
|
counts (inefficently but at least accurately) the number of
crossing edges between layer l and l+dirv.
P[i][j] counts the number of crossings from j-th edge of vertex i.
The total count of crossings is the sum of flattened P:
x = sum(sum(P,[]))
|
train
|
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L265-L282
| null |
class Layer(list):
"""
Layer is where Sugiyama layout organises vertices in hierarchical lists.
The placement of a vertex is done by the Sugiyama class, but it highly relies on
the *ordering* of vertices in each layer to reduce crossings.
This ordering depends on the neighbors found in the upper or lower layers.
Attributes:
layout (SugiyamaLayout): a reference to the sugiyama layout instance that
contains this layer
upper (Layer): a reference to the *upper* layer (rank-1)
lower (Layer): a reference to the *lower* layer (rank+1)
ccount (int) : number of crossings detected in this layer
Methods:
setup (layout): set initial attributes values from provided layout
nextlayer(): returns *next* layer in the current layout's direction parameter.
prevlayer(): returns *previous* layer in the current layout's direction parameter.
order(): compute *optimal* ordering of vertices within the layer.
"""
__r = None
layout = None
upper = None
lower = None
__x = 1.
ccount = None
def __str__(self):
s = '<Layer %d'%self.__r
s += ', len=%d'%len(self)
xc = self.ccount or '?'
s += ', crossings=%s>'%xc
return s
def setup(self,layout):
self.layout = layout
r = layout.layers.index(self)
self.__r=r
if len(self)>1: self.__x = 1./(len(self)-1)
for i,v in enumerate(self):
assert layout.grx[v].rank==r
layout.grx[v].pos = i
layout.grx[v].bar = i*self.__x
if r>0:
self.upper = layout.layers[r-1]
if r<len(layout.layers)-1:
self.lower = layout.layers[r+1]
def nextlayer(self):
return self.lower if self.layout.dirv==-1 else self.upper
def prevlayer(self):
return self.lower if self.layout.dirv==+1 else self.upper
def order(self):
sug = self.layout
sug._edge_inverter()
c = self._cc()
if c>0:
for v in self: sug.grx[v].bar = self._meanvalueattr(v)
# now resort layers l according to bar value:
self.sort(key=lambda x: sug.grx[x].bar)
# reduce & count crossings:
c = self._ordering_reduce_crossings()
# assign new position in layer l:
for i,v in enumerate(self):
sug.grx[v].pos = i
sug.grx[v].bar = i*self.__x
sug._edge_inverter()
self.ccount = c
return c
def _meanvalueattr(self,v):
"""
find new position of vertex v according to adjacency in prevlayer.
position is given by the mean value of adjacent positions.
experiments show that meanvalue heuristic performs better than median.
"""
sug = self.layout
if not self.prevlayer(): return sug.grx[v].bar
bars = [sug.grx[x].bar for x in self._neighbors(v)]
return sug.grx[v].bar if len(bars)==0 else float(sum(bars))/len(bars)
def _medianindex(self,v):
"""
find new position of vertex v according to adjacency in layer l+dir.
position is given by the median value of adjacent positions.
median heuristic is proven to achieve at most 3 times the minimum
of crossings (while barycenter achieve in theory the order of |V|)
"""
assert self.prevlayer()!=None
N = self._neighbors(v)
g=self.layout.grx
pos = [g[x].pos for x in N]
lp = len(pos)
if lp==0: return []
pos.sort()
pos = pos[::self.layout.dirh]
i,j = divmod(lp-1,2)
return [pos[i]] if j==0 else [pos[i],pos[i+j]]
def _neighbors(self,v):
"""
neighbors refer to upper/lower adjacent nodes.
Note that v.N() provides neighbors of v in the graph, while
this method provides the Vertex and DummyVertex adjacent to v in the
upper or lower layer (depending on layout.dirv state).
"""
assert self.layout.dag
dirv = self.layout.dirv
grxv = self.layout.grx[v]
try: #(cache)
return grxv.nvs[dirv]
except AttributeError:
grxv.nvs={-1:v.N(-1),+1:v.N(+1)}
if grxv.dummy: return grxv.nvs[dirv]
# v is real, v.N are graph neigbors but we need layers neighbors
for d in (-1,+1):
tr=grxv.rank+d
for i,x in enumerate(v.N(d)):
if self.layout.grx[x].rank==tr:continue
e=v.e_with(x)
dum = self.layout.ctrls[e][tr]
grxv.nvs[d][i]=dum
return grxv.nvs[dirv]
def _cc(self):
"""
implementation of the efficient bilayer cross counting by insert-sort
(see Barth & Mutzel paper "Simple and Efficient Bilayer Cross Counting")
"""
g=self.layout.grx
P=[]
for v in self:
P.extend(sorted([g[x].pos for x in self._neighbors(v)]))
# count inversions in P:
s = []
count = 0
for i,p in enumerate(P):
j = bisect(s,p)
if j<i: count += (i-j)
s.insert(j,p)
return count
def _ordering_reduce_crossings(self):
assert self.layout.dag
g = self.layout.grx
N = len(self)
X=0
for i,j in izip(xrange(N-1),xrange(1,N)):
vi = self[i]
vj = self[j]
ni = [g[v].bar for v in self._neighbors(vi)]
Xij=Xji=0
for nj in [g[v].bar for v in self._neighbors(vj)]:
x = len([nx for nx in ni if nx>nj])
Xij += x
Xji += len(ni)-x
if Xji<Xij:
self[i] = vj
self[j] = vi
X += Xji
else:
X += Xij
return X
|
bdcht/grandalf
|
grandalf/layouts.py
|
Layer._cc
|
python
|
def _cc(self):
g=self.layout.grx
P=[]
for v in self:
P.extend(sorted([g[x].pos for x in self._neighbors(v)]))
# count inversions in P:
s = []
count = 0
for i,p in enumerate(P):
j = bisect(s,p)
if j<i: count += (i-j)
s.insert(j,p)
return count
|
implementation of the efficient bilayer cross counting by insert-sort
(see Barth & Mutzel paper "Simple and Efficient Bilayer Cross Counting")
|
train
|
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L284-L300
|
[
"def _neighbors(self,v):\n \"\"\"\n neighbors refer to upper/lower adjacent nodes.\n Note that v.N() provides neighbors of v in the graph, while\n this method provides the Vertex and DummyVertex adjacent to v in the\n upper or lower layer (depending on layout.dirv state).\n \"\"\"\n assert self.layout.dag\n dirv = self.layout.dirv\n grxv = self.layout.grx[v]\n try: #(cache)\n return grxv.nvs[dirv]\n except AttributeError:\n grxv.nvs={-1:v.N(-1),+1:v.N(+1)}\n if grxv.dummy: return grxv.nvs[dirv]\n # v is real, v.N are graph neigbors but we need layers neighbors\n for d in (-1,+1):\n tr=grxv.rank+d\n for i,x in enumerate(v.N(d)):\n if self.layout.grx[x].rank==tr:continue\n e=v.e_with(x)\n dum = self.layout.ctrls[e][tr]\n grxv.nvs[d][i]=dum\n return grxv.nvs[dirv]\n"
] |
class Layer(list):
"""
Layer is where Sugiyama layout organises vertices in hierarchical lists.
The placement of a vertex is done by the Sugiyama class, but it highly relies on
the *ordering* of vertices in each layer to reduce crossings.
This ordering depends on the neighbors found in the upper or lower layers.
Attributes:
layout (SugiyamaLayout): a reference to the sugiyama layout instance that
contains this layer
upper (Layer): a reference to the *upper* layer (rank-1)
lower (Layer): a reference to the *lower* layer (rank+1)
ccount (int) : number of crossings detected in this layer
Methods:
setup (layout): set initial attributes values from provided layout
nextlayer(): returns *next* layer in the current layout's direction parameter.
prevlayer(): returns *previous* layer in the current layout's direction parameter.
order(): compute *optimal* ordering of vertices within the layer.
"""
__r = None
layout = None
upper = None
lower = None
__x = 1.
ccount = None
def __str__(self):
s = '<Layer %d'%self.__r
s += ', len=%d'%len(self)
xc = self.ccount or '?'
s += ', crossings=%s>'%xc
return s
def setup(self,layout):
self.layout = layout
r = layout.layers.index(self)
self.__r=r
if len(self)>1: self.__x = 1./(len(self)-1)
for i,v in enumerate(self):
assert layout.grx[v].rank==r
layout.grx[v].pos = i
layout.grx[v].bar = i*self.__x
if r>0:
self.upper = layout.layers[r-1]
if r<len(layout.layers)-1:
self.lower = layout.layers[r+1]
def nextlayer(self):
return self.lower if self.layout.dirv==-1 else self.upper
def prevlayer(self):
return self.lower if self.layout.dirv==+1 else self.upper
def order(self):
sug = self.layout
sug._edge_inverter()
c = self._cc()
if c>0:
for v in self: sug.grx[v].bar = self._meanvalueattr(v)
# now resort layers l according to bar value:
self.sort(key=lambda x: sug.grx[x].bar)
# reduce & count crossings:
c = self._ordering_reduce_crossings()
# assign new position in layer l:
for i,v in enumerate(self):
sug.grx[v].pos = i
sug.grx[v].bar = i*self.__x
sug._edge_inverter()
self.ccount = c
return c
def _meanvalueattr(self,v):
"""
find new position of vertex v according to adjacency in prevlayer.
position is given by the mean value of adjacent positions.
experiments show that meanvalue heuristic performs better than median.
"""
sug = self.layout
if not self.prevlayer(): return sug.grx[v].bar
bars = [sug.grx[x].bar for x in self._neighbors(v)]
return sug.grx[v].bar if len(bars)==0 else float(sum(bars))/len(bars)
def _medianindex(self,v):
"""
find new position of vertex v according to adjacency in layer l+dir.
position is given by the median value of adjacent positions.
median heuristic is proven to achieve at most 3 times the minimum
of crossings (while barycenter achieve in theory the order of |V|)
"""
assert self.prevlayer()!=None
N = self._neighbors(v)
g=self.layout.grx
pos = [g[x].pos for x in N]
lp = len(pos)
if lp==0: return []
pos.sort()
pos = pos[::self.layout.dirh]
i,j = divmod(lp-1,2)
return [pos[i]] if j==0 else [pos[i],pos[i+j]]
def _neighbors(self,v):
"""
neighbors refer to upper/lower adjacent nodes.
Note that v.N() provides neighbors of v in the graph, while
this method provides the Vertex and DummyVertex adjacent to v in the
upper or lower layer (depending on layout.dirv state).
"""
assert self.layout.dag
dirv = self.layout.dirv
grxv = self.layout.grx[v]
try: #(cache)
return grxv.nvs[dirv]
except AttributeError:
grxv.nvs={-1:v.N(-1),+1:v.N(+1)}
if grxv.dummy: return grxv.nvs[dirv]
# v is real, v.N are graph neigbors but we need layers neighbors
for d in (-1,+1):
tr=grxv.rank+d
for i,x in enumerate(v.N(d)):
if self.layout.grx[x].rank==tr:continue
e=v.e_with(x)
dum = self.layout.ctrls[e][tr]
grxv.nvs[d][i]=dum
return grxv.nvs[dirv]
def _crossings(self):
"""
counts (inefficently but at least accurately) the number of
crossing edges between layer l and l+dirv.
P[i][j] counts the number of crossings from j-th edge of vertex i.
The total count of crossings is the sum of flattened P:
x = sum(sum(P,[]))
"""
g=self.layout.grx
P=[]
for v in self:
P.append([g[x].pos for x in self._neighbors(v)])
for i,p in enumerate(P):
candidates = sum(P[i+1:],[])
for j,e in enumerate(p):
p[j] = len(filter((lambda nx:nx<e), candidates))
del candidates
return P
def _ordering_reduce_crossings(self):
assert self.layout.dag
g = self.layout.grx
N = len(self)
X=0
for i,j in izip(xrange(N-1),xrange(1,N)):
vi = self[i]
vj = self[j]
ni = [g[v].bar for v in self._neighbors(vi)]
Xij=Xji=0
for nj in [g[v].bar for v in self._neighbors(vj)]:
x = len([nx for nx in ni if nx>nj])
Xij += x
Xji += len(ni)-x
if Xji<Xij:
self[i] = vj
self[j] = vi
X += Xji
else:
X += Xij
return X
|
bdcht/grandalf
|
grandalf/layouts.py
|
SugiyamaLayout.init_all
|
python
|
def init_all(self,roots=None,inverted_edges=None,optimize=False):
if self.initdone: return
# For layered sugiyama algorithm, the input graph must be acyclic,
# so we must provide a list of root nodes and a list of inverted edges.
if roots==None:
roots = [v for v in self.g.sV if len(v.e_in())==0]
if inverted_edges==None:
L = self.g.get_scs_with_feedback(roots)
inverted_edges = [x for x in self.g.sE if x.feedback]
self.alt_e = inverted_edges
# assign rank to all vertices:
self.rank_all(roots,optimize)
# add dummy vertex/edge for 'long' edges:
for e in self.g.E():
self.setdummies(e)
# precompute some layers values:
for l in self.layers: l.setup(self)
self.initdone = True
|
initializes the layout algorithm by computing roots (unless provided),
inverted edges (unless provided), vertices ranks and creates all dummy
vertices and layers.
Parameters:
roots (list[Vertex]): set *root* vertices (layer 0)
inverted_edges (list[Edge]): set edges to invert to have a DAG.
optimize (bool): optimize ranking if True (default False)
|
train
|
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L378-L404
|
[
"def E(self,cond=None):\n E = self.sE\n if cond is None: cond=(lambda x:True)\n for e in E:\n if cond(e):\n yield e\n",
"def get_scs_with_feedback(self,roots=None):\n from sys import getrecursionlimit,setrecursionlimit\n limit=getrecursionlimit()\n N=self.norm()+10\n if N>limit:\n setrecursionlimit(N)\n def _visit(v,L):\n v.ind = v.ncur\n v.lowlink = v.ncur\n Vertex.ncur += 1\n self.tstack.append(v)\n v.mark = True\n for e in v.e_out():\n w = e.v[1]\n if w.ind==0:\n _visit(w,L)\n v.lowlink = min(v.lowlink,w.lowlink)\n elif w.mark:\n e.feedback = True\n if w in self.tstack:\n v.lowlink = min(v.lowlink,w.ind)\n if v.lowlink==v.ind:\n l=[self.tstack.pop()]\n while l[0]!=v:\n l.insert(0,self.tstack.pop())\n #print \"unstacked %s\"%('-'.join([x.data[1:13] for x in l]))\n L.append(l)\n v.mark=False\n if roots is None: roots=self.roots()\n self.tstack=[]\n scs = []\n Vertex.ncur=1\n for v in self.sV: v.ind=0\n # start exploring tree from roots:\n for v in roots:\n v = self.sV.get(v)\n if v.ind==0: _visit(v,scs)\n # now possibly unvisited vertices:\n for v in self.sV:\n if v.ind==0: _visit(v,scs)\n # clean up Tarjan-specific data:\n for v in self.sV:\n del v.ind\n del v.lowlink\n del v.mark\n del Vertex.ncur\n del self.tstack\n setrecursionlimit(limit)\n return scs\n",
"def rank_all(self,roots,optimize=False):\n \"\"\"Computes rank of all vertices.\n add provided roots to rank 0 vertices,\n otherwise update ranking from provided roots.\n The initial rank is based on precedence relationships,\n optimal ranking may be derived from network flow (simplex).\n \"\"\"\n self._edge_inverter()\n r = [x for x in self.g.sV if (len(x.e_in())==0 and x not in roots)]\n self._rank_init(roots+r)\n if optimize: self._rank_optimize()\n self._edge_inverter()\n",
"def setdummies(self,e):\n \"\"\"creates and defines all needed dummy vertices for edge e.\n \"\"\"\n v0,v1 = e.v\n r0,r1 = self.grx[v0].rank,self.grx[v1].rank\n if r0>r1:\n assert e in self.alt_e\n v0,v1 = v1,v0\n r0,r1 = r1,r0\n if (r1-r0)>1:\n # \"dummy vertices\" are stored in the edge ctrl dict,\n # keyed by their rank in layers.\n ctrl=self.ctrls[e]={}\n ctrl[r0]=v0\n ctrl[r1]=v1\n for r in xrange(r0+1,r1):\n self.dummyctrl(r,ctrl)\n"
] |
class SugiyamaLayout(object):
"""
The Sugiyama layout is the traditional "layered" graph layout called
*dot* in graphviz. This layout is quite efficient but heavily relies
on drawing heuristics. Adaptive drawing is limited to
extending the leaves only, but since the algorithm is quite fast
redrawing the entire graph (up to about a thousand nodes) gives
usually good results in less than a second.
The Sugiyama Layout Class takes as input a core_graph object and implements
an efficient drawing algorithm based on nodes dimensions provided through
a user-defined *view* property in each vertex.
Attributes:
dirvh (int): the current aligment state
order_inter (int): the default number of layer placement iterations
order_attr (str): set attribute name used for layer ordering
xspace (int): horizontal space between vertices in a layer
yspace (int): vertical space between layers
dw (int): default width of a vertex
dh (int): default height of a vertex
g (graph_core): the graph component reference
layers (list[Layer]): the list of layers
grx (dict): associate vertex (possibly dummy) with their sugiyama attributes
ctrls (dict): associate edge with all its vertices (including dummies)
dag (bool): the current acyclic state
initdone (bool): True if state is initialized (see init_all).
"""
def __init__(self,g):
from grandalf.utils.geometry import median_wh
# drawing parameters:
self.dirvh=0
self.order_iter = 8
self.order_attr = 'pos'
self.xspace = 20
self.yspace = 20
self.dw = 10
self.dh = 10
# For layered graphs, vertices and edges need to have some additional
# attributes that make sense only for this kind of layout:
# update graph struct:
self.g = g
self.layers = []
self.grx= {}
self.ctrls = {}
self.dag = False
for v in self.g.V():
assert hasattr(v,'view')
self.grx[v] = _sugiyama_vertex_attr()
self.dw,self.dh = median_wh([v.view for v in self.g.V()])
self.initdone = False
def init_all(self,roots=None,inverted_edges=None,optimize=False):
"""initializes the layout algorithm by computing roots (unless provided),
inverted edges (unless provided), vertices ranks and creates all dummy
vertices and layers.
Parameters:
roots (list[Vertex]): set *root* vertices (layer 0)
inverted_edges (list[Edge]): set edges to invert to have a DAG.
optimize (bool): optimize ranking if True (default False)
"""
if self.initdone: return
# For layered sugiyama algorithm, the input graph must be acyclic,
# so we must provide a list of root nodes and a list of inverted edges.
if roots==None:
roots = [v for v in self.g.sV if len(v.e_in())==0]
if inverted_edges==None:
L = self.g.get_scs_with_feedback(roots)
inverted_edges = [x for x in self.g.sE if x.feedback]
self.alt_e = inverted_edges
# assign rank to all vertices:
self.rank_all(roots,optimize)
# add dummy vertex/edge for 'long' edges:
for e in self.g.E():
self.setdummies(e)
# precompute some layers values:
for l in self.layers: l.setup(self)
self.initdone = True
def draw(self,N=1.5):
"""compute every node coordinates after converging to optimal ordering by N
rounds, and finally perform the edge routing.
"""
while N>0.5:
for (l,mvmt) in self.ordering_step():
pass
N = N-1
if N>0:
for (l,mvmt) in self.ordering_step(oneway=True):
pass
self.setxy()
self.draw_edges()
def _edge_inverter(self):
for e in self.alt_e:
x,y = e.v
e.v = (y,x)
self.dag = not self.dag
if self.dag:
for e in self.g.degenerated_edges:
e.detach()
self.g.sE.remove(e)
else:
for e in self.g.degenerated_edges:
self.g.add_edge(e)
# internal state for alignment policy:
# dirvh=0 -> dirh=+1, dirv=-1: leftmost upper
# dirvh=1 -> dirh=-1, dirv=-1: rightmost upper
# dirvh=2 -> dirh=+1, dirv=+1: leftmost lower
# dirvh=3 -> dirh=-1, dirv=+1: rightmost lower
@property
def dirvh(self): return self.__dirvh
@property
def dirv(self): return self.__dirv
@property
def dirh(self): return self.__dirh
@dirvh.setter
def dirvh(self,dirvh):
assert dirvh in range(4)
self.__dirvh=dirvh
self.__dirh,self.__dirv={0:(1,-1), 1:(-1,-1), 2:(1,1), 3:(-1,1)}[dirvh]
@dirv.setter
def dirv(self,dirv):
assert dirv in (-1,+1)
dirvh = (dirv+1)+(1-self.__dirh)//2
self.dirvh = dirvh
@dirh.setter
def dirh(self,dirh):
assert dirh in (-1,+1)
dirvh = (self.__dirv+1)+(1-dirh)//2
self.dirvh = dirvh
def rank_all(self,roots,optimize=False):
"""Computes rank of all vertices.
add provided roots to rank 0 vertices,
otherwise update ranking from provided roots.
The initial rank is based on precedence relationships,
optimal ranking may be derived from network flow (simplex).
"""
self._edge_inverter()
r = [x for x in self.g.sV if (len(x.e_in())==0 and x not in roots)]
self._rank_init(roots+r)
if optimize: self._rank_optimize()
self._edge_inverter()
def _rank_init(self,unranked):
"""Computes rank of provided unranked list of vertices and all
their children. A vertex will be asign a rank when all its
inward edges have been *scanned*. When a vertex is asigned
a rank, its outward edges are marked *scanned*.
"""
assert self.dag
scan = {}
# set rank of unranked based on its in-edges vertices ranks:
while len(unranked)>0:
l = []
for v in unranked:
self.setrank(v)
# mark out-edges has scan-able:
for e in v.e_out(): scan[e]=True
# check if out-vertices are rank-able:
for x in v.N(+1):
if not (False in [scan.get(e,False) for e in x.e_in()]):
if x not in l: l.append(x)
unranked=l
def _rank_optimize(self):
"""optimize ranking by pushing long edges toward lower layers as much as possible.
see other interersting network flow solver to minimize total edge length
(http://jgaa.info/accepted/2005/EiglspergerSiebenhallerKaufmann2005.9.3.pdf)
"""
assert self.dag
for l in reversed(self.layers):
for v in l:
gv = self.grx[v]
for x in v.N(-1):
if all((self.grx[y].rank>=gv.rank for y in x.N(+1))):
gx = self.grx[x]
self.layers[gx.rank].remove(x)
gx.rank = gv.rank-1
self.layers[gv.rank-1].append(x)
def setrank(self,v):
"""set rank value for vertex v and add it to the corresponding layer.
The Layer is created if it is the first vertex with this rank.
"""
assert self.dag
r=max([self.grx[x].rank for x in v.N(-1)]+[-1])+1
self.grx[v].rank=r
# add it to its layer:
try:
self.layers[r].append(v)
except IndexError:
assert r==len(self.layers)
self.layers.append(Layer([v]))
def dummyctrl(self,r,ctrl):
"""creates a DummyVertex at rank r inserted in the ctrl dict
of the associated edge and layer.
Arguments:
r (int): rank value
ctrl (dict): the edge's control vertices
Returns:
DummyVertex : the created DummyVertex.
"""
dv = DummyVertex(r)
dv.view.w,dv.view.h=self.dw,self.dh
self.grx[dv] = dv
dv.ctrl = ctrl
ctrl[r] = dv
self.layers[r].append(dv)
return dv
def setdummies(self,e):
"""creates and defines all needed dummy vertices for edge e.
"""
v0,v1 = e.v
r0,r1 = self.grx[v0].rank,self.grx[v1].rank
if r0>r1:
assert e in self.alt_e
v0,v1 = v1,v0
r0,r1 = r1,r0
if (r1-r0)>1:
# "dummy vertices" are stored in the edge ctrl dict,
# keyed by their rank in layers.
ctrl=self.ctrls[e]={}
ctrl[r0]=v0
ctrl[r1]=v1
for r in xrange(r0+1,r1):
self.dummyctrl(r,ctrl)
def draw_step(self):
"""iterator that computes all vertices coordinates and edge routing after
just one step (one layer after the other from top to bottom to top).
Purely inefficient ! Use it only for "animation" or debugging purpose.
"""
ostep = self.ordering_step()
for s in ostep:
self.setxy()
self.draw_edges()
yield s
def ordering_step(self,oneway=False):
"""iterator that computes all vertices ordering in their layers
(one layer after the other from top to bottom, to top again unless
oneway is True).
"""
self.dirv=-1
crossings = 0
for l in self.layers:
mvmt = l.order()
crossings += mvmt
yield (l,mvmt)
if oneway or (crossings == 0):
return
self.dirv=+1
while l:
mvmt = l.order()
yield (l,mvmt)
l = l.nextlayer()
def setxy(self):
"""computes all vertex coordinates (x,y) using
an algorithm by Brandes & Kopf.
"""
self._edge_inverter()
self._detect_alignment_conflicts()
inf = float('infinity')
# initialize vertex coordinates attributes:
for l in self.layers:
for v in l:
self.grx[v].root = v
self.grx[v].align = v
self.grx[v].sink = v
self.grx[v].shift = inf
self.grx[v].X = None
self.grx[v].x = [0.0]*4
curvh = self.dirvh # save current dirvh value
for dirvh in xrange(4):
self.dirvh = dirvh
self._coord_vertical_alignment()
self._coord_horizontal_compact()
self.dirvh = curvh # restore it
# vertical coordinate assigment of all nodes:
Y = 0
for l in self.layers:
dY = max([v.view.h/2. for v in l])
for v in l:
vx = sorted(self.grx[v].x)
# mean of the 2 medians out of the 4 x-coord computed above:
avgm = (vx[1]+vx[2])/2.
# final xy-coordinates :
v.view.xy = (avgm,Y+dY)
Y += 2*dY+self.yspace
self._edge_inverter()
def _detect_alignment_conflicts(self):
"""mark conflicts between edges:
inner edges are edges between dummy nodes
type 0 is regular crossing regular (or sharing vertex)
type 1 is inner crossing regular (targeted crossings)
type 2 is inner crossing inner (avoided by reduce_crossings phase)
"""
curvh = self.dirvh # save current dirvh value
self.dirvh=0
self.conflicts = []
for L in self.layers:
last = len(L)-1
prev = L.prevlayer()
if not prev: continue
k0=0
k1_init=len(prev)-1
l=0
for l1,v in enumerate(L):
if not self.grx[v].dummy: continue
if l1==last or v.inner(-1):
k1=k1_init
if v.inner(-1):
k1=self.grx[v.N(-1)[-1]].pos
for vl in L[l:l1+1]:
for vk in L._neighbors(vl):
k = self.grx[vk].pos
if (k<k0 or k>k1):
self.conflicts.append((vk,vl))
l=l1+1
k0=k1
self.dirvh = curvh # restore it
def _coord_vertical_alignment(self):
"""performs vertical alignment according to current dirvh internal state.
"""
dirh,dirv = self.dirh,self.dirv
g = self.grx
for l in self.layers[::-dirv]:
if not l.prevlayer(): continue
r=None
for vk in l[::dirh]:
for m in l._medianindex(vk):
# take the median node in dirv layer:
um = l.prevlayer()[m]
# if vk is "free" align it with um's root
if g[vk].align is vk:
if dirv==1: vpair = (vk,um)
else: vpair = (um,vk)
# if vk<->um link is used for alignment
if (vpair not in self.conflicts) and \
(r==None or dirh*r<dirh*m):
g[um].align = vk
g[vk].root = g[um].root
g[vk].align = g[vk].root
r = m
def _coord_horizontal_compact(self):
limit=getrecursionlimit()
N=len(self.layers)+10
if N>limit:
setrecursionlimit(N)
dirh,dirv = self.dirh,self.dirv
g = self.grx
L = self.layers[::-dirv]
# recursive placement of blocks:
for l in L:
for v in l[::dirh]:
if g[v].root is v:
self.__place_block(v)
setrecursionlimit(limit)
# mirror all nodes if right-aligned:
if dirh==-1:
for l in L:
for v in l:
x = g[v].X
if x: g[v].X = -x
# then assign x-coord of its root:
inf=float('infinity')
rb=inf
for l in L:
for v in l[::dirh]:
g[v].x[self.dirvh] = g[g[v].root].X
rs = g[g[v].root].sink
s = g[rs].shift
if s<inf:
g[v].x[self.dirvh] += dirh*s
rb = min(rb,g[v].x[self.dirvh])
# normalize to 0, and reinit root/align/sink/shift/X
for l in self.layers:
for v in l:
#g[v].x[dirvh] -= rb
g[v].root = g[v].align = g[v].sink = v
g[v].shift = inf
g[v].X = None
# TODO: rewrite in iterative form to avoid recursion limit...
def __place_block(self,v):
g = self.grx
if g[v].X==None:
# every block is initially placed at x=0
g[v].X = 0.0
# place block in which v belongs:
w = v
while 1:
j = g[w].pos-self.dirh # predecessor in rank must be placed
r = g[w].rank
if 0<= j <len(self.layers[r]):
wprec = self.layers[r][j]
delta = self.xspace+(wprec.view.w + w.view.w)/2. # abs positive minimum displ.
# take root and place block:
u = g[wprec].root
self.__place_block(u)
# set sink as sink of prec-block root
if g[v].sink is v:
g[v].sink = g[u].sink
if g[v].sink != g[u].sink:
s = g[u].sink
newshift = g[v].X-(g[u].X+delta)
g[s].shift = min(g[s].shift,newshift)
else:
g[v].X = max(g[v].X,(g[u].X+delta))
# take next node to align in block:
w = g[w].align
# quit if self aligned
if w is v: break
def draw_edges(self):
"""Basic edge routing applied only for edges with dummy points.
Enhanced edge routing can be performed by using the apropriate
*route_with_xxx* functions from :ref:routing_ in the edges' view.
"""
for e in self.g.E():
if hasattr(e,'view'):
l=[]
r0,r1 = None,None
if e in self.ctrls:
D = self.ctrls[e]
r0,r1 = self.grx[e.v[0]].rank,self.grx[e.v[1]].rank
if r0<r1:
ranks = xrange(r0+1,r1)
else:
ranks = xrange(r0-1,r1,-1)
l = [D[r].view.xy for r in ranks]
l.insert(0,e.v[0].view.xy)
l.append(e.v[1].view.xy)
try:
self.route_edge(e,l)
except AttributeError:
pass
e.view.setpath(l)
|
bdcht/grandalf
|
grandalf/layouts.py
|
SugiyamaLayout.draw
|
python
|
def draw(self,N=1.5):
while N>0.5:
for (l,mvmt) in self.ordering_step():
pass
N = N-1
if N>0:
for (l,mvmt) in self.ordering_step(oneway=True):
pass
self.setxy()
self.draw_edges()
|
compute every node coordinates after converging to optimal ordering by N
rounds, and finally perform the edge routing.
|
train
|
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L406-L418
|
[
"def ordering_step(self,oneway=False):\n \"\"\"iterator that computes all vertices ordering in their layers\n (one layer after the other from top to bottom, to top again unless\n oneway is True).\n \"\"\"\n self.dirv=-1\n crossings = 0\n for l in self.layers:\n mvmt = l.order()\n crossings += mvmt\n yield (l,mvmt)\n if oneway or (crossings == 0):\n return\n self.dirv=+1\n while l:\n mvmt = l.order()\n yield (l,mvmt)\n l = l.nextlayer()\n",
"def setxy(self):\n \"\"\"computes all vertex coordinates (x,y) using\n an algorithm by Brandes & Kopf.\n \"\"\"\n self._edge_inverter()\n self._detect_alignment_conflicts()\n inf = float('infinity')\n # initialize vertex coordinates attributes:\n for l in self.layers:\n for v in l:\n self.grx[v].root = v\n self.grx[v].align = v\n self.grx[v].sink = v\n self.grx[v].shift = inf\n self.grx[v].X = None\n self.grx[v].x = [0.0]*4\n curvh = self.dirvh # save current dirvh value\n for dirvh in xrange(4):\n self.dirvh = dirvh\n self._coord_vertical_alignment()\n self._coord_horizontal_compact()\n self.dirvh = curvh # restore it\n # vertical coordinate assigment of all nodes:\n Y = 0\n for l in self.layers:\n dY = max([v.view.h/2. for v in l])\n for v in l:\n vx = sorted(self.grx[v].x)\n # mean of the 2 medians out of the 4 x-coord computed above:\n avgm = (vx[1]+vx[2])/2.\n # final xy-coordinates :\n v.view.xy = (avgm,Y+dY)\n Y += 2*dY+self.yspace\n self._edge_inverter()\n",
"def draw_edges(self):\n \"\"\"Basic edge routing applied only for edges with dummy points.\n Enhanced edge routing can be performed by using the apropriate\n *route_with_xxx* functions from :ref:routing_ in the edges' view.\n \"\"\"\n for e in self.g.E():\n if hasattr(e,'view'):\n l=[]\n r0,r1 = None,None\n if e in self.ctrls:\n D = self.ctrls[e]\n r0,r1 = self.grx[e.v[0]].rank,self.grx[e.v[1]].rank\n if r0<r1:\n ranks = xrange(r0+1,r1)\n else:\n ranks = xrange(r0-1,r1,-1)\n l = [D[r].view.xy for r in ranks]\n l.insert(0,e.v[0].view.xy)\n l.append(e.v[1].view.xy)\n try:\n self.route_edge(e,l)\n except AttributeError:\n pass\n e.view.setpath(l)\n"
] |
class SugiyamaLayout(object):
"""
The Sugiyama layout is the traditional "layered" graph layout called
*dot* in graphviz. This layout is quite efficient but heavily relies
on drawing heuristics. Adaptive drawing is limited to
extending the leaves only, but since the algorithm is quite fast
redrawing the entire graph (up to about a thousand nodes) gives
usually good results in less than a second.
The Sugiyama Layout Class takes as input a core_graph object and implements
an efficient drawing algorithm based on nodes dimensions provided through
a user-defined *view* property in each vertex.
Attributes:
dirvh (int): the current aligment state
order_inter (int): the default number of layer placement iterations
order_attr (str): set attribute name used for layer ordering
xspace (int): horizontal space between vertices in a layer
yspace (int): vertical space between layers
dw (int): default width of a vertex
dh (int): default height of a vertex
g (graph_core): the graph component reference
layers (list[Layer]): the list of layers
grx (dict): associate vertex (possibly dummy) with their sugiyama attributes
ctrls (dict): associate edge with all its vertices (including dummies)
dag (bool): the current acyclic state
initdone (bool): True if state is initialized (see init_all).
"""
def __init__(self,g):
from grandalf.utils.geometry import median_wh
# drawing parameters:
self.dirvh=0
self.order_iter = 8
self.order_attr = 'pos'
self.xspace = 20
self.yspace = 20
self.dw = 10
self.dh = 10
# For layered graphs, vertices and edges need to have some additional
# attributes that make sense only for this kind of layout:
# update graph struct:
self.g = g
self.layers = []
self.grx= {}
self.ctrls = {}
self.dag = False
for v in self.g.V():
assert hasattr(v,'view')
self.grx[v] = _sugiyama_vertex_attr()
self.dw,self.dh = median_wh([v.view for v in self.g.V()])
self.initdone = False
def init_all(self,roots=None,inverted_edges=None,optimize=False):
"""initializes the layout algorithm by computing roots (unless provided),
inverted edges (unless provided), vertices ranks and creates all dummy
vertices and layers.
Parameters:
roots (list[Vertex]): set *root* vertices (layer 0)
inverted_edges (list[Edge]): set edges to invert to have a DAG.
optimize (bool): optimize ranking if True (default False)
"""
if self.initdone: return
# For layered sugiyama algorithm, the input graph must be acyclic,
# so we must provide a list of root nodes and a list of inverted edges.
if roots==None:
roots = [v for v in self.g.sV if len(v.e_in())==0]
if inverted_edges==None:
L = self.g.get_scs_with_feedback(roots)
inverted_edges = [x for x in self.g.sE if x.feedback]
self.alt_e = inverted_edges
# assign rank to all vertices:
self.rank_all(roots,optimize)
# add dummy vertex/edge for 'long' edges:
for e in self.g.E():
self.setdummies(e)
# precompute some layers values:
for l in self.layers: l.setup(self)
self.initdone = True
def _edge_inverter(self):
for e in self.alt_e:
x,y = e.v
e.v = (y,x)
self.dag = not self.dag
if self.dag:
for e in self.g.degenerated_edges:
e.detach()
self.g.sE.remove(e)
else:
for e in self.g.degenerated_edges:
self.g.add_edge(e)
# internal state for alignment policy:
# dirvh=0 -> dirh=+1, dirv=-1: leftmost upper
# dirvh=1 -> dirh=-1, dirv=-1: rightmost upper
# dirvh=2 -> dirh=+1, dirv=+1: leftmost lower
# dirvh=3 -> dirh=-1, dirv=+1: rightmost lower
@property
def dirvh(self): return self.__dirvh
@property
def dirv(self): return self.__dirv
@property
def dirh(self): return self.__dirh
@dirvh.setter
def dirvh(self,dirvh):
assert dirvh in range(4)
self.__dirvh=dirvh
self.__dirh,self.__dirv={0:(1,-1), 1:(-1,-1), 2:(1,1), 3:(-1,1)}[dirvh]
@dirv.setter
def dirv(self,dirv):
assert dirv in (-1,+1)
dirvh = (dirv+1)+(1-self.__dirh)//2
self.dirvh = dirvh
@dirh.setter
def dirh(self,dirh):
assert dirh in (-1,+1)
dirvh = (self.__dirv+1)+(1-dirh)//2
self.dirvh = dirvh
def rank_all(self,roots,optimize=False):
"""Computes rank of all vertices.
add provided roots to rank 0 vertices,
otherwise update ranking from provided roots.
The initial rank is based on precedence relationships,
optimal ranking may be derived from network flow (simplex).
"""
self._edge_inverter()
r = [x for x in self.g.sV if (len(x.e_in())==0 and x not in roots)]
self._rank_init(roots+r)
if optimize: self._rank_optimize()
self._edge_inverter()
def _rank_init(self,unranked):
"""Computes rank of provided unranked list of vertices and all
their children. A vertex will be asign a rank when all its
inward edges have been *scanned*. When a vertex is asigned
a rank, its outward edges are marked *scanned*.
"""
assert self.dag
scan = {}
# set rank of unranked based on its in-edges vertices ranks:
while len(unranked)>0:
l = []
for v in unranked:
self.setrank(v)
# mark out-edges has scan-able:
for e in v.e_out(): scan[e]=True
# check if out-vertices are rank-able:
for x in v.N(+1):
if not (False in [scan.get(e,False) for e in x.e_in()]):
if x not in l: l.append(x)
unranked=l
def _rank_optimize(self):
"""optimize ranking by pushing long edges toward lower layers as much as possible.
see other interersting network flow solver to minimize total edge length
(http://jgaa.info/accepted/2005/EiglspergerSiebenhallerKaufmann2005.9.3.pdf)
"""
assert self.dag
for l in reversed(self.layers):
for v in l:
gv = self.grx[v]
for x in v.N(-1):
if all((self.grx[y].rank>=gv.rank for y in x.N(+1))):
gx = self.grx[x]
self.layers[gx.rank].remove(x)
gx.rank = gv.rank-1
self.layers[gv.rank-1].append(x)
def setrank(self,v):
"""set rank value for vertex v and add it to the corresponding layer.
The Layer is created if it is the first vertex with this rank.
"""
assert self.dag
r=max([self.grx[x].rank for x in v.N(-1)]+[-1])+1
self.grx[v].rank=r
# add it to its layer:
try:
self.layers[r].append(v)
except IndexError:
assert r==len(self.layers)
self.layers.append(Layer([v]))
def dummyctrl(self,r,ctrl):
"""creates a DummyVertex at rank r inserted in the ctrl dict
of the associated edge and layer.
Arguments:
r (int): rank value
ctrl (dict): the edge's control vertices
Returns:
DummyVertex : the created DummyVertex.
"""
dv = DummyVertex(r)
dv.view.w,dv.view.h=self.dw,self.dh
self.grx[dv] = dv
dv.ctrl = ctrl
ctrl[r] = dv
self.layers[r].append(dv)
return dv
def setdummies(self,e):
"""creates and defines all needed dummy vertices for edge e.
"""
v0,v1 = e.v
r0,r1 = self.grx[v0].rank,self.grx[v1].rank
if r0>r1:
assert e in self.alt_e
v0,v1 = v1,v0
r0,r1 = r1,r0
if (r1-r0)>1:
# "dummy vertices" are stored in the edge ctrl dict,
# keyed by their rank in layers.
ctrl=self.ctrls[e]={}
ctrl[r0]=v0
ctrl[r1]=v1
for r in xrange(r0+1,r1):
self.dummyctrl(r,ctrl)
def draw_step(self):
"""iterator that computes all vertices coordinates and edge routing after
just one step (one layer after the other from top to bottom to top).
Purely inefficient ! Use it only for "animation" or debugging purpose.
"""
ostep = self.ordering_step()
for s in ostep:
self.setxy()
self.draw_edges()
yield s
def ordering_step(self,oneway=False):
"""iterator that computes all vertices ordering in their layers
(one layer after the other from top to bottom, to top again unless
oneway is True).
"""
self.dirv=-1
crossings = 0
for l in self.layers:
mvmt = l.order()
crossings += mvmt
yield (l,mvmt)
if oneway or (crossings == 0):
return
self.dirv=+1
while l:
mvmt = l.order()
yield (l,mvmt)
l = l.nextlayer()
def setxy(self):
"""computes all vertex coordinates (x,y) using
an algorithm by Brandes & Kopf.
"""
self._edge_inverter()
self._detect_alignment_conflicts()
inf = float('infinity')
# initialize vertex coordinates attributes:
for l in self.layers:
for v in l:
self.grx[v].root = v
self.grx[v].align = v
self.grx[v].sink = v
self.grx[v].shift = inf
self.grx[v].X = None
self.grx[v].x = [0.0]*4
curvh = self.dirvh # save current dirvh value
for dirvh in xrange(4):
self.dirvh = dirvh
self._coord_vertical_alignment()
self._coord_horizontal_compact()
self.dirvh = curvh # restore it
# vertical coordinate assigment of all nodes:
Y = 0
for l in self.layers:
dY = max([v.view.h/2. for v in l])
for v in l:
vx = sorted(self.grx[v].x)
# mean of the 2 medians out of the 4 x-coord computed above:
avgm = (vx[1]+vx[2])/2.
# final xy-coordinates :
v.view.xy = (avgm,Y+dY)
Y += 2*dY+self.yspace
self._edge_inverter()
def _detect_alignment_conflicts(self):
"""mark conflicts between edges:
inner edges are edges between dummy nodes
type 0 is regular crossing regular (or sharing vertex)
type 1 is inner crossing regular (targeted crossings)
type 2 is inner crossing inner (avoided by reduce_crossings phase)
"""
curvh = self.dirvh # save current dirvh value
self.dirvh=0
self.conflicts = []
for L in self.layers:
last = len(L)-1
prev = L.prevlayer()
if not prev: continue
k0=0
k1_init=len(prev)-1
l=0
for l1,v in enumerate(L):
if not self.grx[v].dummy: continue
if l1==last or v.inner(-1):
k1=k1_init
if v.inner(-1):
k1=self.grx[v.N(-1)[-1]].pos
for vl in L[l:l1+1]:
for vk in L._neighbors(vl):
k = self.grx[vk].pos
if (k<k0 or k>k1):
self.conflicts.append((vk,vl))
l=l1+1
k0=k1
self.dirvh = curvh # restore it
def _coord_vertical_alignment(self):
"""performs vertical alignment according to current dirvh internal state.
"""
dirh,dirv = self.dirh,self.dirv
g = self.grx
for l in self.layers[::-dirv]:
if not l.prevlayer(): continue
r=None
for vk in l[::dirh]:
for m in l._medianindex(vk):
# take the median node in dirv layer:
um = l.prevlayer()[m]
# if vk is "free" align it with um's root
if g[vk].align is vk:
if dirv==1: vpair = (vk,um)
else: vpair = (um,vk)
# if vk<->um link is used for alignment
if (vpair not in self.conflicts) and \
(r==None or dirh*r<dirh*m):
g[um].align = vk
g[vk].root = g[um].root
g[vk].align = g[vk].root
r = m
def _coord_horizontal_compact(self):
limit=getrecursionlimit()
N=len(self.layers)+10
if N>limit:
setrecursionlimit(N)
dirh,dirv = self.dirh,self.dirv
g = self.grx
L = self.layers[::-dirv]
# recursive placement of blocks:
for l in L:
for v in l[::dirh]:
if g[v].root is v:
self.__place_block(v)
setrecursionlimit(limit)
# mirror all nodes if right-aligned:
if dirh==-1:
for l in L:
for v in l:
x = g[v].X
if x: g[v].X = -x
# then assign x-coord of its root:
inf=float('infinity')
rb=inf
for l in L:
for v in l[::dirh]:
g[v].x[self.dirvh] = g[g[v].root].X
rs = g[g[v].root].sink
s = g[rs].shift
if s<inf:
g[v].x[self.dirvh] += dirh*s
rb = min(rb,g[v].x[self.dirvh])
# normalize to 0, and reinit root/align/sink/shift/X
for l in self.layers:
for v in l:
#g[v].x[dirvh] -= rb
g[v].root = g[v].align = g[v].sink = v
g[v].shift = inf
g[v].X = None
# TODO: rewrite in iterative form to avoid recursion limit...
def __place_block(self,v):
g = self.grx
if g[v].X==None:
# every block is initially placed at x=0
g[v].X = 0.0
# place block in which v belongs:
w = v
while 1:
j = g[w].pos-self.dirh # predecessor in rank must be placed
r = g[w].rank
if 0<= j <len(self.layers[r]):
wprec = self.layers[r][j]
delta = self.xspace+(wprec.view.w + w.view.w)/2. # abs positive minimum displ.
# take root and place block:
u = g[wprec].root
self.__place_block(u)
# set sink as sink of prec-block root
if g[v].sink is v:
g[v].sink = g[u].sink
if g[v].sink != g[u].sink:
s = g[u].sink
newshift = g[v].X-(g[u].X+delta)
g[s].shift = min(g[s].shift,newshift)
else:
g[v].X = max(g[v].X,(g[u].X+delta))
# take next node to align in block:
w = g[w].align
# quit if self aligned
if w is v: break
def draw_edges(self):
"""Basic edge routing applied only for edges with dummy points.
Enhanced edge routing can be performed by using the apropriate
*route_with_xxx* functions from :ref:routing_ in the edges' view.
"""
for e in self.g.E():
if hasattr(e,'view'):
l=[]
r0,r1 = None,None
if e in self.ctrls:
D = self.ctrls[e]
r0,r1 = self.grx[e.v[0]].rank,self.grx[e.v[1]].rank
if r0<r1:
ranks = xrange(r0+1,r1)
else:
ranks = xrange(r0-1,r1,-1)
l = [D[r].view.xy for r in ranks]
l.insert(0,e.v[0].view.xy)
l.append(e.v[1].view.xy)
try:
self.route_edge(e,l)
except AttributeError:
pass
e.view.setpath(l)
|
bdcht/grandalf
|
grandalf/layouts.py
|
SugiyamaLayout.rank_all
|
python
|
def rank_all(self,roots,optimize=False):
self._edge_inverter()
r = [x for x in self.g.sV if (len(x.e_in())==0 and x not in roots)]
self._rank_init(roots+r)
if optimize: self._rank_optimize()
self._edge_inverter()
|
Computes rank of all vertices.
add provided roots to rank 0 vertices,
otherwise update ranking from provided roots.
The initial rank is based on precedence relationships,
optimal ranking may be derived from network flow (simplex).
|
train
|
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L461-L472
|
[
"def _edge_inverter(self):\n for e in self.alt_e:\n x,y = e.v\n e.v = (y,x)\n self.dag = not self.dag\n if self.dag:\n for e in self.g.degenerated_edges:\n e.detach()\n self.g.sE.remove(e)\n else:\n for e in self.g.degenerated_edges:\n self.g.add_edge(e)\n",
"def _rank_init(self,unranked):\n \"\"\"Computes rank of provided unranked list of vertices and all\n their children. A vertex will be asign a rank when all its \n inward edges have been *scanned*. When a vertex is asigned\n a rank, its outward edges are marked *scanned*.\n \"\"\"\n assert self.dag\n scan = {}\n # set rank of unranked based on its in-edges vertices ranks:\n while len(unranked)>0:\n l = []\n for v in unranked:\n self.setrank(v)\n # mark out-edges has scan-able:\n for e in v.e_out(): scan[e]=True\n # check if out-vertices are rank-able:\n for x in v.N(+1):\n if not (False in [scan.get(e,False) for e in x.e_in()]):\n if x not in l: l.append(x)\n unranked=l\n",
"def _rank_optimize(self):\n \"\"\"optimize ranking by pushing long edges toward lower layers as much as possible.\n see other interersting network flow solver to minimize total edge length\n (http://jgaa.info/accepted/2005/EiglspergerSiebenhallerKaufmann2005.9.3.pdf)\n \"\"\"\n assert self.dag\n for l in reversed(self.layers):\n for v in l:\n gv = self.grx[v]\n for x in v.N(-1):\n if all((self.grx[y].rank>=gv.rank for y in x.N(+1))):\n gx = self.grx[x]\n self.layers[gx.rank].remove(x)\n gx.rank = gv.rank-1\n self.layers[gv.rank-1].append(x)\n"
] |
class SugiyamaLayout(object):
"""
The Sugiyama layout is the traditional "layered" graph layout called
*dot* in graphviz. This layout is quite efficient but heavily relies
on drawing heuristics. Adaptive drawing is limited to
extending the leaves only, but since the algorithm is quite fast
redrawing the entire graph (up to about a thousand nodes) gives
usually good results in less than a second.
The Sugiyama Layout Class takes as input a core_graph object and implements
an efficient drawing algorithm based on nodes dimensions provided through
a user-defined *view* property in each vertex.
Attributes:
dirvh (int): the current aligment state
order_inter (int): the default number of layer placement iterations
order_attr (str): set attribute name used for layer ordering
xspace (int): horizontal space between vertices in a layer
yspace (int): vertical space between layers
dw (int): default width of a vertex
dh (int): default height of a vertex
g (graph_core): the graph component reference
layers (list[Layer]): the list of layers
grx (dict): associate vertex (possibly dummy) with their sugiyama attributes
ctrls (dict): associate edge with all its vertices (including dummies)
dag (bool): the current acyclic state
initdone (bool): True if state is initialized (see init_all).
"""
def __init__(self,g):
from grandalf.utils.geometry import median_wh
# drawing parameters:
self.dirvh=0
self.order_iter = 8
self.order_attr = 'pos'
self.xspace = 20
self.yspace = 20
self.dw = 10
self.dh = 10
# For layered graphs, vertices and edges need to have some additional
# attributes that make sense only for this kind of layout:
# update graph struct:
self.g = g
self.layers = []
self.grx= {}
self.ctrls = {}
self.dag = False
for v in self.g.V():
assert hasattr(v,'view')
self.grx[v] = _sugiyama_vertex_attr()
self.dw,self.dh = median_wh([v.view for v in self.g.V()])
self.initdone = False
def init_all(self,roots=None,inverted_edges=None,optimize=False):
"""initializes the layout algorithm by computing roots (unless provided),
inverted edges (unless provided), vertices ranks and creates all dummy
vertices and layers.
Parameters:
roots (list[Vertex]): set *root* vertices (layer 0)
inverted_edges (list[Edge]): set edges to invert to have a DAG.
optimize (bool): optimize ranking if True (default False)
"""
if self.initdone: return
# For layered sugiyama algorithm, the input graph must be acyclic,
# so we must provide a list of root nodes and a list of inverted edges.
if roots==None:
roots = [v for v in self.g.sV if len(v.e_in())==0]
if inverted_edges==None:
L = self.g.get_scs_with_feedback(roots)
inverted_edges = [x for x in self.g.sE if x.feedback]
self.alt_e = inverted_edges
# assign rank to all vertices:
self.rank_all(roots,optimize)
# add dummy vertex/edge for 'long' edges:
for e in self.g.E():
self.setdummies(e)
# precompute some layers values:
for l in self.layers: l.setup(self)
self.initdone = True
def draw(self,N=1.5):
"""compute every node coordinates after converging to optimal ordering by N
rounds, and finally perform the edge routing.
"""
while N>0.5:
for (l,mvmt) in self.ordering_step():
pass
N = N-1
if N>0:
for (l,mvmt) in self.ordering_step(oneway=True):
pass
self.setxy()
self.draw_edges()
def _edge_inverter(self):
for e in self.alt_e:
x,y = e.v
e.v = (y,x)
self.dag = not self.dag
if self.dag:
for e in self.g.degenerated_edges:
e.detach()
self.g.sE.remove(e)
else:
for e in self.g.degenerated_edges:
self.g.add_edge(e)
# internal state for alignment policy:
# dirvh=0 -> dirh=+1, dirv=-1: leftmost upper
# dirvh=1 -> dirh=-1, dirv=-1: rightmost upper
# dirvh=2 -> dirh=+1, dirv=+1: leftmost lower
# dirvh=3 -> dirh=-1, dirv=+1: rightmost lower
@property
def dirvh(self): return self.__dirvh
@property
def dirv(self): return self.__dirv
@property
def dirh(self): return self.__dirh
@dirvh.setter
def dirvh(self,dirvh):
assert dirvh in range(4)
self.__dirvh=dirvh
self.__dirh,self.__dirv={0:(1,-1), 1:(-1,-1), 2:(1,1), 3:(-1,1)}[dirvh]
@dirv.setter
def dirv(self,dirv):
assert dirv in (-1,+1)
dirvh = (dirv+1)+(1-self.__dirh)//2
self.dirvh = dirvh
@dirh.setter
def dirh(self,dirh):
assert dirh in (-1,+1)
dirvh = (self.__dirv+1)+(1-dirh)//2
self.dirvh = dirvh
def _rank_init(self,unranked):
"""Computes rank of provided unranked list of vertices and all
their children. A vertex will be assigned a rank when all its
inward edges have been *scanned*. When a vertex is assigned
a rank, its outward edges are marked *scanned*.
"""
assert self.dag
scan = {}
# set rank of unranked based on its in-edges vertices ranks:
while len(unranked)>0:
l = []
for v in unranked:
self.setrank(v)
# mark out-edges as scannable:
for e in v.e_out(): scan[e]=True
# check if out-vertices are rank-able:
for x in v.N(+1):
if not (False in [scan.get(e,False) for e in x.e_in()]):
if x not in l: l.append(x)
unranked=l
def _rank_optimize(self):
"""optimize ranking by pushing long edges toward lower layers as much as possible.
see other interesting network flow solvers to minimize total edge length
(http://jgaa.info/accepted/2005/EiglspergerSiebenhallerKaufmann2005.9.3.pdf)
"""
assert self.dag
for l in reversed(self.layers):
for v in l:
gv = self.grx[v]
for x in v.N(-1):
if all((self.grx[y].rank>=gv.rank for y in x.N(+1))):
gx = self.grx[x]
self.layers[gx.rank].remove(x)
gx.rank = gv.rank-1
self.layers[gv.rank-1].append(x)
def setrank(self,v):
"""set rank value for vertex v and add it to the corresponding layer.
The Layer is created if it is the first vertex with this rank.
"""
assert self.dag
r=max([self.grx[x].rank for x in v.N(-1)]+[-1])+1
self.grx[v].rank=r
# add it to its layer:
try:
self.layers[r].append(v)
except IndexError:
assert r==len(self.layers)
self.layers.append(Layer([v]))
def dummyctrl(self,r,ctrl):
"""creates a DummyVertex at rank r inserted in the ctrl dict
of the associated edge and layer.
Arguments:
r (int): rank value
ctrl (dict): the edge's control vertices
Returns:
DummyVertex : the created DummyVertex.
"""
dv = DummyVertex(r)
dv.view.w,dv.view.h=self.dw,self.dh
self.grx[dv] = dv
dv.ctrl = ctrl
ctrl[r] = dv
self.layers[r].append(dv)
return dv
def setdummies(self,e):
"""creates and defines all needed dummy vertices for edge e.
"""
v0,v1 = e.v
r0,r1 = self.grx[v0].rank,self.grx[v1].rank
if r0>r1:
assert e in self.alt_e
v0,v1 = v1,v0
r0,r1 = r1,r0
if (r1-r0)>1:
# "dummy vertices" are stored in the edge ctrl dict,
# keyed by their rank in layers.
ctrl=self.ctrls[e]={}
ctrl[r0]=v0
ctrl[r1]=v1
for r in xrange(r0+1,r1):
self.dummyctrl(r,ctrl)
def draw_step(self):
"""iterator that computes all vertices coordinates and edge routing after
just one step (one layer after the other from top to bottom to top).
Purely inefficient ! Use it only for "animation" or debugging purpose.
"""
ostep = self.ordering_step()
for s in ostep:
self.setxy()
self.draw_edges()
yield s
def ordering_step(self,oneway=False):
"""iterator that computes all vertices ordering in their layers
(one layer after the other from top to bottom, to top again unless
oneway is True).
"""
self.dirv=-1
crossings = 0
for l in self.layers:
mvmt = l.order()
crossings += mvmt
yield (l,mvmt)
if oneway or (crossings == 0):
return
self.dirv=+1
while l:
mvmt = l.order()
yield (l,mvmt)
l = l.nextlayer()
def setxy(self):
"""computes all vertex coordinates (x,y) using
an algorithm by Brandes & Kopf.
"""
self._edge_inverter()
self._detect_alignment_conflicts()
inf = float('infinity')
# initialize vertex coordinates attributes:
for l in self.layers:
for v in l:
self.grx[v].root = v
self.grx[v].align = v
self.grx[v].sink = v
self.grx[v].shift = inf
self.grx[v].X = None
self.grx[v].x = [0.0]*4
curvh = self.dirvh # save current dirvh value
for dirvh in xrange(4):
self.dirvh = dirvh
self._coord_vertical_alignment()
self._coord_horizontal_compact()
self.dirvh = curvh # restore it
# vertical coordinate assignment of all nodes:
Y = 0
for l in self.layers:
dY = max([v.view.h/2. for v in l])
for v in l:
vx = sorted(self.grx[v].x)
# mean of the 2 medians out of the 4 x-coord computed above:
avgm = (vx[1]+vx[2])/2.
# final xy-coordinates :
v.view.xy = (avgm,Y+dY)
Y += 2*dY+self.yspace
self._edge_inverter()
def _detect_alignment_conflicts(self):
"""mark conflicts between edges:
inner edges are edges between dummy nodes
type 0 is regular crossing regular (or sharing vertex)
type 1 is inner crossing regular (targeted crossings)
type 2 is inner crossing inner (avoided by reduce_crossings phase)
"""
curvh = self.dirvh # save current dirvh value
self.dirvh=0
self.conflicts = []
for L in self.layers:
last = len(L)-1
prev = L.prevlayer()
if not prev: continue
k0=0
k1_init=len(prev)-1
l=0
for l1,v in enumerate(L):
if not self.grx[v].dummy: continue
if l1==last or v.inner(-1):
k1=k1_init
if v.inner(-1):
k1=self.grx[v.N(-1)[-1]].pos
for vl in L[l:l1+1]:
for vk in L._neighbors(vl):
k = self.grx[vk].pos
if (k<k0 or k>k1):
self.conflicts.append((vk,vl))
l=l1+1
k0=k1
self.dirvh = curvh # restore it
def _coord_vertical_alignment(self):
"""performs vertical alignment according to current dirvh internal state.
"""
dirh,dirv = self.dirh,self.dirv
g = self.grx
for l in self.layers[::-dirv]:
if not l.prevlayer(): continue
r=None
for vk in l[::dirh]:
for m in l._medianindex(vk):
# take the median node in dirv layer:
um = l.prevlayer()[m]
# if vk is "free" align it with um's root
if g[vk].align is vk:
if dirv==1: vpair = (vk,um)
else: vpair = (um,vk)
# if vk<->um link is used for alignment
if (vpair not in self.conflicts) and \
(r==None or dirh*r<dirh*m):
g[um].align = vk
g[vk].root = g[um].root
g[vk].align = g[vk].root
r = m
def _coord_horizontal_compact(self):
limit=getrecursionlimit()
N=len(self.layers)+10
if N>limit:
setrecursionlimit(N)
dirh,dirv = self.dirh,self.dirv
g = self.grx
L = self.layers[::-dirv]
# recursive placement of blocks:
for l in L:
for v in l[::dirh]:
if g[v].root is v:
self.__place_block(v)
setrecursionlimit(limit)
# mirror all nodes if right-aligned:
if dirh==-1:
for l in L:
for v in l:
x = g[v].X
if x: g[v].X = -x
# then assign x-coord of its root:
inf=float('infinity')
rb=inf
for l in L:
for v in l[::dirh]:
g[v].x[self.dirvh] = g[g[v].root].X
rs = g[g[v].root].sink
s = g[rs].shift
if s<inf:
g[v].x[self.dirvh] += dirh*s
rb = min(rb,g[v].x[self.dirvh])
# normalize to 0, and reinit root/align/sink/shift/X
for l in self.layers:
for v in l:
#g[v].x[dirvh] -= rb
g[v].root = g[v].align = g[v].sink = v
g[v].shift = inf
g[v].X = None
# TODO: rewrite in iterative form to avoid recursion limit...
def __place_block(self,v):
g = self.grx
if g[v].X==None:
# every block is initially placed at x=0
g[v].X = 0.0
# place block in which v belongs:
w = v
while 1:
j = g[w].pos-self.dirh # predecessor in rank must be placed
r = g[w].rank
if 0<= j <len(self.layers[r]):
wprec = self.layers[r][j]
delta = self.xspace+(wprec.view.w + w.view.w)/2. # abs positive minimum displ.
# take root and place block:
u = g[wprec].root
self.__place_block(u)
# set sink as sink of prec-block root
if g[v].sink is v:
g[v].sink = g[u].sink
if g[v].sink != g[u].sink:
s = g[u].sink
newshift = g[v].X-(g[u].X+delta)
g[s].shift = min(g[s].shift,newshift)
else:
g[v].X = max(g[v].X,(g[u].X+delta))
# take next node to align in block:
w = g[w].align
# quit if self aligned
if w is v: break
def draw_edges(self):
"""Basic edge routing applied only for edges with dummy points.
Enhanced edge routing can be performed by using the appropriate
*route_with_xxx* functions from :ref:routing_ in the edges' view.
"""
for e in self.g.E():
if hasattr(e,'view'):
l=[]
r0,r1 = None,None
if e in self.ctrls:
D = self.ctrls[e]
r0,r1 = self.grx[e.v[0]].rank,self.grx[e.v[1]].rank
if r0<r1:
ranks = xrange(r0+1,r1)
else:
ranks = xrange(r0-1,r1,-1)
l = [D[r].view.xy for r in ranks]
l.insert(0,e.v[0].view.xy)
l.append(e.v[1].view.xy)
try:
self.route_edge(e,l)
except AttributeError:
pass
e.view.setpath(l)
|
bdcht/grandalf
|
grandalf/layouts.py
|
SugiyamaLayout._rank_init
|
python
|
def _rank_init(self,unranked):
assert self.dag
scan = {}
# set rank of unranked based on its in-edges vertices ranks:
while len(unranked)>0:
l = []
for v in unranked:
self.setrank(v)
# mark out-edges as scannable:
for e in v.e_out(): scan[e]=True
# check if out-vertices are rank-able:
for x in v.N(+1):
if not (False in [scan.get(e,False) for e in x.e_in()]):
if x not in l: l.append(x)
unranked=l
|
Computes rank of provided unranked list of vertices and all
their children. A vertex will be assigned a rank when all its
inward edges have been *scanned*. When a vertex is assigned
a rank, its outward edges are marked *scanned*.
|
train
|
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L474-L493
|
[
"def setrank(self,v):\n \"\"\"set rank value for vertex v and add it to the corresponding layer.\n The Layer is created if it is the first vertex with this rank.\n \"\"\"\n assert self.dag\n r=max([self.grx[x].rank for x in v.N(-1)]+[-1])+1\n self.grx[v].rank=r\n # add it to its layer:\n try:\n self.layers[r].append(v)\n except IndexError:\n assert r==len(self.layers)\n self.layers.append(Layer([v]))\n"
] |
class SugiyamaLayout(object):
"""
The Sugiyama layout is the traditional "layered" graph layout called
*dot* in graphviz. This layout is quite efficient but heavily relies
on drawing heuristics. Adaptive drawing is limited to
extending the leaves only, but since the algorithm is quite fast
redrawing the entire graph (up to about a thousand nodes) gives
usually good results in less than a second.
The Sugiyama Layout Class takes as input a core_graph object and implements
an efficient drawing algorithm based on nodes dimensions provided through
a user-defined *view* property in each vertex.
Attributes:
dirvh (int): the current alignment state
order_iter (int): the default number of layer placement iterations
order_attr (str): set attribute name used for layer ordering
xspace (int): horizontal space between vertices in a layer
yspace (int): vertical space between layers
dw (int): default width of a vertex
dh (int): default height of a vertex
g (graph_core): the graph component reference
layers (list[Layer]): the list of layers
grx (dict): associate vertex (possibly dummy) with their sugiyama attributes
ctrls (dict): associate edge with all its vertices (including dummies)
dag (bool): the current acyclic state
initdone (bool): True if state is initialized (see init_all).
"""
def __init__(self,g):
from grandalf.utils.geometry import median_wh
# drawing parameters:
self.dirvh=0
self.order_iter = 8
self.order_attr = 'pos'
self.xspace = 20
self.yspace = 20
self.dw = 10
self.dh = 10
# For layered graphs, vertices and edges need to have some additional
# attributes that make sense only for this kind of layout:
# update graph struct:
self.g = g
self.layers = []
self.grx= {}
self.ctrls = {}
self.dag = False
for v in self.g.V():
assert hasattr(v,'view')
self.grx[v] = _sugiyama_vertex_attr()
self.dw,self.dh = median_wh([v.view for v in self.g.V()])
self.initdone = False
def init_all(self,roots=None,inverted_edges=None,optimize=False):
"""initializes the layout algorithm by computing roots (unless provided),
inverted edges (unless provided), vertices ranks and creates all dummy
vertices and layers.
Parameters:
roots (list[Vertex]): set *root* vertices (layer 0)
inverted_edges (list[Edge]): set edges to invert to have a DAG.
optimize (bool): optimize ranking if True (default False)
"""
if self.initdone: return
# For layered sugiyama algorithm, the input graph must be acyclic,
# so we must provide a list of root nodes and a list of inverted edges.
if roots==None:
roots = [v for v in self.g.sV if len(v.e_in())==0]
if inverted_edges==None:
L = self.g.get_scs_with_feedback(roots)
inverted_edges = [x for x in self.g.sE if x.feedback]
self.alt_e = inverted_edges
# assign rank to all vertices:
self.rank_all(roots,optimize)
# add dummy vertex/edge for 'long' edges:
for e in self.g.E():
self.setdummies(e)
# precompute some layers values:
for l in self.layers: l.setup(self)
self.initdone = True
def draw(self,N=1.5):
"""compute every node coordinates after converging to optimal ordering by N
rounds, and finally perform the edge routing.
"""
while N>0.5:
for (l,mvmt) in self.ordering_step():
pass
N = N-1
if N>0:
for (l,mvmt) in self.ordering_step(oneway=True):
pass
self.setxy()
self.draw_edges()
def _edge_inverter(self):
for e in self.alt_e:
x,y = e.v
e.v = (y,x)
self.dag = not self.dag
if self.dag:
for e in self.g.degenerated_edges:
e.detach()
self.g.sE.remove(e)
else:
for e in self.g.degenerated_edges:
self.g.add_edge(e)
# internal state for alignment policy:
# dirvh=0 -> dirh=+1, dirv=-1: leftmost upper
# dirvh=1 -> dirh=-1, dirv=-1: rightmost upper
# dirvh=2 -> dirh=+1, dirv=+1: leftmost lower
# dirvh=3 -> dirh=-1, dirv=+1: rightmost lower
@property
def dirvh(self): return self.__dirvh
@property
def dirv(self): return self.__dirv
@property
def dirh(self): return self.__dirh
@dirvh.setter
def dirvh(self,dirvh):
assert dirvh in range(4)
self.__dirvh=dirvh
self.__dirh,self.__dirv={0:(1,-1), 1:(-1,-1), 2:(1,1), 3:(-1,1)}[dirvh]
@dirv.setter
def dirv(self,dirv):
assert dirv in (-1,+1)
dirvh = (dirv+1)+(1-self.__dirh)//2
self.dirvh = dirvh
@dirh.setter
def dirh(self,dirh):
assert dirh in (-1,+1)
dirvh = (self.__dirv+1)+(1-dirh)//2
self.dirvh = dirvh
def rank_all(self,roots,optimize=False):
"""Computes rank of all vertices.
add provided roots to rank 0 vertices,
otherwise update ranking from provided roots.
The initial rank is based on precedence relationships,
optimal ranking may be derived from network flow (simplex).
"""
self._edge_inverter()
r = [x for x in self.g.sV if (len(x.e_in())==0 and x not in roots)]
self._rank_init(roots+r)
if optimize: self._rank_optimize()
self._edge_inverter()
def _rank_optimize(self):
"""optimize ranking by pushing long edges toward lower layers as much as possible.
see other interesting network flow solvers to minimize total edge length
(http://jgaa.info/accepted/2005/EiglspergerSiebenhallerKaufmann2005.9.3.pdf)
"""
assert self.dag
for l in reversed(self.layers):
for v in l:
gv = self.grx[v]
for x in v.N(-1):
if all((self.grx[y].rank>=gv.rank for y in x.N(+1))):
gx = self.grx[x]
self.layers[gx.rank].remove(x)
gx.rank = gv.rank-1
self.layers[gv.rank-1].append(x)
def setrank(self,v):
"""set rank value for vertex v and add it to the corresponding layer.
The Layer is created if it is the first vertex with this rank.
"""
assert self.dag
r=max([self.grx[x].rank for x in v.N(-1)]+[-1])+1
self.grx[v].rank=r
# add it to its layer:
try:
self.layers[r].append(v)
except IndexError:
assert r==len(self.layers)
self.layers.append(Layer([v]))
def dummyctrl(self,r,ctrl):
"""creates a DummyVertex at rank r inserted in the ctrl dict
of the associated edge and layer.
Arguments:
r (int): rank value
ctrl (dict): the edge's control vertices
Returns:
DummyVertex : the created DummyVertex.
"""
dv = DummyVertex(r)
dv.view.w,dv.view.h=self.dw,self.dh
self.grx[dv] = dv
dv.ctrl = ctrl
ctrl[r] = dv
self.layers[r].append(dv)
return dv
def setdummies(self,e):
"""creates and defines all needed dummy vertices for edge e.
"""
v0,v1 = e.v
r0,r1 = self.grx[v0].rank,self.grx[v1].rank
if r0>r1:
assert e in self.alt_e
v0,v1 = v1,v0
r0,r1 = r1,r0
if (r1-r0)>1:
# "dummy vertices" are stored in the edge ctrl dict,
# keyed by their rank in layers.
ctrl=self.ctrls[e]={}
ctrl[r0]=v0
ctrl[r1]=v1
for r in xrange(r0+1,r1):
self.dummyctrl(r,ctrl)
def draw_step(self):
"""iterator that computes all vertices coordinates and edge routing after
just one step (one layer after the other from top to bottom to top).
Purely inefficient ! Use it only for "animation" or debugging purpose.
"""
ostep = self.ordering_step()
for s in ostep:
self.setxy()
self.draw_edges()
yield s
def ordering_step(self,oneway=False):
"""iterator that computes all vertices ordering in their layers
(one layer after the other from top to bottom, to top again unless
oneway is True).
"""
self.dirv=-1
crossings = 0
for l in self.layers:
mvmt = l.order()
crossings += mvmt
yield (l,mvmt)
if oneway or (crossings == 0):
return
self.dirv=+1
while l:
mvmt = l.order()
yield (l,mvmt)
l = l.nextlayer()
def setxy(self):
"""computes all vertex coordinates (x,y) using
an algorithm by Brandes & Kopf.
"""
self._edge_inverter()
self._detect_alignment_conflicts()
inf = float('infinity')
# initialize vertex coordinates attributes:
for l in self.layers:
for v in l:
self.grx[v].root = v
self.grx[v].align = v
self.grx[v].sink = v
self.grx[v].shift = inf
self.grx[v].X = None
self.grx[v].x = [0.0]*4
curvh = self.dirvh # save current dirvh value
for dirvh in xrange(4):
self.dirvh = dirvh
self._coord_vertical_alignment()
self._coord_horizontal_compact()
self.dirvh = curvh # restore it
# vertical coordinate assignment of all nodes:
Y = 0
for l in self.layers:
dY = max([v.view.h/2. for v in l])
for v in l:
vx = sorted(self.grx[v].x)
# mean of the 2 medians out of the 4 x-coord computed above:
avgm = (vx[1]+vx[2])/2.
# final xy-coordinates :
v.view.xy = (avgm,Y+dY)
Y += 2*dY+self.yspace
self._edge_inverter()
def _detect_alignment_conflicts(self):
"""mark conflicts between edges:
inner edges are edges between dummy nodes
type 0 is regular crossing regular (or sharing vertex)
type 1 is inner crossing regular (targeted crossings)
type 2 is inner crossing inner (avoided by reduce_crossings phase)
"""
curvh = self.dirvh # save current dirvh value
self.dirvh=0
self.conflicts = []
for L in self.layers:
last = len(L)-1
prev = L.prevlayer()
if not prev: continue
k0=0
k1_init=len(prev)-1
l=0
for l1,v in enumerate(L):
if not self.grx[v].dummy: continue
if l1==last or v.inner(-1):
k1=k1_init
if v.inner(-1):
k1=self.grx[v.N(-1)[-1]].pos
for vl in L[l:l1+1]:
for vk in L._neighbors(vl):
k = self.grx[vk].pos
if (k<k0 or k>k1):
self.conflicts.append((vk,vl))
l=l1+1
k0=k1
self.dirvh = curvh # restore it
def _coord_vertical_alignment(self):
"""performs vertical alignment according to current dirvh internal state.
"""
dirh,dirv = self.dirh,self.dirv
g = self.grx
for l in self.layers[::-dirv]:
if not l.prevlayer(): continue
r=None
for vk in l[::dirh]:
for m in l._medianindex(vk):
# take the median node in dirv layer:
um = l.prevlayer()[m]
# if vk is "free" align it with um's root
if g[vk].align is vk:
if dirv==1: vpair = (vk,um)
else: vpair = (um,vk)
# if vk<->um link is used for alignment
if (vpair not in self.conflicts) and \
(r==None or dirh*r<dirh*m):
g[um].align = vk
g[vk].root = g[um].root
g[vk].align = g[vk].root
r = m
def _coord_horizontal_compact(self):
limit=getrecursionlimit()
N=len(self.layers)+10
if N>limit:
setrecursionlimit(N)
dirh,dirv = self.dirh,self.dirv
g = self.grx
L = self.layers[::-dirv]
# recursive placement of blocks:
for l in L:
for v in l[::dirh]:
if g[v].root is v:
self.__place_block(v)
setrecursionlimit(limit)
# mirror all nodes if right-aligned:
if dirh==-1:
for l in L:
for v in l:
x = g[v].X
if x: g[v].X = -x
# then assign x-coord of its root:
inf=float('infinity')
rb=inf
for l in L:
for v in l[::dirh]:
g[v].x[self.dirvh] = g[g[v].root].X
rs = g[g[v].root].sink
s = g[rs].shift
if s<inf:
g[v].x[self.dirvh] += dirh*s
rb = min(rb,g[v].x[self.dirvh])
# normalize to 0, and reinit root/align/sink/shift/X
for l in self.layers:
for v in l:
#g[v].x[dirvh] -= rb
g[v].root = g[v].align = g[v].sink = v
g[v].shift = inf
g[v].X = None
# TODO: rewrite in iterative form to avoid recursion limit...
def __place_block(self,v):
g = self.grx
if g[v].X==None:
# every block is initially placed at x=0
g[v].X = 0.0
# place block in which v belongs:
w = v
while 1:
j = g[w].pos-self.dirh # predecessor in rank must be placed
r = g[w].rank
if 0<= j <len(self.layers[r]):
wprec = self.layers[r][j]
delta = self.xspace+(wprec.view.w + w.view.w)/2. # abs positive minimum displ.
# take root and place block:
u = g[wprec].root
self.__place_block(u)
# set sink as sink of prec-block root
if g[v].sink is v:
g[v].sink = g[u].sink
if g[v].sink != g[u].sink:
s = g[u].sink
newshift = g[v].X-(g[u].X+delta)
g[s].shift = min(g[s].shift,newshift)
else:
g[v].X = max(g[v].X,(g[u].X+delta))
# take next node to align in block:
w = g[w].align
# quit if self aligned
if w is v: break
def draw_edges(self):
"""Basic edge routing applied only for edges with dummy points.
Enhanced edge routing can be performed by using the appropriate
*route_with_xxx* functions from :ref:routing_ in the edges' view.
"""
for e in self.g.E():
if hasattr(e,'view'):
l=[]
r0,r1 = None,None
if e in self.ctrls:
D = self.ctrls[e]
r0,r1 = self.grx[e.v[0]].rank,self.grx[e.v[1]].rank
if r0<r1:
ranks = xrange(r0+1,r1)
else:
ranks = xrange(r0-1,r1,-1)
l = [D[r].view.xy for r in ranks]
l.insert(0,e.v[0].view.xy)
l.append(e.v[1].view.xy)
try:
self.route_edge(e,l)
except AttributeError:
pass
e.view.setpath(l)
|
bdcht/grandalf
|
grandalf/layouts.py
|
SugiyamaLayout._rank_optimize
|
python
|
def _rank_optimize(self):
assert self.dag
for l in reversed(self.layers):
for v in l:
gv = self.grx[v]
for x in v.N(-1):
if all((self.grx[y].rank>=gv.rank for y in x.N(+1))):
gx = self.grx[x]
self.layers[gx.rank].remove(x)
gx.rank = gv.rank-1
self.layers[gv.rank-1].append(x)
|
optimize ranking by pushing long edges toward lower layers as much as possible.
see other interesting network flow solvers to minimize total edge length
(http://jgaa.info/accepted/2005/EiglspergerSiebenhallerKaufmann2005.9.3.pdf)
|
train
|
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L495-L509
| null |
class SugiyamaLayout(object):
"""
The Sugiyama layout is the traditional "layered" graph layout called
*dot* in graphviz. This layout is quite efficient but heavily relies
on drawing heuristics. Adaptive drawing is limited to
extending the leaves only, but since the algorithm is quite fast
redrawing the entire graph (up to about a thousand nodes) gives
usually good results in less than a second.
The Sugiyama Layout Class takes as input a core_graph object and implements
an efficient drawing algorithm based on nodes dimensions provided through
a user-defined *view* property in each vertex.
Attributes:
dirvh (int): the current alignment state
order_iter (int): the default number of layer placement iterations
order_attr (str): set attribute name used for layer ordering
xspace (int): horizontal space between vertices in a layer
yspace (int): vertical space between layers
dw (int): default width of a vertex
dh (int): default height of a vertex
g (graph_core): the graph component reference
layers (list[Layer]): the list of layers
grx (dict): associate vertex (possibly dummy) with their sugiyama attributes
ctrls (dict): associate edge with all its vertices (including dummies)
dag (bool): the current acyclic state
initdone (bool): True if state is initialized (see init_all).
"""
def __init__(self,g):
from grandalf.utils.geometry import median_wh
# drawing parameters:
self.dirvh=0
self.order_iter = 8
self.order_attr = 'pos'
self.xspace = 20
self.yspace = 20
self.dw = 10
self.dh = 10
# For layered graphs, vertices and edges need to have some additional
# attributes that make sense only for this kind of layout:
# update graph struct:
self.g = g
self.layers = []
self.grx= {}
self.ctrls = {}
self.dag = False
for v in self.g.V():
assert hasattr(v,'view')
self.grx[v] = _sugiyama_vertex_attr()
self.dw,self.dh = median_wh([v.view for v in self.g.V()])
self.initdone = False
def init_all(self,roots=None,inverted_edges=None,optimize=False):
"""initializes the layout algorithm by computing roots (unless provided),
inverted edges (unless provided), vertices ranks and creates all dummy
vertices and layers.
Parameters:
roots (list[Vertex]): set *root* vertices (layer 0)
inverted_edges (list[Edge]): set edges to invert to have a DAG.
optimize (bool): optimize ranking if True (default False)
"""
if self.initdone: return
# For layered sugiyama algorithm, the input graph must be acyclic,
# so we must provide a list of root nodes and a list of inverted edges.
if roots==None:
roots = [v for v in self.g.sV if len(v.e_in())==0]
if inverted_edges==None:
L = self.g.get_scs_with_feedback(roots)
inverted_edges = [x for x in self.g.sE if x.feedback]
self.alt_e = inverted_edges
# assign rank to all vertices:
self.rank_all(roots,optimize)
# add dummy vertex/edge for 'long' edges:
for e in self.g.E():
self.setdummies(e)
# precompute some layers values:
for l in self.layers: l.setup(self)
self.initdone = True
def draw(self,N=1.5):
"""compute every node coordinates after converging to optimal ordering by N
rounds, and finally perform the edge routing.
"""
while N>0.5:
for (l,mvmt) in self.ordering_step():
pass
N = N-1
if N>0:
for (l,mvmt) in self.ordering_step(oneway=True):
pass
self.setxy()
self.draw_edges()
def _edge_inverter(self):
for e in self.alt_e:
x,y = e.v
e.v = (y,x)
self.dag = not self.dag
if self.dag:
for e in self.g.degenerated_edges:
e.detach()
self.g.sE.remove(e)
else:
for e in self.g.degenerated_edges:
self.g.add_edge(e)
# internal state for alignment policy:
# dirvh=0 -> dirh=+1, dirv=-1: leftmost upper
# dirvh=1 -> dirh=-1, dirv=-1: rightmost upper
# dirvh=2 -> dirh=+1, dirv=+1: leftmost lower
# dirvh=3 -> dirh=-1, dirv=+1: rightmost lower
@property
def dirvh(self): return self.__dirvh
@property
def dirv(self): return self.__dirv
@property
def dirh(self): return self.__dirh
@dirvh.setter
def dirvh(self,dirvh):
assert dirvh in range(4)
self.__dirvh=dirvh
self.__dirh,self.__dirv={0:(1,-1), 1:(-1,-1), 2:(1,1), 3:(-1,1)}[dirvh]
@dirv.setter
def dirv(self,dirv):
assert dirv in (-1,+1)
dirvh = (dirv+1)+(1-self.__dirh)//2
self.dirvh = dirvh
@dirh.setter
def dirh(self,dirh):
assert dirh in (-1,+1)
dirvh = (self.__dirv+1)+(1-dirh)//2
self.dirvh = dirvh
def rank_all(self,roots,optimize=False):
"""Computes rank of all vertices.
add provided roots to rank 0 vertices,
otherwise update ranking from provided roots.
The initial rank is based on precedence relationships,
optimal ranking may be derived from network flow (simplex).
"""
self._edge_inverter()
r = [x for x in self.g.sV if (len(x.e_in())==0 and x not in roots)]
self._rank_init(roots+r)
if optimize: self._rank_optimize()
self._edge_inverter()
def _rank_init(self,unranked):
"""Computes rank of provided unranked list of vertices and all
their children. A vertex will be assigned a rank when all its
inward edges have been *scanned*. When a vertex is assigned
a rank, its outward edges are marked *scanned*.
"""
assert self.dag
scan = {}
# set rank of unranked based on its in-edges vertices ranks:
while len(unranked)>0:
l = []
for v in unranked:
self.setrank(v)
# mark out-edges as scannable:
for e in v.e_out(): scan[e]=True
# check if out-vertices are rank-able:
for x in v.N(+1):
if not (False in [scan.get(e,False) for e in x.e_in()]):
if x not in l: l.append(x)
unranked=l
def setrank(self,v):
"""set rank value for vertex v and add it to the corresponding layer.
The Layer is created if it is the first vertex with this rank.
"""
assert self.dag
r=max([self.grx[x].rank for x in v.N(-1)]+[-1])+1
self.grx[v].rank=r
# add it to its layer:
try:
self.layers[r].append(v)
except IndexError:
assert r==len(self.layers)
self.layers.append(Layer([v]))
def dummyctrl(self,r,ctrl):
"""creates a DummyVertex at rank r inserted in the ctrl dict
of the associated edge and layer.
Arguments:
r (int): rank value
ctrl (dict): the edge's control vertices
Returns:
DummyVertex : the created DummyVertex.
"""
dv = DummyVertex(r)
dv.view.w,dv.view.h=self.dw,self.dh
self.grx[dv] = dv
dv.ctrl = ctrl
ctrl[r] = dv
self.layers[r].append(dv)
return dv
def setdummies(self,e):
"""creates and defines all needed dummy vertices for edge e.
"""
v0,v1 = e.v
r0,r1 = self.grx[v0].rank,self.grx[v1].rank
if r0>r1:
assert e in self.alt_e
v0,v1 = v1,v0
r0,r1 = r1,r0
if (r1-r0)>1:
# "dummy vertices" are stored in the edge ctrl dict,
# keyed by their rank in layers.
ctrl=self.ctrls[e]={}
ctrl[r0]=v0
ctrl[r1]=v1
for r in xrange(r0+1,r1):
self.dummyctrl(r,ctrl)
def draw_step(self):
"""iterator that computes all vertices coordinates and edge routing after
just one step (one layer after the other from top to bottom to top).
Purely inefficient ! Use it only for "animation" or debugging purpose.
"""
ostep = self.ordering_step()
for s in ostep:
self.setxy()
self.draw_edges()
yield s
def ordering_step(self,oneway=False):
"""iterator that computes all vertices ordering in their layers
(one layer after the other from top to bottom, to top again unless
oneway is True).
"""
self.dirv=-1
crossings = 0
for l in self.layers:
mvmt = l.order()
crossings += mvmt
yield (l,mvmt)
if oneway or (crossings == 0):
return
self.dirv=+1
while l:
mvmt = l.order()
yield (l,mvmt)
l = l.nextlayer()
def setxy(self):
"""computes all vertex coordinates (x,y) using
an algorithm by Brandes & Kopf.
"""
self._edge_inverter()
self._detect_alignment_conflicts()
inf = float('infinity')
# initialize vertex coordinates attributes:
for l in self.layers:
for v in l:
self.grx[v].root = v
self.grx[v].align = v
self.grx[v].sink = v
self.grx[v].shift = inf
self.grx[v].X = None
self.grx[v].x = [0.0]*4
curvh = self.dirvh # save current dirvh value
for dirvh in xrange(4):
self.dirvh = dirvh
self._coord_vertical_alignment()
self._coord_horizontal_compact()
self.dirvh = curvh # restore it
# vertical coordinate assignment of all nodes:
Y = 0
for l in self.layers:
dY = max([v.view.h/2. for v in l])
for v in l:
vx = sorted(self.grx[v].x)
# mean of the 2 medians out of the 4 x-coord computed above:
avgm = (vx[1]+vx[2])/2.
# final xy-coordinates :
v.view.xy = (avgm,Y+dY)
Y += 2*dY+self.yspace
self._edge_inverter()
def _detect_alignment_conflicts(self):
"""mark conflicts between edges:
inner edges are edges between dummy nodes
type 0 is regular crossing regular (or sharing vertex)
type 1 is inner crossing regular (targeted crossings)
type 2 is inner crossing inner (avoided by reduce_crossings phase)
"""
curvh = self.dirvh # save current dirvh value
self.dirvh=0
self.conflicts = []
for L in self.layers:
last = len(L)-1
prev = L.prevlayer()
if not prev: continue
k0=0
k1_init=len(prev)-1
l=0
for l1,v in enumerate(L):
if not self.grx[v].dummy: continue
if l1==last or v.inner(-1):
k1=k1_init
if v.inner(-1):
k1=self.grx[v.N(-1)[-1]].pos
for vl in L[l:l1+1]:
for vk in L._neighbors(vl):
k = self.grx[vk].pos
if (k<k0 or k>k1):
self.conflicts.append((vk,vl))
l=l1+1
k0=k1
self.dirvh = curvh # restore it
def _coord_vertical_alignment(self):
"""performs vertical alignment according to current dirvh internal state.
"""
dirh,dirv = self.dirh,self.dirv
g = self.grx
for l in self.layers[::-dirv]:
if not l.prevlayer(): continue
r=None
for vk in l[::dirh]:
for m in l._medianindex(vk):
# take the median node in dirv layer:
um = l.prevlayer()[m]
# if vk is "free" align it with um's root
if g[vk].align is vk:
if dirv==1: vpair = (vk,um)
else: vpair = (um,vk)
# if vk<->um link is used for alignment
if (vpair not in self.conflicts) and \
(r==None or dirh*r<dirh*m):
g[um].align = vk
g[vk].root = g[um].root
g[vk].align = g[vk].root
r = m
def _coord_horizontal_compact(self):
limit=getrecursionlimit()
N=len(self.layers)+10
if N>limit:
setrecursionlimit(N)
dirh,dirv = self.dirh,self.dirv
g = self.grx
L = self.layers[::-dirv]
# recursive placement of blocks:
for l in L:
for v in l[::dirh]:
if g[v].root is v:
self.__place_block(v)
setrecursionlimit(limit)
# mirror all nodes if right-aligned:
if dirh==-1:
for l in L:
for v in l:
x = g[v].X
if x: g[v].X = -x
# then assign x-coord of its root:
inf=float('infinity')
rb=inf
for l in L:
for v in l[::dirh]:
g[v].x[self.dirvh] = g[g[v].root].X
rs = g[g[v].root].sink
s = g[rs].shift
if s<inf:
g[v].x[self.dirvh] += dirh*s
rb = min(rb,g[v].x[self.dirvh])
# normalize to 0, and reinit root/align/sink/shift/X
for l in self.layers:
for v in l:
#g[v].x[dirvh] -= rb
g[v].root = g[v].align = g[v].sink = v
g[v].shift = inf
g[v].X = None
# TODO: rewrite in iterative form to avoid recursion limit...
def __place_block(self,v):
g = self.grx
if g[v].X==None:
# every block is initially placed at x=0
g[v].X = 0.0
# place block in which v belongs:
w = v
while 1:
j = g[w].pos-self.dirh # predecessor in rank must be placed
r = g[w].rank
if 0<= j <len(self.layers[r]):
wprec = self.layers[r][j]
delta = self.xspace+(wprec.view.w + w.view.w)/2. # abs positive minimum displ.
# take root and place block:
u = g[wprec].root
self.__place_block(u)
# set sink as sink of prec-block root
if g[v].sink is v:
g[v].sink = g[u].sink
if g[v].sink != g[u].sink:
s = g[u].sink
newshift = g[v].X-(g[u].X+delta)
g[s].shift = min(g[s].shift,newshift)
else:
g[v].X = max(g[v].X,(g[u].X+delta))
# take next node to align in block:
w = g[w].align
# quit if self aligned
if w is v: break
def draw_edges(self):
"""Basic edge routing applied only for edges with dummy points.
Enhanced edge routing can be performed by using the apropriate
*route_with_xxx* functions from :ref:routing_ in the edges' view.
"""
for e in self.g.E():
if hasattr(e,'view'):
l=[]
r0,r1 = None,None
if e in self.ctrls:
D = self.ctrls[e]
r0,r1 = self.grx[e.v[0]].rank,self.grx[e.v[1]].rank
if r0<r1:
ranks = xrange(r0+1,r1)
else:
ranks = xrange(r0-1,r1,-1)
l = [D[r].view.xy for r in ranks]
l.insert(0,e.v[0].view.xy)
l.append(e.v[1].view.xy)
try:
self.route_edge(e,l)
except AttributeError:
pass
e.view.setpath(l)
|
bdcht/grandalf
|
grandalf/layouts.py
|
SugiyamaLayout.setrank
|
python
|
def setrank(self,v):
assert self.dag
r=max([self.grx[x].rank for x in v.N(-1)]+[-1])+1
self.grx[v].rank=r
# add it to its layer:
try:
self.layers[r].append(v)
except IndexError:
assert r==len(self.layers)
self.layers.append(Layer([v]))
|
set rank value for vertex v and add it to the corresponding layer.
The Layer is created if it is the first vertex with this rank.
|
train
|
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L512-L524
| null |
class SugiyamaLayout(object):
"""
The Sugiyama layout is the traditional "layered" graph layout called
*dot* in graphviz. This layout is quite efficient but heavily relies
on drawing heuristics. Adaptive drawing is limited to
extending the leaves only, but since the algorithm is quite fast
redrawing the entire graph (up to about a thousand nodes) gives
usually good results in less than a second.
The Sugiyama Layout Class takes as input a core_graph object and implements
an efficient drawing algorithm based on nodes dimensions provided through
a user-defined *view* property in each vertex.
Attributes:
dirvh (int): the current aligment state
order_inter (int): the default number of layer placement iterations
order_attr (str): set attribute name used for layer ordering
xspace (int): horizontal space between vertices in a layer
yspace (int): vertical space between layers
dw (int): default width of a vertex
dh (int): default height of a vertex
g (graph_core): the graph component reference
layers (list[Layer]): the list of layers
grx (dict): associate vertex (possibly dummy) with their sugiyama attributes
ctrls (dict): associate edge with all its vertices (including dummies)
dag (bool): the current acyclic state
initdone (bool): True if state is initialized (see init_all).
"""
def __init__(self,g):
from grandalf.utils.geometry import median_wh
# drawing parameters:
self.dirvh=0
self.order_iter = 8
self.order_attr = 'pos'
self.xspace = 20
self.yspace = 20
self.dw = 10
self.dh = 10
# For layered graphs, vertices and edges need to have some additional
# attributes that make sense only for this kind of layout:
# update graph struct:
self.g = g
self.layers = []
self.grx= {}
self.ctrls = {}
self.dag = False
for v in self.g.V():
assert hasattr(v,'view')
self.grx[v] = _sugiyama_vertex_attr()
self.dw,self.dh = median_wh([v.view for v in self.g.V()])
self.initdone = False
def init_all(self,roots=None,inverted_edges=None,optimize=False):
"""initializes the layout algorithm by computing roots (unless provided),
inverted edges (unless provided), vertices ranks and creates all dummy
vertices and layers.
Parameters:
roots (list[Vertex]): set *root* vertices (layer 0)
inverted_edges (list[Edge]): set edges to invert to have a DAG.
optimize (bool): optimize ranking if True (default False)
"""
if self.initdone: return
# For layered sugiyama algorithm, the input graph must be acyclic,
# so we must provide a list of root nodes and a list of inverted edges.
if roots==None:
roots = [v for v in self.g.sV if len(v.e_in())==0]
if inverted_edges==None:
L = self.g.get_scs_with_feedback(roots)
inverted_edges = [x for x in self.g.sE if x.feedback]
self.alt_e = inverted_edges
# assign rank to all vertices:
self.rank_all(roots,optimize)
# add dummy vertex/edge for 'long' edges:
for e in self.g.E():
self.setdummies(e)
# precompute some layers values:
for l in self.layers: l.setup(self)
self.initdone = True
def draw(self,N=1.5):
"""compute every node coordinates after converging to optimal ordering by N
rounds, and finally perform the edge routing.
"""
while N>0.5:
for (l,mvmt) in self.ordering_step():
pass
N = N-1
if N>0:
for (l,mvmt) in self.ordering_step(oneway=True):
pass
self.setxy()
self.draw_edges()
def _edge_inverter(self):
for e in self.alt_e:
x,y = e.v
e.v = (y,x)
self.dag = not self.dag
if self.dag:
for e in self.g.degenerated_edges:
e.detach()
self.g.sE.remove(e)
else:
for e in self.g.degenerated_edges:
self.g.add_edge(e)
# internal state for alignment policy:
# dirvh=0 -> dirh=+1, dirv=-1: leftmost upper
# dirvh=1 -> dirh=-1, dirv=-1: rightmost upper
# dirvh=2 -> dirh=+1, dirv=+1: leftmost lower
# dirvh=3 -> dirh=-1, dirv=+1: rightmost lower
@property
def dirvh(self): return self.__dirvh
@property
def dirv(self): return self.__dirv
@property
def dirh(self): return self.__dirh
@dirvh.setter
def dirvh(self,dirvh):
assert dirvh in range(4)
self.__dirvh=dirvh
self.__dirh,self.__dirv={0:(1,-1), 1:(-1,-1), 2:(1,1), 3:(-1,1)}[dirvh]
@dirv.setter
def dirv(self,dirv):
assert dirv in (-1,+1)
dirvh = (dirv+1)+(1-self.__dirh)//2
self.dirvh = dirvh
@dirh.setter
def dirh(self,dirh):
assert dirh in (-1,+1)
dirvh = (self.__dirv+1)+(1-dirh)//2
self.dirvh = dirvh
def rank_all(self,roots,optimize=False):
"""Computes rank of all vertices.
add provided roots to rank 0 vertices,
otherwise update ranking from provided roots.
The initial rank is based on precedence relationships,
optimal ranking may be derived from network flow (simplex).
"""
self._edge_inverter()
r = [x for x in self.g.sV if (len(x.e_in())==0 and x not in roots)]
self._rank_init(roots+r)
if optimize: self._rank_optimize()
self._edge_inverter()
def _rank_init(self,unranked):
"""Computes rank of provided unranked list of vertices and all
their children. A vertex will be asign a rank when all its
inward edges have been *scanned*. When a vertex is asigned
a rank, its outward edges are marked *scanned*.
"""
assert self.dag
scan = {}
# set rank of unranked based on its in-edges vertices ranks:
while len(unranked)>0:
l = []
for v in unranked:
self.setrank(v)
# mark out-edges has scan-able:
for e in v.e_out(): scan[e]=True
# check if out-vertices are rank-able:
for x in v.N(+1):
if not (False in [scan.get(e,False) for e in x.e_in()]):
if x not in l: l.append(x)
unranked=l
def _rank_optimize(self):
"""optimize ranking by pushing long edges toward lower layers as much as possible.
see other interersting network flow solver to minimize total edge length
(http://jgaa.info/accepted/2005/EiglspergerSiebenhallerKaufmann2005.9.3.pdf)
"""
assert self.dag
for l in reversed(self.layers):
for v in l:
gv = self.grx[v]
for x in v.N(-1):
if all((self.grx[y].rank>=gv.rank for y in x.N(+1))):
gx = self.grx[x]
self.layers[gx.rank].remove(x)
gx.rank = gv.rank-1
self.layers[gv.rank-1].append(x)
def dummyctrl(self,r,ctrl):
"""creates a DummyVertex at rank r inserted in the ctrl dict
of the associated edge and layer.
Arguments:
r (int): rank value
ctrl (dict): the edge's control vertices
Returns:
DummyVertex : the created DummyVertex.
"""
dv = DummyVertex(r)
dv.view.w,dv.view.h=self.dw,self.dh
self.grx[dv] = dv
dv.ctrl = ctrl
ctrl[r] = dv
self.layers[r].append(dv)
return dv
def setdummies(self,e):
"""creates and defines all needed dummy vertices for edge e.
"""
v0,v1 = e.v
r0,r1 = self.grx[v0].rank,self.grx[v1].rank
if r0>r1:
assert e in self.alt_e
v0,v1 = v1,v0
r0,r1 = r1,r0
if (r1-r0)>1:
# "dummy vertices" are stored in the edge ctrl dict,
# keyed by their rank in layers.
ctrl=self.ctrls[e]={}
ctrl[r0]=v0
ctrl[r1]=v1
for r in xrange(r0+1,r1):
self.dummyctrl(r,ctrl)
def draw_step(self):
"""iterator that computes all vertices coordinates and edge routing after
just one step (one layer after the other from top to bottom to top).
Purely inefficient ! Use it only for "animation" or debugging purpose.
"""
ostep = self.ordering_step()
for s in ostep:
self.setxy()
self.draw_edges()
yield s
def ordering_step(self,oneway=False):
"""iterator that computes all vertices ordering in their layers
(one layer after the other from top to bottom, to top again unless
oneway is True).
"""
self.dirv=-1
crossings = 0
for l in self.layers:
mvmt = l.order()
crossings += mvmt
yield (l,mvmt)
if oneway or (crossings == 0):
return
self.dirv=+1
while l:
mvmt = l.order()
yield (l,mvmt)
l = l.nextlayer()
def setxy(self):
"""computes all vertex coordinates (x,y) using
an algorithm by Brandes & Kopf.
"""
self._edge_inverter()
self._detect_alignment_conflicts()
inf = float('infinity')
# initialize vertex coordinates attributes:
for l in self.layers:
for v in l:
self.grx[v].root = v
self.grx[v].align = v
self.grx[v].sink = v
self.grx[v].shift = inf
self.grx[v].X = None
self.grx[v].x = [0.0]*4
curvh = self.dirvh # save current dirvh value
for dirvh in xrange(4):
self.dirvh = dirvh
self._coord_vertical_alignment()
self._coord_horizontal_compact()
self.dirvh = curvh # restore it
# vertical coordinate assigment of all nodes:
Y = 0
for l in self.layers:
dY = max([v.view.h/2. for v in l])
for v in l:
vx = sorted(self.grx[v].x)
# mean of the 2 medians out of the 4 x-coord computed above:
avgm = (vx[1]+vx[2])/2.
# final xy-coordinates :
v.view.xy = (avgm,Y+dY)
Y += 2*dY+self.yspace
self._edge_inverter()
def _detect_alignment_conflicts(self):
"""mark conflicts between edges:
inner edges are edges between dummy nodes
type 0 is regular crossing regular (or sharing vertex)
type 1 is inner crossing regular (targeted crossings)
type 2 is inner crossing inner (avoided by reduce_crossings phase)
"""
curvh = self.dirvh # save current dirvh value
self.dirvh=0
self.conflicts = []
for L in self.layers:
last = len(L)-1
prev = L.prevlayer()
if not prev: continue
k0=0
k1_init=len(prev)-1
l=0
for l1,v in enumerate(L):
if not self.grx[v].dummy: continue
if l1==last or v.inner(-1):
k1=k1_init
if v.inner(-1):
k1=self.grx[v.N(-1)[-1]].pos
for vl in L[l:l1+1]:
for vk in L._neighbors(vl):
k = self.grx[vk].pos
if (k<k0 or k>k1):
self.conflicts.append((vk,vl))
l=l1+1
k0=k1
self.dirvh = curvh # restore it
def _coord_vertical_alignment(self):
"""performs vertical alignment according to current dirvh internal state.
"""
dirh,dirv = self.dirh,self.dirv
g = self.grx
for l in self.layers[::-dirv]:
if not l.prevlayer(): continue
r=None
for vk in l[::dirh]:
for m in l._medianindex(vk):
# take the median node in dirv layer:
um = l.prevlayer()[m]
# if vk is "free" align it with um's root
if g[vk].align is vk:
if dirv==1: vpair = (vk,um)
else: vpair = (um,vk)
# if vk<->um link is used for alignment
if (vpair not in self.conflicts) and \
(r==None or dirh*r<dirh*m):
g[um].align = vk
g[vk].root = g[um].root
g[vk].align = g[vk].root
r = m
def _coord_horizontal_compact(self):
limit=getrecursionlimit()
N=len(self.layers)+10
if N>limit:
setrecursionlimit(N)
dirh,dirv = self.dirh,self.dirv
g = self.grx
L = self.layers[::-dirv]
# recursive placement of blocks:
for l in L:
for v in l[::dirh]:
if g[v].root is v:
self.__place_block(v)
setrecursionlimit(limit)
# mirror all nodes if right-aligned:
if dirh==-1:
for l in L:
for v in l:
x = g[v].X
if x: g[v].X = -x
# then assign x-coord of its root:
inf=float('infinity')
rb=inf
for l in L:
for v in l[::dirh]:
g[v].x[self.dirvh] = g[g[v].root].X
rs = g[g[v].root].sink
s = g[rs].shift
if s<inf:
g[v].x[self.dirvh] += dirh*s
rb = min(rb,g[v].x[self.dirvh])
# normalize to 0, and reinit root/align/sink/shift/X
for l in self.layers:
for v in l:
#g[v].x[dirvh] -= rb
g[v].root = g[v].align = g[v].sink = v
g[v].shift = inf
g[v].X = None
# TODO: rewrite in iterative form to avoid recursion limit...
def __place_block(self,v):
g = self.grx
if g[v].X==None:
# every block is initially placed at x=0
g[v].X = 0.0
# place block in which v belongs:
w = v
while 1:
j = g[w].pos-self.dirh # predecessor in rank must be placed
r = g[w].rank
if 0<= j <len(self.layers[r]):
wprec = self.layers[r][j]
delta = self.xspace+(wprec.view.w + w.view.w)/2. # abs positive minimum displ.
# take root and place block:
u = g[wprec].root
self.__place_block(u)
# set sink as sink of prec-block root
if g[v].sink is v:
g[v].sink = g[u].sink
if g[v].sink != g[u].sink:
s = g[u].sink
newshift = g[v].X-(g[u].X+delta)
g[s].shift = min(g[s].shift,newshift)
else:
g[v].X = max(g[v].X,(g[u].X+delta))
# take next node to align in block:
w = g[w].align
# quit if self aligned
if w is v: break
def draw_edges(self):
"""Basic edge routing applied only for edges with dummy points.
Enhanced edge routing can be performed by using the apropriate
*route_with_xxx* functions from :ref:routing_ in the edges' view.
"""
for e in self.g.E():
if hasattr(e,'view'):
l=[]
r0,r1 = None,None
if e in self.ctrls:
D = self.ctrls[e]
r0,r1 = self.grx[e.v[0]].rank,self.grx[e.v[1]].rank
if r0<r1:
ranks = xrange(r0+1,r1)
else:
ranks = xrange(r0-1,r1,-1)
l = [D[r].view.xy for r in ranks]
l.insert(0,e.v[0].view.xy)
l.append(e.v[1].view.xy)
try:
self.route_edge(e,l)
except AttributeError:
pass
e.view.setpath(l)
|
bdcht/grandalf
|
grandalf/layouts.py
|
SugiyamaLayout.dummyctrl
|
python
|
def dummyctrl(self,r,ctrl):
dv = DummyVertex(r)
dv.view.w,dv.view.h=self.dw,self.dh
self.grx[dv] = dv
dv.ctrl = ctrl
ctrl[r] = dv
self.layers[r].append(dv)
return dv
|
creates a DummyVertex at rank r inserted in the ctrl dict
of the associated edge and layer.
Arguments:
r (int): rank value
ctrl (dict): the edge's control vertices
Returns:
DummyVertex : the created DummyVertex.
|
train
|
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L526-L543
| null |
class SugiyamaLayout(object):
"""
The Sugiyama layout is the traditional "layered" graph layout called
*dot* in graphviz. This layout is quite efficient but heavily relies
on drawing heuristics. Adaptive drawing is limited to
extending the leaves only, but since the algorithm is quite fast
redrawing the entire graph (up to about a thousand nodes) gives
usually good results in less than a second.
The Sugiyama Layout Class takes as input a core_graph object and implements
an efficient drawing algorithm based on nodes dimensions provided through
a user-defined *view* property in each vertex.
Attributes:
dirvh (int): the current aligment state
order_inter (int): the default number of layer placement iterations
order_attr (str): set attribute name used for layer ordering
xspace (int): horizontal space between vertices in a layer
yspace (int): vertical space between layers
dw (int): default width of a vertex
dh (int): default height of a vertex
g (graph_core): the graph component reference
layers (list[Layer]): the list of layers
grx (dict): associate vertex (possibly dummy) with their sugiyama attributes
ctrls (dict): associate edge with all its vertices (including dummies)
dag (bool): the current acyclic state
initdone (bool): True if state is initialized (see init_all).
"""
def __init__(self,g):
from grandalf.utils.geometry import median_wh
# drawing parameters:
self.dirvh=0
self.order_iter = 8
self.order_attr = 'pos'
self.xspace = 20
self.yspace = 20
self.dw = 10
self.dh = 10
# For layered graphs, vertices and edges need to have some additional
# attributes that make sense only for this kind of layout:
# update graph struct:
self.g = g
self.layers = []
self.grx= {}
self.ctrls = {}
self.dag = False
for v in self.g.V():
assert hasattr(v,'view')
self.grx[v] = _sugiyama_vertex_attr()
self.dw,self.dh = median_wh([v.view for v in self.g.V()])
self.initdone = False
def init_all(self,roots=None,inverted_edges=None,optimize=False):
"""initializes the layout algorithm by computing roots (unless provided),
inverted edges (unless provided), vertices ranks and creates all dummy
vertices and layers.
Parameters:
roots (list[Vertex]): set *root* vertices (layer 0)
inverted_edges (list[Edge]): set edges to invert to have a DAG.
optimize (bool): optimize ranking if True (default False)
"""
if self.initdone: return
# For layered sugiyama algorithm, the input graph must be acyclic,
# so we must provide a list of root nodes and a list of inverted edges.
if roots==None:
roots = [v for v in self.g.sV if len(v.e_in())==0]
if inverted_edges==None:
L = self.g.get_scs_with_feedback(roots)
inverted_edges = [x for x in self.g.sE if x.feedback]
self.alt_e = inverted_edges
# assign rank to all vertices:
self.rank_all(roots,optimize)
# add dummy vertex/edge for 'long' edges:
for e in self.g.E():
self.setdummies(e)
# precompute some layers values:
for l in self.layers: l.setup(self)
self.initdone = True
def draw(self,N=1.5):
"""compute every node coordinates after converging to optimal ordering by N
rounds, and finally perform the edge routing.
"""
while N>0.5:
for (l,mvmt) in self.ordering_step():
pass
N = N-1
if N>0:
for (l,mvmt) in self.ordering_step(oneway=True):
pass
self.setxy()
self.draw_edges()
def _edge_inverter(self):
for e in self.alt_e:
x,y = e.v
e.v = (y,x)
self.dag = not self.dag
if self.dag:
for e in self.g.degenerated_edges:
e.detach()
self.g.sE.remove(e)
else:
for e in self.g.degenerated_edges:
self.g.add_edge(e)
# internal state for alignment policy:
# dirvh=0 -> dirh=+1, dirv=-1: leftmost upper
# dirvh=1 -> dirh=-1, dirv=-1: rightmost upper
# dirvh=2 -> dirh=+1, dirv=+1: leftmost lower
# dirvh=3 -> dirh=-1, dirv=+1: rightmost lower
@property
def dirvh(self): return self.__dirvh
@property
def dirv(self): return self.__dirv
@property
def dirh(self): return self.__dirh
@dirvh.setter
def dirvh(self,dirvh):
assert dirvh in range(4)
self.__dirvh=dirvh
self.__dirh,self.__dirv={0:(1,-1), 1:(-1,-1), 2:(1,1), 3:(-1,1)}[dirvh]
@dirv.setter
def dirv(self,dirv):
assert dirv in (-1,+1)
dirvh = (dirv+1)+(1-self.__dirh)//2
self.dirvh = dirvh
@dirh.setter
def dirh(self,dirh):
assert dirh in (-1,+1)
dirvh = (self.__dirv+1)+(1-dirh)//2
self.dirvh = dirvh
def rank_all(self,roots,optimize=False):
"""Computes rank of all vertices.
add provided roots to rank 0 vertices,
otherwise update ranking from provided roots.
The initial rank is based on precedence relationships,
optimal ranking may be derived from network flow (simplex).
"""
self._edge_inverter()
r = [x for x in self.g.sV if (len(x.e_in())==0 and x not in roots)]
self._rank_init(roots+r)
if optimize: self._rank_optimize()
self._edge_inverter()
def _rank_init(self,unranked):
"""Computes rank of provided unranked list of vertices and all
their children. A vertex will be asign a rank when all its
inward edges have been *scanned*. When a vertex is asigned
a rank, its outward edges are marked *scanned*.
"""
assert self.dag
scan = {}
# set rank of unranked based on its in-edges vertices ranks:
while len(unranked)>0:
l = []
for v in unranked:
self.setrank(v)
# mark out-edges has scan-able:
for e in v.e_out(): scan[e]=True
# check if out-vertices are rank-able:
for x in v.N(+1):
if not (False in [scan.get(e,False) for e in x.e_in()]):
if x not in l: l.append(x)
unranked=l
def _rank_optimize(self):
"""optimize ranking by pushing long edges toward lower layers as much as possible.
see other interersting network flow solver to minimize total edge length
(http://jgaa.info/accepted/2005/EiglspergerSiebenhallerKaufmann2005.9.3.pdf)
"""
assert self.dag
for l in reversed(self.layers):
for v in l:
gv = self.grx[v]
for x in v.N(-1):
if all((self.grx[y].rank>=gv.rank for y in x.N(+1))):
gx = self.grx[x]
self.layers[gx.rank].remove(x)
gx.rank = gv.rank-1
self.layers[gv.rank-1].append(x)
def setrank(self,v):
"""set rank value for vertex v and add it to the corresponding layer.
The Layer is created if it is the first vertex with this rank.
"""
assert self.dag
r=max([self.grx[x].rank for x in v.N(-1)]+[-1])+1
self.grx[v].rank=r
# add it to its layer:
try:
self.layers[r].append(v)
except IndexError:
assert r==len(self.layers)
self.layers.append(Layer([v]))
def dummyctrl(self,r,ctrl):
"""creates a DummyVertex at rank r inserted in the ctrl dict
of the associated edge and layer.
Arguments:
r (int): rank value
ctrl (dict): the edge's control vertices
Returns:
DummyVertex : the created DummyVertex.
"""
dv = DummyVertex(r)
dv.view.w,dv.view.h=self.dw,self.dh
self.grx[dv] = dv
dv.ctrl = ctrl
ctrl[r] = dv
self.layers[r].append(dv)
return dv
def setdummies(self,e):
"""creates and defines all needed dummy vertices for edge e.
"""
v0,v1 = e.v
r0,r1 = self.grx[v0].rank,self.grx[v1].rank
if r0>r1:
assert e in self.alt_e
v0,v1 = v1,v0
r0,r1 = r1,r0
if (r1-r0)>1:
# "dummy vertices" are stored in the edge ctrl dict,
# keyed by their rank in layers.
ctrl=self.ctrls[e]={}
ctrl[r0]=v0
ctrl[r1]=v1
for r in xrange(r0+1,r1):
self.dummyctrl(r,ctrl)
def draw_step(self):
"""iterator that computes all vertices coordinates and edge routing after
just one step (one layer after the other from top to bottom to top).
Purely inefficient ! Use it only for "animation" or debugging purpose.
"""
ostep = self.ordering_step()
for s in ostep:
self.setxy()
self.draw_edges()
yield s
def ordering_step(self,oneway=False):
"""iterator that computes all vertices ordering in their layers
(one layer after the other from top to bottom, to top again unless
oneway is True).
"""
self.dirv=-1
crossings = 0
for l in self.layers:
mvmt = l.order()
crossings += mvmt
yield (l,mvmt)
if oneway or (crossings == 0):
return
self.dirv=+1
while l:
mvmt = l.order()
yield (l,mvmt)
l = l.nextlayer()
def setxy(self):
"""computes all vertex coordinates (x,y) using
an algorithm by Brandes & Kopf.
"""
self._edge_inverter()
self._detect_alignment_conflicts()
inf = float('infinity')
# initialize vertex coordinates attributes:
for l in self.layers:
for v in l:
self.grx[v].root = v
self.grx[v].align = v
self.grx[v].sink = v
self.grx[v].shift = inf
self.grx[v].X = None
self.grx[v].x = [0.0]*4
curvh = self.dirvh # save current dirvh value
for dirvh in xrange(4):
self.dirvh = dirvh
self._coord_vertical_alignment()
self._coord_horizontal_compact()
self.dirvh = curvh # restore it
# vertical coordinate assigment of all nodes:
Y = 0
for l in self.layers:
dY = max([v.view.h/2. for v in l])
for v in l:
vx = sorted(self.grx[v].x)
# mean of the 2 medians out of the 4 x-coord computed above:
avgm = (vx[1]+vx[2])/2.
# final xy-coordinates :
v.view.xy = (avgm,Y+dY)
Y += 2*dY+self.yspace
self._edge_inverter()
def _detect_alignment_conflicts(self):
"""mark conflicts between edges:
inner edges are edges between dummy nodes
type 0 is regular crossing regular (or sharing vertex)
type 1 is inner crossing regular (targeted crossings)
type 2 is inner crossing inner (avoided by reduce_crossings phase)
"""
curvh = self.dirvh # save current dirvh value
self.dirvh=0
self.conflicts = []
for L in self.layers:
last = len(L)-1
prev = L.prevlayer()
if not prev: continue
k0=0
k1_init=len(prev)-1
l=0
for l1,v in enumerate(L):
if not self.grx[v].dummy: continue
if l1==last or v.inner(-1):
k1=k1_init
if v.inner(-1):
k1=self.grx[v.N(-1)[-1]].pos
for vl in L[l:l1+1]:
for vk in L._neighbors(vl):
k = self.grx[vk].pos
if (k<k0 or k>k1):
self.conflicts.append((vk,vl))
l=l1+1
k0=k1
self.dirvh = curvh # restore it
def _coord_vertical_alignment(self):
"""performs vertical alignment according to current dirvh internal state.
"""
dirh,dirv = self.dirh,self.dirv
g = self.grx
for l in self.layers[::-dirv]:
if not l.prevlayer(): continue
r=None
for vk in l[::dirh]:
for m in l._medianindex(vk):
# take the median node in dirv layer:
um = l.prevlayer()[m]
# if vk is "free" align it with um's root
if g[vk].align is vk:
if dirv==1: vpair = (vk,um)
else: vpair = (um,vk)
# if vk<->um link is used for alignment
if (vpair not in self.conflicts) and \
(r==None or dirh*r<dirh*m):
g[um].align = vk
g[vk].root = g[um].root
g[vk].align = g[vk].root
r = m
def _coord_horizontal_compact(self):
limit=getrecursionlimit()
N=len(self.layers)+10
if N>limit:
setrecursionlimit(N)
dirh,dirv = self.dirh,self.dirv
g = self.grx
L = self.layers[::-dirv]
# recursive placement of blocks:
for l in L:
for v in l[::dirh]:
if g[v].root is v:
self.__place_block(v)
setrecursionlimit(limit)
# mirror all nodes if right-aligned:
if dirh==-1:
for l in L:
for v in l:
x = g[v].X
if x: g[v].X = -x
# then assign x-coord of its root:
inf=float('infinity')
rb=inf
for l in L:
for v in l[::dirh]:
g[v].x[self.dirvh] = g[g[v].root].X
rs = g[g[v].root].sink
s = g[rs].shift
if s<inf:
g[v].x[self.dirvh] += dirh*s
rb = min(rb,g[v].x[self.dirvh])
# normalize to 0, and reinit root/align/sink/shift/X
for l in self.layers:
for v in l:
#g[v].x[dirvh] -= rb
g[v].root = g[v].align = g[v].sink = v
g[v].shift = inf
g[v].X = None
# TODO: rewrite in iterative form to avoid recursion limit...
def __place_block(self,v):
g = self.grx
if g[v].X==None:
# every block is initially placed at x=0
g[v].X = 0.0
# place block in which v belongs:
w = v
while 1:
j = g[w].pos-self.dirh # predecessor in rank must be placed
r = g[w].rank
if 0<= j <len(self.layers[r]):
wprec = self.layers[r][j]
delta = self.xspace+(wprec.view.w + w.view.w)/2. # abs positive minimum displ.
# take root and place block:
u = g[wprec].root
self.__place_block(u)
# set sink as sink of prec-block root
if g[v].sink is v:
g[v].sink = g[u].sink
if g[v].sink != g[u].sink:
s = g[u].sink
newshift = g[v].X-(g[u].X+delta)
g[s].shift = min(g[s].shift,newshift)
else:
g[v].X = max(g[v].X,(g[u].X+delta))
# take next node to align in block:
w = g[w].align
# quit if self aligned
if w is v: break
def draw_edges(self):
"""Basic edge routing applied only for edges with dummy points.
Enhanced edge routing can be performed by using the apropriate
*route_with_xxx* functions from :ref:routing_ in the edges' view.
"""
for e in self.g.E():
if hasattr(e,'view'):
l=[]
r0,r1 = None,None
if e in self.ctrls:
D = self.ctrls[e]
r0,r1 = self.grx[e.v[0]].rank,self.grx[e.v[1]].rank
if r0<r1:
ranks = xrange(r0+1,r1)
else:
ranks = xrange(r0-1,r1,-1)
l = [D[r].view.xy for r in ranks]
l.insert(0,e.v[0].view.xy)
l.append(e.v[1].view.xy)
try:
self.route_edge(e,l)
except AttributeError:
pass
e.view.setpath(l)
|
bdcht/grandalf
|
grandalf/layouts.py
|
SugiyamaLayout.setdummies
|
python
|
def setdummies(self,e):
v0,v1 = e.v
r0,r1 = self.grx[v0].rank,self.grx[v1].rank
if r0>r1:
assert e in self.alt_e
v0,v1 = v1,v0
r0,r1 = r1,r0
if (r1-r0)>1:
# "dummy vertices" are stored in the edge ctrl dict,
# keyed by their rank in layers.
ctrl=self.ctrls[e]={}
ctrl[r0]=v0
ctrl[r1]=v1
for r in xrange(r0+1,r1):
self.dummyctrl(r,ctrl)
|
creates and defines all needed dummy vertices for edge e.
|
train
|
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L545-L561
|
[
"def dummyctrl(self,r,ctrl):\n \"\"\"creates a DummyVertex at rank r inserted in the ctrl dict\n of the associated edge and layer.\n\n Arguments:\n r (int): rank value\n ctrl (dict): the edge's control vertices\n\n Returns:\n DummyVertex : the created DummyVertex.\n \"\"\"\n dv = DummyVertex(r)\n dv.view.w,dv.view.h=self.dw,self.dh\n self.grx[dv] = dv\n dv.ctrl = ctrl\n ctrl[r] = dv\n self.layers[r].append(dv)\n return dv\n"
] |
class SugiyamaLayout(object):
"""
The Sugiyama layout is the traditional "layered" graph layout called
*dot* in graphviz. This layout is quite efficient but heavily relies
on drawing heuristics. Adaptive drawing is limited to
extending the leaves only, but since the algorithm is quite fast
redrawing the entire graph (up to about a thousand nodes) gives
usually good results in less than a second.
The Sugiyama Layout Class takes as input a core_graph object and implements
an efficient drawing algorithm based on nodes dimensions provided through
a user-defined *view* property in each vertex.
Attributes:
dirvh (int): the current aligment state
order_inter (int): the default number of layer placement iterations
order_attr (str): set attribute name used for layer ordering
xspace (int): horizontal space between vertices in a layer
yspace (int): vertical space between layers
dw (int): default width of a vertex
dh (int): default height of a vertex
g (graph_core): the graph component reference
layers (list[Layer]): the list of layers
grx (dict): associate vertex (possibly dummy) with their sugiyama attributes
ctrls (dict): associate edge with all its vertices (including dummies)
dag (bool): the current acyclic state
initdone (bool): True if state is initialized (see init_all).
"""
def __init__(self,g):
from grandalf.utils.geometry import median_wh
# drawing parameters:
self.dirvh=0
self.order_iter = 8
self.order_attr = 'pos'
self.xspace = 20
self.yspace = 20
self.dw = 10
self.dh = 10
# For layered graphs, vertices and edges need to have some additional
# attributes that make sense only for this kind of layout:
# update graph struct:
self.g = g
self.layers = []
self.grx= {}
self.ctrls = {}
self.dag = False
for v in self.g.V():
assert hasattr(v,'view')
self.grx[v] = _sugiyama_vertex_attr()
self.dw,self.dh = median_wh([v.view for v in self.g.V()])
self.initdone = False
def init_all(self,roots=None,inverted_edges=None,optimize=False):
"""initializes the layout algorithm by computing roots (unless provided),
inverted edges (unless provided), vertices ranks and creates all dummy
vertices and layers.
Parameters:
roots (list[Vertex]): set *root* vertices (layer 0)
inverted_edges (list[Edge]): set edges to invert to have a DAG.
optimize (bool): optimize ranking if True (default False)
"""
if self.initdone: return
# For layered sugiyama algorithm, the input graph must be acyclic,
# so we must provide a list of root nodes and a list of inverted edges.
if roots==None:
roots = [v for v in self.g.sV if len(v.e_in())==0]
if inverted_edges==None:
L = self.g.get_scs_with_feedback(roots)
inverted_edges = [x for x in self.g.sE if x.feedback]
self.alt_e = inverted_edges
# assign rank to all vertices:
self.rank_all(roots,optimize)
# add dummy vertex/edge for 'long' edges:
for e in self.g.E():
self.setdummies(e)
# precompute some layers values:
for l in self.layers: l.setup(self)
self.initdone = True
def draw(self,N=1.5):
"""compute every node coordinates after converging to optimal ordering by N
rounds, and finally perform the edge routing.
"""
while N>0.5:
for (l,mvmt) in self.ordering_step():
pass
N = N-1
if N>0:
for (l,mvmt) in self.ordering_step(oneway=True):
pass
self.setxy()
self.draw_edges()
def _edge_inverter(self):
for e in self.alt_e:
x,y = e.v
e.v = (y,x)
self.dag = not self.dag
if self.dag:
for e in self.g.degenerated_edges:
e.detach()
self.g.sE.remove(e)
else:
for e in self.g.degenerated_edges:
self.g.add_edge(e)
# internal state for alignment policy:
# dirvh=0 -> dirh=+1, dirv=-1: leftmost upper
# dirvh=1 -> dirh=-1, dirv=-1: rightmost upper
# dirvh=2 -> dirh=+1, dirv=+1: leftmost lower
# dirvh=3 -> dirh=-1, dirv=+1: rightmost lower
@property
def dirvh(self): return self.__dirvh
@property
def dirv(self): return self.__dirv
@property
def dirh(self): return self.__dirh
@dirvh.setter
def dirvh(self,dirvh):
assert dirvh in range(4)
self.__dirvh=dirvh
self.__dirh,self.__dirv={0:(1,-1), 1:(-1,-1), 2:(1,1), 3:(-1,1)}[dirvh]
@dirv.setter
def dirv(self,dirv):
assert dirv in (-1,+1)
dirvh = (dirv+1)+(1-self.__dirh)//2
self.dirvh = dirvh
@dirh.setter
def dirh(self,dirh):
assert dirh in (-1,+1)
dirvh = (self.__dirv+1)+(1-dirh)//2
self.dirvh = dirvh
def rank_all(self,roots,optimize=False):
"""Computes rank of all vertices.
add provided roots to rank 0 vertices,
otherwise update ranking from provided roots.
The initial rank is based on precedence relationships,
optimal ranking may be derived from network flow (simplex).
"""
self._edge_inverter()
r = [x for x in self.g.sV if (len(x.e_in())==0 and x not in roots)]
self._rank_init(roots+r)
if optimize: self._rank_optimize()
self._edge_inverter()
def _rank_init(self,unranked):
"""Computes rank of provided unranked list of vertices and all
their children. A vertex will be asign a rank when all its
inward edges have been *scanned*. When a vertex is asigned
a rank, its outward edges are marked *scanned*.
"""
assert self.dag
scan = {}
# set rank of unranked based on its in-edges vertices ranks:
while len(unranked)>0:
l = []
for v in unranked:
self.setrank(v)
# mark out-edges has scan-able:
for e in v.e_out(): scan[e]=True
# check if out-vertices are rank-able:
for x in v.N(+1):
if not (False in [scan.get(e,False) for e in x.e_in()]):
if x not in l: l.append(x)
unranked=l
def _rank_optimize(self):
"""optimize ranking by pushing long edges toward lower layers as much as possible.
see other interersting network flow solver to minimize total edge length
(http://jgaa.info/accepted/2005/EiglspergerSiebenhallerKaufmann2005.9.3.pdf)
"""
assert self.dag
for l in reversed(self.layers):
for v in l:
gv = self.grx[v]
for x in v.N(-1):
if all((self.grx[y].rank>=gv.rank for y in x.N(+1))):
gx = self.grx[x]
self.layers[gx.rank].remove(x)
gx.rank = gv.rank-1
self.layers[gv.rank-1].append(x)
def setrank(self,v):
"""set rank value for vertex v and add it to the corresponding layer.
The Layer is created if it is the first vertex with this rank.
"""
assert self.dag
r=max([self.grx[x].rank for x in v.N(-1)]+[-1])+1
self.grx[v].rank=r
# add it to its layer:
try:
self.layers[r].append(v)
except IndexError:
assert r==len(self.layers)
self.layers.append(Layer([v]))
def dummyctrl(self,r,ctrl):
"""creates a DummyVertex at rank r inserted in the ctrl dict
of the associated edge and layer.
Arguments:
r (int): rank value
ctrl (dict): the edge's control vertices
Returns:
DummyVertex : the created DummyVertex.
"""
dv = DummyVertex(r)
dv.view.w,dv.view.h=self.dw,self.dh
self.grx[dv] = dv
dv.ctrl = ctrl
ctrl[r] = dv
self.layers[r].append(dv)
return dv
def draw_step(self):
"""iterator that computes all vertices coordinates and edge routing after
just one step (one layer after the other from top to bottom to top).
Purely inefficient ! Use it only for "animation" or debugging purpose.
"""
ostep = self.ordering_step()
for s in ostep:
self.setxy()
self.draw_edges()
yield s
def ordering_step(self,oneway=False):
"""iterator that computes all vertices ordering in their layers
(one layer after the other from top to bottom, to top again unless
oneway is True).
"""
self.dirv=-1
crossings = 0
for l in self.layers:
mvmt = l.order()
crossings += mvmt
yield (l,mvmt)
if oneway or (crossings == 0):
return
self.dirv=+1
while l:
mvmt = l.order()
yield (l,mvmt)
l = l.nextlayer()
def setxy(self):
"""computes all vertex coordinates (x,y) using
an algorithm by Brandes & Kopf.
"""
self._edge_inverter()
self._detect_alignment_conflicts()
inf = float('infinity')
# initialize vertex coordinates attributes:
for l in self.layers:
for v in l:
self.grx[v].root = v
self.grx[v].align = v
self.grx[v].sink = v
self.grx[v].shift = inf
self.grx[v].X = None
self.grx[v].x = [0.0]*4
curvh = self.dirvh # save current dirvh value
for dirvh in xrange(4):
self.dirvh = dirvh
self._coord_vertical_alignment()
self._coord_horizontal_compact()
self.dirvh = curvh # restore it
# vertical coordinate assigment of all nodes:
Y = 0
for l in self.layers:
dY = max([v.view.h/2. for v in l])
for v in l:
vx = sorted(self.grx[v].x)
# mean of the 2 medians out of the 4 x-coord computed above:
avgm = (vx[1]+vx[2])/2.
# final xy-coordinates :
v.view.xy = (avgm,Y+dY)
Y += 2*dY+self.yspace
self._edge_inverter()
def _detect_alignment_conflicts(self):
"""mark conflicts between edges:
inner edges are edges between dummy nodes
type 0 is regular crossing regular (or sharing vertex)
type 1 is inner crossing regular (targeted crossings)
type 2 is inner crossing inner (avoided by reduce_crossings phase)
"""
curvh = self.dirvh # save current dirvh value
self.dirvh=0
self.conflicts = []
for L in self.layers:
last = len(L)-1
prev = L.prevlayer()
if not prev: continue
k0=0
k1_init=len(prev)-1
l=0
for l1,v in enumerate(L):
if not self.grx[v].dummy: continue
if l1==last or v.inner(-1):
k1=k1_init
if v.inner(-1):
k1=self.grx[v.N(-1)[-1]].pos
for vl in L[l:l1+1]:
for vk in L._neighbors(vl):
k = self.grx[vk].pos
if (k<k0 or k>k1):
self.conflicts.append((vk,vl))
l=l1+1
k0=k1
self.dirvh = curvh # restore it
def _coord_vertical_alignment(self):
"""performs vertical alignment according to current dirvh internal state.
"""
dirh,dirv = self.dirh,self.dirv
g = self.grx
for l in self.layers[::-dirv]:
if not l.prevlayer(): continue
r=None
for vk in l[::dirh]:
for m in l._medianindex(vk):
# take the median node in dirv layer:
um = l.prevlayer()[m]
# if vk is "free" align it with um's root
if g[vk].align is vk:
if dirv==1: vpair = (vk,um)
else: vpair = (um,vk)
# if vk<->um link is used for alignment
if (vpair not in self.conflicts) and \
(r==None or dirh*r<dirh*m):
g[um].align = vk
g[vk].root = g[um].root
g[vk].align = g[vk].root
r = m
def _coord_horizontal_compact(self):
limit=getrecursionlimit()
N=len(self.layers)+10
if N>limit:
setrecursionlimit(N)
dirh,dirv = self.dirh,self.dirv
g = self.grx
L = self.layers[::-dirv]
# recursive placement of blocks:
for l in L:
for v in l[::dirh]:
if g[v].root is v:
self.__place_block(v)
setrecursionlimit(limit)
# mirror all nodes if right-aligned:
if dirh==-1:
for l in L:
for v in l:
x = g[v].X
if x: g[v].X = -x
# then assign x-coord of its root:
inf=float('infinity')
rb=inf
for l in L:
for v in l[::dirh]:
g[v].x[self.dirvh] = g[g[v].root].X
rs = g[g[v].root].sink
s = g[rs].shift
if s<inf:
g[v].x[self.dirvh] += dirh*s
rb = min(rb,g[v].x[self.dirvh])
# normalize to 0, and reinit root/align/sink/shift/X
for l in self.layers:
for v in l:
#g[v].x[dirvh] -= rb
g[v].root = g[v].align = g[v].sink = v
g[v].shift = inf
g[v].X = None
# TODO: rewrite in iterative form to avoid recursion limit...
def __place_block(self,v):
g = self.grx
if g[v].X==None:
# every block is initially placed at x=0
g[v].X = 0.0
# place block in which v belongs:
w = v
while 1:
j = g[w].pos-self.dirh # predecessor in rank must be placed
r = g[w].rank
if 0<= j <len(self.layers[r]):
wprec = self.layers[r][j]
delta = self.xspace+(wprec.view.w + w.view.w)/2. # abs positive minimum displ.
# take root and place block:
u = g[wprec].root
self.__place_block(u)
# set sink as sink of prec-block root
if g[v].sink is v:
g[v].sink = g[u].sink
if g[v].sink != g[u].sink:
s = g[u].sink
newshift = g[v].X-(g[u].X+delta)
g[s].shift = min(g[s].shift,newshift)
else:
g[v].X = max(g[v].X,(g[u].X+delta))
# take next node to align in block:
w = g[w].align
# quit if self aligned
if w is v: break
def draw_edges(self):
"""Basic edge routing applied only for edges with dummy points.
Enhanced edge routing can be performed by using the apropriate
*route_with_xxx* functions from :ref:routing_ in the edges' view.
"""
for e in self.g.E():
if hasattr(e,'view'):
l=[]
r0,r1 = None,None
if e in self.ctrls:
D = self.ctrls[e]
r0,r1 = self.grx[e.v[0]].rank,self.grx[e.v[1]].rank
if r0<r1:
ranks = xrange(r0+1,r1)
else:
ranks = xrange(r0-1,r1,-1)
l = [D[r].view.xy for r in ranks]
l.insert(0,e.v[0].view.xy)
l.append(e.v[1].view.xy)
try:
self.route_edge(e,l)
except AttributeError:
pass
e.view.setpath(l)
|
bdcht/grandalf
|
grandalf/layouts.py
|
SugiyamaLayout.draw_step
|
python
|
def draw_step(self):
ostep = self.ordering_step()
for s in ostep:
self.setxy()
self.draw_edges()
yield s
|
iterator that computes all vertices coordinates and edge routing after
just one step (one layer after the other from top to bottom to top).
Purely inefficient ! Use it only for "animation" or debugging purpose.
|
train
|
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L563-L572
|
[
"def ordering_step(self,oneway=False):\n \"\"\"iterator that computes all vertices ordering in their layers\n (one layer after the other from top to bottom, to top again unless\n oneway is True).\n \"\"\"\n self.dirv=-1\n crossings = 0\n for l in self.layers:\n mvmt = l.order()\n crossings += mvmt\n yield (l,mvmt)\n if oneway or (crossings == 0):\n return\n self.dirv=+1\n while l:\n mvmt = l.order()\n yield (l,mvmt)\n l = l.nextlayer()\n",
"def setxy(self):\n \"\"\"computes all vertex coordinates (x,y) using\n an algorithm by Brandes & Kopf.\n \"\"\"\n self._edge_inverter()\n self._detect_alignment_conflicts()\n inf = float('infinity')\n # initialize vertex coordinates attributes:\n for l in self.layers:\n for v in l:\n self.grx[v].root = v\n self.grx[v].align = v\n self.grx[v].sink = v\n self.grx[v].shift = inf\n self.grx[v].X = None\n self.grx[v].x = [0.0]*4\n curvh = self.dirvh # save current dirvh value\n for dirvh in xrange(4):\n self.dirvh = dirvh\n self._coord_vertical_alignment()\n self._coord_horizontal_compact()\n self.dirvh = curvh # restore it\n # vertical coordinate assigment of all nodes:\n Y = 0\n for l in self.layers:\n dY = max([v.view.h/2. for v in l])\n for v in l:\n vx = sorted(self.grx[v].x)\n # mean of the 2 medians out of the 4 x-coord computed above:\n avgm = (vx[1]+vx[2])/2.\n # final xy-coordinates :\n v.view.xy = (avgm,Y+dY)\n Y += 2*dY+self.yspace\n self._edge_inverter()\n",
"def draw_edges(self):\n \"\"\"Basic edge routing applied only for edges with dummy points.\n Enhanced edge routing can be performed by using the apropriate\n *route_with_xxx* functions from :ref:routing_ in the edges' view.\n \"\"\"\n for e in self.g.E():\n if hasattr(e,'view'):\n l=[]\n r0,r1 = None,None\n if e in self.ctrls:\n D = self.ctrls[e]\n r0,r1 = self.grx[e.v[0]].rank,self.grx[e.v[1]].rank\n if r0<r1:\n ranks = xrange(r0+1,r1)\n else:\n ranks = xrange(r0-1,r1,-1)\n l = [D[r].view.xy for r in ranks]\n l.insert(0,e.v[0].view.xy)\n l.append(e.v[1].view.xy)\n try:\n self.route_edge(e,l)\n except AttributeError:\n pass\n e.view.setpath(l)\n"
] |
class SugiyamaLayout(object):
"""
The Sugiyama layout is the traditional "layered" graph layout called
*dot* in graphviz. This layout is quite efficient but heavily relies
on drawing heuristics. Adaptive drawing is limited to
extending the leaves only, but since the algorithm is quite fast
redrawing the entire graph (up to about a thousand nodes) gives
usually good results in less than a second.
The Sugiyama Layout Class takes as input a core_graph object and implements
an efficient drawing algorithm based on nodes dimensions provided through
a user-defined *view* property in each vertex.
Attributes:
dirvh (int): the current aligment state
order_inter (int): the default number of layer placement iterations
order_attr (str): set attribute name used for layer ordering
xspace (int): horizontal space between vertices in a layer
yspace (int): vertical space between layers
dw (int): default width of a vertex
dh (int): default height of a vertex
g (graph_core): the graph component reference
layers (list[Layer]): the list of layers
grx (dict): associate vertex (possibly dummy) with their sugiyama attributes
ctrls (dict): associate edge with all its vertices (including dummies)
dag (bool): the current acyclic state
initdone (bool): True if state is initialized (see init_all).
"""
def __init__(self,g):
from grandalf.utils.geometry import median_wh
# drawing parameters:
self.dirvh=0
self.order_iter = 8
self.order_attr = 'pos'
self.xspace = 20
self.yspace = 20
self.dw = 10
self.dh = 10
# For layered graphs, vertices and edges need to have some additional
# attributes that make sense only for this kind of layout:
# update graph struct:
self.g = g
self.layers = []
self.grx= {}
self.ctrls = {}
self.dag = False
for v in self.g.V():
assert hasattr(v,'view')
self.grx[v] = _sugiyama_vertex_attr()
self.dw,self.dh = median_wh([v.view for v in self.g.V()])
self.initdone = False
def init_all(self,roots=None,inverted_edges=None,optimize=False):
"""initializes the layout algorithm by computing roots (unless provided),
inverted edges (unless provided), vertices ranks and creates all dummy
vertices and layers.
Parameters:
roots (list[Vertex]): set *root* vertices (layer 0)
inverted_edges (list[Edge]): set edges to invert to have a DAG.
optimize (bool): optimize ranking if True (default False)
"""
if self.initdone: return
# For layered sugiyama algorithm, the input graph must be acyclic,
# so we must provide a list of root nodes and a list of inverted edges.
if roots==None:
roots = [v for v in self.g.sV if len(v.e_in())==0]
if inverted_edges==None:
L = self.g.get_scs_with_feedback(roots)
inverted_edges = [x for x in self.g.sE if x.feedback]
self.alt_e = inverted_edges
# assign rank to all vertices:
self.rank_all(roots,optimize)
# add dummy vertex/edge for 'long' edges:
for e in self.g.E():
self.setdummies(e)
# precompute some layers values:
for l in self.layers: l.setup(self)
self.initdone = True
def draw(self,N=1.5):
"""compute every node coordinates after converging to optimal ordering by N
rounds, and finally perform the edge routing.
"""
while N>0.5:
for (l,mvmt) in self.ordering_step():
pass
N = N-1
if N>0:
for (l,mvmt) in self.ordering_step(oneway=True):
pass
self.setxy()
self.draw_edges()
def _edge_inverter(self):
for e in self.alt_e:
x,y = e.v
e.v = (y,x)
self.dag = not self.dag
if self.dag:
for e in self.g.degenerated_edges:
e.detach()
self.g.sE.remove(e)
else:
for e in self.g.degenerated_edges:
self.g.add_edge(e)
# internal state for alignment policy:
# dirvh=0 -> dirh=+1, dirv=-1: leftmost upper
# dirvh=1 -> dirh=-1, dirv=-1: rightmost upper
# dirvh=2 -> dirh=+1, dirv=+1: leftmost lower
# dirvh=3 -> dirh=-1, dirv=+1: rightmost lower
@property
def dirvh(self): return self.__dirvh
@property
def dirv(self): return self.__dirv
@property
def dirh(self): return self.__dirh
@dirvh.setter
def dirvh(self,dirvh):
assert dirvh in range(4)
self.__dirvh=dirvh
self.__dirh,self.__dirv={0:(1,-1), 1:(-1,-1), 2:(1,1), 3:(-1,1)}[dirvh]
@dirv.setter
def dirv(self,dirv):
assert dirv in (-1,+1)
dirvh = (dirv+1)+(1-self.__dirh)//2
self.dirvh = dirvh
@dirh.setter
def dirh(self,dirh):
assert dirh in (-1,+1)
dirvh = (self.__dirv+1)+(1-dirh)//2
self.dirvh = dirvh
def rank_all(self,roots,optimize=False):
"""Computes rank of all vertices.
add provided roots to rank 0 vertices,
otherwise update ranking from provided roots.
The initial rank is based on precedence relationships,
optimal ranking may be derived from network flow (simplex).
"""
self._edge_inverter()
r = [x for x in self.g.sV if (len(x.e_in())==0 and x not in roots)]
self._rank_init(roots+r)
if optimize: self._rank_optimize()
self._edge_inverter()
def _rank_init(self,unranked):
"""Computes rank of provided unranked list of vertices and all
their children. A vertex will be asign a rank when all its
inward edges have been *scanned*. When a vertex is asigned
a rank, its outward edges are marked *scanned*.
"""
assert self.dag
scan = {}
# set rank of unranked based on its in-edges vertices ranks:
while len(unranked)>0:
l = []
for v in unranked:
self.setrank(v)
# mark out-edges has scan-able:
for e in v.e_out(): scan[e]=True
# check if out-vertices are rank-able:
for x in v.N(+1):
if not (False in [scan.get(e,False) for e in x.e_in()]):
if x not in l: l.append(x)
unranked=l
def _rank_optimize(self):
"""optimize ranking by pushing long edges toward lower layers as much as possible.
see other interersting network flow solver to minimize total edge length
(http://jgaa.info/accepted/2005/EiglspergerSiebenhallerKaufmann2005.9.3.pdf)
"""
assert self.dag
for l in reversed(self.layers):
for v in l:
gv = self.grx[v]
for x in v.N(-1):
if all((self.grx[y].rank>=gv.rank for y in x.N(+1))):
gx = self.grx[x]
self.layers[gx.rank].remove(x)
gx.rank = gv.rank-1
self.layers[gv.rank-1].append(x)
def setrank(self,v):
"""set rank value for vertex v and add it to the corresponding layer.
The Layer is created if it is the first vertex with this rank.
"""
assert self.dag
r=max([self.grx[x].rank for x in v.N(-1)]+[-1])+1
self.grx[v].rank=r
# add it to its layer:
try:
self.layers[r].append(v)
except IndexError:
assert r==len(self.layers)
self.layers.append(Layer([v]))
def dummyctrl(self,r,ctrl):
"""creates a DummyVertex at rank r inserted in the ctrl dict
of the associated edge and layer.
Arguments:
r (int): rank value
ctrl (dict): the edge's control vertices
Returns:
DummyVertex : the created DummyVertex.
"""
dv = DummyVertex(r)
dv.view.w,dv.view.h=self.dw,self.dh
self.grx[dv] = dv
dv.ctrl = ctrl
ctrl[r] = dv
self.layers[r].append(dv)
return dv
def setdummies(self,e):
"""creates and defines all needed dummy vertices for edge e.
"""
v0,v1 = e.v
r0,r1 = self.grx[v0].rank,self.grx[v1].rank
if r0>r1:
assert e in self.alt_e
v0,v1 = v1,v0
r0,r1 = r1,r0
if (r1-r0)>1:
# "dummy vertices" are stored in the edge ctrl dict,
# keyed by their rank in layers.
ctrl=self.ctrls[e]={}
ctrl[r0]=v0
ctrl[r1]=v1
for r in xrange(r0+1,r1):
self.dummyctrl(r,ctrl)
def ordering_step(self,oneway=False):
"""iterator that computes all vertices ordering in their layers
(one layer after the other from top to bottom, to top again unless
oneway is True).
"""
self.dirv=-1
crossings = 0
for l in self.layers:
mvmt = l.order()
crossings += mvmt
yield (l,mvmt)
if oneway or (crossings == 0):
return
self.dirv=+1
while l:
mvmt = l.order()
yield (l,mvmt)
l = l.nextlayer()
def setxy(self):
"""computes all vertex coordinates (x,y) using
an algorithm by Brandes & Kopf.
"""
self._edge_inverter()
self._detect_alignment_conflicts()
inf = float('infinity')
# initialize vertex coordinates attributes:
for l in self.layers:
for v in l:
self.grx[v].root = v
self.grx[v].align = v
self.grx[v].sink = v
self.grx[v].shift = inf
self.grx[v].X = None
self.grx[v].x = [0.0]*4
curvh = self.dirvh # save current dirvh value
for dirvh in xrange(4):
self.dirvh = dirvh
self._coord_vertical_alignment()
self._coord_horizontal_compact()
self.dirvh = curvh # restore it
# vertical coordinate assigment of all nodes:
Y = 0
for l in self.layers:
dY = max([v.view.h/2. for v in l])
for v in l:
vx = sorted(self.grx[v].x)
# mean of the 2 medians out of the 4 x-coord computed above:
avgm = (vx[1]+vx[2])/2.
# final xy-coordinates :
v.view.xy = (avgm,Y+dY)
Y += 2*dY+self.yspace
self._edge_inverter()
def _detect_alignment_conflicts(self):
"""mark conflicts between edges:
inner edges are edges between dummy nodes
type 0 is regular crossing regular (or sharing vertex)
type 1 is inner crossing regular (targeted crossings)
type 2 is inner crossing inner (avoided by reduce_crossings phase)
"""
curvh = self.dirvh # save current dirvh value
self.dirvh=0
self.conflicts = []
for L in self.layers:
last = len(L)-1
prev = L.prevlayer()
if not prev: continue
k0=0
k1_init=len(prev)-1
l=0
for l1,v in enumerate(L):
if not self.grx[v].dummy: continue
if l1==last or v.inner(-1):
k1=k1_init
if v.inner(-1):
k1=self.grx[v.N(-1)[-1]].pos
for vl in L[l:l1+1]:
for vk in L._neighbors(vl):
k = self.grx[vk].pos
if (k<k0 or k>k1):
self.conflicts.append((vk,vl))
l=l1+1
k0=k1
self.dirvh = curvh # restore it
def _coord_vertical_alignment(self):
"""performs vertical alignment according to current dirvh internal state.
"""
dirh,dirv = self.dirh,self.dirv
g = self.grx
for l in self.layers[::-dirv]:
if not l.prevlayer(): continue
r=None
for vk in l[::dirh]:
for m in l._medianindex(vk):
# take the median node in dirv layer:
um = l.prevlayer()[m]
# if vk is "free" align it with um's root
if g[vk].align is vk:
if dirv==1: vpair = (vk,um)
else: vpair = (um,vk)
# if vk<->um link is used for alignment
if (vpair not in self.conflicts) and \
(r==None or dirh*r<dirh*m):
g[um].align = vk
g[vk].root = g[um].root
g[vk].align = g[vk].root
r = m
def _coord_horizontal_compact(self):
limit=getrecursionlimit()
N=len(self.layers)+10
if N>limit:
setrecursionlimit(N)
dirh,dirv = self.dirh,self.dirv
g = self.grx
L = self.layers[::-dirv]
# recursive placement of blocks:
for l in L:
for v in l[::dirh]:
if g[v].root is v:
self.__place_block(v)
setrecursionlimit(limit)
# mirror all nodes if right-aligned:
if dirh==-1:
for l in L:
for v in l:
x = g[v].X
if x: g[v].X = -x
# then assign x-coord of its root:
inf=float('infinity')
rb=inf
for l in L:
for v in l[::dirh]:
g[v].x[self.dirvh] = g[g[v].root].X
rs = g[g[v].root].sink
s = g[rs].shift
if s<inf:
g[v].x[self.dirvh] += dirh*s
rb = min(rb,g[v].x[self.dirvh])
# normalize to 0, and reinit root/align/sink/shift/X
for l in self.layers:
for v in l:
#g[v].x[dirvh] -= rb
g[v].root = g[v].align = g[v].sink = v
g[v].shift = inf
g[v].X = None
# TODO: rewrite in iterative form to avoid recursion limit...
def __place_block(self,v):
g = self.grx
if g[v].X==None:
# every block is initially placed at x=0
g[v].X = 0.0
# place block in which v belongs:
w = v
while 1:
j = g[w].pos-self.dirh # predecessor in rank must be placed
r = g[w].rank
if 0<= j <len(self.layers[r]):
wprec = self.layers[r][j]
delta = self.xspace+(wprec.view.w + w.view.w)/2. # abs positive minimum displ.
# take root and place block:
u = g[wprec].root
self.__place_block(u)
# set sink as sink of prec-block root
if g[v].sink is v:
g[v].sink = g[u].sink
if g[v].sink != g[u].sink:
s = g[u].sink
newshift = g[v].X-(g[u].X+delta)
g[s].shift = min(g[s].shift,newshift)
else:
g[v].X = max(g[v].X,(g[u].X+delta))
# take next node to align in block:
w = g[w].align
# quit if self aligned
if w is v: break
def draw_edges(self):
"""Basic edge routing applied only for edges with dummy points.
Enhanced edge routing can be performed by using the apropriate
*route_with_xxx* functions from :ref:routing_ in the edges' view.
"""
for e in self.g.E():
if hasattr(e,'view'):
l=[]
r0,r1 = None,None
if e in self.ctrls:
D = self.ctrls[e]
r0,r1 = self.grx[e.v[0]].rank,self.grx[e.v[1]].rank
if r0<r1:
ranks = xrange(r0+1,r1)
else:
ranks = xrange(r0-1,r1,-1)
l = [D[r].view.xy for r in ranks]
l.insert(0,e.v[0].view.xy)
l.append(e.v[1].view.xy)
try:
self.route_edge(e,l)
except AttributeError:
pass
e.view.setpath(l)
|
bdcht/grandalf
|
grandalf/layouts.py
|
SugiyamaLayout.ordering_step
|
python
|
def ordering_step(self,oneway=False):
self.dirv=-1
crossings = 0
for l in self.layers:
mvmt = l.order()
crossings += mvmt
yield (l,mvmt)
if oneway or (crossings == 0):
return
self.dirv=+1
while l:
mvmt = l.order()
yield (l,mvmt)
l = l.nextlayer()
|
iterator that computes all vertices ordering in their layers
(one layer after the other from top to bottom, to top again unless
oneway is True).
|
train
|
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L574-L591
| null |
class SugiyamaLayout(object):
"""
The Sugiyama layout is the traditional "layered" graph layout called
*dot* in graphviz. This layout is quite efficient but heavily relies
on drawing heuristics. Adaptive drawing is limited to
extending the leaves only, but since the algorithm is quite fast
redrawing the entire graph (up to about a thousand nodes) gives
usually good results in less than a second.
The Sugiyama Layout Class takes as input a core_graph object and implements
an efficient drawing algorithm based on nodes dimensions provided through
a user-defined *view* property in each vertex.
Attributes:
dirvh (int): the current aligment state
order_inter (int): the default number of layer placement iterations
order_attr (str): set attribute name used for layer ordering
xspace (int): horizontal space between vertices in a layer
yspace (int): vertical space between layers
dw (int): default width of a vertex
dh (int): default height of a vertex
g (graph_core): the graph component reference
layers (list[Layer]): the list of layers
grx (dict): associate vertex (possibly dummy) with their sugiyama attributes
ctrls (dict): associate edge with all its vertices (including dummies)
dag (bool): the current acyclic state
initdone (bool): True if state is initialized (see init_all).
"""
def __init__(self,g):
from grandalf.utils.geometry import median_wh
# drawing parameters:
self.dirvh=0
self.order_iter = 8
self.order_attr = 'pos'
self.xspace = 20
self.yspace = 20
self.dw = 10
self.dh = 10
# For layered graphs, vertices and edges need to have some additional
# attributes that make sense only for this kind of layout:
# update graph struct:
self.g = g
self.layers = []
self.grx= {}
self.ctrls = {}
self.dag = False
for v in self.g.V():
assert hasattr(v,'view')
self.grx[v] = _sugiyama_vertex_attr()
self.dw,self.dh = median_wh([v.view for v in self.g.V()])
self.initdone = False
def init_all(self,roots=None,inverted_edges=None,optimize=False):
"""initializes the layout algorithm by computing roots (unless provided),
inverted edges (unless provided), vertices ranks and creates all dummy
vertices and layers.
Parameters:
roots (list[Vertex]): set *root* vertices (layer 0)
inverted_edges (list[Edge]): set edges to invert to have a DAG.
optimize (bool): optimize ranking if True (default False)
"""
if self.initdone: return
# For layered sugiyama algorithm, the input graph must be acyclic,
# so we must provide a list of root nodes and a list of inverted edges.
if roots==None:
roots = [v for v in self.g.sV if len(v.e_in())==0]
if inverted_edges==None:
L = self.g.get_scs_with_feedback(roots)
inverted_edges = [x for x in self.g.sE if x.feedback]
self.alt_e = inverted_edges
# assign rank to all vertices:
self.rank_all(roots,optimize)
# add dummy vertex/edge for 'long' edges:
for e in self.g.E():
self.setdummies(e)
# precompute some layers values:
for l in self.layers: l.setup(self)
self.initdone = True
def draw(self,N=1.5):
"""compute every node coordinates after converging to optimal ordering by N
rounds, and finally perform the edge routing.
"""
while N>0.5:
for (l,mvmt) in self.ordering_step():
pass
N = N-1
if N>0:
for (l,mvmt) in self.ordering_step(oneway=True):
pass
self.setxy()
self.draw_edges()
def _edge_inverter(self):
for e in self.alt_e:
x,y = e.v
e.v = (y,x)
self.dag = not self.dag
if self.dag:
for e in self.g.degenerated_edges:
e.detach()
self.g.sE.remove(e)
else:
for e in self.g.degenerated_edges:
self.g.add_edge(e)
# internal state for alignment policy:
# dirvh=0 -> dirh=+1, dirv=-1: leftmost upper
# dirvh=1 -> dirh=-1, dirv=-1: rightmost upper
# dirvh=2 -> dirh=+1, dirv=+1: leftmost lower
# dirvh=3 -> dirh=-1, dirv=+1: rightmost lower
@property
def dirvh(self): return self.__dirvh
@property
def dirv(self): return self.__dirv
@property
def dirh(self): return self.__dirh
@dirvh.setter
def dirvh(self,dirvh):
assert dirvh in range(4)
self.__dirvh=dirvh
self.__dirh,self.__dirv={0:(1,-1), 1:(-1,-1), 2:(1,1), 3:(-1,1)}[dirvh]
@dirv.setter
def dirv(self,dirv):
assert dirv in (-1,+1)
dirvh = (dirv+1)+(1-self.__dirh)//2
self.dirvh = dirvh
@dirh.setter
def dirh(self,dirh):
assert dirh in (-1,+1)
dirvh = (self.__dirv+1)+(1-dirh)//2
self.dirvh = dirvh
def rank_all(self,roots,optimize=False):
"""Computes rank of all vertices.
add provided roots to rank 0 vertices,
otherwise update ranking from provided roots.
The initial rank is based on precedence relationships,
optimal ranking may be derived from network flow (simplex).
"""
self._edge_inverter()
r = [x for x in self.g.sV if (len(x.e_in())==0 and x not in roots)]
self._rank_init(roots+r)
if optimize: self._rank_optimize()
self._edge_inverter()
def _rank_init(self,unranked):
"""Computes rank of provided unranked list of vertices and all
their children. A vertex will be asign a rank when all its
inward edges have been *scanned*. When a vertex is asigned
a rank, its outward edges are marked *scanned*.
"""
assert self.dag
scan = {}
# set rank of unranked based on its in-edges vertices ranks:
while len(unranked)>0:
l = []
for v in unranked:
self.setrank(v)
# mark out-edges has scan-able:
for e in v.e_out(): scan[e]=True
# check if out-vertices are rank-able:
for x in v.N(+1):
if not (False in [scan.get(e,False) for e in x.e_in()]):
if x not in l: l.append(x)
unranked=l
def _rank_optimize(self):
"""optimize ranking by pushing long edges toward lower layers as much as possible.
see other interersting network flow solver to minimize total edge length
(http://jgaa.info/accepted/2005/EiglspergerSiebenhallerKaufmann2005.9.3.pdf)
"""
assert self.dag
for l in reversed(self.layers):
for v in l:
gv = self.grx[v]
for x in v.N(-1):
if all((self.grx[y].rank>=gv.rank for y in x.N(+1))):
gx = self.grx[x]
self.layers[gx.rank].remove(x)
gx.rank = gv.rank-1
self.layers[gv.rank-1].append(x)
def setrank(self,v):
"""set rank value for vertex v and add it to the corresponding layer.
The Layer is created if it is the first vertex with this rank.
"""
assert self.dag
r=max([self.grx[x].rank for x in v.N(-1)]+[-1])+1
self.grx[v].rank=r
# add it to its layer:
try:
self.layers[r].append(v)
except IndexError:
assert r==len(self.layers)
self.layers.append(Layer([v]))
def dummyctrl(self,r,ctrl):
"""creates a DummyVertex at rank r inserted in the ctrl dict
of the associated edge and layer.
Arguments:
r (int): rank value
ctrl (dict): the edge's control vertices
Returns:
DummyVertex : the created DummyVertex.
"""
dv = DummyVertex(r)
dv.view.w,dv.view.h=self.dw,self.dh
self.grx[dv] = dv
dv.ctrl = ctrl
ctrl[r] = dv
self.layers[r].append(dv)
return dv
def setdummies(self,e):
"""creates and defines all needed dummy vertices for edge e.
"""
v0,v1 = e.v
r0,r1 = self.grx[v0].rank,self.grx[v1].rank
if r0>r1:
assert e in self.alt_e
v0,v1 = v1,v0
r0,r1 = r1,r0
if (r1-r0)>1:
# "dummy vertices" are stored in the edge ctrl dict,
# keyed by their rank in layers.
ctrl=self.ctrls[e]={}
ctrl[r0]=v0
ctrl[r1]=v1
for r in xrange(r0+1,r1):
self.dummyctrl(r,ctrl)
def draw_step(self):
"""iterator that computes all vertices coordinates and edge routing after
just one step (one layer after the other from top to bottom to top).
Purely inefficient ! Use it only for "animation" or debugging purpose.
"""
ostep = self.ordering_step()
for s in ostep:
self.setxy()
self.draw_edges()
yield s
def setxy(self):
"""computes all vertex coordinates (x,y) using
an algorithm by Brandes & Kopf.
"""
self._edge_inverter()
self._detect_alignment_conflicts()
inf = float('infinity')
# initialize vertex coordinates attributes:
for l in self.layers:
for v in l:
self.grx[v].root = v
self.grx[v].align = v
self.grx[v].sink = v
self.grx[v].shift = inf
self.grx[v].X = None
self.grx[v].x = [0.0]*4
curvh = self.dirvh # save current dirvh value
for dirvh in xrange(4):
self.dirvh = dirvh
self._coord_vertical_alignment()
self._coord_horizontal_compact()
self.dirvh = curvh # restore it
# vertical coordinate assigment of all nodes:
Y = 0
for l in self.layers:
dY = max([v.view.h/2. for v in l])
for v in l:
vx = sorted(self.grx[v].x)
# mean of the 2 medians out of the 4 x-coord computed above:
avgm = (vx[1]+vx[2])/2.
# final xy-coordinates :
v.view.xy = (avgm,Y+dY)
Y += 2*dY+self.yspace
self._edge_inverter()
def _detect_alignment_conflicts(self):
"""mark conflicts between edges:
inner edges are edges between dummy nodes
type 0 is regular crossing regular (or sharing vertex)
type 1 is inner crossing regular (targeted crossings)
type 2 is inner crossing inner (avoided by reduce_crossings phase)
"""
curvh = self.dirvh # save current dirvh value
self.dirvh=0
self.conflicts = []
for L in self.layers:
last = len(L)-1
prev = L.prevlayer()
if not prev: continue
k0=0
k1_init=len(prev)-1
l=0
for l1,v in enumerate(L):
if not self.grx[v].dummy: continue
if l1==last or v.inner(-1):
k1=k1_init
if v.inner(-1):
k1=self.grx[v.N(-1)[-1]].pos
for vl in L[l:l1+1]:
for vk in L._neighbors(vl):
k = self.grx[vk].pos
if (k<k0 or k>k1):
self.conflicts.append((vk,vl))
l=l1+1
k0=k1
self.dirvh = curvh # restore it
def _coord_vertical_alignment(self):
"""performs vertical alignment according to current dirvh internal state.
"""
dirh,dirv = self.dirh,self.dirv
g = self.grx
for l in self.layers[::-dirv]:
if not l.prevlayer(): continue
r=None
for vk in l[::dirh]:
for m in l._medianindex(vk):
# take the median node in dirv layer:
um = l.prevlayer()[m]
# if vk is "free" align it with um's root
if g[vk].align is vk:
if dirv==1: vpair = (vk,um)
else: vpair = (um,vk)
# if vk<->um link is used for alignment
if (vpair not in self.conflicts) and \
(r==None or dirh*r<dirh*m):
g[um].align = vk
g[vk].root = g[um].root
g[vk].align = g[vk].root
r = m
def _coord_horizontal_compact(self):
limit=getrecursionlimit()
N=len(self.layers)+10
if N>limit:
setrecursionlimit(N)
dirh,dirv = self.dirh,self.dirv
g = self.grx
L = self.layers[::-dirv]
# recursive placement of blocks:
for l in L:
for v in l[::dirh]:
if g[v].root is v:
self.__place_block(v)
setrecursionlimit(limit)
# mirror all nodes if right-aligned:
if dirh==-1:
for l in L:
for v in l:
x = g[v].X
if x: g[v].X = -x
# then assign x-coord of its root:
inf=float('infinity')
rb=inf
for l in L:
for v in l[::dirh]:
g[v].x[self.dirvh] = g[g[v].root].X
rs = g[g[v].root].sink
s = g[rs].shift
if s<inf:
g[v].x[self.dirvh] += dirh*s
rb = min(rb,g[v].x[self.dirvh])
# normalize to 0, and reinit root/align/sink/shift/X
for l in self.layers:
for v in l:
#g[v].x[dirvh] -= rb
g[v].root = g[v].align = g[v].sink = v
g[v].shift = inf
g[v].X = None
# TODO: rewrite in iterative form to avoid recursion limit...
def __place_block(self,v):
g = self.grx
if g[v].X==None:
# every block is initially placed at x=0
g[v].X = 0.0
# place block in which v belongs:
w = v
while 1:
j = g[w].pos-self.dirh # predecessor in rank must be placed
r = g[w].rank
if 0<= j <len(self.layers[r]):
wprec = self.layers[r][j]
delta = self.xspace+(wprec.view.w + w.view.w)/2. # abs positive minimum displ.
# take root and place block:
u = g[wprec].root
self.__place_block(u)
# set sink as sink of prec-block root
if g[v].sink is v:
g[v].sink = g[u].sink
if g[v].sink != g[u].sink:
s = g[u].sink
newshift = g[v].X-(g[u].X+delta)
g[s].shift = min(g[s].shift,newshift)
else:
g[v].X = max(g[v].X,(g[u].X+delta))
# take next node to align in block:
w = g[w].align
# quit if self aligned
if w is v: break
def draw_edges(self):
"""Basic edge routing applied only for edges with dummy points.
Enhanced edge routing can be performed by using the apropriate
*route_with_xxx* functions from :ref:routing_ in the edges' view.
"""
for e in self.g.E():
if hasattr(e,'view'):
l=[]
r0,r1 = None,None
if e in self.ctrls:
D = self.ctrls[e]
r0,r1 = self.grx[e.v[0]].rank,self.grx[e.v[1]].rank
if r0<r1:
ranks = xrange(r0+1,r1)
else:
ranks = xrange(r0-1,r1,-1)
l = [D[r].view.xy for r in ranks]
l.insert(0,e.v[0].view.xy)
l.append(e.v[1].view.xy)
try:
self.route_edge(e,l)
except AttributeError:
pass
e.view.setpath(l)
|
bdcht/grandalf
|
grandalf/layouts.py
|
SugiyamaLayout.setxy
|
python
|
def setxy(self):
self._edge_inverter()
self._detect_alignment_conflicts()
inf = float('infinity')
# initialize vertex coordinates attributes:
for l in self.layers:
for v in l:
self.grx[v].root = v
self.grx[v].align = v
self.grx[v].sink = v
self.grx[v].shift = inf
self.grx[v].X = None
self.grx[v].x = [0.0]*4
curvh = self.dirvh # save current dirvh value
for dirvh in xrange(4):
self.dirvh = dirvh
self._coord_vertical_alignment()
self._coord_horizontal_compact()
self.dirvh = curvh # restore it
# vertical coordinate assigment of all nodes:
Y = 0
for l in self.layers:
dY = max([v.view.h/2. for v in l])
for v in l:
vx = sorted(self.grx[v].x)
# mean of the 2 medians out of the 4 x-coord computed above:
avgm = (vx[1]+vx[2])/2.
# final xy-coordinates :
v.view.xy = (avgm,Y+dY)
Y += 2*dY+self.yspace
self._edge_inverter()
|
computes all vertex coordinates (x,y) using
an algorithm by Brandes & Kopf.
|
train
|
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L593-L626
|
[
"def _edge_inverter(self):\n for e in self.alt_e:\n x,y = e.v\n e.v = (y,x)\n self.dag = not self.dag\n if self.dag:\n for e in self.g.degenerated_edges:\n e.detach()\n self.g.sE.remove(e)\n else:\n for e in self.g.degenerated_edges:\n self.g.add_edge(e)\n",
"def _detect_alignment_conflicts(self):\n \"\"\"mark conflicts between edges:\n inner edges are edges between dummy nodes\n type 0 is regular crossing regular (or sharing vertex)\n type 1 is inner crossing regular (targeted crossings)\n type 2 is inner crossing inner (avoided by reduce_crossings phase)\n \"\"\"\n curvh = self.dirvh # save current dirvh value\n self.dirvh=0\n self.conflicts = []\n for L in self.layers:\n last = len(L)-1\n prev = L.prevlayer()\n if not prev: continue\n k0=0\n k1_init=len(prev)-1\n l=0\n for l1,v in enumerate(L):\n if not self.grx[v].dummy: continue\n if l1==last or v.inner(-1):\n k1=k1_init\n if v.inner(-1):\n k1=self.grx[v.N(-1)[-1]].pos\n for vl in L[l:l1+1]:\n for vk in L._neighbors(vl):\n k = self.grx[vk].pos\n if (k<k0 or k>k1):\n self.conflicts.append((vk,vl))\n l=l1+1\n k0=k1\n self.dirvh = curvh # restore it\n",
"def _coord_vertical_alignment(self):\n \"\"\"performs vertical alignment according to current dirvh internal state.\n \"\"\"\n dirh,dirv = self.dirh,self.dirv\n g = self.grx\n for l in self.layers[::-dirv]:\n if not l.prevlayer(): continue\n r=None\n for vk in l[::dirh]:\n for m in l._medianindex(vk):\n # take the median node in dirv layer:\n um = l.prevlayer()[m]\n # if vk is \"free\" align it with um's root\n if g[vk].align is vk:\n if dirv==1: vpair = (vk,um)\n else: vpair = (um,vk)\n # if vk<->um link is used for alignment\n if (vpair not in self.conflicts) and \\\n (r==None or dirh*r<dirh*m):\n g[um].align = vk\n g[vk].root = g[um].root\n g[vk].align = g[vk].root\n r = m\n",
"def _coord_horizontal_compact(self):\n limit=getrecursionlimit()\n N=len(self.layers)+10\n if N>limit:\n setrecursionlimit(N)\n dirh,dirv = self.dirh,self.dirv\n g = self.grx\n L = self.layers[::-dirv]\n # recursive placement of blocks:\n for l in L:\n for v in l[::dirh]:\n if g[v].root is v:\n self.__place_block(v)\n setrecursionlimit(limit)\n # mirror all nodes if right-aligned:\n if dirh==-1:\n for l in L:\n for v in l:\n x = g[v].X\n if x: g[v].X = -x\n # then assign x-coord of its root:\n inf=float('infinity')\n rb=inf\n for l in L:\n for v in l[::dirh]:\n g[v].x[self.dirvh] = g[g[v].root].X\n rs = g[g[v].root].sink\n s = g[rs].shift\n if s<inf:\n g[v].x[self.dirvh] += dirh*s\n rb = min(rb,g[v].x[self.dirvh])\n # normalize to 0, and reinit root/align/sink/shift/X\n for l in self.layers:\n for v in l:\n #g[v].x[dirvh] -= rb\n g[v].root = g[v].align = g[v].sink = v\n g[v].shift = inf\n g[v].X = None\n"
] |
class SugiyamaLayout(object):
"""
The Sugiyama layout is the traditional "layered" graph layout called
*dot* in graphviz. This layout is quite efficient but heavily relies
on drawing heuristics. Adaptive drawing is limited to
extending the leaves only, but since the algorithm is quite fast
redrawing the entire graph (up to about a thousand nodes) gives
usually good results in less than a second.
The Sugiyama Layout Class takes as input a core_graph object and implements
an efficient drawing algorithm based on nodes dimensions provided through
a user-defined *view* property in each vertex.
Attributes:
dirvh (int): the current aligment state
order_inter (int): the default number of layer placement iterations
order_attr (str): set attribute name used for layer ordering
xspace (int): horizontal space between vertices in a layer
yspace (int): vertical space between layers
dw (int): default width of a vertex
dh (int): default height of a vertex
g (graph_core): the graph component reference
layers (list[Layer]): the list of layers
grx (dict): associate vertex (possibly dummy) with their sugiyama attributes
ctrls (dict): associate edge with all its vertices (including dummies)
dag (bool): the current acyclic state
initdone (bool): True if state is initialized (see init_all).
"""
def __init__(self,g):
from grandalf.utils.geometry import median_wh
# drawing parameters:
self.dirvh=0
self.order_iter = 8
self.order_attr = 'pos'
self.xspace = 20
self.yspace = 20
self.dw = 10
self.dh = 10
# For layered graphs, vertices and edges need to have some additional
# attributes that make sense only for this kind of layout:
# update graph struct:
self.g = g
self.layers = []
self.grx= {}
self.ctrls = {}
self.dag = False
for v in self.g.V():
assert hasattr(v,'view')
self.grx[v] = _sugiyama_vertex_attr()
self.dw,self.dh = median_wh([v.view for v in self.g.V()])
self.initdone = False
def init_all(self,roots=None,inverted_edges=None,optimize=False):
"""initializes the layout algorithm by computing roots (unless provided),
inverted edges (unless provided), vertices ranks and creates all dummy
vertices and layers.
Parameters:
roots (list[Vertex]): set *root* vertices (layer 0)
inverted_edges (list[Edge]): set edges to invert to have a DAG.
optimize (bool): optimize ranking if True (default False)
"""
if self.initdone: return
# For layered sugiyama algorithm, the input graph must be acyclic,
# so we must provide a list of root nodes and a list of inverted edges.
if roots==None:
roots = [v for v in self.g.sV if len(v.e_in())==0]
if inverted_edges==None:
L = self.g.get_scs_with_feedback(roots)
inverted_edges = [x for x in self.g.sE if x.feedback]
self.alt_e = inverted_edges
# assign rank to all vertices:
self.rank_all(roots,optimize)
# add dummy vertex/edge for 'long' edges:
for e in self.g.E():
self.setdummies(e)
# precompute some layers values:
for l in self.layers: l.setup(self)
self.initdone = True
def draw(self,N=1.5):
"""compute every node coordinates after converging to optimal ordering by N
rounds, and finally perform the edge routing.
"""
while N>0.5:
for (l,mvmt) in self.ordering_step():
pass
N = N-1
if N>0:
for (l,mvmt) in self.ordering_step(oneway=True):
pass
self.setxy()
self.draw_edges()
def _edge_inverter(self):
for e in self.alt_e:
x,y = e.v
e.v = (y,x)
self.dag = not self.dag
if self.dag:
for e in self.g.degenerated_edges:
e.detach()
self.g.sE.remove(e)
else:
for e in self.g.degenerated_edges:
self.g.add_edge(e)
# internal state for alignment policy:
# dirvh=0 -> dirh=+1, dirv=-1: leftmost upper
# dirvh=1 -> dirh=-1, dirv=-1: rightmost upper
# dirvh=2 -> dirh=+1, dirv=+1: leftmost lower
# dirvh=3 -> dirh=-1, dirv=+1: rightmost lower
@property
def dirvh(self): return self.__dirvh
@property
def dirv(self): return self.__dirv
@property
def dirh(self): return self.__dirh
@dirvh.setter
def dirvh(self,dirvh):
assert dirvh in range(4)
self.__dirvh=dirvh
self.__dirh,self.__dirv={0:(1,-1), 1:(-1,-1), 2:(1,1), 3:(-1,1)}[dirvh]
@dirv.setter
def dirv(self,dirv):
assert dirv in (-1,+1)
dirvh = (dirv+1)+(1-self.__dirh)//2
self.dirvh = dirvh
@dirh.setter
def dirh(self,dirh):
assert dirh in (-1,+1)
dirvh = (self.__dirv+1)+(1-dirh)//2
self.dirvh = dirvh
def rank_all(self,roots,optimize=False):
"""Computes rank of all vertices.
add provided roots to rank 0 vertices,
otherwise update ranking from provided roots.
The initial rank is based on precedence relationships,
optimal ranking may be derived from network flow (simplex).
"""
self._edge_inverter()
r = [x for x in self.g.sV if (len(x.e_in())==0 and x not in roots)]
self._rank_init(roots+r)
if optimize: self._rank_optimize()
self._edge_inverter()
def _rank_init(self,unranked):
"""Computes rank of provided unranked list of vertices and all
their children. A vertex will be asign a rank when all its
inward edges have been *scanned*. When a vertex is asigned
a rank, its outward edges are marked *scanned*.
"""
assert self.dag
scan = {}
# set rank of unranked based on its in-edges vertices ranks:
while len(unranked)>0:
l = []
for v in unranked:
self.setrank(v)
# mark out-edges has scan-able:
for e in v.e_out(): scan[e]=True
# check if out-vertices are rank-able:
for x in v.N(+1):
if not (False in [scan.get(e,False) for e in x.e_in()]):
if x not in l: l.append(x)
unranked=l
def _rank_optimize(self):
"""optimize ranking by pushing long edges toward lower layers as much as possible.
see other interersting network flow solver to minimize total edge length
(http://jgaa.info/accepted/2005/EiglspergerSiebenhallerKaufmann2005.9.3.pdf)
"""
assert self.dag
for l in reversed(self.layers):
for v in l:
gv = self.grx[v]
for x in v.N(-1):
if all((self.grx[y].rank>=gv.rank for y in x.N(+1))):
gx = self.grx[x]
self.layers[gx.rank].remove(x)
gx.rank = gv.rank-1
self.layers[gv.rank-1].append(x)
def setrank(self,v):
"""set rank value for vertex v and add it to the corresponding layer.
The Layer is created if it is the first vertex with this rank.
"""
assert self.dag
r=max([self.grx[x].rank for x in v.N(-1)]+[-1])+1
self.grx[v].rank=r
# add it to its layer:
try:
self.layers[r].append(v)
except IndexError:
assert r==len(self.layers)
self.layers.append(Layer([v]))
def dummyctrl(self,r,ctrl):
"""creates a DummyVertex at rank r inserted in the ctrl dict
of the associated edge and layer.
Arguments:
r (int): rank value
ctrl (dict): the edge's control vertices
Returns:
DummyVertex : the created DummyVertex.
"""
dv = DummyVertex(r)
dv.view.w,dv.view.h=self.dw,self.dh
self.grx[dv] = dv
dv.ctrl = ctrl
ctrl[r] = dv
self.layers[r].append(dv)
return dv
def setdummies(self,e):
"""creates and defines all needed dummy vertices for edge e.
"""
v0,v1 = e.v
r0,r1 = self.grx[v0].rank,self.grx[v1].rank
if r0>r1:
assert e in self.alt_e
v0,v1 = v1,v0
r0,r1 = r1,r0
if (r1-r0)>1:
# "dummy vertices" are stored in the edge ctrl dict,
# keyed by their rank in layers.
ctrl=self.ctrls[e]={}
ctrl[r0]=v0
ctrl[r1]=v1
for r in xrange(r0+1,r1):
self.dummyctrl(r,ctrl)
def draw_step(self):
"""iterator that computes all vertices coordinates and edge routing after
just one step (one layer after the other from top to bottom to top).
Purely inefficient ! Use it only for "animation" or debugging purpose.
"""
ostep = self.ordering_step()
for s in ostep:
self.setxy()
self.draw_edges()
yield s
def ordering_step(self,oneway=False):
"""iterator that computes all vertices ordering in their layers
(one layer after the other from top to bottom, to top again unless
oneway is True).
"""
self.dirv=-1
crossings = 0
for l in self.layers:
mvmt = l.order()
crossings += mvmt
yield (l,mvmt)
if oneway or (crossings == 0):
return
self.dirv=+1
while l:
mvmt = l.order()
yield (l,mvmt)
l = l.nextlayer()
def _detect_alignment_conflicts(self):
"""mark conflicts between edges:
inner edges are edges between dummy nodes
type 0 is regular crossing regular (or sharing vertex)
type 1 is inner crossing regular (targeted crossings)
type 2 is inner crossing inner (avoided by reduce_crossings phase)
"""
curvh = self.dirvh # save current dirvh value
self.dirvh=0
self.conflicts = []
for L in self.layers:
last = len(L)-1
prev = L.prevlayer()
if not prev: continue
k0=0
k1_init=len(prev)-1
l=0
for l1,v in enumerate(L):
if not self.grx[v].dummy: continue
if l1==last or v.inner(-1):
k1=k1_init
if v.inner(-1):
k1=self.grx[v.N(-1)[-1]].pos
for vl in L[l:l1+1]:
for vk in L._neighbors(vl):
k = self.grx[vk].pos
if (k<k0 or k>k1):
self.conflicts.append((vk,vl))
l=l1+1
k0=k1
self.dirvh = curvh # restore it
def _coord_vertical_alignment(self):
"""performs vertical alignment according to current dirvh internal state.
"""
dirh,dirv = self.dirh,self.dirv
g = self.grx
for l in self.layers[::-dirv]:
if not l.prevlayer(): continue
r=None
for vk in l[::dirh]:
for m in l._medianindex(vk):
# take the median node in dirv layer:
um = l.prevlayer()[m]
# if vk is "free" align it with um's root
if g[vk].align is vk:
if dirv==1: vpair = (vk,um)
else: vpair = (um,vk)
# if vk<->um link is used for alignment
if (vpair not in self.conflicts) and \
(r==None or dirh*r<dirh*m):
g[um].align = vk
g[vk].root = g[um].root
g[vk].align = g[vk].root
r = m
def _coord_horizontal_compact(self):
limit=getrecursionlimit()
N=len(self.layers)+10
if N>limit:
setrecursionlimit(N)
dirh,dirv = self.dirh,self.dirv
g = self.grx
L = self.layers[::-dirv]
# recursive placement of blocks:
for l in L:
for v in l[::dirh]:
if g[v].root is v:
self.__place_block(v)
setrecursionlimit(limit)
# mirror all nodes if right-aligned:
if dirh==-1:
for l in L:
for v in l:
x = g[v].X
if x: g[v].X = -x
# then assign x-coord of its root:
inf=float('infinity')
rb=inf
for l in L:
for v in l[::dirh]:
g[v].x[self.dirvh] = g[g[v].root].X
rs = g[g[v].root].sink
s = g[rs].shift
if s<inf:
g[v].x[self.dirvh] += dirh*s
rb = min(rb,g[v].x[self.dirvh])
# normalize to 0, and reinit root/align/sink/shift/X
for l in self.layers:
for v in l:
#g[v].x[dirvh] -= rb
g[v].root = g[v].align = g[v].sink = v
g[v].shift = inf
g[v].X = None
# TODO: rewrite in iterative form to avoid recursion limit...
def __place_block(self,v):
g = self.grx
if g[v].X==None:
# every block is initially placed at x=0
g[v].X = 0.0
# place block in which v belongs:
w = v
while 1:
j = g[w].pos-self.dirh # predecessor in rank must be placed
r = g[w].rank
if 0<= j <len(self.layers[r]):
wprec = self.layers[r][j]
delta = self.xspace+(wprec.view.w + w.view.w)/2. # abs positive minimum displ.
# take root and place block:
u = g[wprec].root
self.__place_block(u)
# set sink as sink of prec-block root
if g[v].sink is v:
g[v].sink = g[u].sink
if g[v].sink != g[u].sink:
s = g[u].sink
newshift = g[v].X-(g[u].X+delta)
g[s].shift = min(g[s].shift,newshift)
else:
g[v].X = max(g[v].X,(g[u].X+delta))
# take next node to align in block:
w = g[w].align
# quit if self aligned
if w is v: break
def draw_edges(self):
"""Basic edge routing applied only for edges with dummy points.
Enhanced edge routing can be performed by using the apropriate
*route_with_xxx* functions from :ref:routing_ in the edges' view.
"""
for e in self.g.E():
if hasattr(e,'view'):
l=[]
r0,r1 = None,None
if e in self.ctrls:
D = self.ctrls[e]
r0,r1 = self.grx[e.v[0]].rank,self.grx[e.v[1]].rank
if r0<r1:
ranks = xrange(r0+1,r1)
else:
ranks = xrange(r0-1,r1,-1)
l = [D[r].view.xy for r in ranks]
l.insert(0,e.v[0].view.xy)
l.append(e.v[1].view.xy)
try:
self.route_edge(e,l)
except AttributeError:
pass
e.view.setpath(l)
|
bdcht/grandalf
|
grandalf/layouts.py
|
SugiyamaLayout._detect_alignment_conflicts
|
python
|
def _detect_alignment_conflicts(self):
curvh = self.dirvh # save current dirvh value
self.dirvh=0
self.conflicts = []
for L in self.layers:
last = len(L)-1
prev = L.prevlayer()
if not prev: continue
k0=0
k1_init=len(prev)-1
l=0
for l1,v in enumerate(L):
if not self.grx[v].dummy: continue
if l1==last or v.inner(-1):
k1=k1_init
if v.inner(-1):
k1=self.grx[v.N(-1)[-1]].pos
for vl in L[l:l1+1]:
for vk in L._neighbors(vl):
k = self.grx[vk].pos
if (k<k0 or k>k1):
self.conflicts.append((vk,vl))
l=l1+1
k0=k1
self.dirvh = curvh
|
mark conflicts between edges:
inner edges are edges between dummy nodes
type 0 is regular crossing regular (or sharing vertex)
type 1 is inner crossing regular (targeted crossings)
type 2 is inner crossing inner (avoided by reduce_crossings phase)
|
train
|
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L628-L658
| null |
class SugiyamaLayout(object):
"""
The Sugiyama layout is the traditional "layered" graph layout called
*dot* in graphviz. This layout is quite efficient but heavily relies
on drawing heuristics. Adaptive drawing is limited to
extending the leaves only, but since the algorithm is quite fast
redrawing the entire graph (up to about a thousand nodes) gives
usually good results in less than a second.
The Sugiyama Layout Class takes as input a core_graph object and implements
an efficient drawing algorithm based on nodes dimensions provided through
a user-defined *view* property in each vertex.
Attributes:
dirvh (int): the current aligment state
order_inter (int): the default number of layer placement iterations
order_attr (str): set attribute name used for layer ordering
xspace (int): horizontal space between vertices in a layer
yspace (int): vertical space between layers
dw (int): default width of a vertex
dh (int): default height of a vertex
g (graph_core): the graph component reference
layers (list[Layer]): the list of layers
grx (dict): associate vertex (possibly dummy) with their sugiyama attributes
ctrls (dict): associate edge with all its vertices (including dummies)
dag (bool): the current acyclic state
initdone (bool): True if state is initialized (see init_all).
"""
def __init__(self,g):
from grandalf.utils.geometry import median_wh
# drawing parameters:
self.dirvh=0
self.order_iter = 8
self.order_attr = 'pos'
self.xspace = 20
self.yspace = 20
self.dw = 10
self.dh = 10
# For layered graphs, vertices and edges need to have some additional
# attributes that make sense only for this kind of layout:
# update graph struct:
self.g = g
self.layers = []
self.grx= {}
self.ctrls = {}
self.dag = False
for v in self.g.V():
assert hasattr(v,'view')
self.grx[v] = _sugiyama_vertex_attr()
self.dw,self.dh = median_wh([v.view for v in self.g.V()])
self.initdone = False
def init_all(self,roots=None,inverted_edges=None,optimize=False):
"""initializes the layout algorithm by computing roots (unless provided),
inverted edges (unless provided), vertices ranks and creates all dummy
vertices and layers.
Parameters:
roots (list[Vertex]): set *root* vertices (layer 0)
inverted_edges (list[Edge]): set edges to invert to have a DAG.
optimize (bool): optimize ranking if True (default False)
"""
if self.initdone: return
# For layered sugiyama algorithm, the input graph must be acyclic,
# so we must provide a list of root nodes and a list of inverted edges.
if roots==None:
roots = [v for v in self.g.sV if len(v.e_in())==0]
if inverted_edges==None:
L = self.g.get_scs_with_feedback(roots)
inverted_edges = [x for x in self.g.sE if x.feedback]
self.alt_e = inverted_edges
# assign rank to all vertices:
self.rank_all(roots,optimize)
# add dummy vertex/edge for 'long' edges:
for e in self.g.E():
self.setdummies(e)
# precompute some layers values:
for l in self.layers: l.setup(self)
self.initdone = True
def draw(self,N=1.5):
"""compute every node coordinates after converging to optimal ordering by N
rounds, and finally perform the edge routing.
"""
while N>0.5:
for (l,mvmt) in self.ordering_step():
pass
N = N-1
if N>0:
for (l,mvmt) in self.ordering_step(oneway=True):
pass
self.setxy()
self.draw_edges()
def _edge_inverter(self):
for e in self.alt_e:
x,y = e.v
e.v = (y,x)
self.dag = not self.dag
if self.dag:
for e in self.g.degenerated_edges:
e.detach()
self.g.sE.remove(e)
else:
for e in self.g.degenerated_edges:
self.g.add_edge(e)
# internal state for alignment policy:
# dirvh=0 -> dirh=+1, dirv=-1: leftmost upper
# dirvh=1 -> dirh=-1, dirv=-1: rightmost upper
# dirvh=2 -> dirh=+1, dirv=+1: leftmost lower
# dirvh=3 -> dirh=-1, dirv=+1: rightmost lower
@property
def dirvh(self): return self.__dirvh
@property
def dirv(self): return self.__dirv
@property
def dirh(self): return self.__dirh
@dirvh.setter
def dirvh(self,dirvh):
assert dirvh in range(4)
self.__dirvh=dirvh
self.__dirh,self.__dirv={0:(1,-1), 1:(-1,-1), 2:(1,1), 3:(-1,1)}[dirvh]
@dirv.setter
def dirv(self,dirv):
assert dirv in (-1,+1)
dirvh = (dirv+1)+(1-self.__dirh)//2
self.dirvh = dirvh
@dirh.setter
def dirh(self,dirh):
assert dirh in (-1,+1)
dirvh = (self.__dirv+1)+(1-dirh)//2
self.dirvh = dirvh
def rank_all(self,roots,optimize=False):
"""Computes rank of all vertices.
add provided roots to rank 0 vertices,
otherwise update ranking from provided roots.
The initial rank is based on precedence relationships,
optimal ranking may be derived from network flow (simplex).
"""
self._edge_inverter()
r = [x for x in self.g.sV if (len(x.e_in())==0 and x not in roots)]
self._rank_init(roots+r)
if optimize: self._rank_optimize()
self._edge_inverter()
def _rank_init(self,unranked):
"""Computes rank of provided unranked list of vertices and all
their children. A vertex will be asign a rank when all its
inward edges have been *scanned*. When a vertex is asigned
a rank, its outward edges are marked *scanned*.
"""
assert self.dag
scan = {}
# set rank of unranked based on its in-edges vertices ranks:
while len(unranked)>0:
l = []
for v in unranked:
self.setrank(v)
# mark out-edges has scan-able:
for e in v.e_out(): scan[e]=True
# check if out-vertices are rank-able:
for x in v.N(+1):
if not (False in [scan.get(e,False) for e in x.e_in()]):
if x not in l: l.append(x)
unranked=l
def _rank_optimize(self):
"""optimize ranking by pushing long edges toward lower layers as much as possible.
see other interersting network flow solver to minimize total edge length
(http://jgaa.info/accepted/2005/EiglspergerSiebenhallerKaufmann2005.9.3.pdf)
"""
assert self.dag
for l in reversed(self.layers):
for v in l:
gv = self.grx[v]
for x in v.N(-1):
if all((self.grx[y].rank>=gv.rank for y in x.N(+1))):
gx = self.grx[x]
self.layers[gx.rank].remove(x)
gx.rank = gv.rank-1
self.layers[gv.rank-1].append(x)
def setrank(self,v):
"""set rank value for vertex v and add it to the corresponding layer.
The Layer is created if it is the first vertex with this rank.
"""
assert self.dag
r=max([self.grx[x].rank for x in v.N(-1)]+[-1])+1
self.grx[v].rank=r
# add it to its layer:
try:
self.layers[r].append(v)
except IndexError:
assert r==len(self.layers)
self.layers.append(Layer([v]))
def dummyctrl(self,r,ctrl):
"""creates a DummyVertex at rank r inserted in the ctrl dict
of the associated edge and layer.
Arguments:
r (int): rank value
ctrl (dict): the edge's control vertices
Returns:
DummyVertex : the created DummyVertex.
"""
dv = DummyVertex(r)
dv.view.w,dv.view.h=self.dw,self.dh
self.grx[dv] = dv
dv.ctrl = ctrl
ctrl[r] = dv
self.layers[r].append(dv)
return dv
def setdummies(self,e):
"""creates and defines all needed dummy vertices for edge e.
"""
v0,v1 = e.v
r0,r1 = self.grx[v0].rank,self.grx[v1].rank
if r0>r1:
assert e in self.alt_e
v0,v1 = v1,v0
r0,r1 = r1,r0
if (r1-r0)>1:
# "dummy vertices" are stored in the edge ctrl dict,
# keyed by their rank in layers.
ctrl=self.ctrls[e]={}
ctrl[r0]=v0
ctrl[r1]=v1
for r in xrange(r0+1,r1):
self.dummyctrl(r,ctrl)
def draw_step(self):
"""iterator that computes all vertices coordinates and edge routing after
just one step (one layer after the other from top to bottom to top).
Purely inefficient ! Use it only for "animation" or debugging purpose.
"""
ostep = self.ordering_step()
for s in ostep:
self.setxy()
self.draw_edges()
yield s
def ordering_step(self,oneway=False):
"""iterator that computes all vertices ordering in their layers
(one layer after the other from top to bottom, to top again unless
oneway is True).
"""
self.dirv=-1
crossings = 0
for l in self.layers:
mvmt = l.order()
crossings += mvmt
yield (l,mvmt)
if oneway or (crossings == 0):
return
self.dirv=+1
while l:
mvmt = l.order()
yield (l,mvmt)
l = l.nextlayer()
def setxy(self):
"""computes all vertex coordinates (x,y) using
an algorithm by Brandes & Kopf.
"""
self._edge_inverter()
self._detect_alignment_conflicts()
inf = float('infinity')
# initialize vertex coordinates attributes:
for l in self.layers:
for v in l:
self.grx[v].root = v
self.grx[v].align = v
self.grx[v].sink = v
self.grx[v].shift = inf
self.grx[v].X = None
self.grx[v].x = [0.0]*4
curvh = self.dirvh # save current dirvh value
for dirvh in xrange(4):
self.dirvh = dirvh
self._coord_vertical_alignment()
self._coord_horizontal_compact()
self.dirvh = curvh # restore it
# vertical coordinate assigment of all nodes:
Y = 0
for l in self.layers:
dY = max([v.view.h/2. for v in l])
for v in l:
vx = sorted(self.grx[v].x)
# mean of the 2 medians out of the 4 x-coord computed above:
avgm = (vx[1]+vx[2])/2.
# final xy-coordinates :
v.view.xy = (avgm,Y+dY)
Y += 2*dY+self.yspace
self._edge_inverter()
# restore it
def _coord_vertical_alignment(self):
"""performs vertical alignment according to current dirvh internal state.
"""
dirh,dirv = self.dirh,self.dirv
g = self.grx
for l in self.layers[::-dirv]:
if not l.prevlayer(): continue
r=None
for vk in l[::dirh]:
for m in l._medianindex(vk):
# take the median node in dirv layer:
um = l.prevlayer()[m]
# if vk is "free" align it with um's root
if g[vk].align is vk:
if dirv==1: vpair = (vk,um)
else: vpair = (um,vk)
# if vk<->um link is used for alignment
if (vpair not in self.conflicts) and \
(r==None or dirh*r<dirh*m):
g[um].align = vk
g[vk].root = g[um].root
g[vk].align = g[vk].root
r = m
def _coord_horizontal_compact(self):
limit=getrecursionlimit()
N=len(self.layers)+10
if N>limit:
setrecursionlimit(N)
dirh,dirv = self.dirh,self.dirv
g = self.grx
L = self.layers[::-dirv]
# recursive placement of blocks:
for l in L:
for v in l[::dirh]:
if g[v].root is v:
self.__place_block(v)
setrecursionlimit(limit)
# mirror all nodes if right-aligned:
if dirh==-1:
for l in L:
for v in l:
x = g[v].X
if x: g[v].X = -x
# then assign x-coord of its root:
inf=float('infinity')
rb=inf
for l in L:
for v in l[::dirh]:
g[v].x[self.dirvh] = g[g[v].root].X
rs = g[g[v].root].sink
s = g[rs].shift
if s<inf:
g[v].x[self.dirvh] += dirh*s
rb = min(rb,g[v].x[self.dirvh])
# normalize to 0, and reinit root/align/sink/shift/X
for l in self.layers:
for v in l:
#g[v].x[dirvh] -= rb
g[v].root = g[v].align = g[v].sink = v
g[v].shift = inf
g[v].X = None
# TODO: rewrite in iterative form to avoid recursion limit...
def __place_block(self,v):
g = self.grx
if g[v].X==None:
# every block is initially placed at x=0
g[v].X = 0.0
# place block in which v belongs:
w = v
while 1:
j = g[w].pos-self.dirh # predecessor in rank must be placed
r = g[w].rank
if 0<= j <len(self.layers[r]):
wprec = self.layers[r][j]
delta = self.xspace+(wprec.view.w + w.view.w)/2. # abs positive minimum displ.
# take root and place block:
u = g[wprec].root
self.__place_block(u)
# set sink as sink of prec-block root
if g[v].sink is v:
g[v].sink = g[u].sink
if g[v].sink != g[u].sink:
s = g[u].sink
newshift = g[v].X-(g[u].X+delta)
g[s].shift = min(g[s].shift,newshift)
else:
g[v].X = max(g[v].X,(g[u].X+delta))
# take next node to align in block:
w = g[w].align
# quit if self aligned
if w is v: break
def draw_edges(self):
"""Basic edge routing applied only for edges with dummy points.
Enhanced edge routing can be performed by using the apropriate
*route_with_xxx* functions from :ref:routing_ in the edges' view.
"""
for e in self.g.E():
if hasattr(e,'view'):
l=[]
r0,r1 = None,None
if e in self.ctrls:
D = self.ctrls[e]
r0,r1 = self.grx[e.v[0]].rank,self.grx[e.v[1]].rank
if r0<r1:
ranks = xrange(r0+1,r1)
else:
ranks = xrange(r0-1,r1,-1)
l = [D[r].view.xy for r in ranks]
l.insert(0,e.v[0].view.xy)
l.append(e.v[1].view.xy)
try:
self.route_edge(e,l)
except AttributeError:
pass
e.view.setpath(l)
|
bdcht/grandalf
|
grandalf/layouts.py
|
SugiyamaLayout._coord_vertical_alignment
|
python
|
def _coord_vertical_alignment(self):
dirh,dirv = self.dirh,self.dirv
g = self.grx
for l in self.layers[::-dirv]:
if not l.prevlayer(): continue
r=None
for vk in l[::dirh]:
for m in l._medianindex(vk):
# take the median node in dirv layer:
um = l.prevlayer()[m]
# if vk is "free" align it with um's root
if g[vk].align is vk:
if dirv==1: vpair = (vk,um)
else: vpair = (um,vk)
# if vk<->um link is used for alignment
if (vpair not in self.conflicts) and \
(r==None or dirh*r<dirh*m):
g[um].align = vk
g[vk].root = g[um].root
g[vk].align = g[vk].root
r = m
|
performs vertical alignment according to current dirvh internal state.
|
train
|
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L660-L682
| null |
class SugiyamaLayout(object):
"""
The Sugiyama layout is the traditional "layered" graph layout called
*dot* in graphviz. This layout is quite efficient but heavily relies
on drawing heuristics. Adaptive drawing is limited to
extending the leaves only, but since the algorithm is quite fast
redrawing the entire graph (up to about a thousand nodes) gives
usually good results in less than a second.
The Sugiyama Layout Class takes as input a core_graph object and implements
an efficient drawing algorithm based on nodes dimensions provided through
a user-defined *view* property in each vertex.
Attributes:
dirvh (int): the current aligment state
order_inter (int): the default number of layer placement iterations
order_attr (str): set attribute name used for layer ordering
xspace (int): horizontal space between vertices in a layer
yspace (int): vertical space between layers
dw (int): default width of a vertex
dh (int): default height of a vertex
g (graph_core): the graph component reference
layers (list[Layer]): the list of layers
grx (dict): associate vertex (possibly dummy) with their sugiyama attributes
ctrls (dict): associate edge with all its vertices (including dummies)
dag (bool): the current acyclic state
initdone (bool): True if state is initialized (see init_all).
"""
def __init__(self,g):
from grandalf.utils.geometry import median_wh
# drawing parameters:
self.dirvh=0
self.order_iter = 8
self.order_attr = 'pos'
self.xspace = 20
self.yspace = 20
self.dw = 10
self.dh = 10
# For layered graphs, vertices and edges need to have some additional
# attributes that make sense only for this kind of layout:
# update graph struct:
self.g = g
self.layers = []
self.grx= {}
self.ctrls = {}
self.dag = False
for v in self.g.V():
assert hasattr(v,'view')
self.grx[v] = _sugiyama_vertex_attr()
self.dw,self.dh = median_wh([v.view for v in self.g.V()])
self.initdone = False
def init_all(self,roots=None,inverted_edges=None,optimize=False):
"""initializes the layout algorithm by computing roots (unless provided),
inverted edges (unless provided), vertices ranks and creates all dummy
vertices and layers.
Parameters:
roots (list[Vertex]): set *root* vertices (layer 0)
inverted_edges (list[Edge]): set edges to invert to have a DAG.
optimize (bool): optimize ranking if True (default False)
"""
if self.initdone: return
# For layered sugiyama algorithm, the input graph must be acyclic,
# so we must provide a list of root nodes and a list of inverted edges.
if roots==None:
roots = [v for v in self.g.sV if len(v.e_in())==0]
if inverted_edges==None:
L = self.g.get_scs_with_feedback(roots)
inverted_edges = [x for x in self.g.sE if x.feedback]
self.alt_e = inverted_edges
# assign rank to all vertices:
self.rank_all(roots,optimize)
# add dummy vertex/edge for 'long' edges:
for e in self.g.E():
self.setdummies(e)
# precompute some layers values:
for l in self.layers: l.setup(self)
self.initdone = True
def draw(self,N=1.5):
"""compute every node coordinates after converging to optimal ordering by N
rounds, and finally perform the edge routing.
"""
while N>0.5:
for (l,mvmt) in self.ordering_step():
pass
N = N-1
if N>0:
for (l,mvmt) in self.ordering_step(oneway=True):
pass
self.setxy()
self.draw_edges()
    def _edge_inverter(self):
        # Reverse the direction of every feedback edge (self.alt_e) and
        # toggle the acyclic flag; calling this method twice restores the
        # original orientation of the graph.
        for e in self.alt_e:
            x,y = e.v
            e.v = (y,x)
        self.dag = not self.dag
        # While in DAG mode, "degenerated" edges (presumably self-loops --
        # confirm in graph_core) are detached from the graph; they are
        # re-attached when leaving DAG mode.
        if self.dag:
            for e in self.g.degenerated_edges:
                e.detach()
                self.g.sE.remove(e)
        else:
            for e in self.g.degenerated_edges:
                self.g.add_edge(e)
# internal state for alignment policy:
# dirvh=0 -> dirh=+1, dirv=-1: leftmost upper
# dirvh=1 -> dirh=-1, dirv=-1: rightmost upper
# dirvh=2 -> dirh=+1, dirv=+1: leftmost lower
# dirvh=3 -> dirh=-1, dirv=+1: rightmost lower
@property
def dirvh(self): return self.__dirvh
@property
def dirv(self): return self.__dirv
@property
def dirh(self): return self.__dirh
@dirvh.setter
def dirvh(self,dirvh):
assert dirvh in range(4)
self.__dirvh=dirvh
self.__dirh,self.__dirv={0:(1,-1), 1:(-1,-1), 2:(1,1), 3:(-1,1)}[dirvh]
@dirv.setter
def dirv(self,dirv):
assert dirv in (-1,+1)
dirvh = (dirv+1)+(1-self.__dirh)//2
self.dirvh = dirvh
@dirh.setter
def dirh(self,dirh):
assert dirh in (-1,+1)
dirvh = (self.__dirv+1)+(1-dirh)//2
self.dirvh = dirvh
def rank_all(self,roots,optimize=False):
"""Computes rank of all vertices.
add provided roots to rank 0 vertices,
otherwise update ranking from provided roots.
The initial rank is based on precedence relationships,
optimal ranking may be derived from network flow (simplex).
"""
self._edge_inverter()
r = [x for x in self.g.sV if (len(x.e_in())==0 and x not in roots)]
self._rank_init(roots+r)
if optimize: self._rank_optimize()
self._edge_inverter()
def _rank_init(self,unranked):
"""Computes rank of provided unranked list of vertices and all
their children. A vertex will be asign a rank when all its
inward edges have been *scanned*. When a vertex is asigned
a rank, its outward edges are marked *scanned*.
"""
assert self.dag
scan = {}
# set rank of unranked based on its in-edges vertices ranks:
while len(unranked)>0:
l = []
for v in unranked:
self.setrank(v)
# mark out-edges has scan-able:
for e in v.e_out(): scan[e]=True
# check if out-vertices are rank-able:
for x in v.N(+1):
if not (False in [scan.get(e,False) for e in x.e_in()]):
if x not in l: l.append(x)
unranked=l
    def _rank_optimize(self):
        """optimize ranking by pushing long edges toward lower layers as much as possible.
        see other interesting network flow solvers to minimize total edge length
        (http://jgaa.info/accepted/2005/EiglspergerSiebenhallerKaufmann2005.9.3.pdf)
        """
        assert self.dag
        # walk layers bottom-up; a predecessor x may be pulled down next to v
        # when every successor of x already lies at v's rank or below:
        for l in reversed(self.layers):
            for v in l:
                gv = self.grx[v]
                for x in v.N(-1):
                    if all((self.grx[y].rank >= gv.rank for y in x.N(+1))):
                        gx = self.grx[x]
                        # move x to the layer just above v:
                        self.layers[gx.rank].remove(x)
                        gx.rank = gv.rank - 1
                        self.layers[gv.rank - 1].append(x)
    def setrank(self, v):
        """set rank value for vertex v and add it to the corresponding layer.
        The Layer is created if it is the first vertex with this rank.
        """
        assert self.dag
        # rank = 1 + max rank among predecessors (-1 when v has none,
        # i.e. roots end up at rank 0):
        r = max([self.grx[x].rank for x in v.N(-1)] + [-1]) + 1
        self.grx[v].rank = r
        # add it to its layer:
        try:
            self.layers[r].append(v)
        except IndexError:
            # ranks are assigned in increasing order, so at most one new
            # layer can be required here:
            assert r == len(self.layers)
            self.layers.append(Layer([v]))
def dummyctrl(self,r,ctrl):
"""creates a DummyVertex at rank r inserted in the ctrl dict
of the associated edge and layer.
Arguments:
r (int): rank value
ctrl (dict): the edge's control vertices
Returns:
DummyVertex : the created DummyVertex.
"""
dv = DummyVertex(r)
dv.view.w,dv.view.h=self.dw,self.dh
self.grx[dv] = dv
dv.ctrl = ctrl
ctrl[r] = dv
self.layers[r].append(dv)
return dv
    def setdummies(self, e):
        """creates and defines all needed dummy vertices for edge e.

        An edge spanning more than one rank gets one DummyVertex per
        intermediate rank, stored in self.ctrls[e] keyed by rank.
        """
        v0, v1 = e.v
        r0, r1 = self.grx[v0].rank, self.grx[v1].rank
        # normalize so that v0 is the upper endpoint; only inverted
        # (feedback) edges may still point upward at this stage:
        if r0 > r1:
            assert e in self.alt_e
            v0, v1 = v1, v0
            r0, r1 = r1, r0
        if (r1 - r0) > 1:
            # "dummy vertices" are stored in the edge ctrl dict,
            # keyed by their rank in layers.
            ctrl = self.ctrls[e] = {}
            ctrl[r0] = v0
            ctrl[r1] = v1
            for r in xrange(r0 + 1, r1):
                self.dummyctrl(r, ctrl)
def draw_step(self):
"""iterator that computes all vertices coordinates and edge routing after
just one step (one layer after the other from top to bottom to top).
Purely inefficient ! Use it only for "animation" or debugging purpose.
"""
ostep = self.ordering_step()
for s in ostep:
self.setxy()
self.draw_edges()
yield s
    def ordering_step(self, oneway=False):
        """iterator that computes all vertices ordering in their layers
        (one layer after the other from top to bottom, to top again unless
        oneway is True).

        Yields:
            (Layer, int): each processed layer and its movement count.
        """
        self.dirv = -1  # downward pass
        crossings = 0
        for l in self.layers:
            mvmt = l.order()
            crossings += mvmt
            yield (l, mvmt)
        # stop early when already stable, or when one pass was requested:
        if oneway or (crossings == 0):
            return
        self.dirv = +1  # return pass, walking back from the last layer
        while l:
            mvmt = l.order()
            yield (l, mvmt)
            l = l.nextlayer()
def setxy(self):
"""computes all vertex coordinates (x,y) using
an algorithm by Brandes & Kopf.
"""
self._edge_inverter()
self._detect_alignment_conflicts()
inf = float('infinity')
# initialize vertex coordinates attributes:
for l in self.layers:
for v in l:
self.grx[v].root = v
self.grx[v].align = v
self.grx[v].sink = v
self.grx[v].shift = inf
self.grx[v].X = None
self.grx[v].x = [0.0]*4
curvh = self.dirvh # save current dirvh value
for dirvh in xrange(4):
self.dirvh = dirvh
self._coord_vertical_alignment()
self._coord_horizontal_compact()
self.dirvh = curvh # restore it
# vertical coordinate assigment of all nodes:
Y = 0
for l in self.layers:
dY = max([v.view.h/2. for v in l])
for v in l:
vx = sorted(self.grx[v].x)
# mean of the 2 medians out of the 4 x-coord computed above:
avgm = (vx[1]+vx[2])/2.
# final xy-coordinates :
v.view.xy = (avgm,Y+dY)
Y += 2*dY+self.yspace
self._edge_inverter()
def _detect_alignment_conflicts(self):
"""mark conflicts between edges:
inner edges are edges between dummy nodes
type 0 is regular crossing regular (or sharing vertex)
type 1 is inner crossing regular (targeted crossings)
type 2 is inner crossing inner (avoided by reduce_crossings phase)
"""
curvh = self.dirvh # save current dirvh value
self.dirvh=0
self.conflicts = []
for L in self.layers:
last = len(L)-1
prev = L.prevlayer()
if not prev: continue
k0=0
k1_init=len(prev)-1
l=0
for l1,v in enumerate(L):
if not self.grx[v].dummy: continue
if l1==last or v.inner(-1):
k1=k1_init
if v.inner(-1):
k1=self.grx[v.N(-1)[-1]].pos
for vl in L[l:l1+1]:
for vk in L._neighbors(vl):
k = self.grx[vk].pos
if (k<k0 or k>k1):
self.conflicts.append((vk,vl))
l=l1+1
k0=k1
self.dirvh = curvh # restore it
def _coord_horizontal_compact(self):
limit=getrecursionlimit()
N=len(self.layers)+10
if N>limit:
setrecursionlimit(N)
dirh,dirv = self.dirh,self.dirv
g = self.grx
L = self.layers[::-dirv]
# recursive placement of blocks:
for l in L:
for v in l[::dirh]:
if g[v].root is v:
self.__place_block(v)
setrecursionlimit(limit)
# mirror all nodes if right-aligned:
if dirh==-1:
for l in L:
for v in l:
x = g[v].X
if x: g[v].X = -x
# then assign x-coord of its root:
inf=float('infinity')
rb=inf
for l in L:
for v in l[::dirh]:
g[v].x[self.dirvh] = g[g[v].root].X
rs = g[g[v].root].sink
s = g[rs].shift
if s<inf:
g[v].x[self.dirvh] += dirh*s
rb = min(rb,g[v].x[self.dirvh])
# normalize to 0, and reinit root/align/sink/shift/X
for l in self.layers:
for v in l:
#g[v].x[dirvh] -= rb
g[v].root = g[v].align = g[v].sink = v
g[v].shift = inf
g[v].X = None
# TODO: rewrite in iterative form to avoid recursion limit...
def __place_block(self,v):
g = self.grx
if g[v].X==None:
# every block is initially placed at x=0
g[v].X = 0.0
# place block in which v belongs:
w = v
while 1:
j = g[w].pos-self.dirh # predecessor in rank must be placed
r = g[w].rank
if 0<= j <len(self.layers[r]):
wprec = self.layers[r][j]
delta = self.xspace+(wprec.view.w + w.view.w)/2. # abs positive minimum displ.
# take root and place block:
u = g[wprec].root
self.__place_block(u)
# set sink as sink of prec-block root
if g[v].sink is v:
g[v].sink = g[u].sink
if g[v].sink != g[u].sink:
s = g[u].sink
newshift = g[v].X-(g[u].X+delta)
g[s].shift = min(g[s].shift,newshift)
else:
g[v].X = max(g[v].X,(g[u].X+delta))
# take next node to align in block:
w = g[w].align
# quit if self aligned
if w is v: break
def draw_edges(self):
"""Basic edge routing applied only for edges with dummy points.
Enhanced edge routing can be performed by using the apropriate
*route_with_xxx* functions from :ref:routing_ in the edges' view.
"""
for e in self.g.E():
if hasattr(e,'view'):
l=[]
r0,r1 = None,None
if e in self.ctrls:
D = self.ctrls[e]
r0,r1 = self.grx[e.v[0]].rank,self.grx[e.v[1]].rank
if r0<r1:
ranks = xrange(r0+1,r1)
else:
ranks = xrange(r0-1,r1,-1)
l = [D[r].view.xy for r in ranks]
l.insert(0,e.v[0].view.xy)
l.append(e.v[1].view.xy)
try:
self.route_edge(e,l)
except AttributeError:
pass
e.view.setpath(l)
|
bdcht/grandalf
|
grandalf/layouts.py
|
SugiyamaLayout.draw_edges
|
python
|
def draw_edges(self):
for e in self.g.E():
if hasattr(e,'view'):
l=[]
r0,r1 = None,None
if e in self.ctrls:
D = self.ctrls[e]
r0,r1 = self.grx[e.v[0]].rank,self.grx[e.v[1]].rank
if r0<r1:
ranks = xrange(r0+1,r1)
else:
ranks = xrange(r0-1,r1,-1)
l = [D[r].view.xy for r in ranks]
l.insert(0,e.v[0].view.xy)
l.append(e.v[1].view.xy)
try:
self.route_edge(e,l)
except AttributeError:
pass
e.view.setpath(l)
|
Basic edge routing applied only for edges with dummy points.
Enhanced edge routing can be performed by using the apropriate
*route_with_xxx* functions from :ref:routing_ in the edges' view.
|
train
|
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L755-L778
|
[
"def E(self,cond=None):\n E = self.sE\n if cond is None: cond=(lambda x:True)\n for e in E:\n if cond(e):\n yield e\n"
] |
class SugiyamaLayout(object):
"""
The Sugiyama layout is the traditional "layered" graph layout called
*dot* in graphviz. This layout is quite efficient but heavily relies
on drawing heuristics. Adaptive drawing is limited to
extending the leaves only, but since the algorithm is quite fast
redrawing the entire graph (up to about a thousand nodes) gives
usually good results in less than a second.
The Sugiyama Layout Class takes as input a core_graph object and implements
an efficient drawing algorithm based on nodes dimensions provided through
a user-defined *view* property in each vertex.
Attributes:
dirvh (int): the current aligment state
order_inter (int): the default number of layer placement iterations
order_attr (str): set attribute name used for layer ordering
xspace (int): horizontal space between vertices in a layer
yspace (int): vertical space between layers
dw (int): default width of a vertex
dh (int): default height of a vertex
g (graph_core): the graph component reference
layers (list[Layer]): the list of layers
grx (dict): associate vertex (possibly dummy) with their sugiyama attributes
ctrls (dict): associate edge with all its vertices (including dummies)
dag (bool): the current acyclic state
initdone (bool): True if state is initialized (see init_all).
"""
def __init__(self,g):
from grandalf.utils.geometry import median_wh
# drawing parameters:
self.dirvh=0
self.order_iter = 8
self.order_attr = 'pos'
self.xspace = 20
self.yspace = 20
self.dw = 10
self.dh = 10
# For layered graphs, vertices and edges need to have some additional
# attributes that make sense only for this kind of layout:
# update graph struct:
self.g = g
self.layers = []
self.grx= {}
self.ctrls = {}
self.dag = False
for v in self.g.V():
assert hasattr(v,'view')
self.grx[v] = _sugiyama_vertex_attr()
self.dw,self.dh = median_wh([v.view for v in self.g.V()])
self.initdone = False
def init_all(self,roots=None,inverted_edges=None,optimize=False):
"""initializes the layout algorithm by computing roots (unless provided),
inverted edges (unless provided), vertices ranks and creates all dummy
vertices and layers.
Parameters:
roots (list[Vertex]): set *root* vertices (layer 0)
inverted_edges (list[Edge]): set edges to invert to have a DAG.
optimize (bool): optimize ranking if True (default False)
"""
if self.initdone: return
# For layered sugiyama algorithm, the input graph must be acyclic,
# so we must provide a list of root nodes and a list of inverted edges.
if roots==None:
roots = [v for v in self.g.sV if len(v.e_in())==0]
if inverted_edges==None:
L = self.g.get_scs_with_feedback(roots)
inverted_edges = [x for x in self.g.sE if x.feedback]
self.alt_e = inverted_edges
# assign rank to all vertices:
self.rank_all(roots,optimize)
# add dummy vertex/edge for 'long' edges:
for e in self.g.E():
self.setdummies(e)
# precompute some layers values:
for l in self.layers: l.setup(self)
self.initdone = True
def draw(self,N=1.5):
"""compute every node coordinates after converging to optimal ordering by N
rounds, and finally perform the edge routing.
"""
while N>0.5:
for (l,mvmt) in self.ordering_step():
pass
N = N-1
if N>0:
for (l,mvmt) in self.ordering_step(oneway=True):
pass
self.setxy()
self.draw_edges()
def _edge_inverter(self):
for e in self.alt_e:
x,y = e.v
e.v = (y,x)
self.dag = not self.dag
if self.dag:
for e in self.g.degenerated_edges:
e.detach()
self.g.sE.remove(e)
else:
for e in self.g.degenerated_edges:
self.g.add_edge(e)
# internal state for alignment policy:
# dirvh=0 -> dirh=+1, dirv=-1: leftmost upper
# dirvh=1 -> dirh=-1, dirv=-1: rightmost upper
# dirvh=2 -> dirh=+1, dirv=+1: leftmost lower
# dirvh=3 -> dirh=-1, dirv=+1: rightmost lower
@property
def dirvh(self): return self.__dirvh
@property
def dirv(self): return self.__dirv
@property
def dirh(self): return self.__dirh
@dirvh.setter
def dirvh(self,dirvh):
assert dirvh in range(4)
self.__dirvh=dirvh
self.__dirh,self.__dirv={0:(1,-1), 1:(-1,-1), 2:(1,1), 3:(-1,1)}[dirvh]
@dirv.setter
def dirv(self,dirv):
assert dirv in (-1,+1)
dirvh = (dirv+1)+(1-self.__dirh)//2
self.dirvh = dirvh
@dirh.setter
def dirh(self,dirh):
assert dirh in (-1,+1)
dirvh = (self.__dirv+1)+(1-dirh)//2
self.dirvh = dirvh
def rank_all(self,roots,optimize=False):
"""Computes rank of all vertices.
add provided roots to rank 0 vertices,
otherwise update ranking from provided roots.
The initial rank is based on precedence relationships,
optimal ranking may be derived from network flow (simplex).
"""
self._edge_inverter()
r = [x for x in self.g.sV if (len(x.e_in())==0 and x not in roots)]
self._rank_init(roots+r)
if optimize: self._rank_optimize()
self._edge_inverter()
def _rank_init(self,unranked):
"""Computes rank of provided unranked list of vertices and all
their children. A vertex will be asign a rank when all its
inward edges have been *scanned*. When a vertex is asigned
a rank, its outward edges are marked *scanned*.
"""
assert self.dag
scan = {}
# set rank of unranked based on its in-edges vertices ranks:
while len(unranked)>0:
l = []
for v in unranked:
self.setrank(v)
# mark out-edges has scan-able:
for e in v.e_out(): scan[e]=True
# check if out-vertices are rank-able:
for x in v.N(+1):
if not (False in [scan.get(e,False) for e in x.e_in()]):
if x not in l: l.append(x)
unranked=l
def _rank_optimize(self):
"""optimize ranking by pushing long edges toward lower layers as much as possible.
see other interersting network flow solver to minimize total edge length
(http://jgaa.info/accepted/2005/EiglspergerSiebenhallerKaufmann2005.9.3.pdf)
"""
assert self.dag
for l in reversed(self.layers):
for v in l:
gv = self.grx[v]
for x in v.N(-1):
if all((self.grx[y].rank>=gv.rank for y in x.N(+1))):
gx = self.grx[x]
self.layers[gx.rank].remove(x)
gx.rank = gv.rank-1
self.layers[gv.rank-1].append(x)
def setrank(self,v):
"""set rank value for vertex v and add it to the corresponding layer.
The Layer is created if it is the first vertex with this rank.
"""
assert self.dag
r=max([self.grx[x].rank for x in v.N(-1)]+[-1])+1
self.grx[v].rank=r
# add it to its layer:
try:
self.layers[r].append(v)
except IndexError:
assert r==len(self.layers)
self.layers.append(Layer([v]))
def dummyctrl(self,r,ctrl):
"""creates a DummyVertex at rank r inserted in the ctrl dict
of the associated edge and layer.
Arguments:
r (int): rank value
ctrl (dict): the edge's control vertices
Returns:
DummyVertex : the created DummyVertex.
"""
dv = DummyVertex(r)
dv.view.w,dv.view.h=self.dw,self.dh
self.grx[dv] = dv
dv.ctrl = ctrl
ctrl[r] = dv
self.layers[r].append(dv)
return dv
def setdummies(self,e):
"""creates and defines all needed dummy vertices for edge e.
"""
v0,v1 = e.v
r0,r1 = self.grx[v0].rank,self.grx[v1].rank
if r0>r1:
assert e in self.alt_e
v0,v1 = v1,v0
r0,r1 = r1,r0
if (r1-r0)>1:
# "dummy vertices" are stored in the edge ctrl dict,
# keyed by their rank in layers.
ctrl=self.ctrls[e]={}
ctrl[r0]=v0
ctrl[r1]=v1
for r in xrange(r0+1,r1):
self.dummyctrl(r,ctrl)
def draw_step(self):
"""iterator that computes all vertices coordinates and edge routing after
just one step (one layer after the other from top to bottom to top).
Purely inefficient ! Use it only for "animation" or debugging purpose.
"""
ostep = self.ordering_step()
for s in ostep:
self.setxy()
self.draw_edges()
yield s
def ordering_step(self,oneway=False):
"""iterator that computes all vertices ordering in their layers
(one layer after the other from top to bottom, to top again unless
oneway is True).
"""
self.dirv=-1
crossings = 0
for l in self.layers:
mvmt = l.order()
crossings += mvmt
yield (l,mvmt)
if oneway or (crossings == 0):
return
self.dirv=+1
while l:
mvmt = l.order()
yield (l,mvmt)
l = l.nextlayer()
def setxy(self):
"""computes all vertex coordinates (x,y) using
an algorithm by Brandes & Kopf.
"""
self._edge_inverter()
self._detect_alignment_conflicts()
inf = float('infinity')
# initialize vertex coordinates attributes:
for l in self.layers:
for v in l:
self.grx[v].root = v
self.grx[v].align = v
self.grx[v].sink = v
self.grx[v].shift = inf
self.grx[v].X = None
self.grx[v].x = [0.0]*4
curvh = self.dirvh # save current dirvh value
for dirvh in xrange(4):
self.dirvh = dirvh
self._coord_vertical_alignment()
self._coord_horizontal_compact()
self.dirvh = curvh # restore it
# vertical coordinate assigment of all nodes:
Y = 0
for l in self.layers:
dY = max([v.view.h/2. for v in l])
for v in l:
vx = sorted(self.grx[v].x)
# mean of the 2 medians out of the 4 x-coord computed above:
avgm = (vx[1]+vx[2])/2.
# final xy-coordinates :
v.view.xy = (avgm,Y+dY)
Y += 2*dY+self.yspace
self._edge_inverter()
def _detect_alignment_conflicts(self):
"""mark conflicts between edges:
inner edges are edges between dummy nodes
type 0 is regular crossing regular (or sharing vertex)
type 1 is inner crossing regular (targeted crossings)
type 2 is inner crossing inner (avoided by reduce_crossings phase)
"""
curvh = self.dirvh # save current dirvh value
self.dirvh=0
self.conflicts = []
for L in self.layers:
last = len(L)-1
prev = L.prevlayer()
if not prev: continue
k0=0
k1_init=len(prev)-1
l=0
for l1,v in enumerate(L):
if not self.grx[v].dummy: continue
if l1==last or v.inner(-1):
k1=k1_init
if v.inner(-1):
k1=self.grx[v.N(-1)[-1]].pos
for vl in L[l:l1+1]:
for vk in L._neighbors(vl):
k = self.grx[vk].pos
if (k<k0 or k>k1):
self.conflicts.append((vk,vl))
l=l1+1
k0=k1
self.dirvh = curvh # restore it
def _coord_vertical_alignment(self):
"""performs vertical alignment according to current dirvh internal state.
"""
dirh,dirv = self.dirh,self.dirv
g = self.grx
for l in self.layers[::-dirv]:
if not l.prevlayer(): continue
r=None
for vk in l[::dirh]:
for m in l._medianindex(vk):
# take the median node in dirv layer:
um = l.prevlayer()[m]
# if vk is "free" align it with um's root
if g[vk].align is vk:
if dirv==1: vpair = (vk,um)
else: vpair = (um,vk)
# if vk<->um link is used for alignment
if (vpair not in self.conflicts) and \
(r==None or dirh*r<dirh*m):
g[um].align = vk
g[vk].root = g[um].root
g[vk].align = g[vk].root
r = m
def _coord_horizontal_compact(self):
limit=getrecursionlimit()
N=len(self.layers)+10
if N>limit:
setrecursionlimit(N)
dirh,dirv = self.dirh,self.dirv
g = self.grx
L = self.layers[::-dirv]
# recursive placement of blocks:
for l in L:
for v in l[::dirh]:
if g[v].root is v:
self.__place_block(v)
setrecursionlimit(limit)
# mirror all nodes if right-aligned:
if dirh==-1:
for l in L:
for v in l:
x = g[v].X
if x: g[v].X = -x
# then assign x-coord of its root:
inf=float('infinity')
rb=inf
for l in L:
for v in l[::dirh]:
g[v].x[self.dirvh] = g[g[v].root].X
rs = g[g[v].root].sink
s = g[rs].shift
if s<inf:
g[v].x[self.dirvh] += dirh*s
rb = min(rb,g[v].x[self.dirvh])
# normalize to 0, and reinit root/align/sink/shift/X
for l in self.layers:
for v in l:
#g[v].x[dirvh] -= rb
g[v].root = g[v].align = g[v].sink = v
g[v].shift = inf
g[v].X = None
# TODO: rewrite in iterative form to avoid recursion limit...
def __place_block(self,v):
g = self.grx
if g[v].X==None:
# every block is initially placed at x=0
g[v].X = 0.0
# place block in which v belongs:
w = v
while 1:
j = g[w].pos-self.dirh # predecessor in rank must be placed
r = g[w].rank
if 0<= j <len(self.layers[r]):
wprec = self.layers[r][j]
delta = self.xspace+(wprec.view.w + w.view.w)/2. # abs positive minimum displ.
# take root and place block:
u = g[wprec].root
self.__place_block(u)
# set sink as sink of prec-block root
if g[v].sink is v:
g[v].sink = g[u].sink
if g[v].sink != g[u].sink:
s = g[u].sink
newshift = g[v].X-(g[u].X+delta)
g[s].shift = min(g[s].shift,newshift)
else:
g[v].X = max(g[v].X,(g[u].X+delta))
# take next node to align in block:
w = g[w].align
# quit if self aligned
if w is v: break
|
Morrolan/surrealism
|
surrealism.py
|
show_faults
|
python
|
def show_faults():
cursor = CONN.cursor()
query = "select fau_id, fault from surfaults where fau_is_valid = 'y' order by fau_id asc"
cursor.execute(query)
result = cursor.fetchall()
return result
|
Return all valid/active faults ordered by ID to allow the user to pick and choose.
:return: List of Tuples where the Tuple elements are: (fault id, fault template)
|
train
|
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L71-L82
| null |
#!/usr/bin/env python
#############################################################################
# surrealism.py - Surreal sentence and error message generator
# Copyright (C) 2014 Ian Havelock
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#
# This is a derivative work (used with permission) from www.ravenblack.net
# Credit also goes to Kevan Davis on whose work the surrealism generator at
# Ravenblack.net is based on...
#############################################################################
__all__ = ['show_faults', 'show_sentences', 'getfault', 'getsentence', 'version',
'sentence_test', 'fault_test', 'show_sentences', 'show_faults']
# IMPORTS ###################################################################
import sqlite3
import random
import pkg_resources
# PARTICULAR IMPORTS ########################################################
from pkg_resources import resource_filename
# CONSTANTS #################################################################
CONN = sqlite3.connect(resource_filename(__name__, 'surrealism.sqlite'))
# VARIABLES #################################################################
# CLASSES ###################################################################
#############################################################################
# EXTERNAL METHODS BELOW
def version():
    """Return the currently-installed version of the surrealism package."""
    dist = pkg_resources.require('surrealism')[0]
    return dist.version
def showfaults():
    """Deprecated alias kept for backwards compatibility.

    :return: same as :func:`show_faults`.
    """
    return show_faults()
def showsentences():
    """Deprecated alias kept for backwards compatibility.

    :return: same as :func:`show_sentences`.
    """
    return show_sentences()
def show_sentences():
    """Return all valid/active sentences ordered by ID to allow the user
    to pick and choose.

    :return: Dict mapping sentence ID to its sentence structure/template.
    """
    cursor = CONN.cursor()
    query = ("select sen_id, sentence from sursentences "
             "where sen_is_valid = 'y' order by sen_id asc")
    cursor.execute(query)
    # each row is a (sen_id, sentence) pair, which is exactly the shape
    # dict() accepts -- no manual accumulation loop needed:
    return dict(cursor.fetchall())
def faulttest():
    """Deprecated alias kept for backwards compatibility.

    :return: same as :func:`fault_test`.
    """
    return fault_test()
def fault_test():
    """Return one instance of every programming fault, for testing purposes."""
    counts = __get_table_limits()
    results = []
    outcome = None  # deliberately carried over when a flag is unrecognized
    for number in range(1, counts['max_fau'] + 1):
        fault = __get_fault(counts, fault_id=number)
        if fault[0] == 'n':
            outcome = "Fault is DISABLED - ignoring..."
        if fault[0] == 'y':
            outcome = __process_sentence(fault, counts)
        results.append((fault[1], outcome))
    return results
def sentencetest():
    """Deprecated alias kept for backwards compatibility.

    :return: same as :func:`sentence_test`.
    """
    return sentence_test()
def sentence_test():
    """Return one random version of each sentence to test sentence structure."""
    counts = __get_table_limits()
    results = []
    outcome = None  # deliberately carried over when a flag is unrecognized
    for number in range(1, counts['max_sen'] + 1):
        sentence = __get_sentence(counts, sentence_id=number)
        if sentence[0] == 'n':
            outcome = "Sentence is DISABLED - ignoring..."
        if sentence[0] == 'y':
            outcome = __process_sentence(sentence, counts)
        results.append((sentence[1], outcome))
    return results
def getfault(fault_id=None):
    """Deprecated alias for :func:`get_fault` (backwards compatibility)."""
    return get_fault(fault_id)
def get_fault(fault_id=None):
"""Retrieve a randomly-generated error message as a unicode string.
:param fault_id:
Allows you to optionally specify an integer representing the fault_id
from the database table. This allows you to retrieve a specific fault
each time, albeit with different keywords."""
counts = __get_table_limits()
result = None
id_ = 0
try:
if isinstance(fault_id, int):
id_ = fault_id
elif isinstance(fault_id, float):
print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
id_ = round(fault_id)
else:
id_ = random.randint(1, counts['max_fau'])
except ValueError:
print("ValueError: Incorrect parameter type detected.")
if id_ <= counts['max_fau']:
fault = __get_fault(counts, fault_id=id_)
else:
print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_fau'])))
id_ = counts['max_fau']
fault = __get_fault(counts, fault_id=id_)
if fault is not None:
while fault[0] == 'n':
if id_ is not None:
fault = __get_fault(counts, None)
else:
fault = __get_fault(counts, id_)
if fault[0] == 'y':
result = __process_sentence(fault, counts)
return result
else:
print('ValueError: _fault cannot be None.')
def getsentence(sentence_id=None):
    """Deprecated alias for :func:`get_sentence` (backwards compatibility)."""
    return get_sentence(sentence_id)
def get_sentence(sentence_id=None):
"""Retrieve a randomly-generated sentence as a unicode string.
:param sentence_id:
Allows you to optionally specify an integer representing the sentence_id
from the database table. This allows you to retrieve a specific
sentence each time, albeit with different keywords."""
counts = __get_table_limits()
result = None
id_ = 0
try:
if isinstance(sentence_id, int):
id_ = sentence_id
elif isinstance(sentence_id, float):
print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
id_ = round(sentence_id)
else:
id_ = random.randint(1, counts['max_sen'])
except ValueError:
print("ValueError: Incorrect parameter type detected.")
if id_ <= counts['max_sen']:
sentence = __get_sentence(counts, sentence_id=id_)
else:
print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_sen'])))
id_ = counts['max_sen']
sentence = __get_sentence(counts, sentence_id=id_)
if sentence is not None:
while sentence[0] == 'n':
if id_ is not None:
# here we delibrately pass 'None' to __getsentence__ as it will
sentence = __get_sentence(counts, None)
else:
sentence = __get_sentence(counts, id_)
if sentence[0] == 'y':
result = __process_sentence(sentence, counts)
return result
else:
print('ValueError: _sentence cannot be None.')
#############################################################################
# INTERNAL METHODS BELOW
def __get_fault(counts, fault_id=None):
    """Fetch a single fault row whose template bits are substituted later.

    :param counts: table-size dict from __get_table_limits().
    :param fault_id: optional specific fau_id; a random existing id is
        used when it is None, not an int, or not present in the table.
    """
    cursor = CONN.cursor()
    # Collect every fau_id that actually exists so the random pick below
    # can never reference a missing row.
    cursor.execute("select fau_id from surfaults")
    valid_ids = [row[0] for row in cursor.fetchall()]
    wanted = fault_id if type(fault_id) is int else random.randint(1, counts['max_fau'])
    while wanted not in valid_ids:
        wanted = random.randint(1, counts['max_fau'])
    cursor.execute("select * from surfaults where fau_id = {0}".format(wanted))
    return cursor.fetchone()
def __get_sentence(counts, sentence_id=None):
    """Fetch a single sentence row whose template bits are substituted later.

    :param counts: table-size dict from __get_table_limits().
    :param sentence_id: optional specific sen_id; a random existing id is
        used when it is None, not an int, or not present in the table.
    """
    cursor = CONN.cursor()
    # Collect every sen_id that actually exists so the random pick below
    # can never reference a missing row.
    cursor.execute("select sen_id from sursentences")
    valid_ids = [row[0] for row in cursor.fetchall()]
    wanted = sentence_id if type(sentence_id) is int else random.randint(1, counts['max_sen'])
    while wanted not in valid_ids:
        wanted = random.randint(1, counts['max_sen'])
    cursor.execute("select * from sursentences where sen_id = {0}".format(wanted))
    return cursor.fetchone()
def __get_verb(counts):
    """Return a random verb string from the surverbs table.

    :param counts: table-size dict from __get_table_limits().
    """
    cursor = CONN.cursor()
    cursor.execute("select verb_id from surverbs")
    valid_ids = [row[0] for row in cursor.fetchall()]
    pick = random.randint(1, counts['max_verb'])
    while pick not in valid_ids:
        pick = random.randint(1, counts['max_verb'])
    cursor.execute("select * from surverbs where verb_id = {0}".format(pick))
    # column 1 holds the verb text
    return cursor.fetchone()[1]
def __get_noun(counts):
    """Return a random noun string from the surnouns table.

    :param counts: table-size dict from __get_table_limits().
    """
    cursor = CONN.cursor()
    cursor.execute("select noun_id from surnouns")
    valid_ids = [row[0] for row in cursor.fetchall()]
    pick = random.randint(1, counts['max_nouns'])
    while pick not in valid_ids:
        pick = random.randint(1, counts['max_nouns'])
    cursor.execute("select * from surnouns where noun_id = {0}".format(pick))
    # column 1 holds the noun text
    return cursor.fetchone()[1]
def __get_adjective(counts):
    """Return a random adjective string from the suradjs table.

    :param counts: table-size dict from __get_table_limits().
    """
    cursor = CONN.cursor()
    cursor.execute("select adj_id from suradjs")
    valid_ids = [row[0] for row in cursor.fetchall()]
    pick = random.randint(1, counts['max_adjectives'])
    while pick not in valid_ids:
        pick = random.randint(1, counts['max_adjectives'])
    cursor.execute("select * from suradjs where adj_id = {0}".format(pick))
    # column 1 holds the adjective text
    return cursor.fetchone()[1]
def __get_name(counts):
    """Return a random name string from the surnames table.

    :param counts: table-size dict from __get_table_limits().
    """
    cursor = CONN.cursor()
    cursor.execute("select name_id from surnames")
    valid_ids = [row[0] for row in cursor.fetchall()]
    pick = random.randint(1, counts['max_names'])
    while pick not in valid_ids:
        pick = random.randint(1, counts['max_names'])
    cursor.execute("select * from surnames where name_id = {0}".format(pick))
    # column 1 holds the name text
    return cursor.fetchone()[1]
def __get_table_limits():
    """Count the rows of each content table.

    The counts are the upper bounds for the random id picks used
    throughout the module.

    :return: dict with keys 'max_adjectives', 'max_names', 'max_nouns',
        'max_sen', 'max_fau' and 'max_verb'.
    """
    # The original initialised the dict with stale key names
    # ('max_sentences', 'max_faults', 'max_verbs') that were never the keys
    # actually written, leaving dead None entries; the table below uses the
    # real keys and replaces six copy-pasted count stanzas with one loop.
    tables = {
        'max_adjectives': 'suradjs',
        'max_names': 'surnames',
        'max_nouns': 'surnouns',
        'max_sen': 'sursentences',
        'max_fau': 'surfaults',
        'max_verb': 'surverbs',
    }
    cursor = CONN.cursor()
    table_counts = {}
    for key, table in tables.items():
        cursor.execute('SELECT count(*) FROM {0}'.format(table))
        # fetchone() returns a 1-tuple; keep only the integer count
        table_counts[key] = cursor.fetchone()[0]
    return table_counts
def __process_sentence(sentence_tuple, counts):
    """Expand a raw sentence/fault row into finished text.

    :param sentence_tuple: full database row; index 2 holds the template.
    :param counts: table-size dict from __get_table_limits().
    """
    text = sentence_tuple[2]
    # Substitute each placeholder type in turn.  Order matters: words are
    # filled in first, then '#AN' agreement, repeats and random picks, and
    # finally capitalisation and whitespace clean-up.
    text = __replace_verbs(text, counts)
    text = __replace_nouns(text, counts)
    text = ___replace_adjective_maybe(text, counts)
    text = __replace_adjective(text, counts)
    text = __replace_names(text, counts)
    text = __replace_an(text)
    text = __replace_repeat(text)
    text = __replace_random(text)
    text = __replace_capitalise(text)
    text = __replace_capall(text)
    text = __check_spaces(text)
    return text
def __replace_verbs(sentence, counts):
    """Replace every '#VERB' placeholder with a random verb.

    :param sentence: sentence template text (may be None).
    :param counts: table-size dict from __get_table_limits().
    """
    if sentence is None:
        return sentence
    while '#VERB' in sentence:
        sentence = sentence.replace('#VERB', str(__get_verb(counts)), 1)
    return sentence
def __replace_nouns(sentence, counts):
    """Replace every '#NOUN' placeholder with a random noun.

    :param sentence: sentence template text (may be None).
    :param counts: table-size dict from __get_table_limits().
    """
    if sentence is None:
        return sentence
    while '#NOUN' in sentence:
        sentence = sentence.replace('#NOUN', str(__get_noun(counts)), 1)
    return sentence
def ___replace_adjective_maybe(sentence, counts):
    """Replace each '#ADJECTIVE_MAYBE' with a random adjective or nothing.

    A single coin flip is made up front, so every '#ADJECTIVE_MAYBE' in a
    given sentence receives the same keep-or-drop decision.

    :param sentence: sentence template text (may be None).
    :param counts: table-size dict from __get_table_limits().
    """
    keep = random.randint(0, 1) % 2 == 0
    if sentence is None:
        return sentence
    while '#ADJECTIVE_MAYBE' in sentence:
        if keep:
            sentence = sentence.replace('#ADJECTIVE_MAYBE', ' ' + str(__get_adjective(counts)), 1)
        else:
            sentence = sentence.replace('#ADJECTIVE_MAYBE', '', 1)
    return sentence
def __replace_adjective(sentence, counts):
    """Replace every '#ADJECTIVE' placeholder with a random adjective.

    :param sentence: sentence template text (may be None).
    :param counts: table-size dict from __get_table_limits().
    """
    if sentence is None:
        return sentence
    while '#ADJECTIVE' in sentence:
        sentence = sentence.replace('#ADJECTIVE', str(__get_adjective(counts)), 1)
    return sentence
def __replace_names(sentence, counts):
    """Replace every '#NAME' placeholder with a random name.

    :param sentence: sentence template text (may be None).
    :param counts: table-size dict from __get_table_limits().
    """
    if sentence is None:
        return sentence
    while '#NAME' in sentence:
        sentence = sentence.replace('#NAME', str(__get_name(counts)), 1)
    return sentence
def __replace_an(sentence):
    """Replace each '#AN' with 'a' or 'an' based on the following word.

    'an' is used when the character after the placeholder's trailing space
    is a vowel, otherwise 'a'.

    :param sentence: sentence template text (may be None).
    """
    if sentence is None:
        return sentence
    while '#AN' in sentence:
        # '#AN' plus its trailing space occupy 4 characters, so this points
        # at the first letter of the next word.
        next_char_index = sentence.find('#AN') + 4
        # Guard against '#AN' at the very end of the text, which previously
        # raised IndexError; default to 'a' in that case.
        if next_char_index < len(sentence) and sentence[next_char_index] in 'aeiouAEIOU':
            sentence = sentence.replace('#AN', 'an', 1)
        else:
            sentence = sentence.replace('#AN', 'a', 1)
    return sentence
def __replace_random(sentence):
    """Expand each '#RANDOM[n,opt1,...,optn]' into one random option.

    The first comma-separated field is the number of options that follow;
    one of them is chosen uniformly at random.

    :param sentence: sentence template text (may be None).
    """
    if sentence is None:
        return sentence
    while '#RANDOM' in sentence:
        tag_start = sentence.find('#RANDOM')
        option_start = tag_start + 8        # skip '#RANDOM['
        # NOTE(review): like the original, this takes the first ']' in the
        # whole string -- assumes no earlier ']' precedes the tag.
        option_end = sentence.find(']')
        options = sentence[option_start:option_end].split(',')
        pick = random.randint(1, int(options[0]))
        # The original guarded on "find('#RANDOM') is not None", which is
        # always true (find returns an int); the guard has been removed.
        sentence = sentence.replace(sentence[tag_start:option_end + 1], options[pick], 1)
    return sentence
def __replace_repeat(sentence):
    """Expand '#DEFINE_REPEAT[key,text]' / '#REPEAT[key]' placeholders.

    '#DEFINE_REPEAT[key,text]' stores *text* under *key* and is removed from
    the sentence; every '#REPEAT[key]' is then replaced with the stored
    text.  (Use sentence_id 47 for testing.)

    :param sentence: sentence template text (may be None).
    """
    if sentence is None:
        return sentence
    stored = {}
    while '#DEFINE_REPEAT' in sentence:
        tag_start = sentence.find('#DEFINE_REPEAT')
        body_start = tag_start + 15         # skip '#DEFINE_REPEAT['
        body_end = sentence.find(']')
        fields = sentence[body_start:body_end].split(',')
        stored[fields[0]] = fields[1]
        sentence = sentence.replace(sentence[tag_start:body_end + 1], '', 1)
    while '#REPEAT' in sentence:
        tag_start = sentence.find('#REPEAT')
        key_start = tag_start + 8           # skip '#REPEAT['
        # searching from key_start avoids dodgy bracket-matching errors
        key_end = sentence.find(']', key_start)
        key = sentence[key_start:key_end]
        # An unknown key previously left the string unchanged and spun this
        # loop forever; drop the tag instead so processing always terminates.
        replacement = str(stored[key]) if key in stored else ''
        sentence = sentence.replace(sentence[tag_start:key_end + 1], replacement)
    return sentence
def __replace_capitalise(sentence):
    """Replace each '#CAPITALISE' tag and capitalise the word that follows.

    NOTE(review): like the original, only a lower-case following letter is
    upper-cased; an already-capitalised word is left untouched.

    :param sentence: sentence template text (may be None).
    """
    if sentence is None:
        return sentence
    while '#CAPITALISE' in sentence:
        # The original referenced an undefined name '_sentence' here, which
        # raised NameError whenever the tag was present; fixed to 'sentence'.
        tag_index = sentence.find('#CAPITALISE')
        before = sentence[:tag_index]
        first_letter = sentence[tag_index + 12:tag_index + 13]
        after = sentence[tag_index + 13:]
        if first_letter in "abcdefghijklmnopqrstuvwxyz":
            first_letter = first_letter.capitalize()
        sentence = before + first_letter + after
    # The original fell off the end (returning None) when no tag was present.
    return sentence
def __replace_capall(sentence):
    """Upper-case the whole sentence when a '#CAPALL' tag is present.

    :param sentence: sentence template text (may be None).
    """
    if sentence is None:
        return sentence
    while '#CAPALL' in sentence:
        sentence = sentence.upper()
        cleaned = sentence.replace('#CAPALL ', '', 1)
        if cleaned == sentence:
            # Tag without a trailing space: strip it too, otherwise the loop
            # would previously never terminate.
            cleaned = sentence.replace('#CAPALL', '', 1)
        sentence = cleaned
    # The original returned None implicitly when no tag was present.
    return sentence
def __check_spaces(sentence):
    """Normalise whitespace, leaving two spaces after '.', '!' and '?'.

    Runs of whitespace collapse to single spaces; a word that ends a
    sentence keeps an extra trailing space so sentence boundaries end up
    with two spaces.

    :param sentence: text to clean; None is passed through unchanged.
    """
    if sentence is None:
        return None
    pieces = []
    for word in sentence.split():
        if word[-1] in '.!?':
            word += ' '
        pieces.append(word)
    return ' '.join(pieces).strip()
|
Morrolan/surrealism
|
surrealism.py
|
show_sentences
|
python
|
def show_sentences():
cursor = CONN.cursor()
query = "select sen_id, sentence from sursentences where sen_is_valid = 'y' order by sen_id asc"
cursor.execute(query)
result = cursor.fetchall()
response_dict = {}
for row in result:
response_dict[row[0]] = row[1]
return response_dict
|
Return all valid/active sentences ordered by ID to allow the user to pick and choose.
:return: Dict containing the sentence ID as the key and the sentence structure as the value.
|
train
|
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L94-L111
| null |
#!/usr/bin/env python
#############################################################################
# surrealism.py - Surreal sentence and error message generator
# Copyright (C) 2014 Ian Havelock
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#
# This is a derivative work (used with permission) from www.ravenblack.net
# Credit also goes to Kevan Davis on whose work the surrealism generator at
# Ravenblack.net is based on...
#############################################################################
__all__ = ['show_faults', 'show_sentences', 'getfault', 'getsentence', 'version',
'sentence_test', 'fault_test', 'show_sentences', 'show_faults']
# IMPORTS ###################################################################
import sqlite3
import random
import pkg_resources
# PARTICULAR IMPORTS ########################################################
from pkg_resources import resource_filename
# CONSTANTS #################################################################
CONN = sqlite3.connect(resource_filename(__name__, 'surrealism.sqlite'))
# VARIABLES #################################################################
# CLASSES ###################################################################
#############################################################################
# EXTERNAL METHODS BELOW
def version():
    """Return the installed version string of the surrealism package."""
    dist = pkg_resources.require('surrealism')[0]
    return dist.version
def showfaults():
    """Backwards-compatible alias for :func:`show_faults`."""
    return show_faults()
def show_faults():
    """Return all valid/active faults ordered by ID, for pick-and-choose use.

    :return: list of (fault id, fault template) tuples.
    """
    cur = CONN.cursor()
    cur.execute("select fau_id, fault from surfaults where fau_is_valid = 'y' order by fau_id asc")
    return cur.fetchall()
def showsentences():
    """Backwards-compatible alias for :func:`show_sentences`."""
    return show_sentences()
def faulttest():
    """Backwards-compatible alias for :func:`fault_test`."""
    return fault_test()
def fault_test():
    """Generate one instance of every fault template, for testing.

    :return: list of (fault_id, generated_text_or_disabled_note) tuples.
    """
    counts = __get_table_limits()
    results = []
    outcome = None
    for fault_number in range(1, counts['max_fau'] + 1):
        fault = __get_fault(counts, fault_id=fault_number)
        if fault[0] == 'n':
            outcome = "Fault is DISABLED - ignoring..."
        if fault[0] == 'y':
            outcome = __process_sentence(fault, counts)
        results.append((fault[1], outcome))
    return results
def sentencetest():
    """Backwards-compatible alias for :func:`sentence_test`."""
    return sentence_test()
def sentence_test():
    """Generate one random version of every sentence template, for testing.

    :return: list of (sentence_id, generated_text_or_disabled_note) tuples.
    """
    counts = __get_table_limits()
    results = []
    outcome = None
    for sentence_number in range(1, counts['max_sen'] + 1):
        sentence = __get_sentence(counts, sentence_id=sentence_number)
        if sentence[0] == 'n':
            outcome = "Sentence is DISABLED - ignoring..."
        if sentence[0] == 'y':
            outcome = __process_sentence(sentence, counts)
        results.append((sentence[1], outcome))
    return results
def getfault(fault_id=None):
    """Backwards-compatible alias for :func:`get_fault`."""
    return get_fault(fault_id)
def get_fault(fault_id=None):
    """Retrieve a randomly-generated error message as a unicode string.

    :param fault_id:
        Optional integer fault_id from the database table, allowing a
        specific fault template to be retrieved each time (albeit with
        different keywords substituted in).
    """
    counts = __get_table_limits()
    result = None
    chosen_id = 0
    try:
        if isinstance(fault_id, int):
            chosen_id = fault_id
        elif isinstance(fault_id, float):
            print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
            chosen_id = round(fault_id)
        else:
            chosen_id = random.randint(1, counts['max_fau'])
    except ValueError:
        print("ValueError: Incorrect parameter type detected.")
    if chosen_id > counts['max_fau']:
        print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_fau'])))
        chosen_id = counts['max_fau']
    fault = __get_fault(counts, fault_id=chosen_id)
    if fault is None:
        print('ValueError: _fault cannot be None.')
        return result
    while fault[0] == 'n':
        # deliberately pass None so a fresh random fault is fetched until
        # an enabled ('y') one comes back
        fault = __get_fault(counts, None) if chosen_id is not None else __get_fault(counts, chosen_id)
    if fault[0] == 'y':
        result = __process_sentence(fault, counts)
    return result
def getsentence(sentence_id=None):
    """Backwards-compatible alias for :func:`get_sentence`."""
    return get_sentence(sentence_id)
def get_sentence(sentence_id=None):
    """Retrieve a randomly-generated sentence as a unicode string.

    :param sentence_id:
        Optional integer sentence_id from the database table, allowing a
        specific sentence template to be retrieved each time (albeit with
        different keywords substituted in).
    """
    counts = __get_table_limits()
    result = None
    chosen_id = 0
    try:
        if isinstance(sentence_id, int):
            chosen_id = sentence_id
        elif isinstance(sentence_id, float):
            print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
            chosen_id = round(sentence_id)
        else:
            chosen_id = random.randint(1, counts['max_sen'])
    except ValueError:
        print("ValueError: Incorrect parameter type detected.")
    if chosen_id > counts['max_sen']:
        print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_sen'])))
        chosen_id = counts['max_sen']
    sentence = __get_sentence(counts, sentence_id=chosen_id)
    if sentence is None:
        print('ValueError: _sentence cannot be None.')
        return result
    while sentence[0] == 'n':
        # deliberately pass None so a fresh random sentence is fetched
        # until an enabled ('y') one comes back
        sentence = __get_sentence(counts, None) if chosen_id is not None else __get_sentence(counts, chosen_id)
    if sentence[0] == 'y':
        result = __process_sentence(sentence, counts)
    return result
#############################################################################
# INTERNAL METHODS BELOW
def __get_fault(counts, fault_id=None):
    """Fetch a single fault row whose template bits are substituted later.

    :param counts: table-size dict from __get_table_limits().
    :param fault_id: optional specific fau_id; a random existing id is
        used when it is None, not an int, or not present in the table.
    """
    cursor = CONN.cursor()
    # Collect every fau_id that actually exists so the random pick below
    # can never reference a missing row.
    cursor.execute("select fau_id from surfaults")
    valid_ids = [row[0] for row in cursor.fetchall()]
    wanted = fault_id if type(fault_id) is int else random.randint(1, counts['max_fau'])
    while wanted not in valid_ids:
        wanted = random.randint(1, counts['max_fau'])
    cursor.execute("select * from surfaults where fau_id = {0}".format(wanted))
    return cursor.fetchone()
def __get_sentence(counts, sentence_id=None):
    """Fetch a single sentence row whose template bits are substituted later.

    :param counts: table-size dict from __get_table_limits().
    :param sentence_id: optional specific sen_id; a random existing id is
        used when it is None, not an int, or not present in the table.
    """
    cursor = CONN.cursor()
    # Collect every sen_id that actually exists so the random pick below
    # can never reference a missing row.
    cursor.execute("select sen_id from sursentences")
    valid_ids = [row[0] for row in cursor.fetchall()]
    wanted = sentence_id if type(sentence_id) is int else random.randint(1, counts['max_sen'])
    while wanted not in valid_ids:
        wanted = random.randint(1, counts['max_sen'])
    cursor.execute("select * from sursentences where sen_id = {0}".format(wanted))
    return cursor.fetchone()
def __get_verb(counts):
    """Return a random verb string from the surverbs table.

    :param counts: table-size dict from __get_table_limits().
    """
    cursor = CONN.cursor()
    cursor.execute("select verb_id from surverbs")
    valid_ids = [row[0] for row in cursor.fetchall()]
    pick = random.randint(1, counts['max_verb'])
    while pick not in valid_ids:
        pick = random.randint(1, counts['max_verb'])
    cursor.execute("select * from surverbs where verb_id = {0}".format(pick))
    # column 1 holds the verb text
    return cursor.fetchone()[1]
def __get_noun(counts):
    """Return a random noun string from the surnouns table.

    :param counts: table-size dict from __get_table_limits().
    """
    cursor = CONN.cursor()
    cursor.execute("select noun_id from surnouns")
    valid_ids = [row[0] for row in cursor.fetchall()]
    pick = random.randint(1, counts['max_nouns'])
    while pick not in valid_ids:
        pick = random.randint(1, counts['max_nouns'])
    cursor.execute("select * from surnouns where noun_id = {0}".format(pick))
    # column 1 holds the noun text
    return cursor.fetchone()[1]
def __get_adjective(counts):
    """Return a random adjective string from the suradjs table.

    :param counts: table-size dict from __get_table_limits().
    """
    cursor = CONN.cursor()
    cursor.execute("select adj_id from suradjs")
    valid_ids = [row[0] for row in cursor.fetchall()]
    pick = random.randint(1, counts['max_adjectives'])
    while pick not in valid_ids:
        pick = random.randint(1, counts['max_adjectives'])
    cursor.execute("select * from suradjs where adj_id = {0}".format(pick))
    # column 1 holds the adjective text
    return cursor.fetchone()[1]
def __get_name(counts):
    """Return a random name string from the surnames table.

    :param counts: table-size dict from __get_table_limits().
    """
    cursor = CONN.cursor()
    cursor.execute("select name_id from surnames")
    valid_ids = [row[0] for row in cursor.fetchall()]
    pick = random.randint(1, counts['max_names'])
    while pick not in valid_ids:
        pick = random.randint(1, counts['max_names'])
    cursor.execute("select * from surnames where name_id = {0}".format(pick))
    # column 1 holds the name text
    return cursor.fetchone()[1]
def __get_table_limits():
    """Count the rows of each content table.

    The counts are the upper bounds for the random id picks used
    throughout the module.

    :return: dict with keys 'max_adjectives', 'max_names', 'max_nouns',
        'max_sen', 'max_fau' and 'max_verb'.
    """
    # The original initialised the dict with stale key names
    # ('max_sentences', 'max_faults', 'max_verbs') that were never the keys
    # actually written, leaving dead None entries; the table below uses the
    # real keys and replaces six copy-pasted count stanzas with one loop.
    tables = {
        'max_adjectives': 'suradjs',
        'max_names': 'surnames',
        'max_nouns': 'surnouns',
        'max_sen': 'sursentences',
        'max_fau': 'surfaults',
        'max_verb': 'surverbs',
    }
    cursor = CONN.cursor()
    table_counts = {}
    for key, table in tables.items():
        cursor.execute('SELECT count(*) FROM {0}'.format(table))
        # fetchone() returns a 1-tuple; keep only the integer count
        table_counts[key] = cursor.fetchone()[0]
    return table_counts
def __process_sentence(sentence_tuple, counts):
    """Expand a raw sentence/fault row into finished text.

    :param sentence_tuple: full database row; index 2 holds the template.
    :param counts: table-size dict from __get_table_limits().
    """
    text = sentence_tuple[2]
    # Substitute each placeholder type in turn.  Order matters: words are
    # filled in first, then '#AN' agreement, repeats and random picks, and
    # finally capitalisation and whitespace clean-up.
    text = __replace_verbs(text, counts)
    text = __replace_nouns(text, counts)
    text = ___replace_adjective_maybe(text, counts)
    text = __replace_adjective(text, counts)
    text = __replace_names(text, counts)
    text = __replace_an(text)
    text = __replace_repeat(text)
    text = __replace_random(text)
    text = __replace_capitalise(text)
    text = __replace_capall(text)
    text = __check_spaces(text)
    return text
def __replace_verbs(sentence, counts):
    """Replace every '#VERB' placeholder with a random verb.

    :param sentence: sentence template text (may be None).
    :param counts: table-size dict from __get_table_limits().
    """
    if sentence is None:
        return sentence
    while '#VERB' in sentence:
        sentence = sentence.replace('#VERB', str(__get_verb(counts)), 1)
    return sentence
def __replace_nouns(sentence, counts):
    """Replace every '#NOUN' placeholder with a random noun.

    :param sentence: sentence template text (may be None).
    :param counts: table-size dict from __get_table_limits().
    """
    if sentence is None:
        return sentence
    while '#NOUN' in sentence:
        sentence = sentence.replace('#NOUN', str(__get_noun(counts)), 1)
    return sentence
def ___replace_adjective_maybe(sentence, counts):
    """Replace each '#ADJECTIVE_MAYBE' with a random adjective or nothing.

    A single coin flip is made up front, so every '#ADJECTIVE_MAYBE' in a
    given sentence receives the same keep-or-drop decision.

    :param sentence: sentence template text (may be None).
    :param counts: table-size dict from __get_table_limits().
    """
    keep = random.randint(0, 1) % 2 == 0
    if sentence is None:
        return sentence
    while '#ADJECTIVE_MAYBE' in sentence:
        if keep:
            sentence = sentence.replace('#ADJECTIVE_MAYBE', ' ' + str(__get_adjective(counts)), 1)
        else:
            sentence = sentence.replace('#ADJECTIVE_MAYBE', '', 1)
    return sentence
def __replace_adjective(sentence, counts):
    """Replace every '#ADJECTIVE' placeholder with a random adjective.

    :param sentence: sentence template text (may be None).
    :param counts: table-size dict from __get_table_limits().
    """
    if sentence is None:
        return sentence
    while '#ADJECTIVE' in sentence:
        sentence = sentence.replace('#ADJECTIVE', str(__get_adjective(counts)), 1)
    return sentence
def __replace_names(sentence, counts):
    """Replace every '#NAME' placeholder with a random name.

    :param sentence: sentence template text (may be None).
    :param counts: table-size dict from __get_table_limits().
    """
    if sentence is None:
        return sentence
    while '#NAME' in sentence:
        sentence = sentence.replace('#NAME', str(__get_name(counts)), 1)
    return sentence
def __replace_an(sentence):
    """Replace each '#AN' with 'a' or 'an' based on the following word.

    'an' is used when the character after the placeholder's trailing space
    is a vowel, otherwise 'a'.

    :param sentence: sentence template text (may be None).
    """
    if sentence is None:
        return sentence
    while '#AN' in sentence:
        # '#AN' plus its trailing space occupy 4 characters, so this points
        # at the first letter of the next word.
        next_char_index = sentence.find('#AN') + 4
        # Guard against '#AN' at the very end of the text, which previously
        # raised IndexError; default to 'a' in that case.
        if next_char_index < len(sentence) and sentence[next_char_index] in 'aeiouAEIOU':
            sentence = sentence.replace('#AN', 'an', 1)
        else:
            sentence = sentence.replace('#AN', 'a', 1)
    return sentence
def __replace_random(sentence):
    """Expand each '#RANDOM[n,opt1,...,optn]' into one random option.

    The first comma-separated field is the number of options that follow;
    one of them is chosen uniformly at random.

    :param sentence: sentence template text (may be None).
    """
    if sentence is None:
        return sentence
    while '#RANDOM' in sentence:
        tag_start = sentence.find('#RANDOM')
        option_start = tag_start + 8        # skip '#RANDOM['
        # NOTE(review): like the original, this takes the first ']' in the
        # whole string -- assumes no earlier ']' precedes the tag.
        option_end = sentence.find(']')
        options = sentence[option_start:option_end].split(',')
        pick = random.randint(1, int(options[0]))
        # The original guarded on "find('#RANDOM') is not None", which is
        # always true (find returns an int); the guard has been removed.
        sentence = sentence.replace(sentence[tag_start:option_end + 1], options[pick], 1)
    return sentence
def __replace_repeat(sentence):
    """Expand '#DEFINE_REPEAT[key,text]' / '#REPEAT[key]' placeholders.

    '#DEFINE_REPEAT[key,text]' stores *text* under *key* and is removed from
    the sentence; every '#REPEAT[key]' is then replaced with the stored
    text.  (Use sentence_id 47 for testing.)

    :param sentence: sentence template text (may be None).
    """
    if sentence is None:
        return sentence
    stored = {}
    while '#DEFINE_REPEAT' in sentence:
        tag_start = sentence.find('#DEFINE_REPEAT')
        body_start = tag_start + 15         # skip '#DEFINE_REPEAT['
        body_end = sentence.find(']')
        fields = sentence[body_start:body_end].split(',')
        stored[fields[0]] = fields[1]
        sentence = sentence.replace(sentence[tag_start:body_end + 1], '', 1)
    while '#REPEAT' in sentence:
        tag_start = sentence.find('#REPEAT')
        key_start = tag_start + 8           # skip '#REPEAT['
        # searching from key_start avoids dodgy bracket-matching errors
        key_end = sentence.find(']', key_start)
        key = sentence[key_start:key_end]
        # An unknown key previously left the string unchanged and spun this
        # loop forever; drop the tag instead so processing always terminates.
        replacement = str(stored[key]) if key in stored else ''
        sentence = sentence.replace(sentence[tag_start:key_end + 1], replacement)
    return sentence
def __replace_capitalise(sentence):
    """Replace each '#CAPITALISE' tag and capitalise the word that follows.

    NOTE(review): like the original, only a lower-case following letter is
    upper-cased; an already-capitalised word is left untouched.

    :param sentence: sentence template text (may be None).
    """
    if sentence is None:
        return sentence
    while '#CAPITALISE' in sentence:
        # The original referenced an undefined name '_sentence' here, which
        # raised NameError whenever the tag was present; fixed to 'sentence'.
        tag_index = sentence.find('#CAPITALISE')
        before = sentence[:tag_index]
        first_letter = sentence[tag_index + 12:tag_index + 13]
        after = sentence[tag_index + 13:]
        if first_letter in "abcdefghijklmnopqrstuvwxyz":
            first_letter = first_letter.capitalize()
        sentence = before + first_letter + after
    # The original fell off the end (returning None) when no tag was present.
    return sentence
def __replace_capall(sentence):
    """Upper-case the whole sentence for each '#CAPALL ' marker, then strip
    the marker itself.

    :param sentence: sentence template, possibly None.
    :return: the processed sentence; the input unchanged when it is None or
        contains no marker.
    """
    if sentence is None:
        return sentence
    while sentence.find('#CAPALL') != -1:
        sentence = sentence.upper()
        if '#CAPALL ' in sentence:
            sentence = sentence.replace('#CAPALL ', '', 1)
        else:
            # BUG FIX: a marker without a trailing space previously looped
            # forever because replace('#CAPALL ') removed nothing.
            sentence = sentence.replace('#CAPALL', '', 1)
    # BUG FIX: the original fell off the end (returning None) when a
    # non-None sentence contained no marker.
    return sentence
def __check_spaces(sentence):
    """Normalise whitespace: single spaces between words, two spaces after
    sentence-ending punctuation (. ! ?).

    :param sentence: text to normalise, possibly None.
    :return: the re-spaced text (None input yields None).
    """
    if sentence is None:
        return None
    pieces = []
    for token in sentence.split():
        # give sentence-ending words an extra trailing space
        if token[-1] in '.!?':
            token += ' '
        pieces.append(' ' + token)
    # joining then stripping removes the artificial leading space and any
    # trailing whitespace in one go
    return ''.join(pieces).strip()
|
Morrolan/surrealism
|
surrealism.py
|
get_fault
|
python
|
def get_fault(fault_id=None):
counts = __get_table_limits()
result = None
id_ = 0
try:
if isinstance(fault_id, int):
id_ = fault_id
elif isinstance(fault_id, float):
print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
id_ = round(fault_id)
else:
id_ = random.randint(1, counts['max_fau'])
except ValueError:
print("ValueError: Incorrect parameter type detected.")
if id_ <= counts['max_fau']:
fault = __get_fault(counts, fault_id=id_)
else:
print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_fau'])))
id_ = counts['max_fau']
fault = __get_fault(counts, fault_id=id_)
if fault is not None:
while fault[0] == 'n':
if id_ is not None:
fault = __get_fault(counts, None)
else:
fault = __get_fault(counts, id_)
if fault[0] == 'y':
result = __process_sentence(fault, counts)
return result
else:
print('ValueError: _fault cannot be None.')
|
Retrieve a randomly-generated error message as a unicode string.
:param fault_id:
Allows you to optionally specify an integer representing the fault_id
from the database table. This allows you to retrieve a specific fault
each time, albeit with different keywords.
|
train
|
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L185-L229
|
[
"def __get_table_limits():\n \"\"\"Here we simply take a count of each of the database tables so we know our\n upper limits for our random number calls then return a dictionary of them \n to the calling function...\"\"\"\n\n table_counts = {\n 'max_adjectives': None,\n 'max_names': None,\n 'max_nouns': None,\n 'max_sentences': None,\n 'max_faults': None,\n 'max_verbs': None\n }\n\n cursor = CONN.cursor()\n\n cursor.execute('SELECT count(*) FROM suradjs')\n table_counts['max_adjectives'] = cursor.fetchone()\n table_counts['max_adjectives'] = table_counts['max_adjectives'][0]\n\n cursor.execute('SELECT count(*) FROM surnames')\n table_counts['max_names'] = cursor.fetchone()\n table_counts['max_names'] = table_counts['max_names'][0]\n\n cursor.execute('SELECT count(*) FROM surnouns')\n table_counts['max_nouns'] = cursor.fetchone()\n table_counts['max_nouns'] = table_counts['max_nouns'][0]\n\n cursor.execute('SELECT count(*) FROM sursentences')\n table_counts['max_sen'] = cursor.fetchone()\n table_counts['max_sen'] = table_counts['max_sen'][0]\n\n cursor.execute('SELECT count(*) FROM surfaults')\n table_counts['max_fau'] = cursor.fetchone()\n table_counts['max_fau'] = table_counts['max_fau'][0]\n\n cursor.execute('SELECT count(*) FROM surverbs')\n table_counts['max_verb'] = cursor.fetchone()\n table_counts['max_verb'] = table_counts['max_verb'][0]\n\n return table_counts\n",
"def __get_fault(counts, fault_id=None):\n \"\"\"Let's fetch a random fault that we then need to substitute bits of...\n :param counts:\n :param fault_id:\n \"\"\"\n\n # First of all we need a cursor and a query to retrieve our ID's\n cursor = CONN.cursor()\n check_query = \"select fau_id from surfaults\"\n\n # Now we fetch the result of the query and save it into check_result\n cursor.execute(check_query)\n check_result = cursor.fetchall()\n\n # declare an empty list to be populated below\n id_list = []\n id_to_fetch = None\n\n for row in check_result:\n id_list.append(row[0])\n\n if fault_id is not None:\n if type(fault_id) is int:\n id_to_fetch = fault_id\n else:\n id_to_fetch = random.randint(1, counts['max_fau'])\n\n while id_to_fetch not in id_list:\n id_to_fetch = random.randint(1, counts['max_fau'])\n\n query = (\"select * from surfaults where fau_id = {0}\".format(id_to_fetch))\n cursor.execute(query)\n result = cursor.fetchone()\n # cursor.close()\n\n return result\n",
"def __process_sentence(sentence_tuple, counts):\n \"\"\"pull the actual sentence from the tuple (tuple contains additional data such as ID)\n :param _sentence_tuple:\n :param counts:\n \"\"\"\n\n sentence = sentence_tuple[2]\n\n # now we start replacing words one type at a time...\n sentence = __replace_verbs(sentence, counts)\n\n sentence = __replace_nouns(sentence, counts)\n\n sentence = ___replace_adjective_maybe(sentence, counts)\n\n sentence = __replace_adjective(sentence, counts)\n\n sentence = __replace_names(sentence, counts)\n\n # here we perform a check to see if we need to use A or AN depending on the \n # first letter of the following word...\n sentence = __replace_an(sentence)\n\n # replace the new repeating segments\n sentence = __replace_repeat(sentence)\n\n # now we will read, choose and substitute each of the RANDOM sentence tuples\n sentence = __replace_random(sentence)\n\n # now we are going to choose whether to capitalize words/sentences or not\n sentence = __replace_capitalise(sentence)\n\n # here we will choose whether to capitalize all words in the sentence\n sentence = __replace_capall(sentence)\n\n # check for appropriate spaces in the correct places.\n sentence = __check_spaces(sentence)\n\n return sentence\n"
] |
#!/usr/bin/env python
#############################################################################
# surrealism.py - Surreal sentence and error message generator
# Copyright (C) 2014 Ian Havelock
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#
# This is a derivative work (used with permission) from www.ravenblack.net
# Credit also goes to Kevan Davis on whose work the surrealism generator at
# Ravenblack.net is based on...
#############################################################################
__all__ = ['show_faults', 'show_sentences', 'getfault', 'getsentence', 'version',
'sentence_test', 'fault_test', 'show_sentences', 'show_faults']
# IMPORTS ###################################################################
import sqlite3
import random
import pkg_resources
# PARTICULAR IMPORTS ########################################################
from pkg_resources import resource_filename
# CONSTANTS #################################################################
CONN = sqlite3.connect(resource_filename(__name__, 'surrealism.sqlite'))
# VARIABLES #################################################################
# CLASSES ###################################################################
#############################################################################
# EXTERNAL METHODS BELOW
def version():
    """Return the installed version string of the surrealism package."""
    dist = pkg_resources.require('surrealism')[0]
    return dist.version
def showfaults():
    """Backwards-compatible alias for show_faults()."""
    return show_faults()
def show_faults():
    """Return every valid/active fault, ordered by ID.

    :return: list of (fau_id, fault template) tuples.
    """
    cursor = CONN.cursor()
    # only rows flagged fau_is_valid = 'y' are offered to callers
    cursor.execute(
        "select fau_id, fault from surfaults where fau_is_valid = 'y' order by fau_id asc")
    return cursor.fetchall()
def showsentences():
    """Backwards-compatible alias for show_sentences()."""
    return show_sentences()
def show_sentences():
    """Return every valid/active sentence template, keyed by ID.

    :return: dict mapping sentence ID -> sentence structure.
    """
    cursor = CONN.cursor()
    cursor.execute(
        "select sen_id, sentence from sursentences where sen_is_valid = 'y' order by sen_id asc")
    return {sen_id: template for sen_id, template in cursor.fetchall()}
def faulttest():
    """Backwards-compatible alias for fault_test()."""
    return fault_test()
def fault_test():
    """Render one instance of every fault template, for testing purposes.

    :return: list of (fault row id, rendered text) tuples; disabled rows get
        a placeholder string.
    """
    counts = __get_table_limits()
    results = []
    rendered = None
    for fault_number in range(1, counts['max_fau'] + 1):
        fault = __get_fault(counts, fault_id=fault_number)
        if fault[0] == 'n':
            rendered = "Fault is DISABLED - ignoring..."
        if fault[0] == 'y':
            rendered = __process_sentence(fault, counts)
        results.append((fault[1], rendered))
    return results
def sentencetest():
    """Backwards-compatible alias for sentence_test()."""
    return sentence_test()
def sentence_test():
    """Render one random version of every sentence template, for testing.

    :return: list of (sentence row id, rendered text) tuples; disabled rows
        get a placeholder string.
    """
    counts = __get_table_limits()
    results = []
    rendered = None
    for sentence_number in range(1, counts['max_sen'] + 1):
        sentence = __get_sentence(counts, sentence_id=sentence_number)
        if sentence[0] == 'n':
            rendered = "Sentence is DISABLED - ignoring..."
        if sentence[0] == 'y':
            rendered = __process_sentence(sentence, counts)
        results.append((sentence[1], rendered))
    return results
def getfault(fault_id=None):
    """Backwards-compatible alias for get_fault()."""
    return get_fault(fault_id)
def get_fault(fault_id=None):
    """Retrieve a randomly-generated error message as a unicode string.

    :param fault_id:
        Allows you to optionally specify an integer representing the fault_id
        from the database table. This allows you to retrieve a specific fault
        each time, albeit with different keywords.
    :return: the rendered fault string, or None when rendering fails.
    """
    counts = __get_table_limits()
    result = None
    id_ = 0
    try:
        if isinstance(fault_id, int):
            id_ = fault_id
        elif isinstance(fault_id, float):
            print("""ValueError: Floating point number detected.
            Rounding number to 0 decimal places.""")
            id_ = round(fault_id)
        else:
            # no usable id supplied - pick one at random
            id_ = random.randint(1, counts['max_fau'])
    except ValueError:
        # round() raises ValueError for NaN inputs
        print("ValueError: Incorrect parameter type detected.")
    # clamp out-of-range ids down to the highest known fault id
    if id_ <= counts['max_fau']:
        fault = __get_fault(counts, fault_id=id_)
    else:
        print("""ValueError: Parameter integer is too high.
        Maximum permitted value is {0}.""".format(str(counts['max_fau'])))
        id_ = counts['max_fau']
        fault = __get_fault(counts, fault_id=id_)
    if fault is not None:
        # keep drawing random faults until an enabled ('y') row turns up
        while fault[0] == 'n':
            # NOTE(review): id_ is always an int at this point, so the else
            # branch looks unreachable; both arms re-fetch (passing None
            # makes __get_fault pick a random id) - confirm intent.
            if id_ is not None:
                fault = __get_fault(counts, None)
            else:
                fault = __get_fault(counts, id_)
        if fault[0] == 'y':
            result = __process_sentence(fault, counts)
        return result
    else:
        print('ValueError: _fault cannot be None.')
def getsentence(sentence_id=None):
    """Backwards-compatible alias for get_sentence()."""
    return get_sentence(sentence_id)
def get_sentence(sentence_id=None):
    """Retrieve a randomly-generated sentence as a unicode string.

    :param sentence_id:
        Allows you to optionally specify an integer representing the
        sentence_id from the database table. This allows you to retrieve a
        specific sentence each time, albeit with different keywords.
    :return: the rendered sentence string, or None when rendering fails.
    """
    counts = __get_table_limits()
    result = None
    id_ = 0
    try:
        if isinstance(sentence_id, int):
            id_ = sentence_id
        elif isinstance(sentence_id, float):
            print("""ValueError: Floating point number detected.
            Rounding number to 0 decimal places.""")
            id_ = round(sentence_id)
        else:
            # no usable id supplied - pick one at random
            id_ = random.randint(1, counts['max_sen'])
    except ValueError:
        # round() raises ValueError for NaN inputs
        print("ValueError: Incorrect parameter type detected.")
    # clamp out-of-range ids down to the highest known sentence id
    if id_ <= counts['max_sen']:
        sentence = __get_sentence(counts, sentence_id=id_)
    else:
        print("""ValueError: Parameter integer is too high.
        Maximum permitted value is {0}.""".format(str(counts['max_sen'])))
        id_ = counts['max_sen']
        sentence = __get_sentence(counts, sentence_id=id_)
    if sentence is not None:
        # keep drawing random sentences until an enabled ('y') row turns up
        while sentence[0] == 'n':
            if id_ is not None:
                # deliberately pass None so __get_sentence picks a random id
                sentence = __get_sentence(counts, None)
            else:
                sentence = __get_sentence(counts, id_)
        if sentence[0] == 'y':
            result = __process_sentence(sentence, counts)
        return result
    else:
        print('ValueError: _sentence cannot be None.')
#############################################################################
# INTERNAL METHODS BELOW
def __get_fault(counts, fault_id=None):
    """Fetch one full row from surfaults, by id or at random.

    :param counts: table-size dict from __get_table_limits().
    :param fault_id: optional integer row id; non-int values fall back to a
        random id.
    :return: the surfaults row tuple.
    """
    # First of all we need a cursor and a query to retrieve our ID's
    cursor = CONN.cursor()
    check_query = "select fau_id from surfaults"
    # Now we fetch the result of the query and save it into check_result
    cursor.execute(check_query)
    check_result = cursor.fetchall()
    # declare an empty list to be populated below
    id_list = []
    id_to_fetch = None
    for row in check_result:
        id_list.append(row[0])
    if fault_id is not None:
        if type(fault_id) is int:
            id_to_fetch = fault_id
        else:
            id_to_fetch = random.randint(1, counts['max_fau'])
    # re-roll until we hit an id that actually exists (ids may be sparse);
    # this also covers the fault_id-is-None case, where id_to_fetch is None
    while id_to_fetch not in id_list:
        id_to_fetch = random.randint(1, counts['max_fau'])
    # id_to_fetch is an int drawn from id_list by now, so this format()
    # cannot inject SQL
    query = ("select * from surfaults where fau_id = {0}".format(id_to_fetch))
    cursor.execute(query)
    result = cursor.fetchone()
    # cursor.close()
    return result
def __get_sentence(counts, sentence_id=None):
    """Fetch one full row from sursentences, by id or at random.

    :param counts: table-size dict from __get_table_limits().
    :param sentence_id: optional integer row id; non-int values fall back to
        a random id.
    :return: the sursentences row tuple.
    """
    # First of all we need a cursor and a query to retrieve our ID's
    cursor = CONN.cursor()
    check_query = "select sen_id from sursentences"
    # Now we fetch the result of the query and save it into check_result
    cursor.execute(check_query)
    check_result = cursor.fetchall()
    # declare an empty list to be populated below
    id_list = []
    id_to_fetch = None
    # Populate the id_list variable with all of the ID's we retrieved from the database query.
    for row in check_result:
        id_list.append(row[0])
    if sentence_id is not None:
        if type(sentence_id) is int:
            id_to_fetch = sentence_id
        else:
            id_to_fetch = random.randint(1, counts['max_sen'])
    # re-roll until we hit an id that actually exists (ids may be sparse);
    # this also covers the sentence_id-is-None case, where id_to_fetch is None
    while id_to_fetch not in id_list:
        id_to_fetch = random.randint(1, counts['max_sen'])
    # id_to_fetch is an int drawn from id_list by now, so this format()
    # cannot inject SQL
    query = ("select * from sursentences where sen_id = {0}".format(id_to_fetch))
    cursor.execute(query)
    result = cursor.fetchone()
    # cursor.close()
    return result
def __get_verb(counts):
    """Fetch one random verb string from the surverbs table.

    :param counts: table-size dict from __get_table_limits().
    """
    cursor = CONN.cursor()
    cursor.execute("select verb_id from surverbs")
    valid_ids = [row[0] for row in cursor.fetchall()]
    # draw candidate ids until one exists (ids may be sparse)
    verb_id = random.randint(1, counts['max_verb'])
    while verb_id not in valid_ids:
        verb_id = random.randint(1, counts['max_verb'])
    cursor.execute("select * from surverbs where verb_id = {0}".format(verb_id))
    return cursor.fetchone()[1]
def __get_noun(counts):
    """Fetch one random noun string from the surnouns table.

    :param counts: table-size dict from __get_table_limits().
    """
    cursor = CONN.cursor()
    cursor.execute("select noun_id from surnouns")
    valid_ids = [row[0] for row in cursor.fetchall()]
    # draw candidate ids until one exists (ids may be sparse)
    noun_id = random.randint(1, counts['max_nouns'])
    while noun_id not in valid_ids:
        noun_id = random.randint(1, counts['max_nouns'])
    cursor.execute("select * from surnouns where noun_id = {0}".format(noun_id))
    return cursor.fetchone()[1]
def __get_adjective(counts):
    """Fetch one random adjective string from the suradjs table.

    :param counts: table-size dict from __get_table_limits().
    """
    cursor = CONN.cursor()
    cursor.execute("select adj_id from suradjs")
    valid_ids = [row[0] for row in cursor.fetchall()]
    # draw candidate ids until one exists (ids may be sparse)
    adj_id = random.randint(1, counts['max_adjectives'])
    while adj_id not in valid_ids:
        adj_id = random.randint(1, counts['max_adjectives'])
    cursor.execute("select * from suradjs where adj_id = {0}".format(adj_id))
    return cursor.fetchone()[1]
def __get_name(counts):
    """Fetch one random name string from the surnames table.

    :param counts: table-size dict from __get_table_limits().
    """
    cursor = CONN.cursor()
    cursor.execute("select name_id from surnames")
    valid_ids = [row[0] for row in cursor.fetchall()]
    # draw candidate ids until one exists (ids may be sparse)
    name_id = random.randint(1, counts['max_names'])
    while name_id not in valid_ids:
        name_id = random.randint(1, counts['max_names'])
    cursor.execute("select * from surnames where name_id = {0}".format(name_id))
    return cursor.fetchone()[1]
def __get_table_limits():
    """Count the rows of each content table.

    :return: dict whose 'max_*' keys hold the row counts used as upper
        bounds for random ID generation elsewhere in this module.
    """
    # (result key, table name) pairs; the keys must stay exactly as callers
    # expect them ('max_sen', 'max_fau', 'max_verb' are the short forms).
    queries = (
        ('max_adjectives', 'suradjs'),
        ('max_names', 'surnames'),
        ('max_nouns', 'surnouns'),
        ('max_sen', 'sursentences'),
        ('max_fau', 'surfaults'),
        ('max_verb', 'surverbs'),
    )
    # Legacy placeholder keys kept for backward compatibility: the original
    # dict initialised these to None and never filled them in.
    table_counts = {
        'max_sentences': None,
        'max_faults': None,
        'max_verbs': None,
    }
    cursor = CONN.cursor()
    for key, table in queries:
        # table names come from the hard-coded tuple above, so this format()
        # cannot inject SQL
        cursor.execute('SELECT count(*) FROM {0}'.format(table))
        table_counts[key] = cursor.fetchone()[0]
    return table_counts
def __process_sentence(sentence_tuple, counts):
    """Run a template row through every substitution pass.

    :param sentence_tuple: DB row; index 2 holds the template text.
    :param counts: table-size dict from __get_table_limits().
    :return: the fully substituted sentence.
    """
    text = sentence_tuple[2]
    # Order matters: word substitutions first...
    for word_pass in (__replace_verbs, __replace_nouns,
                      ___replace_adjective_maybe, __replace_adjective,
                      __replace_names):
        text = word_pass(text, counts)
    # ...then the structural passes: a/an agreement with the following word,
    # repeating segments, and random option picks...
    text = __replace_an(text)
    text = __replace_repeat(text)
    text = __replace_random(text)
    # ...and finally capitalisation and whitespace clean-up.
    text = __replace_capitalise(text)
    text = __replace_capall(text)
    text = __check_spaces(text)
    return text
def __replace_verbs(sentence, counts):
    """Substitute every '#VERB' marker with a freshly drawn random verb.

    :param sentence: sentence template, possibly None.
    :param counts: table-size dict from __get_table_limits().
    """
    if sentence is None:
        return sentence
    # one marker per iteration so each occurrence gets its own verb
    while '#VERB' in sentence:
        sentence = sentence.replace('#VERB', str(__get_verb(counts)), 1)
    return sentence
def __replace_nouns(sentence, counts):
    """Substitute every '#NOUN' marker with a freshly drawn random noun.

    :param sentence: sentence template, possibly None.
    :param counts: table-size dict from __get_table_limits().
    """
    if sentence is None:
        return sentence
    # one marker per iteration so each occurrence gets its own noun
    while '#NOUN' in sentence:
        sentence = sentence.replace('#NOUN', str(__get_noun(counts)), 1)
    return sentence
def ___replace_adjective_maybe(sentence, counts):
    """Replace '#ADJECTIVE_MAYBE' markers with a random adjective or nothing.

    :param sentence: sentence template, possibly None.
    :param counts: table-size dict from __get_table_limits().
    """
    # NOTE(review): the coin is flipped once per call, so every
    # '#ADJECTIVE_MAYBE' in the same sentence shares the same outcome -
    # confirm this is intended rather than a per-marker decision.
    random_decision = random.randint(0, 1)
    if sentence is not None:
        while sentence.find('#ADJECTIVE_MAYBE') != -1:
            if random_decision % 2 == 0:
                # the leading space re-separates the adjective from the word
                # the marker was glued to
                sentence = sentence.replace('#ADJECTIVE_MAYBE',
                                            ' ' + str(__get_adjective(counts)), 1)
            elif random_decision % 2 != 0:
                sentence = sentence.replace('#ADJECTIVE_MAYBE', '', 1)
            if sentence.find('#ADJECTIVE_MAYBE') == -1:
                return sentence
        return sentence
    else:
        return sentence
def __replace_adjective(sentence, counts):
    """Substitute every '#ADJECTIVE' marker with a random adjective.

    :param sentence: sentence template, possibly None.
    :param counts: table-size dict from __get_table_limits().
    """
    if sentence is None:
        return sentence
    # one marker per iteration so each occurrence gets its own adjective
    while '#ADJECTIVE' in sentence:
        sentence = sentence.replace('#ADJECTIVE', str(__get_adjective(counts)), 1)
    return sentence
def __replace_names(sentence, counts):
    """Substitute every '#NAME' marker with a freshly drawn random name.

    :param sentence: sentence template, possibly None.
    :param counts: table-size dict from __get_table_limits().
    """
    if sentence is None:
        return sentence
    # one marker per iteration so each occurrence gets its own name
    while '#NAME' in sentence:
        sentence = sentence.replace('#NAME', str(__get_name(counts)), 1)
    return sentence
def __replace_an(sentence):
    """Replace each '#AN' marker with 'a' or 'an'.

    The choice depends on whether the word following the marker starts with
    a vowel ('an') or a consonant ('a').

    :param sentence: sentence template, possibly None.
    :return: the processed sentence; None input yields None.
    """
    if sentence is None:
        return sentence
    while sentence.find('#AN') != -1:
        # index of the first letter of the word after '#AN ' (3-char marker
        # plus one space)
        next_letter_index = sentence.find('#AN') + 4
        # BUG FIX: guard against '#AN' at (or near) the end of the sentence,
        # which previously raised IndexError; default to 'a' in that case.
        if next_letter_index < len(sentence) and sentence[next_letter_index] in 'aeiouAEIOU':
            sentence = sentence.replace('#AN', 'an', 1)
        else:
            sentence = sentence.replace('#AN', 'a', 1)
    return sentence
def __replace_random(sentence):
    """Resolve each '#RANDOM[n,opt1,...,optn]' tag to one random option.

    :param sentence: sentence template, possibly None.
    """
    sub_list = None
    choice = None
    if sentence is not None:
        while sentence.find('#RANDOM') != -1:
            random_index = sentence.find('#RANDOM')
            start_index = sentence.find('#RANDOM') + 8  # skip '#RANDOM['
            end_index = sentence.find(']')  # first ']' - assumes no nesting
            # NOTE(review): str.find returns -1, never None, so this check is
            # always true; kept as written.
            if sentence.find('#RANDOM') is not None:
                # element 0 is the option count, elements 1..n the options
                sub_list = sentence[start_index:end_index].split(',')
                choice = random.randint(1, int(sub_list[0]))
                # _sub_list[_choice]
                to_be_replaced = sentence[random_index:end_index + 1]
                sentence = sentence.replace(to_be_replaced, sub_list[choice], 1)
            if sentence.find('#RANDOM') == -1:
                return sentence
        return sentence
    else:
        return sentence
def __replace_repeat(sentence):
    """Expand '#DEFINE_REPEAT[key,text]' definitions and '#REPEAT[key]' uses.

    Allows the use of repeating random-elements such as in the 'Ten green
    bottles' type sentences.
    :param sentence: sentence template, possibly None.
    """
    ######### USE SENTENCE_ID 47 for testing!
    # maps repeat-key -> replacement text harvested from #DEFINE_REPEAT tags
    repeat_dict = {}
    if sentence is not None:
        # Pass 1: collect every '#DEFINE_REPEAT[key,text]' block and strip it.
        while sentence.find('#DEFINE_REPEAT') != -1:
            begin_index = sentence.find('#DEFINE_REPEAT')
            start_index = begin_index + 15  # skip past '#DEFINE_REPEAT['
            end_index = sentence.find(']')  # first ']' - assumes no nesting
            # NOTE(review): str.find returns -1, never None, so this check is
            # always true; kept as written.
            if sentence.find('#DEFINE_REPEAT') is not None:
                sub_list = sentence[start_index:end_index].split(',')
                choice = sub_list[0]
                repeat_text = sub_list[1]
                repeat_dict[choice] = repeat_text
                sentence = sentence.replace(sentence[begin_index:end_index + 1], '', 1)
        # Pass 2: substitute each '#REPEAT[key]' with the stored text.
        while sentence.find('#REPEAT') != -1:
            if sentence.find('#REPEAT') is not None:
                repeat_begin_index = sentence.find('#REPEAT')
                repeat_start_index = repeat_begin_index + 8  # skip '#REPEAT['
                # by searching from repeat_index below we don't encounter dodgy bracket-matching errors.
                repeat_end_index = sentence.find(']', repeat_start_index)
                repeat_index = sentence[repeat_start_index:repeat_end_index]
                # NOTE(review): if the key was never defined nothing is
                # replaced and this loop does not terminate - confirm inputs
                # always define their keys.
                if repeat_index in repeat_dict:
                    sentence = sentence.replace(sentence[repeat_begin_index:repeat_end_index + 1],
                                                str(repeat_dict[repeat_index]))
                if sentence.find('#REPEAT') == -1:
                    return sentence
        return sentence
    else:
        return sentence
def __replace_capitalise(sentence):
    """Replace each '#CAPITALISE ' marker and capitalise the following word.

    The marker plus its trailing space (12 characters) is removed and the
    next character is upper-cased when it is a lowercase ASCII letter.

    :param sentence: sentence template, possibly None.
    :return: the processed sentence; the input unchanged when it is None or
        contains no marker.
    """
    if sentence is None:
        return sentence
    while sentence.find('#CAPITALISE') != -1:
        # BUG FIX: the original read '_sentence.find(...)', raising NameError
        # because the parameter is named 'sentence'.
        cap_index = sentence.find('#CAPITALISE')
        part1 = sentence[:cap_index]
        # first character after '#CAPITALISE ' (11-char marker + one space)
        part2 = sentence[cap_index + 12:cap_index + 13]
        part3 = sentence[cap_index + 13:]
        if part2 in "abcdefghijklmnopqrstuvwxyz":
            sentence = part1 + part2.capitalize() + part3
        else:
            sentence = part1 + part2 + part3
    # BUG FIX: the original fell off the end (returning None) when a
    # non-None sentence contained no marker.
    return sentence
def __replace_capall(sentence):
    """Upper-case the whole sentence for each '#CAPALL ' marker, then strip
    the marker itself.

    :param sentence: sentence template, possibly None.
    :return: the processed sentence; the input unchanged when it is None or
        contains no marker.
    """
    if sentence is None:
        return sentence
    while sentence.find('#CAPALL') != -1:
        sentence = sentence.upper()
        if '#CAPALL ' in sentence:
            sentence = sentence.replace('#CAPALL ', '', 1)
        else:
            # BUG FIX: a marker without a trailing space previously looped
            # forever because replace('#CAPALL ') removed nothing.
            sentence = sentence.replace('#CAPALL', '', 1)
    # BUG FIX: the original fell off the end (returning None) when a
    # non-None sentence contained no marker.
    return sentence
def __check_spaces(sentence):
    """Normalise whitespace: single spaces between words, two spaces after
    sentence-ending punctuation (. ! ?).

    :param sentence: text to normalise, possibly None.
    :return: the re-spaced text (None input yields None).
    """
    if sentence is None:
        return None
    pieces = []
    for token in sentence.split():
        # give sentence-ending words an extra trailing space
        if token[-1] in '.!?':
            token += ' '
        pieces.append(' ' + token)
    # joining then stripping removes the artificial leading space and any
    # trailing whitespace in one go
    return ''.join(pieces).strip()
|
Morrolan/surrealism
|
surrealism.py
|
get_sentence
|
python
|
def get_sentence(sentence_id=None):
counts = __get_table_limits()
result = None
id_ = 0
try:
if isinstance(sentence_id, int):
id_ = sentence_id
elif isinstance(sentence_id, float):
print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
id_ = round(sentence_id)
else:
id_ = random.randint(1, counts['max_sen'])
except ValueError:
print("ValueError: Incorrect parameter type detected.")
if id_ <= counts['max_sen']:
sentence = __get_sentence(counts, sentence_id=id_)
else:
print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_sen'])))
id_ = counts['max_sen']
sentence = __get_sentence(counts, sentence_id=id_)
if sentence is not None:
while sentence[0] == 'n':
if id_ is not None:
# here we delibrately pass 'None' to __getsentence__ as it will
sentence = __get_sentence(counts, None)
else:
sentence = __get_sentence(counts, id_)
if sentence[0] == 'y':
result = __process_sentence(sentence, counts)
return result
else:
print('ValueError: _sentence cannot be None.')
|
Retrieve a randomly-generated sentence as a unicode string.
:param sentence_id:
Allows you to optionally specify an integer representing the sentence_id
from the database table. This allows you to retrieve a specific
sentence each time, albeit with different keywords.
|
train
|
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L237-L282
|
[
"def __get_table_limits():\n \"\"\"Here we simply take a count of each of the database tables so we know our\n upper limits for our random number calls then return a dictionary of them \n to the calling function...\"\"\"\n\n table_counts = {\n 'max_adjectives': None,\n 'max_names': None,\n 'max_nouns': None,\n 'max_sentences': None,\n 'max_faults': None,\n 'max_verbs': None\n }\n\n cursor = CONN.cursor()\n\n cursor.execute('SELECT count(*) FROM suradjs')\n table_counts['max_adjectives'] = cursor.fetchone()\n table_counts['max_adjectives'] = table_counts['max_adjectives'][0]\n\n cursor.execute('SELECT count(*) FROM surnames')\n table_counts['max_names'] = cursor.fetchone()\n table_counts['max_names'] = table_counts['max_names'][0]\n\n cursor.execute('SELECT count(*) FROM surnouns')\n table_counts['max_nouns'] = cursor.fetchone()\n table_counts['max_nouns'] = table_counts['max_nouns'][0]\n\n cursor.execute('SELECT count(*) FROM sursentences')\n table_counts['max_sen'] = cursor.fetchone()\n table_counts['max_sen'] = table_counts['max_sen'][0]\n\n cursor.execute('SELECT count(*) FROM surfaults')\n table_counts['max_fau'] = cursor.fetchone()\n table_counts['max_fau'] = table_counts['max_fau'][0]\n\n cursor.execute('SELECT count(*) FROM surverbs')\n table_counts['max_verb'] = cursor.fetchone()\n table_counts['max_verb'] = table_counts['max_verb'][0]\n\n return table_counts\n",
"def __process_sentence(sentence_tuple, counts):\n \"\"\"pull the actual sentence from the tuple (tuple contains additional data such as ID)\n :param _sentence_tuple:\n :param counts:\n \"\"\"\n\n sentence = sentence_tuple[2]\n\n # now we start replacing words one type at a time...\n sentence = __replace_verbs(sentence, counts)\n\n sentence = __replace_nouns(sentence, counts)\n\n sentence = ___replace_adjective_maybe(sentence, counts)\n\n sentence = __replace_adjective(sentence, counts)\n\n sentence = __replace_names(sentence, counts)\n\n # here we perform a check to see if we need to use A or AN depending on the \n # first letter of the following word...\n sentence = __replace_an(sentence)\n\n # replace the new repeating segments\n sentence = __replace_repeat(sentence)\n\n # now we will read, choose and substitute each of the RANDOM sentence tuples\n sentence = __replace_random(sentence)\n\n # now we are going to choose whether to capitalize words/sentences or not\n sentence = __replace_capitalise(sentence)\n\n # here we will choose whether to capitalize all words in the sentence\n sentence = __replace_capall(sentence)\n\n # check for appropriate spaces in the correct places.\n sentence = __check_spaces(sentence)\n\n return sentence\n",
"def __get_sentence(counts, sentence_id=None):\n \"\"\"Let's fetch a random sentence that we then need to substitute bits of...\n @\n :param counts:\n :param sentence_id:\n \"\"\"\n\n # First of all we need a cursor and a query to retrieve our ID's\n cursor = CONN.cursor()\n check_query = \"select sen_id from sursentences\"\n\n # Now we fetch the result of the query and save it into check_result\n cursor.execute(check_query)\n check_result = cursor.fetchall()\n\n # declare an empty list to be populated below\n id_list = []\n id_to_fetch = None\n\n # Populate the id_list variable with all of the ID's we retrieved from the database query.\n for row in check_result:\n id_list.append(row[0])\n\n if sentence_id is not None:\n if type(sentence_id) is int:\n id_to_fetch = sentence_id\n else:\n id_to_fetch = random.randint(1, counts['max_sen'])\n\n while id_to_fetch not in id_list:\n id_to_fetch = random.randint(1, counts['max_sen'])\n\n query = (\"select * from sursentences where sen_id = {0}\".format(id_to_fetch))\n cursor.execute(query)\n result = cursor.fetchone()\n # cursor.close()\n\n return result\n"
] |
#!/usr/bin/env python
#############################################################################
# surrealism.py - Surreal sentence and error message generator
# Copyright (C) 2014 Ian Havelock
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#
# This is a derivative work (used with permission) from www.ravenblack.net
# Credit also goes to Kevan Davis on whose work the surrealism generator at
# Ravenblack.net is based on...
#############################################################################
__all__ = ['show_faults', 'show_sentences', 'getfault', 'getsentence', 'version',
'sentence_test', 'fault_test', 'show_sentences', 'show_faults']
# IMPORTS ###################################################################
import sqlite3
import random
import pkg_resources
# PARTICULAR IMPORTS ########################################################
from pkg_resources import resource_filename
# CONSTANTS #################################################################
CONN = sqlite3.connect(resource_filename(__name__, 'surrealism.sqlite'))
# VARIABLES #################################################################
# CLASSES ###################################################################
#############################################################################
# EXTERNAL METHODS BELOW
def version():
    """Return the installed version string of the surrealism package."""
    dist = pkg_resources.require('surrealism')[0]
    return dist.version
def showfaults():
    """Deprecated alias kept for backwards compatibility; see show_faults()."""
    return show_faults()
def show_faults():
    """Return every valid/active fault ordered by ID.

    :return: list of (fault id, fault template) tuples.
    """
    query = ("select fau_id, fault from surfaults "
             "where fau_is_valid = 'y' order by fau_id asc")
    cursor = CONN.cursor()
    cursor.execute(query)
    return cursor.fetchall()
def showsentences():
    """Deprecated alias kept for backwards compatibility; see show_sentences()."""
    return show_sentences()
def show_sentences():
    """Return every valid/active sentence ordered by ID.

    :return: dict mapping sentence ID -> sentence template.
    """
    query = ("select sen_id, sentence from sursentences "
             "where sen_is_valid = 'y' order by sen_id asc")
    cursor = CONN.cursor()
    cursor.execute(query)
    return {sen_id: template for sen_id, template in cursor.fetchall()}
def faulttest():
    """Deprecated alias kept for backwards compatibility; see fault_test()."""
    return fault_test()
def fault_test():
    """Render one instance of every fault, for testing purposes.

    :return: list of (fault id, rendered fault or disabled notice) tuples.
    """
    counts = __get_table_limits()
    results = []
    rendered = None
    for fau_id in range(1, counts['max_fau'] + 1):
        fault = __get_fault(counts, fault_id=fau_id)
        if fault[0] == 'n':
            rendered = "Fault is DISABLED - ignoring..."
        if fault[0] == 'y':
            rendered = __process_sentence(fault, counts)
        results.append((fault[1], rendered))
    return results
def sentencetest():
    """Deprecated alias kept for backwards compatibility; see sentence_test()."""
    return sentence_test()
def sentence_test():
    """Render one random version of every sentence, to test structure.

    :return: list of (sentence id, rendered sentence or disabled notice) tuples.
    """
    counts = __get_table_limits()
    results = []
    rendered = None
    for sen_id in range(1, counts['max_sen'] + 1):
        sentence = __get_sentence(counts, sentence_id=sen_id)
        if sentence[0] == 'n':
            rendered = "Sentence is DISABLED - ignoring..."
        if sentence[0] == 'y':
            rendered = __process_sentence(sentence, counts)
        results.append((sentence[1], rendered))
    return results
def getfault(fault_id=None):
    """Deprecated alias kept for backwards compatibility; see get_fault()."""
    return get_fault(fault_id)
def get_fault(fault_id=None):
    """Retrieve a randomly-generated error message as a unicode string.

    :param fault_id: optionally an integer fault_id from the database table,
        to retrieve a specific fault each time (still with random keywords).
    :return: the rendered fault string, or None when the row lookup failed.
    """
    counts = __get_table_limits()
    result = None
    id_ = 0
    # Coerce the requested ID: ints pass through, floats are rounded with a
    # printed warning, anything else falls back to a random valid ID.
    try:
        if isinstance(fault_id, int):
            id_ = fault_id
        elif isinstance(fault_id, float):
            print("""ValueError: Floating point number detected.
            Rounding number to 0 decimal places.""")
            id_ = round(fault_id)
        else:
            id_ = random.randint(1, counts['max_fau'])
    except ValueError:
        print("ValueError: Incorrect parameter type detected.")
    # Clamp an out-of-range request down to the highest existing ID.
    if id_ <= counts['max_fau']:
        fault = __get_fault(counts, fault_id=id_)
    else:
        print("""ValueError: Parameter integer is too high.
        Maximum permitted value is {0}.""".format(str(counts['max_fau'])))
        id_ = counts['max_fau']
        fault = __get_fault(counts, fault_id=id_)
    if fault is not None:
        # Re-draw until we land on an enabled ('y') fault row.
        while fault[0] == 'n':
            if id_ is not None:
                # deliberately re-draw a random fault (passes fault_id=None)
                # instead of fetching the same disabled ID forever
                fault = __get_fault(counts, None)
            else:
                fault = __get_fault(counts, id_)
        if fault[0] == 'y':
            result = __process_sentence(fault, counts)
        return result
    else:
        print('ValueError: _fault cannot be None.')
def getsentence(sentence_id=None):
    """Deprecated alias kept for backwards compatibility; see get_sentence()."""
    return get_sentence(sentence_id)
def get_sentence(sentence_id=None):
    """Retrieve a randomly-generated sentence as a unicode string.

    :param sentence_id: optionally an integer sentence_id from the database
        table, to retrieve a specific sentence each time (still with random
        keywords).
    :return: the rendered sentence string, or None when the row lookup failed.
    """
    counts = __get_table_limits()
    result = None
    id_ = 0
    # Coerce the requested ID: ints pass through, floats are rounded with a
    # printed warning, anything else falls back to a random valid ID.
    try:
        if isinstance(sentence_id, int):
            id_ = sentence_id
        elif isinstance(sentence_id, float):
            print("""ValueError: Floating point number detected.
            Rounding number to 0 decimal places.""")
            id_ = round(sentence_id)
        else:
            id_ = random.randint(1, counts['max_sen'])
    except ValueError:
        print("ValueError: Incorrect parameter type detected.")
    # Clamp an out-of-range request down to the highest existing ID.
    if id_ <= counts['max_sen']:
        sentence = __get_sentence(counts, sentence_id=id_)
    else:
        print("""ValueError: Parameter integer is too high.
        Maximum permitted value is {0}.""".format(str(counts['max_sen'])))
        id_ = counts['max_sen']
        sentence = __get_sentence(counts, sentence_id=id_)
    if sentence is not None:
        # Re-draw until we land on an enabled ('y') sentence row.
        while sentence[0] == 'n':
            if id_ is not None:
                # deliberately re-draw a random sentence (passes None)
                # instead of fetching the same disabled ID forever
                sentence = __get_sentence(counts, None)
            else:
                sentence = __get_sentence(counts, id_)
        if sentence[0] == 'y':
            result = __process_sentence(sentence, counts)
        return result
    else:
        print('ValueError: _sentence cannot be None.')
#############################################################################
# INTERNAL METHODS BELOW
def __get_fault(counts, fault_id=None):
    """Fetch one fault row whose placeholders will then be substituted.

    :param counts: dict of table row counts (uses 'max_fau').
    :param fault_id: optional int; fetch this specific fault, otherwise a
        random existing one.
    :return: the full row tuple for the chosen fault.
    """
    cursor = CONN.cursor()
    # Collect the set of IDs that actually exist (IDs may be sparse);
    # a set gives O(1) membership tests in the re-roll loop below.
    cursor.execute("select fau_id from surfaults")
    existing_ids = {row[0] for row in cursor.fetchall()}
    if fault_id is not None and type(fault_id) is int:
        id_to_fetch = fault_id
    else:
        id_to_fetch = random.randint(1, counts['max_fau'])
    # Re-roll until the ID is present in the table.
    while id_to_fetch not in existing_ids:
        id_to_fetch = random.randint(1, counts['max_fau'])
    # Parameterised query instead of str.format (avoids SQL-injection-prone
    # string building).
    cursor.execute("select * from surfaults where fau_id = ?", (id_to_fetch,))
    return cursor.fetchone()
def __get_sentence(counts, sentence_id=None):
    """Fetch one sentence row whose placeholders will then be substituted.

    :param counts: dict of table row counts (uses 'max_sen').
    :param sentence_id: optional int; fetch this specific sentence,
        otherwise a random existing one.
    :return: the full row tuple for the chosen sentence.
    """
    cursor = CONN.cursor()
    # Collect the set of IDs that actually exist (IDs may be sparse);
    # a set gives O(1) membership tests in the re-roll loop below.
    cursor.execute("select sen_id from sursentences")
    existing_ids = {row[0] for row in cursor.fetchall()}
    if sentence_id is not None and type(sentence_id) is int:
        id_to_fetch = sentence_id
    else:
        id_to_fetch = random.randint(1, counts['max_sen'])
    # Re-roll until the ID is present in the table.
    while id_to_fetch not in existing_ids:
        id_to_fetch = random.randint(1, counts['max_sen'])
    # Parameterised query instead of str.format (avoids SQL-injection-prone
    # string building).
    cursor.execute("select * from sursentences where sen_id = ?",
                   (id_to_fetch,))
    return cursor.fetchone()
def __get_verb(counts):
    """Fetch one random verb string from the surverbs table.

    :param counts: dict of table row counts (uses 'max_verb').
    :return: the verb text.
    """
    cursor = CONN.cursor()
    cursor.execute("select verb_id from surverbs")
    existing_ids = {row[0] for row in cursor.fetchall()}
    rand = random.randint(1, counts['max_verb'])
    # Re-roll until the ID exists (IDs may be sparse).
    while rand not in existing_ids:
        rand = random.randint(1, counts['max_verb'])
    # Parameterised query instead of str.format string building.
    cursor.execute("select * from surverbs where verb_id = ?", (rand,))
    return cursor.fetchone()[1]
def __get_noun(counts):
    """Fetch one random noun string from the surnouns table.

    :param counts: dict of table row counts (uses 'max_nouns').
    :return: the noun text.
    """
    cursor = CONN.cursor()
    cursor.execute("select noun_id from surnouns")
    existing_ids = {row[0] for row in cursor.fetchall()}
    rand = random.randint(1, counts['max_nouns'])
    # Re-roll until the ID exists (IDs may be sparse).
    while rand not in existing_ids:
        rand = random.randint(1, counts['max_nouns'])
    # Parameterised query instead of str.format string building.
    cursor.execute("select * from surnouns where noun_id = ?", (rand,))
    return cursor.fetchone()[1]
def __get_adjective(counts):
    """Fetch one random adjective string from the suradjs table.

    :param counts: dict of table row counts (uses 'max_adjectives').
    :return: the adjective text.
    """
    cursor = CONN.cursor()
    cursor.execute("select adj_id from suradjs")
    existing_ids = {row[0] for row in cursor.fetchall()}
    rand = random.randint(1, counts['max_adjectives'])
    # Re-roll until the ID exists (IDs may be sparse).
    while rand not in existing_ids:
        rand = random.randint(1, counts['max_adjectives'])
    # Parameterised query instead of str.format string building.
    cursor.execute("select * from suradjs where adj_id = ?", (rand,))
    return cursor.fetchone()[1]
def __get_name(counts):
    """Fetch one random name string from the surnames table.

    :param counts: dict of table row counts (uses 'max_names').
    :return: the name text.
    """
    cursor = CONN.cursor()
    cursor.execute("select name_id from surnames")
    existing_ids = {row[0] for row in cursor.fetchall()}
    rand = random.randint(1, counts['max_names'])
    # Re-roll until the ID exists (IDs may be sparse).
    while rand not in existing_ids:
        rand = random.randint(1, counts['max_names'])
    # Parameterised query instead of str.format string building.
    cursor.execute("select * from surnames where name_id = ?", (rand,))
    return cursor.fetchone()[1]
def __get_table_limits():
    """Count the rows of each word/template table.

    The counts are used as upper bounds for random ID generation.

    :return: dict with keys 'max_adjectives', 'max_names', 'max_nouns',
        'max_sen', 'max_fau' and 'max_verb'.
    """
    # Map each result key to the table it is counted from.  The original
    # also pre-seeded keys 'max_sentences', 'max_faults' and 'max_verbs'
    # with None; they were never written or read, so they are dropped.
    tables = {
        'max_adjectives': 'suradjs',
        'max_names': 'surnames',
        'max_nouns': 'surnouns',
        'max_sen': 'sursentences',
        'max_fau': 'surfaults',
        'max_verb': 'surverbs',
    }
    cursor = CONN.cursor()
    table_counts = {}
    for key, table in tables.items():
        # Table names come from the fixed mapping above, never from user
        # input, so formatting them in is safe here.
        cursor.execute('SELECT count(*) FROM {0}'.format(table))
        table_counts[key] = cursor.fetchone()[0]
    return table_counts
def __process_sentence(sentence_tuple, counts):
    """Render a sentence/fault DB row by substituting every placeholder.

    The substitution order matters: word markers are filled first, then
    #AN (which inspects the word that now follows it), then repeat and
    random segments, then capitalisation, and finally whitespace cleanup.

    :param sentence_tuple: full DB row; element [2] is the template text.
    :param counts: dict of table row counts for the random word pickers.
    :return: the fully rendered sentence string.
    """
    sentence = sentence_tuple[2]
    # now we start replacing words one type at a time...
    sentence = __replace_verbs(sentence, counts)
    sentence = __replace_nouns(sentence, counts)
    sentence = ___replace_adjective_maybe(sentence, counts)
    sentence = __replace_adjective(sentence, counts)
    sentence = __replace_names(sentence, counts)
    # choose 'a' or 'an' based on the first letter of the following word
    sentence = __replace_an(sentence)
    # resolve the repeating segments (#DEFINE_REPEAT / #REPEAT)
    sentence = __replace_repeat(sentence)
    # read, choose and substitute each of the #RANDOM[...] segments
    sentence = __replace_random(sentence)
    # capitalise individual marked words
    sentence = __replace_capitalise(sentence)
    # optionally upper-case the whole sentence (#CAPALL)
    sentence = __replace_capall(sentence)
    # normalise spaces in the correct places
    sentence = __check_spaces(sentence)
    return sentence
def __replace_verbs(sentence, counts):
    """Substitute every #VERB placeholder with a random verb.

    :param sentence: template string (may be None).
    :param counts: dict of table row counts.
    """
    if sentence is None:
        return sentence
    while '#VERB' in sentence:
        sentence = sentence.replace('#VERB', str(__get_verb(counts)), 1)
    return sentence
def __replace_nouns(sentence, counts):
    """Substitute every #NOUN placeholder with a random noun.

    :param sentence: template string (may be None).
    :param counts: dict of table row counts.
    """
    if sentence is None:
        return sentence
    while '#NOUN' in sentence:
        sentence = sentence.replace('#NOUN', str(__get_noun(counts)), 1)
    return sentence
def ___replace_adjective_maybe(sentence, counts):
    """Substitute #ADJECTIVE_MAYBE markers, all-or-nothing per sentence.

    A single coin flip decides whether every #ADJECTIVE_MAYBE in the
    sentence becomes ' <adjective>' or is removed entirely.

    :param sentence: template string (may be None).
    :param counts: dict of table row counts.
    """
    # NOTE(review): the flip happens once, so all markers in one sentence
    # get the same treatment -- confirm this is intentional.
    random_decision = random.randint(0, 1)
    if sentence is None:
        return sentence
    while '#ADJECTIVE_MAYBE' in sentence:
        if random_decision % 2 == 0:
            sentence = sentence.replace('#ADJECTIVE_MAYBE',
                                        ' ' + str(__get_adjective(counts)), 1)
        else:
            sentence = sentence.replace('#ADJECTIVE_MAYBE', '', 1)
    return sentence
def __replace_adjective(sentence, counts):
    """Substitute every #ADJECTIVE placeholder with a random adjective.

    :param sentence: template string (may be None).
    :param counts: dict of table row counts.
    """
    if sentence is None:
        return sentence
    while '#ADJECTIVE' in sentence:
        sentence = sentence.replace('#ADJECTIVE',
                                    str(__get_adjective(counts)), 1)
    return sentence
def __replace_names(sentence, counts):
    """Substitute every #NAME placeholder with a random name.

    :param sentence: template string (may be None).
    :param counts: dict of table row counts.
    """
    if sentence is None:
        return sentence
    while '#NAME' in sentence:
        sentence = sentence.replace('#NAME', str(__get_name(counts)), 1)
    return sentence
def __replace_an(sentence):
    """Substitute each #AN marker with 'a' or 'an'.

    The choice depends on whether the word after the marker starts with a
    vowel.  A marker with nothing after it falls back to 'a'.

    :param sentence: template string (may be None).
    """
    if sentence is None:
        return sentence
    while '#AN' in sentence:
        # '#AN ' is 4 characters, so the first letter of the following
        # word sits 4 positions past the marker start.
        next_char_index = sentence.find('#AN') + 4
        # Bug fix: guard the index -- the original raised IndexError when
        # #AN was the last token in the sentence.
        if (next_char_index < len(sentence)
                and sentence[next_char_index] in 'aeiouAEIOU'):
            sentence = sentence.replace('#AN', 'an', 1)
        else:
            sentence = sentence.replace('#AN', 'a', 1)
    return sentence
def __replace_random(sentence):
    """Substitute each #RANDOM[n,opt1,...,optn] marker with one option.

    :param sentence: template string (may be None).
    """
    if sentence is None:
        return sentence
    while '#RANDOM' in sentence:
        marker_index = sentence.find('#RANDOM')
        start_index = marker_index + 8  # skip '#RANDOM['
        # Bug fix: look for the closing bracket *after* the marker; the
        # original used find(']') from the start of the sentence, which
        # could match a ']' appearing before the marker.  (It also tested
        # `find(...) is not None`, which is always true -- removed.)
        end_index = sentence.find(']', start_index)
        options = sentence[start_index:end_index].split(',')
        # options[0] holds the option count; options[1..n] are the choices.
        choice = random.randint(1, int(options[0]))
        to_be_replaced = sentence[marker_index:end_index + 1]
        sentence = sentence.replace(to_be_replaced, options[choice], 1)
    return sentence
def __replace_repeat(sentence):
    """Resolve #DEFINE_REPEAT[key,text] definitions and #REPEAT[key] uses.

    Enables 'Ten green bottles'-style sentences where the same randomly
    chosen fragment must appear several times.  (Sentence 47 exercises
    this feature.)

    :param sentence: template string (may be None).
    """
    if sentence is None:
        return sentence
    repeat_dict = {}
    # First pass: record each definition and strip it from the sentence.
    while '#DEFINE_REPEAT' in sentence:
        begin_index = sentence.find('#DEFINE_REPEAT')
        start_index = begin_index + 15  # skip '#DEFINE_REPEAT['
        # Bug fix: search for ']' after the marker, not from the start of
        # the whole sentence.
        end_index = sentence.find(']', start_index)
        parts = sentence[start_index:end_index].split(',')
        repeat_dict[parts[0]] = parts[1]
        sentence = sentence.replace(sentence[begin_index:end_index + 1], '', 1)
    # Second pass: expand every use of a recorded key.
    while '#REPEAT' in sentence:
        begin_index = sentence.find('#REPEAT')
        start_index = begin_index + 8  # skip '#REPEAT['
        # searching from start_index avoids dodgy bracket-matching errors
        end_index = sentence.find(']', start_index)
        key = sentence[start_index:end_index]
        # Bug fix: an unknown key previously left the marker in place and
        # looped forever; the unresolved marker is now removed instead.
        replacement = str(repeat_dict[key]) if key in repeat_dict else ''
        sentence = sentence.replace(sentence[begin_index:end_index + 1],
                                    replacement)
    return sentence
def __replace_capitalise(sentence):
    """Consume each #CAPITALISE marker and capitalise the word after it.

    Only a lowercase following letter is upper-cased; anything else is
    left untouched (already-capitalised words pass through unchanged).

    :param sentence: template string (may be None).
    :return: the sentence with all markers consumed.
    """
    if sentence is None:
        return sentence
    while sentence.find('#CAPITALISE') != -1:
        # Bug fix: the original read '_sentence' here (a NameError)
        # instead of the actual parameter name.
        cap_index = sentence.find('#CAPITALISE')
        part1 = sentence[:cap_index]
        # '#CAPITALISE ' is 12 chars; the char right after it is the
        # first letter of the word to capitalise.
        part2 = sentence[cap_index + 12:cap_index + 13]
        part3 = sentence[cap_index + 13:]
        if part2 in "abcdefghijklmnopqrstuvwxyz":
            sentence = part1 + part2.capitalize() + part3
        else:
            sentence = part1 + part2 + part3
    # Bug fix: the original fell off the end (returning None) when the
    # sentence contained no marker at all.
    return sentence
def __replace_capall(sentence):
    """Upper-case the whole sentence when a #CAPALL marker is present.

    :param sentence: template string (may be None).
    """
    if sentence is None:
        return sentence
    # NOTE(review): the replace target is '#CAPALL ' with a trailing
    # space; a template where #CAPALL is not followed by a space would
    # loop forever -- confirm templates always include the space.
    while sentence.find('#CAPALL') != -1:
        sentence = sentence.upper()
        sentence = sentence.replace('#CAPALL ', '', 1)
    # Bug fix: the original fell off the end (returning None) when the
    # sentence contained no #CAPALL marker.
    return sentence
def __check_spaces(sentence):
    """Normalise whitespace: one space between words, two after . ! ? .

    Splitting on whitespace collapses runs of spaces; a word ending in a
    sentence-terminating character receives one extra trailing space.

    :param sentence: string to normalise (may be None).
    :return: the normalised sentence, or None for a None input.
    """
    if sentence is None:
        return None
    pieces = []
    for word in sentence.split():
        if word[-1] in '.!?':
            word += ' '
        pieces.append(' ' + word)
    # strip the leading joiner space and any trailing padding
    return ''.join(pieces).strip()
|
Morrolan/surrealism
|
surrealism.py
|
__get_sentence
|
python
|
def __get_sentence(counts, sentence_id=None):
# First of all we need a cursor and a query to retrieve our ID's
cursor = CONN.cursor()
check_query = "select sen_id from sursentences"
# Now we fetch the result of the query and save it into check_result
cursor.execute(check_query)
check_result = cursor.fetchall()
# declare an empty list to be populated below
id_list = []
id_to_fetch = None
# Populate the id_list variable with all of the ID's we retrieved from the database query.
for row in check_result:
id_list.append(row[0])
if sentence_id is not None:
if type(sentence_id) is int:
id_to_fetch = sentence_id
else:
id_to_fetch = random.randint(1, counts['max_sen'])
while id_to_fetch not in id_list:
id_to_fetch = random.randint(1, counts['max_sen'])
query = ("select * from sursentences where sen_id = {0}".format(id_to_fetch))
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result
|
Let's fetch a random sentence that we then need to substitute bits of...
:param counts:
:param sentence_id:
|
train
|
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L327-L364
| null |
#!/usr/bin/env python
#############################################################################
# surrealism.py - Surreal sentence and error message generator
# Copyright (C) 2014 Ian Havelock
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#
# This is a derivative work (used with permission) from www.ravenblack.net
# Credit also goes to Kevan Davis on whose work the surrealism generator at
# Ravenblack.net is based on...
#############################################################################
__all__ = ['show_faults', 'show_sentences', 'getfault', 'getsentence', 'version',
'sentence_test', 'fault_test', 'show_sentences', 'show_faults']
# IMPORTS ###################################################################
import sqlite3
import random
import pkg_resources
# PARTICULAR IMPORTS ########################################################
from pkg_resources import resource_filename
# CONSTANTS #################################################################
CONN = sqlite3.connect(resource_filename(__name__, 'surrealism.sqlite'))
# VARIABLES #################################################################
# CLASSES ###################################################################
#############################################################################
# EXTERNAL METHODS BELOW
def version():
    """Return the installed version string of the surrealism package."""
    dist = pkg_resources.require('surrealism')[0]
    return dist.version
def showfaults():
    """Deprecated alias kept for backwards compatibility; see show_faults()."""
    return show_faults()
def show_faults():
    """Return every valid/active fault ordered by ID.

    :return: list of (fault id, fault template) tuples.
    """
    query = ("select fau_id, fault from surfaults "
             "where fau_is_valid = 'y' order by fau_id asc")
    cursor = CONN.cursor()
    cursor.execute(query)
    return cursor.fetchall()
def showsentences():
    """Deprecated alias kept for backwards compatibility; see show_sentences()."""
    return show_sentences()
def show_sentences():
    """Return every valid/active sentence ordered by ID.

    :return: dict mapping sentence ID -> sentence template.
    """
    query = ("select sen_id, sentence from sursentences "
             "where sen_is_valid = 'y' order by sen_id asc")
    cursor = CONN.cursor()
    cursor.execute(query)
    return {sen_id: template for sen_id, template in cursor.fetchall()}
def faulttest():
    """Deprecated alias kept for backwards compatibility; see fault_test()."""
    return fault_test()
def fault_test():
    """Render one instance of every fault, for testing purposes.

    :return: list of (fault id, rendered fault or disabled notice) tuples.
    """
    counts = __get_table_limits()
    results = []
    rendered = None
    for fau_id in range(1, counts['max_fau'] + 1):
        fault = __get_fault(counts, fault_id=fau_id)
        if fault[0] == 'n':
            rendered = "Fault is DISABLED - ignoring..."
        if fault[0] == 'y':
            rendered = __process_sentence(fault, counts)
        results.append((fault[1], rendered))
    return results
def sentencetest():
    """Deprecated alias kept for backwards compatibility; see sentence_test()."""
    return sentence_test()
def sentence_test():
    """Render one random version of every sentence, to test structure.

    :return: list of (sentence id, rendered sentence or disabled notice) tuples.
    """
    counts = __get_table_limits()
    results = []
    rendered = None
    for sen_id in range(1, counts['max_sen'] + 1):
        sentence = __get_sentence(counts, sentence_id=sen_id)
        if sentence[0] == 'n':
            rendered = "Sentence is DISABLED - ignoring..."
        if sentence[0] == 'y':
            rendered = __process_sentence(sentence, counts)
        results.append((sentence[1], rendered))
    return results
def getfault(fault_id=None):
    """Deprecated alias kept for backwards compatibility; see get_fault()."""
    return get_fault(fault_id)
def get_fault(fault_id=None):
    """Retrieve a randomly-generated error message as a unicode string.

    :param fault_id: optionally an integer fault_id from the database table,
        to retrieve a specific fault each time (still with random keywords).
    :return: the rendered fault string, or None when the row lookup failed.
    """
    counts = __get_table_limits()
    result = None
    id_ = 0
    # Coerce the requested ID: ints pass through, floats are rounded with a
    # printed warning, anything else falls back to a random valid ID.
    try:
        if isinstance(fault_id, int):
            id_ = fault_id
        elif isinstance(fault_id, float):
            print("""ValueError: Floating point number detected.
            Rounding number to 0 decimal places.""")
            id_ = round(fault_id)
        else:
            id_ = random.randint(1, counts['max_fau'])
    except ValueError:
        print("ValueError: Incorrect parameter type detected.")
    # Clamp an out-of-range request down to the highest existing ID.
    if id_ <= counts['max_fau']:
        fault = __get_fault(counts, fault_id=id_)
    else:
        print("""ValueError: Parameter integer is too high.
        Maximum permitted value is {0}.""".format(str(counts['max_fau'])))
        id_ = counts['max_fau']
        fault = __get_fault(counts, fault_id=id_)
    if fault is not None:
        # Re-draw until we land on an enabled ('y') fault row.
        while fault[0] == 'n':
            if id_ is not None:
                # deliberately re-draw a random fault (passes fault_id=None)
                # instead of fetching the same disabled ID forever
                fault = __get_fault(counts, None)
            else:
                fault = __get_fault(counts, id_)
        if fault[0] == 'y':
            result = __process_sentence(fault, counts)
        return result
    else:
        print('ValueError: _fault cannot be None.')
def getsentence(sentence_id=None):
    """Deprecated alias kept for backwards compatibility; see get_sentence()."""
    return get_sentence(sentence_id)
def get_sentence(sentence_id=None):
    """Retrieve a randomly-generated sentence as a unicode string.

    :param sentence_id: optionally an integer sentence_id from the database
        table, to retrieve a specific sentence each time (still with random
        keywords).
    :return: the rendered sentence string, or None when the row lookup failed.
    """
    counts = __get_table_limits()
    result = None
    id_ = 0
    # Coerce the requested ID: ints pass through, floats are rounded with a
    # printed warning, anything else falls back to a random valid ID.
    try:
        if isinstance(sentence_id, int):
            id_ = sentence_id
        elif isinstance(sentence_id, float):
            print("""ValueError: Floating point number detected.
            Rounding number to 0 decimal places.""")
            id_ = round(sentence_id)
        else:
            id_ = random.randint(1, counts['max_sen'])
    except ValueError:
        print("ValueError: Incorrect parameter type detected.")
    # Clamp an out-of-range request down to the highest existing ID.
    if id_ <= counts['max_sen']:
        sentence = __get_sentence(counts, sentence_id=id_)
    else:
        print("""ValueError: Parameter integer is too high.
        Maximum permitted value is {0}.""".format(str(counts['max_sen'])))
        id_ = counts['max_sen']
        sentence = __get_sentence(counts, sentence_id=id_)
    if sentence is not None:
        # Re-draw until we land on an enabled ('y') sentence row.
        while sentence[0] == 'n':
            if id_ is not None:
                # here we delibrately pass 'None' to __getsentence__ as it will
                sentence = __get_sentence(counts, None)
            else:
                sentence = __get_sentence(counts, id_)
        if sentence[0] == 'y':
            result = __process_sentence(sentence, counts)
        return result
    else:
        print('ValueError: _sentence cannot be None.')
#############################################################################
# INTERNAL METHODS BELOW
def __get_fault(counts, fault_id=None):
    """Fetch one fault row whose placeholders will then be substituted.

    :param counts: dict of table row counts (uses 'max_fau').
    :param fault_id: optional int; fetch this specific fault, otherwise a
        random existing one.
    :return: the full row tuple for the chosen fault.
    """
    cursor = CONN.cursor()
    # Collect the set of IDs that actually exist (IDs may be sparse);
    # a set gives O(1) membership tests in the re-roll loop below.
    cursor.execute("select fau_id from surfaults")
    existing_ids = {row[0] for row in cursor.fetchall()}
    if fault_id is not None and type(fault_id) is int:
        id_to_fetch = fault_id
    else:
        id_to_fetch = random.randint(1, counts['max_fau'])
    # Re-roll until the ID is present in the table.
    while id_to_fetch not in existing_ids:
        id_to_fetch = random.randint(1, counts['max_fau'])
    # Parameterised query instead of str.format (avoids SQL-injection-prone
    # string building).
    cursor.execute("select * from surfaults where fau_id = ?", (id_to_fetch,))
    return cursor.fetchone()
def __get_verb(counts):
    """Fetch one random verb string from the surverbs table.

    :param counts: dict of table row counts (uses 'max_verb').
    :return: the verb text.
    """
    cursor = CONN.cursor()
    cursor.execute("select verb_id from surverbs")
    existing_ids = {row[0] for row in cursor.fetchall()}
    rand = random.randint(1, counts['max_verb'])
    # Re-roll until the ID exists (IDs may be sparse).
    while rand not in existing_ids:
        rand = random.randint(1, counts['max_verb'])
    # Parameterised query instead of str.format string building.
    cursor.execute("select * from surverbs where verb_id = ?", (rand,))
    return cursor.fetchone()[1]
def __get_noun(counts):
    """Fetch one random noun string from the surnouns table.

    :param counts: dict of table row counts (uses 'max_nouns').
    :return: the noun text.
    """
    cursor = CONN.cursor()
    cursor.execute("select noun_id from surnouns")
    existing_ids = {row[0] for row in cursor.fetchall()}
    rand = random.randint(1, counts['max_nouns'])
    # Re-roll until the ID exists (IDs may be sparse).
    while rand not in existing_ids:
        rand = random.randint(1, counts['max_nouns'])
    # Parameterised query instead of str.format string building.
    cursor.execute("select * from surnouns where noun_id = ?", (rand,))
    return cursor.fetchone()[1]
def __get_adjective(counts):
    """Fetch one random adjective string from the suradjs table.

    :param counts: dict of table row counts (uses 'max_adjectives').
    :return: the adjective text.
    """
    cursor = CONN.cursor()
    cursor.execute("select adj_id from suradjs")
    existing_ids = {row[0] for row in cursor.fetchall()}
    rand = random.randint(1, counts['max_adjectives'])
    # Re-roll until the ID exists (IDs may be sparse).
    while rand not in existing_ids:
        rand = random.randint(1, counts['max_adjectives'])
    # Parameterised query instead of str.format string building.
    cursor.execute("select * from suradjs where adj_id = ?", (rand,))
    return cursor.fetchone()[1]
def __get_name(counts):
    """Fetch one random name string from the surnames table.

    :param counts: dict of table row counts (uses 'max_names').
    :return: the name text.
    """
    cursor = CONN.cursor()
    cursor.execute("select name_id from surnames")
    existing_ids = {row[0] for row in cursor.fetchall()}
    rand = random.randint(1, counts['max_names'])
    # Re-roll until the ID exists (IDs may be sparse).
    while rand not in existing_ids:
        rand = random.randint(1, counts['max_names'])
    # Parameterised query instead of str.format string building.
    cursor.execute("select * from surnames where name_id = ?", (rand,))
    return cursor.fetchone()[1]
def __get_table_limits():
    """Count the rows of each word/template table.

    The counts are used as upper bounds for random ID generation.

    :return: dict with keys 'max_adjectives', 'max_names', 'max_nouns',
        'max_sen', 'max_fau' and 'max_verb'.
    """
    # Map each result key to the table it is counted from.  The original
    # also pre-seeded keys 'max_sentences', 'max_faults' and 'max_verbs'
    # with None; they were never written or read, so they are dropped.
    tables = {
        'max_adjectives': 'suradjs',
        'max_names': 'surnames',
        'max_nouns': 'surnouns',
        'max_sen': 'sursentences',
        'max_fau': 'surfaults',
        'max_verb': 'surverbs',
    }
    cursor = CONN.cursor()
    table_counts = {}
    for key, table in tables.items():
        # Table names come from the fixed mapping above, never from user
        # input, so formatting them in is safe here.
        cursor.execute('SELECT count(*) FROM {0}'.format(table))
        table_counts[key] = cursor.fetchone()[0]
    return table_counts
def __process_sentence(sentence_tuple, counts):
    """Expand one raw template row into a finished sentence.

    :param sentence_tuple: full DB row; the template text lives at index 2
    :param counts: table-limit dict consumed by the word-lookup helpers
    :return: the fully substituted, whitespace-normalised sentence
    """
    text = sentence_tuple[2]
    # Word-level substitutions first, one placeholder class at a time.
    text = __replace_verbs(text, counts)
    text = __replace_nouns(text, counts)
    text = ___replace_adjective_maybe(text, counts)
    text = __replace_adjective(text, counts)
    text = __replace_names(text, counts)
    # Grammar fix-up: choose 'a' vs 'an' from the word that follows.
    text = __replace_an(text)
    # Structural tags: repeated segments, then inline random choices.
    text = __replace_repeat(text)
    text = __replace_random(text)
    # Casing directives, then a final whitespace normalisation pass.
    text = __replace_capitalise(text)
    text = __replace_capall(text)
    text = __check_spaces(text)
    return text
def __replace_verbs(sentence, counts):
    """Substitute every '#VERB' tag with a random verb from the database.

    :param sentence: template string, or None (returned unchanged)
    :param counts: table-limit dict consumed by __get_verb
    """
    if sentence is None:
        return sentence
    # One substitution per pass until no tag remains.
    while '#VERB' in sentence:
        sentence = sentence.replace('#VERB', str(__get_verb(counts)), 1)
    return sentence
def __replace_nouns(sentence, counts):
    """Substitute every '#NOUN' tag with a random noun from the database.

    :param sentence: template string, or None (returned unchanged)
    :param counts: table-limit dict consumed by __get_noun
    """
    if sentence is None:
        return sentence
    # One substitution per pass until no tag remains.
    while '#NOUN' in sentence:
        sentence = sentence.replace('#NOUN', str(__get_noun(counts)), 1)
    return sentence
def ___replace_adjective_maybe(sentence, counts):
    """Replace each '#ADJECTIVE_MAYBE' tag with either a random adjective
    (preceded by a space) or with nothing at all.

    NOTE(review): the coin is flipped once per *sentence*, not per tag, so
    every '#ADJECTIVE_MAYBE' in one sentence gets the same outcome —
    confirm whether that is intended.

    :param sentence: template string, or None (returned unchanged)
    :param counts: table-limit dict consumed by __get_adjective
    """
    random_decision = random.randint(0, 1)
    if sentence is not None:
        while sentence.find('#ADJECTIVE_MAYBE') != -1:
            if random_decision % 2 == 0:
                # Leading space re-joins the adjective to the sentence.
                sentence = sentence.replace('#ADJECTIVE_MAYBE',
                                            ' ' + str(__get_adjective(counts)), 1)
            elif random_decision % 2 != 0:
                sentence = sentence.replace('#ADJECTIVE_MAYBE', '', 1)
            if sentence.find('#ADJECTIVE_MAYBE') == -1:
                return sentence
        return sentence
    else:
        return sentence
def __replace_adjective(sentence, counts):
    """Substitute every '#ADJECTIVE' tag with a random adjective.

    :param sentence: template string, or None (returned unchanged)
    :param counts: table-limit dict consumed by __get_adjective
    """
    if sentence is None:
        return sentence
    # One substitution per pass until no tag remains.
    while '#ADJECTIVE' in sentence:
        sentence = sentence.replace('#ADJECTIVE', str(__get_adjective(counts)), 1)
    return sentence
def __replace_names(sentence, counts):
    """Substitute every '#NAME' tag with a random name from the database.

    :param sentence: template string, or None (returned unchanged)
    :param counts: table-limit dict consumed by __get_name
    """
    if sentence is None:
        return sentence
    # One substitution per pass until no tag remains.
    while '#NAME' in sentence:
        sentence = sentence.replace('#NAME', str(__get_name(counts)), 1)
    return sentence
def __replace_an(sentence):
    """Replace every '#AN' tag with 'a' or 'an'.

    The article depends on the first letter of the word that follows the
    tag: a vowel gets 'an', anything else (including end-of-string) gets
    'a'.

    :param sentence: template string, or None (returned unchanged)
    """
    if sentence is None:
        return sentence
    while sentence.find('#AN') != -1:
        # '#AN ' spans 4 characters, so the next word starts 4 past the tag.
        next_letter_index = sentence.find('#AN') + 4
        # Guard the lookup: a trailing '#AN' has no following word, which
        # previously raised IndexError; fall back to 'a' in that case.
        if (next_letter_index < len(sentence)
                and sentence[next_letter_index] in 'aeiouAEIOU'):
            sentence = sentence.replace('#AN', 'an', 1)
        else:
            sentence = sentence.replace('#AN', 'a', 1)
    return sentence
def __replace_random(sentence):
    """Expand each '#RANDOM[n,opt1,...,optn]' tag into one random option.

    The first element inside the brackets is the option count n; a pick in
    1..n then indexes the corresponding option.

    :param sentence: template string, or None (returned unchanged)
    """
    if sentence is None:
        return sentence
    while sentence.find('#RANDOM') != -1:
        tag_start = sentence.find('#RANDOM')
        options_start = tag_start + 8  # skip '#RANDOM['
        # Search for the closing bracket *after* the tag; the old code
        # scanned from position 0 and broke on any earlier ']'. Its
        # 'find(...) is not None' guard was also always true (find
        # returns an int) and has been dropped.
        options_end = sentence.find(']', options_start)
        options = sentence[options_start:options_end].split(',')
        pick = random.randint(1, int(options[0]))
        sentence = sentence.replace(
            sentence[tag_start:options_end + 1], options[pick], 1)
    return sentence
def __replace_repeat(sentence):
    """Expand '#DEFINE_REPEAT[key,text]' / '#REPEAT[key]' tag pairs.

    First pass collects every '#DEFINE_REPEAT[key,text]' definition (and
    strips it from the sentence); second pass substitutes each
    '#REPEAT[key]' with the stored text.  Enables 'Ten green bottles'
    style repeating sentences.  Sentence_id 47 is a working example.

    :param sentence: template string, or None (returned unchanged)
    """
    repeat_dict = {}
    if sentence is not None:
        while sentence.find('#DEFINE_REPEAT') != -1:
            begin_index = sentence.find('#DEFINE_REPEAT')
            start_index = begin_index + 15  # skip '#DEFINE_REPEAT['
            # NOTE(review): this scans for ']' from position 0, so an
            # earlier ']' in the sentence would truncate the definition.
            end_index = sentence.find(']')
            # NOTE(review): find() returns an int, never None, so this
            # guard is always true.
            if sentence.find('#DEFINE_REPEAT') is not None:
                sub_list = sentence[start_index:end_index].split(',')
                choice = sub_list[0]
                repeat_text = sub_list[1]
                repeat_dict[choice] = repeat_text
                sentence = sentence.replace(sentence[begin_index:end_index + 1], '', 1)
        while sentence.find('#REPEAT') != -1:
            if sentence.find('#REPEAT') is not None:
                repeat_begin_index = sentence.find('#REPEAT')
                repeat_start_index = repeat_begin_index + 8  # skip '#REPEAT['
                # Searching from repeat_start_index avoids matching an
                # earlier, unrelated ']' in the sentence.
                repeat_end_index = sentence.find(']', repeat_start_index)
                repeat_index = sentence[repeat_start_index:repeat_end_index]
                # NOTE(review): an unknown key leaves the tag in place and
                # this loop never terminates — confirm keys always match.
                if repeat_index in repeat_dict:
                    sentence = sentence.replace(sentence[repeat_begin_index:repeat_end_index + 1],
                                                str(repeat_dict[repeat_index]))
                if sentence.find('#REPEAT') == -1:
                    return sentence
        return sentence
    else:
        return sentence
def __replace_capitalise(sentence):
    """Replace each '#CAPITALISE' tag and upper-case the letter after it.

    Only a following lowercase ASCII letter is capitalised; anything else
    (already-capitalised words, punctuation, end of string) passes through
    unchanged, with the tag removed either way.

    :param sentence: template string, or None (returned unchanged)
    """
    if sentence is None:
        return sentence
    while sentence.find('#CAPITALISE') != -1:
        # Was '_sentence.find(...)' — a NameError on every call that
        # reached this loop.
        cap_index = sentence.find('#CAPITALISE')
        before = sentence[:cap_index]
        # '#CAPITALISE ' spans 12 characters, so the target letter sits at
        # cap_index + 12 (slicing is safe even at end of string).
        letter = sentence[cap_index + 12:cap_index + 13]
        after = sentence[cap_index + 13:]
        if letter in "abcdefghijklmnopqrstuvwxyz":
            sentence = before + letter.capitalize() + after
        else:
            sentence = before + letter + after
    # Previously fell off the end (returning None) when no tag was present.
    return sentence
def __replace_capall(sentence):
    """Upper-case the whole sentence when a '#CAPALL' tag is present.

    :param sentence: template string, or None (returned unchanged)
    """
    if sentence is None:
        return sentence
    while sentence.find('#CAPALL') != -1:
        sentence = sentence.upper()
        # Strip the tag itself; prefer the space-suffixed form so normal
        # sentences keep their spacing, but fall back to the bare tag so a
        # trailing '#CAPALL' (no following space) cannot loop forever.
        if '#CAPALL ' in sentence:
            sentence = sentence.replace('#CAPALL ', '', 1)
        else:
            sentence = sentence.replace('#CAPALL', '', 1)
    # Previously fell off the end (returning None) when no tag was present.
    return sentence
def __check_spaces(sentence):
    """Normalise whitespace in a finished sentence.

    Runs of spaces collapse to one, and sentence-ending punctuation
    (. ! ?) gains an extra trailing space, giving the classic two-space
    separation between sentences.

    :param sentence: string to clean, or None (returned unchanged)
    :return: the cleaned sentence, stripped of outer whitespace
    """
    if sentence is None:
        return None
    pieces = []
    for word in sentence.split():
        # Double space after a sentence terminator, single space otherwise.
        if word[-1] in '.!?':
            word += ' '
        pieces.append(' ' + word)
    return ''.join(pieces).strip()
|
Morrolan/surrealism
|
surrealism.py
|
__get_verb
|
python
|
def __get_verb(counts):
cursor = CONN.cursor()
check_query = "select verb_id from surverbs"
cursor.execute(check_query)
check_result = cursor.fetchall()
id_list = []
for row in check_result:
id_list.append(row[0])
rand = random.randint(1, counts['max_verb'])
while rand not in id_list:
rand = random.randint(1, counts['max_verb'])
query = "select * from surverbs where verb_id = {0}".format(rand)
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result[1]
|
Let's fetch a VERB
:param counts:
|
train
|
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L367-L392
| null |
#!/usr/bin/env python
#############################################################################
# surrealism.py - Surreal sentence and error message generator
# Copyright (C) 2014 Ian Havelock
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#
# This is a derivative work (used with permission) from www.ravenblack.net
# Credit also goes to Kevan Davis on whose work the surrealism generator at
# Ravenblack.net is based on...
#############################################################################
__all__ = ['show_faults', 'show_sentences', 'getfault', 'getsentence', 'version',
'sentence_test', 'fault_test', 'show_sentences', 'show_faults']
# IMPORTS ###################################################################
import sqlite3
import random
import pkg_resources
# PARTICULAR IMPORTS ########################################################
from pkg_resources import resource_filename
# CONSTANTS #################################################################
CONN = sqlite3.connect(resource_filename(__name__, 'surrealism.sqlite'))
# VARIABLES #################################################################
# CLASSES ###################################################################
#############################################################################
# EXTERNAL METHODS BELOW
def version():
    """Returns the current version of the Surrealism module."""
    # NOTE(review): pkg_resources is deprecated in modern setuptools;
    # importlib.metadata.version("surrealism") is the current equivalent.
    return pkg_resources.require('surrealism')[0].version
def showfaults():
    """Backwards-compatible alias for show_faults()."""
    return show_faults()
def show_faults():
    """Return all valid/active faults ordered by ID to allow the user to
    pick and choose.

    :return: List of Tuples where the Tuple elements are:
        (fault id, fault template)
    """
    cur = CONN.cursor()
    cur.execute("select fau_id, fault from surfaults where fau_is_valid = 'y' order by fau_id asc")
    return cur.fetchall()
def showsentences():
    """Backwards-compatible alias for show_sentences()."""
    return show_sentences()
def show_sentences():
    """Return all valid/active sentences ordered by ID to allow the user
    to pick and choose.

    :return: Dict mapping sentence ID to sentence structure.
    """
    cur = CONN.cursor()
    cur.execute("select sen_id, sentence from sursentences where sen_is_valid = 'y' order by sen_id asc")
    return {sen_id: structure for sen_id, structure in cur.fetchall()}
def faulttest():
    """Backwards-compatible alias for fault_test()."""
    return fault_test()
def fault_test():
    """Returns 1 instance of each programming fault for testing purposes.

    :return: list of (fault_id, rendered_fault_or_status) tuples, one per
        row in the surfaults table
    """
    counts = __get_table_limits()
    max_num = counts['max_fau']
    counter = 0
    list_of_tuples = []
    fault_result = None
    while counter < max_num:
        counter += 1
        # NOTE(review): counter is used as a fault_id; if the table has ID
        # gaps, __get_fault silently substitutes a random existing row.
        fault = __get_fault(counts, fault_id=counter)
        fault_id = fault[1]
        if fault[0] == 'n':
            fault_result = "Fault is DISABLED - ignoring..."
        if fault[0] == 'y':
            fault_result = __process_sentence(fault, counts)
        list_of_tuples.append((fault_id, fault_result))
    return list_of_tuples
def sentencetest():
    """Backwards-compatible alias for sentence_test()."""
    return sentence_test()
def sentence_test():
    """Return 1 random version of each sentence to test sentence structure.

    :return: list of (sentence_id, rendered_sentence_or_status) tuples,
        one per row in the sursentences table
    """
    counts = __get_table_limits()
    max_num = counts['max_sen']
    counter = 0
    list_of_tuples = []
    sentence_result = None
    while counter < max_num:
        counter += 1
        # NOTE(review): counter is used as a sentence_id; if the table has
        # ID gaps, __get_sentence silently substitutes a random row.
        sentence = __get_sentence(counts, sentence_id=counter)
        sentence_id = sentence[1]
        if sentence[0] == 'n':
            sentence_result = "Sentence is DISABLED - ignoring..."
        if sentence[0] == 'y':
            sentence_result = __process_sentence(sentence, counts)
        list_of_tuples.append((sentence_id, sentence_result))
    return list_of_tuples
def getfault(fault_id=None):
    """Backwards-compatible alias for get_fault()."""
    return get_fault(fault_id)
def get_fault(fault_id=None):
    """Retrieve a randomly-generated error message as a unicode string.

    :param fault_id:
        Allows you to optionally specify an integer representing the fault_id
        from the database table. This allows you to retrieve a specific fault
        each time, albeit with different keywords."""
    counts = __get_table_limits()
    result = None
    id_ = 0
    try:
        if isinstance(fault_id, int):
            id_ = fault_id
        elif isinstance(fault_id, float):
            print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
            id_ = round(fault_id)
        else:
            # Non-numeric (including None): fall back to a random fault.
            id_ = random.randint(1, counts['max_fau'])
    except ValueError:
        print("ValueError: Incorrect parameter type detected.")
    if id_ <= counts['max_fau']:
        fault = __get_fault(counts, fault_id=id_)
    else:
        # Clamp an out-of-range request to the highest permitted ID.
        print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_fau'])))
        id_ = counts['max_fau']
        fault = __get_fault(counts, fault_id=id_)
    if fault is not None:
        # Resample until an enabled ('y') fault row is found.
        while fault[0] == 'n':
            # NOTE(review): id_ is always an int here, so the else branch
            # below is dead code — every retry is a random re-fetch.
            if id_ is not None:
                fault = __get_fault(counts, None)
            else:
                fault = __get_fault(counts, id_)
        if fault[0] == 'y':
            result = __process_sentence(fault, counts)
        return result
    else:
        print('ValueError: _fault cannot be None.')
def getsentence(sentence_id=None):
    """Backwards-compatible alias for get_sentence()."""
    return get_sentence(sentence_id)
def get_sentence(sentence_id=None):
    """Retrieve a randomly-generated sentence as a unicode string.

    :param sentence_id:
        Allows you to optionally specify an integer representing the sentence_id
        from the database table. This allows you to retrieve a specific
        sentence each time, albeit with different keywords."""
    counts = __get_table_limits()
    result = None
    id_ = 0
    try:
        if isinstance(sentence_id, int):
            id_ = sentence_id
        elif isinstance(sentence_id, float):
            print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
            id_ = round(sentence_id)
        else:
            # Non-numeric (including None): fall back to a random sentence.
            id_ = random.randint(1, counts['max_sen'])
    except ValueError:
        print("ValueError: Incorrect parameter type detected.")
    if id_ <= counts['max_sen']:
        sentence = __get_sentence(counts, sentence_id=id_)
    else:
        # Clamp an out-of-range request to the highest permitted ID.
        print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_sen'])))
        id_ = counts['max_sen']
        sentence = __get_sentence(counts, sentence_id=id_)
    if sentence is not None:
        # Resample until an enabled ('y') sentence row is found.
        while sentence[0] == 'n':
            # NOTE(review): id_ is always an int here, so the else branch
            # below is dead code — every retry is a random re-fetch.
            if id_ is not None:
                # deliberately pass None so a fresh random row is drawn
                sentence = __get_sentence(counts, None)
            else:
                sentence = __get_sentence(counts, id_)
        if sentence[0] == 'y':
            result = __process_sentence(sentence, counts)
        return result
    else:
        print('ValueError: _sentence cannot be None.')
#############################################################################
# INTERNAL METHODS BELOW
def __get_fault(counts, fault_id=None):
    """Fetch one row from surfaults, by ID or at random.

    :param counts: table-limit dict from __get_table_limits
    :param fault_id: optional int; a non-int (including None) triggers a
        random pick from the IDs that actually exist
    :return: the full surfaults row tuple
    """
    # First of all we need a cursor and a query to retrieve our ID's
    cursor = CONN.cursor()
    check_query = "select fau_id from surfaults"
    # Now we fetch the result of the query and save it into check_result
    cursor.execute(check_query)
    check_result = cursor.fetchall()
    # declare an empty list to be populated below
    id_list = []
    id_to_fetch = None
    for row in check_result:
        id_list.append(row[0])
    if fault_id is not None:
        if type(fault_id) is int:
            id_to_fetch = fault_id
        else:
            id_to_fetch = random.randint(1, counts['max_fau'])
    # NOTE(review): when fault_id is None, id_to_fetch stays None and the
    # loop below always resamples — effectively a random pick.
    while id_to_fetch not in id_list:
        id_to_fetch = random.randint(1, counts['max_fau'])
    query = ("select * from surfaults where fau_id = {0}".format(id_to_fetch))
    cursor.execute(query)
    result = cursor.fetchone()
    # cursor.close()
    return result
def __get_sentence(counts, sentence_id=None):
    """Fetch one row from sursentences, by ID or at random.

    :param counts: table-limit dict from __get_table_limits
    :param sentence_id: optional int; a non-int (including None) triggers a
        random pick from the IDs that actually exist
    :return: the full sursentences row tuple
    """
    # First of all we need a cursor and a query to retrieve our ID's
    cursor = CONN.cursor()
    check_query = "select sen_id from sursentences"
    # Now we fetch the result of the query and save it into check_result
    cursor.execute(check_query)
    check_result = cursor.fetchall()
    # declare an empty list to be populated below
    id_list = []
    id_to_fetch = None
    # Populate the id_list variable with all of the ID's we retrieved from the database query.
    for row in check_result:
        id_list.append(row[0])
    if sentence_id is not None:
        if type(sentence_id) is int:
            id_to_fetch = sentence_id
        else:
            id_to_fetch = random.randint(1, counts['max_sen'])
    # NOTE(review): when sentence_id is None, id_to_fetch stays None and
    # the loop below always resamples — effectively a random pick.
    while id_to_fetch not in id_list:
        id_to_fetch = random.randint(1, counts['max_sen'])
    query = ("select * from sursentences where sen_id = {0}".format(id_to_fetch))
    cursor.execute(query)
    result = cursor.fetchone()
    # cursor.close()
    return result
def __get_noun(counts):
    """Return one random noun string from the *surnouns* table.

    :param counts: table-limit dict from __get_table_limits (retained for
        interface compatibility; unused now that the pick comes straight
        from the list of existing IDs)
    """
    cursor = CONN.cursor()
    # Collect the primary keys that actually exist so ID gaps are handled.
    cursor.execute("select noun_id from surnouns")
    id_list = [row[0] for row in cursor.fetchall()]
    # random.choice picks uniformly from existing IDs; the old rejection-
    # sampling loop could spin forever whenever an ID exceeded the row
    # count used as the randint upper bound.
    chosen_id = random.choice(id_list)
    # Parameterized query instead of string formatting.
    cursor.execute("select * from surnouns where noun_id = ?", (chosen_id,))
    return cursor.fetchone()[1]
def __get_adjective(counts):
    """Return one random adjective string from the *suradjs* table.

    :param counts: table-limit dict from __get_table_limits (retained for
        interface compatibility; unused now that the pick comes straight
        from the list of existing IDs)
    """
    cursor = CONN.cursor()
    # Collect the primary keys that actually exist so ID gaps are handled.
    cursor.execute("select adj_id from suradjs")
    id_list = [row[0] for row in cursor.fetchall()]
    # random.choice picks uniformly from existing IDs; the old rejection-
    # sampling loop could spin forever whenever an ID exceeded the row
    # count used as the randint upper bound.
    chosen_id = random.choice(id_list)
    # Parameterized query instead of string formatting.
    cursor.execute("select * from suradjs where adj_id = ?", (chosen_id,))
    return cursor.fetchone()[1]
def __get_name(counts):
"""Let's fetch a NAME from the database...
:param counts:"""
cursor = CONN.cursor()
check_query = "select name_id from surnames"
cursor.execute(check_query)
check_result = cursor.fetchall()
id_list = []
for row in check_result:
id_list.append(row[0])
rand = random.randint(1, counts['max_names'])
while rand not in id_list:
rand = random.randint(1, counts['max_names'])
query = "select * from surnames where name_id = {0}".format(rand)
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result[1]
def __get_table_limits():
"""Here we simply take a count of each of the database tables so we know our
upper limits for our random number calls then return a dictionary of them
to the calling function..."""
table_counts = {
'max_adjectives': None,
'max_names': None,
'max_nouns': None,
'max_sentences': None,
'max_faults': None,
'max_verbs': None
}
cursor = CONN.cursor()
cursor.execute('SELECT count(*) FROM suradjs')
table_counts['max_adjectives'] = cursor.fetchone()
table_counts['max_adjectives'] = table_counts['max_adjectives'][0]
cursor.execute('SELECT count(*) FROM surnames')
table_counts['max_names'] = cursor.fetchone()
table_counts['max_names'] = table_counts['max_names'][0]
cursor.execute('SELECT count(*) FROM surnouns')
table_counts['max_nouns'] = cursor.fetchone()
table_counts['max_nouns'] = table_counts['max_nouns'][0]
cursor.execute('SELECT count(*) FROM sursentences')
table_counts['max_sen'] = cursor.fetchone()
table_counts['max_sen'] = table_counts['max_sen'][0]
cursor.execute('SELECT count(*) FROM surfaults')
table_counts['max_fau'] = cursor.fetchone()
table_counts['max_fau'] = table_counts['max_fau'][0]
cursor.execute('SELECT count(*) FROM surverbs')
table_counts['max_verb'] = cursor.fetchone()
table_counts['max_verb'] = table_counts['max_verb'][0]
return table_counts
def __process_sentence(sentence_tuple, counts):
"""pull the actual sentence from the tuple (tuple contains additional data such as ID)
:param _sentence_tuple:
:param counts:
"""
sentence = sentence_tuple[2]
# now we start replacing words one type at a time...
sentence = __replace_verbs(sentence, counts)
sentence = __replace_nouns(sentence, counts)
sentence = ___replace_adjective_maybe(sentence, counts)
sentence = __replace_adjective(sentence, counts)
sentence = __replace_names(sentence, counts)
# here we perform a check to see if we need to use A or AN depending on the
# first letter of the following word...
sentence = __replace_an(sentence)
# replace the new repeating segments
sentence = __replace_repeat(sentence)
# now we will read, choose and substitute each of the RANDOM sentence tuples
sentence = __replace_random(sentence)
# now we are going to choose whether to capitalize words/sentences or not
sentence = __replace_capitalise(sentence)
# here we will choose whether to capitalize all words in the sentence
sentence = __replace_capall(sentence)
# check for appropriate spaces in the correct places.
sentence = __check_spaces(sentence)
return sentence
def __replace_verbs(sentence, counts):
"""Lets find and replace all instances of #VERB
:param _sentence:
:param counts:
"""
if sentence is not None:
while sentence.find('#VERB') != -1:
sentence = sentence.replace('#VERB', str(__get_verb(counts)), 1)
if sentence.find('#VERB') == -1:
return sentence
return sentence
else:
return sentence
def __replace_nouns(sentence, counts):
"""Lets find and replace all instances of #NOUN
:param _sentence:
:param counts:
"""
if sentence is not None:
while sentence.find('#NOUN') != -1:
sentence = sentence.replace('#NOUN', str(__get_noun(counts)), 1)
if sentence.find('#NOUN') == -1:
return sentence
return sentence
else:
return sentence
def ___replace_adjective_maybe(sentence, counts):
"""Lets find and replace all instances of #ADJECTIVE_MAYBE
:param _sentence:
:param counts:
"""
random_decision = random.randint(0, 1)
if sentence is not None:
while sentence.find('#ADJECTIVE_MAYBE') != -1:
if random_decision % 2 == 0:
sentence = sentence.replace('#ADJECTIVE_MAYBE',
' ' + str(__get_adjective(counts)), 1)
elif random_decision % 2 != 0:
sentence = sentence.replace('#ADJECTIVE_MAYBE', '', 1)
if sentence.find('#ADJECTIVE_MAYBE') == -1:
return sentence
return sentence
else:
return sentence
def __replace_adjective(sentence, counts):
"""Lets find and replace all instances of #ADJECTIVE
:param _sentence:
:param counts:
"""
if sentence is not None:
while sentence.find('#ADJECTIVE') != -1:
sentence = sentence.replace('#ADJECTIVE',
str(__get_adjective(counts)), 1)
if sentence.find('#ADJECTIVE') == -1:
return sentence
return sentence
else:
return sentence
def __replace_names(sentence, counts):
"""Lets find and replace all instances of #NAME
:param _sentence:
:param counts:
"""
if sentence is not None:
while sentence.find('#NAME') != -1:
sentence = sentence.replace('#NAME', str(__get_name(counts)), 1)
if sentence.find('#NAME') == -1:
return sentence
return sentence
else:
return sentence
def __replace_an(sentence):
    """Replace every '#AN' tag with 'a' or 'an'.

    The article depends on the first letter of the word that follows the
    tag: a vowel gets 'an', anything else (including end-of-string) gets
    'a'.

    :param sentence: template string, or None (returned unchanged)
    """
    if sentence is None:
        return sentence
    while sentence.find('#AN') != -1:
        # '#AN ' spans 4 characters, so the next word starts 4 past the tag.
        next_letter_index = sentence.find('#AN') + 4
        # Guard the lookup: a trailing '#AN' has no following word, which
        # previously raised IndexError; fall back to 'a' in that case.
        if (next_letter_index < len(sentence)
                and sentence[next_letter_index] in 'aeiouAEIOU'):
            sentence = sentence.replace('#AN', 'an', 1)
        else:
            sentence = sentence.replace('#AN', 'a', 1)
    return sentence
def __replace_random(sentence):
    """Expand each '#RANDOM[n,opt1,...,optn]' tag into one random option.

    The first element inside the brackets is the option count n; a pick in
    1..n then indexes the corresponding option.

    :param sentence: template string, or None (returned unchanged)
    """
    if sentence is None:
        return sentence
    while sentence.find('#RANDOM') != -1:
        tag_start = sentence.find('#RANDOM')
        options_start = tag_start + 8  # skip '#RANDOM['
        # Search for the closing bracket *after* the tag; the old code
        # scanned from position 0 and broke on any earlier ']'. Its
        # 'find(...) is not None' guard was also always true (find
        # returns an int) and has been dropped.
        options_end = sentence.find(']', options_start)
        options = sentence[options_start:options_end].split(',')
        pick = random.randint(1, int(options[0]))
        sentence = sentence.replace(
            sentence[tag_start:options_end + 1], options[pick], 1)
    return sentence
def __replace_repeat(sentence):
"""
Allows the use of repeating random-elements such as in the 'Ten green bottles' type sentences.
:param sentence:
"""
######### USE SENTENCE_ID 47 for testing!
repeat_dict = {}
if sentence is not None:
while sentence.find('#DEFINE_REPEAT') != -1:
begin_index = sentence.find('#DEFINE_REPEAT')
start_index = begin_index + 15
end_index = sentence.find(']')
if sentence.find('#DEFINE_REPEAT') is not None:
sub_list = sentence[start_index:end_index].split(',')
choice = sub_list[0]
repeat_text = sub_list[1]
repeat_dict[choice] = repeat_text
sentence = sentence.replace(sentence[begin_index:end_index + 1], '', 1)
while sentence.find('#REPEAT') != -1:
if sentence.find('#REPEAT') is not None:
repeat_begin_index = sentence.find('#REPEAT')
repeat_start_index = repeat_begin_index + 8
# by searching from repeat_index below we don't encounter dodgy bracket-matching errors.
repeat_end_index = sentence.find(']', repeat_start_index)
repeat_index = sentence[repeat_start_index:repeat_end_index]
if repeat_index in repeat_dict:
sentence = sentence.replace(sentence[repeat_begin_index:repeat_end_index + 1],
str(repeat_dict[repeat_index]))
if sentence.find('#REPEAT') == -1:
return sentence
return sentence
else:
return sentence
def __replace_capitalise(sentence):
    """Replace each '#CAPITALISE' tag and upper-case the letter after it.

    Only a following lowercase ASCII letter is capitalised; anything else
    (already-capitalised words, punctuation, end of string) passes through
    unchanged, with the tag removed either way.

    :param sentence: template string, or None (returned unchanged)
    """
    if sentence is None:
        return sentence
    while sentence.find('#CAPITALISE') != -1:
        # Was '_sentence.find(...)' — a NameError on every call that
        # reached this loop.
        cap_index = sentence.find('#CAPITALISE')
        before = sentence[:cap_index]
        # '#CAPITALISE ' spans 12 characters, so the target letter sits at
        # cap_index + 12 (slicing is safe even at end of string).
        letter = sentence[cap_index + 12:cap_index + 13]
        after = sentence[cap_index + 13:]
        if letter in "abcdefghijklmnopqrstuvwxyz":
            sentence = before + letter.capitalize() + after
        else:
            sentence = before + letter + after
    # Previously fell off the end (returning None) when no tag was present.
    return sentence
def __replace_capall(sentence):
    """Upper-case the whole sentence when a '#CAPALL' tag is present.

    :param sentence: template string, or None (returned unchanged)
    """
    if sentence is None:
        return sentence
    while sentence.find('#CAPALL') != -1:
        sentence = sentence.upper()
        # Strip the tag itself; prefer the space-suffixed form so normal
        # sentences keep their spacing, but fall back to the bare tag so a
        # trailing '#CAPALL' (no following space) cannot loop forever.
        if '#CAPALL ' in sentence:
            sentence = sentence.replace('#CAPALL ', '', 1)
        else:
            sentence = sentence.replace('#CAPALL', '', 1)
    # Previously fell off the end (returning None) when no tag was present.
    return sentence
def __check_spaces(sentence):
"""
Here we check to see that we have the correct number of spaces in the correct locations.
:param _sentence:
:return:
"""
# We have to run the process multiple times:
# Once to search for all spaces, and check if there are adjoining spaces;
# The second time to check for 2 spaces after sentence-ending characters such as . and ! and ?
if sentence is not None:
words = sentence.split()
new_sentence = ''
for (i, word) in enumerate(words):
if word[-1] in set('.!?'):
word += ' '
new_word = ''.join(word)
new_sentence += ' ' + new_word
# remove any trailing whitespace
new_sentence = new_sentence.strip()
return new_sentence
|
Morrolan/surrealism
|
surrealism.py
|
__get_table_limits
|
python
|
def __get_table_limits():
table_counts = {
'max_adjectives': None,
'max_names': None,
'max_nouns': None,
'max_sentences': None,
'max_faults': None,
'max_verbs': None
}
cursor = CONN.cursor()
cursor.execute('SELECT count(*) FROM suradjs')
table_counts['max_adjectives'] = cursor.fetchone()
table_counts['max_adjectives'] = table_counts['max_adjectives'][0]
cursor.execute('SELECT count(*) FROM surnames')
table_counts['max_names'] = cursor.fetchone()
table_counts['max_names'] = table_counts['max_names'][0]
cursor.execute('SELECT count(*) FROM surnouns')
table_counts['max_nouns'] = cursor.fetchone()
table_counts['max_nouns'] = table_counts['max_nouns'][0]
cursor.execute('SELECT count(*) FROM sursentences')
table_counts['max_sen'] = cursor.fetchone()
table_counts['max_sen'] = table_counts['max_sen'][0]
cursor.execute('SELECT count(*) FROM surfaults')
table_counts['max_fau'] = cursor.fetchone()
table_counts['max_fau'] = table_counts['max_fau'][0]
cursor.execute('SELECT count(*) FROM surverbs')
table_counts['max_verb'] = cursor.fetchone()
table_counts['max_verb'] = table_counts['max_verb'][0]
return table_counts
|
Here we simply take a count of each of the database tables so we know our
upper limits for our random number calls then return a dictionary of them
to the calling function...
|
train
|
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L478-L518
| null |
#!/usr/bin/env python
#############################################################################
# surrealism.py - Surreal sentence and error message generator
# Copyright (C) 2014 Ian Havelock
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#
# This is a derivative work (used with permission) from www.ravenblack.net
# Credit also goes to Kevan Davis on whose work the surrealism generator at
# Ravenblack.net is based on...
#############################################################################
__all__ = ['show_faults', 'show_sentences', 'getfault', 'getsentence', 'version',
'sentence_test', 'fault_test', 'show_sentences', 'show_faults']
# IMPORTS ###################################################################
import sqlite3
import random
import pkg_resources
# PARTICULAR IMPORTS ########################################################
from pkg_resources import resource_filename
# CONSTANTS #################################################################
CONN = sqlite3.connect(resource_filename(__name__, 'surrealism.sqlite'))
# VARIABLES #################################################################
# CLASSES ###################################################################
#############################################################################
# EXTERNAL METHODS BELOW
def version():
"""Returns the current version of the Surrealism module."""
return pkg_resources.require('surrealism')[0].version
def showfaults():
"""
This exists for backwards compatibility
:return:
"""
output = show_faults()
return output
def show_faults():
"""
Return all valid/active faults ordered by ID to allow the user to pick and choose.
:return: List of Tuples where the Tuple elements are: (fault id, fault template)
"""
cursor = CONN.cursor()
query = "select fau_id, fault from surfaults where fau_is_valid = 'y' order by fau_id asc"
cursor.execute(query)
result = cursor.fetchall()
return result
def showsentences():
"""
This exists for backwards compatibility
:return:
"""
output = show_sentences()
return output
def show_sentences():
    """Return all valid/active sentences ordered by ID so a caller can pick one.

    :return: dict mapping sentence ID -> sentence template.
    """
    cursor = CONN.cursor()
    cursor.execute("select sen_id, sentence from sursentences where sen_is_valid = 'y' order by sen_id asc")
    # Build {sen_id: sentence} directly from the result rows.
    return {sen_id: template for (sen_id, template) in cursor.fetchall()}
def faulttest():
    """Backwards-compatible alias for :func:`fault_test`."""
    return fault_test()
def fault_test():
    """Returns 1 instance of each programming fault for testing purposes."""
    counts = __get_table_limits()
    results = []
    outcome = None
    for fid in range(1, counts['max_fau'] + 1):
        fault = __get_fault(counts, fault_id=fid)
        if fault[0] == 'n':
            outcome = "Fault is DISABLED - ignoring..."
        if fault[0] == 'y':
            outcome = __process_sentence(fault, counts)
        # fault[1] is the row's fault id.
        results.append((fault[1], outcome))
    return results
def sentencetest():
    """Backwards-compatible alias for :func:`sentence_test`."""
    return sentence_test()
def sentence_test():
    """Return 1 random version of each sentence to test sentence structure."""
    counts = __get_table_limits()
    results = []
    outcome = None
    for sid in range(1, counts['max_sen'] + 1):
        sentence = __get_sentence(counts, sentence_id=sid)
        if sentence[0] == 'n':
            outcome = "Sentence is DISABLED - ignoring..."
        if sentence[0] == 'y':
            outcome = __process_sentence(sentence, counts)
        # sentence[1] is the row's sentence id.
        results.append((sentence[1], outcome))
    return results
def getfault(fault_id=None):
    """Backwards-compatible alias for :func:`get_fault`."""
    return get_fault(fault_id)
def get_fault(fault_id=None):
    """Retrieve a randomly-generated error message as a unicode string.

    :param fault_id:
        Allows you to optionally specify an integer representing the fault_id
        from the database table. This allows you to retrieve a specific fault
        each time, albeit with different keywords."""
    counts = __get_table_limits()
    result = None
    id_ = 0
    try:
        if isinstance(fault_id, int):
            id_ = fault_id
        elif isinstance(fault_id, float):
            print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
            id_ = round(fault_id)
        else:
            # No usable id supplied -- pick one at random.
            id_ = random.randint(1, counts['max_fau'])
    except ValueError:
        print("ValueError: Incorrect parameter type detected.")
    if id_ <= counts['max_fau']:
        fault = __get_fault(counts, fault_id=id_)
    else:
        # Requested id is out of range; clamp to the highest known id.
        print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_fau'])))
        id_ = counts['max_fau']
        fault = __get_fault(counts, fault_id=id_)
    if fault is not None:
        while fault[0] == 'n':
            # Row is flagged invalid ('n') -- keep fetching until a valid one
            # turns up.  NOTE(review): id_ is always an int here, so the first
            # branch is always taken and a *random* row is fetched each retry
            # -- confirm this fallback is intended.
            if id_ is not None:
                fault = __get_fault(counts, None)
            else:
                fault = __get_fault(counts, id_)
        if fault[0] == 'y':
            result = __process_sentence(fault, counts)
        return result
    else:
        # NOTE(review): implicitly returns None after printing -- callers
        # should be prepared for a None result.
        print('ValueError: _fault cannot be None.')
def getsentence(sentence_id=None):
    """Backwards-compatible alias for :func:`get_sentence`."""
    return get_sentence(sentence_id)
def get_sentence(sentence_id=None):
    """Retrieve a randomly-generated sentence as a unicode string.

    :param sentence_id:
        Allows you to optionally specify an integer representing the sentence_id
        from the database table. This allows you to retrieve a specific
        sentence each time, albeit with different keywords."""
    counts = __get_table_limits()
    result = None
    id_ = 0
    try:
        if isinstance(sentence_id, int):
            id_ = sentence_id
        elif isinstance(sentence_id, float):
            print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
            id_ = round(sentence_id)
        else:
            # No usable id supplied -- pick one at random.
            id_ = random.randint(1, counts['max_sen'])
    except ValueError:
        print("ValueError: Incorrect parameter type detected.")
    if id_ <= counts['max_sen']:
        sentence = __get_sentence(counts, sentence_id=id_)
    else:
        # Requested id is out of range; clamp to the highest known id.
        print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_sen'])))
        id_ = counts['max_sen']
        sentence = __get_sentence(counts, sentence_id=id_)
    if sentence is not None:
        while sentence[0] == 'n':
            if id_ is not None:
                # here we deliberately pass 'None' to __get_sentence as it will
                # then fall back to choosing a random id.
                sentence = __get_sentence(counts, None)
            else:
                sentence = __get_sentence(counts, id_)
        if sentence[0] == 'y':
            result = __process_sentence(sentence, counts)
        return result
    else:
        # NOTE(review): implicitly returns None after printing -- callers
        # should be prepared for a None result.
        print('ValueError: _sentence cannot be None.')
#############################################################################
# INTERNAL METHODS BELOW
def __get_fault(counts, fault_id=None):
    """Fetch one fault row, honouring an optional explicit id.

    :param counts: dict of table row limits (uses ``max_fau``).
    :param fault_id: optional int; non-int values fall back to a random id.
    :return: the full ``surfaults`` row for the chosen id.
    """
    cursor = CONN.cursor()
    cursor.execute("select fau_id from surfaults")
    known_ids = [row[0] for row in cursor.fetchall()]
    # Use the caller's id only when it is a genuine int; otherwise randomise.
    if fault_id is not None and type(fault_id) is int:
        id_to_fetch = fault_id
    else:
        id_to_fetch = random.randint(1, counts['max_fau'])
    # Re-roll until the id actually exists in the table.
    while id_to_fetch not in known_ids:
        id_to_fetch = random.randint(1, counts['max_fau'])
    cursor.execute("select * from surfaults where fau_id = {0}".format(id_to_fetch))
    return cursor.fetchone()
def __get_sentence(counts, sentence_id=None):
    """Fetch one sentence row, honouring an optional explicit id.

    :param counts: dict of table row limits (uses ``max_sen``).
    :param sentence_id: optional int; non-int values fall back to a random id.
    :return: the full ``sursentences`` row for the chosen id.
    """
    cursor = CONN.cursor()
    cursor.execute("select sen_id from sursentences")
    known_ids = [row[0] for row in cursor.fetchall()]
    # Use the caller's id only when it is a genuine int; otherwise randomise.
    if sentence_id is not None and type(sentence_id) is int:
        id_to_fetch = sentence_id
    else:
        id_to_fetch = random.randint(1, counts['max_sen'])
    # Re-roll until the id actually exists in the table.
    while id_to_fetch not in known_ids:
        id_to_fetch = random.randint(1, counts['max_sen'])
    cursor.execute("select * from sursentences where sen_id = {0}".format(id_to_fetch))
    return cursor.fetchone()
def __get_verb(counts):
    """Return one randomly chosen verb string from the surverbs table."""
    cursor = CONN.cursor()
    cursor.execute("select verb_id from surverbs")
    known_ids = {row[0] for row in cursor.fetchall()}
    rand = random.randint(1, counts['max_verb'])
    # Re-roll until we hit an id that exists in the table.
    while rand not in known_ids:
        rand = random.randint(1, counts['max_verb'])
    cursor.execute("select * from surverbs where verb_id = {0}".format(rand))
    # Column 1 of the row holds the verb text.
    return cursor.fetchone()[1]
def __get_noun(counts):
    """Return one randomly chosen noun string from the surnouns table."""
    cursor = CONN.cursor()
    cursor.execute("select noun_id from surnouns")
    known_ids = {row[0] for row in cursor.fetchall()}
    rand = random.randint(1, counts['max_nouns'])
    # Re-roll until we hit an id that exists in the table.
    while rand not in known_ids:
        rand = random.randint(1, counts['max_nouns'])
    cursor.execute("select * from surnouns where noun_id = {0}".format(rand))
    # Column 1 of the row holds the noun text.
    return cursor.fetchone()[1]
def __get_adjective(counts):
    """Return one randomly chosen adjective string from the suradjs table."""
    cursor = CONN.cursor()
    cursor.execute("select adj_id from suradjs")
    known_ids = {row[0] for row in cursor.fetchall()}
    rand = random.randint(1, counts['max_adjectives'])
    # Re-roll until we hit an id that exists in the table.
    while rand not in known_ids:
        rand = random.randint(1, counts['max_adjectives'])
    cursor.execute("select * from suradjs where adj_id = {0}".format(rand))
    # Column 1 of the row holds the adjective text.
    return cursor.fetchone()[1]
def __get_name(counts):
    """Return one randomly chosen name string from the surnames table."""
    cursor = CONN.cursor()
    cursor.execute("select name_id from surnames")
    known_ids = {row[0] for row in cursor.fetchall()}
    rand = random.randint(1, counts['max_names'])
    # Re-roll until we hit an id that exists in the table.
    while rand not in known_ids:
        rand = random.randint(1, counts['max_names'])
    cursor.execute("select * from surnames where name_id = {0}".format(rand))
    # Column 1 of the row holds the name text.
    return cursor.fetchone()[1]
def __process_sentence(sentence_tuple, counts):
    """Expand every template marker in a sentence/fault row into final text.

    :param sentence_tuple: a full DB row; column 2 holds the template text.
    :param counts: dict of table row limits, forwarded to the word fetchers.
    :return: the fully substituted, spacing-normalised sentence.
    """
    sentence = sentence_tuple[2]
    # Word-substitution passes -- each consumes its own #TOKEN markers.
    for substitute in (__replace_verbs, __replace_nouns,
                       ___replace_adjective_maybe, __replace_adjective,
                       __replace_names):
        sentence = substitute(sentence, counts)
    # Grammar passes: pick 'a'/'an' to match the following word.
    sentence = __replace_an(sentence)
    # Expand #DEFINE_REPEAT/#REPEAT segments, then #RANDOM choices.
    sentence = __replace_repeat(sentence)
    sentence = __replace_random(sentence)
    # Capitalisation passes (single word, then whole sentence).
    sentence = __replace_capitalise(sentence)
    sentence = __replace_capall(sentence)
    # Finally normalise inter-word spacing.
    return __check_spaces(sentence)
def __replace_verbs(sentence, counts):
    """Substitute every #VERB marker with a randomly fetched verb."""
    if sentence is None:
        return sentence
    # Replace one marker per iteration so each gets a fresh random verb.
    while '#VERB' in sentence:
        sentence = sentence.replace('#VERB', str(__get_verb(counts)), 1)
    return sentence
def __replace_nouns(sentence, counts):
    """Substitute every #NOUN marker with a randomly fetched noun."""
    if sentence is None:
        return sentence
    # Replace one marker per iteration so each gets a fresh random noun.
    while '#NOUN' in sentence:
        sentence = sentence.replace('#NOUN', str(__get_noun(counts)), 1)
    return sentence
def ___replace_adjective_maybe(sentence, counts):
    """Expand or drop every #ADJECTIVE_MAYBE marker.

    A single coin flip is made up front (matching the original behaviour), so
    one call either expands ALL markers or drops ALL of them.
    """
    # Coin flip happens before the None guard so the RNG stream is identical
    # to the original implementation.
    coin = random.randint(0, 1)
    if sentence is None:
        return sentence
    while '#ADJECTIVE_MAYBE' in sentence:
        if coin % 2 == 0:
            # Leading space keeps the adjective separated from the prior word.
            sentence = sentence.replace('#ADJECTIVE_MAYBE',
                                        ' ' + str(__get_adjective(counts)), 1)
        else:
            sentence = sentence.replace('#ADJECTIVE_MAYBE', '', 1)
    return sentence
def __replace_adjective(sentence, counts):
    """Substitute every #ADJECTIVE marker with a randomly fetched adjective."""
    if sentence is None:
        return sentence
    # Replace one marker per iteration so each gets a fresh random adjective.
    while '#ADJECTIVE' in sentence:
        sentence = sentence.replace('#ADJECTIVE', str(__get_adjective(counts)), 1)
    return sentence
def __replace_names(sentence, counts):
    """Substitute every #NAME marker with a randomly fetched name."""
    if sentence is None:
        return sentence
    # Replace one marker per iteration so each gets a fresh random name.
    while '#NAME' in sentence:
        sentence = sentence.replace('#NAME', str(__get_name(counts)), 1)
    return sentence
def __replace_an(sentence):
    """Replace each #AN marker with 'a' or 'an'.

    The choice depends on whether the word following the marker starts with
    a vowel.  Fixes an IndexError in the original when the marker sat at the
    very end of the sentence (index past the string); that case now falls
    back to 'a'.

    :param sentence: template text, or None (returned unchanged).
    :return: the sentence with all #AN markers resolved.
    """
    if sentence is None:
        return sentence
    while '#AN' in sentence:
        # First letter of the following word: marker (3 chars) + space = +4.
        next_letter_index = sentence.find('#AN') + 4
        if next_letter_index < len(sentence) and sentence[next_letter_index] in 'aeiouAEIOU':
            sentence = sentence.replace('#AN', 'an', 1)
        else:
            # Consonant -- or the marker is at the end of the sentence.
            sentence = sentence.replace('#AN', 'a', 1)
    return sentence
def __replace_random(sentence):
    """Expand each #RANDOM[n,opt1,...,optn] directive into one chosen option.

    The first comma-separated field is the option count; one of the following
    fields is picked uniformly at random.
    """
    if sentence is None:
        return sentence
    while '#RANDOM' in sentence:
        marker_start = sentence.find('#RANDOM')
        body_start = marker_start + 8        # skip "#RANDOM["
        body_end = sentence.find(']')
        options = sentence[body_start:body_end].split(',')
        # options[0] is the count; valid picks are options[1..count].
        pick = random.randint(1, int(options[0]))
        directive = sentence[marker_start:body_end + 1]
        sentence = sentence.replace(directive, options[pick], 1)
    return sentence
def __replace_repeat(sentence):
    """Expand repeating segments ('Ten green bottles' style sentences).

    Pass 1 consumes every #DEFINE_REPEAT[key,text] directive, recording the
    key -> text mapping and deleting the directive.  Pass 2 replaces every
    #REPEAT[key] directive with the recorded text.
    """
    if sentence is None:
        return sentence
    definitions = {}
    # Pass 1: harvest the definitions.
    while '#DEFINE_REPEAT' in sentence:
        begin = sentence.find('#DEFINE_REPEAT')
        close = sentence.find(']')
        # Marker + '[' is 15 chars; body is "key,text".
        parts = sentence[begin + 15:close].split(',')
        definitions[parts[0]] = parts[1]
        sentence = sentence.replace(sentence[begin:close + 1], '', 1)
    # Pass 2: expand the uses.
    while '#REPEAT' in sentence:
        begin = sentence.find('#REPEAT')
        # Search for ']' from past the '[' to avoid mismatched brackets.
        close = sentence.find(']', begin + 8)
        key = sentence[begin + 8:close]
        if key in definitions:
            # No count argument: every identical directive is replaced at
            # once, matching the original behaviour.
            sentence = sentence.replace(sentence[begin:close + 1],
                                        str(definitions[key]))
        # NOTE(review): an unknown key leaves the directive in place and this
        # loop never terminates -- behaviour preserved from the original.
    return sentence
def __replace_capitalise(sentence):
    """Consume each #CAPITALISE marker and capitalise the word that follows.

    Fixes two defects in the original: a NameError (it read the undefined
    name ``_sentence`` inside the loop) and an implicit None return when the
    input contained no marker at all.

    :param sentence: template text, or None (returned unchanged).
    :return: the sentence with markers removed and the next letters upper-cased.
    """
    if sentence is None:
        return sentence
    while sentence.find('#CAPITALISE') != -1:
        cap_index = sentence.find('#CAPITALISE')
        # Drop marker + trailing space (12 chars) and upper-case the next
        # character when it is a lowercase letter.
        part1 = sentence[:cap_index]
        part2 = sentence[cap_index + 12:cap_index + 13]
        part3 = sentence[cap_index + 13:]
        if part2 in "abcdefghijklmnopqrstuvwxyz":
            sentence = part1 + part2.capitalize() + part3
        else:
            sentence = part1 + part2 + part3
    return sentence
def __replace_capall(sentence):
    """Upper-case the whole sentence once per #CAPALL marker, removing it."""
    if sentence is None:
        return sentence
    while '#CAPALL' in sentence:
        # Upper-case first (the marker itself is already uppercase), then
        # strip one "#CAPALL " occurrence including its trailing space.
        sentence = sentence.upper()
        sentence = sentence.replace('#CAPALL ', '', 1)
    return sentence
def __check_spaces(sentence):
    """Normalise spacing: single spaces between words, double after ./!/?.

    :param sentence: text to normalise, or None (returns None).
    :return: the re-spaced sentence with no leading/trailing whitespace.
    """
    if sentence is None:
        return None
    terminators = set('.!?')
    rebuilt = []
    for token in sentence.split():
        # A sentence-ending token gets one extra trailing space; combined
        # with the single join-space below this yields two spaces after it.
        if token[-1] in terminators:
            token += ' '
        rebuilt.append(token)
    return ' '.join(rebuilt).strip()
|
Morrolan/surrealism
|
surrealism.py
|
__process_sentence
|
python
|
def __process_sentence(sentence_tuple, counts):
sentence = sentence_tuple[2]
# now we start replacing words one type at a time...
sentence = __replace_verbs(sentence, counts)
sentence = __replace_nouns(sentence, counts)
sentence = ___replace_adjective_maybe(sentence, counts)
sentence = __replace_adjective(sentence, counts)
sentence = __replace_names(sentence, counts)
# here we perform a check to see if we need to use A or AN depending on the
# first letter of the following word...
sentence = __replace_an(sentence)
# replace the new repeating segments
sentence = __replace_repeat(sentence)
# now we will read, choose and substitute each of the RANDOM sentence tuples
sentence = __replace_random(sentence)
# now we are going to choose whether to capitalize words/sentences or not
sentence = __replace_capitalise(sentence)
# here we will choose whether to capitalize all words in the sentence
sentence = __replace_capall(sentence)
# check for appropriate spaces in the correct places.
sentence = __check_spaces(sentence)
return sentence
|
pull the actual sentence from the tuple (tuple contains additional data such as ID)
:param _sentence_tuple:
:param counts:
|
train
|
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L521-L559
|
[
"def __replace_verbs(sentence, counts):\n \"\"\"Lets find and replace all instances of #VERB\n :param _sentence:\n :param counts:\n \"\"\"\n\n if sentence is not None:\n while sentence.find('#VERB') != -1:\n sentence = sentence.replace('#VERB', str(__get_verb(counts)), 1)\n\n if sentence.find('#VERB') == -1:\n return sentence\n return sentence\n else:\n return sentence\n",
"def __replace_nouns(sentence, counts):\n \"\"\"Lets find and replace all instances of #NOUN\n :param _sentence:\n :param counts:\n \"\"\"\n\n if sentence is not None:\n while sentence.find('#NOUN') != -1:\n sentence = sentence.replace('#NOUN', str(__get_noun(counts)), 1)\n\n if sentence.find('#NOUN') == -1:\n return sentence\n\n return sentence\n else:\n return sentence\n",
"def ___replace_adjective_maybe(sentence, counts):\n \"\"\"Lets find and replace all instances of #ADJECTIVE_MAYBE\n :param _sentence:\n :param counts:\n \"\"\"\n\n random_decision = random.randint(0, 1)\n\n if sentence is not None:\n\n while sentence.find('#ADJECTIVE_MAYBE') != -1:\n\n if random_decision % 2 == 0:\n sentence = sentence.replace('#ADJECTIVE_MAYBE',\n ' ' + str(__get_adjective(counts)), 1)\n elif random_decision % 2 != 0:\n sentence = sentence.replace('#ADJECTIVE_MAYBE', '', 1)\n\n if sentence.find('#ADJECTIVE_MAYBE') == -1:\n return sentence\n return sentence\n else:\n return sentence\n",
"def __replace_adjective(sentence, counts):\n \"\"\"Lets find and replace all instances of #ADJECTIVE\n :param _sentence:\n :param counts:\n \"\"\"\n\n if sentence is not None:\n\n while sentence.find('#ADJECTIVE') != -1:\n sentence = sentence.replace('#ADJECTIVE',\n str(__get_adjective(counts)), 1)\n\n if sentence.find('#ADJECTIVE') == -1:\n return sentence\n return sentence\n else:\n return sentence\n",
"def __replace_names(sentence, counts):\n \"\"\"Lets find and replace all instances of #NAME\n :param _sentence:\n :param counts:\n \"\"\"\n\n if sentence is not None:\n\n while sentence.find('#NAME') != -1:\n sentence = sentence.replace('#NAME', str(__get_name(counts)), 1)\n\n if sentence.find('#NAME') == -1:\n return sentence\n return sentence\n else:\n return sentence\n",
"def __replace_an(sentence):\n \"\"\"Lets find and replace all instances of #AN\n This is a little different, as this depends on whether the next\n word starts with a vowel or a consonant.\n\n :param _sentence:\n \"\"\"\n\n if sentence is not None:\n while sentence.find('#AN') != -1:\n an_index = sentence.find('#AN')\n\n if an_index > -1:\n an_index += 4\n\n if sentence[an_index] in 'aeiouAEIOU':\n sentence = sentence.replace('#AN', str('an'), 1)\n else:\n sentence = sentence.replace('#AN', str('a'), 1)\n\n if sentence.find('#AN') == -1:\n return sentence\n return sentence\n else:\n return sentence\n",
"def __replace_repeat(sentence):\n \"\"\"\n Allows the use of repeating random-elements such as in the 'Ten green bottles' type sentences.\n\n :param sentence:\n \"\"\"\n\n ######### USE SENTENCE_ID 47 for testing!\n\n repeat_dict = {}\n\n if sentence is not None:\n\n while sentence.find('#DEFINE_REPEAT') != -1:\n begin_index = sentence.find('#DEFINE_REPEAT')\n start_index = begin_index + 15\n end_index = sentence.find(']')\n\n if sentence.find('#DEFINE_REPEAT') is not None:\n sub_list = sentence[start_index:end_index].split(',')\n choice = sub_list[0]\n repeat_text = sub_list[1]\n repeat_dict[choice] = repeat_text\n sentence = sentence.replace(sentence[begin_index:end_index + 1], '', 1)\n\n while sentence.find('#REPEAT') != -1:\n if sentence.find('#REPEAT') is not None:\n repeat_begin_index = sentence.find('#REPEAT')\n repeat_start_index = repeat_begin_index + 8\n # by searching from repeat_index below we don't encounter dodgy bracket-matching errors.\n repeat_end_index = sentence.find(']', repeat_start_index)\n repeat_index = sentence[repeat_start_index:repeat_end_index]\n\n if repeat_index in repeat_dict:\n sentence = sentence.replace(sentence[repeat_begin_index:repeat_end_index + 1],\n str(repeat_dict[repeat_index]))\n\n if sentence.find('#REPEAT') == -1:\n return sentence\n return sentence\n else:\n return sentence\n",
"def __replace_random(sentence):\n \"\"\"Lets find and replace all instances of #RANDOM\n :param _sentence:\n \"\"\"\n\n sub_list = None\n choice = None\n\n if sentence is not None:\n\n while sentence.find('#RANDOM') != -1:\n\n random_index = sentence.find('#RANDOM')\n start_index = sentence.find('#RANDOM') + 8\n end_index = sentence.find(']')\n\n if sentence.find('#RANDOM') is not None:\n sub_list = sentence[start_index:end_index].split(',')\n\n choice = random.randint(1, int(sub_list[0]))\n # _sub_list[_choice]\n\n to_be_replaced = sentence[random_index:end_index + 1]\n sentence = sentence.replace(to_be_replaced, sub_list[choice], 1)\n\n if sentence.find('#RANDOM') == -1:\n return sentence\n\n return sentence\n else:\n return sentence\n",
"def __replace_capitalise(sentence):\n \"\"\"here we replace all instances of #CAPITALISE and cap the next word.\n ############\n\n #NOTE: Buggy as hell, as it doesn't account for words that are already\n #capitalized\n ############\n\n :param _sentence:\n \"\"\"\n\n if sentence is not None:\n while sentence.find('#CAPITALISE') != -1:\n\n cap_index = _sentence.find('#CAPITALISE')\n part1 = sentence[:cap_index]\n part2 = sentence[cap_index + 12:cap_index + 13]\n part3 = sentence[cap_index + 13:]\n\n if part2 in \"abcdefghijklmnopqrstuvwxyz\":\n sentence = part1 + part2.capitalize() + part3\n else:\n sentence = part1 + part2 + part3\n\n if sentence.find('#CAPITALISE') == -1:\n return sentence\n else:\n return sentence\n",
"def __replace_capall(sentence):\n \"\"\"here we replace all instances of #CAPALL and cap the entire sentence.\n Don't believe that CAPALL is buggy anymore as it forces all uppercase OK?\n\n :param _sentence:\n \"\"\"\n\n # print \"\\nReplacing CAPITALISE: \"\n\n if sentence is not None:\n while sentence.find('#CAPALL') != -1:\n # _cap_index = _sentence.find('#CAPALL')\n sentence = sentence.upper()\n sentence = sentence.replace('#CAPALL ', '', 1)\n\n if sentence.find('#CAPALL') == -1:\n return sentence\n else:\n return sentence\n",
"def __check_spaces(sentence):\n \"\"\"\n Here we check to see that we have the correct number of spaces in the correct locations.\n\n :param _sentence:\n :return:\n \"\"\"\n # We have to run the process multiple times:\n # Once to search for all spaces, and check if there are adjoining spaces;\n # The second time to check for 2 spaces after sentence-ending characters such as . and ! and ?\n\n if sentence is not None:\n\n words = sentence.split()\n\n new_sentence = ''\n\n for (i, word) in enumerate(words):\n\n if word[-1] in set('.!?'):\n word += ' '\n new_word = ''.join(word)\n new_sentence += ' ' + new_word\n\n # remove any trailing whitespace\n new_sentence = new_sentence.strip()\n\n return new_sentence\n"
] |
#!/usr/bin/env python
#############################################################################
# surrealism.py - Surreal sentence and error message generator
# Copyright (C) 2014 Ian Havelock
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#
# This is a derivative work (used with permission) from www.ravenblack.net
# Credit also goes to Kevan Davis on whose work the surrealism generator at
# Ravenblack.net is based on...
#############################################################################
# Public API of the module; duplicate entries removed so `from surrealism
# import *` and introspection tools see each name exactly once.
__all__ = ['show_faults', 'show_sentences', 'getfault', 'getsentence', 'version',
           'sentence_test', 'fault_test']
# IMPORTS ###################################################################
import sqlite3
import random
import pkg_resources
# PARTICULAR IMPORTS ########################################################
from pkg_resources import resource_filename
# CONSTANTS #################################################################
CONN = sqlite3.connect(resource_filename(__name__, 'surrealism.sqlite'))
# VARIABLES #################################################################
# CLASSES ###################################################################
#############################################################################
# EXTERNAL METHODS BELOW
def version():
    """Return the installed version string of the Surrealism package."""
    distribution = pkg_resources.require('surrealism')[0]
    return distribution.version
def showfaults():
    """Backwards-compatible alias for :func:`show_faults`."""
    return show_faults()
def show_faults():
    """Return all valid/active faults ordered by ID so a caller can pick one.

    :return: list of tuples ``(fault_id, fault_template)``.
    """
    cursor = CONN.cursor()
    cursor.execute("select fau_id, fault from surfaults where fau_is_valid = 'y' order by fau_id asc")
    return cursor.fetchall()
def showsentences():
    """Backwards-compatible alias for :func:`show_sentences`."""
    return show_sentences()
def show_sentences():
    """Return all valid/active sentences ordered by ID so a caller can pick one.

    :return: dict mapping sentence ID -> sentence template.
    """
    cursor = CONN.cursor()
    cursor.execute("select sen_id, sentence from sursentences where sen_is_valid = 'y' order by sen_id asc")
    # Build {sen_id: sentence} directly from the result rows.
    return {sen_id: template for (sen_id, template) in cursor.fetchall()}
def faulttest():
    """Backwards-compatible alias for :func:`fault_test`."""
    return fault_test()
def fault_test():
    """Returns 1 instance of each programming fault for testing purposes."""
    counts = __get_table_limits()
    results = []
    outcome = None
    for fid in range(1, counts['max_fau'] + 1):
        fault = __get_fault(counts, fault_id=fid)
        if fault[0] == 'n':
            outcome = "Fault is DISABLED - ignoring..."
        if fault[0] == 'y':
            outcome = __process_sentence(fault, counts)
        # fault[1] is the row's fault id.
        results.append((fault[1], outcome))
    return results
def sentencetest():
    """Backwards-compatible alias for :func:`sentence_test`."""
    return sentence_test()
def sentence_test():
    """Return 1 random version of each sentence to test sentence structure."""
    counts = __get_table_limits()
    results = []
    outcome = None
    for sid in range(1, counts['max_sen'] + 1):
        sentence = __get_sentence(counts, sentence_id=sid)
        if sentence[0] == 'n':
            outcome = "Sentence is DISABLED - ignoring..."
        if sentence[0] == 'y':
            outcome = __process_sentence(sentence, counts)
        # sentence[1] is the row's sentence id.
        results.append((sentence[1], outcome))
    return results
def getfault(fault_id=None):
    """Backwards-compatible alias for :func:`get_fault`."""
    return get_fault(fault_id)
def get_fault(fault_id=None):
    """Retrieve a randomly-generated error message as a unicode string.

    :param fault_id:
        Allows you to optionally specify an integer representing the fault_id
        from the database table. This allows you to retrieve a specific fault
        each time, albeit with different keywords."""
    counts = __get_table_limits()
    result = None
    id_ = 0
    try:
        if isinstance(fault_id, int):
            id_ = fault_id
        elif isinstance(fault_id, float):
            print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
            id_ = round(fault_id)
        else:
            # No usable id supplied -- pick one at random.
            id_ = random.randint(1, counts['max_fau'])
    except ValueError:
        print("ValueError: Incorrect parameter type detected.")
    if id_ <= counts['max_fau']:
        fault = __get_fault(counts, fault_id=id_)
    else:
        # Requested id is out of range; clamp to the highest known id.
        print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_fau'])))
        id_ = counts['max_fau']
        fault = __get_fault(counts, fault_id=id_)
    if fault is not None:
        while fault[0] == 'n':
            # Row is flagged invalid ('n') -- keep fetching until a valid one
            # turns up.  NOTE(review): id_ is always an int here, so the first
            # branch is always taken and a *random* row is fetched each retry
            # -- confirm this fallback is intended.
            if id_ is not None:
                fault = __get_fault(counts, None)
            else:
                fault = __get_fault(counts, id_)
        if fault[0] == 'y':
            result = __process_sentence(fault, counts)
        return result
    else:
        # NOTE(review): implicitly returns None after printing -- callers
        # should be prepared for a None result.
        print('ValueError: _fault cannot be None.')
def getsentence(sentence_id=None):
    """Backwards-compatible alias for :func:`get_sentence`."""
    return get_sentence(sentence_id)
def get_sentence(sentence_id=None):
    """Retrieve a randomly-generated sentence as a unicode string.

    :param sentence_id:
        Allows you to optionally specify an integer representing the sentence_id
        from the database table. This allows you to retrieve a specific
        sentence each time, albeit with different keywords."""
    counts = __get_table_limits()
    result = None
    id_ = 0
    try:
        if isinstance(sentence_id, int):
            id_ = sentence_id
        elif isinstance(sentence_id, float):
            print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
            id_ = round(sentence_id)
        else:
            # No usable id supplied -- pick one at random.
            id_ = random.randint(1, counts['max_sen'])
    except ValueError:
        print("ValueError: Incorrect parameter type detected.")
    if id_ <= counts['max_sen']:
        sentence = __get_sentence(counts, sentence_id=id_)
    else:
        # Requested id is out of range; clamp to the highest known id.
        print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_sen'])))
        id_ = counts['max_sen']
        sentence = __get_sentence(counts, sentence_id=id_)
    if sentence is not None:
        while sentence[0] == 'n':
            if id_ is not None:
                # here we deliberately pass 'None' to __get_sentence as it will
                # then fall back to choosing a random id.
                sentence = __get_sentence(counts, None)
            else:
                sentence = __get_sentence(counts, id_)
        if sentence[0] == 'y':
            result = __process_sentence(sentence, counts)
        return result
    else:
        # NOTE(review): implicitly returns None after printing -- callers
        # should be prepared for a None result.
        print('ValueError: _sentence cannot be None.')
#############################################################################
# INTERNAL METHODS BELOW
def __get_fault(counts, fault_id=None):
"""Let's fetch a random fault that we then need to substitute bits of...
:param counts:
:param fault_id:
"""
# First of all we need a cursor and a query to retrieve our ID's
cursor = CONN.cursor()
check_query = "select fau_id from surfaults"
# Now we fetch the result of the query and save it into check_result
cursor.execute(check_query)
check_result = cursor.fetchall()
# declare an empty list to be populated below
id_list = []
id_to_fetch = None
for row in check_result:
id_list.append(row[0])
if fault_id is not None:
if type(fault_id) is int:
id_to_fetch = fault_id
else:
id_to_fetch = random.randint(1, counts['max_fau'])
while id_to_fetch not in id_list:
id_to_fetch = random.randint(1, counts['max_fau'])
query = ("select * from surfaults where fau_id = {0}".format(id_to_fetch))
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result
def __get_sentence(counts, sentence_id=None):
"""Let's fetch a random sentence that we then need to substitute bits of...
@
:param counts:
:param sentence_id:
"""
# First of all we need a cursor and a query to retrieve our ID's
cursor = CONN.cursor()
check_query = "select sen_id from sursentences"
# Now we fetch the result of the query and save it into check_result
cursor.execute(check_query)
check_result = cursor.fetchall()
# declare an empty list to be populated below
id_list = []
id_to_fetch = None
# Populate the id_list variable with all of the ID's we retrieved from the database query.
for row in check_result:
id_list.append(row[0])
if sentence_id is not None:
if type(sentence_id) is int:
id_to_fetch = sentence_id
else:
id_to_fetch = random.randint(1, counts['max_sen'])
while id_to_fetch not in id_list:
id_to_fetch = random.randint(1, counts['max_sen'])
query = ("select * from sursentences where sen_id = {0}".format(id_to_fetch))
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result
def __get_verb(counts):
"""Let's fetch a VERB
:param counts:
"""
cursor = CONN.cursor()
check_query = "select verb_id from surverbs"
cursor.execute(check_query)
check_result = cursor.fetchall()
id_list = []
for row in check_result:
id_list.append(row[0])
rand = random.randint(1, counts['max_verb'])
while rand not in id_list:
rand = random.randint(1, counts['max_verb'])
query = "select * from surverbs where verb_id = {0}".format(rand)
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result[1]
def __get_noun(counts):
"""Let's fetch a NOUN from the database...
:param counts:
"""
cursor = CONN.cursor()
check_query = "select noun_id from surnouns"
cursor.execute(check_query)
check_result = cursor.fetchall()
id_list = []
for row in check_result:
id_list.append(row[0])
rand = random.randint(1, counts['max_nouns'])
while rand not in id_list:
rand = random.randint(1, counts['max_nouns'])
query = "select * from surnouns where noun_id = {0}".format(rand)
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result[1]
def __get_adjective(counts):
"""Let's fetch an ADJECTIVE from the database...
:param counts:
"""
cursor = CONN.cursor()
check_query = "select adj_id from suradjs"
cursor.execute(check_query)
check_result = cursor.fetchall()
id_list = []
for row in check_result:
id_list.append(row[0])
rand = random.randint(1, counts['max_adjectives'])
while rand not in id_list:
rand = random.randint(1, counts['max_adjectives'])
query = "select * from suradjs where adj_id = {0}".format(rand)
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result[1]
def __get_name(counts):
"""Let's fetch a NAME from the database...
:param counts:"""
cursor = CONN.cursor()
check_query = "select name_id from surnames"
cursor.execute(check_query)
check_result = cursor.fetchall()
id_list = []
for row in check_result:
id_list.append(row[0])
rand = random.randint(1, counts['max_names'])
while rand not in id_list:
rand = random.randint(1, counts['max_names'])
query = "select * from surnames where name_id = {0}".format(rand)
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result[1]
def __get_table_limits():
    """Count the rows of each content table.

    :return: dict mapping 'max_adjectives', 'max_names', 'max_nouns',
        'max_sen', 'max_fau' and 'max_verb' to the row count of the
        corresponding table; used as upper bounds for random ID picks.
    """
    # Consistency fix: the original seeded the dict with 'max_sentences',
    # 'max_faults' and 'max_verbs' but then assigned 'max_sen', 'max_fau'
    # and 'max_verb', leaving three stale None entries behind.
    queries = {
        'max_adjectives': 'SELECT count(*) FROM suradjs',
        'max_names': 'SELECT count(*) FROM surnames',
        'max_nouns': 'SELECT count(*) FROM surnouns',
        'max_sen': 'SELECT count(*) FROM sursentences',
        'max_fau': 'SELECT count(*) FROM surfaults',
        'max_verb': 'SELECT count(*) FROM surverbs',
    }
    cursor = CONN.cursor()
    table_counts = {}
    for key, query in queries.items():
        cursor.execute(query)
        # count(*) always yields exactly one row with one column
        table_counts[key] = cursor.fetchone()[0]
    return table_counts
def __replace_verbs(sentence, counts):
"""Lets find and replace all instances of #VERB
:param _sentence:
:param counts:
"""
if sentence is not None:
while sentence.find('#VERB') != -1:
sentence = sentence.replace('#VERB', str(__get_verb(counts)), 1)
if sentence.find('#VERB') == -1:
return sentence
return sentence
else:
return sentence
def __replace_nouns(sentence, counts):
"""Lets find and replace all instances of #NOUN
:param _sentence:
:param counts:
"""
if sentence is not None:
while sentence.find('#NOUN') != -1:
sentence = sentence.replace('#NOUN', str(__get_noun(counts)), 1)
if sentence.find('#NOUN') == -1:
return sentence
return sentence
else:
return sentence
def ___replace_adjective_maybe(sentence, counts):
"""Lets find and replace all instances of #ADJECTIVE_MAYBE
:param _sentence:
:param counts:
"""
random_decision = random.randint(0, 1)
if sentence is not None:
while sentence.find('#ADJECTIVE_MAYBE') != -1:
if random_decision % 2 == 0:
sentence = sentence.replace('#ADJECTIVE_MAYBE',
' ' + str(__get_adjective(counts)), 1)
elif random_decision % 2 != 0:
sentence = sentence.replace('#ADJECTIVE_MAYBE', '', 1)
if sentence.find('#ADJECTIVE_MAYBE') == -1:
return sentence
return sentence
else:
return sentence
def __replace_adjective(sentence, counts):
"""Lets find and replace all instances of #ADJECTIVE
:param _sentence:
:param counts:
"""
if sentence is not None:
while sentence.find('#ADJECTIVE') != -1:
sentence = sentence.replace('#ADJECTIVE',
str(__get_adjective(counts)), 1)
if sentence.find('#ADJECTIVE') == -1:
return sentence
return sentence
else:
return sentence
def __replace_names(sentence, counts):
"""Lets find and replace all instances of #NAME
:param _sentence:
:param counts:
"""
if sentence is not None:
while sentence.find('#NAME') != -1:
sentence = sentence.replace('#NAME', str(__get_name(counts)), 1)
if sentence.find('#NAME') == -1:
return sentence
return sentence
else:
return sentence
def __replace_an(sentence):
"""Lets find and replace all instances of #AN
This is a little different, as this depends on whether the next
word starts with a vowel or a consonant.
:param _sentence:
"""
if sentence is not None:
while sentence.find('#AN') != -1:
an_index = sentence.find('#AN')
if an_index > -1:
an_index += 4
if sentence[an_index] in 'aeiouAEIOU':
sentence = sentence.replace('#AN', str('an'), 1)
else:
sentence = sentence.replace('#AN', str('a'), 1)
if sentence.find('#AN') == -1:
return sentence
return sentence
else:
return sentence
def __replace_random(sentence):
"""Lets find and replace all instances of #RANDOM
:param _sentence:
"""
sub_list = None
choice = None
if sentence is not None:
while sentence.find('#RANDOM') != -1:
random_index = sentence.find('#RANDOM')
start_index = sentence.find('#RANDOM') + 8
end_index = sentence.find(']')
if sentence.find('#RANDOM') is not None:
sub_list = sentence[start_index:end_index].split(',')
choice = random.randint(1, int(sub_list[0]))
# _sub_list[_choice]
to_be_replaced = sentence[random_index:end_index + 1]
sentence = sentence.replace(to_be_replaced, sub_list[choice], 1)
if sentence.find('#RANDOM') == -1:
return sentence
return sentence
else:
return sentence
def __replace_repeat(sentence):
"""
Allows the use of repeating random-elements such as in the 'Ten green bottles' type sentences.
:param sentence:
"""
######### USE SENTENCE_ID 47 for testing!
repeat_dict = {}
if sentence is not None:
while sentence.find('#DEFINE_REPEAT') != -1:
begin_index = sentence.find('#DEFINE_REPEAT')
start_index = begin_index + 15
end_index = sentence.find(']')
if sentence.find('#DEFINE_REPEAT') is not None:
sub_list = sentence[start_index:end_index].split(',')
choice = sub_list[0]
repeat_text = sub_list[1]
repeat_dict[choice] = repeat_text
sentence = sentence.replace(sentence[begin_index:end_index + 1], '', 1)
while sentence.find('#REPEAT') != -1:
if sentence.find('#REPEAT') is not None:
repeat_begin_index = sentence.find('#REPEAT')
repeat_start_index = repeat_begin_index + 8
# by searching from repeat_index below we don't encounter dodgy bracket-matching errors.
repeat_end_index = sentence.find(']', repeat_start_index)
repeat_index = sentence[repeat_start_index:repeat_end_index]
if repeat_index in repeat_dict:
sentence = sentence.replace(sentence[repeat_begin_index:repeat_end_index + 1],
str(repeat_dict[repeat_index]))
if sentence.find('#REPEAT') == -1:
return sentence
return sentence
else:
return sentence
def __replace_capitalise(sentence):
    """Replace each '#CAPITALISE ' marker and capitalise the word after it.

    :param sentence: template string, possibly containing markers; may be
        None, in which case it is returned unchanged.
    :return: the sentence with every marker removed and the following letter
        upper-cased (characters outside a-z are left untouched).
    """
    if sentence is None:
        return sentence
    while sentence.find('#CAPITALISE') != -1:
        # Bug fix: the original referenced the undefined name '_sentence'
        # here, raising NameError whenever a marker was actually present.
        cap_index = sentence.find('#CAPITALISE')
        before = sentence[:cap_index]
        # '#CAPITALISE ' (including the trailing space) is 12 characters,
        # so the first letter of the next word sits at cap_index + 12.
        first_char = sentence[cap_index + 12:cap_index + 13]
        rest = sentence[cap_index + 13:]
        if first_char in "abcdefghijklmnopqrstuvwxyz":
            sentence = before + first_char.capitalize() + rest
        else:
            sentence = before + first_char + rest
    # Bug fix: the original fell off the end and returned None whenever the
    # input contained no '#CAPITALISE' marker at all.
    return sentence
def __replace_capall(sentence):
    """Replace each '#CAPALL ' marker and upper-case the whole sentence.

    :param sentence: template string; may be None, in which case it is
        returned unchanged.
    :return: the upper-cased sentence with markers removed, or the input
        unchanged when no marker is present.
    """
    if sentence is None:
        return sentence
    while sentence.find('#CAPALL') != -1:
        # Upper-casing first is safe: the marker itself is already uppercase.
        sentence = sentence.upper()
        sentence = sentence.replace('#CAPALL ', '', 1)
    # Bug fix: the original fell off the end and returned None whenever the
    # input contained no '#CAPALL' marker at all.
    return sentence
def __check_spaces(sentence):
"""
Here we check to see that we have the correct number of spaces in the correct locations.
:param _sentence:
:return:
"""
# We have to run the process multiple times:
# Once to search for all spaces, and check if there are adjoining spaces;
# The second time to check for 2 spaces after sentence-ending characters such as . and ! and ?
if sentence is not None:
words = sentence.split()
new_sentence = ''
for (i, word) in enumerate(words):
if word[-1] in set('.!?'):
word += ' '
new_word = ''.join(word)
new_sentence += ' ' + new_word
# remove any trailing whitespace
new_sentence = new_sentence.strip()
return new_sentence
|
Morrolan/surrealism
|
surrealism.py
|
__replace_verbs
|
python
|
def __replace_verbs(sentence, counts):
if sentence is not None:
while sentence.find('#VERB') != -1:
sentence = sentence.replace('#VERB', str(__get_verb(counts)), 1)
if sentence.find('#VERB') == -1:
return sentence
return sentence
else:
return sentence
|
Lets find and replace all instances of #VERB
:param _sentence:
:param counts:
|
train
|
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L562-L576
| null |
#!/usr/bin/env python
#############################################################################
# surrealism.py - Surreal sentence and error message generator
# Copyright (C) 2014 Ian Havelock
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#
# This is a derivative work (used with permission) from www.ravenblack.net
# Credit also goes to Kevan Davis on whose work the surrealism generator at
# Ravenblack.net is based on...
#############################################################################
__all__ = ['show_faults', 'show_sentences', 'getfault', 'getsentence', 'version',
'sentence_test', 'fault_test', 'show_sentences', 'show_faults']
# IMPORTS ###################################################################
import sqlite3
import random
import pkg_resources
# PARTICULAR IMPORTS ########################################################
from pkg_resources import resource_filename
# CONSTANTS #################################################################
CONN = sqlite3.connect(resource_filename(__name__, 'surrealism.sqlite'))
# VARIABLES #################################################################
# CLASSES ###################################################################
#############################################################################
# EXTERNAL METHODS BELOW
def version():
    """Return the installed version string of the Surrealism package."""
    distribution = pkg_resources.require('surrealism')[0]
    return distribution.version
def showfaults():
    """Backwards-compatible alias for :func:`show_faults`."""
    return show_faults()
def show_faults():
    """List every active fault template so callers can pick a specific ID.

    :return: list of (fault_id, fault_template) tuples ordered by ID.
    """
    cursor = CONN.cursor()
    cursor.execute("select fau_id, fault from surfaults where fau_is_valid = 'y' order by fau_id asc")
    return cursor.fetchall()
def showsentences():
    """Backwards-compatible alias for :func:`show_sentences`."""
    return show_sentences()
def show_sentences():
    """List every active sentence template so callers can pick a specific ID.

    :return: dict mapping sentence ID to its template string.
    """
    cursor = CONN.cursor()
    cursor.execute("select sen_id, sentence from sursentences where sen_is_valid = 'y' order by sen_id asc")
    return {sen_id: template for sen_id, template in cursor.fetchall()}
def faulttest():
    """Backwards-compatible alias for :func:`fault_test`."""
    return fault_test()
def fault_test():
    """Render one instance of every fault template, for testing purposes.

    :return: list of (fault_id, rendered_text_or_notice) tuples.
    """
    counts = __get_table_limits()
    rendered = None
    results = []
    for fault_number in range(1, counts['max_fau'] + 1):
        fault = __get_fault(counts, fault_id=fault_number)
        if fault[0] == 'n':
            rendered = "Fault is DISABLED - ignoring..."
        if fault[0] == 'y':
            rendered = __process_sentence(fault, counts)
        results.append((fault[1], rendered))
    return results
def sentencetest():
    """Backwards-compatible alias for :func:`sentence_test`."""
    return sentence_test()
def sentence_test():
    """Render one random instance of every sentence template, for testing.

    :return: list of (sentence_id, rendered_text_or_notice) tuples.
    """
    counts = __get_table_limits()
    rendered = None
    results = []
    for sentence_number in range(1, counts['max_sen'] + 1):
        sentence = __get_sentence(counts, sentence_id=sentence_number)
        if sentence[0] == 'n':
            rendered = "Sentence is DISABLED - ignoring..."
        if sentence[0] == 'y':
            rendered = __process_sentence(sentence, counts)
        results.append((sentence[1], rendered))
    return results
def getfault(fault_id=None):
    """Backwards-compatible alias for :func:`get_fault`."""
    return get_fault(fault_id)
def get_fault(fault_id=None):
    """Retrieve a randomly-generated error message as a unicode string.

    :param fault_id:
        Allows you to optionally specify an integer representing the fault_id
        from the database table. This allows you to retrieve a specific fault
        each time, albeit with different keywords. Floats are rounded; any
        other type falls back to a random ID.
    :return: the rendered fault text, or None if rendering failed."""
    counts = __get_table_limits()
    result = None
    id_ = 0
    try:
        # Coerce the requested ID: ints pass through, floats are rounded,
        # anything else (including None) picks a random fault.
        if isinstance(fault_id, int):
            id_ = fault_id
        elif isinstance(fault_id, float):
            print("""ValueError: Floating point number detected.
            Rounding number to 0 decimal places.""")
            id_ = round(fault_id)
        else:
            id_ = random.randint(1, counts['max_fau'])
    except ValueError:
        print("ValueError: Incorrect parameter type detected.")
    # Clamp out-of-range requests to the highest known fault ID.
    if id_ <= counts['max_fau']:
        fault = __get_fault(counts, fault_id=id_)
    else:
        print("""ValueError: Parameter integer is too high.
        Maximum permitted value is {0}.""".format(str(counts['max_fau'])))
        id_ = counts['max_fau']
        fault = __get_fault(counts, fault_id=id_)
    if fault is not None:
        # Re-roll until an enabled ('y') fault row is drawn; disabled rows
        # are flagged 'n' in column 0.
        while fault[0] == 'n':
            if id_ is not None:
                fault = __get_fault(counts, None)
            else:
                fault = __get_fault(counts, id_)
        if fault[0] == 'y':
            result = __process_sentence(fault, counts)
        return result
    else:
        print('ValueError: _fault cannot be None.')
def getsentence(sentence_id=None):
    """Backwards-compatible alias for :func:`get_sentence`."""
    return get_sentence(sentence_id)
def get_sentence(sentence_id=None):
    """Retrieve a randomly-generated sentence as a unicode string.

    :param sentence_id:
        Allows you to optionally specify an integer representing the sentence_id
        from the database table. This allows you to retrieve a specific
        sentence each time, albeit with different keywords. Floats are
        rounded; any other type falls back to a random ID.
    :return: the rendered sentence text, or None if rendering failed."""
    counts = __get_table_limits()
    result = None
    id_ = 0
    try:
        # Coerce the requested ID: ints pass through, floats are rounded,
        # anything else (including None) picks a random sentence.
        if isinstance(sentence_id, int):
            id_ = sentence_id
        elif isinstance(sentence_id, float):
            print("""ValueError: Floating point number detected.
            Rounding number to 0 decimal places.""")
            id_ = round(sentence_id)
        else:
            id_ = random.randint(1, counts['max_sen'])
    except ValueError:
        print("ValueError: Incorrect parameter type detected.")
    # Clamp out-of-range requests to the highest known sentence ID.
    if id_ <= counts['max_sen']:
        sentence = __get_sentence(counts, sentence_id=id_)
    else:
        print("""ValueError: Parameter integer is too high.
        Maximum permitted value is {0}.""".format(str(counts['max_sen'])))
        id_ = counts['max_sen']
        sentence = __get_sentence(counts, sentence_id=id_)
    if sentence is not None:
        # Re-roll until an enabled ('y') sentence row is drawn; disabled
        # rows are flagged 'n' in column 0.
        while sentence[0] == 'n':
            if id_ is not None:
                # deliberately pass None so a fresh random sentence is drawn
                sentence = __get_sentence(counts, None)
            else:
                sentence = __get_sentence(counts, id_)
        if sentence[0] == 'y':
            result = __process_sentence(sentence, counts)
        return result
    else:
        print('ValueError: _sentence cannot be None.')
#############################################################################
# INTERNAL METHODS BELOW
def __get_fault(counts, fault_id=None):
    """Fetch one fault row from the surfaults table.

    :param counts: table-size dict from __get_table_limits().
    :param fault_id: optional integer row ID; any other value (or an ID not
        present in the table) is replaced by a random valid ID.
    :return: the full row tuple for the chosen fault.
    """
    # First of all we need a cursor and a query to retrieve our ID's
    cursor = CONN.cursor()
    check_query = "select fau_id from surfaults"
    # Now we fetch the result of the query and save it into check_result
    cursor.execute(check_query)
    check_result = cursor.fetchall()
    # declare an empty list to be populated below
    id_list = []
    id_to_fetch = None
    for row in check_result:
        id_list.append(row[0])
    if fault_id is not None:
        if type(fault_id) is int:
            id_to_fetch = fault_id
    else:
        id_to_fetch = random.randint(1, counts['max_fau'])
    # Re-roll until the candidate ID actually exists in the table (also
    # covers the None case when fault_id was a non-int).
    while id_to_fetch not in id_list:
        id_to_fetch = random.randint(1, counts['max_fau'])
    # id_to_fetch is guaranteed to be a known integer ID at this point
    query = ("select * from surfaults where fau_id = {0}".format(id_to_fetch))
    cursor.execute(query)
    result = cursor.fetchone()
    # cursor.close()
    return result
def __get_sentence(counts, sentence_id=None):
    """Fetch one sentence row from the sursentences table.

    :param counts: table-size dict from __get_table_limits().
    :param sentence_id: optional integer row ID; any other value (or an ID
        not present in the table) is replaced by a random valid ID.
    :return: the full row tuple for the chosen sentence.
    """
    # First of all we need a cursor and a query to retrieve our ID's
    cursor = CONN.cursor()
    check_query = "select sen_id from sursentences"
    # Now we fetch the result of the query and save it into check_result
    cursor.execute(check_query)
    check_result = cursor.fetchall()
    # declare an empty list to be populated below
    id_list = []
    id_to_fetch = None
    # Populate the id_list variable with all of the ID's we retrieved from the database query.
    for row in check_result:
        id_list.append(row[0])
    if sentence_id is not None:
        if type(sentence_id) is int:
            id_to_fetch = sentence_id
    else:
        id_to_fetch = random.randint(1, counts['max_sen'])
    # Re-roll until the candidate ID actually exists in the table (also
    # covers the None case when sentence_id was a non-int).
    while id_to_fetch not in id_list:
        id_to_fetch = random.randint(1, counts['max_sen'])
    query = ("select * from sursentences where sen_id = {0}".format(id_to_fetch))
    cursor.execute(query)
    result = cursor.fetchone()
    # cursor.close()
    return result
def __get_verb(counts):
    """Fetch a random verb string from the surverbs table.

    :param counts: table-size dict from __get_table_limits().
    :return: the verb text (column 1 of the chosen row).
    """
    cursor = CONN.cursor()
    check_query = "select verb_id from surverbs"
    cursor.execute(check_query)
    check_result = cursor.fetchall()
    id_list = []
    for row in check_result:
        id_list.append(row[0])
    rand = random.randint(1, counts['max_verb'])
    # Re-roll until the candidate ID actually exists (IDs may be sparse).
    while rand not in id_list:
        rand = random.randint(1, counts['max_verb'])
    query = "select * from surverbs where verb_id = {0}".format(rand)
    cursor.execute(query)
    result = cursor.fetchone()
    # cursor.close()
    return result[1]
def __get_noun(counts):
    """Fetch a random noun string from the surnouns table.

    :param counts: table-size dict from __get_table_limits().
    :return: the noun text (column 1 of the chosen row).
    """
    cursor = CONN.cursor()
    check_query = "select noun_id from surnouns"
    cursor.execute(check_query)
    check_result = cursor.fetchall()
    id_list = []
    for row in check_result:
        id_list.append(row[0])
    rand = random.randint(1, counts['max_nouns'])
    # Re-roll until the candidate ID actually exists (IDs may be sparse).
    while rand not in id_list:
        rand = random.randint(1, counts['max_nouns'])
    query = "select * from surnouns where noun_id = {0}".format(rand)
    cursor.execute(query)
    result = cursor.fetchone()
    # cursor.close()
    return result[1]
def __get_adjective(counts):
    """Fetch a random adjective string from the suradjs table.

    :param counts: table-size dict from __get_table_limits().
    :return: the adjective text (column 1 of the chosen row).
    """
    cursor = CONN.cursor()
    check_query = "select adj_id from suradjs"
    cursor.execute(check_query)
    check_result = cursor.fetchall()
    id_list = []
    for row in check_result:
        id_list.append(row[0])
    rand = random.randint(1, counts['max_adjectives'])
    # Re-roll until the candidate ID actually exists (IDs may be sparse).
    while rand not in id_list:
        rand = random.randint(1, counts['max_adjectives'])
    query = "select * from suradjs where adj_id = {0}".format(rand)
    cursor.execute(query)
    result = cursor.fetchone()
    # cursor.close()
    return result[1]
def __get_name(counts):
    """Fetch a random name string from the surnames table.

    :param counts: table-size dict from __get_table_limits().
    :return: the name text (column 1 of the chosen row).
    """
    cursor = CONN.cursor()
    check_query = "select name_id from surnames"
    cursor.execute(check_query)
    check_result = cursor.fetchall()
    id_list = []
    for row in check_result:
        id_list.append(row[0])
    rand = random.randint(1, counts['max_names'])
    # Re-roll until the candidate ID actually exists (IDs may be sparse).
    while rand not in id_list:
        rand = random.randint(1, counts['max_names'])
    query = "select * from surnames where name_id = {0}".format(rand)
    cursor.execute(query)
    result = cursor.fetchone()
    # cursor.close()
    return result[1]
def __get_table_limits():
    """Count the rows of each content table.

    :return: dict mapping 'max_adjectives', 'max_names', 'max_nouns',
        'max_sen', 'max_fau' and 'max_verb' to the row count of the
        corresponding table; used as upper bounds for random ID picks.
    """
    # Consistency fix: the original seeded the dict with 'max_sentences',
    # 'max_faults' and 'max_verbs' but then assigned 'max_sen', 'max_fau'
    # and 'max_verb', leaving three stale None entries behind.
    queries = {
        'max_adjectives': 'SELECT count(*) FROM suradjs',
        'max_names': 'SELECT count(*) FROM surnames',
        'max_nouns': 'SELECT count(*) FROM surnouns',
        'max_sen': 'SELECT count(*) FROM sursentences',
        'max_fau': 'SELECT count(*) FROM surfaults',
        'max_verb': 'SELECT count(*) FROM surverbs',
    }
    cursor = CONN.cursor()
    table_counts = {}
    for key, query in queries.items():
        cursor.execute(query)
        # count(*) always yields exactly one row with one column
        table_counts[key] = cursor.fetchone()[0]
    return table_counts
def __process_sentence(sentence_tuple, counts):
    """Render a template row into final text by expanding all markers.

    :param sentence_tuple: a full row from sursentences/surfaults; the
        template text is in column 2.
    :param counts: table-size dict from __get_table_limits().
    :return: the fully-substituted sentence string.
    """
    # pull the actual sentence from the tuple (tuple contains additional data such as ID)
    sentence = sentence_tuple[2]
    # now we start replacing words one type at a time...
    sentence = __replace_verbs(sentence, counts)
    sentence = __replace_nouns(sentence, counts)
    sentence = ___replace_adjective_maybe(sentence, counts)
    sentence = __replace_adjective(sentence, counts)
    sentence = __replace_names(sentence, counts)
    # here we perform a check to see if we need to use A or AN depending on the
    # first letter of the following word...
    sentence = __replace_an(sentence)
    # replace the new repeating segments
    sentence = __replace_repeat(sentence)
    # now we will read, choose and substitute each of the RANDOM sentence tuples
    sentence = __replace_random(sentence)
    # now we are going to choose whether to capitalize words/sentences or not
    sentence = __replace_capitalise(sentence)
    # here we will choose whether to capitalize all words in the sentence
    sentence = __replace_capall(sentence)
    # check for appropriate spaces in the correct places.
    sentence = __check_spaces(sentence)
    return sentence
def __replace_nouns(sentence, counts):
    """Substitute every '#NOUN' marker with a random noun.

    :param sentence: template string; may be None (returned unchanged).
    :param counts: table-size dict from __get_table_limits().
    """
    if sentence is None:
        return sentence
    while '#NOUN' in sentence:
        sentence = sentence.replace('#NOUN', str(__get_noun(counts)), 1)
    return sentence
def ___replace_adjective_maybe(sentence, counts):
    """Randomly expand or drop every '#ADJECTIVE_MAYBE' marker.

    A single coin flip decides for the whole sentence: either every marker
    becomes ' ' + a random adjective, or every marker is deleted.

    :param sentence: template string; may be None (returned unchanged).
    :param counts: table-size dict from __get_table_limits().
    """
    coin = random.randint(0, 1)
    if sentence is None:
        return sentence
    while '#ADJECTIVE_MAYBE' in sentence:
        if coin % 2 == 0:
            replacement = ' ' + str(__get_adjective(counts))
        else:
            replacement = ''
        sentence = sentence.replace('#ADJECTIVE_MAYBE', replacement, 1)
    return sentence
def __replace_adjective(sentence, counts):
    """Substitute every '#ADJECTIVE' marker with a random adjective.

    :param sentence: template string; may be None (returned unchanged).
    :param counts: table-size dict from __get_table_limits().
    """
    if sentence is None:
        return sentence
    while '#ADJECTIVE' in sentence:
        sentence = sentence.replace('#ADJECTIVE', str(__get_adjective(counts)), 1)
    return sentence
def __replace_names(sentence, counts):
    """Substitute every '#NAME' marker with a random name.

    :param sentence: template string; may be None (returned unchanged).
    :param counts: table-size dict from __get_table_limits().
    """
    if sentence is None:
        return sentence
    while '#NAME' in sentence:
        sentence = sentence.replace('#NAME', str(__get_name(counts)), 1)
    return sentence
def __replace_an(sentence):
    """Substitute each '#AN' marker with 'a' or 'an'.

    The article depends on the first letter of the word that follows the
    marker: a vowel yields 'an', anything else yields 'a'.

    :param sentence: template string; may be None (returned unchanged).
    """
    if sentence is None:
        return sentence
    while '#AN' in sentence:
        # '#AN ' is four characters, so the next word starts 4 past the marker
        next_letter = sentence[sentence.find('#AN') + 4]
        article = 'an' if next_letter in 'aeiouAEIOU' else 'a'
        sentence = sentence.replace('#AN', article, 1)
    return sentence
def __replace_random(sentence):
    """Substitute each '#RANDOM[n,opt1,...,optn]' marker with one option.

    The first bracketed element is the option count n; one of the following
    n options is chosen uniformly at random.

    :param sentence: template string; may be None (returned unchanged).
    """
    sub_list = None
    choice = None
    if sentence is not None:
        while sentence.find('#RANDOM') != -1:
            random_index = sentence.find('#RANDOM')
            # skip past '#RANDOM[' (8 characters) to the start of the options
            start_index = sentence.find('#RANDOM') + 8
            end_index = sentence.find(']')
            if sentence.find('#RANDOM') is not None:
                sub_list = sentence[start_index:end_index].split(',')
                # sub_list[0] is the option count; options occupy indices 1..n
                choice = random.randint(1, int(sub_list[0]))
                # _sub_list[_choice]
                to_be_replaced = sentence[random_index:end_index + 1]
                sentence = sentence.replace(to_be_replaced, sub_list[choice], 1)
            if sentence.find('#RANDOM') == -1:
                return sentence
        return sentence
    else:
        return sentence
def __replace_repeat(sentence):
    """Expand repeating elements ('Ten green bottles'-style sentences).

    First every '#DEFINE_REPEAT[key,text]' marker is consumed into a lookup
    table, then every '#REPEAT[key]' marker is replaced with the stored text.

    :param sentence: template string; may be None (returned unchanged).
    """
    ######### USE SENTENCE_ID 47 for testing!
    repeat_dict = {}
    if sentence is not None:
        # Pass 1: harvest the definitions and strip them from the sentence.
        while sentence.find('#DEFINE_REPEAT') != -1:
            begin_index = sentence.find('#DEFINE_REPEAT')
            start_index = begin_index + 15
            end_index = sentence.find(']')
            if sentence.find('#DEFINE_REPEAT') is not None:
                sub_list = sentence[start_index:end_index].split(',')
                choice = sub_list[0]
                repeat_text = sub_list[1]
                repeat_dict[choice] = repeat_text
                sentence = sentence.replace(sentence[begin_index:end_index + 1], '', 1)
        # Pass 2: substitute every reference with its stored text.
        # NOTE(review): a '#REPEAT' with an unknown key would loop forever
        # here — presumably the templates guarantee matching keys; verify.
        while sentence.find('#REPEAT') != -1:
            if sentence.find('#REPEAT') is not None:
                repeat_begin_index = sentence.find('#REPEAT')
                repeat_start_index = repeat_begin_index + 8
                # by searching from repeat_index below we don't encounter dodgy bracket-matching errors.
                repeat_end_index = sentence.find(']', repeat_start_index)
                repeat_index = sentence[repeat_start_index:repeat_end_index]
                if repeat_index in repeat_dict:
                    sentence = sentence.replace(sentence[repeat_begin_index:repeat_end_index + 1],
                                                str(repeat_dict[repeat_index]))
            if sentence.find('#REPEAT') == -1:
                return sentence
        return sentence
    else:
        return sentence
def __replace_capitalise(sentence):
    """Replace each '#CAPITALISE ' marker and capitalise the word after it.

    :param sentence: template string, possibly containing markers; may be
        None, in which case it is returned unchanged.
    :return: the sentence with every marker removed and the following letter
        upper-cased (characters outside a-z are left untouched).
    """
    if sentence is None:
        return sentence
    while sentence.find('#CAPITALISE') != -1:
        # Bug fix: the original referenced the undefined name '_sentence'
        # here, raising NameError whenever a marker was actually present.
        cap_index = sentence.find('#CAPITALISE')
        before = sentence[:cap_index]
        # '#CAPITALISE ' (including the trailing space) is 12 characters,
        # so the first letter of the next word sits at cap_index + 12.
        first_char = sentence[cap_index + 12:cap_index + 13]
        rest = sentence[cap_index + 13:]
        if first_char in "abcdefghijklmnopqrstuvwxyz":
            sentence = before + first_char.capitalize() + rest
        else:
            sentence = before + first_char + rest
    # Bug fix: the original fell off the end and returned None whenever the
    # input contained no '#CAPITALISE' marker at all.
    return sentence
def __replace_capall(sentence):
    """Replace each '#CAPALL ' marker and upper-case the whole sentence.

    :param sentence: template string; may be None, in which case it is
        returned unchanged.
    :return: the upper-cased sentence with markers removed, or the input
        unchanged when no marker is present.
    """
    if sentence is None:
        return sentence
    while sentence.find('#CAPALL') != -1:
        # Upper-casing first is safe: the marker itself is already uppercase.
        sentence = sentence.upper()
        sentence = sentence.replace('#CAPALL ', '', 1)
    # Bug fix: the original fell off the end and returned None whenever the
    # input contained no '#CAPALL' marker at all.
    return sentence
def __check_spaces(sentence):
    """Normalise spacing: single spaces between words, double after .!?

    :param sentence: rendered sentence; may be None (returns None).
    :return: the re-spaced sentence with no leading/trailing whitespace.
    """
    if sentence is None:
        return None
    pieces = []
    for word in sentence.split():
        # Sentence-ending punctuation gets an extra trailing space, which
        # combined with the join below yields two spaces after it.
        if word.endswith(('.', '!', '?')):
            word += ' '
        pieces.append(word)
    return ' '.join(pieces).strip()
|
Morrolan/surrealism
|
surrealism.py
|
__replace_nouns
|
python
|
def __replace_nouns(sentence, counts):
if sentence is not None:
while sentence.find('#NOUN') != -1:
sentence = sentence.replace('#NOUN', str(__get_noun(counts)), 1)
if sentence.find('#NOUN') == -1:
return sentence
return sentence
else:
return sentence
|
Lets find and replace all instances of #NOUN
:param _sentence:
:param counts:
|
train
|
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L579-L594
| null |
#!/usr/bin/env python
#############################################################################
# surrealism.py - Surreal sentence and error message generator
# Copyright (C) 2014 Ian Havelock
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#
# This is a derivative work (used with permission) from www.ravenblack.net
# Credit also goes to Kevan Davis on whose work the surrealism generator at
# Ravenblack.net is based on...
#############################################################################
__all__ = ['show_faults', 'show_sentences', 'getfault', 'getsentence', 'version',
'sentence_test', 'fault_test', 'show_sentences', 'show_faults']
# IMPORTS ###################################################################
import sqlite3
import random
import pkg_resources
# PARTICULAR IMPORTS ########################################################
from pkg_resources import resource_filename
# CONSTANTS #################################################################
CONN = sqlite3.connect(resource_filename(__name__, 'surrealism.sqlite'))
# VARIABLES #################################################################
# CLASSES ###################################################################
#############################################################################
# EXTERNAL METHODS BELOW
def version():
"""Returns the current version of the Surrealism module."""
return pkg_resources.require('surrealism')[0].version
def showfaults():
"""
This exists for backwards compatibility
:return:
"""
output = show_faults()
return output
def show_faults():
"""
Return all valid/active faults ordered by ID to allow the user to pick and choose.
:return: List of Tuples where the Tuple elements are: (fault id, fault template)
"""
cursor = CONN.cursor()
query = "select fau_id, fault from surfaults where fau_is_valid = 'y' order by fau_id asc"
cursor.execute(query)
result = cursor.fetchall()
return result
def showsentences():
"""
This exists for backwards compatibility
:return:
"""
output = show_sentences()
return output
def show_sentences():
"""
Return all valid/active sentences ordered by ID to allow the user to pick and choose.
:return: Dict containing the sentence ID as the key and the sentence structure as the value.
"""
cursor = CONN.cursor()
query = "select sen_id, sentence from sursentences where sen_is_valid = 'y' order by sen_id asc"
cursor.execute(query)
result = cursor.fetchall()
response_dict = {}
for row in result:
response_dict[row[0]] = row[1]
return response_dict
def faulttest():
"""
This exists for backwards compatibility
:return:
"""
output = fault_test()
return output
def fault_test():
"""Returns 1 instance of each programming fault for testing purposes."""
counts = __get_table_limits()
max_num = counts['max_fau']
counter = 0
list_of_tuples = []
fault_result = None
while counter < max_num:
counter += 1
fault = __get_fault(counts, fault_id=counter)
fault_id = fault[1]
if fault[0] == 'n':
fault_result = "Fault is DISABLED - ignoring..."
if fault[0] == 'y':
fault_result = __process_sentence(fault, counts)
list_of_tuples.append((fault_id, fault_result))
return list_of_tuples
def sentencetest():
"""
This exists for backwards compatibility
:return:
"""
output = sentence_test()
return output
def sentence_test():
"""Return 1 random version of each sentence to test sentence structure."""
counts = __get_table_limits()
max_num = counts['max_sen']
counter = 0
list_of_tuples = []
sentence_result = None
while counter < max_num:
counter += 1
sentence = __get_sentence(counts, sentence_id=counter)
sentence_id = sentence[1]
if sentence[0] == 'n':
sentence_result = "Sentence is DISABLED - ignoring..."
if sentence[0] == 'y':
sentence_result = __process_sentence(sentence, counts)
list_of_tuples.append((sentence_id, sentence_result))
return list_of_tuples
def getfault(fault_id=None):
output = get_fault(fault_id)
return output
def get_fault(fault_id=None):
"""Retrieve a randomly-generated error message as a unicode string.
:param fault_id:
Allows you to optionally specify an integer representing the fault_id
from the database table. This allows you to retrieve a specific fault
each time, albeit with different keywords."""
counts = __get_table_limits()
result = None
id_ = 0
try:
if isinstance(fault_id, int):
id_ = fault_id
elif isinstance(fault_id, float):
print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
id_ = round(fault_id)
else:
id_ = random.randint(1, counts['max_fau'])
except ValueError:
print("ValueError: Incorrect parameter type detected.")
if id_ <= counts['max_fau']:
fault = __get_fault(counts, fault_id=id_)
else:
print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_fau'])))
id_ = counts['max_fau']
fault = __get_fault(counts, fault_id=id_)
if fault is not None:
while fault[0] == 'n':
if id_ is not None:
fault = __get_fault(counts, None)
else:
fault = __get_fault(counts, id_)
if fault[0] == 'y':
result = __process_sentence(fault, counts)
return result
else:
print('ValueError: _fault cannot be None.')
def getsentence(sentence_id=None):
output = get_sentence(sentence_id)
return output
def get_sentence(sentence_id=None):
"""Retrieve a randomly-generated sentence as a unicode string.
:param sentence_id:
Allows you to optionally specify an integer representing the sentence_id
from the database table. This allows you to retrieve a specific
sentence each time, albeit with different keywords."""
counts = __get_table_limits()
result = None
id_ = 0
try:
if isinstance(sentence_id, int):
id_ = sentence_id
elif isinstance(sentence_id, float):
print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
id_ = round(sentence_id)
else:
id_ = random.randint(1, counts['max_sen'])
except ValueError:
print("ValueError: Incorrect parameter type detected.")
if id_ <= counts['max_sen']:
sentence = __get_sentence(counts, sentence_id=id_)
else:
print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_sen'])))
id_ = counts['max_sen']
sentence = __get_sentence(counts, sentence_id=id_)
if sentence is not None:
while sentence[0] == 'n':
if id_ is not None:
# here we delibrately pass 'None' to __getsentence__ as it will
sentence = __get_sentence(counts, None)
else:
sentence = __get_sentence(counts, id_)
if sentence[0] == 'y':
result = __process_sentence(sentence, counts)
return result
else:
print('ValueError: _sentence cannot be None.')
#############################################################################
# INTERNAL METHODS BELOW
def __get_fault(counts, fault_id=None):
"""Let's fetch a random fault that we then need to substitute bits of...
:param counts:
:param fault_id:
"""
# First of all we need a cursor and a query to retrieve our ID's
cursor = CONN.cursor()
check_query = "select fau_id from surfaults"
# Now we fetch the result of the query and save it into check_result
cursor.execute(check_query)
check_result = cursor.fetchall()
# declare an empty list to be populated below
id_list = []
id_to_fetch = None
for row in check_result:
id_list.append(row[0])
if fault_id is not None:
if type(fault_id) is int:
id_to_fetch = fault_id
else:
id_to_fetch = random.randint(1, counts['max_fau'])
while id_to_fetch not in id_list:
id_to_fetch = random.randint(1, counts['max_fau'])
query = ("select * from surfaults where fau_id = {0}".format(id_to_fetch))
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result
def __get_sentence(counts, sentence_id=None):
"""Let's fetch a random sentence that we then need to substitute bits of...
@
:param counts:
:param sentence_id:
"""
# First of all we need a cursor and a query to retrieve our ID's
cursor = CONN.cursor()
check_query = "select sen_id from sursentences"
# Now we fetch the result of the query and save it into check_result
cursor.execute(check_query)
check_result = cursor.fetchall()
# declare an empty list to be populated below
id_list = []
id_to_fetch = None
# Populate the id_list variable with all of the ID's we retrieved from the database query.
for row in check_result:
id_list.append(row[0])
if sentence_id is not None:
if type(sentence_id) is int:
id_to_fetch = sentence_id
else:
id_to_fetch = random.randint(1, counts['max_sen'])
while id_to_fetch not in id_list:
id_to_fetch = random.randint(1, counts['max_sen'])
query = ("select * from sursentences where sen_id = {0}".format(id_to_fetch))
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result
def __get_verb(counts):
"""Let's fetch a VERB
:param counts:
"""
cursor = CONN.cursor()
check_query = "select verb_id from surverbs"
cursor.execute(check_query)
check_result = cursor.fetchall()
id_list = []
for row in check_result:
id_list.append(row[0])
rand = random.randint(1, counts['max_verb'])
while rand not in id_list:
rand = random.randint(1, counts['max_verb'])
query = "select * from surverbs where verb_id = {0}".format(rand)
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result[1]
def __get_noun(counts):
"""Let's fetch a NOUN from the database...
:param counts:
"""
cursor = CONN.cursor()
check_query = "select noun_id from surnouns"
cursor.execute(check_query)
check_result = cursor.fetchall()
id_list = []
for row in check_result:
id_list.append(row[0])
rand = random.randint(1, counts['max_nouns'])
while rand not in id_list:
rand = random.randint(1, counts['max_nouns'])
query = "select * from surnouns where noun_id = {0}".format(rand)
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result[1]
def __get_adjective(counts):
"""Let's fetch an ADJECTIVE from the database...
:param counts:
"""
cursor = CONN.cursor()
check_query = "select adj_id from suradjs"
cursor.execute(check_query)
check_result = cursor.fetchall()
id_list = []
for row in check_result:
id_list.append(row[0])
rand = random.randint(1, counts['max_adjectives'])
while rand not in id_list:
rand = random.randint(1, counts['max_adjectives'])
query = "select * from suradjs where adj_id = {0}".format(rand)
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result[1]
def __get_name(counts):
"""Let's fetch a NAME from the database...
:param counts:"""
cursor = CONN.cursor()
check_query = "select name_id from surnames"
cursor.execute(check_query)
check_result = cursor.fetchall()
id_list = []
for row in check_result:
id_list.append(row[0])
rand = random.randint(1, counts['max_names'])
while rand not in id_list:
rand = random.randint(1, counts['max_names'])
query = "select * from surnames where name_id = {0}".format(rand)
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result[1]
def __get_table_limits():
"""Here we simply take a count of each of the database tables so we know our
upper limits for our random number calls then return a dictionary of them
to the calling function..."""
table_counts = {
'max_adjectives': None,
'max_names': None,
'max_nouns': None,
'max_sentences': None,
'max_faults': None,
'max_verbs': None
}
cursor = CONN.cursor()
cursor.execute('SELECT count(*) FROM suradjs')
table_counts['max_adjectives'] = cursor.fetchone()
table_counts['max_adjectives'] = table_counts['max_adjectives'][0]
cursor.execute('SELECT count(*) FROM surnames')
table_counts['max_names'] = cursor.fetchone()
table_counts['max_names'] = table_counts['max_names'][0]
cursor.execute('SELECT count(*) FROM surnouns')
table_counts['max_nouns'] = cursor.fetchone()
table_counts['max_nouns'] = table_counts['max_nouns'][0]
cursor.execute('SELECT count(*) FROM sursentences')
table_counts['max_sen'] = cursor.fetchone()
table_counts['max_sen'] = table_counts['max_sen'][0]
cursor.execute('SELECT count(*) FROM surfaults')
table_counts['max_fau'] = cursor.fetchone()
table_counts['max_fau'] = table_counts['max_fau'][0]
cursor.execute('SELECT count(*) FROM surverbs')
table_counts['max_verb'] = cursor.fetchone()
table_counts['max_verb'] = table_counts['max_verb'][0]
return table_counts
def __process_sentence(sentence_tuple, counts):
"""pull the actual sentence from the tuple (tuple contains additional data such as ID)
:param _sentence_tuple:
:param counts:
"""
sentence = sentence_tuple[2]
# now we start replacing words one type at a time...
sentence = __replace_verbs(sentence, counts)
sentence = __replace_nouns(sentence, counts)
sentence = ___replace_adjective_maybe(sentence, counts)
sentence = __replace_adjective(sentence, counts)
sentence = __replace_names(sentence, counts)
# here we perform a check to see if we need to use A or AN depending on the
# first letter of the following word...
sentence = __replace_an(sentence)
# replace the new repeating segments
sentence = __replace_repeat(sentence)
# now we will read, choose and substitute each of the RANDOM sentence tuples
sentence = __replace_random(sentence)
# now we are going to choose whether to capitalize words/sentences or not
sentence = __replace_capitalise(sentence)
# here we will choose whether to capitalize all words in the sentence
sentence = __replace_capall(sentence)
# check for appropriate spaces in the correct places.
sentence = __check_spaces(sentence)
return sentence
def __replace_verbs(sentence, counts):
"""Lets find and replace all instances of #VERB
:param _sentence:
:param counts:
"""
if sentence is not None:
while sentence.find('#VERB') != -1:
sentence = sentence.replace('#VERB', str(__get_verb(counts)), 1)
if sentence.find('#VERB') == -1:
return sentence
return sentence
else:
return sentence
def ___replace_adjective_maybe(sentence, counts):
"""Lets find and replace all instances of #ADJECTIVE_MAYBE
:param _sentence:
:param counts:
"""
random_decision = random.randint(0, 1)
if sentence is not None:
while sentence.find('#ADJECTIVE_MAYBE') != -1:
if random_decision % 2 == 0:
sentence = sentence.replace('#ADJECTIVE_MAYBE',
' ' + str(__get_adjective(counts)), 1)
elif random_decision % 2 != 0:
sentence = sentence.replace('#ADJECTIVE_MAYBE', '', 1)
if sentence.find('#ADJECTIVE_MAYBE') == -1:
return sentence
return sentence
else:
return sentence
def __replace_adjective(sentence, counts):
"""Lets find and replace all instances of #ADJECTIVE
:param _sentence:
:param counts:
"""
if sentence is not None:
while sentence.find('#ADJECTIVE') != -1:
sentence = sentence.replace('#ADJECTIVE',
str(__get_adjective(counts)), 1)
if sentence.find('#ADJECTIVE') == -1:
return sentence
return sentence
else:
return sentence
def __replace_names(sentence, counts):
"""Lets find and replace all instances of #NAME
:param _sentence:
:param counts:
"""
if sentence is not None:
while sentence.find('#NAME') != -1:
sentence = sentence.replace('#NAME', str(__get_name(counts)), 1)
if sentence.find('#NAME') == -1:
return sentence
return sentence
else:
return sentence
def __replace_an(sentence):
"""Lets find and replace all instances of #AN
This is a little different, as this depends on whether the next
word starts with a vowel or a consonant.
:param _sentence:
"""
if sentence is not None:
while sentence.find('#AN') != -1:
an_index = sentence.find('#AN')
if an_index > -1:
an_index += 4
if sentence[an_index] in 'aeiouAEIOU':
sentence = sentence.replace('#AN', str('an'), 1)
else:
sentence = sentence.replace('#AN', str('a'), 1)
if sentence.find('#AN') == -1:
return sentence
return sentence
else:
return sentence
def __replace_random(sentence):
"""Lets find and replace all instances of #RANDOM
:param _sentence:
"""
sub_list = None
choice = None
if sentence is not None:
while sentence.find('#RANDOM') != -1:
random_index = sentence.find('#RANDOM')
start_index = sentence.find('#RANDOM') + 8
end_index = sentence.find(']')
if sentence.find('#RANDOM') is not None:
sub_list = sentence[start_index:end_index].split(',')
choice = random.randint(1, int(sub_list[0]))
# _sub_list[_choice]
to_be_replaced = sentence[random_index:end_index + 1]
sentence = sentence.replace(to_be_replaced, sub_list[choice], 1)
if sentence.find('#RANDOM') == -1:
return sentence
return sentence
else:
return sentence
def __replace_repeat(sentence):
"""
Allows the use of repeating random-elements such as in the 'Ten green bottles' type sentences.
:param sentence:
"""
######### USE SENTENCE_ID 47 for testing!
repeat_dict = {}
if sentence is not None:
while sentence.find('#DEFINE_REPEAT') != -1:
begin_index = sentence.find('#DEFINE_REPEAT')
start_index = begin_index + 15
end_index = sentence.find(']')
if sentence.find('#DEFINE_REPEAT') is not None:
sub_list = sentence[start_index:end_index].split(',')
choice = sub_list[0]
repeat_text = sub_list[1]
repeat_dict[choice] = repeat_text
sentence = sentence.replace(sentence[begin_index:end_index + 1], '', 1)
while sentence.find('#REPEAT') != -1:
if sentence.find('#REPEAT') is not None:
repeat_begin_index = sentence.find('#REPEAT')
repeat_start_index = repeat_begin_index + 8
# by searching from repeat_index below we don't encounter dodgy bracket-matching errors.
repeat_end_index = sentence.find(']', repeat_start_index)
repeat_index = sentence[repeat_start_index:repeat_end_index]
if repeat_index in repeat_dict:
sentence = sentence.replace(sentence[repeat_begin_index:repeat_end_index + 1],
str(repeat_dict[repeat_index]))
if sentence.find('#REPEAT') == -1:
return sentence
return sentence
else:
return sentence
def __replace_capitalise(sentence):
"""here we replace all instances of #CAPITALISE and cap the next word.
############
#NOTE: Buggy as hell, as it doesn't account for words that are already
#capitalized
############
:param _sentence:
"""
if sentence is not None:
while sentence.find('#CAPITALISE') != -1:
cap_index = _sentence.find('#CAPITALISE')
part1 = sentence[:cap_index]
part2 = sentence[cap_index + 12:cap_index + 13]
part3 = sentence[cap_index + 13:]
if part2 in "abcdefghijklmnopqrstuvwxyz":
sentence = part1 + part2.capitalize() + part3
else:
sentence = part1 + part2 + part3
if sentence.find('#CAPITALISE') == -1:
return sentence
else:
return sentence
def __replace_capall(sentence):
"""here we replace all instances of #CAPALL and cap the entire sentence.
Don't believe that CAPALL is buggy anymore as it forces all uppercase OK?
:param _sentence:
"""
# print "\nReplacing CAPITALISE: "
if sentence is not None:
while sentence.find('#CAPALL') != -1:
# _cap_index = _sentence.find('#CAPALL')
sentence = sentence.upper()
sentence = sentence.replace('#CAPALL ', '', 1)
if sentence.find('#CAPALL') == -1:
return sentence
else:
return sentence
def __check_spaces(sentence):
"""
Here we check to see that we have the correct number of spaces in the correct locations.
:param _sentence:
:return:
"""
# We have to run the process multiple times:
# Once to search for all spaces, and check if there are adjoining spaces;
# The second time to check for 2 spaces after sentence-ending characters such as . and ! and ?
if sentence is not None:
words = sentence.split()
new_sentence = ''
for (i, word) in enumerate(words):
if word[-1] in set('.!?'):
word += ' '
new_word = ''.join(word)
new_sentence += ' ' + new_word
# remove any trailing whitespace
new_sentence = new_sentence.strip()
return new_sentence
|
Morrolan/surrealism
|
surrealism.py
|
___replace_adjective_maybe
|
python
|
def ___replace_adjective_maybe(sentence, counts):
random_decision = random.randint(0, 1)
if sentence is not None:
while sentence.find('#ADJECTIVE_MAYBE') != -1:
if random_decision % 2 == 0:
sentence = sentence.replace('#ADJECTIVE_MAYBE',
' ' + str(__get_adjective(counts)), 1)
elif random_decision % 2 != 0:
sentence = sentence.replace('#ADJECTIVE_MAYBE', '', 1)
if sentence.find('#ADJECTIVE_MAYBE') == -1:
return sentence
return sentence
else:
return sentence
|
Lets find and replace all instances of #ADJECTIVE_MAYBE
:param _sentence:
:param counts:
|
train
|
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L597-L619
| null |
#!/usr/bin/env python
#############################################################################
# surrealism.py - Surreal sentence and error message generator
# Copyright (C) 2014 Ian Havelock
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#
# This is a derivative work (used with permission) from www.ravenblack.net
# Credit also goes to Kevan Davis on whose work the surrealism generator at
# Ravenblack.net is based on...
#############################################################################
__all__ = ['show_faults', 'show_sentences', 'getfault', 'getsentence', 'version',
'sentence_test', 'fault_test', 'show_sentences', 'show_faults']
# IMPORTS ###################################################################
import sqlite3
import random
import pkg_resources
# PARTICULAR IMPORTS ########################################################
from pkg_resources import resource_filename
# CONSTANTS #################################################################
CONN = sqlite3.connect(resource_filename(__name__, 'surrealism.sqlite'))
# VARIABLES #################################################################
# CLASSES ###################################################################
#############################################################################
# EXTERNAL METHODS BELOW
def version():
"""Returns the current version of the Surrealism module."""
return pkg_resources.require('surrealism')[0].version
def showfaults():
"""
This exists for backwards compatibility
:return:
"""
output = show_faults()
return output
def show_faults():
"""
Return all valid/active faults ordered by ID to allow the user to pick and choose.
:return: List of Tuples where the Tuple elements are: (fault id, fault template)
"""
cursor = CONN.cursor()
query = "select fau_id, fault from surfaults where fau_is_valid = 'y' order by fau_id asc"
cursor.execute(query)
result = cursor.fetchall()
return result
def showsentences():
"""
This exists for backwards compatibility
:return:
"""
output = show_sentences()
return output
def show_sentences():
"""
Return all valid/active sentences ordered by ID to allow the user to pick and choose.
:return: Dict containing the sentence ID as the key and the sentence structure as the value.
"""
cursor = CONN.cursor()
query = "select sen_id, sentence from sursentences where sen_is_valid = 'y' order by sen_id asc"
cursor.execute(query)
result = cursor.fetchall()
response_dict = {}
for row in result:
response_dict[row[0]] = row[1]
return response_dict
def faulttest():
"""
This exists for backwards compatibility
:return:
"""
output = fault_test()
return output
def fault_test():
"""Returns 1 instance of each programming fault for testing purposes."""
counts = __get_table_limits()
max_num = counts['max_fau']
counter = 0
list_of_tuples = []
fault_result = None
while counter < max_num:
counter += 1
fault = __get_fault(counts, fault_id=counter)
fault_id = fault[1]
if fault[0] == 'n':
fault_result = "Fault is DISABLED - ignoring..."
if fault[0] == 'y':
fault_result = __process_sentence(fault, counts)
list_of_tuples.append((fault_id, fault_result))
return list_of_tuples
def sentencetest():
"""
This exists for backwards compatibility
:return:
"""
output = sentence_test()
return output
def sentence_test():
"""Return 1 random version of each sentence to test sentence structure."""
counts = __get_table_limits()
max_num = counts['max_sen']
counter = 0
list_of_tuples = []
sentence_result = None
while counter < max_num:
counter += 1
sentence = __get_sentence(counts, sentence_id=counter)
sentence_id = sentence[1]
if sentence[0] == 'n':
sentence_result = "Sentence is DISABLED - ignoring..."
if sentence[0] == 'y':
sentence_result = __process_sentence(sentence, counts)
list_of_tuples.append((sentence_id, sentence_result))
return list_of_tuples
def getfault(fault_id=None):
output = get_fault(fault_id)
return output
def get_fault(fault_id=None):
"""Retrieve a randomly-generated error message as a unicode string.
:param fault_id:
Allows you to optionally specify an integer representing the fault_id
from the database table. This allows you to retrieve a specific fault
each time, albeit with different keywords."""
counts = __get_table_limits()
result = None
id_ = 0
try:
if isinstance(fault_id, int):
id_ = fault_id
elif isinstance(fault_id, float):
print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
id_ = round(fault_id)
else:
id_ = random.randint(1, counts['max_fau'])
except ValueError:
print("ValueError: Incorrect parameter type detected.")
if id_ <= counts['max_fau']:
fault = __get_fault(counts, fault_id=id_)
else:
print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_fau'])))
id_ = counts['max_fau']
fault = __get_fault(counts, fault_id=id_)
if fault is not None:
while fault[0] == 'n':
if id_ is not None:
fault = __get_fault(counts, None)
else:
fault = __get_fault(counts, id_)
if fault[0] == 'y':
result = __process_sentence(fault, counts)
return result
else:
print('ValueError: _fault cannot be None.')
def getsentence(sentence_id=None):
output = get_sentence(sentence_id)
return output
def get_sentence(sentence_id=None):
"""Retrieve a randomly-generated sentence as a unicode string.
:param sentence_id:
Allows you to optionally specify an integer representing the sentence_id
from the database table. This allows you to retrieve a specific
sentence each time, albeit with different keywords."""
counts = __get_table_limits()
result = None
id_ = 0
try:
if isinstance(sentence_id, int):
id_ = sentence_id
elif isinstance(sentence_id, float):
print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
id_ = round(sentence_id)
else:
id_ = random.randint(1, counts['max_sen'])
except ValueError:
print("ValueError: Incorrect parameter type detected.")
if id_ <= counts['max_sen']:
sentence = __get_sentence(counts, sentence_id=id_)
else:
print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_sen'])))
id_ = counts['max_sen']
sentence = __get_sentence(counts, sentence_id=id_)
if sentence is not None:
while sentence[0] == 'n':
if id_ is not None:
# here we delibrately pass 'None' to __getsentence__ as it will
sentence = __get_sentence(counts, None)
else:
sentence = __get_sentence(counts, id_)
if sentence[0] == 'y':
result = __process_sentence(sentence, counts)
return result
else:
print('ValueError: _sentence cannot be None.')
#############################################################################
# INTERNAL METHODS BELOW
def __get_fault(counts, fault_id=None):
"""Let's fetch a random fault that we then need to substitute bits of...
:param counts:
:param fault_id:
"""
# First of all we need a cursor and a query to retrieve our ID's
cursor = CONN.cursor()
check_query = "select fau_id from surfaults"
# Now we fetch the result of the query and save it into check_result
cursor.execute(check_query)
check_result = cursor.fetchall()
# declare an empty list to be populated below
id_list = []
id_to_fetch = None
for row in check_result:
id_list.append(row[0])
if fault_id is not None:
if type(fault_id) is int:
id_to_fetch = fault_id
else:
id_to_fetch = random.randint(1, counts['max_fau'])
while id_to_fetch not in id_list:
id_to_fetch = random.randint(1, counts['max_fau'])
query = ("select * from surfaults where fau_id = {0}".format(id_to_fetch))
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result
def __get_sentence(counts, sentence_id=None):
"""Let's fetch a random sentence that we then need to substitute bits of...
@
:param counts:
:param sentence_id:
"""
# First of all we need a cursor and a query to retrieve our ID's
cursor = CONN.cursor()
check_query = "select sen_id from sursentences"
# Now we fetch the result of the query and save it into check_result
cursor.execute(check_query)
check_result = cursor.fetchall()
# declare an empty list to be populated below
id_list = []
id_to_fetch = None
# Populate the id_list variable with all of the ID's we retrieved from the database query.
for row in check_result:
id_list.append(row[0])
if sentence_id is not None:
if type(sentence_id) is int:
id_to_fetch = sentence_id
else:
id_to_fetch = random.randint(1, counts['max_sen'])
while id_to_fetch not in id_list:
id_to_fetch = random.randint(1, counts['max_sen'])
query = ("select * from sursentences where sen_id = {0}".format(id_to_fetch))
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result
def __get_verb(counts):
"""Let's fetch a VERB
:param counts:
"""
cursor = CONN.cursor()
check_query = "select verb_id from surverbs"
cursor.execute(check_query)
check_result = cursor.fetchall()
id_list = []
for row in check_result:
id_list.append(row[0])
rand = random.randint(1, counts['max_verb'])
while rand not in id_list:
rand = random.randint(1, counts['max_verb'])
query = "select * from surverbs where verb_id = {0}".format(rand)
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result[1]
def __get_noun(counts):
"""Let's fetch a NOUN from the database...
:param counts:
"""
cursor = CONN.cursor()
check_query = "select noun_id from surnouns"
cursor.execute(check_query)
check_result = cursor.fetchall()
id_list = []
for row in check_result:
id_list.append(row[0])
rand = random.randint(1, counts['max_nouns'])
while rand not in id_list:
rand = random.randint(1, counts['max_nouns'])
query = "select * from surnouns where noun_id = {0}".format(rand)
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result[1]
def __get_adjective(counts):
"""Let's fetch an ADJECTIVE from the database...
:param counts:
"""
cursor = CONN.cursor()
check_query = "select adj_id from suradjs"
cursor.execute(check_query)
check_result = cursor.fetchall()
id_list = []
for row in check_result:
id_list.append(row[0])
rand = random.randint(1, counts['max_adjectives'])
while rand not in id_list:
rand = random.randint(1, counts['max_adjectives'])
query = "select * from suradjs where adj_id = {0}".format(rand)
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result[1]
def __get_name(counts):
    """Fetch one random name string from the ``surnames`` table.

    :param counts: dict of table row counts; ``counts['max_names']``
        bounds the random id draw.
    :return: the name text (column 1 of the fetched row).
    """
    cur = CONN.cursor()
    cur.execute("select name_id from surnames")
    valid_ids = [record[0] for record in cur.fetchall()]
    # Keep drawing until we land on an id that actually exists
    # (ids may be sparse).
    candidate = random.randint(1, counts['max_names'])
    while candidate not in valid_ids:
        candidate = random.randint(1, counts['max_names'])
    cur.execute("select * from surnames where name_id = {0}".format(candidate))
    return cur.fetchone()[1]
def __get_table_limits():
    """Count the rows in each lookup table.

    :return: dict whose keys match those actually read elsewhere in the
        module (``max_adjectives``, ``max_names``, ``max_nouns``,
        ``max_sen``, ``max_fau``, ``max_verb``); values are row counts
        used to bound the random-id draws.
    """
    # (table, result key) pairs.  The original pre-seeded the dict with
    # keys like 'max_sentences'/'max_faults'/'max_verbs' that were never
    # filled in and never read -- those dead keys are removed.
    tables = (
        ('suradjs', 'max_adjectives'),
        ('surnames', 'max_names'),
        ('surnouns', 'max_nouns'),
        ('sursentences', 'max_sen'),
        ('surfaults', 'max_fau'),
        ('surverbs', 'max_verb'),
    )
    cursor = CONN.cursor()
    table_counts = {}
    for table, key in tables:
        cursor.execute('SELECT count(*) FROM {0}'.format(table))
        table_counts[key] = cursor.fetchone()[0]
    return table_counts
def __process_sentence(sentence_tuple, counts):
    """Expand a raw template row into its final sentence text.

    :param sentence_tuple: database row; index 2 holds the template
        string (the other fields are flags/ids).
    :param counts: dict of table row counts used to bound random draws.
    :return: the fully substituted sentence string.
    """
    sentence = sentence_tuple[2]
    # now we start replacing words one type at a time...
    sentence = __replace_verbs(sentence, counts)
    sentence = __replace_nouns(sentence, counts)
    # NOTE(review): ___replace_adjective_maybe has THREE leading
    # underscores -- confirm that matches its definition elsewhere in
    # this module.
    sentence = ___replace_adjective_maybe(sentence, counts)
    sentence = __replace_adjective(sentence, counts)
    sentence = __replace_names(sentence, counts)
    # here we perform a check to see if we need to use A or AN depending on the
    # first letter of the following word...
    sentence = __replace_an(sentence)
    # replace the new repeating segments
    sentence = __replace_repeat(sentence)
    # now we will read, choose and substitute each of the RANDOM sentence tuples
    sentence = __replace_random(sentence)
    # now we are going to choose whether to capitalize words/sentences or not
    sentence = __replace_capitalise(sentence)
    # here we will choose whether to capitalize all words in the sentence
    sentence = __replace_capall(sentence)
    # check for appropriate spaces in the correct places.
    sentence = __check_spaces(sentence)
    return sentence
def __replace_verbs(sentence, counts):
    """Substitute every '#VERB' placeholder with a random verb.

    :param sentence: template string (may be None; returned unchanged).
    :param counts: table row counts for the random draw.
    """
    if sentence is None:
        return sentence
    while '#VERB' in sentence:
        sentence = sentence.replace('#VERB', str(__get_verb(counts)), 1)
    return sentence
def __replace_nouns(sentence, counts):
    """Substitute every '#NOUN' placeholder with a random noun.

    :param sentence: template string (may be None; returned unchanged).
    :param counts: table row counts for the random draw.
    """
    if sentence is None:
        return sentence
    while '#NOUN' in sentence:
        sentence = sentence.replace('#NOUN', str(__get_noun(counts)), 1)
    return sentence
def __replace_adjective(sentence, counts):
    """Substitute every '#ADJECTIVE' placeholder with a random adjective.

    :param sentence: template string (may be None; returned unchanged).
    :param counts: table row counts for the random draw.
    """
    if sentence is None:
        return sentence
    while '#ADJECTIVE' in sentence:
        sentence = sentence.replace('#ADJECTIVE', str(__get_adjective(counts)), 1)
    return sentence
def __replace_names(sentence, counts):
    """Substitute every '#NAME' placeholder with a random name.

    :param sentence: template string (may be None; returned unchanged).
    :param counts: table row counts for the random draw.
    """
    if sentence is None:
        return sentence
    while '#NAME' in sentence:
        sentence = sentence.replace('#NAME', str(__get_name(counts)), 1)
    return sentence
def __replace_an(sentence):
    """Substitute every '#AN' placeholder with 'a' or 'an'.

    The article is chosen by peeking at the first letter of the word that
    follows the placeholder: a vowel gets 'an', anything else gets 'a'.

    :param sentence: template string (may be None; returned unchanged).
    :return: the sentence with all '#AN' markers resolved.
    """
    if sentence is None:
        return sentence
    while sentence.find('#AN') != -1:
        # '#AN' is 3 chars; +4 skips the following space to land on the
        # first letter of the next word.
        an_index = sentence.find('#AN') + 4
        # Guard against '#AN' at the very end of the template -- the
        # original indexed past the string (IndexError); default to 'a'.
        if an_index < len(sentence) and sentence[an_index] in 'aeiouAEIOU':
            sentence = sentence.replace('#AN', 'an', 1)
        else:
            sentence = sentence.replace('#AN', 'a', 1)
    return sentence
def __replace_random(sentence):
    """Resolve every '#RANDOM[n,opt1,...,optn]' marker to one option.

    The first comma-separated field is the option count ``n``; one of the
    ``n`` following fields is chosen uniformly at random.

    :param sentence: template string (may be None; returned unchanged).
    :return: the sentence with all '#RANDOM' markers substituted.
    """
    if sentence is None:
        return sentence
    while sentence.find('#RANDOM') != -1:
        random_index = sentence.find('#RANDOM')
        start_index = random_index + 8          # skip '#RANDOM['
        end_index = sentence.find(']')          # first closing bracket
        # The original guarded on ``sentence.find(...) is not None`` --
        # str.find returns an int, never None, so that test was always
        # true and has been dropped.
        sub_list = sentence[start_index:end_index].split(',')
        choice = random.randint(1, int(sub_list[0]))
        to_be_replaced = sentence[random_index:end_index + 1]
        sentence = sentence.replace(to_be_replaced, sub_list[choice], 1)
    return sentence
def __replace_repeat(sentence):
    """Expand '#DEFINE_REPEAT[key,text]' / '#REPEAT[key]' marker pairs.

    Allows repeating random-elements such as in 'Ten green bottles' type
    sentences: each DEFINE stores text under a key, and each later
    '#REPEAT[key]' is replaced with that stored text.

    :param sentence: template string (may be None; returned unchanged).
    """
    ######### USE SENTENCE_ID 47 for testing!
    repeat_dict = {}
    if sentence is not None:
        while sentence.find('#DEFINE_REPEAT') != -1:
            begin_index = sentence.find('#DEFINE_REPEAT')
            start_index = begin_index + 15  # skip '#DEFINE_REPEAT['
            end_index = sentence.find(']')
            # NOTE(review): str.find returns an int, never None, so this
            # guard is always true.
            if sentence.find('#DEFINE_REPEAT') is not None:
                sub_list = sentence[start_index:end_index].split(',')
                choice = sub_list[0]
                repeat_text = sub_list[1]
                repeat_dict[choice] = repeat_text
                # Strip the whole definition marker from the sentence.
                sentence = sentence.replace(sentence[begin_index:end_index + 1], '', 1)
        while sentence.find('#REPEAT') != -1:
            if sentence.find('#REPEAT') is not None:
                repeat_begin_index = sentence.find('#REPEAT')
                repeat_start_index = repeat_begin_index + 8  # skip '#REPEAT['
                # by searching from repeat_start_index below we don't
                # encounter dodgy bracket-matching errors.
                repeat_end_index = sentence.find(']', repeat_start_index)
                repeat_index = sentence[repeat_start_index:repeat_end_index]
                # NOTE(review): if the key was never defined, nothing is
                # replaced and this loop never terminates -- confirm
                # templates always define their keys.
                if repeat_index in repeat_dict:
                    sentence = sentence.replace(sentence[repeat_begin_index:repeat_end_index + 1],
                                                str(repeat_dict[repeat_index]))
            if sentence.find('#REPEAT') == -1:
                return sentence
        return sentence
    else:
        return sentence
def __replace_capitalise(sentence):
    """Capitalise the word following each '#CAPITALISE' marker.

    Fixes two defects in the original: it referenced the undefined name
    ``_sentence`` (NameError as soon as a marker was present), and a
    non-None sentence without any marker fell off the end of the
    function, returning None.

    :param sentence: template string (may be None; returned unchanged).
    :return: the sentence with all '#CAPITALISE' markers applied.
    """
    if sentence is None:
        return sentence
    while sentence.find('#CAPITALISE') != -1:
        cap_index = sentence.find('#CAPITALISE')
        part1 = sentence[:cap_index]
        # '#CAPITALISE ' is 12 characters long, so part2 is the first
        # letter of the following word.
        part2 = sentence[cap_index + 12:cap_index + 13]
        part3 = sentence[cap_index + 13:]
        if part2 in "abcdefghijklmnopqrstuvwxyz":
            sentence = part1 + part2.capitalize() + part3
        else:
            sentence = part1 + part2 + part3
    return sentence
def __replace_capall(sentence):
    """Upper-case the whole sentence when a '#CAPALL' marker is present.

    Fixes two defects in the original: it looped forever when '#CAPALL'
    was not followed by a space (only '#CAPALL ' was ever removed), and a
    non-None sentence without the marker fell off the end of the
    function, returning None.

    :param sentence: template string (may be None; returned unchanged).
    """
    if sentence is None:
        return sentence
    while '#CAPALL' in sentence:
        sentence = sentence.upper()
        # Remove the marker whether or not a space follows it.
        if '#CAPALL ' in sentence:
            sentence = sentence.replace('#CAPALL ', '', 1)
        else:
            sentence = sentence.replace('#CAPALL', '', 1)
    return sentence
def __check_spaces(sentence):
    """Normalise whitespace in the finished sentence.

    Splits on whitespace runs and rejoins with single spaces, appending
    an extra space to words ending in '.', '!' or '?' so sentence breaks
    get two spaces (except at the very end, which is stripped).

    :param sentence: rendered sentence (None -> returns None).
    """
    if sentence is None:
        return None
    terminators = set('.!?')
    pieces = []
    for word in sentence.split():
        if word[-1] in terminators:
            word += ' '
        pieces.append(word)
    return ' '.join(pieces).strip()
|
Morrolan/surrealism
|
surrealism.py
|
__replace_adjective
|
python
|
def __replace_adjective(sentence, counts):
if sentence is not None:
while sentence.find('#ADJECTIVE') != -1:
sentence = sentence.replace('#ADJECTIVE',
str(__get_adjective(counts)), 1)
if sentence.find('#ADJECTIVE') == -1:
return sentence
return sentence
else:
return sentence
|
Lets find and replace all instances of #ADJECTIVE
:param _sentence:
:param counts:
|
train
|
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L622-L638
| null |
#!/usr/bin/env python
#############################################################################
# surrealism.py - Surreal sentence and error message generator
# Copyright (C) 2014 Ian Havelock
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#
# This is a derivative work (used with permission) from www.ravenblack.net
# Credit also goes to Kevan Davis on whose work the surrealism generator at
# Ravenblack.net is based on...
#############################################################################
__all__ = ['show_faults', 'show_sentences', 'getfault', 'getsentence', 'version',
'sentence_test', 'fault_test', 'show_sentences', 'show_faults']
# IMPORTS ###################################################################
import sqlite3
import random
import pkg_resources
# PARTICULAR IMPORTS ########################################################
from pkg_resources import resource_filename
# CONSTANTS #################################################################
CONN = sqlite3.connect(resource_filename(__name__, 'surrealism.sqlite'))
# VARIABLES #################################################################
# CLASSES ###################################################################
#############################################################################
# EXTERNAL METHODS BELOW
def version():
    """Return the installed version string of the ``surrealism`` package."""
    dist = pkg_resources.require('surrealism')[0]
    return dist.version
def showfaults():
    """Backwards-compatible alias for :func:`show_faults`."""
    return show_faults()
def show_faults():
    """Return all valid/active faults ordered by ID.

    :return: list of ``(fault_id, fault_template)`` tuples.
    """
    cur = CONN.cursor()
    cur.execute(
        "select fau_id, fault from surfaults where fau_is_valid = 'y' order by fau_id asc"
    )
    return cur.fetchall()
def showsentences():
    """Backwards-compatible alias for :func:`show_sentences`."""
    return show_sentences()
def show_sentences():
    """Return all valid/active sentences keyed by sentence ID.

    :return: dict mapping ``sen_id`` -> sentence template string.
    """
    cur = CONN.cursor()
    cur.execute(
        "select sen_id, sentence from sursentences where sen_is_valid = 'y' order by sen_id asc"
    )
    return {sen_id: template for sen_id, template in cur.fetchall()}
def faulttest():
    """Backwards-compatible alias for :func:`fault_test`."""
    return fault_test()
def fault_test():
    """Render one instance of every fault template for eyeball testing.

    :return: list of ``(fault_id, rendered_text_or_disabled_notice)``
        tuples, one per row in ``surfaults``.
    """
    counts = __get_table_limits()
    max_num = counts['max_fau']
    counter = 0
    list_of_tuples = []
    fault_result = None
    while counter < max_num:
        counter += 1
        fault = __get_fault(counts, fault_id=counter)
        # Row layout (by use below): fault[0] = 'y'/'n' valid flag,
        # fault[1] = id.
        fault_id = fault[1]
        if fault[0] == 'n':
            fault_result = "Fault is DISABLED - ignoring..."
        if fault[0] == 'y':
            fault_result = __process_sentence(fault, counts)
        list_of_tuples.append((fault_id, fault_result))
    return list_of_tuples
def sentencetest():
    """Backwards-compatible alias for :func:`sentence_test`."""
    return sentence_test()
def sentence_test():
    """Render one random version of every sentence template.

    :return: list of ``(sentence_id, rendered_text_or_disabled_notice)``
        tuples, one per row in ``sursentences``.
    """
    counts = __get_table_limits()
    max_num = counts['max_sen']
    counter = 0
    list_of_tuples = []
    sentence_result = None
    while counter < max_num:
        counter += 1
        sentence = __get_sentence(counts, sentence_id=counter)
        # Row layout (by use below): sentence[0] = 'y'/'n' valid flag,
        # sentence[1] = id.
        sentence_id = sentence[1]
        if sentence[0] == 'n':
            sentence_result = "Sentence is DISABLED - ignoring..."
        if sentence[0] == 'y':
            sentence_result = __process_sentence(sentence, counts)
        list_of_tuples.append((sentence_id, sentence_result))
    return list_of_tuples
def getfault(fault_id=None):
    """Backwards-compatible alias for :func:`get_fault`."""
    return get_fault(fault_id)
def get_fault(fault_id=None):
    """Retrieve a randomly-generated error message as a unicode string.

    :param fault_id: optional int picking a specific fault template from
        the database (keywords are still randomised per call).  Floats
        are rounded; anything else yields a random template.
    :return: the rendered fault text, or None for a disabled/missing row.
    """
    counts = __get_table_limits()
    result = None
    id_ = 0
    try:
        if isinstance(fault_id, int):
            id_ = fault_id
        elif isinstance(fault_id, float):
            print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
            id_ = round(fault_id)
        else:
            id_ = random.randint(1, counts['max_fau'])
    except ValueError:
        print("ValueError: Incorrect parameter type detected.")
    # Clamp out-of-range requests to the highest valid id.
    if id_ <= counts['max_fau']:
        fault = __get_fault(counts, fault_id=id_)
    else:
        print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_fau'])))
        id_ = counts['max_fau']
        fault = __get_fault(counts, fault_id=id_)
    if fault is not None:
        # A 'n' flag marks a disabled row: keep drawing until an enabled
        # one turns up.
        while fault[0] == 'n':
            if id_ is not None:
                # NOTE(review): id_ is always an int at this point, so
                # the else branch below looks unreachable -- confirm the
                # intended condition.
                fault = __get_fault(counts, None)
            else:
                fault = __get_fault(counts, id_)
        if fault[0] == 'y':
            result = __process_sentence(fault, counts)
        return result
    else:
        print('ValueError: _fault cannot be None.')
def getsentence(sentence_id=None):
    """Backwards-compatible alias for :func:`get_sentence`."""
    return get_sentence(sentence_id)
def get_sentence(sentence_id=None):
    """Retrieve a randomly-generated sentence as a unicode string.

    :param sentence_id: optional int picking a specific sentence template
        from the database (keywords are still randomised per call).
        Floats are rounded; anything else yields a random template.
    :return: the rendered sentence, or None for a disabled/missing row.
    """
    counts = __get_table_limits()
    result = None
    id_ = 0
    try:
        if isinstance(sentence_id, int):
            id_ = sentence_id
        elif isinstance(sentence_id, float):
            print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
            id_ = round(sentence_id)
        else:
            id_ = random.randint(1, counts['max_sen'])
    except ValueError:
        print("ValueError: Incorrect parameter type detected.")
    # Clamp out-of-range requests to the highest valid id.
    if id_ <= counts['max_sen']:
        sentence = __get_sentence(counts, sentence_id=id_)
    else:
        print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_sen'])))
        id_ = counts['max_sen']
        sentence = __get_sentence(counts, sentence_id=id_)
    if sentence is not None:
        # A 'n' flag marks a disabled row: keep drawing until an enabled
        # one turns up.
        while sentence[0] == 'n':
            if id_ is not None:
                # Passing None makes __get_sentence pick a random id.
                # NOTE(review): id_ is always an int at this point, so
                # the else branch below looks unreachable -- confirm.
                sentence = __get_sentence(counts, None)
            else:
                sentence = __get_sentence(counts, id_)
        if sentence[0] == 'y':
            result = __process_sentence(sentence, counts)
        return result
    else:
        print('ValueError: _sentence cannot be None.')
#############################################################################
# INTERNAL METHODS BELOW
def __get_fault(counts, fault_id=None):
    """Fetch one row from ``surfaults``, by id or at random.

    :param counts: table row counts; bounds the random id draw.
    :param fault_id: optional int; a non-int or missing value falls back
        to a random existing id.
    :return: the full database row (tuple) for the chosen fault.
    """
    # First of all we need a cursor and a query to retrieve our ID's
    cursor = CONN.cursor()
    check_query = "select fau_id from surfaults"
    # Now we fetch the result of the query and save it into check_result
    cursor.execute(check_query)
    check_result = cursor.fetchall()
    # declare an empty list to be populated below
    id_list = []
    id_to_fetch = None
    for row in check_result:
        id_list.append(row[0])
    if fault_id is not None:
        if type(fault_id) is int:
            id_to_fetch = fault_id
        else:
            id_to_fetch = random.randint(1, counts['max_fau'])
    # Re-draw until the id actually exists (ids may be sparse).  This
    # also covers fault_id=None, where id_to_fetch starts as None.
    while id_to_fetch not in id_list:
        id_to_fetch = random.randint(1, counts['max_fau'])
    query = ("select * from surfaults where fau_id = {0}".format(id_to_fetch))
    cursor.execute(query)
    result = cursor.fetchone()
    # cursor.close()
    return result
def __get_sentence(counts, sentence_id=None):
    """Fetch one row from ``sursentences``, by id or at random.

    :param counts: table row counts; bounds the random id draw.
    :param sentence_id: optional int; a non-int or missing value falls
        back to a random existing id.
    :return: the full database row (tuple) for the chosen sentence.
    """
    # First of all we need a cursor and a query to retrieve our ID's
    cursor = CONN.cursor()
    check_query = "select sen_id from sursentences"
    # Now we fetch the result of the query and save it into check_result
    cursor.execute(check_query)
    check_result = cursor.fetchall()
    # declare an empty list to be populated below
    id_list = []
    id_to_fetch = None
    # Populate the id_list variable with all of the ID's we retrieved from the database query.
    for row in check_result:
        id_list.append(row[0])
    if sentence_id is not None:
        if type(sentence_id) is int:
            id_to_fetch = sentence_id
        else:
            id_to_fetch = random.randint(1, counts['max_sen'])
    # Re-draw until the id actually exists (ids may be sparse).  This
    # also covers sentence_id=None, where id_to_fetch starts as None.
    while id_to_fetch not in id_list:
        id_to_fetch = random.randint(1, counts['max_sen'])
    query = ("select * from sursentences where sen_id = {0}".format(id_to_fetch))
    cursor.execute(query)
    result = cursor.fetchone()
    # cursor.close()
    return result
def __get_verb(counts):
    """Fetch one random verb string from the ``surverbs`` table.

    :param counts: table row counts; ``counts['max_verb']`` bounds the draw.
    :return: the verb text (column 1 of the fetched row).
    """
    cursor = CONN.cursor()
    check_query = "select verb_id from surverbs"
    cursor.execute(check_query)
    check_result = cursor.fetchall()
    id_list = []
    for row in check_result:
        id_list.append(row[0])
    # Re-draw until we hit an id that exists (ids may be sparse).
    rand = random.randint(1, counts['max_verb'])
    while rand not in id_list:
        rand = random.randint(1, counts['max_verb'])
    query = "select * from surverbs where verb_id = {0}".format(rand)
    cursor.execute(query)
    result = cursor.fetchone()
    # cursor.close()
    return result[1]
def __get_noun(counts):
    """Fetch one random noun string from the ``surnouns`` table.

    :param counts: table row counts; ``counts['max_nouns']`` bounds the draw.
    :return: the noun text (column 1 of the fetched row).
    """
    cursor = CONN.cursor()
    check_query = "select noun_id from surnouns"
    cursor.execute(check_query)
    check_result = cursor.fetchall()
    id_list = []
    for row in check_result:
        id_list.append(row[0])
    # Re-draw until we hit an id that exists (ids may be sparse).
    rand = random.randint(1, counts['max_nouns'])
    while rand not in id_list:
        rand = random.randint(1, counts['max_nouns'])
    query = "select * from surnouns where noun_id = {0}".format(rand)
    cursor.execute(query)
    result = cursor.fetchone()
    # cursor.close()
    return result[1]
def __get_adjective(counts):
    """Fetch one random adjective string from the ``suradjs`` table.

    :param counts: table row counts; ``counts['max_adjectives']`` bounds
        the draw.
    :return: the adjective text (column 1 of the fetched row).
    """
    cursor = CONN.cursor()
    check_query = "select adj_id from suradjs"
    cursor.execute(check_query)
    check_result = cursor.fetchall()
    id_list = []
    for row in check_result:
        id_list.append(row[0])
    # Re-draw until we hit an id that exists (ids may be sparse).
    rand = random.randint(1, counts['max_adjectives'])
    while rand not in id_list:
        rand = random.randint(1, counts['max_adjectives'])
    query = "select * from suradjs where adj_id = {0}".format(rand)
    cursor.execute(query)
    result = cursor.fetchone()
    # cursor.close()
    return result[1]
def __get_name(counts):
    """Fetch one random name string from the ``surnames`` table.

    :param counts: table row counts; ``counts['max_names']`` bounds the draw.
    :return: the name text (column 1 of the fetched row).
    """
    cursor = CONN.cursor()
    check_query = "select name_id from surnames"
    cursor.execute(check_query)
    check_result = cursor.fetchall()
    id_list = []
    for row in check_result:
        id_list.append(row[0])
    # Re-draw until we hit an id that exists (ids may be sparse).
    rand = random.randint(1, counts['max_names'])
    while rand not in id_list:
        rand = random.randint(1, counts['max_names'])
    query = "select * from surnames where name_id = {0}".format(rand)
    cursor.execute(query)
    result = cursor.fetchone()
    # cursor.close()
    return result[1]
def __get_table_limits():
    """Count the rows in each lookup table.

    :return: dict whose keys match those actually read elsewhere in the
        module (``max_adjectives``, ``max_names``, ``max_nouns``,
        ``max_sen``, ``max_fau``, ``max_verb``); values are row counts
        used to bound the random-id draws.
    """
    # (table, result key) pairs.  The original pre-seeded the dict with
    # keys like 'max_sentences'/'max_faults'/'max_verbs' that were never
    # filled in and never read -- those dead keys are removed.
    tables = (
        ('suradjs', 'max_adjectives'),
        ('surnames', 'max_names'),
        ('surnouns', 'max_nouns'),
        ('sursentences', 'max_sen'),
        ('surfaults', 'max_fau'),
        ('surverbs', 'max_verb'),
    )
    cursor = CONN.cursor()
    table_counts = {}
    for table, key in tables:
        cursor.execute('SELECT count(*) FROM {0}'.format(table))
        table_counts[key] = cursor.fetchone()[0]
    return table_counts
def __process_sentence(sentence_tuple, counts):
    """Expand a raw template row into its final sentence text.

    :param sentence_tuple: database row; index 2 holds the template
        string (the other fields are flags/ids).
    :param counts: dict of table row counts used to bound random draws.
    :return: the fully substituted sentence string.
    """
    sentence = sentence_tuple[2]
    # now we start replacing words one type at a time...
    sentence = __replace_verbs(sentence, counts)
    sentence = __replace_nouns(sentence, counts)
    sentence = ___replace_adjective_maybe(sentence, counts)
    sentence = __replace_adjective(sentence, counts)
    sentence = __replace_names(sentence, counts)
    # here we perform a check to see if we need to use A or AN depending on the
    # first letter of the following word...
    sentence = __replace_an(sentence)
    # replace the new repeating segments
    sentence = __replace_repeat(sentence)
    # now we will read, choose and substitute each of the RANDOM sentence tuples
    sentence = __replace_random(sentence)
    # now we are going to choose whether to capitalize words/sentences or not
    sentence = __replace_capitalise(sentence)
    # here we will choose whether to capitalize all words in the sentence
    sentence = __replace_capall(sentence)
    # check for appropriate spaces in the correct places.
    sentence = __check_spaces(sentence)
    return sentence
def __replace_verbs(sentence, counts):
    """Substitute every '#VERB' placeholder with a random verb.

    :param sentence: template string (may be None; returned unchanged).
    :param counts: table row counts for the random draw.
    """
    if sentence is not None:
        while sentence.find('#VERB') != -1:
            sentence = sentence.replace('#VERB', str(__get_verb(counts)), 1)
            if sentence.find('#VERB') == -1:
                return sentence
        return sentence
    else:
        return sentence
def __replace_nouns(sentence, counts):
    """Substitute every '#NOUN' placeholder with a random noun.

    :param sentence: template string (may be None; returned unchanged).
    :param counts: table row counts for the random draw.
    """
    if sentence is not None:
        while sentence.find('#NOUN') != -1:
            sentence = sentence.replace('#NOUN', str(__get_noun(counts)), 1)
            if sentence.find('#NOUN') == -1:
                return sentence
        return sentence
    else:
        return sentence
def ___replace_adjective_maybe(sentence, counts):
    """Randomly substitute or drop each '#ADJECTIVE_MAYBE' placeholder.

    A coin flip decides whether the placeholder becomes a random
    adjective (with a leading space) or disappears entirely.

    :param sentence: template string (may be None; returned unchanged).
    :param counts: table row counts for the random draw.
    """
    # NOTE(review): the coin is flipped ONCE per call, so every marker in
    # the same sentence gets the same keep/drop decision -- confirm that
    # is intended rather than a per-marker flip.
    random_decision = random.randint(0, 1)
    if sentence is not None:
        while sentence.find('#ADJECTIVE_MAYBE') != -1:
            if random_decision % 2 == 0:
                sentence = sentence.replace('#ADJECTIVE_MAYBE',
                                            ' ' + str(__get_adjective(counts)), 1)
            elif random_decision % 2 != 0:
                sentence = sentence.replace('#ADJECTIVE_MAYBE', '', 1)
            if sentence.find('#ADJECTIVE_MAYBE') == -1:
                return sentence
        return sentence
    else:
        return sentence
def __replace_names(sentence, counts):
    """Substitute every '#NAME' placeholder with a random name.

    :param sentence: template string (may be None; returned unchanged).
    :param counts: table row counts for the random draw.
    """
    if sentence is not None:
        while sentence.find('#NAME') != -1:
            sentence = sentence.replace('#NAME', str(__get_name(counts)), 1)
            if sentence.find('#NAME') == -1:
                return sentence
        return sentence
    else:
        return sentence
def __replace_an(sentence):
    """Substitute every '#AN' placeholder with 'a' or 'an'.

    The article is chosen by peeking at the first letter of the word that
    follows the placeholder: a vowel gets 'an', anything else gets 'a'.

    :param sentence: template string (may be None; returned unchanged).
    :return: the sentence with all '#AN' markers resolved.
    """
    if sentence is None:
        return sentence
    while sentence.find('#AN') != -1:
        # '#AN' is 3 chars; +4 skips the following space to land on the
        # first letter of the next word.
        an_index = sentence.find('#AN') + 4
        # Guard against '#AN' at the very end of the template -- the
        # original indexed past the string (IndexError); default to 'a'.
        if an_index < len(sentence) and sentence[an_index] in 'aeiouAEIOU':
            sentence = sentence.replace('#AN', 'an', 1)
        else:
            sentence = sentence.replace('#AN', 'a', 1)
    return sentence
def __replace_random(sentence):
    """Resolve every '#RANDOM[n,opt1,...,optn]' marker to one option.

    The first comma-separated field is the option count ``n``; one of the
    ``n`` following fields is chosen uniformly at random.

    :param sentence: template string (may be None; returned unchanged).
    :return: the sentence with all '#RANDOM' markers substituted.
    """
    if sentence is None:
        return sentence
    while sentence.find('#RANDOM') != -1:
        random_index = sentence.find('#RANDOM')
        start_index = random_index + 8          # skip '#RANDOM['
        end_index = sentence.find(']')          # first closing bracket
        # The original guarded on ``sentence.find(...) is not None`` --
        # str.find returns an int, never None, so that test was always
        # true and has been dropped.
        sub_list = sentence[start_index:end_index].split(',')
        choice = random.randint(1, int(sub_list[0]))
        to_be_replaced = sentence[random_index:end_index + 1]
        sentence = sentence.replace(to_be_replaced, sub_list[choice], 1)
    return sentence
def __replace_repeat(sentence):
    """Expand '#DEFINE_REPEAT[key,text]' / '#REPEAT[key]' marker pairs.

    Allows repeating random-elements such as in 'Ten green bottles' type
    sentences: each DEFINE stores text under a key, and each later
    '#REPEAT[key]' is replaced with that stored text.

    :param sentence: template string (may be None; returned unchanged).
    """
    ######### USE SENTENCE_ID 47 for testing!
    repeat_dict = {}
    if sentence is not None:
        while sentence.find('#DEFINE_REPEAT') != -1:
            begin_index = sentence.find('#DEFINE_REPEAT')
            start_index = begin_index + 15  # skip '#DEFINE_REPEAT['
            end_index = sentence.find(']')
            # NOTE(review): str.find returns an int, never None, so this
            # guard is always true.
            if sentence.find('#DEFINE_REPEAT') is not None:
                sub_list = sentence[start_index:end_index].split(',')
                choice = sub_list[0]
                repeat_text = sub_list[1]
                repeat_dict[choice] = repeat_text
                # Strip the whole definition marker from the sentence.
                sentence = sentence.replace(sentence[begin_index:end_index + 1], '', 1)
        while sentence.find('#REPEAT') != -1:
            if sentence.find('#REPEAT') is not None:
                repeat_begin_index = sentence.find('#REPEAT')
                repeat_start_index = repeat_begin_index + 8  # skip '#REPEAT['
                # by searching from repeat_start_index below we don't
                # encounter dodgy bracket-matching errors.
                repeat_end_index = sentence.find(']', repeat_start_index)
                repeat_index = sentence[repeat_start_index:repeat_end_index]
                # NOTE(review): if the key was never defined, nothing is
                # replaced and this loop never terminates -- confirm
                # templates always define their keys.
                if repeat_index in repeat_dict:
                    sentence = sentence.replace(sentence[repeat_begin_index:repeat_end_index + 1],
                                                str(repeat_dict[repeat_index]))
            if sentence.find('#REPEAT') == -1:
                return sentence
        return sentence
    else:
        return sentence
def __replace_capitalise(sentence):
    """Capitalise the word following each '#CAPITALISE' marker.

    Fixes two defects in the original: it referenced the undefined name
    ``_sentence`` (NameError as soon as a marker was present), and a
    non-None sentence without any marker fell off the end of the
    function, returning None.

    :param sentence: template string (may be None; returned unchanged).
    :return: the sentence with all '#CAPITALISE' markers applied.
    """
    if sentence is None:
        return sentence
    while sentence.find('#CAPITALISE') != -1:
        cap_index = sentence.find('#CAPITALISE')
        part1 = sentence[:cap_index]
        # '#CAPITALISE ' is 12 characters long, so part2 is the first
        # letter of the following word.
        part2 = sentence[cap_index + 12:cap_index + 13]
        part3 = sentence[cap_index + 13:]
        if part2 in "abcdefghijklmnopqrstuvwxyz":
            sentence = part1 + part2.capitalize() + part3
        else:
            sentence = part1 + part2 + part3
    return sentence
def __replace_capall(sentence):
    """Upper-case the whole sentence when a '#CAPALL' marker is present.

    Fixes two defects in the original: it looped forever when '#CAPALL'
    was not followed by a space (only '#CAPALL ' was ever removed), and a
    non-None sentence without the marker fell off the end of the
    function, returning None.

    :param sentence: template string (may be None; returned unchanged).
    """
    if sentence is None:
        return sentence
    while '#CAPALL' in sentence:
        sentence = sentence.upper()
        # Remove the marker whether or not a space follows it.
        if '#CAPALL ' in sentence:
            sentence = sentence.replace('#CAPALL ', '', 1)
        else:
            sentence = sentence.replace('#CAPALL', '', 1)
    return sentence
def __check_spaces(sentence):
    """Normalise whitespace in the finished sentence.

    Splits on whitespace runs and rejoins with single spaces, appending
    an extra space to words ending in '.', '!' or '?' so sentence breaks
    get two spaces (except at the very end, which is stripped).

    :param sentence: rendered sentence (None -> returns None implicitly).
    """
    if sentence is not None:
        words = sentence.split()
        new_sentence = ''
        for (i, word) in enumerate(words):
            if word[-1] in set('.!?'):
                word += ' '
            new_word = ''.join(word)
            new_sentence += ' ' + new_word
        # remove any trailing whitespace
        new_sentence = new_sentence.strip()
        return new_sentence
|
Morrolan/surrealism
|
surrealism.py
|
__replace_names
|
python
|
def __replace_names(sentence, counts):
if sentence is not None:
while sentence.find('#NAME') != -1:
sentence = sentence.replace('#NAME', str(__get_name(counts)), 1)
if sentence.find('#NAME') == -1:
return sentence
return sentence
else:
return sentence
|
Lets find and replace all instances of #NAME
:param _sentence:
:param counts:
|
train
|
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L641-L656
| null |
#!/usr/bin/env python
#############################################################################
# surrealism.py - Surreal sentence and error message generator
# Copyright (C) 2014 Ian Havelock
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#
# This is a derivative work (used with permission) from www.ravenblack.net
# Credit also goes to Kevan Davis on whose work the surrealism generator at
# Ravenblack.net is based on...
#############################################################################
__all__ = ['show_faults', 'show_sentences', 'getfault', 'getsentence', 'version',
'sentence_test', 'fault_test', 'show_sentences', 'show_faults']
# IMPORTS ###################################################################
import sqlite3
import random
import pkg_resources
# PARTICULAR IMPORTS ########################################################
from pkg_resources import resource_filename
# CONSTANTS #################################################################
CONN = sqlite3.connect(resource_filename(__name__, 'surrealism.sqlite'))
# VARIABLES #################################################################
# CLASSES ###################################################################
#############################################################################
# EXTERNAL METHODS BELOW
def version():
    """Return the installed version string of the ``surrealism`` package."""
    return pkg_resources.require('surrealism')[0].version
def showfaults():
    """Backwards-compatible alias for :func:`show_faults`.

    :return: whatever :func:`show_faults` returns.
    """
    output = show_faults()
    return output
def show_faults():
    """Return all valid/active faults ordered by ID.

    :return: list of ``(fault_id, fault_template)`` tuples.
    """
    cursor = CONN.cursor()
    query = "select fau_id, fault from surfaults where fau_is_valid = 'y' order by fau_id asc"
    cursor.execute(query)
    result = cursor.fetchall()
    return result
def showsentences():
    """Deprecated alias for :func:`show_sentences`, kept for backwards compatibility."""
    return show_sentences()
def show_sentences():
    """List every active sentence template.

    :return: dict mapping sentence id -> sentence template.
    """
    cursor = CONN.cursor()
    cursor.execute(
        "select sen_id, sentence from sursentences "
        "where sen_is_valid = 'y' order by sen_id asc"
    )
    return {sen_id: template for sen_id, template in cursor.fetchall()}
def faulttest():
    """Deprecated alias for :func:`fault_test`, kept for backwards compatibility."""
    return fault_test()
def fault_test():
    """Render one instance of every fault template, for testing purposes.

    :return: list of ``(fault_id, rendered_text)`` tuples; disabled rows
        ('n' flag) get a placeholder message instead of rendered text.
    """
    counts = __get_table_limits()
    results = []
    outcome = None
    for fid in range(1, counts['max_fau'] + 1):
        fault = __get_fault(counts, fault_id=fid)
        row_id = fault[1]
        if fault[0] == 'n':
            outcome = "Fault is DISABLED - ignoring..."
        if fault[0] == 'y':
            outcome = __process_sentence(fault, counts)
        results.append((row_id, outcome))
    return results
def sentencetest():
    """Deprecated alias for :func:`sentence_test`, kept for backwards compatibility."""
    return sentence_test()
def sentence_test():
    """Render one random version of every sentence template, for testing.

    :return: list of ``(sentence_id, rendered_text)`` tuples; disabled rows
        ('n' flag) get a placeholder message instead of rendered text.
    """
    counts = __get_table_limits()
    results = []
    outcome = None
    for sid in range(1, counts['max_sen'] + 1):
        sentence = __get_sentence(counts, sentence_id=sid)
        row_id = sentence[1]
        if sentence[0] == 'n':
            outcome = "Sentence is DISABLED - ignoring..."
        if sentence[0] == 'y':
            outcome = __process_sentence(sentence, counts)
        results.append((row_id, outcome))
    return results
def getfault(fault_id=None):
    """Deprecated alias for :func:`get_fault`, kept for backwards compatibility."""
    return get_fault(fault_id)
def get_fault(fault_id=None):
    """Retrieve a randomly-generated error message as a unicode string.
    :param fault_id:
    Allows you to optionally specify an integer representing the fault_id
    from the database table. This allows you to retrieve a specific fault
    each time, albeit with different keywords."""
    counts = __get_table_limits()
    result = None
    id_ = 0
    # Normalise fault_id to a usable integer id.
    # NOTE(review): isinstance() never raises here, so this except clause
    # looks unreachable — confirm before removing.
    try:
        if isinstance(fault_id, int):
            id_ = fault_id
        elif isinstance(fault_id, float):
            print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
            id_ = round(fault_id)
        else:
            # No usable id supplied: pick one at random.
            id_ = random.randint(1, counts['max_fau'])
    except ValueError:
        print("ValueError: Incorrect parameter type detected.")
    # Clamp out-of-range requests to the highest known fault id.
    if id_ <= counts['max_fau']:
        fault = __get_fault(counts, fault_id=id_)
    else:
        print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_fau'])))
        id_ = counts['max_fau']
        fault = __get_fault(counts, fault_id=id_)
    if fault is not None:
        # Re-roll while the selected row is disabled ('n' flag).
        # NOTE(review): passing None when id_ is not None looks inverted,
        # but matches the deliberate comment in get_sentence — a disabled
        # explicit request falls back to a random pick. Confirm intent.
        # NOTE(review): if __get_fault ever returns None mid-loop this
        # raises TypeError on fault[0] — verify ids are contiguous enough.
        while fault[0] == 'n':
            if id_ is not None:
                fault = __get_fault(counts, None)
            else:
                fault = __get_fault(counts, id_)
        if fault[0] == 'y':
            result = __process_sentence(fault, counts)
        return result
    else:
        # Falls through returning None implicitly.
        print('ValueError: _fault cannot be None.')
def getsentence(sentence_id=None):
    """Deprecated alias for :func:`get_sentence`, kept for backwards compatibility."""
    return get_sentence(sentence_id)
def get_sentence(sentence_id=None):
    """Retrieve a randomly-generated sentence as a unicode string.
    :param sentence_id:
    Allows you to optionally specify an integer representing the sentence_id
    from the database table. This allows you to retrieve a specific
    sentence each time, albeit with different keywords."""
    counts = __get_table_limits()
    result = None
    id_ = 0
    # Normalise sentence_id to a usable integer id.
    # NOTE(review): isinstance() never raises here, so this except clause
    # looks unreachable — confirm before removing.
    try:
        if isinstance(sentence_id, int):
            id_ = sentence_id
        elif isinstance(sentence_id, float):
            print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
            id_ = round(sentence_id)
        else:
            # No usable id supplied: pick one at random.
            id_ = random.randint(1, counts['max_sen'])
    except ValueError:
        print("ValueError: Incorrect parameter type detected.")
    # Clamp out-of-range requests to the highest known sentence id.
    if id_ <= counts['max_sen']:
        sentence = __get_sentence(counts, sentence_id=id_)
    else:
        print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_sen'])))
        id_ = counts['max_sen']
        sentence = __get_sentence(counts, sentence_id=id_)
    if sentence is not None:
        # Re-roll while the selected row is disabled ('n' flag).
        while sentence[0] == 'n':
            if id_ is not None:
                # here we deliberately pass 'None' to __get_sentence so a
                # disabled explicit request falls back to a random pick
                sentence = __get_sentence(counts, None)
            else:
                sentence = __get_sentence(counts, id_)
        if sentence[0] == 'y':
            result = __process_sentence(sentence, counts)
        return result
    else:
        # Falls through returning None implicitly.
        print('ValueError: _sentence cannot be None.')
#############################################################################
# INTERNAL METHODS BELOW
def __get_fault(counts, fault_id=None):
    """Fetch one row from the surfaults table.

    :param counts: dict of table row counts from __get_table_limits().
    :param fault_id: optional int selecting a specific fault row; any
        non-int value (including None) selects a random existing id.
    :return: the full fault row tuple, or None if fault_id does not exist.
    """
    cursor = CONN.cursor()
    # Collect the ids that actually exist so random picks can be validated
    # in O(1) — the id sequence may have gaps.
    cursor.execute("select fau_id from surfaults")
    valid_ids = {row[0] for row in cursor.fetchall()}
    if type(fault_id) is int:
        id_to_fetch = fault_id
    else:
        # Re-roll until an existing id is hit.  (Previously a non-int,
        # non-None fault_id left the id unset and produced a broken
        # "fau_id = None" query.)
        id_to_fetch = random.randint(1, counts['max_fau'])
        while id_to_fetch not in valid_ids:
            id_to_fetch = random.randint(1, counts['max_fau'])
    # Parameterized query instead of string formatting (safer, idiomatic).
    cursor.execute("select * from surfaults where fau_id = ?", (id_to_fetch,))
    return cursor.fetchone()
def __get_sentence(counts, sentence_id=None):
    """Fetch one row from the sursentences table.

    :param counts: dict of table row counts from __get_table_limits().
    :param sentence_id: optional int selecting a specific sentence row; any
        non-int value (including None) selects a random existing id.
    :return: the full sentence row tuple, or None if sentence_id does not exist.
    """
    cursor = CONN.cursor()
    # Collect the ids that actually exist so random picks can be validated
    # in O(1) — the id sequence may have gaps.
    cursor.execute("select sen_id from sursentences")
    valid_ids = {row[0] for row in cursor.fetchall()}
    if type(sentence_id) is int:
        id_to_fetch = sentence_id
    else:
        # Re-roll until an existing id is hit.  (Previously a non-int,
        # non-None sentence_id left the id unset and produced a broken
        # "sen_id = None" query.)
        id_to_fetch = random.randint(1, counts['max_sen'])
        while id_to_fetch not in valid_ids:
            id_to_fetch = random.randint(1, counts['max_sen'])
    # Parameterized query instead of string formatting (safer, idiomatic).
    cursor.execute("select * from sursentences where sen_id = ?", (id_to_fetch,))
    return cursor.fetchone()
def __get_verb(counts):
    """Return one random verb string from the surverbs table.

    :param counts: dict of table row counts from __get_table_limits().
    """
    cursor = CONN.cursor()
    # Existing ids as a set: O(1) membership while re-rolling over gaps.
    cursor.execute("select verb_id from surverbs")
    valid_ids = {row[0] for row in cursor.fetchall()}
    rand = random.randint(1, counts['max_verb'])
    while rand not in valid_ids:
        rand = random.randint(1, counts['max_verb'])
    # Parameterized query instead of string formatting (safer, idiomatic).
    cursor.execute("select * from surverbs where verb_id = ?", (rand,))
    # Column 1 holds the verb text.
    return cursor.fetchone()[1]
def __get_noun(counts):
    """Return one random noun string from the surnouns table.

    :param counts: dict of table row counts from __get_table_limits().
    """
    cursor = CONN.cursor()
    # Existing ids as a set: O(1) membership while re-rolling over gaps.
    cursor.execute("select noun_id from surnouns")
    valid_ids = {row[0] for row in cursor.fetchall()}
    rand = random.randint(1, counts['max_nouns'])
    while rand not in valid_ids:
        rand = random.randint(1, counts['max_nouns'])
    # Parameterized query instead of string formatting (safer, idiomatic).
    cursor.execute("select * from surnouns where noun_id = ?", (rand,))
    # Column 1 holds the noun text.
    return cursor.fetchone()[1]
def __get_adjective(counts):
    """Return one random adjective string from the suradjs table.

    :param counts: dict of table row counts from __get_table_limits().
    """
    cursor = CONN.cursor()
    # Existing ids as a set: O(1) membership while re-rolling over gaps.
    cursor.execute("select adj_id from suradjs")
    valid_ids = {row[0] for row in cursor.fetchall()}
    rand = random.randint(1, counts['max_adjectives'])
    while rand not in valid_ids:
        rand = random.randint(1, counts['max_adjectives'])
    # Parameterized query instead of string formatting (safer, idiomatic).
    cursor.execute("select * from suradjs where adj_id = ?", (rand,))
    # Column 1 holds the adjective text.
    return cursor.fetchone()[1]
def __get_name(counts):
    """Return one random name string from the surnames table.

    :param counts: dict of table row counts from __get_table_limits().
    """
    cursor = CONN.cursor()
    # Existing ids as a set: O(1) membership while re-rolling over gaps.
    cursor.execute("select name_id from surnames")
    valid_ids = {row[0] for row in cursor.fetchall()}
    rand = random.randint(1, counts['max_names'])
    while rand not in valid_ids:
        rand = random.randint(1, counts['max_names'])
    # Parameterized query instead of string formatting (safer, idiomatic).
    cursor.execute("select * from surnames where name_id = ?", (rand,))
    # Column 1 holds the name text.
    return cursor.fetchone()[1]
def __get_table_limits():
    """Count the rows of each lookup table.

    :return: dict with the keys the rest of the module actually reads:
        'max_adjectives', 'max_names', 'max_nouns', 'max_sen', 'max_fau'
        and 'max_verb'.  (The old implementation also seeded stale keys
        'max_sentences'/'max_faults'/'max_verbs' that were never written
        and stayed None — those are dropped.)
    """
    # key -> table name; drives one count query per table.
    tables = {
        'max_adjectives': 'suradjs',
        'max_names': 'surnames',
        'max_nouns': 'surnouns',
        'max_sen': 'sursentences',
        'max_fau': 'surfaults',
        'max_verb': 'surverbs',
    }
    cursor = CONN.cursor()
    table_counts = {}
    for key, table in tables.items():
        # Table names come from the literal dict above, never from user
        # input, so formatting them into the SQL is safe here.
        cursor.execute('SELECT count(*) FROM {0}'.format(table))
        table_counts[key] = cursor.fetchone()[0]
    return table_counts
def __process_sentence(sentence_tuple, counts):
    """Render a template row into its final text.

    Runs the template (column 2 of the row) through every placeholder
    substitution pass in a fixed order.  The order matters: word
    substitutions must happen before #AN resolution (which inspects the
    following word), and capitalisation/spacing normalisation runs last.

    :param sentence_tuple: full row from sursentences/surfaults;
        element [2] is the template text.
    :param counts: dict of table row counts from __get_table_limits().
    """
    # pull the actual sentence from the tuple (tuple contains additional data such as ID)
    sentence = sentence_tuple[2]
    # now we start replacing words one type at a time...
    sentence = __replace_verbs(sentence, counts)
    sentence = __replace_nouns(sentence, counts)
    sentence = ___replace_adjective_maybe(sentence, counts)
    sentence = __replace_adjective(sentence, counts)
    sentence = __replace_names(sentence, counts)
    # here we perform a check to see if we need to use A or AN depending on the
    # first letter of the following word...
    sentence = __replace_an(sentence)
    # replace the new repeating segments
    sentence = __replace_repeat(sentence)
    # now we will read, choose and substitute each of the RANDOM sentence tuples
    sentence = __replace_random(sentence)
    # now we are going to choose whether to capitalize words/sentences or not
    sentence = __replace_capitalise(sentence)
    # here we will choose whether to capitalize all words in the sentence
    sentence = __replace_capall(sentence)
    # check for appropriate spaces in the correct places.
    sentence = __check_spaces(sentence)
    return sentence
def __replace_verbs(sentence, counts):
    """Substitute every '#VERB' placeholder with a random verb.

    None input is passed straight through.
    """
    if sentence is None:
        return sentence
    while '#VERB' in sentence:
        sentence = sentence.replace('#VERB', str(__get_verb(counts)), 1)
    return sentence
def __replace_nouns(sentence, counts):
    """Substitute every '#NOUN' placeholder with a random noun.

    None input is passed straight through.
    """
    if sentence is None:
        return sentence
    while '#NOUN' in sentence:
        sentence = sentence.replace('#NOUN', str(__get_noun(counts)), 1)
    return sentence
def ___replace_adjective_maybe(sentence, counts):
    """Expand or drop every '#ADJECTIVE_MAYBE' placeholder.

    A single coin flip (taken once, up front) decides the fate of ALL
    occurrences in the sentence: either each becomes a space plus a random
    adjective, or each is removed.  None input is passed straight through.
    """
    # Flip the coin before the None check, matching the original call order
    # on the module-level RNG.
    keep = random.randint(0, 1) % 2 == 0
    if sentence is None:
        return sentence
    while '#ADJECTIVE_MAYBE' in sentence:
        if keep:
            replacement = ' ' + str(__get_adjective(counts))
        else:
            replacement = ''
        sentence = sentence.replace('#ADJECTIVE_MAYBE', replacement, 1)
    return sentence
def __replace_adjective(sentence, counts):
    """Substitute every '#ADJECTIVE' placeholder with a random adjective.

    None input is passed straight through.
    """
    if sentence is None:
        return sentence
    while '#ADJECTIVE' in sentence:
        sentence = sentence.replace('#ADJECTIVE', str(__get_adjective(counts)), 1)
    return sentence
def __replace_an(sentence):
    """Replace each '#AN' marker with 'a' or 'an'.

    The choice depends on the character four positions past the marker —
    i.e. the first letter of the word after '#AN ' — being a vowel.
    None input is passed straight through.
    """
    if sentence is None:
        return sentence
    while '#AN' in sentence:
        next_letter = sentence[sentence.find('#AN') + 4]
        article = 'an' if next_letter in 'aeiouAEIOU' else 'a'
        sentence = sentence.replace('#AN', article, 1)
    return sentence
def __replace_random(sentence):
    """Expand each '#RANDOM[n,opt1,...,optn]' segment.

    The first comma-separated field is the option count n; one of the
    following n options is picked uniformly at random and substituted for
    the whole bracketed segment.  None input is passed straight through.
    """
    if sentence is None:
        return sentence
    while '#RANDOM' in sentence:
        marker = sentence.find('#RANDOM')
        close = sentence.find(']')
        # Skip '#RANDOM[' (8 chars) to get at the comma-separated payload.
        options = sentence[marker + 8:close].split(',')
        pick = random.randint(1, int(options[0]))
        segment = sentence[marker:close + 1]
        sentence = sentence.replace(segment, options[pick], 1)
    return sentence
def __replace_repeat(sentence):
    """Expand repeating segments ('Ten green bottles' style sentences).

    First pass: each '#DEFINE_REPEAT[key,text]' is recorded into a local
    dict and stripped from the sentence.  Second pass: each '#REPEAT[key]'
    is replaced (ALL occurrences at once — no count limit on replace) by
    the recorded text for that key.

    NOTE(review): if a '#REPEAT[key]' references an undefined key this
    loop never rewrites the marker and spins forever — confirm templates
    always define their keys (see sentence_id 47).

    :param sentence: template text, or None (passed straight through).
    """
    ######### USE SENTENCE_ID 47 for testing!
    repeat_dict = {}
    if sentence is not None:
        while sentence.find('#DEFINE_REPEAT') != -1:
            begin_index = sentence.find('#DEFINE_REPEAT')
            # Skip '#DEFINE_REPEAT[' (15 chars) to reach 'key,text'.
            start_index = begin_index + 15
            end_index = sentence.find(']')
            # NOTE(review): find() returns -1, never None — this guard is
            # always true; kept verbatim.
            if sentence.find('#DEFINE_REPEAT') is not None:
                sub_list = sentence[start_index:end_index].split(',')
                choice = sub_list[0]
                repeat_text = sub_list[1]
                repeat_dict[choice] = repeat_text
                # Strip the whole definition (marker through ']') from the text.
                sentence = sentence.replace(sentence[begin_index:end_index + 1], '', 1)
        while sentence.find('#REPEAT') != -1:
            if sentence.find('#REPEAT') is not None:
                repeat_begin_index = sentence.find('#REPEAT')
                # Skip '#REPEAT[' (8 chars) to reach the key.
                repeat_start_index = repeat_begin_index + 8
                # by searching from repeat_index below we don't encounter dodgy bracket-matching errors.
                repeat_end_index = sentence.find(']', repeat_start_index)
                repeat_index = sentence[repeat_start_index:repeat_end_index]
                if repeat_index in repeat_dict:
                    sentence = sentence.replace(sentence[repeat_begin_index:repeat_end_index + 1],
                                                str(repeat_dict[repeat_index]))
            if sentence.find('#REPEAT') == -1:
                return sentence
        return sentence
    else:
        return sentence
def __replace_capitalise(sentence):
    """Capitalise the word following each '#CAPITALISE ' marker.

    Each iteration drops the marker plus its trailing space (12 chars) and
    upper-cases the next character when it is a lowercase ASCII letter.
    None input is passed straight through.

    Fixes two defects in the original: it referenced an undefined name
    (``_sentence`` — NameError on any sentence containing the marker) and
    it fell off the end returning None whenever the marker was absent,
    unlike every sibling pass which returns the sentence unchanged.
    """
    if sentence is None:
        return sentence
    while '#CAPITALISE' in sentence:
        cap_index = sentence.find('#CAPITALISE')
        before = sentence[:cap_index]
        # Character right after '#CAPITALISE ' (marker is 11 chars + space).
        first_letter = sentence[cap_index + 12:cap_index + 13]
        rest = sentence[cap_index + 13:]
        if first_letter in "abcdefghijklmnopqrstuvwxyz":
            first_letter = first_letter.capitalize()
        # Reassemble without the marker; loop terminates once all are gone.
        sentence = before + first_letter + rest
    return sentence
def __replace_capall(sentence):
    """Upper-case the whole sentence when a '#CAPALL ' marker is present.

    None input is passed straight through.

    Fixes a defect in the original: with no marker present it fell off the
    end and returned None instead of the sentence, unlike its siblings.
    NOTE(review): the replace targets '#CAPALL ' with a trailing space, so
    a marker at the very end of the text would never be removed and this
    would loop forever — behaviour kept verbatim; confirm templates always
    follow the marker with a space.
    """
    if sentence is None:
        return sentence
    while '#CAPALL' in sentence:
        # Upper-casing leaves the marker itself unchanged (already caps).
        sentence = sentence.upper()
        sentence = sentence.replace('#CAPALL ', '', 1)
    return sentence
def __check_spaces(sentence):
    """Normalise spacing in *sentence*.

    Collapses runs of whitespace to single spaces, then appends an extra
    space after any word ending in '.', '!' or '?' so sentence terminators
    are followed by two spaces (except at the very end, which is stripped).
    Returns None for None input.
    """
    if sentence is None:
        return None
    pieces = []
    for word in sentence.split():
        if word[-1] in set('.!?'):
            # Sentence terminator: extra space yields a double gap overall.
            word += ' '
        pieces.append(' ' + word)
    # Leading/trailing whitespace is trimmed from the reassembled text.
    return ''.join(pieces).strip()
|
Morrolan/surrealism
|
surrealism.py
|
__replace_an
|
python
|
def __replace_an(sentence):
if sentence is not None:
while sentence.find('#AN') != -1:
an_index = sentence.find('#AN')
if an_index > -1:
an_index += 4
if sentence[an_index] in 'aeiouAEIOU':
sentence = sentence.replace('#AN', str('an'), 1)
else:
sentence = sentence.replace('#AN', str('a'), 1)
if sentence.find('#AN') == -1:
return sentence
return sentence
else:
return sentence
|
Lets find and replace all instances of #AN
This is a little different, as this depends on whether the next
word starts with a vowel or a consonant.
:param _sentence:
|
train
|
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L659-L683
| null |
#!/usr/bin/env python
#############################################################################
# surrealism.py - Surreal sentence and error message generator
# Copyright (C) 2014 Ian Havelock
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#
# This is a derivative work (used with permission) from www.ravenblack.net
# Credit also goes to Kevan Davis on whose work the surrealism generator at
# Ravenblack.net is based on...
#############################################################################
__all__ = ['show_faults', 'show_sentences', 'getfault', 'getsentence', 'version',
'sentence_test', 'fault_test', 'show_sentences', 'show_faults']
# IMPORTS ###################################################################
import sqlite3
import random
import pkg_resources
# PARTICULAR IMPORTS ########################################################
from pkg_resources import resource_filename
# CONSTANTS #################################################################
CONN = sqlite3.connect(resource_filename(__name__, 'surrealism.sqlite'))
# VARIABLES #################################################################
# CLASSES ###################################################################
#############################################################################
# EXTERNAL METHODS BELOW
def version():
"""Returns the current version of the Surrealism module."""
return pkg_resources.require('surrealism')[0].version
def showfaults():
"""
This exists for backwards compatibility
:return:
"""
output = show_faults()
return output
def show_faults():
"""
Return all valid/active faults ordered by ID to allow the user to pick and choose.
:return: List of Tuples where the Tuple elements are: (fault id, fault template)
"""
cursor = CONN.cursor()
query = "select fau_id, fault from surfaults where fau_is_valid = 'y' order by fau_id asc"
cursor.execute(query)
result = cursor.fetchall()
return result
def showsentences():
"""
This exists for backwards compatibility
:return:
"""
output = show_sentences()
return output
def show_sentences():
"""
Return all valid/active sentences ordered by ID to allow the user to pick and choose.
:return: Dict containing the sentence ID as the key and the sentence structure as the value.
"""
cursor = CONN.cursor()
query = "select sen_id, sentence from sursentences where sen_is_valid = 'y' order by sen_id asc"
cursor.execute(query)
result = cursor.fetchall()
response_dict = {}
for row in result:
response_dict[row[0]] = row[1]
return response_dict
def faulttest():
"""
This exists for backwards compatibility
:return:
"""
output = fault_test()
return output
def fault_test():
"""Returns 1 instance of each programming fault for testing purposes."""
counts = __get_table_limits()
max_num = counts['max_fau']
counter = 0
list_of_tuples = []
fault_result = None
while counter < max_num:
counter += 1
fault = __get_fault(counts, fault_id=counter)
fault_id = fault[1]
if fault[0] == 'n':
fault_result = "Fault is DISABLED - ignoring..."
if fault[0] == 'y':
fault_result = __process_sentence(fault, counts)
list_of_tuples.append((fault_id, fault_result))
return list_of_tuples
def sentencetest():
"""
This exists for backwards compatibility
:return:
"""
output = sentence_test()
return output
def sentence_test():
"""Return 1 random version of each sentence to test sentence structure."""
counts = __get_table_limits()
max_num = counts['max_sen']
counter = 0
list_of_tuples = []
sentence_result = None
while counter < max_num:
counter += 1
sentence = __get_sentence(counts, sentence_id=counter)
sentence_id = sentence[1]
if sentence[0] == 'n':
sentence_result = "Sentence is DISABLED - ignoring..."
if sentence[0] == 'y':
sentence_result = __process_sentence(sentence, counts)
list_of_tuples.append((sentence_id, sentence_result))
return list_of_tuples
def getfault(fault_id=None):
output = get_fault(fault_id)
return output
def get_fault(fault_id=None):
"""Retrieve a randomly-generated error message as a unicode string.
:param fault_id:
Allows you to optionally specify an integer representing the fault_id
from the database table. This allows you to retrieve a specific fault
each time, albeit with different keywords."""
counts = __get_table_limits()
result = None
id_ = 0
try:
if isinstance(fault_id, int):
id_ = fault_id
elif isinstance(fault_id, float):
print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
id_ = round(fault_id)
else:
id_ = random.randint(1, counts['max_fau'])
except ValueError:
print("ValueError: Incorrect parameter type detected.")
if id_ <= counts['max_fau']:
fault = __get_fault(counts, fault_id=id_)
else:
print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_fau'])))
id_ = counts['max_fau']
fault = __get_fault(counts, fault_id=id_)
if fault is not None:
while fault[0] == 'n':
if id_ is not None:
fault = __get_fault(counts, None)
else:
fault = __get_fault(counts, id_)
if fault[0] == 'y':
result = __process_sentence(fault, counts)
return result
else:
print('ValueError: _fault cannot be None.')
def getsentence(sentence_id=None):
output = get_sentence(sentence_id)
return output
def get_sentence(sentence_id=None):
"""Retrieve a randomly-generated sentence as a unicode string.
:param sentence_id:
Allows you to optionally specify an integer representing the sentence_id
from the database table. This allows you to retrieve a specific
sentence each time, albeit with different keywords."""
counts = __get_table_limits()
result = None
id_ = 0
try:
if isinstance(sentence_id, int):
id_ = sentence_id
elif isinstance(sentence_id, float):
print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
id_ = round(sentence_id)
else:
id_ = random.randint(1, counts['max_sen'])
except ValueError:
print("ValueError: Incorrect parameter type detected.")
if id_ <= counts['max_sen']:
sentence = __get_sentence(counts, sentence_id=id_)
else:
print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_sen'])))
id_ = counts['max_sen']
sentence = __get_sentence(counts, sentence_id=id_)
if sentence is not None:
while sentence[0] == 'n':
if id_ is not None:
# here we delibrately pass 'None' to __getsentence__ as it will
sentence = __get_sentence(counts, None)
else:
sentence = __get_sentence(counts, id_)
if sentence[0] == 'y':
result = __process_sentence(sentence, counts)
return result
else:
print('ValueError: _sentence cannot be None.')
#############################################################################
# INTERNAL METHODS BELOW
def __get_fault(counts, fault_id=None):
"""Let's fetch a random fault that we then need to substitute bits of...
:param counts:
:param fault_id:
"""
# First of all we need a cursor and a query to retrieve our ID's
cursor = CONN.cursor()
check_query = "select fau_id from surfaults"
# Now we fetch the result of the query and save it into check_result
cursor.execute(check_query)
check_result = cursor.fetchall()
# declare an empty list to be populated below
id_list = []
id_to_fetch = None
for row in check_result:
id_list.append(row[0])
if fault_id is not None:
if type(fault_id) is int:
id_to_fetch = fault_id
else:
id_to_fetch = random.randint(1, counts['max_fau'])
while id_to_fetch not in id_list:
id_to_fetch = random.randint(1, counts['max_fau'])
query = ("select * from surfaults where fau_id = {0}".format(id_to_fetch))
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result
def __get_sentence(counts, sentence_id=None):
"""Let's fetch a random sentence that we then need to substitute bits of...
@
:param counts:
:param sentence_id:
"""
# First of all we need a cursor and a query to retrieve our ID's
cursor = CONN.cursor()
check_query = "select sen_id from sursentences"
# Now we fetch the result of the query and save it into check_result
cursor.execute(check_query)
check_result = cursor.fetchall()
# declare an empty list to be populated below
id_list = []
id_to_fetch = None
# Populate the id_list variable with all of the ID's we retrieved from the database query.
for row in check_result:
id_list.append(row[0])
if sentence_id is not None:
if type(sentence_id) is int:
id_to_fetch = sentence_id
else:
id_to_fetch = random.randint(1, counts['max_sen'])
while id_to_fetch not in id_list:
id_to_fetch = random.randint(1, counts['max_sen'])
query = ("select * from sursentences where sen_id = {0}".format(id_to_fetch))
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result
def __get_verb(counts):
"""Let's fetch a VERB
:param counts:
"""
cursor = CONN.cursor()
check_query = "select verb_id from surverbs"
cursor.execute(check_query)
check_result = cursor.fetchall()
id_list = []
for row in check_result:
id_list.append(row[0])
rand = random.randint(1, counts['max_verb'])
while rand not in id_list:
rand = random.randint(1, counts['max_verb'])
query = "select * from surverbs where verb_id = {0}".format(rand)
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result[1]
def __get_noun(counts):
"""Let's fetch a NOUN from the database...
:param counts:
"""
cursor = CONN.cursor()
check_query = "select noun_id from surnouns"
cursor.execute(check_query)
check_result = cursor.fetchall()
id_list = []
for row in check_result:
id_list.append(row[0])
rand = random.randint(1, counts['max_nouns'])
while rand not in id_list:
rand = random.randint(1, counts['max_nouns'])
query = "select * from surnouns where noun_id = {0}".format(rand)
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result[1]
def __get_adjective(counts):
"""Let's fetch an ADJECTIVE from the database...
:param counts:
"""
cursor = CONN.cursor()
check_query = "select adj_id from suradjs"
cursor.execute(check_query)
check_result = cursor.fetchall()
id_list = []
for row in check_result:
id_list.append(row[0])
rand = random.randint(1, counts['max_adjectives'])
while rand not in id_list:
rand = random.randint(1, counts['max_adjectives'])
query = "select * from suradjs where adj_id = {0}".format(rand)
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result[1]
def __get_name(counts):
"""Let's fetch a NAME from the database...
:param counts:"""
cursor = CONN.cursor()
check_query = "select name_id from surnames"
cursor.execute(check_query)
check_result = cursor.fetchall()
id_list = []
for row in check_result:
id_list.append(row[0])
rand = random.randint(1, counts['max_names'])
while rand not in id_list:
rand = random.randint(1, counts['max_names'])
query = "select * from surnames where name_id = {0}".format(rand)
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result[1]
def __get_table_limits():
"""Here we simply take a count of each of the database tables so we know our
upper limits for our random number calls then return a dictionary of them
to the calling function..."""
table_counts = {
'max_adjectives': None,
'max_names': None,
'max_nouns': None,
'max_sentences': None,
'max_faults': None,
'max_verbs': None
}
cursor = CONN.cursor()
cursor.execute('SELECT count(*) FROM suradjs')
table_counts['max_adjectives'] = cursor.fetchone()
table_counts['max_adjectives'] = table_counts['max_adjectives'][0]
cursor.execute('SELECT count(*) FROM surnames')
table_counts['max_names'] = cursor.fetchone()
table_counts['max_names'] = table_counts['max_names'][0]
cursor.execute('SELECT count(*) FROM surnouns')
table_counts['max_nouns'] = cursor.fetchone()
table_counts['max_nouns'] = table_counts['max_nouns'][0]
cursor.execute('SELECT count(*) FROM sursentences')
table_counts['max_sen'] = cursor.fetchone()
table_counts['max_sen'] = table_counts['max_sen'][0]
cursor.execute('SELECT count(*) FROM surfaults')
table_counts['max_fau'] = cursor.fetchone()
table_counts['max_fau'] = table_counts['max_fau'][0]
cursor.execute('SELECT count(*) FROM surverbs')
table_counts['max_verb'] = cursor.fetchone()
table_counts['max_verb'] = table_counts['max_verb'][0]
return table_counts
def __process_sentence(sentence_tuple, counts):
"""pull the actual sentence from the tuple (tuple contains additional data such as ID)
:param _sentence_tuple:
:param counts:
"""
sentence = sentence_tuple[2]
# now we start replacing words one type at a time...
sentence = __replace_verbs(sentence, counts)
sentence = __replace_nouns(sentence, counts)
sentence = ___replace_adjective_maybe(sentence, counts)
sentence = __replace_adjective(sentence, counts)
sentence = __replace_names(sentence, counts)
# here we perform a check to see if we need to use A or AN depending on the
# first letter of the following word...
sentence = __replace_an(sentence)
# replace the new repeating segments
sentence = __replace_repeat(sentence)
# now we will read, choose and substitute each of the RANDOM sentence tuples
sentence = __replace_random(sentence)
# now we are going to choose whether to capitalize words/sentences or not
sentence = __replace_capitalise(sentence)
# here we will choose whether to capitalize all words in the sentence
sentence = __replace_capall(sentence)
# check for appropriate spaces in the correct places.
sentence = __check_spaces(sentence)
return sentence
def __replace_verbs(sentence, counts):
"""Lets find and replace all instances of #VERB
:param _sentence:
:param counts:
"""
if sentence is not None:
while sentence.find('#VERB') != -1:
sentence = sentence.replace('#VERB', str(__get_verb(counts)), 1)
if sentence.find('#VERB') == -1:
return sentence
return sentence
else:
return sentence
def __replace_nouns(sentence, counts):
    """Substitute every '#NOUN' marker with a randomly chosen noun.

    :param sentence: template text, possibly None.
    :param counts: table-limit dict consumed by __get_noun.
    :return: text with all '#NOUN' markers resolved; None passes through.
    """
    if sentence is None:
        return sentence
    # One marker per pass: each occurrence draws a fresh noun.
    while sentence.find('#NOUN') != -1:
        sentence = sentence.replace('#NOUN', str(__get_noun(counts)), 1)
    return sentence
def ___replace_adjective_maybe(sentence, counts):
    """Randomly resolve each '#ADJECTIVE_MAYBE' marker.

    Each marker independently either expands to a space plus a random
    adjective, or disappears entirely.  The previous version drew a single
    random decision before the loop, so every marker in one sentence
    shared the same fate; the coin is now flipped per occurrence.

    :param sentence: template text, possibly None.
    :param counts: table-limit dict consumed by __get_adjective.
    :return: text with all markers resolved; None passes through.
    """
    if sentence is None:
        return sentence
    while '#ADJECTIVE_MAYBE' in sentence:
        # Per-occurrence coin flip: a sentence can now mix outcomes.
        if random.randint(0, 1) == 0:
            # Leading space matches the original insertion format.
            sentence = sentence.replace('#ADJECTIVE_MAYBE',
                                        ' ' + str(__get_adjective(counts)), 1)
        else:
            sentence = sentence.replace('#ADJECTIVE_MAYBE', '', 1)
    return sentence
def __replace_adjective(sentence, counts):
    """Substitute every '#ADJECTIVE' marker with a random adjective.

    :param sentence: template text, possibly None.
    :param counts: table-limit dict consumed by __get_adjective.
    :return: text with all '#ADJECTIVE' markers resolved; None passes
        through.
    """
    if sentence is None:
        return sentence
    while '#ADJECTIVE' in sentence:
        sentence = sentence.replace('#ADJECTIVE',
                                    str(__get_adjective(counts)), 1)
    return sentence
def __replace_names(sentence, counts):
    """Substitute every '#NAME' marker with a randomly chosen name.

    :param sentence: template text, possibly None.
    :param counts: table-limit dict consumed by __get_name.
    :return: text with all '#NAME' markers resolved; None passes through.
    """
    if sentence is None:
        return sentence
    while '#NAME' in sentence:
        sentence = sentence.replace('#NAME', str(__get_name(counts)), 1)
    return sentence
def __replace_random(sentence):
    """Resolve every '#RANDOM[n,opt1,...,optn]' directive.

    The directive lists its option count first, then the options; one
    option is picked uniformly at random and substituted for the whole
    directive.

    Fixes from the previous version: the ``find(...) is not None`` guard
    was dead (``str.find`` returns an int, never None), and the closing
    bracket was searched from index 0, so an unrelated earlier ']' could
    truncate the option list.  The bracket is now searched from the
    marker onwards.

    :param sentence: template text, possibly None.
    :return: text with all directives resolved; None passes through.
    """
    if sentence is None:
        return sentence
    while '#RANDOM' in sentence:
        marker_start = sentence.find('#RANDOM')
        body_start = marker_start + 8          # skip '#RANDOM['
        # Search from the option list itself so earlier brackets in the
        # sentence cannot confuse matching.
        body_end = sentence.find(']', body_start)
        options = sentence[body_start:body_end].split(',')
        # options[0] is the declared option count; pick 1..count.
        pick = random.randint(1, int(options[0]))
        directive = sentence[marker_start:body_end + 1]
        sentence = sentence.replace(directive, options[pick], 1)
    return sentence
def __replace_repeat(sentence):
    """Expand '#DEFINE_REPEAT[key,text]' definitions and the matching
    '#REPEAT[key]' references, enabling 'Ten green bottles' style
    repeating sentence segments.

    :param sentence: template text, possibly None.
    :return: text with repeats expanded; None passes through.
    """
    definitions = {}
    if sentence is None:
        return sentence
    # First pass: harvest every definition and strip it from the text.
    while '#DEFINE_REPEAT' in sentence:
        def_start = sentence.find('#DEFINE_REPEAT')
        body_start = def_start + 15           # skip '#DEFINE_REPEAT['
        body_end = sentence.find(']')
        fields = sentence[body_start:body_end].split(',')
        definitions[fields[0]] = fields[1]
        sentence = sentence.replace(sentence[def_start:body_end + 1], '', 1)
    # Second pass: substitute each reference with its stored text.
    while '#REPEAT' in sentence:
        ref_start = sentence.find('#REPEAT')
        key_start = ref_start + 8             # skip '#REPEAT['
        # Searching from the key avoids dodgy bracket-matching errors.
        key_end = sentence.find(']', key_start)
        key = sentence[key_start:key_end]
        if key in definitions:
            # Note: replaces every identical reference in one call.
            sentence = sentence.replace(sentence[ref_start:key_end + 1],
                                        str(definitions[key]))
        if '#REPEAT' not in sentence:
            return sentence
    return sentence
def __replace_capitalise(sentence):
"""here we replace all instances of #CAPITALISE and cap the next word.
############
#NOTE: Buggy as hell, as it doesn't account for words that are already
#capitalized
############
:param _sentence:
"""
if sentence is not None:
while sentence.find('#CAPITALISE') != -1:
cap_index = _sentence.find('#CAPITALISE')
part1 = sentence[:cap_index]
part2 = sentence[cap_index + 12:cap_index + 13]
part3 = sentence[cap_index + 13:]
if part2 in "abcdefghijklmnopqrstuvwxyz":
sentence = part1 + part2.capitalize() + part3
else:
sentence = part1 + part2 + part3
if sentence.find('#CAPITALISE') == -1:
return sentence
else:
return sentence
def __replace_capall(sentence):
"""here we replace all instances of #CAPALL and cap the entire sentence.
Don't believe that CAPALL is buggy anymore as it forces all uppercase OK?
:param _sentence:
"""
# print "\nReplacing CAPITALISE: "
if sentence is not None:
while sentence.find('#CAPALL') != -1:
# _cap_index = _sentence.find('#CAPALL')
sentence = sentence.upper()
sentence = sentence.replace('#CAPALL ', '', 1)
if sentence.find('#CAPALL') == -1:
return sentence
else:
return sentence
def __check_spaces(sentence):
    """Normalise whitespace: single spaces between words, a double space
    after sentence-ending punctuation (. ! ?), and no leading/trailing
    whitespace.

    :param sentence: text to normalise, possibly None.
    :return: the normalised text, or None for None input.
    """
    if sentence is None:
        return None
    pieces = []
    for token in sentence.split():
        # Tokens that end a sentence carry one extra trailing space,
        # which the final join turns into a double space.
        pieces.append(token + ' ' if token[-1] in '.!?' else token)
    return ' '.join(pieces).strip()
|
Morrolan/surrealism
|
surrealism.py
|
__replace_random
|
python
|
def __replace_random(sentence):
sub_list = None
choice = None
if sentence is not None:
while sentence.find('#RANDOM') != -1:
random_index = sentence.find('#RANDOM')
start_index = sentence.find('#RANDOM') + 8
end_index = sentence.find(']')
if sentence.find('#RANDOM') is not None:
sub_list = sentence[start_index:end_index].split(',')
choice = random.randint(1, int(sub_list[0]))
# _sub_list[_choice]
to_be_replaced = sentence[random_index:end_index + 1]
sentence = sentence.replace(to_be_replaced, sub_list[choice], 1)
if sentence.find('#RANDOM') == -1:
return sentence
return sentence
else:
return sentence
|
Lets find and replace all instances of #RANDOM
:param _sentence:
|
train
|
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L686-L716
| null |
#!/usr/bin/env python
#############################################################################
# surrealism.py - Surreal sentence and error message generator
# Copyright (C) 2014 Ian Havelock
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#
# This is a derivative work (used with permission) from www.ravenblack.net
# Credit also goes to Kevan Davis on whose work the surrealism generator at
# Ravenblack.net is based on...
#############################################################################
__all__ = ['show_faults', 'show_sentences', 'getfault', 'getsentence', 'version',
'sentence_test', 'fault_test', 'show_sentences', 'show_faults']
# IMPORTS ###################################################################
import sqlite3
import random
import pkg_resources
# PARTICULAR IMPORTS ########################################################
from pkg_resources import resource_filename
# CONSTANTS #################################################################
CONN = sqlite3.connect(resource_filename(__name__, 'surrealism.sqlite'))
# VARIABLES #################################################################
# CLASSES ###################################################################
#############################################################################
# EXTERNAL METHODS BELOW
def version():
"""Returns the current version of the Surrealism module."""
return pkg_resources.require('surrealism')[0].version
def showfaults():
"""
This exists for backwards compatibility
:return:
"""
output = show_faults()
return output
def show_faults():
"""
Return all valid/active faults ordered by ID to allow the user to pick and choose.
:return: List of Tuples where the Tuple elements are: (fault id, fault template)
"""
cursor = CONN.cursor()
query = "select fau_id, fault from surfaults where fau_is_valid = 'y' order by fau_id asc"
cursor.execute(query)
result = cursor.fetchall()
return result
def showsentences():
"""
This exists for backwards compatibility
:return:
"""
output = show_sentences()
return output
def show_sentences():
"""
Return all valid/active sentences ordered by ID to allow the user to pick and choose.
:return: Dict containing the sentence ID as the key and the sentence structure as the value.
"""
cursor = CONN.cursor()
query = "select sen_id, sentence from sursentences where sen_is_valid = 'y' order by sen_id asc"
cursor.execute(query)
result = cursor.fetchall()
response_dict = {}
for row in result:
response_dict[row[0]] = row[1]
return response_dict
def faulttest():
"""
This exists for backwards compatibility
:return:
"""
output = fault_test()
return output
def fault_test():
"""Returns 1 instance of each programming fault for testing purposes."""
counts = __get_table_limits()
max_num = counts['max_fau']
counter = 0
list_of_tuples = []
fault_result = None
while counter < max_num:
counter += 1
fault = __get_fault(counts, fault_id=counter)
fault_id = fault[1]
if fault[0] == 'n':
fault_result = "Fault is DISABLED - ignoring..."
if fault[0] == 'y':
fault_result = __process_sentence(fault, counts)
list_of_tuples.append((fault_id, fault_result))
return list_of_tuples
def sentencetest():
"""
This exists for backwards compatibility
:return:
"""
output = sentence_test()
return output
def sentence_test():
"""Return 1 random version of each sentence to test sentence structure."""
counts = __get_table_limits()
max_num = counts['max_sen']
counter = 0
list_of_tuples = []
sentence_result = None
while counter < max_num:
counter += 1
sentence = __get_sentence(counts, sentence_id=counter)
sentence_id = sentence[1]
if sentence[0] == 'n':
sentence_result = "Sentence is DISABLED - ignoring..."
if sentence[0] == 'y':
sentence_result = __process_sentence(sentence, counts)
list_of_tuples.append((sentence_id, sentence_result))
return list_of_tuples
def getfault(fault_id=None):
output = get_fault(fault_id)
return output
def get_fault(fault_id=None):
"""Retrieve a randomly-generated error message as a unicode string.
:param fault_id:
Allows you to optionally specify an integer representing the fault_id
from the database table. This allows you to retrieve a specific fault
each time, albeit with different keywords."""
counts = __get_table_limits()
result = None
id_ = 0
try:
if isinstance(fault_id, int):
id_ = fault_id
elif isinstance(fault_id, float):
print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
id_ = round(fault_id)
else:
id_ = random.randint(1, counts['max_fau'])
except ValueError:
print("ValueError: Incorrect parameter type detected.")
if id_ <= counts['max_fau']:
fault = __get_fault(counts, fault_id=id_)
else:
print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_fau'])))
id_ = counts['max_fau']
fault = __get_fault(counts, fault_id=id_)
if fault is not None:
while fault[0] == 'n':
if id_ is not None:
fault = __get_fault(counts, None)
else:
fault = __get_fault(counts, id_)
if fault[0] == 'y':
result = __process_sentence(fault, counts)
return result
else:
print('ValueError: _fault cannot be None.')
def getsentence(sentence_id=None):
output = get_sentence(sentence_id)
return output
def get_sentence(sentence_id=None):
"""Retrieve a randomly-generated sentence as a unicode string.
:param sentence_id:
Allows you to optionally specify an integer representing the sentence_id
from the database table. This allows you to retrieve a specific
sentence each time, albeit with different keywords."""
counts = __get_table_limits()
result = None
id_ = 0
try:
if isinstance(sentence_id, int):
id_ = sentence_id
elif isinstance(sentence_id, float):
print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
id_ = round(sentence_id)
else:
id_ = random.randint(1, counts['max_sen'])
except ValueError:
print("ValueError: Incorrect parameter type detected.")
if id_ <= counts['max_sen']:
sentence = __get_sentence(counts, sentence_id=id_)
else:
print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_sen'])))
id_ = counts['max_sen']
sentence = __get_sentence(counts, sentence_id=id_)
if sentence is not None:
while sentence[0] == 'n':
if id_ is not None:
# here we delibrately pass 'None' to __getsentence__ as it will
sentence = __get_sentence(counts, None)
else:
sentence = __get_sentence(counts, id_)
if sentence[0] == 'y':
result = __process_sentence(sentence, counts)
return result
else:
print('ValueError: _sentence cannot be None.')
#############################################################################
# INTERNAL METHODS BELOW
def __get_fault(counts, fault_id=None):
"""Let's fetch a random fault that we then need to substitute bits of...
:param counts:
:param fault_id:
"""
# First of all we need a cursor and a query to retrieve our ID's
cursor = CONN.cursor()
check_query = "select fau_id from surfaults"
# Now we fetch the result of the query and save it into check_result
cursor.execute(check_query)
check_result = cursor.fetchall()
# declare an empty list to be populated below
id_list = []
id_to_fetch = None
for row in check_result:
id_list.append(row[0])
if fault_id is not None:
if type(fault_id) is int:
id_to_fetch = fault_id
else:
id_to_fetch = random.randint(1, counts['max_fau'])
while id_to_fetch not in id_list:
id_to_fetch = random.randint(1, counts['max_fau'])
query = ("select * from surfaults where fau_id = {0}".format(id_to_fetch))
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result
def __get_sentence(counts, sentence_id=None):
"""Let's fetch a random sentence that we then need to substitute bits of...
@
:param counts:
:param sentence_id:
"""
# First of all we need a cursor and a query to retrieve our ID's
cursor = CONN.cursor()
check_query = "select sen_id from sursentences"
# Now we fetch the result of the query and save it into check_result
cursor.execute(check_query)
check_result = cursor.fetchall()
# declare an empty list to be populated below
id_list = []
id_to_fetch = None
# Populate the id_list variable with all of the ID's we retrieved from the database query.
for row in check_result:
id_list.append(row[0])
if sentence_id is not None:
if type(sentence_id) is int:
id_to_fetch = sentence_id
else:
id_to_fetch = random.randint(1, counts['max_sen'])
while id_to_fetch not in id_list:
id_to_fetch = random.randint(1, counts['max_sen'])
query = ("select * from sursentences where sen_id = {0}".format(id_to_fetch))
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result
def __get_verb(counts):
"""Let's fetch a VERB
:param counts:
"""
cursor = CONN.cursor()
check_query = "select verb_id from surverbs"
cursor.execute(check_query)
check_result = cursor.fetchall()
id_list = []
for row in check_result:
id_list.append(row[0])
rand = random.randint(1, counts['max_verb'])
while rand not in id_list:
rand = random.randint(1, counts['max_verb'])
query = "select * from surverbs where verb_id = {0}".format(rand)
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result[1]
def __get_noun(counts):
"""Let's fetch a NOUN from the database...
:param counts:
"""
cursor = CONN.cursor()
check_query = "select noun_id from surnouns"
cursor.execute(check_query)
check_result = cursor.fetchall()
id_list = []
for row in check_result:
id_list.append(row[0])
rand = random.randint(1, counts['max_nouns'])
while rand not in id_list:
rand = random.randint(1, counts['max_nouns'])
query = "select * from surnouns where noun_id = {0}".format(rand)
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result[1]
def __get_adjective(counts):
"""Let's fetch an ADJECTIVE from the database...
:param counts:
"""
cursor = CONN.cursor()
check_query = "select adj_id from suradjs"
cursor.execute(check_query)
check_result = cursor.fetchall()
id_list = []
for row in check_result:
id_list.append(row[0])
rand = random.randint(1, counts['max_adjectives'])
while rand not in id_list:
rand = random.randint(1, counts['max_adjectives'])
query = "select * from suradjs where adj_id = {0}".format(rand)
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result[1]
def __get_name(counts):
"""Let's fetch a NAME from the database...
:param counts:"""
cursor = CONN.cursor()
check_query = "select name_id from surnames"
cursor.execute(check_query)
check_result = cursor.fetchall()
id_list = []
for row in check_result:
id_list.append(row[0])
rand = random.randint(1, counts['max_names'])
while rand not in id_list:
rand = random.randint(1, counts['max_names'])
query = "select * from surnames where name_id = {0}".format(rand)
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result[1]
def __get_table_limits():
"""Here we simply take a count of each of the database tables so we know our
upper limits for our random number calls then return a dictionary of them
to the calling function..."""
table_counts = {
'max_adjectives': None,
'max_names': None,
'max_nouns': None,
'max_sentences': None,
'max_faults': None,
'max_verbs': None
}
cursor = CONN.cursor()
cursor.execute('SELECT count(*) FROM suradjs')
table_counts['max_adjectives'] = cursor.fetchone()
table_counts['max_adjectives'] = table_counts['max_adjectives'][0]
cursor.execute('SELECT count(*) FROM surnames')
table_counts['max_names'] = cursor.fetchone()
table_counts['max_names'] = table_counts['max_names'][0]
cursor.execute('SELECT count(*) FROM surnouns')
table_counts['max_nouns'] = cursor.fetchone()
table_counts['max_nouns'] = table_counts['max_nouns'][0]
cursor.execute('SELECT count(*) FROM sursentences')
table_counts['max_sen'] = cursor.fetchone()
table_counts['max_sen'] = table_counts['max_sen'][0]
cursor.execute('SELECT count(*) FROM surfaults')
table_counts['max_fau'] = cursor.fetchone()
table_counts['max_fau'] = table_counts['max_fau'][0]
cursor.execute('SELECT count(*) FROM surverbs')
table_counts['max_verb'] = cursor.fetchone()
table_counts['max_verb'] = table_counts['max_verb'][0]
return table_counts
def __process_sentence(sentence_tuple, counts):
"""pull the actual sentence from the tuple (tuple contains additional data such as ID)
:param _sentence_tuple:
:param counts:
"""
sentence = sentence_tuple[2]
# now we start replacing words one type at a time...
sentence = __replace_verbs(sentence, counts)
sentence = __replace_nouns(sentence, counts)
sentence = ___replace_adjective_maybe(sentence, counts)
sentence = __replace_adjective(sentence, counts)
sentence = __replace_names(sentence, counts)
# here we perform a check to see if we need to use A or AN depending on the
# first letter of the following word...
sentence = __replace_an(sentence)
# replace the new repeating segments
sentence = __replace_repeat(sentence)
# now we will read, choose and substitute each of the RANDOM sentence tuples
sentence = __replace_random(sentence)
# now we are going to choose whether to capitalize words/sentences or not
sentence = __replace_capitalise(sentence)
# here we will choose whether to capitalize all words in the sentence
sentence = __replace_capall(sentence)
# check for appropriate spaces in the correct places.
sentence = __check_spaces(sentence)
return sentence
def __replace_verbs(sentence, counts):
"""Lets find and replace all instances of #VERB
:param _sentence:
:param counts:
"""
if sentence is not None:
while sentence.find('#VERB') != -1:
sentence = sentence.replace('#VERB', str(__get_verb(counts)), 1)
if sentence.find('#VERB') == -1:
return sentence
return sentence
else:
return sentence
def __replace_nouns(sentence, counts):
"""Lets find and replace all instances of #NOUN
:param _sentence:
:param counts:
"""
if sentence is not None:
while sentence.find('#NOUN') != -1:
sentence = sentence.replace('#NOUN', str(__get_noun(counts)), 1)
if sentence.find('#NOUN') == -1:
return sentence
return sentence
else:
return sentence
def ___replace_adjective_maybe(sentence, counts):
"""Lets find and replace all instances of #ADJECTIVE_MAYBE
:param _sentence:
:param counts:
"""
random_decision = random.randint(0, 1)
if sentence is not None:
while sentence.find('#ADJECTIVE_MAYBE') != -1:
if random_decision % 2 == 0:
sentence = sentence.replace('#ADJECTIVE_MAYBE',
' ' + str(__get_adjective(counts)), 1)
elif random_decision % 2 != 0:
sentence = sentence.replace('#ADJECTIVE_MAYBE', '', 1)
if sentence.find('#ADJECTIVE_MAYBE') == -1:
return sentence
return sentence
else:
return sentence
def __replace_adjective(sentence, counts):
"""Lets find and replace all instances of #ADJECTIVE
:param _sentence:
:param counts:
"""
if sentence is not None:
while sentence.find('#ADJECTIVE') != -1:
sentence = sentence.replace('#ADJECTIVE',
str(__get_adjective(counts)), 1)
if sentence.find('#ADJECTIVE') == -1:
return sentence
return sentence
else:
return sentence
def __replace_names(sentence, counts):
"""Lets find and replace all instances of #NAME
:param _sentence:
:param counts:
"""
if sentence is not None:
while sentence.find('#NAME') != -1:
sentence = sentence.replace('#NAME', str(__get_name(counts)), 1)
if sentence.find('#NAME') == -1:
return sentence
return sentence
else:
return sentence
def __replace_an(sentence):
"""Lets find and replace all instances of #AN
This is a little different, as this depends on whether the next
word starts with a vowel or a consonant.
:param _sentence:
"""
if sentence is not None:
while sentence.find('#AN') != -1:
an_index = sentence.find('#AN')
if an_index > -1:
an_index += 4
if sentence[an_index] in 'aeiouAEIOU':
sentence = sentence.replace('#AN', str('an'), 1)
else:
sentence = sentence.replace('#AN', str('a'), 1)
if sentence.find('#AN') == -1:
return sentence
return sentence
else:
return sentence
def __replace_repeat(sentence):
"""
Allows the use of repeating random-elements such as in the 'Ten green bottles' type sentences.
:param sentence:
"""
######### USE SENTENCE_ID 47 for testing!
repeat_dict = {}
if sentence is not None:
while sentence.find('#DEFINE_REPEAT') != -1:
begin_index = sentence.find('#DEFINE_REPEAT')
start_index = begin_index + 15
end_index = sentence.find(']')
if sentence.find('#DEFINE_REPEAT') is not None:
sub_list = sentence[start_index:end_index].split(',')
choice = sub_list[0]
repeat_text = sub_list[1]
repeat_dict[choice] = repeat_text
sentence = sentence.replace(sentence[begin_index:end_index + 1], '', 1)
while sentence.find('#REPEAT') != -1:
if sentence.find('#REPEAT') is not None:
repeat_begin_index = sentence.find('#REPEAT')
repeat_start_index = repeat_begin_index + 8
# by searching from repeat_index below we don't encounter dodgy bracket-matching errors.
repeat_end_index = sentence.find(']', repeat_start_index)
repeat_index = sentence[repeat_start_index:repeat_end_index]
if repeat_index in repeat_dict:
sentence = sentence.replace(sentence[repeat_begin_index:repeat_end_index + 1],
str(repeat_dict[repeat_index]))
if sentence.find('#REPEAT') == -1:
return sentence
return sentence
else:
return sentence
def __replace_capitalise(sentence):
"""here we replace all instances of #CAPITALISE and cap the next word.
############
#NOTE: Buggy as hell, as it doesn't account for words that are already
#capitalized
############
:param _sentence:
"""
if sentence is not None:
while sentence.find('#CAPITALISE') != -1:
cap_index = _sentence.find('#CAPITALISE')
part1 = sentence[:cap_index]
part2 = sentence[cap_index + 12:cap_index + 13]
part3 = sentence[cap_index + 13:]
if part2 in "abcdefghijklmnopqrstuvwxyz":
sentence = part1 + part2.capitalize() + part3
else:
sentence = part1 + part2 + part3
if sentence.find('#CAPITALISE') == -1:
return sentence
else:
return sentence
def __replace_capall(sentence):
"""here we replace all instances of #CAPALL and cap the entire sentence.
Don't believe that CAPALL is buggy anymore as it forces all uppercase OK?
:param _sentence:
"""
# print "\nReplacing CAPITALISE: "
if sentence is not None:
while sentence.find('#CAPALL') != -1:
# _cap_index = _sentence.find('#CAPALL')
sentence = sentence.upper()
sentence = sentence.replace('#CAPALL ', '', 1)
if sentence.find('#CAPALL') == -1:
return sentence
else:
return sentence
def __check_spaces(sentence):
"""
Here we check to see that we have the correct number of spaces in the correct locations.
:param _sentence:
:return:
"""
# We have to run the process multiple times:
# Once to search for all spaces, and check if there are adjoining spaces;
# The second time to check for 2 spaces after sentence-ending characters such as . and ! and ?
if sentence is not None:
words = sentence.split()
new_sentence = ''
for (i, word) in enumerate(words):
if word[-1] in set('.!?'):
word += ' '
new_word = ''.join(word)
new_sentence += ' ' + new_word
# remove any trailing whitespace
new_sentence = new_sentence.strip()
return new_sentence
|
Morrolan/surrealism
|
surrealism.py
|
__replace_repeat
|
python
|
def __replace_repeat(sentence):
######### USE SENTENCE_ID 47 for testing!
repeat_dict = {}
if sentence is not None:
while sentence.find('#DEFINE_REPEAT') != -1:
begin_index = sentence.find('#DEFINE_REPEAT')
start_index = begin_index + 15
end_index = sentence.find(']')
if sentence.find('#DEFINE_REPEAT') is not None:
sub_list = sentence[start_index:end_index].split(',')
choice = sub_list[0]
repeat_text = sub_list[1]
repeat_dict[choice] = repeat_text
sentence = sentence.replace(sentence[begin_index:end_index + 1], '', 1)
while sentence.find('#REPEAT') != -1:
if sentence.find('#REPEAT') is not None:
repeat_begin_index = sentence.find('#REPEAT')
repeat_start_index = repeat_begin_index + 8
# by searching from repeat_index below we don't encounter dodgy bracket-matching errors.
repeat_end_index = sentence.find(']', repeat_start_index)
repeat_index = sentence[repeat_start_index:repeat_end_index]
if repeat_index in repeat_dict:
sentence = sentence.replace(sentence[repeat_begin_index:repeat_end_index + 1],
str(repeat_dict[repeat_index]))
if sentence.find('#REPEAT') == -1:
return sentence
return sentence
else:
return sentence
|
Allows the use of repeating random-elements such as in the 'Ten green bottles' type sentences.
:param sentence:
|
train
|
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L719-L760
| null |
#!/usr/bin/env python
#############################################################################
# surrealism.py - Surreal sentence and error message generator
# Copyright (C) 2014 Ian Havelock
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#
# This is a derivative work (used with permission) from www.ravenblack.net
# Credit also goes to Kevan Davis on whose work the surrealism generator at
# Ravenblack.net is based on...
#############################################################################
__all__ = ['show_faults', 'show_sentences', 'getfault', 'getsentence', 'version',
'sentence_test', 'fault_test', 'show_sentences', 'show_faults']
# IMPORTS ###################################################################
import sqlite3
import random
import pkg_resources
# PARTICULAR IMPORTS ########################################################
from pkg_resources import resource_filename
# CONSTANTS #################################################################
CONN = sqlite3.connect(resource_filename(__name__, 'surrealism.sqlite'))
# VARIABLES #################################################################
# CLASSES ###################################################################
#############################################################################
# EXTERNAL METHODS BELOW
def version():
"""Returns the current version of the Surrealism module."""
return pkg_resources.require('surrealism')[0].version
def showfaults():
"""
This exists for backwards compatibility
:return:
"""
output = show_faults()
return output
def show_faults():
"""
Return all valid/active faults ordered by ID to allow the user to pick and choose.
:return: List of Tuples where the Tuple elements are: (fault id, fault template)
"""
cursor = CONN.cursor()
query = "select fau_id, fault from surfaults where fau_is_valid = 'y' order by fau_id asc"
cursor.execute(query)
result = cursor.fetchall()
return result
def showsentences():
"""
This exists for backwards compatibility
:return:
"""
output = show_sentences()
return output
def show_sentences():
"""
Return all valid/active sentences ordered by ID to allow the user to pick and choose.
:return: Dict containing the sentence ID as the key and the sentence structure as the value.
"""
cursor = CONN.cursor()
query = "select sen_id, sentence from sursentences where sen_is_valid = 'y' order by sen_id asc"
cursor.execute(query)
result = cursor.fetchall()
response_dict = {}
for row in result:
response_dict[row[0]] = row[1]
return response_dict
def faulttest():
"""
This exists for backwards compatibility
:return:
"""
output = fault_test()
return output
def fault_test():
"""Returns 1 instance of each programming fault for testing purposes."""
counts = __get_table_limits()
max_num = counts['max_fau']
counter = 0
list_of_tuples = []
fault_result = None
while counter < max_num:
counter += 1
fault = __get_fault(counts, fault_id=counter)
fault_id = fault[1]
if fault[0] == 'n':
fault_result = "Fault is DISABLED - ignoring..."
if fault[0] == 'y':
fault_result = __process_sentence(fault, counts)
list_of_tuples.append((fault_id, fault_result))
return list_of_tuples
def sentencetest():
"""
This exists for backwards compatibility
:return:
"""
output = sentence_test()
return output
def sentence_test():
"""Return 1 random version of each sentence to test sentence structure."""
counts = __get_table_limits()
max_num = counts['max_sen']
counter = 0
list_of_tuples = []
sentence_result = None
while counter < max_num:
counter += 1
sentence = __get_sentence(counts, sentence_id=counter)
sentence_id = sentence[1]
if sentence[0] == 'n':
sentence_result = "Sentence is DISABLED - ignoring..."
if sentence[0] == 'y':
sentence_result = __process_sentence(sentence, counts)
list_of_tuples.append((sentence_id, sentence_result))
return list_of_tuples
def getfault(fault_id=None):
output = get_fault(fault_id)
return output
def get_fault(fault_id=None):
    """Retrieve a randomly-generated error message as a unicode string.

    :param fault_id:
        Allows you to optionally specify an integer representing the fault_id
        from the database table. This allows you to retrieve a specific fault
        each time, albeit with different keywords.
    :return: the rendered fault text, or None when no enabled row is found.
    """
    counts = __get_table_limits()
    result = None
    id_ = 0
    try:
        if isinstance(fault_id, int):
            id_ = fault_id
        elif isinstance(fault_id, float):
            # Floats are tolerated but rounded to the nearest integer id.
            print("""ValueError: Floating point number detected.
            Rounding number to 0 decimal places.""")
            id_ = round(fault_id)
        else:
            # No usable id supplied - pick one at random.
            id_ = random.randint(1, counts['max_fau'])
    except ValueError:
        print("ValueError: Incorrect parameter type detected.")
    if id_ <= counts['max_fau']:
        fault = __get_fault(counts, fault_id=id_)
    else:
        # Requested id is out of range - clamp to the highest valid id.
        print("""ValueError: Parameter integer is too high.
        Maximum permitted value is {0}.""".format(str(counts['max_fau'])))
        id_ = counts['max_fau']
        fault = __get_fault(counts, fault_id=id_)
    if fault is not None:
        # Re-roll until an enabled ('y') row comes back.
        # NOTE(review): id_ is always an int here, so the else branch
        # below looks unreachable - confirm the intent of this check.
        while fault[0] == 'n':
            if id_ is not None:
                fault = __get_fault(counts, None)
            else:
                fault = __get_fault(counts, id_)
        if fault[0] == 'y':
            result = __process_sentence(fault, counts)
        return result
    else:
        print('ValueError: _fault cannot be None.')
def getsentence(sentence_id=None):
    """Backwards-compatible alias for :func:`get_sentence`."""
    return get_sentence(sentence_id)
def get_sentence(sentence_id=None):
    """Retrieve a randomly-generated sentence as a unicode string.

    :param sentence_id:
        Allows you to optionally specify an integer representing the sentence_id
        from the database table. This allows you to retrieve a specific
        sentence each time, albeit with different keywords.
    :return: the rendered sentence text, or None when no enabled row is found.
    """
    counts = __get_table_limits()
    result = None
    id_ = 0
    try:
        if isinstance(sentence_id, int):
            id_ = sentence_id
        elif isinstance(sentence_id, float):
            # Floats are tolerated but rounded to the nearest integer id.
            print("""ValueError: Floating point number detected.
            Rounding number to 0 decimal places.""")
            id_ = round(sentence_id)
        else:
            # No usable id supplied - pick one at random.
            id_ = random.randint(1, counts['max_sen'])
    except ValueError:
        print("ValueError: Incorrect parameter type detected.")
    if id_ <= counts['max_sen']:
        sentence = __get_sentence(counts, sentence_id=id_)
    else:
        # Requested id is out of range - clamp to the highest valid id.
        print("""ValueError: Parameter integer is too high.
        Maximum permitted value is {0}.""".format(str(counts['max_sen'])))
        id_ = counts['max_sen']
        sentence = __get_sentence(counts, sentence_id=id_)
    if sentence is not None:
        # Re-roll until an enabled ('y') row comes back.
        # NOTE(review): id_ is always an int here, so the else branch
        # below looks unreachable - confirm the intent of this check.
        while sentence[0] == 'n':
            if id_ is not None:
                # here we delibrately pass 'None' to __getsentence__ as it will
                sentence = __get_sentence(counts, None)
            else:
                sentence = __get_sentence(counts, id_)
        if sentence[0] == 'y':
            result = __process_sentence(sentence, counts)
        return result
    else:
        print('ValueError: _sentence cannot be None.')
#############################################################################
# INTERNAL METHODS BELOW
def __get_fault(counts, fault_id=None):
    """Fetch one row from surfaults, by id when given, otherwise at random.

    Row layout is assumed to be (is_valid, id, template) based on how
    callers index the result - TODO confirm against the schema.

    :param counts: table-size dict; 'max_fau' bounds the random draw.
    :param fault_id: optional explicit fault id (int); any other type,
        or an id not present in the table, falls back to a random pick.
    """
    # First of all we need a cursor and a query to retrieve our ID's
    cursor = CONN.cursor()
    check_query = "select fau_id from surfaults"
    # Now we fetch the result of the query and save it into check_result
    cursor.execute(check_query)
    check_result = cursor.fetchall()
    # Collect every id that actually exists (ids may be sparse).
    id_list = []
    id_to_fetch = None
    for row in check_result:
        id_list.append(row[0])
    if fault_id is not None:
        if type(fault_id) is int:
            id_to_fetch = fault_id
        else:
            id_to_fetch = random.randint(1, counts['max_fau'])
    # Re-draw until an existing id is hit.  NOTE(review): an explicit but
    # missing fault_id is silently replaced by a random one here.
    while id_to_fetch not in id_list:
        id_to_fetch = random.randint(1, counts['max_fau'])
    # id_to_fetch is always an int at this point, so the string-formatted
    # SQL is not an injection risk.
    query = ("select * from surfaults where fau_id = {0}".format(id_to_fetch))
    cursor.execute(query)
    result = cursor.fetchone()
    # cursor.close()
    return result
def __get_sentence(counts, sentence_id=None):
    """Fetch one row from sursentences, by id when given, else at random.

    Row layout is assumed to be (is_valid, id, template) based on how
    callers index the result - TODO confirm against the schema.

    :param counts: table-size dict; 'max_sen' bounds the random draw.
    :param sentence_id: optional explicit sentence id (int); any other
        type, or an id missing from the table, falls back to random.
    """
    # First of all we need a cursor and a query to retrieve our ID's
    cursor = CONN.cursor()
    check_query = "select sen_id from sursentences"
    # Now we fetch the result of the query and save it into check_result
    cursor.execute(check_query)
    check_result = cursor.fetchall()
    # declare an empty list to be populated below
    id_list = []
    id_to_fetch = None
    # Populate the id_list variable with all of the ID's we retrieved from the database query.
    for row in check_result:
        id_list.append(row[0])
    if sentence_id is not None:
        if type(sentence_id) is int:
            id_to_fetch = sentence_id
        else:
            id_to_fetch = random.randint(1, counts['max_sen'])
    # Re-draw until an existing id is hit.  NOTE(review): an explicit but
    # missing sentence_id is silently replaced by a random one here.
    while id_to_fetch not in id_list:
        id_to_fetch = random.randint(1, counts['max_sen'])
    # id_to_fetch is always an int, so the string-formatted SQL is safe.
    query = ("select * from sursentences where sen_id = {0}".format(id_to_fetch))
    cursor.execute(query)
    result = cursor.fetchone()
    # cursor.close()
    return result
def __get_verb(counts):
    """Pick one random verb from the surverbs table.

    :param counts: table-size dict; 'max_verb' bounds the random id.
    :return: the verb text (column 1 of the fetched row).
    """
    cursor = CONN.cursor()
    cursor.execute("select verb_id from surverbs")
    existing_ids = {row[0] for row in cursor.fetchall()}
    # Re-draw until we land on an id that actually exists (ids may be sparse).
    rand = random.randint(1, counts['max_verb'])
    while rand not in existing_ids:
        rand = random.randint(1, counts['max_verb'])
    cursor.execute("select * from surverbs where verb_id = {0}".format(rand))
    return cursor.fetchone()[1]
def __get_noun(counts):
    """Pick one random noun from the surnouns table.

    :param counts: table-size dict; 'max_nouns' bounds the random id.
    :return: the noun text (column 1 of the fetched row).
    """
    cursor = CONN.cursor()
    cursor.execute("select noun_id from surnouns")
    existing_ids = {row[0] for row in cursor.fetchall()}
    # Re-draw until we land on an id that actually exists (ids may be sparse).
    rand = random.randint(1, counts['max_nouns'])
    while rand not in existing_ids:
        rand = random.randint(1, counts['max_nouns'])
    cursor.execute("select * from surnouns where noun_id = {0}".format(rand))
    return cursor.fetchone()[1]
def __get_adjective(counts):
    """Pick one random adjective from the suradjs table.

    :param counts: table-size dict; 'max_adjectives' bounds the random id.
    :return: the adjective text (column 1 of the fetched row).
    """
    cursor = CONN.cursor()
    cursor.execute("select adj_id from suradjs")
    existing_ids = {row[0] for row in cursor.fetchall()}
    # Re-draw until we land on an id that actually exists (ids may be sparse).
    rand = random.randint(1, counts['max_adjectives'])
    while rand not in existing_ids:
        rand = random.randint(1, counts['max_adjectives'])
    cursor.execute("select * from suradjs where adj_id = {0}".format(rand))
    return cursor.fetchone()[1]
def __get_name(counts):
    """Pick one random name from the surnames table.

    :param counts: table-size dict; 'max_names' bounds the random id.
    :return: the name text (column 1 of the fetched row).
    """
    cursor = CONN.cursor()
    cursor.execute("select name_id from surnames")
    existing_ids = {row[0] for row in cursor.fetchall()}
    # Re-draw until we land on an id that actually exists (ids may be sparse).
    rand = random.randint(1, counts['max_names'])
    while rand not in existing_ids:
        rand = random.randint(1, counts['max_names'])
    cursor.execute("select * from surnames where name_id = {0}".format(rand))
    return cursor.fetchone()[1]
def __get_table_limits():
    """Count the rows of every content table.

    Fixes the original's half-renamed initial dict: the keys
    'max_sentences', 'max_faults' and 'max_verbs' were initialised to
    None but never assigned (the queries fill 'max_sen', 'max_fau' and
    'max_verb' instead), leaving misleading stale entries.

    :return: dict with keys 'max_adjectives', 'max_names', 'max_nouns',
        'max_sen', 'max_fau', 'max_verb' mapping to row counts (int).
    """
    # Map each result key to the table it counts.
    tables = {
        'max_adjectives': 'suradjs',
        'max_names': 'surnames',
        'max_nouns': 'surnouns',
        'max_sen': 'sursentences',
        'max_fau': 'surfaults',
        'max_verb': 'surverbs',
    }
    cursor = CONN.cursor()
    table_counts = {}
    for key, table in tables.items():
        # Table names come from the fixed mapping above, never from user
        # input, so string formatting is safe here.
        cursor.execute('SELECT count(*) FROM {0}'.format(table))
        table_counts[key] = cursor.fetchone()[0]
    return table_counts
def __process_sentence(sentence_tuple, counts):
    """Run the full placeholder-substitution pipeline over one template row.

    The substitution order below is significant: word-level tags first,
    then 'a/an' agreement, repeat and random expansions, and finally the
    capitalisation and spacing passes that operate on the finished text.

    :param sentence_tuple: DB row; index 2 holds the template text.
    :param counts: table-size dict passed through to the word lookups.
    :return: the fully rendered sentence string.
    """
    sentence = sentence_tuple[2]
    # now we start replacing words one type at a time...
    sentence = __replace_verbs(sentence, counts)
    sentence = __replace_nouns(sentence, counts)
    sentence = ___replace_adjective_maybe(sentence, counts)
    sentence = __replace_adjective(sentence, counts)
    sentence = __replace_names(sentence, counts)
    # here we perform a check to see if we need to use A or AN depending on the
    # first letter of the following word...
    sentence = __replace_an(sentence)
    # replace the new repeating segments
    sentence = __replace_repeat(sentence)
    # now we will read, choose and substitute each of the RANDOM sentence tuples
    sentence = __replace_random(sentence)
    # now we are going to choose whether to capitalize words/sentences or not
    sentence = __replace_capitalise(sentence)
    # here we will choose whether to capitalize all words in the sentence
    sentence = __replace_capall(sentence)
    # check for appropriate spaces in the correct places.
    sentence = __check_spaces(sentence)
    return sentence
def __replace_verbs(sentence, counts):
    """Swap every '#VERB' placeholder for a randomly chosen verb.

    :param sentence: template string, possibly None.
    :param counts: table-size dict for the random lookup.
    :return: the substituted string, or the input unchanged if None.
    """
    if sentence is None:
        return sentence
    while '#VERB' in sentence:
        sentence = sentence.replace('#VERB', str(__get_verb(counts)), 1)
    return sentence
def __replace_nouns(sentence, counts):
    """Swap every '#NOUN' placeholder for a randomly chosen noun.

    :param sentence: template string, possibly None.
    :param counts: table-size dict for the random lookup.
    :return: the substituted string, or the input unchanged if None.
    """
    if sentence is None:
        return sentence
    while '#NOUN' in sentence:
        sentence = sentence.replace('#NOUN', str(__get_noun(counts)), 1)
    return sentence
def ___replace_adjective_maybe(sentence, counts):
    """Replace each '#ADJECTIVE_MAYBE' with either ' <adjective>' or nothing.

    Bug fix: the coin was previously flipped once per call, so every
    placeholder in a sentence shared the same outcome (all adjectives or
    none).  It is now flipped independently per occurrence, matching the
    'maybe' intent.

    :param sentence: template string, possibly None.
    :param counts: table-size dict for the random lookup.
    :return: the substituted string, or the input unchanged if None.
    """
    if sentence is None:
        return sentence
    while '#ADJECTIVE_MAYBE' in sentence:
        if random.randint(0, 1) == 0:
            # Leading space so the inserted adjective separates cleanly.
            replacement = ' ' + str(__get_adjective(counts))
        else:
            replacement = ''
        sentence = sentence.replace('#ADJECTIVE_MAYBE', replacement, 1)
    return sentence
def __replace_adjective(sentence, counts):
    """Swap every '#ADJECTIVE' placeholder for a randomly chosen adjective.

    :param sentence: template string, possibly None.
    :param counts: table-size dict for the random lookup.
    :return: the substituted string, or the input unchanged if None.
    """
    if sentence is None:
        return sentence
    while '#ADJECTIVE' in sentence:
        sentence = sentence.replace('#ADJECTIVE', str(__get_adjective(counts)), 1)
    return sentence
def __replace_names(sentence, counts):
    """Swap every '#NAME' placeholder for a randomly chosen name.

    :param sentence: template string, possibly None.
    :param counts: table-size dict for the random lookup.
    :return: the substituted string, or the input unchanged if None.
    """
    if sentence is None:
        return sentence
    while '#NAME' in sentence:
        sentence = sentence.replace('#NAME', str(__get_name(counts)), 1)
    return sentence
def __replace_an(sentence):
    """Replace each '#AN' with 'a' or 'an' based on the next word's first letter.

    Bug fix: the original indexed ``sentence[an_index + 4]`` without a
    bounds check, raising IndexError when '#AN' was the last token of
    the sentence; that case now falls back to 'a'.

    :param sentence: template string, possibly None.
    :return: the substituted string, or the input unchanged if None.
    """
    if sentence is None:
        return sentence
    while True:
        an_index = sentence.find('#AN')
        if an_index == -1:
            return sentence
        # '#AN' is 3 chars plus one separating space -> next word starts here.
        next_letter_index = an_index + 4
        if next_letter_index < len(sentence) and sentence[next_letter_index] in 'aeiouAEIOU':
            article = 'an'
        else:
            article = 'a'
        sentence = sentence.replace('#AN', article, 1)
def __replace_random(sentence):
    """Expand each '#RANDOM[n,opt1,...,optn]' token into one randomly chosen option.

    The first comma-separated field is the option count n; a 1-based
    index into the remaining fields is drawn uniformly.

    Bug fix: the closing bracket was previously located with
    ``sentence.find(']')`` over the whole string, which picked up any
    unrelated ']' appearing before the '#RANDOM' token; the search now
    starts inside the token.

    :param sentence: template string, possibly None.
    :return: the substituted string, or the input unchanged if None.
    """
    if sentence is None:
        return sentence
    while True:
        random_index = sentence.find('#RANDOM')
        if random_index == -1:
            return sentence
        start_index = random_index + 8  # skip '#RANDOM['
        end_index = sentence.find(']', start_index)
        options = sentence[start_index:end_index].split(',')
        pick = random.randint(1, int(options[0]))
        token = sentence[random_index:end_index + 1]
        sentence = sentence.replace(token, options[pick], 1)
def __replace_capitalise(sentence):
    """Remove each '#CAPITALISE ' token and upper-case the letter that follows it.

    Bug fixes versus the original:
    * it referenced the undefined name ``_sentence`` (NameError on any
      sentence containing the token);
    * it fell off the end and returned ``None`` when the token was
      absent, instead of returning the sentence unchanged.

    :param sentence: template string, possibly None.
    :return: the substituted string, or the input unchanged if None.
    """
    if sentence is None:
        return sentence
    while True:
        cap_index = sentence.find('#CAPITALISE')
        if cap_index == -1:
            return sentence
        head = sentence[:cap_index]
        # '#CAPITALISE' is 11 chars plus one space -> offset 12 is the
        # first letter of the following word.
        letter = sentence[cap_index + 12:cap_index + 13]
        tail = sentence[cap_index + 13:]
        if letter in "abcdefghijklmnopqrstuvwxyz":
            letter = letter.capitalize()
        sentence = head + letter + tail
def __replace_capall(sentence):
    """Upper-case the whole sentence when it carries a '#CAPALL' marker.

    Bug fixes versus the original:
    * it fell off the end and returned ``None`` when the marker was
      absent, instead of returning the sentence unchanged;
    * a bare '#CAPALL' without a trailing space looped forever because
      only '#CAPALL ' (with space) was ever removed.

    :param sentence: template string, possibly None.
    :return: the (possibly upper-cased) string, or the input if None.
    """
    if sentence is None:
        return sentence
    while '#CAPALL' in sentence:
        sentence = sentence.upper()
        if '#CAPALL ' in sentence:
            sentence = sentence.replace('#CAPALL ', '', 1)
        else:
            # Marker with no trailing space - strip it anyway to terminate.
            sentence = sentence.replace('#CAPALL', '', 1)
    return sentence
def __check_spaces(sentence):
    """Rebuild inter-word spacing.

    Produces a single space between words, and an extra trailing space
    after any word ending in '.', '!' or '?' (so the next sentence is
    preceded by two spaces).  Leading/trailing whitespace is stripped.

    :param sentence: input string, possibly None.
    :return: the re-spaced string, or None if the input was None.
    """
    if sentence is None:
        return None
    pieces = []
    for token in sentence.split():
        if token.endswith(('.', '!', '?')):
            token += ' '  # yields a double space before the next word
        pieces.append(token)
    return ' '.join(pieces).strip()
|
Morrolan/surrealism
|
surrealism.py
|
__replace_capitalise
|
python
|
def __replace_capitalise(sentence):
if sentence is not None:
while sentence.find('#CAPITALISE') != -1:
cap_index = _sentence.find('#CAPITALISE')
part1 = sentence[:cap_index]
part2 = sentence[cap_index + 12:cap_index + 13]
part3 = sentence[cap_index + 13:]
if part2 in "abcdefghijklmnopqrstuvwxyz":
sentence = part1 + part2.capitalize() + part3
else:
sentence = part1 + part2 + part3
if sentence.find('#CAPITALISE') == -1:
return sentence
else:
return sentence
|
here we replace all instances of #CAPITALISE and cap the next word.
############
#NOTE: Buggy as hell, as it doesn't account for words that are already
#capitalized
############
:param _sentence:
|
train
|
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L763-L790
| null |
#!/usr/bin/env python
#############################################################################
# surrealism.py - Surreal sentence and error message generator
# Copyright (C) 2014 Ian Havelock
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#
# This is a derivative work (used with permission) from www.ravenblack.net
# Credit also goes to Kevan Davis on whose work the surrealism generator at
# Ravenblack.net is based on...
#############################################################################
__all__ = ['show_faults', 'show_sentences', 'getfault', 'getsentence', 'version',
'sentence_test', 'fault_test', 'show_sentences', 'show_faults']
# IMPORTS ###################################################################
import sqlite3
import random
import pkg_resources
# PARTICULAR IMPORTS ########################################################
from pkg_resources import resource_filename
# CONSTANTS #################################################################
CONN = sqlite3.connect(resource_filename(__name__, 'surrealism.sqlite'))
# VARIABLES #################################################################
# CLASSES ###################################################################
#############################################################################
# EXTERNAL METHODS BELOW
def version():
"""Returns the current version of the Surrealism module."""
return pkg_resources.require('surrealism')[0].version
def showfaults():
"""
This exists for backwards compatibility
:return:
"""
output = show_faults()
return output
def show_faults():
"""
Return all valid/active faults ordered by ID to allow the user to pick and choose.
:return: List of Tuples where the Tuple elements are: (fault id, fault template)
"""
cursor = CONN.cursor()
query = "select fau_id, fault from surfaults where fau_is_valid = 'y' order by fau_id asc"
cursor.execute(query)
result = cursor.fetchall()
return result
def showsentences():
"""
This exists for backwards compatibility
:return:
"""
output = show_sentences()
return output
def show_sentences():
"""
Return all valid/active sentences ordered by ID to allow the user to pick and choose.
:return: Dict containing the sentence ID as the key and the sentence structure as the value.
"""
cursor = CONN.cursor()
query = "select sen_id, sentence from sursentences where sen_is_valid = 'y' order by sen_id asc"
cursor.execute(query)
result = cursor.fetchall()
response_dict = {}
for row in result:
response_dict[row[0]] = row[1]
return response_dict
def faulttest():
"""
This exists for backwards compatibility
:return:
"""
output = fault_test()
return output
def fault_test():
"""Returns 1 instance of each programming fault for testing purposes."""
counts = __get_table_limits()
max_num = counts['max_fau']
counter = 0
list_of_tuples = []
fault_result = None
while counter < max_num:
counter += 1
fault = __get_fault(counts, fault_id=counter)
fault_id = fault[1]
if fault[0] == 'n':
fault_result = "Fault is DISABLED - ignoring..."
if fault[0] == 'y':
fault_result = __process_sentence(fault, counts)
list_of_tuples.append((fault_id, fault_result))
return list_of_tuples
def sentencetest():
"""
This exists for backwards compatibility
:return:
"""
output = sentence_test()
return output
def sentence_test():
"""Return 1 random version of each sentence to test sentence structure."""
counts = __get_table_limits()
max_num = counts['max_sen']
counter = 0
list_of_tuples = []
sentence_result = None
while counter < max_num:
counter += 1
sentence = __get_sentence(counts, sentence_id=counter)
sentence_id = sentence[1]
if sentence[0] == 'n':
sentence_result = "Sentence is DISABLED - ignoring..."
if sentence[0] == 'y':
sentence_result = __process_sentence(sentence, counts)
list_of_tuples.append((sentence_id, sentence_result))
return list_of_tuples
def getfault(fault_id=None):
output = get_fault(fault_id)
return output
def get_fault(fault_id=None):
"""Retrieve a randomly-generated error message as a unicode string.
:param fault_id:
Allows you to optionally specify an integer representing the fault_id
from the database table. This allows you to retrieve a specific fault
each time, albeit with different keywords."""
counts = __get_table_limits()
result = None
id_ = 0
try:
if isinstance(fault_id, int):
id_ = fault_id
elif isinstance(fault_id, float):
print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
id_ = round(fault_id)
else:
id_ = random.randint(1, counts['max_fau'])
except ValueError:
print("ValueError: Incorrect parameter type detected.")
if id_ <= counts['max_fau']:
fault = __get_fault(counts, fault_id=id_)
else:
print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_fau'])))
id_ = counts['max_fau']
fault = __get_fault(counts, fault_id=id_)
if fault is not None:
while fault[0] == 'n':
if id_ is not None:
fault = __get_fault(counts, None)
else:
fault = __get_fault(counts, id_)
if fault[0] == 'y':
result = __process_sentence(fault, counts)
return result
else:
print('ValueError: _fault cannot be None.')
def getsentence(sentence_id=None):
output = get_sentence(sentence_id)
return output
def get_sentence(sentence_id=None):
"""Retrieve a randomly-generated sentence as a unicode string.
:param sentence_id:
Allows you to optionally specify an integer representing the sentence_id
from the database table. This allows you to retrieve a specific
sentence each time, albeit with different keywords."""
counts = __get_table_limits()
result = None
id_ = 0
try:
if isinstance(sentence_id, int):
id_ = sentence_id
elif isinstance(sentence_id, float):
print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
id_ = round(sentence_id)
else:
id_ = random.randint(1, counts['max_sen'])
except ValueError:
print("ValueError: Incorrect parameter type detected.")
if id_ <= counts['max_sen']:
sentence = __get_sentence(counts, sentence_id=id_)
else:
print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_sen'])))
id_ = counts['max_sen']
sentence = __get_sentence(counts, sentence_id=id_)
if sentence is not None:
while sentence[0] == 'n':
if id_ is not None:
# here we delibrately pass 'None' to __getsentence__ as it will
sentence = __get_sentence(counts, None)
else:
sentence = __get_sentence(counts, id_)
if sentence[0] == 'y':
result = __process_sentence(sentence, counts)
return result
else:
print('ValueError: _sentence cannot be None.')
#############################################################################
# INTERNAL METHODS BELOW
def __get_fault(counts, fault_id=None):
"""Let's fetch a random fault that we then need to substitute bits of...
:param counts:
:param fault_id:
"""
# First of all we need a cursor and a query to retrieve our ID's
cursor = CONN.cursor()
check_query = "select fau_id from surfaults"
# Now we fetch the result of the query and save it into check_result
cursor.execute(check_query)
check_result = cursor.fetchall()
# declare an empty list to be populated below
id_list = []
id_to_fetch = None
for row in check_result:
id_list.append(row[0])
if fault_id is not None:
if type(fault_id) is int:
id_to_fetch = fault_id
else:
id_to_fetch = random.randint(1, counts['max_fau'])
while id_to_fetch not in id_list:
id_to_fetch = random.randint(1, counts['max_fau'])
query = ("select * from surfaults where fau_id = {0}".format(id_to_fetch))
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result
def __get_sentence(counts, sentence_id=None):
"""Let's fetch a random sentence that we then need to substitute bits of...
@
:param counts:
:param sentence_id:
"""
# First of all we need a cursor and a query to retrieve our ID's
cursor = CONN.cursor()
check_query = "select sen_id from sursentences"
# Now we fetch the result of the query and save it into check_result
cursor.execute(check_query)
check_result = cursor.fetchall()
# declare an empty list to be populated below
id_list = []
id_to_fetch = None
# Populate the id_list variable with all of the ID's we retrieved from the database query.
for row in check_result:
id_list.append(row[0])
if sentence_id is not None:
if type(sentence_id) is int:
id_to_fetch = sentence_id
else:
id_to_fetch = random.randint(1, counts['max_sen'])
while id_to_fetch not in id_list:
id_to_fetch = random.randint(1, counts['max_sen'])
query = ("select * from sursentences where sen_id = {0}".format(id_to_fetch))
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result
def __get_verb(counts):
"""Let's fetch a VERB
:param counts:
"""
cursor = CONN.cursor()
check_query = "select verb_id from surverbs"
cursor.execute(check_query)
check_result = cursor.fetchall()
id_list = []
for row in check_result:
id_list.append(row[0])
rand = random.randint(1, counts['max_verb'])
while rand not in id_list:
rand = random.randint(1, counts['max_verb'])
query = "select * from surverbs where verb_id = {0}".format(rand)
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result[1]
def __get_noun(counts):
"""Let's fetch a NOUN from the database...
:param counts:
"""
cursor = CONN.cursor()
check_query = "select noun_id from surnouns"
cursor.execute(check_query)
check_result = cursor.fetchall()
id_list = []
for row in check_result:
id_list.append(row[0])
rand = random.randint(1, counts['max_nouns'])
while rand not in id_list:
rand = random.randint(1, counts['max_nouns'])
query = "select * from surnouns where noun_id = {0}".format(rand)
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result[1]
def __get_adjective(counts):
"""Let's fetch an ADJECTIVE from the database...
:param counts:
"""
cursor = CONN.cursor()
check_query = "select adj_id from suradjs"
cursor.execute(check_query)
check_result = cursor.fetchall()
id_list = []
for row in check_result:
id_list.append(row[0])
rand = random.randint(1, counts['max_adjectives'])
while rand not in id_list:
rand = random.randint(1, counts['max_adjectives'])
query = "select * from suradjs where adj_id = {0}".format(rand)
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result[1]
def __get_name(counts):
"""Let's fetch a NAME from the database...
:param counts:"""
cursor = CONN.cursor()
check_query = "select name_id from surnames"
cursor.execute(check_query)
check_result = cursor.fetchall()
id_list = []
for row in check_result:
id_list.append(row[0])
rand = random.randint(1, counts['max_names'])
while rand not in id_list:
rand = random.randint(1, counts['max_names'])
query = "select * from surnames where name_id = {0}".format(rand)
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result[1]
def __get_table_limits():
"""Here we simply take a count of each of the database tables so we know our
upper limits for our random number calls then return a dictionary of them
to the calling function..."""
table_counts = {
'max_adjectives': None,
'max_names': None,
'max_nouns': None,
'max_sentences': None,
'max_faults': None,
'max_verbs': None
}
cursor = CONN.cursor()
cursor.execute('SELECT count(*) FROM suradjs')
table_counts['max_adjectives'] = cursor.fetchone()
table_counts['max_adjectives'] = table_counts['max_adjectives'][0]
cursor.execute('SELECT count(*) FROM surnames')
table_counts['max_names'] = cursor.fetchone()
table_counts['max_names'] = table_counts['max_names'][0]
cursor.execute('SELECT count(*) FROM surnouns')
table_counts['max_nouns'] = cursor.fetchone()
table_counts['max_nouns'] = table_counts['max_nouns'][0]
cursor.execute('SELECT count(*) FROM sursentences')
table_counts['max_sen'] = cursor.fetchone()
table_counts['max_sen'] = table_counts['max_sen'][0]
cursor.execute('SELECT count(*) FROM surfaults')
table_counts['max_fau'] = cursor.fetchone()
table_counts['max_fau'] = table_counts['max_fau'][0]
cursor.execute('SELECT count(*) FROM surverbs')
table_counts['max_verb'] = cursor.fetchone()
table_counts['max_verb'] = table_counts['max_verb'][0]
return table_counts
def __process_sentence(sentence_tuple, counts):
"""pull the actual sentence from the tuple (tuple contains additional data such as ID)
:param _sentence_tuple:
:param counts:
"""
sentence = sentence_tuple[2]
# now we start replacing words one type at a time...
sentence = __replace_verbs(sentence, counts)
sentence = __replace_nouns(sentence, counts)
sentence = ___replace_adjective_maybe(sentence, counts)
sentence = __replace_adjective(sentence, counts)
sentence = __replace_names(sentence, counts)
# here we perform a check to see if we need to use A or AN depending on the
# first letter of the following word...
sentence = __replace_an(sentence)
# replace the new repeating segments
sentence = __replace_repeat(sentence)
# now we will read, choose and substitute each of the RANDOM sentence tuples
sentence = __replace_random(sentence)
# now we are going to choose whether to capitalize words/sentences or not
sentence = __replace_capitalise(sentence)
# here we will choose whether to capitalize all words in the sentence
sentence = __replace_capall(sentence)
# check for appropriate spaces in the correct places.
sentence = __check_spaces(sentence)
return sentence
def __replace_verbs(sentence, counts):
"""Lets find and replace all instances of #VERB
:param _sentence:
:param counts:
"""
if sentence is not None:
while sentence.find('#VERB') != -1:
sentence = sentence.replace('#VERB', str(__get_verb(counts)), 1)
if sentence.find('#VERB') == -1:
return sentence
return sentence
else:
return sentence
def __replace_nouns(sentence, counts):
"""Lets find and replace all instances of #NOUN
:param _sentence:
:param counts:
"""
if sentence is not None:
while sentence.find('#NOUN') != -1:
sentence = sentence.replace('#NOUN', str(__get_noun(counts)), 1)
if sentence.find('#NOUN') == -1:
return sentence
return sentence
else:
return sentence
def ___replace_adjective_maybe(sentence, counts):
"""Lets find and replace all instances of #ADJECTIVE_MAYBE
:param _sentence:
:param counts:
"""
random_decision = random.randint(0, 1)
if sentence is not None:
while sentence.find('#ADJECTIVE_MAYBE') != -1:
if random_decision % 2 == 0:
sentence = sentence.replace('#ADJECTIVE_MAYBE',
' ' + str(__get_adjective(counts)), 1)
elif random_decision % 2 != 0:
sentence = sentence.replace('#ADJECTIVE_MAYBE', '', 1)
if sentence.find('#ADJECTIVE_MAYBE') == -1:
return sentence
return sentence
else:
return sentence
def __replace_adjective(sentence, counts):
"""Lets find and replace all instances of #ADJECTIVE
:param _sentence:
:param counts:
"""
if sentence is not None:
while sentence.find('#ADJECTIVE') != -1:
sentence = sentence.replace('#ADJECTIVE',
str(__get_adjective(counts)), 1)
if sentence.find('#ADJECTIVE') == -1:
return sentence
return sentence
else:
return sentence
def __replace_names(sentence, counts):
"""Lets find and replace all instances of #NAME
:param _sentence:
:param counts:
"""
if sentence is not None:
while sentence.find('#NAME') != -1:
sentence = sentence.replace('#NAME', str(__get_name(counts)), 1)
if sentence.find('#NAME') == -1:
return sentence
return sentence
else:
return sentence
def __replace_an(sentence):
"""Lets find and replace all instances of #AN
This is a little different, as this depends on whether the next
word starts with a vowel or a consonant.
:param _sentence:
"""
if sentence is not None:
while sentence.find('#AN') != -1:
an_index = sentence.find('#AN')
if an_index > -1:
an_index += 4
if sentence[an_index] in 'aeiouAEIOU':
sentence = sentence.replace('#AN', str('an'), 1)
else:
sentence = sentence.replace('#AN', str('a'), 1)
if sentence.find('#AN') == -1:
return sentence
return sentence
else:
return sentence
def __replace_random(sentence):
"""Lets find and replace all instances of #RANDOM
:param _sentence:
"""
sub_list = None
choice = None
if sentence is not None:
while sentence.find('#RANDOM') != -1:
random_index = sentence.find('#RANDOM')
start_index = sentence.find('#RANDOM') + 8
end_index = sentence.find(']')
if sentence.find('#RANDOM') is not None:
sub_list = sentence[start_index:end_index].split(',')
choice = random.randint(1, int(sub_list[0]))
# _sub_list[_choice]
to_be_replaced = sentence[random_index:end_index + 1]
sentence = sentence.replace(to_be_replaced, sub_list[choice], 1)
if sentence.find('#RANDOM') == -1:
return sentence
return sentence
else:
return sentence
def __replace_repeat(sentence):
    """Expand '#DEFINE_REPEAT[key,text]' definitions and substitute every
    '#REPEAT[key]' reference with the defined text.

    Lets a template reuse one expansion several times (the 'Ten green
    bottles' style sentences).

    :param sentence: template string (may be None)
    :return: the substituted sentence, or the input unchanged if None
    """
    if sentence is None:
        return sentence
    definitions = {}
    # First pass: record and strip every definition.
    while '#DEFINE_REPEAT' in sentence:
        tag_start = sentence.find('#DEFINE_REPEAT')
        body_end = sentence.find(']')
        fields = sentence[tag_start + 15:body_end].split(',')
        definitions[fields[0]] = fields[1]
        sentence = sentence.replace(sentence[tag_start:body_end + 1], '', 1)
    # Second pass: expand the references (all occurrences at once).
    while '#REPEAT' in sentence:
        tag_start = sentence.find('#REPEAT')
        key_start = tag_start + 8
        # Searching from key_start avoids matching an earlier ']'.
        key_end = sentence.find(']', key_start)
        key = sentence[key_start:key_end]
        if key in definitions:
            sentence = sentence.replace(sentence[tag_start:key_end + 1],
                                        str(definitions[key]))
    return sentence
def __replace_capall(sentence):
    """Upper-case the whole sentence when it carries a '#CAPALL' marker.

    Bug fix: the original fell off the end and returned None whenever the
    sentence contained no '#CAPALL' marker; it now returns the sentence
    unchanged in that case.

    :param sentence: template string (may be None)
    :return: the (possibly upper-cased) sentence, or None if input is None
    """
    if sentence is None:
        return sentence
    while sentence.find('#CAPALL') != -1:
        sentence = sentence.upper()
        # NOTE(review): only '#CAPALL ' with a trailing space is stripped,
        # as in the original; a bare '#CAPALL' would loop forever.
        sentence = sentence.replace('#CAPALL ', '', 1)
    return sentence
def __check_spaces(sentence):
    """Normalise spacing: single spaces between words, two spaces after
    sentence-ending punctuation (., ! or ?).

    :param sentence: assembled sentence (may be None)
    :return: the re-spaced sentence, or None if input is None
    """
    if sentence is None:
        return None
    pieces = []
    for token in sentence.split():
        # One extra space here; the single-space join supplies the second.
        if token[-1] in '.!?':
            token += ' '
        pieces.append(token)
    return ' '.join(pieces).strip()
|
Morrolan/surrealism
|
surrealism.py
|
__replace_capall
|
python
|
def __replace_capall(sentence):
# print "\nReplacing CAPITALISE: "
if sentence is not None:
while sentence.find('#CAPALL') != -1:
# _cap_index = _sentence.find('#CAPALL')
sentence = sentence.upper()
sentence = sentence.replace('#CAPALL ', '', 1)
if sentence.find('#CAPALL') == -1:
return sentence
else:
return sentence
|
here we replace all instances of #CAPALL and cap the entire sentence.
Don't believe that CAPALL is buggy anymore as it forces all uppercase OK?
:param _sentence:
|
train
|
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L793-L811
| null |
#!/usr/bin/env python
#############################################################################
# surrealism.py - Surreal sentence and error message generator
# Copyright (C) 2014 Ian Havelock
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#
# This is a derivative work (used with permission) from www.ravenblack.net
# Credit also goes to Kevan Davis on whose work the surrealism generator at
# Ravenblack.net is based on...
#############################################################################
__all__ = ['show_faults', 'show_sentences', 'getfault', 'getsentence', 'version',
'sentence_test', 'fault_test', 'show_sentences', 'show_faults']
# IMPORTS ###################################################################
import sqlite3
import random
import pkg_resources
# PARTICULAR IMPORTS ########################################################
from pkg_resources import resource_filename
# CONSTANTS #################################################################
CONN = sqlite3.connect(resource_filename(__name__, 'surrealism.sqlite'))
# VARIABLES #################################################################
# CLASSES ###################################################################
#############################################################################
# EXTERNAL METHODS BELOW
def version():
    """Return the installed version string of the Surrealism package."""
    dist = pkg_resources.require('surrealism')[0]
    return dist.version
def showfaults():
    """Backwards-compatible alias for :func:`show_faults`."""
    return show_faults()


def show_faults():
    """List every active fault template.

    :return: list of (fault_id, fault_template) tuples ordered by id
    """
    cursor = CONN.cursor()
    cursor.execute(
        "select fau_id, fault from surfaults "
        "where fau_is_valid = 'y' order by fau_id asc")
    return cursor.fetchall()
def showsentences():
    """Backwards-compatible alias for :func:`show_sentences`."""
    return show_sentences()


def show_sentences():
    """List every active sentence template.

    :return: dict mapping sentence id -> sentence template
    """
    cursor = CONN.cursor()
    cursor.execute(
        "select sen_id, sentence from sursentences "
        "where sen_is_valid = 'y' order by sen_id asc")
    return {sen_id: template for sen_id, template in cursor.fetchall()}
def faulttest():
    """Backwards-compatible alias for :func:`fault_test`."""
    return fault_test()


def fault_test():
    """Render one instance of every fault template, for testing.

    :return: list of (fault_id, rendered_text_or_disabled_notice) tuples
    """
    counts = __get_table_limits()
    results = []
    rendered = None
    for fau_id in range(1, counts['max_fau'] + 1):
        fault = __get_fault(counts, fault_id=fau_id)
        if fault[0] == 'n':
            rendered = "Fault is DISABLED - ignoring..."
        if fault[0] == 'y':
            rendered = __process_sentence(fault, counts)
        results.append((fault[1], rendered))
    return results
def sentencetest():
    """Backwards-compatible alias for :func:`sentence_test`."""
    return sentence_test()


def sentence_test():
    """Render one random instance of every sentence template, for testing.

    :return: list of (sentence_id, rendered_text_or_disabled_notice) tuples
    """
    counts = __get_table_limits()
    results = []
    rendered = None
    for sen_id in range(1, counts['max_sen'] + 1):
        sentence = __get_sentence(counts, sentence_id=sen_id)
        if sentence[0] == 'n':
            rendered = "Sentence is DISABLED - ignoring..."
        if sentence[0] == 'y':
            rendered = __process_sentence(sentence, counts)
        results.append((sentence[1], rendered))
    return results
def getfault(fault_id=None):
    """Backwards-compatible alias for :func:`get_fault`."""
    return get_fault(fault_id)


def get_fault(fault_id=None):
    """Retrieve a randomly-generated error message as a unicode string.

    :param fault_id: optional integer picking a specific fault template;
        a float is rounded, anything else selects a random template.
        Keywords inside the template are always randomised.
    :return: the rendered fault text, or None when nothing could be fetched
    """
    counts = __get_table_limits()
    result = None
    id_ = 0
    try:
        if isinstance(fault_id, int):
            id_ = fault_id
        elif isinstance(fault_id, float):
            print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
            id_ = round(fault_id)
        else:
            id_ = random.randint(1, counts['max_fau'])
    except ValueError:
        print("ValueError: Incorrect parameter type detected.")
    if id_ > counts['max_fau']:
        print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_fau'])))
        id_ = counts['max_fau']
    fault = __get_fault(counts, fault_id=id_)
    if fault is None:
        print('ValueError: _fault cannot be None.')
        return result
    # Keep drawing random templates until an enabled ('y') one turns up.
    while fault[0] == 'n':
        fault = __get_fault(counts, None)
    if fault[0] == 'y':
        result = __process_sentence(fault, counts)
    return result
def getsentence(sentence_id=None):
    """Backwards-compatible alias for :func:`get_sentence`."""
    return get_sentence(sentence_id)


def get_sentence(sentence_id=None):
    """Retrieve a randomly-generated sentence as a unicode string.

    :param sentence_id: optional integer picking a specific sentence
        template; a float is rounded, anything else selects a random
        template. Keywords inside the template are always randomised.
    :return: the rendered sentence, or None when nothing could be fetched
    """
    counts = __get_table_limits()
    result = None
    id_ = 0
    try:
        if isinstance(sentence_id, int):
            id_ = sentence_id
        elif isinstance(sentence_id, float):
            print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
            id_ = round(sentence_id)
        else:
            id_ = random.randint(1, counts['max_sen'])
    except ValueError:
        print("ValueError: Incorrect parameter type detected.")
    if id_ > counts['max_sen']:
        print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_sen'])))
        id_ = counts['max_sen']
    sentence = __get_sentence(counts, sentence_id=id_)
    if sentence is None:
        print('ValueError: _sentence cannot be None.')
        return result
    # Deliberately re-draw at random until an enabled ('y') template turns up.
    while sentence[0] == 'n':
        sentence = __get_sentence(counts, None)
    if sentence[0] == 'y':
        result = __process_sentence(sentence, counts)
    return result
#############################################################################
# INTERNAL METHODS BELOW
def __get_fault(counts, fault_id=None):
    """Fetch one raw fault row from the database.

    :param counts: table-limit dict (uses 'max_fau' for random draws)
    :param fault_id: optional integer id; anything else (or an id missing
        from the table) falls back to a random existing id
    :return: the full surfaults row as a tuple
    """
    cursor = CONN.cursor()
    # Collect the ids actually present so random draws never miss
    # (ids may be sparse after deletions).
    cursor.execute("select fau_id from surfaults")
    existing_ids = {row[0] for row in cursor.fetchall()}
    if fault_id is not None and type(fault_id) is int:
        id_to_fetch = fault_id
    else:
        id_to_fetch = random.randint(1, counts['max_fau'])
    while id_to_fetch not in existing_ids:
        id_to_fetch = random.randint(1, counts['max_fau'])
    # Parameterised query instead of string formatting (safer, and lets
    # sqlite cache the prepared statement).
    cursor.execute("select * from surfaults where fau_id = ?", (id_to_fetch,))
    return cursor.fetchone()
def __get_sentence(counts, sentence_id=None):
    """Fetch one raw sentence row from the database.

    :param counts: table-limit dict (uses 'max_sen' for random draws)
    :param sentence_id: optional integer id; anything else (or an id
        missing from the table) falls back to a random existing id
    :return: the full sursentences row as a tuple
    """
    cursor = CONN.cursor()
    # Collect the ids actually present so random draws never miss
    # (ids may be sparse after deletions).
    cursor.execute("select sen_id from sursentences")
    existing_ids = {row[0] for row in cursor.fetchall()}
    if sentence_id is not None and type(sentence_id) is int:
        id_to_fetch = sentence_id
    else:
        id_to_fetch = random.randint(1, counts['max_sen'])
    while id_to_fetch not in existing_ids:
        id_to_fetch = random.randint(1, counts['max_sen'])
    # Parameterised query instead of string formatting.
    cursor.execute("select * from sursentences where sen_id = ?",
                   (id_to_fetch,))
    return cursor.fetchone()
def __get_verb(counts):
    """Fetch one random verb string from the surverbs table.

    :param counts: table-limit dict (uses 'max_verb')
    :return: the verb text (column 1 of the fetched row)
    """
    cursor = CONN.cursor()
    # Draw only ids that actually exist (the table may be sparse).
    cursor.execute("select verb_id from surverbs")
    existing_ids = {row[0] for row in cursor.fetchall()}
    rand = random.randint(1, counts['max_verb'])
    while rand not in existing_ids:
        rand = random.randint(1, counts['max_verb'])
    # Parameterised query instead of string formatting.
    cursor.execute("select * from surverbs where verb_id = ?", (rand,))
    return cursor.fetchone()[1]
def __get_noun(counts):
    """Fetch one random noun string from the surnouns table.

    :param counts: table-limit dict (uses 'max_nouns')
    :return: the noun text (column 1 of the fetched row)
    """
    cursor = CONN.cursor()
    # Draw only ids that actually exist (the table may be sparse).
    cursor.execute("select noun_id from surnouns")
    existing_ids = {row[0] for row in cursor.fetchall()}
    rand = random.randint(1, counts['max_nouns'])
    while rand not in existing_ids:
        rand = random.randint(1, counts['max_nouns'])
    # Parameterised query instead of string formatting.
    cursor.execute("select * from surnouns where noun_id = ?", (rand,))
    return cursor.fetchone()[1]
def __get_adjective(counts):
    """Fetch one random adjective string from the suradjs table.

    :param counts: table-limit dict (uses 'max_adjectives')
    :return: the adjective text (column 1 of the fetched row)
    """
    cursor = CONN.cursor()
    # Draw only ids that actually exist (the table may be sparse).
    cursor.execute("select adj_id from suradjs")
    existing_ids = {row[0] for row in cursor.fetchall()}
    rand = random.randint(1, counts['max_adjectives'])
    while rand not in existing_ids:
        rand = random.randint(1, counts['max_adjectives'])
    # Parameterised query instead of string formatting.
    cursor.execute("select * from suradjs where adj_id = ?", (rand,))
    return cursor.fetchone()[1]
def __get_name(counts):
    """Fetch one random name string from the surnames table.

    :param counts: table-limit dict (uses 'max_names')
    :return: the name text (column 1 of the fetched row)
    """
    cursor = CONN.cursor()
    # Draw only ids that actually exist (the table may be sparse).
    cursor.execute("select name_id from surnames")
    existing_ids = {row[0] for row in cursor.fetchall()}
    rand = random.randint(1, counts['max_names'])
    while rand not in existing_ids:
        rand = random.randint(1, counts['max_names'])
    # Parameterised query instead of string formatting.
    cursor.execute("select * from surnames where name_id = ?", (rand,))
    return cursor.fetchone()[1]
def __get_table_limits():
    """Count the rows of each content table.

    The counts bound the random id draws made elsewhere in the module.
    The original pre-seeded the dict with keys 'max_sentences',
    'max_faults' and 'max_verbs' that were never written or read; those
    dead placeholders have been dropped.

    :return: dict with keys 'max_adjectives', 'max_names', 'max_nouns',
        'max_sen', 'max_fau' and 'max_verb'
    """
    tables = {
        'max_adjectives': 'suradjs',
        'max_names': 'surnames',
        'max_nouns': 'surnouns',
        'max_sen': 'sursentences',
        'max_fau': 'surfaults',
        'max_verb': 'surverbs',
    }
    cursor = CONN.cursor()
    table_counts = {}
    for key, table in tables.items():
        # Table names come from the fixed mapping above, never from input.
        cursor.execute('SELECT count(*) FROM {0}'.format(table))
        table_counts[key] = cursor.fetchone()[0]
    return table_counts
def __process_sentence(sentence_tuple, counts):
    """Render a raw template row into finished text.

    Runs the template (column 2 of the row) through each placeholder
    substitution pass, then normalises spacing.

    :param sentence_tuple: full row fetched from the database
    :param counts: table-limit dict passed through to the word fetchers
    :return: the fully substituted sentence
    """
    text = sentence_tuple[2]
    # Order matters: words first, then articles, repeats and random picks,
    # then capitalisation, and spacing last.
    passes = (
        lambda s: __replace_verbs(s, counts),
        lambda s: __replace_nouns(s, counts),
        lambda s: ___replace_adjective_maybe(s, counts),
        lambda s: __replace_adjective(s, counts),
        lambda s: __replace_names(s, counts),
        __replace_an,
        __replace_repeat,
        __replace_random,
        __replace_capitalise,
        __replace_capall,
        __check_spaces,
    )
    for substitute in passes:
        text = substitute(text)
    return text
def __replace_verbs(sentence, counts):
    """Substitute every '#VERB' placeholder with a randomly fetched verb.

    :param sentence: template string (may be None)
    :param counts: dict of table row counts used for random selection
    :return: the substituted sentence, or the input unchanged if None
    """
    if sentence is None:
        return sentence
    # Replace one placeholder per pass so each gets its own random word.
    while '#VERB' in sentence:
        sentence = sentence.replace('#VERB', str(__get_verb(counts)), 1)
    return sentence
def __replace_nouns(sentence, counts):
    """Substitute every '#NOUN' placeholder with a randomly fetched noun.

    :param sentence: template string (may be None)
    :param counts: dict of table row counts used for random selection
    :return: the substituted sentence, or the input unchanged if None
    """
    if sentence is None:
        return sentence
    # Replace one placeholder per pass so each gets its own random word.
    while '#NOUN' in sentence:
        sentence = sentence.replace('#NOUN', str(__get_noun(counts)), 1)
    return sentence
def ___replace_adjective_maybe(sentence, counts):
    """Replace each '#ADJECTIVE_MAYBE' with either ' <adjective>' or
    nothing, decided by a coin flip.

    Bug fix: the coin was originally flipped once per sentence, so every
    '#ADJECTIVE_MAYBE' in a template shared the same outcome; the flip now
    happens independently per occurrence.

    :param sentence: template string (may be None)
    :param counts: dict of table row counts used for random selection
    :return: the substituted sentence, or the input unchanged if None
    """
    if sentence is None:
        return sentence
    while '#ADJECTIVE_MAYBE' in sentence:
        if random.randint(0, 1) == 0:
            # Leading space keeps the sentence readable when inserted.
            replacement = ' ' + str(__get_adjective(counts))
        else:
            replacement = ''
        sentence = sentence.replace('#ADJECTIVE_MAYBE', replacement, 1)
    return sentence
def __replace_adjective(sentence, counts):
    """Swap each '#ADJECTIVE' placeholder for a randomly fetched adjective.

    :param sentence: template string (may be None)
    :param counts: dict of table row counts used for random selection
    :return: the substituted sentence, or the input unchanged if None
    """
    if sentence is not None:
        while sentence.count('#ADJECTIVE'):
            adjective = str(__get_adjective(counts))
            sentence = sentence.replace('#ADJECTIVE', adjective, 1)
    return sentence
def __replace_names(sentence, counts):
    """Swap each '#NAME' placeholder for a randomly fetched name.

    :param sentence: template string (may be None)
    :param counts: dict of table row counts used for random selection
    :return: the substituted sentence, or the input unchanged if None
    """
    if sentence is not None:
        while sentence.find('#NAME') >= 0:
            sentence = sentence.replace('#NAME', str(__get_name(counts)), 1)
    return sentence
def __replace_an(sentence):
    """Replace each '#AN' placeholder with 'a' or 'an', chosen by whether
    the word that follows it starts with a vowel.

    Bug fix: the original indexed one character past '#AN ' without a
    bounds check and raised IndexError when '#AN' ended the sentence; it
    now falls back to 'a' in that case.

    :param sentence: template string (may be None)
    :return: the substituted sentence, or the input unchanged if None
    """
    if sentence is None:
        return sentence
    while '#AN' in sentence:
        # Index of the character just past "#AN " (the next word's initial).
        next_letter = sentence.find('#AN') + 4
        if next_letter < len(sentence) and sentence[next_letter] in 'aeiouAEIOU':
            article = 'an'
        else:
            article = 'a'
        sentence = sentence.replace('#AN', article, 1)
    return sentence
def __replace_random(sentence):
    """Expand each '#RANDOM[n,opt1,...,optn]' directive by picking one of
    its n options at random.

    :param sentence: template string (may be None)
    :return: the substituted sentence, or the input unchanged if None
    """
    if sentence is not None:
        while sentence.find('#RANDOM') != -1:
            start = sentence.find('#RANDOM')
            close = sentence.find(']')
            fields = sentence[start + 8:close].split(',')
            # fields[0] holds the option count, so valid picks are 1..count.
            index = random.randint(1, int(fields[0]))
            sentence = (sentence[:start] + fields[index]
                        + sentence[close + 1:])
    return sentence
def __replace_repeat(sentence):
    """Expand '#DEFINE_REPEAT[key,text]' definitions, then substitute every
    '#REPEAT[key]' reference with its defined text.

    Lets a template reuse one expansion several times (the 'Ten green
    bottles' style sentences; see sentence id 47).

    :param sentence: template string (may be None)
    :return: the substituted sentence, or the input unchanged if None
    """
    if sentence is None:
        return sentence
    stored = {}
    # Pass 1: record and strip every definition.
    while sentence.find('#DEFINE_REPEAT') != -1:
        start = sentence.find('#DEFINE_REPEAT')
        close = sentence.find(']')
        fields = sentence[start + 15:close].split(',')
        stored[fields[0]] = fields[1]
        sentence = sentence[:start] + sentence[close + 1:]
    # Pass 2: expand the references (all occurrences at once).
    while sentence.find('#REPEAT') != -1:
        start = sentence.find('#REPEAT')
        # Searching from start+8 avoids matching an earlier ']'.
        close = sentence.find(']', start + 8)
        key = sentence[start + 8:close]
        if key in stored:
            sentence = sentence.replace(sentence[start:close + 1],
                                        str(stored[key]))
    return sentence
def __replace_capitalise(sentence):
    """Drop each '#CAPITALISE ' marker and upper-case the first letter of
    the word that follows it.

    Bug fixes: the original referenced an undefined name ``_sentence``
    (NameError whenever a marker was present) and fell off the end,
    returning None, when the sentence carried no marker at all.

    :param sentence: template string (may be None)
    :return: the substituted sentence, or the input unchanged if None
    """
    if sentence is None:
        return sentence
    while sentence.find('#CAPITALISE') != -1:
        cap_index = sentence.find('#CAPITALISE')
        before = sentence[:cap_index]
        # Skip the 12-character '#CAPITALISE ' marker itself.
        first_letter = sentence[cap_index + 12:cap_index + 13]
        rest = sentence[cap_index + 13:]
        if first_letter in "abcdefghijklmnopqrstuvwxyz":
            first_letter = first_letter.capitalize()
        sentence = before + first_letter + rest
    return sentence
def __check_spaces(sentence):
    """Normalise spacing: single spaces between words, double after
    sentence-ending punctuation (., ! or ?).

    :param sentence: assembled sentence (may be None)
    :return: the re-spaced sentence, or None if input is None
    """
    if sentence is not None:
        terminators = set('.!?')
        rebuilt = ''
        for token in sentence.split():
            if token[-1] in terminators:
                # One extra space here; the joining space below makes two.
                token = token + ' '
            rebuilt += ' ' + token
        return rebuilt.strip()
|
Morrolan/surrealism
|
surrealism.py
|
__check_spaces
|
python
|
def __check_spaces(sentence):
# We have to run the process multiple times:
# Once to search for all spaces, and check if there are adjoining spaces;
# The second time to check for 2 spaces after sentence-ending characters such as . and ! and ?
if sentence is not None:
words = sentence.split()
new_sentence = ''
for (i, word) in enumerate(words):
if word[-1] in set('.!?'):
word += ' '
new_word = ''.join(word)
new_sentence += ' ' + new_word
# remove any trailing whitespace
new_sentence = new_sentence.strip()
return new_sentence
|
Here we check to see that we have the correct number of spaces in the correct locations.
:param _sentence:
:return:
|
train
|
https://github.com/Morrolan/surrealism/blob/7fdd2eae534410df16ee1f9d7e9bb77aa10decab/surrealism.py#L814-L841
| null |
#!/usr/bin/env python
#############################################################################
# surrealism.py - Surreal sentence and error message generator
# Copyright (C) 2014 Ian Havelock
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#
# This is a derivative work (used with permission) from www.ravenblack.net
# Credit also goes to Kevan Davis on whose work the surrealism generator at
# Ravenblack.net is based on...
#############################################################################
__all__ = ['show_faults', 'show_sentences', 'getfault', 'getsentence', 'version',
'sentence_test', 'fault_test', 'show_sentences', 'show_faults']
# IMPORTS ###################################################################
import sqlite3
import random
import pkg_resources
# PARTICULAR IMPORTS ########################################################
from pkg_resources import resource_filename
# CONSTANTS #################################################################
CONN = sqlite3.connect(resource_filename(__name__, 'surrealism.sqlite'))
# VARIABLES #################################################################
# CLASSES ###################################################################
#############################################################################
# EXTERNAL METHODS BELOW
def version():
    """Return the installed version string of the Surrealism package."""
    dist = pkg_resources.require('surrealism')[0]
    return dist.version


def showfaults():
    """Backwards-compatible alias for :func:`show_faults`."""
    return show_faults()


def show_faults():
    """List every active fault template.

    :return: list of (fault_id, fault_template) tuples ordered by id
    """
    cursor = CONN.cursor()
    cursor.execute(
        "select fau_id, fault from surfaults "
        "where fau_is_valid = 'y' order by fau_id asc")
    return cursor.fetchall()


def showsentences():
    """Backwards-compatible alias for :func:`show_sentences`."""
    return show_sentences()


def show_sentences():
    """List every active sentence template.

    :return: dict mapping sentence id -> sentence template
    """
    cursor = CONN.cursor()
    cursor.execute(
        "select sen_id, sentence from sursentences "
        "where sen_is_valid = 'y' order by sen_id asc")
    return {sen_id: template for sen_id, template in cursor.fetchall()}
def faulttest():
    """Backwards-compatible alias for :func:`fault_test`."""
    return fault_test()


def fault_test():
    """Render one instance of every fault template, for testing.

    :return: list of (fault_id, rendered_text_or_disabled_notice) tuples
    """
    counts = __get_table_limits()
    results = []
    rendered = None
    for fau_id in range(1, counts['max_fau'] + 1):
        fault = __get_fault(counts, fault_id=fau_id)
        if fault[0] == 'n':
            rendered = "Fault is DISABLED - ignoring..."
        if fault[0] == 'y':
            rendered = __process_sentence(fault, counts)
        results.append((fault[1], rendered))
    return results


def sentencetest():
    """Backwards-compatible alias for :func:`sentence_test`."""
    return sentence_test()


def sentence_test():
    """Render one random instance of every sentence template, for testing.

    :return: list of (sentence_id, rendered_text_or_disabled_notice) tuples
    """
    counts = __get_table_limits()
    results = []
    rendered = None
    for sen_id in range(1, counts['max_sen'] + 1):
        sentence = __get_sentence(counts, sentence_id=sen_id)
        if sentence[0] == 'n':
            rendered = "Sentence is DISABLED - ignoring..."
        if sentence[0] == 'y':
            rendered = __process_sentence(sentence, counts)
        results.append((sentence[1], rendered))
    return results
def getfault(fault_id=None):
    """Backwards-compatible alias for :func:`get_fault`."""
    return get_fault(fault_id)


def get_fault(fault_id=None):
    """Retrieve a randomly-generated error message as a unicode string.

    :param fault_id: optional integer picking a specific fault template;
        a float is rounded, anything else selects a random template.
        Keywords inside the template are always randomised.
    :return: the rendered fault text, or None when nothing could be fetched
    """
    counts = __get_table_limits()
    result = None
    id_ = 0
    try:
        if isinstance(fault_id, int):
            id_ = fault_id
        elif isinstance(fault_id, float):
            print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
            id_ = round(fault_id)
        else:
            id_ = random.randint(1, counts['max_fau'])
    except ValueError:
        print("ValueError: Incorrect parameter type detected.")
    if id_ > counts['max_fau']:
        print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_fau'])))
        id_ = counts['max_fau']
    fault = __get_fault(counts, fault_id=id_)
    if fault is None:
        print('ValueError: _fault cannot be None.')
        return result
    # Keep drawing random templates until an enabled ('y') one turns up.
    while fault[0] == 'n':
        fault = __get_fault(counts, None)
    if fault[0] == 'y':
        result = __process_sentence(fault, counts)
    return result


def getsentence(sentence_id=None):
    """Backwards-compatible alias for :func:`get_sentence`."""
    return get_sentence(sentence_id)


def get_sentence(sentence_id=None):
    """Retrieve a randomly-generated sentence as a unicode string.

    :param sentence_id: optional integer picking a specific sentence
        template; a float is rounded, anything else selects a random
        template. Keywords inside the template are always randomised.
    :return: the rendered sentence, or None when nothing could be fetched
    """
    counts = __get_table_limits()
    result = None
    id_ = 0
    try:
        if isinstance(sentence_id, int):
            id_ = sentence_id
        elif isinstance(sentence_id, float):
            print("""ValueError: Floating point number detected.
Rounding number to 0 decimal places.""")
            id_ = round(sentence_id)
        else:
            id_ = random.randint(1, counts['max_sen'])
    except ValueError:
        print("ValueError: Incorrect parameter type detected.")
    if id_ > counts['max_sen']:
        print("""ValueError: Parameter integer is too high.
Maximum permitted value is {0}.""".format(str(counts['max_sen'])))
        id_ = counts['max_sen']
    sentence = __get_sentence(counts, sentence_id=id_)
    if sentence is None:
        print('ValueError: _sentence cannot be None.')
        return result
    # Deliberately re-draw at random until an enabled ('y') template turns up.
    while sentence[0] == 'n':
        sentence = __get_sentence(counts, None)
    if sentence[0] == 'y':
        result = __process_sentence(sentence, counts)
    return result
#############################################################################
# INTERNAL METHODS BELOW
def __get_fault(counts, fault_id=None):
    """Fetch one raw fault row from the database.

    :param counts: table-limit dict (uses 'max_fau' for random draws)
    :param fault_id: optional integer id; anything else (or an id missing
        from the table) falls back to a random existing id
    :return: the full surfaults row as a tuple
    """
    cursor = CONN.cursor()
    # Collect the ids actually present so random draws never miss
    # (ids may be sparse after deletions).
    cursor.execute("select fau_id from surfaults")
    existing_ids = {row[0] for row in cursor.fetchall()}
    if fault_id is not None and type(fault_id) is int:
        id_to_fetch = fault_id
    else:
        id_to_fetch = random.randint(1, counts['max_fau'])
    while id_to_fetch not in existing_ids:
        id_to_fetch = random.randint(1, counts['max_fau'])
    # Parameterised query instead of string formatting (safer, and lets
    # sqlite cache the prepared statement).
    cursor.execute("select * from surfaults where fau_id = ?", (id_to_fetch,))
    return cursor.fetchone()


def __get_sentence(counts, sentence_id=None):
    """Fetch one raw sentence row from the database.

    :param counts: table-limit dict (uses 'max_sen' for random draws)
    :param sentence_id: optional integer id; anything else (or an id
        missing from the table) falls back to a random existing id
    :return: the full sursentences row as a tuple
    """
    cursor = CONN.cursor()
    # Collect the ids actually present so random draws never miss.
    cursor.execute("select sen_id from sursentences")
    existing_ids = {row[0] for row in cursor.fetchall()}
    if sentence_id is not None and type(sentence_id) is int:
        id_to_fetch = sentence_id
    else:
        id_to_fetch = random.randint(1, counts['max_sen'])
    while id_to_fetch not in existing_ids:
        id_to_fetch = random.randint(1, counts['max_sen'])
    # Parameterised query instead of string formatting.
    cursor.execute("select * from sursentences where sen_id = ?",
                   (id_to_fetch,))
    return cursor.fetchone()
def __get_verb(counts):
    """Return a random verb string from the ``surverbs`` table.

    :param counts: dict of table row counts (reads ``counts['max_verb']``).
    """
    cursor = CONN.cursor()
    check_query = "select verb_id from surverbs"
    cursor.execute(check_query)
    check_result = cursor.fetchall()
    id_list = []
    for row in check_result:
        id_list.append(row[0])
    # re-roll until the random id matches an existing row (ids may be sparse)
    rand = random.randint(1, counts['max_verb'])
    while rand not in id_list:
        rand = random.randint(1, counts['max_verb'])
    query = "select * from surverbs where verb_id = {0}".format(rand)
    cursor.execute(query)
    result = cursor.fetchone()
    # cursor.close()
    # column 1 holds the verb text
    return result[1]
def __get_noun(counts):
    """Return a random noun string from the ``surnouns`` table.

    :param counts: dict of table row counts (reads ``counts['max_nouns']``).
    """
    cursor = CONN.cursor()
    check_query = "select noun_id from surnouns"
    cursor.execute(check_query)
    check_result = cursor.fetchall()
    id_list = []
    for row in check_result:
        id_list.append(row[0])
    # re-roll until the random id matches an existing row (ids may be sparse)
    rand = random.randint(1, counts['max_nouns'])
    while rand not in id_list:
        rand = random.randint(1, counts['max_nouns'])
    query = "select * from surnouns where noun_id = {0}".format(rand)
    cursor.execute(query)
    result = cursor.fetchone()
    # cursor.close()
    # column 1 holds the noun text
    return result[1]
def __get_adjective(counts):
    """Return a random adjective string from the ``suradjs`` table.

    :param counts: dict of table row counts (reads ``counts['max_adjectives']``).
    """
    cursor = CONN.cursor()
    check_query = "select adj_id from suradjs"
    cursor.execute(check_query)
    check_result = cursor.fetchall()
    id_list = []
    for row in check_result:
        id_list.append(row[0])
    # re-roll until the random id matches an existing row (ids may be sparse)
    rand = random.randint(1, counts['max_adjectives'])
    while rand not in id_list:
        rand = random.randint(1, counts['max_adjectives'])
    query = "select * from suradjs where adj_id = {0}".format(rand)
    cursor.execute(query)
    result = cursor.fetchone()
    # cursor.close()
    # column 1 holds the adjective text
    return result[1]
def __get_name(counts):
    """Return a random name string from the ``surnames`` table.

    :param counts: dict of table row counts (reads ``counts['max_names']``).
    """
    cursor = CONN.cursor()
    check_query = "select name_id from surnames"
    cursor.execute(check_query)
    check_result = cursor.fetchall()
    id_list = []
    for row in check_result:
        id_list.append(row[0])
    # re-roll until the random id matches an existing row (ids may be sparse)
    rand = random.randint(1, counts['max_names'])
    while rand not in id_list:
        rand = random.randint(1, counts['max_names'])
    query = "select * from surnames where name_id = {0}".format(rand)
    cursor.execute(query)
    result = cursor.fetchone()
    # cursor.close()
    # column 1 holds the name text
    return result[1]
def __get_table_limits():
    """Count the rows of each content table.

    :return: dict of upper bounds for random id selection, keyed exactly by
        the names the lookup helpers read: ``max_adjectives``, ``max_names``,
        ``max_nouns``, ``max_sen``, ``max_fau`` and ``max_verb``.

    Fix: the old version pre-seeded keys ``max_sentences``/``max_faults``/
    ``max_verbs`` that were never populated and never read, leaving
    misleading ``None`` entries in the returned dict.
    """
    cursor = CONN.cursor()

    def _count(table):
        # fetchone() returns a 1-tuple such as (42,); unwrap it.
        cursor.execute('SELECT count(*) FROM {0}'.format(table))
        return cursor.fetchone()[0]

    return {
        'max_adjectives': _count('suradjs'),
        'max_names': _count('surnames'),
        'max_nouns': _count('surnouns'),
        'max_sen': _count('sursentences'),
        'max_fau': _count('surfaults'),
        'max_verb': _count('surverbs'),
    }
def __process_sentence(sentence_tuple, counts):
    """Expand every placeholder in a raw sentence template.

    :param sentence_tuple: full ``sursentences`` row; index 2 holds the text.
    :param counts: dict of table row counts used for the random lookups.
    :return: the fully substituted sentence string.
    """
    # pull the actual sentence from the tuple (tuple contains additional data such as ID)
    sentence = sentence_tuple[2]
    # now we start replacing words one type at a time...
    sentence = __replace_verbs(sentence, counts)
    sentence = __replace_nouns(sentence, counts)
    sentence = ___replace_adjective_maybe(sentence, counts)
    sentence = __replace_adjective(sentence, counts)
    sentence = __replace_names(sentence, counts)
    # here we perform a check to see if we need to use A or AN depending on the
    # first letter of the following word...
    sentence = __replace_an(sentence)
    # replace the new repeating segments
    sentence = __replace_repeat(sentence)
    # now we will read, choose and substitute each of the RANDOM sentence tuples
    sentence = __replace_random(sentence)
    # now we are going to choose whether to capitalize words/sentences or not
    sentence = __replace_capitalise(sentence)
    # here we will choose whether to capitalize all words in the sentence
    sentence = __replace_capall(sentence)
    # check for appropriate spaces in the correct places.
    sentence = __check_spaces(sentence)
    return sentence
def __replace_verbs(sentence, counts):
    """Substitute every ``#VERB`` placeholder with a random verb.

    :param sentence: template text, or None (returned unchanged).
    :param counts: table row counts forwarded to the verb lookup.
    """
    if sentence is None:
        return sentence
    while '#VERB' in sentence:
        sentence = sentence.replace('#VERB', str(__get_verb(counts)), 1)
    return sentence
def __replace_nouns(sentence, counts):
    """Substitute every ``#NOUN`` placeholder with a random noun.

    :param sentence: template text, or None (returned unchanged).
    :param counts: table row counts forwarded to the noun lookup.
    """
    if sentence is None:
        return sentence
    while '#NOUN' in sentence:
        sentence = sentence.replace('#NOUN', str(__get_noun(counts)), 1)
    return sentence
def ___replace_adjective_maybe(sentence, counts):
    """Replace each ``#ADJECTIVE_MAYBE`` with either ``' <adjective>'`` or
    nothing at all, flipping a coin for each occurrence.

    Fix: the old version rolled the random decision once, before the loop,
    so every marker in a sentence got the same all-or-nothing outcome; the
    intent of "maybe" is an independent decision per occurrence.

    :param sentence: template text, or None (returned unchanged).
    :param counts: table row counts forwarded to the adjective lookup.
    """
    if sentence is None:
        return sentence
    while '#ADJECTIVE_MAYBE' in sentence:
        if random.randint(0, 1) == 0:
            # note the leading space: the marker is written flush against
            # the preceding word in the templates
            sentence = sentence.replace('#ADJECTIVE_MAYBE',
                                        ' ' + str(__get_adjective(counts)), 1)
        else:
            sentence = sentence.replace('#ADJECTIVE_MAYBE', '', 1)
    return sentence
def __replace_adjective(sentence, counts):
    """Substitute every ``#ADJECTIVE`` placeholder with a random adjective.

    :param sentence: template text, or None (returned unchanged).
    :param counts: table row counts forwarded to the adjective lookup.
    """
    if sentence is None:
        return sentence
    while '#ADJECTIVE' in sentence:
        sentence = sentence.replace('#ADJECTIVE', str(__get_adjective(counts)), 1)
    return sentence
def __replace_names(sentence, counts):
    """Substitute every ``#NAME`` placeholder with a random name.

    :param sentence: template text, or None (returned unchanged).
    :param counts: table row counts forwarded to the name lookup.
    """
    if sentence is None:
        return sentence
    while '#NAME' in sentence:
        sentence = sentence.replace('#NAME', str(__get_name(counts)), 1)
    return sentence
def __replace_an(sentence):
    """Replace every ``#AN`` marker with ``'a'`` or ``'an'``.

    The choice depends on the first letter of the following word: the marker
    plus its trailing space occupy 4 characters, so ``find('#AN') + 4``
    indexes that letter.

    Fix: the old version indexed ``sentence[an_index]`` unconditionally and
    raised IndexError when ``#AN`` appeared at (or within 4 characters of)
    the end of the sentence; such markers now fall back to ``'a'``.

    :param sentence: template text, or None (returned unchanged).
    """
    if sentence is None:
        return sentence
    while '#AN' in sentence:
        next_letter = sentence.find('#AN') + 4
        if next_letter < len(sentence) and sentence[next_letter] in 'aeiouAEIOU':
            sentence = sentence.replace('#AN', 'an', 1)
        else:
            sentence = sentence.replace('#AN', 'a', 1)
    return sentence
def __replace_random(sentence):
    """Expand every ``#RANDOM[n,opt1,...,optn]`` marker with one of its options.

    The first element inside the brackets is the option count; a 1-based
    random index then selects one of the remaining elements.

    Fix: removed the dead guard ``if sentence.find('#RANDOM') is not None``
    — ``str.find`` returns an int (never None), so the check was always
    true and only obscured the logic.

    :param sentence: template text, or None (returned unchanged).
    """
    if sentence is None:
        return sentence
    while sentence.find('#RANDOM') != -1:
        tag_start = sentence.find('#RANDOM')
        list_start = tag_start + 8          # skip past '#RANDOM['
        list_end = sentence.find(']')       # first ']' (markers are not nested)
        options = sentence[list_start:list_end].split(',')
        pick = random.randint(1, int(options[0]))
        sentence = sentence.replace(sentence[tag_start:list_end + 1],
                                    options[pick], 1)
    return sentence
def __replace_repeat(sentence):
    """
    Allows the use of repeating random-elements such as in the 'Ten green
    bottles' type sentences.

    Pass 1 harvests ``#DEFINE_REPEAT[key,text]`` definitions (stripping them
    from the sentence); pass 2 expands every ``#REPEAT[key]`` marker with the
    stored text.

    :param sentence: template text, or None (returned unchanged).
    """
    ######### USE SENTENCE_ID 47 for testing!
    repeat_dict = {}
    if sentence is not None:
        # Pass 1: collect definitions. +15 skips '#DEFINE_REPEAT['.
        while sentence.find('#DEFINE_REPEAT') != -1:
            begin_index = sentence.find('#DEFINE_REPEAT')
            start_index = begin_index + 15
            end_index = sentence.find(']')
            # NOTE(review): find() returns an int, so this guard is always
            # true — kept as-is to preserve behavior.
            if sentence.find('#DEFINE_REPEAT') is not None:
                sub_list = sentence[start_index:end_index].split(',')
                choice = sub_list[0]
                repeat_text = sub_list[1]
                repeat_dict[choice] = repeat_text
                # strip the whole definition from the sentence
                sentence = sentence.replace(sentence[begin_index:end_index + 1], '', 1)
        # Pass 2: expand markers. +8 skips '#REPEAT['.
        while sentence.find('#REPEAT') != -1:
            if sentence.find('#REPEAT') is not None:
                repeat_begin_index = sentence.find('#REPEAT')
                repeat_start_index = repeat_begin_index + 8
                # by searching from repeat_index below we don't encounter dodgy bracket-matching errors.
                repeat_end_index = sentence.find(']', repeat_start_index)
                repeat_index = sentence[repeat_start_index:repeat_end_index]
                if repeat_index in repeat_dict:
                    # note: replaces ALL occurrences of this marker at once
                    sentence = sentence.replace(sentence[repeat_begin_index:repeat_end_index + 1],
                                                str(repeat_dict[repeat_index]))
            if sentence.find('#REPEAT') == -1:
                return sentence
        return sentence
    else:
        return sentence
def __replace_capitalise(sentence):
    """Capitalise the word following each ``#CAPITALISE`` marker.

    The marker plus its trailing space occupy 12 characters; the single
    character after that is upper-cased when it is a lower-case ASCII
    letter (already-capitalised or non-letter characters pass through).

    Fix: the old version read ``_sentence.find(...)`` — an undefined name —
    so any sentence containing the marker raised NameError; it could also
    fall off the end without returning.

    :param sentence: template text, or None (returned unchanged).
    """
    if sentence is None:
        return sentence
    while sentence.find('#CAPITALISE') != -1:
        cap_index = sentence.find('#CAPITALISE')
        part1 = sentence[:cap_index]
        # the single character right after '#CAPITALISE '
        part2 = sentence[cap_index + 12:cap_index + 13]
        part3 = sentence[cap_index + 13:]
        if part2 in "abcdefghijklmnopqrstuvwxyz":
            sentence = part1 + part2.capitalize() + part3
        else:
            sentence = part1 + part2 + part3
    return sentence
def __replace_capall(sentence):
    """Upper-case the entire sentence when a ``#CAPALL`` marker is present.

    Fix: the old version looped on ``'#CAPALL'`` but only ever removed
    ``'#CAPALL '`` (with a trailing space), so a marker at the very end of a
    sentence spun forever; it could also fall off the end without returning.

    :param sentence: template text, or None (returned unchanged).
    """
    if sentence is None:
        return sentence
    while '#CAPALL' in sentence:
        sentence = sentence.upper()
        # remove the marker with or without its trailing space
        if '#CAPALL ' in sentence:
            sentence = sentence.replace('#CAPALL ', '', 1)
        else:
            sentence = sentence.replace('#CAPALL', '', 1)
    return sentence
|
jonbretman/jinja-to-js
|
jinja_to_js/__init__.py
|
option
|
python
|
@contextlib.contextmanager
def option(current_kwargs, **kwargs):
    """
    Context manager for temporarily setting a keyword argument and
    then restoring it to whatever it was before.

    Fix: restored the ``@contextlib.contextmanager`` decorator — the
    function is used as ``with option(kwargs, ...):`` and is a generator,
    so without the decorator every such ``with`` statement fails.
    """
    # snapshot only the keys we are about to overwrite (missing keys -> None)
    tmp_kwargs = dict((key, current_kwargs.get(key)) for key, value in kwargs.items())
    current_kwargs.update(kwargs)
    yield
    # restore the previous values (missing keys come back as None)
    current_kwargs.update(tmp_kwargs)
|
Context manager for temporarily setting a keyword argument and
then restoring it to whatever it was before.
|
train
|
https://github.com/jonbretman/jinja-to-js/blob/0a784b10a83d37a3171c5797547e9fc460c51289/jinja_to_js/__init__.py#L105-L114
| null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import contextlib
import json
import re
import os
from os import path
from jinja2 import Environment, FileSystemLoader, nodes
import six
OPERANDS = {
'eq': '===',
'ne': '!==',
'lt': ' < ',
'gt': ' > ',
'lteq': ' <= ',
'gteq': ' >= '
}
DICT_ITER_METHODS = (
'iteritems',
'items',
'values',
'keys'
)
STATE_DEFAULT = 0
STATE_EXECUTING = 1
STATE_INTERPOLATING = 2
LOOP_HELPER_INDEX = 'index'
LOOP_HELPER_INDEX_0 = 'index0'
LOOP_HELPER_FIRST = 'first'
LOOP_HELPER_LAST = 'last'
LOOP_HELPER_LENGTH = 'length'
LOOP_HELPERS = (
LOOP_HELPER_INDEX,
LOOP_HELPER_INDEX_0,
LOOP_HELPER_FIRST,
LOOP_HELPER_LAST,
LOOP_HELPER_LENGTH
)
def amd_format(dependencies, template_function):
    """Wrap *template_function* in an AMD ``define`` call.

    ``dependencies`` is a list of ``(module_path, variable_name)`` tuples.
    """
    paths = ",".join('"{0}"'.format(dep_path) for dep_path, _ in dependencies)
    names = ",".join(var_name for _, var_name in dependencies)
    return 'define([%s], function (%s) { return %s; });' % (
        paths, names, template_function
    )
def commonjs_format(dependencies, template_function):
    """Wrap *template_function* as a CommonJS module with ``require`` calls.

    ``dependencies`` is a list of ``(module_path, variable_name)`` tuples.
    """
    requires = [
        'var {0} = require("{1}");'.format(var_name, dep_path)
        for dep_path, var_name in dependencies
    ]
    return '{0}module.exports = {1};'.format(''.join(requires), template_function)
def es6_format(dependencies, template_function):
    """Wrap *template_function* as an ES6 module with ``import`` statements.

    ``dependencies`` is a list of ``(module_path, variable_name)`` tuples.
    """
    header = ''.join(
        'import {0} from "{1}";'.format(var_name, dep_path)
        for dep_path, var_name in dependencies
    )
    return header + 'export default {0}'.format(template_function)
JS_MODULE_FORMATS = {
None: lambda dependencies, template_function: template_function,
'amd': amd_format,
'commonjs': commonjs_format,
'es6': es6_format
}
# This string has to double all the '{' and '}' due to Python's string formatting.
# See - https://docs.python.org/2/library/string.html#formatstrings
TEMPLATE_WRAPPER = """
function {function_name}(ctx) {{
var __result = "";
var __tmp;
var __runtime = jinjaToJS.runtime;
var __filters = jinjaToJS.filters;
var __globals = jinjaToJS.globals;
var context = jinjaToJS.createContext(ctx);
{template_code}
return __result;
}}
"""
class ExtendsException(Exception):
    """
    Raised when an {% extends %} is encountered. At this point the parent template is
    loaded and all blocks defined in the current template passed to it.

    Used purely as control flow: `__init__` catches it to stop processing the
    child template once the parent has been emitted.
    """
    pass
def is_method_call(node, method_name):
    """
    Returns True if `node` is a method call for `method_name`. `method_name`
    can be either a string or an iterable of strings.

    Fix: removed a stray ``@contextlib.contextmanager`` decorator. This is a
    plain boolean predicate (no ``yield``); decorated, every call returned a
    truthy helper object instead of a bool, which would break every
    ``if is_method_call(...)`` check in the compiler. (The decorator belongs
    to the ``option`` context manager, not here.)
    """
    if not isinstance(node, nodes.Call):
        return False
    if isinstance(node.node, nodes.Getattr):
        # e.g. foo.bar()
        method = node.node.attr
    elif isinstance(node.node, nodes.Name):
        # e.g. bar()
        method = node.node.name
    elif isinstance(node.node, nodes.Getitem):
        # e.g. foo["bar"]()
        method = node.node.arg.value
    else:
        return False
    if isinstance(method_name, (list, tuple)):
        return method in method_name
    return method == method_name
def is_loop_helper(node):
    """
    Returns True if node is a loop helper e.g. {{ loop.index }} or {{ loop.first }}
    """
    if not hasattr(node, 'node'):
        return False
    inner = node.node
    return isinstance(inner, nodes.Name) and inner.name == 'loop'
def temp_var_names_generator():
    """Yield an endless sequence of unique temp variable names: __$0, __$1, ..."""
    counter = 0
    while True:
        yield '__$%s' % counter
        counter += 1
class JinjaToJS(object):
def __init__(self,
             template_root,
             template_name,
             js_module_format=None,
             runtime_path='jinja-to-js',
             include_prefix='',
             include_ext='',
             child_blocks=None,
             dependencies=None,
             custom_filters=None):
    """
    Compiles the named Jinja template into a JavaScript function.

    Args:
        template_root (str): The path to where templates should be loaded from.
        template_name (str): The name of the template to compile (relative to `template_root`).
        js_module_format (str, optional): The JavaScript module format to use.
                                          One of ('amd', 'commonjs', 'es6')
        runtime_path (str, optional): If `js_module_format` is specified then the JavaScript
                                      runtime will be imported using the appropriate method.
                                      It defaults to assuming it will be imported from
                                      `node_modules` but you can change it using this option.
        include_prefix (str, optional): If using the `amd` module format you can use this option
                                        to add a prefix to every include path as AMD imports are
                                        generally relative to the main file, not the module
                                        importing.
        include_ext (str, optional): By default any includes will be references without an
                                     extension, as neither AMD, commonJS or ES6 require the
                                     '.js' extension. If you want to use an extension, say
                                     '.template' then set this option to a string including
                                     the leading '.'
        child_blocks (dict, optional): Used internally when handling templates that extend
                                       other templates.
        dependencies (list of tuple, optional): Used internally when handling templates that
                                                extend other templates.
        custom_filters (list of str, optional): List of custom filters which should be allowed.
                                                These may be filters supported by Jinja but not
                                                supported by jinja-to-js. These filters MUST be
                                                registered with the jinja-to-js JS runtime.

    Raises:
        ValueError: if `js_module_format` is not one of the supported formats.
    """
    self.environment = Environment(loader=FileSystemLoader(template_root),
                                   autoescape=True,
                                   extensions=['jinja2.ext.with_', 'jinja2.ext.autoescape'])
    self.output = six.StringIO()
    self.stored_names = set()
    self.temp_var_names = temp_var_names_generator()
    self.state = STATE_DEFAULT
    self.child_blocks = child_blocks or {}
    self.dependencies = dependencies or []
    self._runtime_function_cache = []
    self.js_module_format = js_module_format
    self.runtime_path = runtime_path
    self.include_prefix = include_prefix
    self.include_ext = include_ext
    self.template_root = template_root
    self.template_name = template_name
    self.custom_filters = custom_filters or []
    # The name of the JavaScript function that will output this template. By using a named
    # function the template can call itself which is required to support recursive includes.
    self.js_function_name = 'template' + ''.join(
        x.title() for x in re.split(r'[^\w]|_', path.splitext(self.template_name)[0])
    )
    self.context_name = 'context'
    # the JS runtime is always the first dependency
    self._add_dependency(self.runtime_path, 'jinjaToJS')
    template_string, template_path, _ = self.environment.loader.get_source(
        self.environment, self.template_name
    )
    # It is assumed that this will be the absolute path to the template. It is used to work out
    # related paths for inclues.
    self.template_path = template_path
    if self.js_module_format not in JS_MODULE_FORMATS.keys():
        raise ValueError(
            'The js_module_format option must be one of: %s' % JS_MODULE_FORMATS.keys()
        )
    self.ast = self.environment.parse(template_string)
    # Compile the whole template eagerly; ExtendsException aborts processing
    # of this template once a parent template has been emitted instead.
    try:
        for node in self.ast.body:
            self._process_node(node)
    except ExtendsException:
        pass
def get_output(self):
    """
    Returns the generated JavaScript code.

    Returns:
        str
    """
    # build the named JS function from the accumulated template code
    function_source = TEMPLATE_WRAPPER.format(
        function_name=self.js_function_name,
        template_code=self.output.getvalue()
    ).strip()
    # wrap it in the requested module format (None -> bare function)
    wrap = JS_MODULE_FORMATS[self.js_module_format]
    return wrap(self.dependencies, function_source)
def _get_depencency_var_name(self, dependency):
    """
    Returns the variable name assigned to the given dependency or None if the
    dependency has not yet been registered.

    Args:
        dependency (str): The dependency that needs to be imported.

    Returns:
        str or None
    """
    return next(
        (var_name for dep_path, var_name in self.dependencies if dep_path == dependency),
        None,
    )
def _add_dependency(self, dependency, var_name=None):
    """
    Adds the given dependency and returns the variable name to use to access it. If `var_name`
    is not given then a random one will be created.

    Args:
        dependency (str): module path to import.
        var_name (str, optional): variable name to bind it to; auto-generated
            (e.g. ``__$0``) when omitted.

    Returns:
        str: the variable name bound to the dependency.
    """
    if var_name is None:
        var_name = next(self.temp_var_names)
    # Don't add duplicate dependencies
    if (dependency, var_name) not in self.dependencies:
        self.dependencies.append((dependency, var_name))
    return var_name
def _process_node(self, node, **kwargs):
    """Dispatch *node* to the `_process_<nodename>` handler for its AST type."""
    handler_name = '_process_' + node.__class__.__name__.lower()
    handler = getattr(self, handler_name, None)
    if not callable(handler):
        raise Exception('Unknown node %s' % node)
    handler(node, **kwargs)
def _process_extends(self, node, **kwargs):
    """
    Processes an extends block e.g. `{% extends "some/template.jinja" %}`

    Collects this template's blocks into `child_blocks`, compiles the parent
    template with them, copies the parent's output, then raises
    ExtendsException so the caller stops processing this template.
    """
    # find all the blocks in this template
    for b in self.ast.find_all(nodes.Block):
        # if not already in `child_blocks` then this is the first time a
        # block with this name has been encountered.
        if b.name not in self.child_blocks:
            self.child_blocks[b.name] = b
        else:
            # otherwise we have seen this block before, so we need to find the last
            # super_block and add the block from this template to the end.
            block = self.child_blocks.get(b.name)
            while hasattr(block, 'super_block'):
                block = block.super_block
            block.super_block = b
    # load the parent template
    parent_template = JinjaToJS(template_root=self.template_root,
                                template_name=node.template.value,
                                js_module_format=self.js_module_format,
                                runtime_path=self.runtime_path,
                                include_prefix=self.include_prefix,
                                include_ext=self.include_ext,
                                child_blocks=self.child_blocks,
                                dependencies=self.dependencies)
    # add the parent templates output to the current output
    self.output.write(parent_template.output.getvalue())
    # Raise an exception so we stop parsing this template
    raise ExtendsException
def _process_block(self, node, **kwargs):
    """
    Processes a block e.g. `{% block my_block %}{% endblock %}`

    If a child template has overridden this block, the child's version is
    processed instead, with this node attached as the end of its
    `super_block` chain (so `super()` works).

    Fix: the walk to the end of the chain advanced with
    `child_block.super_block` instead of `last_block.super_block`, so
    `last_block` never moved past the first link — an infinite loop for any
    inheritance chain longer than two templates. The corrected walk matches
    the one in `_process_extends`.
    """
    # check if this node already has a 'super_block' attribute
    if not hasattr(node, 'super_block'):
        # since it doesn't it must be the last block in the inheritance chain
        node.super_block = None
        # see if there has been a child block defined - if there is this
        # will be the first block in the inheritance chain
        child_block = self.child_blocks.get(node.name)
        if child_block:
            # we have child nodes so we need to set `node` as the
            # super of the last one in the chain
            last_block = child_block
            while hasattr(last_block, 'super_block'):
                last_block = last_block.super_block
            # once we have found it, set this node as it's super block
            last_block.super_block = node
            # this is the node we want to process as it's the first in the inheritance chain
            node = child_block
    # process the block passing the it's super along, if this block
    # calls super() it will be handled by `_process_call`
    for n in node.body:
        self._process_node(n, super_block=node.super_block, **kwargs)
def _process_output(self, node, **kwargs):
    """
    Processes an output node, which will contain things like `Name` and
    `TemplateData` nodes — each child is dispatched in turn.
    """
    for child in node.nodes:
        self._process_node(child, **kwargs)
def _process_templatedata(self, node, **_):
    """
    Processes a `TemplateData` node, this is just a bit of as-is text
    to be written to the output (escaped for a JS string literal).
    """
    # escape double quotes, then escape new lines
    literal = node.data.replace('"', '\\"').replace('\n', '\\n')
    # append value to the result
    self.output.write('__result += "' + literal + '";')
def _process_name(self, node, **kwargs):
    """
    Processes a `Name` node. Some examples of `Name` nodes:
        {{ foo }} -> 'foo' is a Name
        {% if foo %} -> 'foo' is a Name
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs):
            # Names not assigned locally (by {% set %} or a for target) are
            # read off the template context object instead.
            if node.name not in self.stored_names and node.ctx != 'store':
                self.output.write(self.context_name)
                self.output.write('.')
            # Remember assigned names so later reads skip the context prefix.
            if node.ctx == 'store':
                self.stored_names.add(node.name)
            self.output.write(node.name)
def _process_getattr(self, node, **kwargs):
    """
    Processes a `GetAttr` node. e.g. {{ foo.bar }}

    `loop.<helper>` accesses are routed to `_process_loop_helper`; everything
    else becomes a plain JS property access.
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            if is_loop_helper(node):
                self._process_loop_helper(node, **new_kwargs)
            else:
                self._process_node(node.node, **new_kwargs)
                self.output.write('.')
                self.output.write(node.attr)
def _process_getitem(self, node, **kwargs):
    """
    Processes a `GetItem` node e.g. {{ foo["bar"] }}

    Slices become `.slice(start[, stop])` calls (step is unsupported);
    plain subscripts become `[...]`.
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.node, **new_kwargs)
            if isinstance(node.arg, nodes.Slice):
                self.output.write('.slice(')
                if node.arg.step is not None:
                    raise Exception('The step argument is not supported when slicing.')
                # missing start defaults to 0
                if node.arg.start is None:
                    self.output.write('0')
                else:
                    self._process_node(node.arg.start, **new_kwargs)
                if node.arg.stop is None:
                    self.output.write(')')
                else:
                    self.output.write(',')
                    self._process_node(node.arg.stop, **new_kwargs)
                    self.output.write(')')
            else:
                self.output.write('[')
                self._process_node(node.arg, **new_kwargs)
                self.output.write(']')
def _process_for(self, node, **kwargs):
    """
    Processes a for loop. e.g.
        {% for number in numbers %}
            {{ number }}
        {% endfor %}
        {% for key, value in somemap.items() %}
            {{ key }} -> {{ value }}
        {% endfor %}

    Emits `__runtime.each(iterable, function (value, key) {...});`.
    """
    # since a for loop can introduce new names into the context
    # we need to remember the ones that existed outside the loop
    previous_stored_names = self.stored_names.copy()
    with self._execution():
        self.output.write('__runtime.each(')
        # iterating dict keys maps onto Object.keys(...)
        if is_method_call(node.iter, dict.keys.__name__):
            self.output.write('Object.keys(')
        self._process_node(node.iter, **kwargs)
        if is_method_call(node.iter, dict.keys.__name__):
            self.output.write(')')
        self.output.write(',')
        self.output.write('function')
        self.output.write('(')
        # javascript iterations put the value first, then the key
        if isinstance(node.target, nodes.Tuple):
            if len(node.target.items) > 2:
                raise Exception('De-structuring more than 2 items is not supported.')
            for i, item in enumerate(reversed(node.target.items)):
                self._process_node(item, **kwargs)
                if i < len(node.target.items) - 1:
                    self.output.write(',')
        else:
            self._process_node(node.target, **kwargs)
        self.output.write(')')
        self.output.write('{')
        # {% for x in xs if cond %}: bail out of the callback when cond fails
        if node.test:
            self.output.write('if (!(')
            self._process_node(node.test, **kwargs)
            self.output.write(')) { return; }')
    # loop targets are in scope for the body
    assigns = node.target.items if isinstance(node.target, nodes.Tuple) else [node.target]
    with self._scoped_variables(assigns, **kwargs):
        for n in node.body:
            self._process_node(n, **kwargs)
    with self._execution():
        self.output.write('}')
        self.output.write(')')
        self.output.write(';')
    # restore the stored names
    self.stored_names = previous_stored_names
def _process_if(self, node, execute_end=None, **kwargs):
    """
    Processes an if block e.g. `{% if foo %} do something {% endif %}`

    Handles elif chains recursively via `execute_end` and emits plain JS
    `if (...) { ... } else { ... }` code.
    """
    with self._execution():
        self.output.write('if')
        self.output.write('(')
        # the test must be truth-tested with Python semantics
        with option(kwargs, use_python_bool_wrapper=True):
            self._process_node(node.test, **kwargs)
        self.output.write(')')
        self.output.write('{')
    # We accept an `execute_end` function as a keyword argument as this function is
    # recursive in the case of something like if-elif-elif-else. In these cases this
    # invocation of this function may have to close execution opened by a previous
    # invocation of this function.
    if execute_end:
        execute_end()
    # body
    for n in node.body:
        self._process_node(n, **kwargs)
    if not node.else_ and not node.elif_:
        # no else - just close the if
        with self._execution():
            self.output.write('}')
    else:
        # either an else or an elif
        with self._execution() as execute_end:
            self.output.write('}')
            self.output.write(' else ')
            # check for elif
            for n in node.elif_:
                self._process_node(n, execute_end=execute_end, **kwargs)
            if node.elif_ and node.else_:
                self.output.write(' else ')
            # open up the body
            self.output.write('{')
        # process the body of the else
        for n in node.else_:
            self._process_node(n, **kwargs)
        # close the body
        with self._execution():
            self.output.write('}')
def _process_condexpr(self, node, **kwargs):
    """Processes `a if cond else b` into a JS ternary `(cond ? a : b)`."""
    with self._interpolation():
        self.output.write('(')
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.test, **new_kwargs)
        # NOTE(review): the branches use the outer kwargs, not new_kwargs —
        # presumably intentional (only the test is truth-tested); confirm.
        self.output.write(' ? ')
        self._process_node(node.expr1, **kwargs)
        self.output.write(' : ')
        self._process_node(node.expr2, **kwargs)
        self.output.write(')')
def _process_not(self, node, **kwargs):
    """Processes a `not x` expression into JS `!x` (with Python truthiness)."""
    self.output.write('!')
    with self._python_bool_wrapper(**kwargs) as new_kwargs:
        self._process_node(node.node, **new_kwargs)
def _process_or(self, node, **kwargs):
    """Processes `a or b` into JS `a || b`."""
    self._process_node(node.left, **kwargs)
    self.output.write(' || ')
    self._process_node(node.right, **kwargs)
def _process_and(self, node, **kwargs):
    """Processes `a and b` into JS `a && b`."""
    self._process_node(node.left, **kwargs)
    self.output.write(' && ')
    self._process_node(node.right, **kwargs)
def _process_tuple(self, node, **kwargs):
    """Processes a tuple literal into a JS array `[a,b,...]`."""
    self.output.write('[')
    last = len(node.items) - 1
    for index, element in enumerate(node.items):
        self._process_node(element, **kwargs)
        if index != last:
            self.output.write(',')
    self.output.write(']')
def _process_call(self, node, super_block=None, **kwargs):
    """
    Processes a call node: dict-iteration methods collapse to the dict
    itself, `super()` re-emits the parent block, anything else becomes a
    plain JS call on a context variable.
    """
    if is_method_call(node, DICT_ITER_METHODS):
        # special case for dict methods
        self._process_node(node.node.node, **kwargs)
    elif is_method_call(node, 'super'):
        # special case for the super() method which is available inside blocks
        if not super_block:
            raise Exception('super() called outside of a block with a parent.')
        self._process_node(super_block, **kwargs)
    else:
        # just a normal function call on a context variable
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self._process_node(node.node, **new_kwargs)
                self.output.write('(')
                self._process_args(node, **new_kwargs)
                self.output.write(')')
            # only output the semi-colon if we are not interpolating
            # NOTE(review): this writes an empty string, i.e. a no-op —
            # presumably ';' was intended; confirm against upstream.
            if self.state != STATE_INTERPOLATING:
                self.output.write('')
def _process_filter(self, node, **kwargs):
    """
    Dispatches a filter node to a `_process_filter_<name>` handler, or — for
    names registered in `custom_filters` — emits a `__filters.<name>(...)`
    call for the JS runtime to resolve. Unknown filters raise.
    """
    method_name = getattr(self, '_process_filter_%s' % node.name, None)
    if callable(method_name):
        method_name(node, **kwargs)
    elif node.name in self.custom_filters:
        # custom filter output is treated as safe (no autoescaping)
        with self._interpolation(safe=True):
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self.output.write('__filters.%s(' % node.name)
                self._process_node(node.node, **new_kwargs)
                if getattr(node, 'args', None):
                    self.output.write(',')
                    self._process_args(node, **new_kwargs)
                self.output.write(')')
    else:
        raise Exception('Unsupported filter: %s' % node.name)
def _process_filter_safe(self, node, **kwargs):
    """Processes the `safe` filter: interpolate the value without escaping."""
    with self._interpolation(safe=True):
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.node, **new_kwargs)
def _process_filter_capitalize(self, node, **kwargs):
    """Processes the `capitalize` filter as `__filters.capitalize(value)`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.capitalize(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(')')
def _process_filter_abs(self, node, **kwargs):
    """Processes the `abs` filter as a native `Math.abs(value)` call."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('Math.abs(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(')')
def _process_filter_attr(self, node, **kwargs):
    """Processes the `attr` filter as a JS subscript `value[name]`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.node, **new_kwargs)
            self.output.write('[')
            self._process_node(node.args[0], **new_kwargs)
            self.output.write(']')
def _process_filter_batch(self, node, **kwargs):
    """Processes the `batch` filter as `__filters.batch(value, args...)`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.batch(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(',')
            self._process_args(node, **new_kwargs)
            self.output.write(')')
def _process_filter_default(self, node, **kwargs):
    """Processes the `default` filter as `__filters.default(value[, args...])`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.default(')
            self._process_node(node.node, **new_kwargs)
            # the default value / boolean flag are optional
            if node.args:
                self.output.write(',')
                self._process_args(node, **new_kwargs)
            self.output.write(')')
def _process_filter_first(self, node, **kwargs):
    """Processes the `first` filter as `__filters.first(value)`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.first(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(')')
def _process_filter_int(self, node, **kwargs):
    """Processes the `int` filter as `__filters.int(value[, default])`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.int(')
            self._process_node(node.node, **new_kwargs)
            # optional fallback value for non-numeric input
            if node.args:
                self.output.write(',')
                self._process_args(node, **new_kwargs)
            self.output.write(')')
def _process_filter_last(self, node, **kwargs):
    """Processes the `last` filter as `__filters.last(value)`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.last(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(')')
def _process_filter_length(self, node, **kwargs):
    """Processes the `length` filter as `__filters.size(value)`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.size(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(')')
def _process_filter_lower(self, node, **kwargs):
    """Processes the `lower` filter; `+ ""` coerces the value to a JS string."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(' + "").toLowerCase()')
def _process_filter_slice(self, node, **kwargs):
    """Processes the `slice` filter as `__filters.slice(value, args...)`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.slice(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(',')
            self._process_args(node, **new_kwargs)
            self.output.write(')')
def _process_filter_title(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.title(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_trim(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").trim()')
def _process_filter_upper(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").toUpperCase()')
def _process_filter_truncate(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.truncate(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_assign(self, node, **kwargs):
with self._execution():
self.output.write('var ')
self._process_node(node.target, **kwargs)
self.output.write(' = ')
self._process_node(node.node, **kwargs)
self.output.write(';')
def _process_with(self, node, **kwargs):
    """
    Processes a `{% with %}` block.

    Variables assigned on the tag (`{% with x = 1 %}`) and inside the body
    (`{% set y = 2 %}`) are scoped to an immediately-invoked JS function, and
    the set of locally-stored names is restored afterwards.
    """
    # keep a copy of the stored names before the scope
    previous_stored_names = self.stored_names.copy()
    # assigns in the with tag
    # e.g. {% with var = "something %}
    assigns_in_tag = [nodes.Assign(t, v) for t, v in zip(node.targets, node.values)]
    # assigns in the with body
    # e.g. {% set name = 'John' %}
    assigns_in_body = [x for x in node.body if isinstance(x, nodes.Assign)]
    # remove assigns from the body
    node.body = [x for x in node.body if not isinstance(x, nodes.Assign)]
    # get a list of all the assigns in this with block
    # both on the tag, and within the body of the block
    all_assigns = assigns_in_tag + assigns_in_body
    with self._execution():
        self.output.write('(function () {')
    with self._scoped_variables(all_assigns, **kwargs):
        # use a distinct loop variable: the previous code shadowed `node`,
        # which silently rebound the parameter mid-function
        for body_node in node.body:
            self._process_node(body_node, **kwargs)
    with self._execution():
        self.output.write('})();')
    # restore previous stored names
    self.stored_names = previous_stored_names
def _process_compare(self, node, **kwargs):
    """
    Processes a comparison e.g. `{% if a == b %}` or `{% if a < b %}`.

    Equality between two non-constant operands is emitted as a call to
    `__runtime.isEqual` so that lists/objects compare by value as in Python;
    every other comparison maps directly onto a JS operator via OPERANDS.

    Raises:
        Exception: if the comparison chains more than one operator
            (e.g. `a < b < c`), which has no direct JS equivalent.
    """
    if len(node.ops) > 1:
        raise Exception('Multiple operands are not supported.')
    operand = node.ops[0]
    is_equality = operand.op in ('eq', 'ne')
    left_hand_is_const = isinstance(node.expr, nodes.Const)
    right_hand_is_const = isinstance(operand.expr, nodes.Const)
    # If the operand is equality and neither the left or right hand side are constants then we
    # will need to use the JavaScript deep equals function. Ideally we want to avoid using this
    # as it is quite a big function.
    use_is_equal_function = is_equality and not (left_hand_is_const or right_hand_is_const)
    with option(kwargs, use_python_bool_wrapper=False):
        if use_is_equal_function:
            if operand.op == 'ne':
                # negate the deep-equal call for `!=`
                self.output.write('!')
            self.output.write('__runtime.isEqual(')
        self._process_node(node.expr, **kwargs)
        if use_is_equal_function:
            self.output.write(',')
        else:
            self.output.write(OPERANDS.get(operand.op))
        self._process_node(operand.expr, **kwargs)
        if use_is_equal_function:
            self.output.write(')')
def _process_operand(self, node, **kwargs):
self.output.write(OPERANDS.get(node.op))
self._process_node(node.expr, **kwargs)
def _process_const(self, node, **_):
    """Emit a compile-time constant as its JSON literal (string, number, bool)."""
    with self._interpolation():
        self.output.write(json.dumps(node.value))
def _process_nonetype(self, node, **_):
with self._interpolation():
self.output.write('null')
def _process_neg(self, node, **kwargs):
with self._interpolation():
self.output.write('-')
self._process_node(node.node, **kwargs)
def _process_list(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
def _process_test(self, node, **kwargs):
    """
    Dispatch a Jinja test e.g. `{% if x is defined %}` to the matching
    `_process_test_<name>` handler.

    Raises:
        Exception: if no handler exists for the test name.
    """
    with option(kwargs, use_python_bool_wrapper=False):
        # the local previously named `method_name` actually holds the bound
        # handler method itself, not a name — renamed for clarity
        handler = getattr(self, '_process_test_%s' % node.name, None)
        if callable(handler):
            handler(node, **kwargs)
        else:
            raise Exception('Unsupported test: %s' % node.name)
def _process_test_defined(self, node, **kwargs):
self.output.write('(typeof ')
self._process_node(node.node, **kwargs)
self.output.write(' !== "undefined")')
def _process_test_undefined(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' === undefined')
def _process_test_callable(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Function"')
def _process_test_divisibleby(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % ')
self._process_node(node.args[0], **kwargs)
self.output.write(' === 0')
def _process_test_even(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % 2 === 0')
def _process_test_odd(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % 2 === 1')
def _process_test_none(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' === null')
def _process_test_upper(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write('.toUpperCase() === ')
self._process_node(node.node, **kwargs)
def _process_test_lower(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write('.toLowerCase() === ')
self._process_node(node.node, **kwargs)
def _process_test_string(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "String"')
def _process_test_mapping(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Object"')
def _process_test_number(self, node, **kwargs):
self.output.write('(__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Number" && !isNaN(')
self._process_node(node.node, **kwargs)
self.output.write('))')
def _process_include(self, node, **kwargs):
with self._interpolation(safe=True):
include_path = node.template.value
if include_path == self.template_name:
# template is including itself
include_var_name = self.js_function_name
else:
if self.include_prefix:
include_path = self.include_prefix + node.template.value
elif self.js_module_format in ('es6', 'commonjs',) and self.template_name:
_, absolute_include_path, _ = self.environment.loader.get_source(
self.environment, node.template.value
)
include_path = os.path.relpath(
absolute_include_path, os.path.dirname(self.template_path)
)
if not include_path.startswith('.'):
include_path = './' + include_path
include_path = path.splitext(include_path)[0] + self.include_ext
include_var_name = self._get_depencency_var_name(include_path)
if not include_var_name:
include_var_name = self._add_dependency(include_path)
if self.js_module_format is None:
self.output.write('jinjaToJS.include("')
self.output.write(include_path)
self.output.write('");')
else:
self.output.write(include_var_name)
self.output.write('(')
self.output.write(self.context_name)
self.output.write(')')
def _process_add(self, node, **kwargs):
self._process_math(node, math_operator=' + ', **kwargs)
def _process_sub(self, node, **kwargs):
self._process_math(node, math_operator=' - ', **kwargs)
def _process_div(self, node, **kwargs):
self._process_math(node, math_operator=' / ', **kwargs)
def _process_floordiv(self, node, **kwargs):
self._process_math(node, math_operator=' / ', function='Math.floor', **kwargs)
def _process_mul(self, node, **kwargs):
self._process_math(node, math_operator=' * ', **kwargs)
def _process_mod(self, node, **kwargs):
self._process_math(node, math_operator=' % ', **kwargs)
def _process_math(self, node, math_operator=None, function=None, **kwargs):
    """
    Processes a math node e.g. `Div`, `Sub`, `Add`, `Mul` etc...
    If `function` is provided the expression is wrapped in a call to that function.

    Args:
        node: binary-operator node with `left` and `right` children.
        math_operator (str): JS operator to emit between the operands,
            including surrounding spaces, e.g. ' + '.
        function (str, optional): name of a JS function to wrap the whole
            expression in, e.g. 'Math.floor' for floor division.
    """
    with self._interpolation():
        if function:
            self.output.write(function)
            self.output.write('(')
        self._process_node(node.left, **kwargs)
        self.output.write(math_operator)
        self._process_node(node.right, **kwargs)
        if function:
            self.output.write(')')
def _process_loop_helper(self, node, **kwargs):
    """
    Processes a loop helper e.g. {{ loop.first }} or {{ loop.index }}.

    The emitted snippets rely on the iteration callback's implicit
    `arguments` object: arguments[1] is the index, arguments[2] the iterable.
    """
    snippets = {
        'index': '(arguments[1] + 1)',
        'index0': 'arguments[1]',
        'first': '(arguments[1] == 0)',
        'last': '(arguments[1] == arguments[2].length - 1)',
        'length': 'arguments[2].length',
    }
    snippet = snippets.get(node.attr)
    if snippet is not None:
        self.output.write(snippet)
def _process_args(self, node, **kwargs):
args = getattr(node, 'args', None)
if not args:
return
for i, item in enumerate(args):
self._process_node(item, **kwargs)
if i < len(node.args) - 1:
self.output.write(',')
@contextlib.contextmanager
def _execution(self):
    """
    Context manager for executing some JavaScript inside a template.

    Only the outermost use switches the state machine from DEFAULT to
    EXECUTING; nested uses are no-ops so consecutive writes appear as one
    run of JS statements. The yielded `close` callable lets callers end the
    executing state early (it is also invoked automatically on exit).
    """
    did_start_executing = False
    if self.state == STATE_DEFAULT:
        did_start_executing = True
        self.state = STATE_EXECUTING
    def close():
        # only the outermost _execution() resets the state
        if did_start_executing and self.state == STATE_EXECUTING:
            self.state = STATE_DEFAULT
    yield close
    close()
@contextlib.contextmanager
def _interpolation(self, safe=False):
    """
    Context manager for interpolating a value into the rendered output.

    The outermost use opens `__result += "" + __runtime.escape((__tmp = (`
    (the escape call is skipped when `safe` is True) and on exit closes the
    expression with a null-guard so null/undefined render as an empty
    string. Nested uses are no-ops, mirroring `_execution`.
    """
    did_start_interpolating = False
    if self.state == STATE_DEFAULT:
        did_start_interpolating = True
        self.output.write('__result += "" + ')
        if safe is not True:
            self.output.write('__runtime.escape')
        self.output.write('((__tmp = (')
        self.state = STATE_INTERPOLATING
    def close():
        # only the outermost _interpolation() closes the wrapper expression
        if did_start_interpolating and self.state == STATE_INTERPOLATING:
            self.output.write(')) == null ? "" : __tmp);')
            self.state = STATE_DEFAULT
    yield close
    close()
@contextlib.contextmanager
def _scoped_variables(self, nodes_list, **kwargs):
    """
    Context manager for creating scoped variables defined by the nodes in `nodes_list`.
    These variables will be added to the context, and when the context manager exits the
    context object will be restored to it's previous state.
    """
    tmp_vars = []
    for node in nodes_list:
        is_assign_node = isinstance(node, nodes.Assign)
        name = node.target.name if is_assign_node else node.name
        # create a temp variable name
        tmp_var = next(self.temp_var_names)
        # save previous context value
        with self._execution():
            # save the current value of this name
            self.output.write('var %s = %s.%s;' % (tmp_var, self.context_name, name))
            # add new value to context
            self.output.write('%s.%s = ' % (self.context_name, name))
            if is_assign_node:
                # assignment node (e.g. {% set x = expr %}): emit the expression
                self._process_node(node.node, **kwargs)
            else:
                # plain Name node (e.g. a loop target): reuse the JS variable
                self.output.write(node.name)
            self.output.write(';')
        tmp_vars.append((tmp_var, name))
    yield
    # restore context
    for tmp_var, name in tmp_vars:
        with self._execution():
            self.output.write('%s.%s = %s;' % (self.context_name, name, tmp_var))
@contextlib.contextmanager
def _python_bool_wrapper(self, **kwargs):
    """
    Optionally wrap the emitted expression in `__runtime.boolean(...)` so it
    follows Python truthiness rules; yields kwargs with the flag cleared so
    nested expressions are not double-wrapped.
    """
    wrap = kwargs.get('use_python_bool_wrapper')
    if wrap:
        self.output.write('__runtime.boolean(')
    with option(kwargs, use_python_bool_wrapper=False):
        yield kwargs
    if wrap:
        self.output.write(')')
|
jonbretman/jinja-to-js
|
jinja_to_js/__init__.py
|
is_method_call
|
python
|
def is_method_call(node, method_name):
    """
    Returns True if `node` is a method call for `method_name`. `method_name`
    can be either a string or an iterable of strings.
    """
    if not isinstance(node, nodes.Call):
        return False
    callee = node.node
    if isinstance(callee, nodes.Getattr):
        # e.g. foo.bar()
        called = callee.attr
    elif isinstance(callee, nodes.Name):
        # e.g. bar()
        called = callee.name
    elif isinstance(callee, nodes.Getitem):
        # e.g. foo["bar"]()
        called = callee.arg.value
    else:
        return False
    if isinstance(method_name, (list, tuple)):
        return called in method_name
    return called == method_name
|
Returns True if `node` is a method call for `method_name`. `method_name`
can be either a string or an iterable of strings.
|
train
|
https://github.com/jonbretman/jinja-to-js/blob/0a784b10a83d37a3171c5797547e9fc460c51289/jinja_to_js/__init__.py#L117-L144
| null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import contextlib
import json
import re
import os
from os import path
from jinja2 import Environment, FileSystemLoader, nodes
import six
OPERANDS = {
'eq': '===',
'ne': '!==',
'lt': ' < ',
'gt': ' > ',
'lteq': ' <= ',
'gteq': ' >= '
}
DICT_ITER_METHODS = (
'iteritems',
'items',
'values',
'keys'
)
STATE_DEFAULT = 0
STATE_EXECUTING = 1
STATE_INTERPOLATING = 2
LOOP_HELPER_INDEX = 'index'
LOOP_HELPER_INDEX_0 = 'index0'
LOOP_HELPER_FIRST = 'first'
LOOP_HELPER_LAST = 'last'
LOOP_HELPER_LENGTH = 'length'
LOOP_HELPERS = (
LOOP_HELPER_INDEX,
LOOP_HELPER_INDEX_0,
LOOP_HELPER_FIRST,
LOOP_HELPER_LAST,
LOOP_HELPER_LENGTH
)
def amd_format(dependencies, template_function):
    """
    Wrap `template_function` in an AMD `define([...], function (...) {...});`
    module. `dependencies` is a list of (path, variable_name) pairs.
    """
    paths = ','.join('"%s"' % dep_path for dep_path, _ in dependencies)
    names = ','.join(var_name for _, var_name in dependencies)
    return 'define([%s], function (%s) { return %s; });' % (
        paths, names, template_function
    )
def commonjs_format(dependencies, template_function):
    """
    Wrap `template_function` in CommonJS `require`/`module.exports` syntax.
    `dependencies` is a list of (path, variable_name) pairs.
    """
    lines = [
        'var %s = require("%s");' % (var_name, dep_path)
        for dep_path, var_name in dependencies
    ]
    lines.append('module.exports = %s;' % template_function)
    return ''.join(lines)
def es6_format(dependencies, template_function):
    """
    Wrap `template_function` in ES6 `import`/`export default` syntax.
    `dependencies` is a list of (path, variable_name) pairs.
    """
    imports = ''.join(
        'import %s from "%s";' % (var_name, dep_path)
        for dep_path, var_name in dependencies
    )
    return imports + 'export default %s' % template_function
JS_MODULE_FORMATS = {
None: lambda dependencies, template_function: template_function,
'amd': amd_format,
'commonjs': commonjs_format,
'es6': es6_format
}
# This string has to double all the '{' and '}' due to Python's string formatting.
# See - https://docs.python.org/2/library/string.html#formatstrings
TEMPLATE_WRAPPER = """
function {function_name}(ctx) {{
var __result = "";
var __tmp;
var __runtime = jinjaToJS.runtime;
var __filters = jinjaToJS.filters;
var __globals = jinjaToJS.globals;
var context = jinjaToJS.createContext(ctx);
{template_code}
return __result;
}}
"""
class ExtendsException(Exception):
"""
Raised when an {% extends %} is encountered. At this point the parent template is
loaded and all blocks defined in the current template passed to it.
"""
pass
@contextlib.contextmanager
def option(current_kwargs, **kwargs):
    """
    Context manager for temporarily setting keyword arguments and then
    restoring them to whatever they were before.

    Fixes over the previous version:
    - keys that did not exist beforehand are removed again on exit instead
      of being left behind with a `None` value;
    - restoration happens in a `finally` block so `current_kwargs` is not
      left mutated when the body raises.
    """
    missing = object()  # sentinel: distinguishes "absent" from "was None"
    saved = dict((key, current_kwargs.get(key, missing)) for key in kwargs)
    current_kwargs.update(kwargs)
    try:
        yield
    finally:
        for key, value in saved.items():
            if value is missing:
                current_kwargs.pop(key, None)
            else:
                current_kwargs[key] = value
def is_loop_helper(node):
    """
    Returns True if node is a loop helper e.g. {{ loop.index }} or {{ loop.first }}
    """
    if not hasattr(node, 'node'):
        return False
    target = node.node
    return isinstance(target, nodes.Name) and target.name == 'loop'
def temp_var_names_generator():
    """Yield an endless sequence of unique temp variable names: __$0, __$1, ..."""
    counter = 0
    while True:
        yield '__$' + str(counter)
        counter += 1
class JinjaToJS(object):
def __init__(self,
template_root,
template_name,
js_module_format=None,
runtime_path='jinja-to-js',
include_prefix='',
include_ext='',
child_blocks=None,
dependencies=None,
custom_filters=None):
"""
Args:
template_root (str): The path to where templates should be loaded from.
template_name (str): The name of the template to compile (relative to `template_root`).
js_module_format (str, optional): The JavaScript module format to use.
One of ('amd', 'commonjs', 'es6')
runtime_path (str, optional): If `js_module_format` is specified then the JavaScript
runtime will be imported using the appropriate method.
It defaults to assuming it will be imported from
`node_modules` but you can change it using this option.
include_prefix (str, optional): If using the `amd` module format you can use this option
to add a prefix to every include path as AMD imports are
generally relative to the main file, not the module
importing.
include_ext (str, optional): By default any includes will be references without an
extension, as neither AMD, commonJS or ES6 require the
'.js' extension. If you want to use an extension, say
'.template' then set this option to a string including
the leading '.'
child_blocks (dict, optional): Used internally when handling templates that extend
other templates.
dependencies (list of tuple, optional): Used internally when handling templates that
extend other templates.
custom_filters (list of str, optional): List of custom filters which should be allowed.
These may be filters supported by Jinja but not
supported by jinja-to-js. These filters MUST be
registered with the jinja-to-js JS runtime.
"""
self.environment = Environment(loader=FileSystemLoader(template_root),
autoescape=True,
extensions=['jinja2.ext.with_', 'jinja2.ext.autoescape'])
self.output = six.StringIO()
self.stored_names = set()
self.temp_var_names = temp_var_names_generator()
self.state = STATE_DEFAULT
self.child_blocks = child_blocks or {}
self.dependencies = dependencies or []
self._runtime_function_cache = []
self.js_module_format = js_module_format
self.runtime_path = runtime_path
self.include_prefix = include_prefix
self.include_ext = include_ext
self.template_root = template_root
self.template_name = template_name
self.custom_filters = custom_filters or []
# The name of the JavaScript function that will output this template. By using a named
# function the template can call itself which is required to support recursive includes.
self.js_function_name = 'template' + ''.join(
x.title() for x in re.split(r'[^\w]|_', path.splitext(self.template_name)[0])
)
self.context_name = 'context'
self._add_dependency(self.runtime_path, 'jinjaToJS')
template_string, template_path, _ = self.environment.loader.get_source(
self.environment, self.template_name
)
# It is assumed that this will be the absolute path to the template. It is used to work out
# related paths for inclues.
self.template_path = template_path
if self.js_module_format not in JS_MODULE_FORMATS.keys():
raise ValueError(
'The js_module_format option must be one of: %s' % JS_MODULE_FORMATS.keys()
)
self.ast = self.environment.parse(template_string)
try:
for node in self.ast.body:
self._process_node(node)
except ExtendsException:
pass
def get_output(self):
"""
Returns the generated JavaScript code.
Returns:
str
"""
# generate the JS function string
template_function = TEMPLATE_WRAPPER.format(
function_name=self.js_function_name,
template_code=self.output.getvalue()
).strip()
# get the correct module format template
module_format = JS_MODULE_FORMATS[self.js_module_format]
# generate the module code
return module_format(self.dependencies, template_function)
def _get_depencency_var_name(self, dependency):
"""
Returns the variable name assigned to the given dependency or None if the dependency has
not yet been registered.
Args:
dependency (str): Thet dependency that needs to be imported.
Returns:
str or None
"""
for dep_path, var_name in self.dependencies:
if dep_path == dependency:
return var_name
def _add_dependency(self, dependency, var_name=None):
"""
Adds the given dependency and returns the variable name to use to access it. If `var_name`
is not given then a random one will be created.
Args:
dependency (str):
var_name (str, optional):
Returns:
str
"""
if var_name is None:
var_name = next(self.temp_var_names)
# Don't add duplicate dependencies
if (dependency, var_name) not in self.dependencies:
self.dependencies.append((dependency, var_name))
return var_name
def _process_node(self, node, **kwargs):
node_name = node.__class__.__name__.lower()
handler = getattr(self, '_process_' + node_name, None)
if callable(handler):
handler(node, **kwargs)
else:
raise Exception('Unknown node %s' % node)
def _process_extends(self, node, **kwargs):
"""
Processes an extends block e.g. `{% extends "some/template.jinja" %}`
"""
# find all the blocks in this template
for b in self.ast.find_all(nodes.Block):
# if not already in `child_blocks` then this is the first time a
# block with this name has been encountered.
if b.name not in self.child_blocks:
self.child_blocks[b.name] = b
else:
# otherwise we have seen this block before, so we need to find the last
# super_block and add the block from this template to the end.
block = self.child_blocks.get(b.name)
while hasattr(block, 'super_block'):
block = block.super_block
block.super_block = b
# load the parent template
parent_template = JinjaToJS(template_root=self.template_root,
template_name=node.template.value,
js_module_format=self.js_module_format,
runtime_path=self.runtime_path,
include_prefix=self.include_prefix,
include_ext=self.include_ext,
child_blocks=self.child_blocks,
dependencies=self.dependencies)
# add the parent templates output to the current output
self.output.write(parent_template.output.getvalue())
# Raise an exception so we stop parsing this template
raise ExtendsException
def _process_block(self, node, **kwargs):
"""
Processes a block e.g. `{% block my_block %}{% endblock %}`
"""
# check if this node already has a 'super_block' attribute
if not hasattr(node, 'super_block'):
# since it doesn't it must be the last block in the inheritance chain
node.super_block = None
# see if there has been a child block defined - if there is this
# will be the first block in the inheritance chain
child_block = self.child_blocks.get(node.name)
if child_block:
# we have child nodes so we need to set `node` as the
# super of the last one in the chain
last_block = child_block
while hasattr(last_block, 'super_block'):
last_block = child_block.super_block
# once we have found it, set this node as it's super block
last_block.super_block = node
# this is the node we want to process as it's the first in the inheritance chain
node = child_block
# process the block passing the it's super along, if this block
# calls super() it will be handled by `_process_call`
for n in node.body:
self._process_node(n, super_block=node.super_block, **kwargs)
def _process_output(self, node, **kwargs):
"""
Processes an output node, which will contain things like `Name` and `TemplateData` nodes.
"""
for n in node.nodes:
self._process_node(n, **kwargs)
def _process_templatedata(self, node, **_):
"""
Processes a `TemplateData` node, this is just a bit of as-is text
to be written to the output.
"""
# escape double quotes
value = re.sub('"', r'\\"', node.data)
# escape new lines
value = re.sub('\n', r'\\n', value)
# append value to the result
self.output.write('__result += "' + value + '";')
def _process_name(self, node, **kwargs):
    """
    Processes a `Name` node. Some examples of `Name` nodes:
        {{ foo }} -> 'foo' is a Name
        {% if foo }} -> 'foo' is a Name
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs):
            # names introduced locally (e.g. loop targets) are plain JS
            # variables; anything else is read off the context object
            if node.name not in self.stored_names and node.ctx != 'store':
                self.output.write(self.context_name)
                self.output.write('.')
            # remember stored names so later reads skip the context prefix
            if node.ctx == 'store':
                self.stored_names.add(node.name)
            self.output.write(node.name)
def _process_getattr(self, node, **kwargs):
"""
Processes a `GetAttr` node. e.g. {{ foo.bar }}
"""
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
if is_loop_helper(node):
self._process_loop_helper(node, **new_kwargs)
else:
self._process_node(node.node, **new_kwargs)
self.output.write('.')
self.output.write(node.attr)
def _process_getitem(self, node, **kwargs):
"""
Processes a `GetItem` node e.g. {{ foo["bar"] }}
"""
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
if isinstance(node.arg, nodes.Slice):
self.output.write('.slice(')
if node.arg.step is not None:
raise Exception('The step argument is not supported when slicing.')
if node.arg.start is None:
self.output.write('0')
else:
self._process_node(node.arg.start, **new_kwargs)
if node.arg.stop is None:
self.output.write(')')
else:
self.output.write(',')
self._process_node(node.arg.stop, **new_kwargs)
self.output.write(')')
else:
self.output.write('[')
self._process_node(node.arg, **new_kwargs)
self.output.write(']')
def _process_for(self, node, **kwargs):
"""
Processes a for loop. e.g.
{% for number in numbers %}
{{ number }}
{% endfor %}
{% for key, value in somemap.items() %}
{{ key }} -> {{ value }}
{% %}
"""
# since a for loop can introduce new names into the context
# we need to remember the ones that existed outside the loop
previous_stored_names = self.stored_names.copy()
with self._execution():
self.output.write('__runtime.each(')
if is_method_call(node.iter, dict.keys.__name__):
self.output.write('Object.keys(')
self._process_node(node.iter, **kwargs)
if is_method_call(node.iter, dict.keys.__name__):
self.output.write(')')
self.output.write(',')
self.output.write('function')
self.output.write('(')
# javascript iterations put the value first, then the key
if isinstance(node.target, nodes.Tuple):
if len(node.target.items) > 2:
raise Exception('De-structuring more than 2 items is not supported.')
for i, item in enumerate(reversed(node.target.items)):
self._process_node(item, **kwargs)
if i < len(node.target.items) - 1:
self.output.write(',')
else:
self._process_node(node.target, **kwargs)
self.output.write(')')
self.output.write('{')
if node.test:
self.output.write('if (!(')
self._process_node(node.test, **kwargs)
self.output.write(')) { return; }')
assigns = node.target.items if isinstance(node.target, nodes.Tuple) else [node.target]
with self._scoped_variables(assigns, **kwargs):
for n in node.body:
self._process_node(n, **kwargs)
with self._execution():
self.output.write('}')
self.output.write(')')
self.output.write(';')
# restore the stored names
self.stored_names = previous_stored_names
def _process_if(self, node, execute_end=None, **kwargs):
"""
Processes an if block e.g. `{% if foo %} do something {% endif %}`
"""
with self._execution():
self.output.write('if')
self.output.write('(')
with option(kwargs, use_python_bool_wrapper=True):
self._process_node(node.test, **kwargs)
self.output.write(')')
self.output.write('{')
# We accept an `execute_end` function as a keyword argument as this function is
# recursive in the case of something like if-elif-elif-else. In these cases this
# invocation of this function may have to close execution opened by a previous
# invocation of this function.
if execute_end:
execute_end()
# body
for n in node.body:
self._process_node(n, **kwargs)
if not node.else_ and not node.elif_:
# no else - just close the if
with self._execution():
self.output.write('}')
else:
# either an else or an elif
with self._execution() as execute_end:
self.output.write('}')
self.output.write(' else ')
# check for elif
for n in node.elif_:
self._process_node(n, execute_end=execute_end, **kwargs)
if node.elif_ and node.else_:
self.output.write(' else ')
# open up the body
self.output.write('{')
# process the body of the else
for n in node.else_:
self._process_node(n, **kwargs)
# close the body
with self._execution():
self.output.write('}')
def _process_condexpr(self, node, **kwargs):
with self._interpolation():
self.output.write('(')
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.test, **new_kwargs)
self.output.write(' ? ')
self._process_node(node.expr1, **kwargs)
self.output.write(' : ')
self._process_node(node.expr2, **kwargs)
self.output.write(')')
def _process_not(self, node, **kwargs):
self.output.write('!')
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
def _process_or(self, node, **kwargs):
self._process_node(node.left, **kwargs)
self.output.write(' || ')
self._process_node(node.right, **kwargs)
def _process_and(self, node, **kwargs):
self._process_node(node.left, **kwargs)
self.output.write(' && ')
self._process_node(node.right, **kwargs)
def _process_tuple(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
def _process_call(self, node, super_block=None, **kwargs):
    """
    Processes a `Call` node: dict iteration methods (`.items()` etc.), the
    special `super()` available inside blocks, and plain function calls on
    context variables.

    Raises:
        Exception: if `super()` is used outside a block with a parent.
    """
    if is_method_call(node, DICT_ITER_METHODS):
        # special case for dict methods
        self._process_node(node.node.node, **kwargs)
    elif is_method_call(node, 'super'):
        # special case for the super() method which is available inside blocks
        if not super_block:
            raise Exception('super() called outside of a block with a parent.')
        self._process_node(super_block, **kwargs)
    else:
        # just a normal function call on a context variable
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self._process_node(node.node, **new_kwargs)
                self.output.write('(')
                self._process_args(node, **new_kwargs)
                self.output.write(')')
                # only output the semi-colon if we are not interpolating
                # (BUG FIX: the previous code wrote an empty string here,
                # contradicting its own comment — emit the ';' terminator)
                if self.state != STATE_INTERPOLATING:
                    self.output.write(';')
def _process_filter(self, node, **kwargs):
method_name = getattr(self, '_process_filter_%s' % node.name, None)
if callable(method_name):
method_name(node, **kwargs)
elif node.name in self.custom_filters:
with self._interpolation(safe=True):
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.%s(' % node.name)
self._process_node(node.node, **new_kwargs)
if getattr(node, 'args', None):
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
else:
raise Exception('Unsupported filter: %s' % node.name)
def _process_filter_safe(self, node, **kwargs):
with self._interpolation(safe=True):
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
def _process_filter_capitalize(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.capitalize(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_abs(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('Math.abs(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_attr(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
self.output.write('[')
self._process_node(node.args[0], **new_kwargs)
self.output.write(']')
def _process_filter_batch(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.batch(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_default(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.default(')
self._process_node(node.node, **new_kwargs)
if node.args:
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_first(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.first(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_int(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.int(')
self._process_node(node.node, **new_kwargs)
if node.args:
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_last(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.last(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_length(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.size(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_lower(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").toLowerCase()')
def _process_filter_slice(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.slice(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_title(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.title(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_trim(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").trim()')
def _process_filter_upper(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").toUpperCase()')
def _process_filter_truncate(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.truncate(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_assign(self, node, **kwargs):
with self._execution():
self.output.write('var ')
self._process_node(node.target, **kwargs)
self.output.write(' = ')
self._process_node(node.node, **kwargs)
self.output.write(';')
def _process_with(self, node, **kwargs):
    """
    Processes a {% with %} block by emitting an immediately-invoked JS
    function whose scope holds the block's assignments.

    Fix: the body loop previously rebound the name `node` (the loop variable
    shadowed the `with` node itself); it is now `n`, matching the convention
    used by `_process_block` and `_process_if`.
    """
    # keep a copy of the stored names before the scope
    previous_stored_names = self.stored_names.copy()

    # assigns in the with tag
    # e.g. {% with var = "something %}
    assigns_in_tag = [nodes.Assign(t, v) for t, v in zip(node.targets, node.values)]

    # assigns in the with body
    # e.g. {% set name = 'John' %}
    assigns_in_body = [x for x in node.body if isinstance(x, nodes.Assign)]

    # remove assigns from the body
    node.body = [x for x in node.body if not isinstance(x, nodes.Assign)]

    # get a list of all the assigns in this with block
    # both on the tag, and within the body of the block
    all_assigns = assigns_in_tag + assigns_in_body

    with self._execution():
        self.output.write('(function () {')

    with self._scoped_variables(all_assigns, **kwargs):
        for n in node.body:
            self._process_node(n, **kwargs)

    with self._execution():
        self.output.write('})();')

    # restore previous stored names
    self.stored_names = previous_stored_names
def _process_compare(self, node, **kwargs):
    """
    Processes a comparison e.g. `{% if a == b %}`.

    Only a single operand is supported; chained comparisons such as
    `a < b < c` raise. Equality between two non-constant operands is
    emitted as a call to the runtime's deep-equals helper.
    """
    if len(node.ops) > 1:
        raise Exception('Multiple operands are not supported.')

    operand = node.ops[0]
    is_equality = operand.op in ('eq', 'ne')
    left_hand_is_const = isinstance(node.expr, nodes.Const)
    right_hand_is_const = isinstance(operand.expr, nodes.Const)

    # If the operand is equality and neither the left or right hand side are constants then we
    # will need to use the JavaScript deep equals function. Ideally we want to avoid using this
    # as it is quite a big function.
    use_is_equal_function = is_equality and not (left_hand_is_const or right_hand_is_const)

    with option(kwargs, use_python_bool_wrapper=False):

        if use_is_equal_function:
            if operand.op == 'ne':
                # negate the deep-equals call for `!=`
                self.output.write('!')
            self.output.write('__runtime.isEqual(')

        self._process_node(node.expr, **kwargs)

        if use_is_equal_function:
            self.output.write(',')
        else:
            self.output.write(OPERANDS.get(operand.op))

        self._process_node(operand.expr, **kwargs)

        if use_is_equal_function:
            self.output.write(')')
def _process_operand(self, node, **kwargs):
    """Write a comparison operand: its JS operator, then its expression."""
    operator_text = OPERANDS.get(node.op)
    self.output.write(operator_text)
    self._process_node(node.expr, **kwargs)
def _process_const(self, node, **_):
with self._interpolation():
self.output.write(json.dumps(node.value))
def _process_nonetype(self, node, **_):
with self._interpolation():
self.output.write('null')
def _process_neg(self, node, **kwargs):
with self._interpolation():
self.output.write('-')
self._process_node(node.node, **kwargs)
def _process_list(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
def _process_test(self, node, **kwargs):
with option(kwargs, use_python_bool_wrapper=False):
method_name = getattr(self, '_process_test_%s' % node.name, None)
if callable(method_name):
method_name(node, **kwargs)
else:
raise Exception('Unsupported test: %s' % node.name)
def _process_test_defined(self, node, **kwargs):
self.output.write('(typeof ')
self._process_node(node.node, **kwargs)
self.output.write(' !== "undefined")')
def _process_test_undefined(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' === undefined')
def _process_test_callable(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Function"')
def _process_test_divisibleby(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % ')
self._process_node(node.args[0], **kwargs)
self.output.write(' === 0')
def _process_test_even(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % 2 === 0')
def _process_test_odd(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % 2 === 1')
def _process_test_none(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' === null')
def _process_test_upper(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write('.toUpperCase() === ')
self._process_node(node.node, **kwargs)
def _process_test_lower(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write('.toLowerCase() === ')
self._process_node(node.node, **kwargs)
def _process_test_string(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "String"')
def _process_test_mapping(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Object"')
def _process_test_number(self, node, **kwargs):
self.output.write('(__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Number" && !isNaN(')
self._process_node(node.node, **kwargs)
self.output.write('))')
def _process_include(self, node, **kwargs):
with self._interpolation(safe=True):
include_path = node.template.value
if include_path == self.template_name:
# template is including itself
include_var_name = self.js_function_name
else:
if self.include_prefix:
include_path = self.include_prefix + node.template.value
elif self.js_module_format in ('es6', 'commonjs',) and self.template_name:
_, absolute_include_path, _ = self.environment.loader.get_source(
self.environment, node.template.value
)
include_path = os.path.relpath(
absolute_include_path, os.path.dirname(self.template_path)
)
if not include_path.startswith('.'):
include_path = './' + include_path
include_path = path.splitext(include_path)[0] + self.include_ext
include_var_name = self._get_depencency_var_name(include_path)
if not include_var_name:
include_var_name = self._add_dependency(include_path)
if self.js_module_format is None:
self.output.write('jinjaToJS.include("')
self.output.write(include_path)
self.output.write('");')
else:
self.output.write(include_var_name)
self.output.write('(')
self.output.write(self.context_name)
self.output.write(')')
def _process_add(self, node, **kwargs):
self._process_math(node, math_operator=' + ', **kwargs)
def _process_sub(self, node, **kwargs):
self._process_math(node, math_operator=' - ', **kwargs)
def _process_div(self, node, **kwargs):
self._process_math(node, math_operator=' / ', **kwargs)
def _process_floordiv(self, node, **kwargs):
self._process_math(node, math_operator=' / ', function='Math.floor', **kwargs)
def _process_mul(self, node, **kwargs):
self._process_math(node, math_operator=' * ', **kwargs)
def _process_mod(self, node, **kwargs):
self._process_math(node, math_operator=' % ', **kwargs)
def _process_math(self, node, math_operator=None, function=None, **kwargs):
"""
Processes a math node e.g. `Div`, `Sub`, `Add`, `Mul` etc...
If `function` is provided the expression is wrapped in a call to that function.
"""
with self._interpolation():
if function:
self.output.write(function)
self.output.write('(')
self._process_node(node.left, **kwargs)
self.output.write(math_operator)
self._process_node(node.right, **kwargs)
if function:
self.output.write(')')
def _process_loop_helper(self, node, **kwargs):
    """
    Processes a loop helper e.g. {{ loop.first }} or {{ loop.index }}

    Each helper maps onto an expression over the implicit arguments of the
    runtime's each() callback: arguments[1] is the index, arguments[2] the
    iterated collection.
    """
    snippets = {
        LOOP_HELPER_INDEX: '(arguments[1] + 1)',
        LOOP_HELPER_INDEX_0: 'arguments[1]',
        LOOP_HELPER_FIRST: '(arguments[1] == 0)',
        LOOP_HELPER_LAST: '(arguments[1] == arguments[2].length - 1)',
        LOOP_HELPER_LENGTH: 'arguments[2].length',
    }
    snippet = snippets.get(node.attr)
    if snippet is not None:
        self.output.write(snippet)
def _process_args(self, node, **kwargs):
    """Write the node's call arguments, comma-separated, if it has any."""
    args = getattr(node, 'args', None)
    if not args:
        return
    last_index = len(args) - 1
    for index, argument in enumerate(args):
        self._process_node(argument, **kwargs)
        if index != last_index:
            self.output.write(',')
@contextlib.contextmanager
def _execution(self):
    """
    Context manager for executing some JavaScript inside a template.

    If the compiler is in the default (text output) state, switch to the
    executing state for the duration of the block. Yields a `close` function
    so callers (e.g. `_process_if`) can end the execution state early; it is
    also invoked automatically when the block exits.
    """
    did_start_executing = False

    if self.state == STATE_DEFAULT:
        did_start_executing = True
        self.state = STATE_EXECUTING

    def close():
        # only reset the state if this invocation was the one that set it
        if did_start_executing and self.state == STATE_EXECUTING:
            self.state = STATE_DEFAULT

    yield close
    close()
@contextlib.contextmanager
def _interpolation(self, safe=False):
    """
    Context manager for interpolating a value into the rendered output.

    When entered from the default state it opens a `__result += ...`
    expression, HTML-escaping the value unless `safe` is True. The yielded
    `close` function finishes the expression — the `__tmp` null-check makes
    null/undefined values render as the empty string — and is also called
    automatically on exit.
    """
    did_start_interpolating = False

    if self.state == STATE_DEFAULT:
        did_start_interpolating = True
        self.output.write('__result += "" + ')
        if safe is not True:
            self.output.write('__runtime.escape')
        self.output.write('((__tmp = (')
        self.state = STATE_INTERPOLATING

    def close():
        # only close the expression if this invocation was the one that opened it
        if did_start_interpolating and self.state == STATE_INTERPOLATING:
            self.output.write(')) == null ? "" : __tmp);')
            self.state = STATE_DEFAULT

    yield close
    close()
@contextlib.contextmanager
def _scoped_variables(self, nodes_list, **kwargs):
    """
    Context manager for creating scoped variables defined by the nodes in `nodes_list`.
    These variables will be added to the context, and when the context manager exits the
    context object will be restored to it's previous state.

    Each name's previous context value is stashed in a generated JS temp
    variable before being overwritten, and written back on exit.
    """
    tmp_vars = []
    for node in nodes_list:

        is_assign_node = isinstance(node, nodes.Assign)
        name = node.target.name if is_assign_node else node.name

        # create a temp variable name
        tmp_var = next(self.temp_var_names)

        # save previous context value
        with self._execution():

            # save the current value of this name
            self.output.write('var %s = %s.%s;' % (tmp_var, self.context_name, name))

            # add new value to context
            self.output.write('%s.%s = ' % (self.context_name, name))

            if is_assign_node:
                self._process_node(node.node, **kwargs)
            else:
                # not an Assign node: the value is the name itself, which is
                # in scope as a plain JS variable (e.g. a loop target)
                self.output.write(node.name)

            self.output.write(';')

        tmp_vars.append((tmp_var, name))

    yield

    # restore context
    for tmp_var, name in tmp_vars:
        with self._execution():
            self.output.write('%s.%s = %s;' % (self.context_name, name, tmp_var))
@contextlib.contextmanager
def _python_bool_wrapper(self, **kwargs):
    """
    Context manager that optionally wraps the emitted expression in
    `__runtime.boolean(...)` so JS truthiness matches Python truthiness.
    The wrapper is applied at most once: nested nodes are processed with
    `use_python_bool_wrapper=False`. Yields the adjusted kwargs for
    processing child nodes.
    """
    use_python_bool_wrapper = kwargs.get('use_python_bool_wrapper')

    if use_python_bool_wrapper:
        self.output.write('__runtime.boolean(')

    with option(kwargs, use_python_bool_wrapper=False):
        yield kwargs

    if use_python_bool_wrapper:
        self.output.write(')')
|
jonbretman/jinja-to-js
|
jinja_to_js/__init__.py
|
is_loop_helper
|
python
|
def is_loop_helper(node):
return hasattr(node, 'node') and isinstance(node.node, nodes.Name) and node.node.name == 'loop'
|
Returns True if node is a loop helper e.g. {{ loop.index }} or {{ loop.first }}
|
train
|
https://github.com/jonbretman/jinja-to-js/blob/0a784b10a83d37a3171c5797547e9fc460c51289/jinja_to_js/__init__.py#L147-L151
| null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import contextlib
import json
import re
import os
from os import path
from jinja2 import Environment, FileSystemLoader, nodes
import six
OPERANDS = {
'eq': '===',
'ne': '!==',
'lt': ' < ',
'gt': ' > ',
'lteq': ' <= ',
'gteq': ' >= '
}
DICT_ITER_METHODS = (
'iteritems',
'items',
'values',
'keys'
)
STATE_DEFAULT = 0
STATE_EXECUTING = 1
STATE_INTERPOLATING = 2
LOOP_HELPER_INDEX = 'index'
LOOP_HELPER_INDEX_0 = 'index0'
LOOP_HELPER_FIRST = 'first'
LOOP_HELPER_LAST = 'last'
LOOP_HELPER_LENGTH = 'length'
LOOP_HELPERS = (
LOOP_HELPER_INDEX,
LOOP_HELPER_INDEX_0,
LOOP_HELPER_FIRST,
LOOP_HELPER_LAST,
LOOP_HELPER_LENGTH
)
def amd_format(dependencies, template_function):
    """Wrap `template_function` in an AMD `define` call importing `dependencies`."""
    paths = ",".join('"{0}"'.format(dep_path) for dep_path, _ in dependencies)
    names = ",".join(var_name for _, var_name in dependencies)
    return 'define([%s], function (%s) { return %s; });' % (
        paths, names, template_function
    )
def commonjs_format(dependencies, template_function):
    """Wrap `template_function` as a CommonJS module requiring `dependencies`."""
    parts = ['var {0} = require("{1}");'.format(var_name, dep_path)
             for dep_path, var_name in dependencies]
    parts.append('module.exports = {0};'.format(template_function))
    return ''.join(parts)
def es6_format(dependencies, template_function):
    """Wrap `template_function` as an ES6 module importing `dependencies`."""
    parts = ['import {0} from "{1}";'.format(var_name, dep_path)
             for dep_path, var_name in dependencies]
    parts.append('export default {0}'.format(template_function))
    return ''.join(parts)
JS_MODULE_FORMATS = {
None: lambda dependencies, template_function: template_function,
'amd': amd_format,
'commonjs': commonjs_format,
'es6': es6_format
}
# This string has to double all the '{' and '}' due to Python's string formatting.
# See - https://docs.python.org/2/library/string.html#formatstrings
TEMPLATE_WRAPPER = """
function {function_name}(ctx) {{
var __result = "";
var __tmp;
var __runtime = jinjaToJS.runtime;
var __filters = jinjaToJS.filters;
var __globals = jinjaToJS.globals;
var context = jinjaToJS.createContext(ctx);
{template_code}
return __result;
}}
"""
class ExtendsException(Exception):
"""
Raised when an {% extends %} is encountered. At this point the parent template is
loaded and all blocks defined in the current template passed to it.
"""
pass
@contextlib.contextmanager
def option(current_kwargs, **kwargs):
    """
    Context manager for temporarily setting keyword arguments and then
    restoring them to whatever they were before.

    Fix: keys that did not exist before entering the context are removed
    again on exit. The previous implementation restored them with
    `current_kwargs.get(key)`, leaving stray `key: None` entries behind.
    """
    missing = object()  # sentinel: distinguishes "absent" from "present but None"
    saved = dict((key, current_kwargs.get(key, missing)) for key in kwargs)
    current_kwargs.update(kwargs)
    yield
    for key, value in saved.items():
        if value is missing:
            current_kwargs.pop(key, None)
        else:
            current_kwargs[key] = value
def is_method_call(node, method_name):
    """
    Returns True if `node` is a call of `method_name`. `method_name` may be
    a single string or a list/tuple of acceptable names.
    """
    if not isinstance(node, nodes.Call):
        return False

    target = node.node
    if isinstance(target, nodes.Getattr):
        # e.g. foo.bar()
        called = target.attr
    elif isinstance(target, nodes.Name):
        # e.g. bar()
        called = target.name
    elif isinstance(target, nodes.Getitem):
        # e.g. foo["bar"]()
        called = target.arg.value
    else:
        return False

    if isinstance(method_name, (list, tuple)):
        return called in method_name
    return called == method_name
def temp_var_names_generator():
    """Yield an endless sequence of unique temporary JS variable names."""
    counter = 0
    while True:
        yield '__$' + str(counter)
        counter += 1
class JinjaToJS(object):
def __init__(self,
template_root,
template_name,
js_module_format=None,
runtime_path='jinja-to-js',
include_prefix='',
include_ext='',
child_blocks=None,
dependencies=None,
custom_filters=None):
"""
Args:
template_root (str): The path to where templates should be loaded from.
template_name (str): The name of the template to compile (relative to `template_root`).
js_module_format (str, optional): The JavaScript module format to use.
One of ('amd', 'commonjs', 'es6')
runtime_path (str, optional): If `js_module_format` is specified then the JavaScript
runtime will be imported using the appropriate method.
It defaults to assuming it will be imported from
`node_modules` but you can change it using this option.
include_prefix (str, optional): If using the `amd` module format you can use this option
to add a prefix to every include path as AMD imports are
generally relative to the main file, not the module
importing.
include_ext (str, optional): By default any includes will be references without an
extension, as neither AMD, commonJS or ES6 require the
'.js' extension. If you want to use an extension, say
'.template' then set this option to a string including
the leading '.'
child_blocks (dict, optional): Used internally when handling templates that extend
other templates.
dependencies (list of tuple, optional): Used internally when handling templates that
extend other templates.
custom_filters (list of str, optional): List of custom filters which should be allowed.
These may be filters supported by Jinja but not
supported by jinja-to-js. These filters MUST be
registered with the jinja-to-js JS runtime.
"""
self.environment = Environment(loader=FileSystemLoader(template_root),
autoescape=True,
extensions=['jinja2.ext.with_', 'jinja2.ext.autoescape'])
self.output = six.StringIO()
self.stored_names = set()
self.temp_var_names = temp_var_names_generator()
self.state = STATE_DEFAULT
self.child_blocks = child_blocks or {}
self.dependencies = dependencies or []
self._runtime_function_cache = []
self.js_module_format = js_module_format
self.runtime_path = runtime_path
self.include_prefix = include_prefix
self.include_ext = include_ext
self.template_root = template_root
self.template_name = template_name
self.custom_filters = custom_filters or []
# The name of the JavaScript function that will output this template. By using a named
# function the template can call itself which is required to support recursive includes.
self.js_function_name = 'template' + ''.join(
x.title() for x in re.split(r'[^\w]|_', path.splitext(self.template_name)[0])
)
self.context_name = 'context'
self._add_dependency(self.runtime_path, 'jinjaToJS')
template_string, template_path, _ = self.environment.loader.get_source(
self.environment, self.template_name
)
# It is assumed that this will be the absolute path to the template. It is used to work out
# related paths for inclues.
self.template_path = template_path
if self.js_module_format not in JS_MODULE_FORMATS.keys():
raise ValueError(
'The js_module_format option must be one of: %s' % JS_MODULE_FORMATS.keys()
)
self.ast = self.environment.parse(template_string)
try:
for node in self.ast.body:
self._process_node(node)
except ExtendsException:
pass
def get_output(self):
    """
    Returns the generated JavaScript code.

    Returns:
        str
    """
    # build the named JS function from the accumulated template body
    function_src = TEMPLATE_WRAPPER.format(
        function_name=self.js_function_name,
        template_code=self.output.getvalue()
    ).strip()

    # wrap it in the requested module format (plain / amd / commonjs / es6)
    wrap = JS_MODULE_FORMATS[self.js_module_format]
    return wrap(self.dependencies, function_src)
def _get_depencency_var_name(self, dependency):
"""
Returns the variable name assigned to the given dependency or None if the dependency has
not yet been registered.
Args:
dependency (str): Thet dependency that needs to be imported.
Returns:
str or None
"""
for dep_path, var_name in self.dependencies:
if dep_path == dependency:
return var_name
def _add_dependency(self, dependency, var_name=None):
"""
Adds the given dependency and returns the variable name to use to access it. If `var_name`
is not given then a random one will be created.
Args:
dependency (str):
var_name (str, optional):
Returns:
str
"""
if var_name is None:
var_name = next(self.temp_var_names)
# Don't add duplicate dependencies
if (dependency, var_name) not in self.dependencies:
self.dependencies.append((dependency, var_name))
return var_name
def _process_node(self, node, **kwargs):
node_name = node.__class__.__name__.lower()
handler = getattr(self, '_process_' + node_name, None)
if callable(handler):
handler(node, **kwargs)
else:
raise Exception('Unknown node %s' % node)
def _process_extends(self, node, **kwargs):
    """
    Processes an extends block e.g. `{% extends "some/template.jinja" %}`

    Collects this template's blocks, compiles the parent template with them,
    writes the parent's output, then raises ExtendsException so parsing of
    the current template stops.
    """
    # find all the blocks in this template
    for b in self.ast.find_all(nodes.Block):

        # if not already in `child_blocks` then this is the first time a
        # block with this name has been encountered.
        if b.name not in self.child_blocks:
            self.child_blocks[b.name] = b
        else:

            # otherwise we have seen this block before, so we need to find the last
            # super_block and add the block from this template to the end.
            block = self.child_blocks.get(b.name)
            while hasattr(block, 'super_block'):
                block = block.super_block
            block.super_block = b

    # load the parent template
    parent_template = JinjaToJS(template_root=self.template_root,
                                template_name=node.template.value,
                                js_module_format=self.js_module_format,
                                runtime_path=self.runtime_path,
                                include_prefix=self.include_prefix,
                                include_ext=self.include_ext,
                                child_blocks=self.child_blocks,
                                dependencies=self.dependencies)

    # add the parent templates output to the current output
    self.output.write(parent_template.output.getvalue())

    # Raise an exception so we stop parsing this template
    raise ExtendsException
def _process_block(self, node, **kwargs):
    """
    Processes a block e.g. `{% block my_block %}{% endblock %}`

    Fix: when walking down the chain of child blocks to find the last one,
    the loop previously re-read `child_block.super_block` on every iteration
    instead of advancing `last_block`, which looped forever (or attached the
    parent at the wrong depth) for inheritance chains longer than two
    templates.
    """
    # check if this node already has a 'super_block' attribute
    if not hasattr(node, 'super_block'):

        # since it doesn't it must be the last block in the inheritance chain
        node.super_block = None

        # see if there has been a child block defined - if there is this
        # will be the first block in the inheritance chain
        child_block = self.child_blocks.get(node.name)

        if child_block:

            # we have child nodes so we need to set `node` as the
            # super of the last one in the chain
            last_block = child_block
            while getattr(last_block, 'super_block', None) is not None:
                last_block = last_block.super_block

            # once we have found it, set this node as it's super block
            last_block.super_block = node

            # this is the node we want to process as it's the first in the inheritance chain
            node = child_block

    # process the block passing the it's super along, if this block
    # calls super() it will be handled by `_process_call`
    for n in node.body:
        self._process_node(n, super_block=node.super_block, **kwargs)
def _process_output(self, node, **kwargs):
    """
    Processes an output node, which will contain things like `Name` and
    `TemplateData` nodes — each child is dispatched in turn.
    """
    for child in node.nodes:
        self._process_node(child, **kwargs)
def _process_templatedata(self, node, **_):
"""
Processes a `TemplateData` node, this is just a bit of as-is text
to be written to the output.
"""
# escape double quotes
value = re.sub('"', r'\\"', node.data)
# escape new lines
value = re.sub('\n', r'\\n', value)
# append value to the result
self.output.write('__result += "' + value + '";')
def _process_name(self, node, **kwargs):
    """
    Processes a `Name` node. Some examples of `Name` nodes:
        {{ foo }} -> 'foo' is a Name
        {% if foo }} -> 'foo' is a Name
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs):

            # names that are not locally stored (e.g. loop targets or
            # assigned variables) are looked up on the context object
            if node.name not in self.stored_names and node.ctx != 'store':
                self.output.write(self.context_name)
                self.output.write('.')

            # remember names being assigned to, so later reads skip the
            # context lookup above
            if node.ctx == 'store':
                self.stored_names.add(node.name)

            self.output.write(node.name)
def _process_getattr(self, node, **kwargs):
    """
    Processes a `GetAttr` node. e.g. {{ foo.bar }}
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            if is_loop_helper(node):
                # {{ loop.index }} etc. map onto the implicit each() arguments
                self._process_loop_helper(node, **new_kwargs)
            else:
                self._process_node(node.node, **new_kwargs)
                self.output.write('.')
                self.output.write(node.attr)
def _process_getitem(self, node, **kwargs):
    """
    Processes a `GetItem` node e.g. {{ foo["bar"] }}

    Slices are translated to `Array.prototype.slice`; the `step` argument
    is not supported and raises.
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.node, **new_kwargs)

            if isinstance(node.arg, nodes.Slice):
                self.output.write('.slice(')

                if node.arg.step is not None:
                    raise Exception('The step argument is not supported when slicing.')

                if node.arg.start is None:
                    # no start -> slice from the beginning
                    self.output.write('0')
                else:
                    self._process_node(node.arg.start, **new_kwargs)

                if node.arg.stop is None:
                    # no stop -> slice to the end (single-argument slice call)
                    self.output.write(')')
                else:
                    self.output.write(',')
                    self._process_node(node.arg.stop, **new_kwargs)
                    self.output.write(')')
            else:
                # plain subscript, e.g. foo["bar"] or foo[0]
                self.output.write('[')
                self._process_node(node.arg, **new_kwargs)
                self.output.write(']')
def _process_for(self, node, **kwargs):
    """
    Processes a for loop. e.g.
        {% for number in numbers %}
            {{ number }}
        {% endfor %}
        {% for key, value in somemap.items() %}
            {{ key }} -> {{ value }}
        {% endfor %}

    Emits a call to the runtime's each() with an inline callback; the loop
    condition ({% for x in xs if ... %}) becomes an early `return` inside
    the callback.
    """
    # since a for loop can introduce new names into the context
    # we need to remember the ones that existed outside the loop
    previous_stored_names = self.stored_names.copy()

    with self._execution():
        self.output.write('__runtime.each(')

        # keys() iteration needs the keys as an array on the JS side
        if is_method_call(node.iter, dict.keys.__name__):
            self.output.write('Object.keys(')

        self._process_node(node.iter, **kwargs)

        if is_method_call(node.iter, dict.keys.__name__):
            self.output.write(')')

        self.output.write(',')
        self.output.write('function')
        self.output.write('(')

        # javascript iterations put the value first, then the key
        if isinstance(node.target, nodes.Tuple):

            if len(node.target.items) > 2:
                raise Exception('De-structuring more than 2 items is not supported.')

            for i, item in enumerate(reversed(node.target.items)):
                self._process_node(item, **kwargs)
                if i < len(node.target.items) - 1:
                    self.output.write(',')
        else:
            self._process_node(node.target, **kwargs)

        self.output.write(')')
        self.output.write('{')

        if node.test:
            # loop filter: skip items failing the test
            self.output.write('if (!(')
            self._process_node(node.test, **kwargs)
            self.output.write(')) { return; }')

    assigns = node.target.items if isinstance(node.target, nodes.Tuple) else [node.target]

    with self._scoped_variables(assigns, **kwargs):
        for n in node.body:
            self._process_node(n, **kwargs)

    with self._execution():
        self.output.write('}')
        self.output.write(')')
        self.output.write(';')

    # restore the stored names
    self.stored_names = previous_stored_names
def _process_if(self, node, execute_end=None, **kwargs):
    """
    Processes an if block e.g. `{% if foo %} do something {% endif %}`

    Handles elif chains by recursing with an `execute_end` callback that
    closes the execution state opened by the previous invocation.
    """
    with self._execution():
        self.output.write('if')
        self.output.write('(')

        # conditions always use Python truthiness semantics
        with option(kwargs, use_python_bool_wrapper=True):
            self._process_node(node.test, **kwargs)

        self.output.write(')')
        self.output.write('{')

    # We accept an `execute_end` function as a keyword argument as this function is
    # recursive in the case of something like if-elif-elif-else. In these cases this
    # invocation of this function may have to close execution opened by a previous
    # invocation of this function.
    if execute_end:
        execute_end()

    # body
    for n in node.body:
        self._process_node(n, **kwargs)

    if not node.else_ and not node.elif_:
        # no else - just close the if
        with self._execution():
            self.output.write('}')

    else:
        # either an else or an elif
        with self._execution() as execute_end:
            self.output.write('}')
            self.output.write(' else ')

            # check for elif
            for n in node.elif_:
                self._process_node(n, execute_end=execute_end, **kwargs)

            if node.elif_ and node.else_:
                self.output.write(' else ')

            # open up the body
            self.output.write('{')

        # process the body of the else
        for n in node.else_:
            self._process_node(n, **kwargs)

        # close the body
        with self._execution():
            self.output.write('}')
def _process_condexpr(self, node, **kwargs):
with self._interpolation():
self.output.write('(')
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.test, **new_kwargs)
self.output.write(' ? ')
self._process_node(node.expr1, **kwargs)
self.output.write(' : ')
self._process_node(node.expr2, **kwargs)
self.output.write(')')
def _process_not(self, node, **kwargs):
self.output.write('!')
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
def _process_or(self, node, **kwargs):
self._process_node(node.left, **kwargs)
self.output.write(' || ')
self._process_node(node.right, **kwargs)
def _process_and(self, node, **kwargs):
self._process_node(node.left, **kwargs)
self.output.write(' && ')
self._process_node(node.right, **kwargs)
def _process_tuple(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
def _process_call(self, node, super_block=None, **kwargs):
    """
    Processes a `Call` node: dict iteration methods (items()/keys()/...),
    the special `super()` call available inside blocks, and plain function
    calls on context variables.
    """
    if is_method_call(node, DICT_ITER_METHODS):
        # special case for dict methods: process the dict itself and let the
        # JS runtime handle iterating its entries
        self._process_node(node.node.node, **kwargs)

    elif is_method_call(node, 'super'):
        # special case for the super() method which is available inside blocks
        if not super_block:
            raise Exception('super() called outside of a block with a parent.')
        self._process_node(super_block, **kwargs)

    else:
        # just a normal function call on a context variable
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self._process_node(node.node, **new_kwargs)
                self.output.write('(')
                self._process_args(node, **new_kwargs)
                self.output.write(')')

            # only output the semi-colon if we are not interpolating
            # NOTE(review): this writes an empty string, not ';' — the
            # semicolon appears to have been lost; confirm against the
            # intended JS output before changing.
            if self.state != STATE_INTERPOLATING:
                self.output.write('')
def _process_filter(self, node, **kwargs):
method_name = getattr(self, '_process_filter_%s' % node.name, None)
if callable(method_name):
method_name(node, **kwargs)
elif node.name in self.custom_filters:
with self._interpolation(safe=True):
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.%s(' % node.name)
self._process_node(node.node, **new_kwargs)
if getattr(node, 'args', None):
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
else:
raise Exception('Unsupported filter: %s' % node.name)
def _process_filter_safe(self, node, **kwargs):
with self._interpolation(safe=True):
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
def _process_filter_capitalize(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.capitalize(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_abs(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('Math.abs(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_attr(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
self.output.write('[')
self._process_node(node.args[0], **new_kwargs)
self.output.write(']')
def _process_filter_batch(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.batch(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_default(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.default(')
self._process_node(node.node, **new_kwargs)
if node.args:
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_first(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.first(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_int(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.int(')
self._process_node(node.node, **new_kwargs)
if node.args:
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_last(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.last(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_length(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.size(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_lower(self, node, **kwargs):
    """Render the `lower` filter via JS string coercion plus `toLowerCase()`."""
    write = self.output.write
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as inner:
            write('(')
            self._process_node(node.node, **inner)
            write(' + "").toLowerCase()')
def _process_filter_slice(self, node, **kwargs):
    """Render the `slice` filter as `__filters.slice(value, args...)`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.slice(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(',')
            self._process_args(node, **new_kwargs)
            self.output.write(')')
def _process_filter_title(self, node, **kwargs):
    """Render the `title` filter as `__filters.title(value)`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.title(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(')')
def _process_filter_trim(self, node, **kwargs):
    """Render the `trim` filter via JS string coercion plus `trim()`."""
    write = self.output.write
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as inner:
            write('(')
            self._process_node(node.node, **inner)
            write(' + "").trim()')
def _process_filter_upper(self, node, **kwargs):
    """Render the `upper` filter via JS string coercion plus `toUpperCase()`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(' + "").toUpperCase()')
def _process_filter_truncate(self, node, **kwargs):
    """Render the `truncate` filter as `__filters.truncate(value, args...)`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.truncate(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(',')
            self._process_args(node, **new_kwargs)
            self.output.write(')')
def _process_assign(self, node, **kwargs):
    """Emit `{% set x = expr %}` as a JavaScript `var` declaration."""
    with self._execution():
        write = self.output.write
        write('var ')
        self._process_node(node.target, **kwargs)
        write(' = ')
        self._process_node(node.node, **kwargs)
        write(';')
def _process_with(self, node, **kwargs):
    """
    Processes a `{% with %}` block by wrapping its body in a JavaScript IIFE.

    Assignments on the tag itself and `{% set %}` assignments found in the
    body are routed through `_scoped_variables`, so the outer context is
    restored when the block ends.
    """
    # keep a copy of the stored names before the scope
    previous_stored_names = self.stored_names.copy()
    # assigns in the with tag
    # e.g. {% with var = "something %}
    assigns_in_tag = [nodes.Assign(t, v) for t, v in zip(node.targets, node.values)]
    # assigns in the with body
    # e.g. {% set name = 'John' %}
    assigns_in_body = [x for x in node.body if isinstance(x, nodes.Assign)]
    # remove assigns from the body so they are not rendered twice
    node.body = [x for x in node.body if not isinstance(x, nodes.Assign)]
    # get a list of all the assigns in this with block
    # both on the tag, and within the body of the block
    all_assigns = assigns_in_tag + assigns_in_body
    with self._execution():
        self.output.write('(function () {')
    with self._scoped_variables(all_assigns, **kwargs):
        # FIX: the loop variable previously shadowed the `node` parameter,
        # making the `with` node itself unreachable after the loop.
        for child in node.body:
            self._process_node(child, **kwargs)
    with self._execution():
        self.output.write('})();')
    # restore previous stored names
    self.stored_names = previous_stored_names
def _process_compare(self, node, **kwargs):
    """
    Render a comparison expression.

    Equality against a constant uses plain `==`/`!=`; equality between two
    non-constant operands uses `__runtime.isEqual` for deep comparison.
    Only a single comparison operator is supported.
    """
    if len(node.ops) > 1:
        raise Exception('Multiple operands are not supported.')
    operand = node.ops[0]
    is_equality = operand.op in ('eq', 'ne')
    left_hand_is_const = isinstance(node.expr, nodes.Const)
    right_hand_is_const = isinstance(operand.expr, nodes.Const)
    # If the operand is equality and neither the left or right hand side are constants then we
    # will need to use the JavaScript deep equals function. Ideally we want to avoid using this
    # as it is quite a big function.
    use_is_equal_function = is_equality and not (left_hand_is_const or right_hand_is_const)
    with option(kwargs, use_python_bool_wrapper=False):
        if use_is_equal_function:
            if operand.op == 'ne':
                self.output.write('!')
            self.output.write('__runtime.isEqual(')
        self._process_node(node.expr, **kwargs)
        if use_is_equal_function:
            self.output.write(',')
        else:
            self.output.write(OPERANDS.get(operand.op))
        self._process_node(operand.expr, **kwargs)
        if use_is_equal_function:
            self.output.write(')')
def _process_operand(self, node, **kwargs):
    """Emit a comparison operand: the JS operator followed by its expression."""
    self.output.write(OPERANDS.get(node.op))
    self._process_node(node.expr, **kwargs)
def _process_const(self, node, **_):
    """Emit a constant as its JSON representation (valid JS literal)."""
    with self._interpolation():
        self.output.write(json.dumps(node.value))
def _process_nonetype(self, node, **_):
    """Emit Python `None` as JavaScript `null`."""
    with self._interpolation():
        self.output.write('null')
def _process_neg(self, node, **kwargs):
    """Emit a unary negation: `-` followed by the operand."""
    with self._interpolation():
        self.output.write('-')
        self._process_node(node.node, **kwargs)
def _process_list(self, node, **kwargs):
    """Emit a Jinja list literal as a JavaScript array literal."""
    write = self.output.write
    write('[')
    last = len(node.items) - 1
    for index, element in enumerate(node.items):
        self._process_node(element, **kwargs)
        if index < last:
            write(',')
    write(']')
def _process_test(self, node, **kwargs):
    """Dispatch a Jinja test (e.g. `is defined`) to its `_process_test_<name>` handler."""
    with option(kwargs, use_python_bool_wrapper=False):
        handler = getattr(self, '_process_test_%s' % node.name, None)
        if not callable(handler):
            raise Exception('Unsupported test: %s' % node.name)
        handler(node, **kwargs)
def _process_test_defined(self, node, **kwargs):
    """`is defined` -> JS `typeof x !== "undefined"` check."""
    self.output.write('(typeof ')
    self._process_node(node.node, **kwargs)
    self.output.write(' !== "undefined")')
def _process_test_undefined(self, node, **kwargs):
    """`is undefined` -> JS strict comparison against `undefined`."""
    self._process_node(node.node, **kwargs)
    self.output.write(' === undefined')
def _process_test_callable(self, node, **kwargs):
    """`is callable` -> runtime type check for "Function"."""
    self.output.write('__runtime.type(')
    self._process_node(node.node, **kwargs)
    self.output.write(') === "Function"')
def _process_test_divisibleby(self, node, **kwargs):
    """`is divisibleby(n)` -> JS modulo test against the first argument."""
    self._process_node(node.node, **kwargs)
    self.output.write(' % ')
    self._process_node(node.args[0], **kwargs)
    self.output.write(' === 0')
def _process_test_even(self, node, **kwargs):
    """`is even` -> JS `x % 2 === 0`."""
    self._process_node(node.node, **kwargs)
    self.output.write(' % 2 === 0')
def _process_test_odd(self, node, **kwargs):
    """`is odd` -> JS `x % 2 === 1`."""
    self._process_node(node.node, **kwargs)
    self.output.write(' % 2 === 1')
def _process_test_none(self, node, **kwargs):
    """`is none` -> JS strict comparison against `null`."""
    self._process_node(node.node, **kwargs)
    self.output.write(' === null')
def _process_test_upper(self, node, **kwargs):
    """`is upper` -> compare the value with its upper-cased form (node emitted twice)."""
    self._process_node(node.node, **kwargs)
    self.output.write('.toUpperCase() === ')
    self._process_node(node.node, **kwargs)
def _process_test_lower(self, node, **kwargs):
    """`is lower` -> compare the value with its lower-cased form (node emitted twice)."""
    self._process_node(node.node, **kwargs)
    self.output.write('.toLowerCase() === ')
    self._process_node(node.node, **kwargs)
def _process_test_string(self, node, **kwargs):
    """`is string` -> runtime type check for "String"."""
    self.output.write('__runtime.type(')
    self._process_node(node.node, **kwargs)
    self.output.write(') === "String"')
def _process_test_mapping(self, node, **kwargs):
    """`is mapping` -> runtime type check for "Object"."""
    self.output.write('__runtime.type(')
    self._process_node(node.node, **kwargs)
    self.output.write(') === "Object"')
def _process_test_number(self, node, **kwargs):
    """`is number` -> runtime "Number" type check that also excludes NaN."""
    self.output.write('(__runtime.type(')
    self._process_node(node.node, **kwargs)
    self.output.write(') === "Number" && !isNaN(')
    self._process_node(node.node, **kwargs)
    self.output.write('))')
def _process_include(self, node, **kwargs):
    """
    Render `{% include %}` as a call to the included template's function.

    Self-includes call this template's own named function (which is why the
    generated function is named). For es6/commonjs modules the include path
    is rewritten relative to this template's file; otherwise an optional
    `include_prefix` is applied. Without a module format the include goes
    through `jinjaToJS.include` at runtime.
    """
    with self._interpolation(safe=True):
        include_path = node.template.value
        if include_path == self.template_name:
            # template is including itself
            include_var_name = self.js_function_name
        else:
            if self.include_prefix:
                include_path = self.include_prefix + node.template.value
            elif self.js_module_format in ('es6', 'commonjs',) and self.template_name:
                _, absolute_include_path, _ = self.environment.loader.get_source(
                    self.environment, node.template.value
                )
                include_path = os.path.relpath(
                    absolute_include_path, os.path.dirname(self.template_path)
                )
                # relative module paths must start with './' to resolve locally
                if not include_path.startswith('.'):
                    include_path = './' + include_path
            include_path = path.splitext(include_path)[0] + self.include_ext
            include_var_name = self._get_depencency_var_name(include_path)
            if not include_var_name:
                include_var_name = self._add_dependency(include_path)
        if self.js_module_format is None:
            self.output.write('jinjaToJS.include("')
            self.output.write(include_path)
            self.output.write('");')
        else:
            self.output.write(include_var_name)
            self.output.write('(')
            self.output.write(self.context_name)
            self.output.write(')')
def _process_add(self, node, **kwargs):
    # `+` delegates to the shared math emitter.
    self._process_math(node, math_operator=' + ', **kwargs)
def _process_sub(self, node, **kwargs):
    # `-` delegates to the shared math emitter.
    self._process_math(node, math_operator=' - ', **kwargs)
def _process_div(self, node, **kwargs):
    # `/` delegates to the shared math emitter (JS division is always float).
    self._process_math(node, math_operator=' / ', **kwargs)
def _process_floordiv(self, node, **kwargs):
    # `//` -> JS division wrapped in Math.floor.
    self._process_math(node, math_operator=' / ', function='Math.floor', **kwargs)
def _process_mul(self, node, **kwargs):
    # `*` delegates to the shared math emitter.
    self._process_math(node, math_operator=' * ', **kwargs)
def _process_mod(self, node, **kwargs):
    # `%` delegates to the shared math emitter.
    self._process_math(node, math_operator=' % ', **kwargs)
def _process_math(self, node, math_operator=None, function=None, **kwargs):
    """
    Processes a math node e.g. `Div`, `Sub`, `Add`, `Mul` etc...
    If `function` is provided the expression is wrapped in a call to that function.

    Args:
        node: binary-op AST node with `left` and `right` children.
        math_operator (str): JS infix operator, including surrounding spaces.
        function (str, optional): JS function name to wrap the expression in.
    """
    with self._interpolation():
        if function:
            self.output.write(function)
            self.output.write('(')
        self._process_node(node.left, **kwargs)
        self.output.write(math_operator)
        self._process_node(node.right, **kwargs)
        if function:
            self.output.write(')')
def _process_loop_helper(self, node, **kwargs):
    """
    Emit the JS expression for a loop helper e.g. {{ loop.first }} or
    {{ loop.index }}. Inside the generated each() callback, arguments[1]
    is the zero-based index and arguments[2] is the iterable.
    """
    expressions = {
        LOOP_HELPER_INDEX: '(arguments[1] + 1)',
        LOOP_HELPER_INDEX_0: 'arguments[1]',
        LOOP_HELPER_FIRST: '(arguments[1] == 0)',
        LOOP_HELPER_LAST: '(arguments[1] == arguments[2].length - 1)',
        LOOP_HELPER_LENGTH: 'arguments[2].length',
    }
    expression = expressions.get(node.attr)
    if expression is not None:
        self.output.write(expression)
def _process_args(self, node, **kwargs):
    """Emit the node's call arguments as a comma separated list, if any."""
    args = getattr(node, 'args', None)
    if not args:
        return
    last_index = len(args) - 1
    for index, argument in enumerate(args):
        self._process_node(argument, **kwargs)
        if index < last_index:
            self.output.write(',')
@contextlib.contextmanager
def _execution(self):
    """
    Context manager for executing some JavaScript inside a template.

    Only the outermost nesting level transitions the state machine
    (DEFAULT -> EXECUTING and back); nested uses are no-ops. Yields a
    `close` callable so a caller can end execution early (used by
    `_process_if` for else/elif chains).
    """
    did_start_executing = False
    if self.state == STATE_DEFAULT:
        did_start_executing = True
        self.state = STATE_EXECUTING
    def close():
        # only the level that opened execution may close it
        if did_start_executing and self.state == STATE_EXECUTING:
            self.state = STATE_DEFAULT
    yield close
    close()
@contextlib.contextmanager
def _interpolation(self, safe=False):
    """
    Context manager that wraps emitted JS in `__result += ...`.

    Unless `safe` is True the value is passed through `__runtime.escape`.
    A null-coalescing `__tmp` dance turns null/undefined into "". As with
    `_execution`, only the outermost nesting level opens and closes the
    interpolation.
    """
    did_start_interpolating = False
    if self.state == STATE_DEFAULT:
        did_start_interpolating = True
        self.output.write('__result += "" + ')
        if safe is not True:
            self.output.write('__runtime.escape')
        self.output.write('((__tmp = (')
        self.state = STATE_INTERPOLATING
    def close():
        # only the level that opened interpolation may close it
        if did_start_interpolating and self.state == STATE_INTERPOLATING:
            self.output.write(')) == null ? "" : __tmp);')
            self.state = STATE_DEFAULT
    yield close
    close()
@contextlib.contextmanager
def _scoped_variables(self, nodes_list, **kwargs):
    """
    Context manager for creating scoped variables defined by the nodes in `nodes_list`.
    These variables will be added to the context, and when the context manager exits the
    context object will be restored to its previous state.
    """
    tmp_vars = []
    for node in nodes_list:
        is_assign_node = isinstance(node, nodes.Assign)
        name = node.target.name if is_assign_node else node.name
        # create a temp variable name
        tmp_var = next(self.temp_var_names)
        # save previous context value
        with self._execution():
            # save the current value of this name
            self.output.write('var %s = %s.%s;' % (tmp_var, self.context_name, name))
            # add new value to context
            self.output.write('%s.%s = ' % (self.context_name, name))
            if is_assign_node:
                self._process_node(node.node, **kwargs)
            else:
                self.output.write(node.name)
            self.output.write(';')
        tmp_vars.append((tmp_var, name))
    yield
    # restore context
    for tmp_var, name in tmp_vars:
        with self._execution():
            self.output.write('%s.%s = %s;' % (self.context_name, name, tmp_var))
@contextlib.contextmanager
def _python_bool_wrapper(self, **kwargs):
    """
    Optionally wrap the emitted expression in `__runtime.boolean(...)` so
    JS truthiness matches Python's (empty arrays/objects are falsy).
    Yields kwargs with the wrapper flag cleared so nested nodes do not
    double-wrap.
    """
    use_python_bool_wrapper = kwargs.get('use_python_bool_wrapper')
    if use_python_bool_wrapper:
        self.output.write('__runtime.boolean(')
    with option(kwargs, use_python_bool_wrapper=False):
        yield kwargs
    if use_python_bool_wrapper:
        self.output.write(')')
|
jonbretman/jinja-to-js
|
jinja_to_js/__init__.py
|
JinjaToJS.get_output
|
python
|
def get_output(self):
    """Return the compiled template as a complete JavaScript module string."""
    # generate the JS function string
    template_function = TEMPLATE_WRAPPER.format(
        function_name=self.js_function_name,
        template_code=self.output.getvalue()
    ).strip()
    # get the correct module format template
    module_format = JS_MODULE_FORMATS[self.js_module_format]
    # generate the module code
    return module_format(self.dependencies, template_function)
|
Returns the generated JavaScript code.
Returns:
str
|
train
|
https://github.com/jonbretman/jinja-to-js/blob/0a784b10a83d37a3171c5797547e9fc460c51289/jinja_to_js/__init__.py#L251-L268
| null |
class JinjaToJS(object):
def __init__(self,
             template_root,
             template_name,
             js_module_format=None,
             runtime_path='jinja-to-js',
             include_prefix='',
             include_ext='',
             child_blocks=None,
             dependencies=None,
             custom_filters=None):
    """
    Args:
        template_root (str): The path to where templates should be loaded from.
        template_name (str): The name of the template to compile (relative to `template_root`).
        js_module_format (str, optional): The JavaScript module format to use.
                                          One of ('amd', 'commonjs', 'es6')
        runtime_path (str, optional): If `js_module_format` is specified then the JavaScript
                                      runtime will be imported using the appropriate method.
                                      It defaults to assuming it will be imported from
                                      `node_modules` but you can change it using this option.
        include_prefix (str, optional): If using the `amd` module format you can use this option
                                        to add a prefix to every include path as AMD imports are
                                        generally relative to the main file, not the module
                                        importing.
        include_ext (str, optional): By default any includes will be references without an
                                     extension, as neither AMD, commonJS or ES6 require the
                                     '.js' extension. If you want to use an extension, say
                                     '.template' then set this option to a string including
                                     the leading '.'
        child_blocks (dict, optional): Used internally when handling templates that extend
                                       other templates.
        dependencies (list of tuple, optional): Used internally when handling templates that
                                                extend other templates.
        custom_filters (list of str, optional): List of custom filters which should be allowed.
                                                These may be filters supported by Jinja but not
                                                supported by jinja-to-js. These filters MUST be
                                                registered with the jinja-to-js JS runtime.
    """
    self.environment = Environment(loader=FileSystemLoader(template_root),
                                   autoescape=True,
                                   extensions=['jinja2.ext.with_', 'jinja2.ext.autoescape'])
    self.output = six.StringIO()
    self.stored_names = set()
    self.temp_var_names = temp_var_names_generator()
    self.state = STATE_DEFAULT
    self.child_blocks = child_blocks or {}
    self.dependencies = dependencies or []
    self._runtime_function_cache = []
    self.js_module_format = js_module_format
    self.runtime_path = runtime_path
    self.include_prefix = include_prefix
    self.include_ext = include_ext
    self.template_root = template_root
    self.template_name = template_name
    self.custom_filters = custom_filters or []
    # The name of the JavaScript function that will output this template. By using a named
    # function the template can call itself which is required to support recursive includes.
    self.js_function_name = 'template' + ''.join(
        x.title() for x in re.split(r'[^\w]|_', path.splitext(self.template_name)[0])
    )
    self.context_name = 'context'
    self._add_dependency(self.runtime_path, 'jinjaToJS')
    template_string, template_path, _ = self.environment.loader.get_source(
        self.environment, self.template_name
    )
    # It is assumed that this will be the absolute path to the template. It is used to work out
    # related paths for includes.
    self.template_path = template_path
    if self.js_module_format not in JS_MODULE_FORMATS.keys():
        raise ValueError(
            'The js_module_format option must be one of: %s' % JS_MODULE_FORMATS.keys()
        )
    self.ast = self.environment.parse(template_string)
    try:
        # compiling happens eagerly: walking the AST writes into self.output
        for node in self.ast.body:
            self._process_node(node)
    except ExtendsException:
        # raised by _process_extends once the parent template has been emitted
        pass
# NOTE: the misspelling "depencency" is kept — callers use this exact name.
def _get_depencency_var_name(self, dependency):
    """
    Returns the variable name assigned to the given dependency or None if the dependency has
    not yet been registered.
    Args:
        dependency (str): The dependency that needs to be imported.
    Returns:
        str or None
    """
    for dep_path, var_name in self.dependencies:
        if dep_path == dependency:
            return var_name
def _add_dependency(self, dependency, var_name=None):
    """
    Adds the given dependency and returns the variable name to use to access it. If `var_name`
    is not given then a random one will be created.
    Args:
        dependency (str):
        var_name (str, optional):
    Returns:
        str
    """
    if var_name is None:
        var_name = next(self.temp_var_names)
    # Don't add duplicate dependencies
    if (dependency, var_name) not in self.dependencies:
        self.dependencies.append((dependency, var_name))
    return var_name
def _process_node(self, node, **kwargs):
node_name = node.__class__.__name__.lower()
handler = getattr(self, '_process_' + node_name, None)
if callable(handler):
handler(node, **kwargs)
else:
raise Exception('Unknown node %s' % node)
def _process_extends(self, node, **kwargs):
    """
    Processes an extends block e.g. `{% extends "some/template.jinja" %}`
    """
    # find all the blocks in this template
    for b in self.ast.find_all(nodes.Block):
        # if not already in `child_blocks` then this is the first time a
        # block with this name has been encountered.
        if b.name not in self.child_blocks:
            self.child_blocks[b.name] = b
        else:
            # otherwise we have seen this block before, so we need to find the last
            # super_block and add the block from this template to the end.
            block = self.child_blocks.get(b.name)
            while hasattr(block, 'super_block'):
                block = block.super_block
            block.super_block = b
    # load the parent template; its constructor compiles it immediately,
    # sharing this instance's child_blocks and dependencies
    parent_template = JinjaToJS(template_root=self.template_root,
                                template_name=node.template.value,
                                js_module_format=self.js_module_format,
                                runtime_path=self.runtime_path,
                                include_prefix=self.include_prefix,
                                include_ext=self.include_ext,
                                child_blocks=self.child_blocks,
                                dependencies=self.dependencies)
    # add the parent templates output to the current output
    self.output.write(parent_template.output.getvalue())
    # Raise an exception so we stop parsing this template
    raise ExtendsException
def _process_block(self, node, **kwargs):
    """
    Processes a block e.g. `{% block my_block %}{% endblock %}`

    Renders the most-derived child block for this name; `super()` calls
    inside it are resolved via the `super_block` reference passed along
    in kwargs (see `_process_call`).
    """
    # check if this node already has a 'super_block' attribute
    if not hasattr(node, 'super_block'):
        # since it doesn't it must be the last block in the inheritance chain
        node.super_block = None
        # see if there has been a child block defined - if there is this
        # will be the first block in the inheritance chain
        child_block = self.child_blocks.get(node.name)
        if child_block:
            # we have child nodes so we need to set `node` as the
            # super of the last one in the chain
            last_block = child_block
            while hasattr(last_block, 'super_block'):
                # FIX: advance from `last_block` (was `child_block.super_block`),
                # which stalled the walk on chains longer than one link; this
                # mirrors the correct walk in `_process_extends`.
                last_block = last_block.super_block
            # once we have found it, set this node as it's super block
            last_block.super_block = node
            # this is the node we want to process as it's the first in the inheritance chain
            node = child_block
    # process the block passing the it's super along, if this block
    # calls super() it will be handled by `_process_call`
    for n in node.body:
        self._process_node(n, super_block=node.super_block, **kwargs)
def _process_output(self, node, **kwargs):
    """
    Processes an output node, which will contain things like `Name` and `TemplateData` nodes.
    """
    for n in node.nodes:
        self._process_node(n, **kwargs)
def _process_templatedata(self, node, **_):
"""
Processes a `TemplateData` node, this is just a bit of as-is text
to be written to the output.
"""
# escape double quotes
value = re.sub('"', r'\\"', node.data)
# escape new lines
value = re.sub('\n', r'\\n', value)
# append value to the result
self.output.write('__result += "' + value + '";')
def _process_name(self, node, **kwargs):
    """
    Processes a `Name` node. Some examples of `Name` nodes:
        {{ foo }} -> 'foo' is a Name
        {% if foo }} -> 'foo' is a Name

    Names not introduced by the template itself are read off the context
    object; names being stored (e.g. loop targets) are remembered in
    `self.stored_names` so later reads skip the context prefix.
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs):
            if node.name not in self.stored_names and node.ctx != 'store':
                self.output.write(self.context_name)
                self.output.write('.')
            if node.ctx == 'store':
                self.stored_names.add(node.name)
            self.output.write(node.name)
def _process_getattr(self, node, **kwargs):
    """
    Processes a `GetAttr` node. e.g. {{ foo.bar }}

    `loop.*` helpers are special-cased; everything else becomes a JS
    dot-access.
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            if is_loop_helper(node):
                self._process_loop_helper(node, **new_kwargs)
            else:
                self._process_node(node.node, **new_kwargs)
                self.output.write('.')
                self.output.write(node.attr)
def _process_getitem(self, node, **kwargs):
    """
    Processes a `GetItem` node e.g. {{ foo["bar"] }}

    Slices become `Array.prototype.slice` calls (step not supported);
    plain subscripts become JS bracket access.
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.node, **new_kwargs)
            if isinstance(node.arg, nodes.Slice):
                self.output.write('.slice(')
                if node.arg.step is not None:
                    raise Exception('The step argument is not supported when slicing.')
                if node.arg.start is None:
                    # slice from the beginning
                    self.output.write('0')
                else:
                    self._process_node(node.arg.start, **new_kwargs)
                if node.arg.stop is None:
                    # open-ended slice: slice(start)
                    self.output.write(')')
                else:
                    self.output.write(',')
                    self._process_node(node.arg.stop, **new_kwargs)
                    self.output.write(')')
            else:
                self.output.write('[')
                self._process_node(node.arg, **new_kwargs)
                self.output.write(']')
def _process_for(self, node, **kwargs):
    """
    Processes a for loop. e.g.
        {% for number in numbers %}
            {{ number }}
        {% endfor %}
        {% for key, value in somemap.items() %}
            {{ key }} -> {{ value }}
        {% endfor %}

    Renders as a `__runtime.each(iterable, function(value, key) {...})`
    call. A loop filter (`{% for x in xs if cond %}`) becomes an early
    `return` inside the callback.
    """
    # since a for loop can introduce new names into the context
    # we need to remember the ones that existed outside the loop
    previous_stored_names = self.stored_names.copy()
    with self._execution():
        self.output.write('__runtime.each(')
        if is_method_call(node.iter, dict.keys.__name__):
            self.output.write('Object.keys(')
        self._process_node(node.iter, **kwargs)
        if is_method_call(node.iter, dict.keys.__name__):
            self.output.write(')')
        self.output.write(',')
        self.output.write('function')
        self.output.write('(')
        # javascript iterations put the value first, then the key
        if isinstance(node.target, nodes.Tuple):
            if len(node.target.items) > 2:
                raise Exception('De-structuring more than 2 items is not supported.')
            for i, item in enumerate(reversed(node.target.items)):
                self._process_node(item, **kwargs)
                if i < len(node.target.items) - 1:
                    self.output.write(',')
        else:
            self._process_node(node.target, **kwargs)
        self.output.write(')')
        self.output.write('{')
        if node.test:
            # loop filter: skip this iteration when the test fails
            self.output.write('if (!(')
            self._process_node(node.test, **kwargs)
            self.output.write(')) { return; }')
    assigns = node.target.items if isinstance(node.target, nodes.Tuple) else [node.target]
    with self._scoped_variables(assigns, **kwargs):
        for n in node.body:
            self._process_node(n, **kwargs)
    with self._execution():
        self.output.write('}')
        self.output.write(')')
        self.output.write(';')
    # restore the stored names
    self.stored_names = previous_stored_names
def _process_if(self, node, execute_end=None, **kwargs):
    """
    Processes an if block e.g. `{% if foo %} do something {% endif %}`

    Handles else/elif by recursing with an `execute_end` callable that
    closes the execution state opened by the previous branch.
    """
    with self._execution():
        self.output.write('if')
        self.output.write('(')
        with option(kwargs, use_python_bool_wrapper=True):
            self._process_node(node.test, **kwargs)
        self.output.write(')')
        self.output.write('{')
    # We accept an `execute_end` function as a keyword argument as this function is
    # recursive in the case of something like if-elif-elif-else. In these cases this
    # invocation of this function may have to close execution opened by a previous
    # invocation of this function.
    if execute_end:
        execute_end()
    # body
    for n in node.body:
        self._process_node(n, **kwargs)
    if not node.else_ and not node.elif_:
        # no else - just close the if
        with self._execution():
            self.output.write('}')
    else:
        # either an else or an elif
        with self._execution() as execute_end:
            self.output.write('}')
            self.output.write(' else ')
            # check for elif
            for n in node.elif_:
                self._process_node(n, execute_end=execute_end, **kwargs)
            if node.elif_ and node.else_:
                self.output.write(' else ')
            # open up the body
            self.output.write('{')
        # process the body of the else
        for n in node.else_:
            self._process_node(n, **kwargs)
        # close the body
        with self._execution():
            self.output.write('}')
def _process_condexpr(self, node, **kwargs):
    """Render `a if cond else b` as a JS ternary `(cond ? a : b)`."""
    with self._interpolation():
        self.output.write('(')
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.test, **new_kwargs)
        # NOTE(review): the branches use the outer kwargs, not new_kwargs —
        # presumably intentional (branches are values, not conditions); confirm.
        self.output.write(' ? ')
        self._process_node(node.expr1, **kwargs)
        self.output.write(' : ')
        self._process_node(node.expr2, **kwargs)
        self.output.write(')')
def _process_not(self, node, **kwargs):
    """Render logical negation as JS `!` with Python-style truthiness."""
    self.output.write('!')
    with self._python_bool_wrapper(**kwargs) as new_kwargs:
        self._process_node(node.node, **new_kwargs)
def _process_or(self, node, **kwargs):
    """Render `or` as JS `||`."""
    self._process_node(node.left, **kwargs)
    self.output.write(' || ')
    self._process_node(node.right, **kwargs)
def _process_and(self, node, **kwargs):
    """Render `and` as JS `&&`."""
    self._process_node(node.left, **kwargs)
    self.output.write(' && ')
    self._process_node(node.right, **kwargs)
def _process_tuple(self, node, **kwargs):
    """Render a Jinja tuple as a JavaScript array literal."""
    self.output.write('[')
    for i, item in enumerate(node.items):
        self._process_node(item, **kwargs)
        if i < len(node.items) - 1:
            self.output.write(',')
    self.output.write(']')
def _process_call(self, node, super_block=None, **kwargs):
    """
    Render a call expression.

    Dict iteration methods are unwrapped to the dict itself; `super()` is
    resolved against the enclosing block's `super_block`; everything else
    becomes a plain JS function call on a context value.
    """
    if is_method_call(node, DICT_ITER_METHODS):
        # special case for dict methods
        self._process_node(node.node.node, **kwargs)
    elif is_method_call(node, 'super'):
        # special case for the super() method which is available inside blocks
        if not super_block:
            raise Exception('super() called outside of a block with a parent.')
        self._process_node(super_block, **kwargs)
    else:
        # just a normal function call on a context variable
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self._process_node(node.node, **new_kwargs)
                self.output.write('(')
                self._process_args(node, **new_kwargs)
                self.output.write(')')
        # only output the semi-colon if we are not interpolating
        # NOTE(review): despite the comment above, an empty string is written
        # here (no semicolon) — presumably a deliberate no-op; confirm upstream.
        if self.state != STATE_INTERPOLATING:
            self.output.write('')
def _process_filter(self, node, **kwargs):
    """
    Dispatch a filter to its `_process_filter_<name>` handler, falling back
    to registered custom filters (rendered as `__filters.<name>(...)`).
    """
    method_name = getattr(self, '_process_filter_%s' % node.name, None)
    if callable(method_name):
        method_name(node, **kwargs)
    elif node.name in self.custom_filters:
        # custom filters are trusted: output is not escaped (safe=True)
        with self._interpolation(safe=True):
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self.output.write('__filters.%s(' % node.name)
                self._process_node(node.node, **new_kwargs)
                if getattr(node, 'args', None):
                    self.output.write(',')
                    self._process_args(node, **new_kwargs)
                self.output.write(')')
    else:
        raise Exception('Unsupported filter: %s' % node.name)
def _process_filter_safe(self, node, **kwargs):
    """Render the `safe` filter: interpolate without runtime escaping."""
    with self._interpolation(safe=True):
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.node, **new_kwargs)
def _process_filter_capitalize(self, node, **kwargs):
    """Render the `capitalize` filter as `__filters.capitalize(value)`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.capitalize(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(')')
def _process_filter_abs(self, node, **kwargs):
    """Render the `abs` filter as a `Math.abs(...)` call."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('Math.abs(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(')')
def _process_filter_attr(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
self.output.write('[')
self._process_node(node.args[0], **new_kwargs)
self.output.write(']')
def _process_filter_batch(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.batch(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_default(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.default(')
self._process_node(node.node, **new_kwargs)
if node.args:
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_first(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.first(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_int(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.int(')
self._process_node(node.node, **new_kwargs)
if node.args:
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_last(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.last(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_length(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.size(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_lower(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").toLowerCase()')
def _process_filter_slice(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.slice(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_title(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.title(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_trim(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").trim()')
def _process_filter_upper(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").toUpperCase()')
def _process_filter_truncate(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.truncate(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_assign(self, node, **kwargs):
with self._execution():
self.output.write('var ')
self._process_node(node.target, **kwargs)
self.output.write(' = ')
self._process_node(node.node, **kwargs)
self.output.write(';')
def _process_with(self, node, **kwargs):
    """
    Processes a `{% with %}` block by wrapping its body in a JavaScript IIFE.

    Assignments on the tag itself and `{% set %}` assignments found in the
    body are routed through `_scoped_variables`, so the outer context is
    restored when the block ends.
    """
    # keep a copy of the stored names before the scope
    previous_stored_names = self.stored_names.copy()
    # assigns in the with tag
    # e.g. {% with var = "something %}
    assigns_in_tag = [nodes.Assign(t, v) for t, v in zip(node.targets, node.values)]
    # assigns in the with body
    # e.g. {% set name = 'John' %}
    assigns_in_body = [x for x in node.body if isinstance(x, nodes.Assign)]
    # remove assigns from the body so they are not rendered twice
    node.body = [x for x in node.body if not isinstance(x, nodes.Assign)]
    # get a list of all the assigns in this with block
    # both on the tag, and within the body of the block
    all_assigns = assigns_in_tag + assigns_in_body
    with self._execution():
        self.output.write('(function () {')
    with self._scoped_variables(all_assigns, **kwargs):
        # FIX: the loop variable previously shadowed the `node` parameter,
        # making the `with` node itself unreachable after the loop.
        for child in node.body:
            self._process_node(child, **kwargs)
    with self._execution():
        self.output.write('})();')
    # restore previous stored names
    self.stored_names = previous_stored_names
def _process_compare(self, node, **kwargs):
    """
    Processes a comparison e.g. `{% if a == b %}`.

    Only a single operand is supported. Equality/inequality between two
    non-constant operands is emitted as a `__runtime.isEqual(...)` call
    (deep equality); every other case uses the plain JS operator.
    """
    if len(node.ops) > 1:
        raise Exception('Multiple operands are not supported.')
    operand = node.ops[0]
    is_equality = operand.op in ('eq', 'ne')
    left_hand_is_const = isinstance(node.expr, nodes.Const)
    right_hand_is_const = isinstance(operand.expr, nodes.Const)
    # If the operand is equality and neither the left or right hand side are constants then we
    # will need to use the JavaScript deep equals function. Ideally we want to avoid using this
    # as it is quite a big function.
    use_is_equal_function = is_equality and not (left_hand_is_const or right_hand_is_const)
    with option(kwargs, use_python_bool_wrapper=False):
        if use_is_equal_function:
            # `ne` is emitted as a negated isEqual call
            if operand.op == 'ne':
                self.output.write('!')
            self.output.write('__runtime.isEqual(')
        self._process_node(node.expr, **kwargs)
        if use_is_equal_function:
            self.output.write(',')
        else:
            self.output.write(OPERANDS.get(operand.op))
        self._process_node(operand.expr, **kwargs)
        if use_is_equal_function:
            self.output.write(')')
def _process_operand(self, node, **kwargs):
    """Emit the JS operator for `node.op` followed by the right-hand expression."""
    self.output.write(OPERANDS.get(node.op))
    self._process_node(node.expr, **kwargs)
def _process_const(self, node, **_):
    """Emit a constant value as a JSON literal (also valid JavaScript)."""
    with self._interpolation():
        self.output.write(json.dumps(node.value))
def _process_nonetype(self, node, **_):
    """Emit JS `null` for Python `None`."""
    with self._interpolation():
        self.output.write('null')
def _process_neg(self, node, **kwargs):
    """Emit a unary minus in front of the wrapped expression."""
    with self._interpolation():
        self.output.write('-')
        self._process_node(node.node, **kwargs)
def _process_list(self, node, **kwargs):
    """Emit a JS array literal for a Jinja list expression."""
    write = self.output.write
    write('[')
    last_index = len(node.items) - 1
    for index, item in enumerate(node.items):
        self._process_node(item, **kwargs)
        if index != last_index:
            write(',')
    write(']')
def _process_test(self, node, **kwargs):
    """Dispatch a Jinja `is` test to its `_process_test_<name>` handler."""
    with option(kwargs, use_python_bool_wrapper=False):
        handler = getattr(self, '_process_test_%s' % node.name, None)
        if not callable(handler):
            raise Exception('Unsupported test: %s' % node.name)
        handler(node, **kwargs)
def _process_test_defined(self, node, **kwargs):
    """Emit a JS `typeof x !== "undefined"` check for `is defined`."""
    self.output.write('(typeof ')
    self._process_node(node.node, **kwargs)
    self.output.write(' !== "undefined")')
def _process_test_undefined(self, node, **kwargs):
    """Emit a strict `=== undefined` check for `is undefined`."""
    self._process_node(node.node, **kwargs)
    self.output.write(' === undefined')
def _process_test_callable(self, node, **kwargs):
    """Emit a runtime type check for `is callable`."""
    self.output.write('__runtime.type(')
    self._process_node(node.node, **kwargs)
    self.output.write(') === "Function"')
def _process_test_divisibleby(self, node, **kwargs):
    """Emit a remainder check for `is divisibleby(n)`."""
    self._process_node(node.node, **kwargs)
    self.output.write(' % ')
    self._process_node(node.args[0], **kwargs)
    self.output.write(' === 0')
def _process_test_even(self, node, **kwargs):
    """Emit `% 2 === 0` for `is even`."""
    self._process_node(node.node, **kwargs)
    self.output.write(' % 2 === 0')
def _process_test_odd(self, node, **kwargs):
    """Emit `% 2 === 1` for `is odd`.

    NOTE(review): JS remainder of a negative odd number is -1, so this test
    is false for negative odd values — confirm whether that is acceptable.
    """
    self._process_node(node.node, **kwargs)
    self.output.write(' % 2 === 1')
def _process_test_none(self, node, **kwargs):
    """Emit a strict `=== null` check for `is none`."""
    self._process_node(node.node, **kwargs)
    self.output.write(' === null')
def _process_test_upper(self, node, **kwargs):
    """Emit a check that the value equals its upper-cased self (`is upper`)."""
    self._process_node(node.node, **kwargs)
    self.output.write('.toUpperCase() === ')
    self._process_node(node.node, **kwargs)
def _process_test_lower(self, node, **kwargs):
    """Emit a check that the value equals its lower-cased self (`is lower`)."""
    self._process_node(node.node, **kwargs)
    self.output.write('.toLowerCase() === ')
    self._process_node(node.node, **kwargs)
def _process_test_string(self, node, **kwargs):
    """Emit a runtime type check for `is string`."""
    self.output.write('__runtime.type(')
    self._process_node(node.node, **kwargs)
    self.output.write(') === "String"')
def _process_test_mapping(self, node, **kwargs):
    """Emit a runtime type check for `is mapping` (plain JS object)."""
    self.output.write('__runtime.type(')
    self._process_node(node.node, **kwargs)
    self.output.write(') === "Object"')
def _process_test_number(self, node, **kwargs):
    """Emit a runtime Number check that also excludes NaN (`is number`)."""
    self.output.write('(__runtime.type(')
    self._process_node(node.node, **kwargs)
    self.output.write(') === "Number" && !isNaN(')
    self._process_node(node.node, **kwargs)
    self.output.write('))')
def _process_include(self, node, **kwargs):
    """
    Processes an `{% include %}` tag by calling the included template's
    compiled function with the current context.

    The include path is resolved in priority order: self-include uses this
    template's own function name; otherwise `include_prefix` is applied, or
    for es6/commonjs modules a path relative to this template is computed.
    """
    with self._interpolation(safe=True):
        include_path = node.template.value
        if include_path == self.template_name:
            # template is including itself
            include_var_name = self.js_function_name
        else:
            if self.include_prefix:
                include_path = self.include_prefix + node.template.value
            elif self.js_module_format in ('es6', 'commonjs',) and self.template_name:
                # resolve the included template to a path relative to this file
                _, absolute_include_path, _ = self.environment.loader.get_source(
                    self.environment, node.template.value
                )
                include_path = os.path.relpath(
                    absolute_include_path, os.path.dirname(self.template_path)
                )
                if not include_path.startswith('.'):
                    include_path = './' + include_path
            # swap the extension for the configured one (may be empty)
            include_path = path.splitext(include_path)[0] + self.include_ext
            include_var_name = self._get_depencency_var_name(include_path)
            if not include_var_name:
                include_var_name = self._add_dependency(include_path)
        if self.js_module_format is None:
            # no module system: defer to the global runtime include helper
            self.output.write('jinjaToJS.include("')
            self.output.write(include_path)
            self.output.write('");')
        else:
            # call the imported template function with the current context
            self.output.write(include_var_name)
            self.output.write('(')
            self.output.write(self.context_name)
            self.output.write(')')
def _process_add(self, node, **kwargs):
    """Emit a JS `+` expression."""
    self._process_math(node, math_operator=' + ', **kwargs)
def _process_sub(self, node, **kwargs):
    """Emit a JS `-` expression."""
    self._process_math(node, math_operator=' - ', **kwargs)
def _process_div(self, node, **kwargs):
    """Emit a JS `/` expression."""
    self._process_math(node, math_operator=' / ', **kwargs)
def _process_floordiv(self, node, **kwargs):
    """Emit floor division as `Math.floor(a / b)`."""
    self._process_math(node, math_operator=' / ', function='Math.floor', **kwargs)
def _process_mul(self, node, **kwargs):
    """Emit a JS `*` expression."""
    self._process_math(node, math_operator=' * ', **kwargs)
def _process_mod(self, node, **kwargs):
    """Emit a JS `%` (remainder) expression."""
    self._process_math(node, math_operator=' % ', **kwargs)
def _process_math(self, node, math_operator=None, function=None, **kwargs):
    """
    Processes a math node e.g. `Div`, `Sub`, `Add`, `Mul` etc...
    If `function` is provided the expression is wrapped in a call to that function.

    Args:
        node: a binary AST node with `left` and `right` children.
        math_operator (str): the JS infix operator, including spaces.
        function (str, optional): JS function name to wrap the expression in.
    """
    with self._interpolation():
        if function:
            self.output.write(function)
            self.output.write('(')
        self._process_node(node.left, **kwargs)
        self.output.write(math_operator)
        self._process_node(node.right, **kwargs)
        if function:
            self.output.write(')')
def _process_loop_helper(self, node, **kwargs):
    """
    Processes a loop helper e.g. {{ loop.first }} or {{ loop.index }}

    Emits expressions over the implicit `arguments` of the loop callback —
    presumably `arguments[1]` is the index and `arguments[2]` the collection
    passed by `__runtime.each`; confirm against the JS runtime.
    """
    if node.attr == LOOP_HELPER_INDEX:
        self.output.write('(arguments[1] + 1)')
    elif node.attr == LOOP_HELPER_INDEX_0:
        self.output.write('arguments[1]')
    elif node.attr == LOOP_HELPER_FIRST:
        self.output.write('(arguments[1] == 0)')
    elif node.attr == LOOP_HELPER_LAST:
        self.output.write('(arguments[1] == arguments[2].length - 1)')
    elif node.attr == LOOP_HELPER_LENGTH:
        self.output.write('arguments[2].length')
def _process_args(self, node, **kwargs):
    """Emit the node's arguments as a comma-separated list (no-op if absent)."""
    args = getattr(node, 'args', None)
    if not args:
        return
    last_index = len(args) - 1
    for index, arg in enumerate(args):
        self._process_node(arg, **kwargs)
        if index != last_index:
            self.output.write(',')
@contextlib.contextmanager
def _execution(self):
    """
    Context manager for executing some JavaScript inside a template.

    Switches the compiler state to EXECUTING (if currently DEFAULT) and
    yields a `close` callable so callers such as `_process_if` can end the
    execution region early; `close` is also invoked on normal exit.
    """
    did_start_executing = False
    if self.state == STATE_DEFAULT:
        did_start_executing = True
        self.state = STATE_EXECUTING
    def close():
        # only the invocation that opened the region may close it
        if did_start_executing and self.state == STATE_EXECUTING:
            self.state = STATE_DEFAULT
    yield close
    close()
@contextlib.contextmanager
def _interpolation(self, safe=False):
    """
    Context manager for interpolating a value into the JS `__result` string.

    Opens `__result += "" + ...` (wrapped in `__runtime.escape` unless
    `safe=True`) and, on exit, emits a null-guard so `null`/`undefined`
    values render as the empty string.
    """
    did_start_interpolating = False
    if self.state == STATE_DEFAULT:
        did_start_interpolating = True
        self.output.write('__result += "" + ')
        if safe is not True:
            self.output.write('__runtime.escape')
        self.output.write('((__tmp = (')
        self.state = STATE_INTERPOLATING
    def close():
        # only the invocation that opened the interpolation may close it
        if did_start_interpolating and self.state == STATE_INTERPOLATING:
            self.output.write(')) == null ? "" : __tmp);')
            self.state = STATE_DEFAULT
    yield close
    close()
@contextlib.contextmanager
def _scoped_variables(self, nodes_list, **kwargs):
    """
    Context manager for creating scoped variables defined by the nodes in `nodes_list`.
    These variables will be added to the context, and when the context manager exits the
    context object will be restored to its previous state.
    """
    tmp_vars = []
    for node in nodes_list:
        is_assign_node = isinstance(node, nodes.Assign)
        name = node.target.name if is_assign_node else node.name
        # create a temp variable name
        tmp_var = next(self.temp_var_names)
        # save previous context value
        with self._execution():
            # save the current value of this name
            self.output.write('var %s = %s.%s;' % (tmp_var, self.context_name, name))
            # add new value to context
            self.output.write('%s.%s = ' % (self.context_name, name))
            if is_assign_node:
                self._process_node(node.node, **kwargs)
            else:
                # plain Name node: the value is the JS variable of the same name
                self.output.write(node.name)
            self.output.write(';')
        tmp_vars.append((tmp_var, name))
    yield
    # restore context
    for tmp_var, name in tmp_vars:
        with self._execution():
            self.output.write('%s.%s = %s;' % (self.context_name, name, tmp_var))
@contextlib.contextmanager
def _python_bool_wrapper(self, **kwargs):
    """
    Context manager that wraps the emitted expression in `__runtime.boolean(...)`
    when Python-style truthiness was requested, yielding kwargs with the flag
    cleared so nested expressions are not double-wrapped.
    """
    use_python_bool_wrapper = kwargs.get('use_python_bool_wrapper')
    if use_python_bool_wrapper:
        self.output.write('__runtime.boolean(')
    with option(kwargs, use_python_bool_wrapper=False):
        yield kwargs
    if use_python_bool_wrapper:
        self.output.write(')')
|
jonbretman/jinja-to-js
|
jinja_to_js/__init__.py
|
JinjaToJS._get_depencency_var_name
|
python
|
def _get_depencency_var_name(self, dependency):
for dep_path, var_name in self.dependencies:
if dep_path == dependency:
return var_name
|
Returns the variable name assigned to the given dependency or None if the dependency has
not yet been registered.
Args:
dependency (str): The dependency that needs to be imported.
Returns:
str or None
|
train
|
https://github.com/jonbretman/jinja-to-js/blob/0a784b10a83d37a3171c5797547e9fc460c51289/jinja_to_js/__init__.py#L270-L283
| null |
class JinjaToJS(object):
def __init__(self,
             template_root,
             template_name,
             js_module_format=None,
             runtime_path='jinja-to-js',
             include_prefix='',
             include_ext='',
             child_blocks=None,
             dependencies=None,
             custom_filters=None):
    """
    Args:
        template_root (str): The path to where templates should be loaded from.
        template_name (str): The name of the template to compile (relative to `template_root`).
        js_module_format (str, optional): The JavaScript module format to use.
                                          One of ('amd', 'commonjs', 'es6')
        runtime_path (str, optional): If `js_module_format` is specified then the JavaScript
                                      runtime will be imported using the appropriate method.
                                      It defaults to assuming it will be imported from
                                      `node_modules` but you can change it using this option.
        include_prefix (str, optional): If using the `amd` module format you can use this option
                                        to add a prefix to every include path as AMD imports are
                                        generally relative to the main file, not the module
                                        importing.
        include_ext (str, optional): By default any includes will be references without an
                                     extension, as neither AMD, commonJS or ES6 require the
                                     '.js' extension. If you want to use an extension, say
                                     '.template' then set this option to a string including
                                     the leading '.'
        child_blocks (dict, optional): Used internally when handling templates that extend
                                       other templates.
        dependencies (list of tuple, optional): Used internally when handling templates that
                                                extend other templates.
        custom_filters (list of str, optional): List of custom filters which should be allowed.
                                                These may be filters supported by Jinja but not
                                                supported by jinja-to-js. These filters MUST be
                                                registered with the jinja-to-js JS runtime.
    """
    self.environment = Environment(loader=FileSystemLoader(template_root),
                                   autoescape=True,
                                   extensions=['jinja2.ext.with_', 'jinja2.ext.autoescape'])
    self.output = six.StringIO()
    self.stored_names = set()
    self.temp_var_names = temp_var_names_generator()
    self.state = STATE_DEFAULT
    self.child_blocks = child_blocks or {}
    self.dependencies = dependencies or []
    self._runtime_function_cache = []
    self.js_module_format = js_module_format
    self.runtime_path = runtime_path
    self.include_prefix = include_prefix
    self.include_ext = include_ext
    self.template_root = template_root
    self.template_name = template_name
    self.custom_filters = custom_filters or []
    # The name of the JavaScript function that will output this template. By using a named
    # function the template can call itself which is required to support recursive includes.
    self.js_function_name = 'template' + ''.join(
        x.title() for x in re.split(r'[^\w]|_', path.splitext(self.template_name)[0])
    )
    self.context_name = 'context'
    # the runtime is always the first dependency, bound to `jinjaToJS`
    self._add_dependency(self.runtime_path, 'jinjaToJS')
    template_string, template_path, _ = self.environment.loader.get_source(
        self.environment, self.template_name
    )
    # It is assumed that this will be the absolute path to the template. It is used to work out
    # related paths for includes.
    self.template_path = template_path
    # NOTE(review): this rejects any js_module_format not present in
    # JS_MODULE_FORMATS — confirm that `None` is a key there, otherwise the
    # `js_module_format is None` branches elsewhere are unreachable.
    if self.js_module_format not in JS_MODULE_FORMATS.keys():
        raise ValueError(
            'The js_module_format option must be one of: %s' % JS_MODULE_FORMATS.keys()
        )
    # parse and compile the whole template; an ExtendsException aborts the
    # walk once a parent template has been fully emitted instead.
    self.ast = self.environment.parse(template_string)
    try:
        for node in self.ast.body:
            self._process_node(node)
    except ExtendsException:
        pass
def get_output(self):
    """
    Returns the generated JavaScript code.

    Returns:
        str
    """
    # generate the JS function string
    template_function = TEMPLATE_WRAPPER.format(
        function_name=self.js_function_name,
        template_code=self.output.getvalue()
    ).strip()
    # get the correct module format template
    module_format = JS_MODULE_FORMATS[self.js_module_format]
    # generate the module code
    return module_format(self.dependencies, template_function)
def _add_dependency(self, dependency, var_name=None):
"""
Adds the given dependency and returns the variable name to use to access it. If `var_name`
is not given then a random one will be created.
Args:
dependency (str):
var_name (str, optional):
Returns:
str
"""
if var_name is None:
var_name = next(self.temp_var_names)
# Don't add duplicate dependencies
if (dependency, var_name) not in self.dependencies:
self.dependencies.append((dependency, var_name))
return var_name
def _process_node(self, node, **kwargs):
node_name = node.__class__.__name__.lower()
handler = getattr(self, '_process_' + node_name, None)
if callable(handler):
handler(node, **kwargs)
else:
raise Exception('Unknown node %s' % node)
def _process_extends(self, node, **kwargs):
    """
    Processes an extends block e.g. `{% extends "some/template.jinja" %}`

    Collects this template's blocks into `child_blocks`, compiles the parent
    template (passing the blocks down), copies its output, then raises
    ExtendsException so processing of this template stops.
    """
    # find all the blocks in this template
    for b in self.ast.find_all(nodes.Block):
        # if not already in `child_blocks` then this is the first time a
        # block with this name has been encountered.
        if b.name not in self.child_blocks:
            self.child_blocks[b.name] = b
        else:
            # otherwise we have seen this block before, so we need to find the last
            # super_block and add the block from this template to the end.
            block = self.child_blocks.get(b.name)
            while hasattr(block, 'super_block'):
                block = block.super_block
            block.super_block = b
    # load the parent template
    parent_template = JinjaToJS(template_root=self.template_root,
                                template_name=node.template.value,
                                js_module_format=self.js_module_format,
                                runtime_path=self.runtime_path,
                                include_prefix=self.include_prefix,
                                include_ext=self.include_ext,
                                child_blocks=self.child_blocks,
                                dependencies=self.dependencies)
    # add the parent templates output to the current output
    self.output.write(parent_template.output.getvalue())
    # Raise an exception so we stop parsing this template
    raise ExtendsException
def _process_block(self, node, **kwargs):
"""
Processes a block e.g. `{% block my_block %}{% endblock %}`
"""
# check if this node already has a 'super_block' attribute
if not hasattr(node, 'super_block'):
# since it doesn't it must be the last block in the inheritance chain
node.super_block = None
# see if there has been a child block defined - if there is this
# will be the first block in the inheritance chain
child_block = self.child_blocks.get(node.name)
if child_block:
# we have child nodes so we need to set `node` as the
# super of the last one in the chain
last_block = child_block
while hasattr(last_block, 'super_block'):
last_block = child_block.super_block
# once we have found it, set this node as it's super block
last_block.super_block = node
# this is the node we want to process as it's the first in the inheritance chain
node = child_block
# process the block passing the it's super along, if this block
# calls super() it will be handled by `_process_call`
for n in node.body:
self._process_node(n, super_block=node.super_block, **kwargs)
def _process_output(self, node, **kwargs):
    """Process an Output node by processing each of its child nodes in turn."""
    for child in node.nodes:
        self._process_node(child, **kwargs)
def _process_templatedata(self, node, **_):
"""
Processes a `TemplateData` node, this is just a bit of as-is text
to be written to the output.
"""
# escape double quotes
value = re.sub('"', r'\\"', node.data)
# escape new lines
value = re.sub('\n', r'\\n', value)
# append value to the result
self.output.write('__result += "' + value + '";')
def _process_name(self, node, **kwargs):
    """
    Processes a `Name` node. Some examples of `Name` nodes:
        {{ foo }} -> 'foo' is a Name
        {% if foo %} -> 'foo' is a Name
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs):
            if node.name not in self.stored_names and node.ctx != 'store':
                # not a block-local name: read it off the context object
                self.output.write(self.context_name)
                self.output.write('.')
            if node.ctx == 'store':
                # the name is being assigned to; remember it so later reads
                # are not prefixed with the context object
                self.stored_names.add(node.name)
            self.output.write(node.name)
def _process_getattr(self, node, **kwargs):
    """
    Processes a `GetAttr` node. e.g. {{ foo.bar }}

    Attribute access on the special `loop` variable is routed to
    `_process_loop_helper` instead of emitting a plain property access.
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            if is_loop_helper(node):
                self._process_loop_helper(node, **new_kwargs)
            else:
                self._process_node(node.node, **new_kwargs)
                self.output.write('.')
                self.output.write(node.attr)
def _process_getitem(self, node, **kwargs):
    """
    Processes a `GetItem` node e.g. {{ foo["bar"] }}

    Slices are emitted as `Array#slice` calls (step is unsupported);
    everything else becomes a JS subscript expression.
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.node, **new_kwargs)
            if isinstance(node.arg, nodes.Slice):
                self.output.write('.slice(')
                if node.arg.step is not None:
                    raise Exception('The step argument is not supported when slicing.')
                if node.arg.start is None:
                    # JS slice has no default start, so emit 0 explicitly
                    self.output.write('0')
                else:
                    self._process_node(node.arg.start, **new_kwargs)
                if node.arg.stop is None:
                    self.output.write(')')
                else:
                    self.output.write(',')
                    self._process_node(node.arg.stop, **new_kwargs)
                    self.output.write(')')
            else:
                self.output.write('[')
                self._process_node(node.arg, **new_kwargs)
                self.output.write(']')
def _process_for(self, node, **kwargs):
    """
    Processes a for loop. e.g.
        {% for number in numbers %}
            {{ number }}
        {% endfor %}
        {% for key, value in somemap.items() %}
            {{ key }} -> {{ value }}
        {% endfor %}

    Emits a `__runtime.each(iterable, function (value[, key]) { ... })` call.
    """
    # since a for loop can introduce new names into the context
    # we need to remember the ones that existed outside the loop
    previous_stored_names = self.stored_names.copy()
    with self._execution():
        self.output.write('__runtime.each(')
        if is_method_call(node.iter, dict.keys.__name__):
            # iterating dict keys: wrap the mapping in Object.keys(...)
            self.output.write('Object.keys(')
        self._process_node(node.iter, **kwargs)
        if is_method_call(node.iter, dict.keys.__name__):
            self.output.write(')')
        self.output.write(',')
        self.output.write('function')
        self.output.write('(')
        # javascript iterations put the value first, then the key
        if isinstance(node.target, nodes.Tuple):
            if len(node.target.items) > 2:
                raise Exception('De-structuring more than 2 items is not supported.')
            for i, item in enumerate(reversed(node.target.items)):
                self._process_node(item, **kwargs)
                if i < len(node.target.items) - 1:
                    self.output.write(',')
        else:
            self._process_node(node.target, **kwargs)
        self.output.write(')')
        self.output.write('{')
        if node.test:
            # loop filter e.g. {% for x in xs if x %}: skip non-matching items
            self.output.write('if (!(')
            self._process_node(node.test, **kwargs)
            self.output.write(')) { return; }')
    assigns = node.target.items if isinstance(node.target, nodes.Tuple) else [node.target]
    with self._scoped_variables(assigns, **kwargs):
        for n in node.body:
            self._process_node(n, **kwargs)
    with self._execution():
        self.output.write('}')
        self.output.write(')')
        self.output.write(';')
    # restore the stored names
    self.stored_names = previous_stored_names
def _process_if(self, node, execute_end=None, **kwargs):
    """
    Processes an if block e.g. `{% if foo %} do something {% endif %}`

    Args:
        node: the `If` node to emit.
        execute_end (callable, optional): close-function from an outer
            `_execution` region; see the comment below.
    """
    with self._execution():
        self.output.write('if')
        self.output.write('(')
        with option(kwargs, use_python_bool_wrapper=True):
            self._process_node(node.test, **kwargs)
        self.output.write(')')
        self.output.write('{')
        # We accept an `execute_end` function as a keyword argument as this function is
        # recursive in the case of something like if-elif-elif-else. In these cases this
        # invocation of this function may have to close execution opened by a previous
        # invocation of this function.
        if execute_end:
            execute_end()
    # body
    for n in node.body:
        self._process_node(n, **kwargs)
    if not node.else_ and not node.elif_:
        # no else - just close the if
        with self._execution():
            self.output.write('}')
    else:
        # either an else or an elif
        with self._execution() as execute_end:
            self.output.write('}')
            self.output.write(' else ')
            # check for elif
            for n in node.elif_:
                self._process_node(n, execute_end=execute_end, **kwargs)
            if node.elif_ and node.else_:
                self.output.write(' else ')
            # open up the body
            self.output.write('{')
        # process the body of the else
        for n in node.else_:
            self._process_node(n, **kwargs)
        # close the body
        with self._execution():
            self.output.write('}')
def _process_condexpr(self, node, **kwargs):
    """Emit a conditional expression `{{ a if test else b }}` as a JS ternary."""
    with self._interpolation():
        self.output.write('(')
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.test, **new_kwargs)
        # NOTE(review): expr1/expr2 use the original kwargs, not new_kwargs —
        # confirm that the bool-wrapper flag should apply only to the test.
        self.output.write(' ? ')
        self._process_node(node.expr1, **kwargs)
        self.output.write(' : ')
        self._process_node(node.expr2, **kwargs)
        self.output.write(')')
def _process_not(self, node, **kwargs):
    """Emit a JS `!` in front of the (bool-wrapped) expression."""
    self.output.write('!')
    with self._python_bool_wrapper(**kwargs) as new_kwargs:
        self._process_node(node.node, **new_kwargs)
def _process_or(self, node, **kwargs):
    """Emit `left || right`."""
    self._process_node(node.left, **kwargs)
    self.output.write(' || ')
    self._process_node(node.right, **kwargs)
def _process_and(self, node, **kwargs):
    """Emit `left && right`."""
    self._process_node(node.left, **kwargs)
    self.output.write(' && ')
    self._process_node(node.right, **kwargs)
def _process_tuple(self, node, **kwargs):
    """Emit a Jinja tuple as a JS array literal (JS has no tuples)."""
    self.output.write('[')
    for i, item in enumerate(node.items):
        self._process_node(item, **kwargs)
        if i < len(node.items) - 1:
            self.output.write(',')
    self.output.write(']')
def _process_call(self, node, super_block=None, **kwargs):
    """
    Processes a call node: dict iteration methods, `super()` inside blocks,
    or a plain function call on a context variable.
    """
    if is_method_call(node, DICT_ITER_METHODS):
        # special case for dict methods
        self._process_node(node.node.node, **kwargs)
    elif is_method_call(node, 'super'):
        # special case for the super() method which is available inside blocks
        if not super_block:
            raise Exception('super() called outside of a block with a parent.')
        self._process_node(super_block, **kwargs)
    else:
        # just a normal function call on a context variable
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self._process_node(node.node, **new_kwargs)
                self.output.write('(')
                self._process_args(node, **new_kwargs)
                self.output.write(')')
                # only output the semi-colon if we are not interpolating
                # NOTE(review): this writes an empty string, not ';' — the
                # comment suggests a ';' was intended; confirm upstream
                # before changing emitted output.
                if self.state != STATE_INTERPOLATING:
                    self.output.write('')
def _process_filter(self, node, **kwargs):
    """
    Dispatch a Jinja filter either to a built-in `_process_filter_<name>`
    handler or, for registered custom filters, to a runtime
    `__filters.<name>(...)` call (emitted unescaped).
    """
    method_name = getattr(self, '_process_filter_%s' % node.name, None)
    if callable(method_name):
        method_name(node, **kwargs)
    elif node.name in self.custom_filters:
        with self._interpolation(safe=True):
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self.output.write('__filters.%s(' % node.name)
                self._process_node(node.node, **new_kwargs)
                if getattr(node, 'args', None):
                    self.output.write(',')
                    self._process_args(node, **new_kwargs)
                self.output.write(')')
    else:
        raise Exception('Unsupported filter: %s' % node.name)
def _process_filter_safe(self, node, **kwargs):
with self._interpolation(safe=True):
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
def _process_filter_capitalize(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.capitalize(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_abs(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('Math.abs(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_attr(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
self.output.write('[')
self._process_node(node.args[0], **new_kwargs)
self.output.write(']')
def _process_filter_batch(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.batch(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_default(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.default(')
self._process_node(node.node, **new_kwargs)
if node.args:
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_first(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.first(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_int(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.int(')
self._process_node(node.node, **new_kwargs)
if node.args:
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_last(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.last(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_length(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.size(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_lower(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").toLowerCase()')
def _process_filter_slice(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.slice(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_title(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.title(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_trim(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").trim()')
def _process_filter_upper(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").toUpperCase()')
def _process_filter_truncate(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.truncate(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_assign(self, node, **kwargs):
with self._execution():
self.output.write('var ')
self._process_node(node.target, **kwargs)
self.output.write(' = ')
self._process_node(node.node, **kwargs)
self.output.write(';')
def _process_with(self, node, **kwargs):
# keep a copy of the stored names before the scope
previous_stored_names = self.stored_names.copy()
# assigns in the with tag
# e.g. {% with var = "something %}
assigns_in_tag = [nodes.Assign(t, v) for t, v in zip(node.targets, node.values)]
# assigns in the with body
# e.g. {% set name = 'John' %}
assigns_in_body = [x for x in node.body if isinstance(x, nodes.Assign)]
# remove assigns from the body
node.body = [x for x in node.body if not isinstance(x, nodes.Assign)]
# get a list of all the assigns in this with block
# both on the tag, and within the body of the block
all_assigns = assigns_in_tag + assigns_in_body
with self._execution():
self.output.write('(function () {')
with self._scoped_variables(all_assigns, **kwargs):
for node in node.body:
self._process_node(node, **kwargs)
with self._execution():
self.output.write('})();')
# restore previous stored names
self.stored_names = previous_stored_names
def _process_compare(self, node, **kwargs):
if len(node.ops) > 1:
raise Exception('Multiple operands are not supported.')
operand = node.ops[0]
is_equality = operand.op in ('eq', 'ne')
left_hand_is_const = isinstance(node.expr, nodes.Const)
right_hand_is_const = isinstance(operand.expr, nodes.Const)
# If the operand is equality and neither the left or right hand side are constants then we
# will need to use the JavaScript deep equals function. Ideally we want to avoid using this
# as it is quite a big function.
use_is_equal_function = is_equality and not (left_hand_is_const or right_hand_is_const)
with option(kwargs, use_python_bool_wrapper=False):
if use_is_equal_function:
if operand.op == 'ne':
self.output.write('!')
self.output.write('__runtime.isEqual(')
self._process_node(node.expr, **kwargs)
if use_is_equal_function:
self.output.write(',')
else:
self.output.write(OPERANDS.get(operand.op))
self._process_node(operand.expr, **kwargs)
if use_is_equal_function:
self.output.write(')')
def _process_operand(self, node, **kwargs):
self.output.write(OPERANDS.get(node.op))
self._process_node(node.expr, **kwargs)
def _process_const(self, node, **_):
with self._interpolation():
self.output.write(json.dumps(node.value))
def _process_nonetype(self, node, **_):
with self._interpolation():
self.output.write('null')
def _process_neg(self, node, **kwargs):
with self._interpolation():
self.output.write('-')
self._process_node(node.node, **kwargs)
def _process_list(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
def _process_test(self, node, **kwargs):
with option(kwargs, use_python_bool_wrapper=False):
method_name = getattr(self, '_process_test_%s' % node.name, None)
if callable(method_name):
method_name(node, **kwargs)
else:
raise Exception('Unsupported test: %s' % node.name)
def _process_test_defined(self, node, **kwargs):
self.output.write('(typeof ')
self._process_node(node.node, **kwargs)
self.output.write(' !== "undefined")')
def _process_test_undefined(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' === undefined')
def _process_test_callable(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Function"')
def _process_test_divisibleby(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % ')
self._process_node(node.args[0], **kwargs)
self.output.write(' === 0')
def _process_test_even(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % 2 === 0')
def _process_test_odd(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % 2 === 1')
def _process_test_none(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' === null')
def _process_test_upper(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write('.toUpperCase() === ')
self._process_node(node.node, **kwargs)
def _process_test_lower(self, node, **kwargs):
    """Jinja ``is lower`` -> the value equals its lower-cased self
    (the operand expression is emitted twice)."""
    self._process_node(node.node, **kwargs)
    self.output.write('.toLowerCase() === ')
    self._process_node(node.node, **kwargs)
def _process_test_string(self, node, **kwargs):
    """Jinja ``is string`` -> runtime type check for ``"String"``."""
    self.output.write('__runtime.type(')
    self._process_node(node.node, **kwargs)
    self.output.write(') === "String"')
def _process_test_mapping(self, node, **kwargs):
    """Jinja ``is mapping`` -> runtime type check for ``"Object"``."""
    self.output.write('__runtime.type(')
    self._process_node(node.node, **kwargs)
    self.output.write(') === "Object"')
def _process_test_number(self, node, **kwargs):
    """Jinja ``is number`` -> ``"Number"`` type that is not NaN
    (the operand expression is emitted twice)."""
    self.output.write('(__runtime.type(')
    self._process_node(node.node, **kwargs)
    self.output.write(') === "Number" && !isNaN(')
    self._process_node(node.node, **kwargs)
    self.output.write('))')
def _process_include(self, node, **kwargs):
    """Emit the code for ``{% include "..." %}`` (output is not re-escaped)."""
    with self._interpolation(safe=True):
        include_path = node.template.value
        if include_path == self.template_name:
            # template is including itself
            include_var_name = self.js_function_name
        else:
            if self.include_prefix:
                # caller-configured prefix wins over computed relative paths
                include_path = self.include_prefix + node.template.value
            elif self.js_module_format in ('es6', 'commonjs',) and self.template_name:
                _, absolute_include_path, _ = self.environment.loader.get_source(
                    self.environment, node.template.value
                )
                # make the import relative to the including template's own file
                include_path = os.path.relpath(
                    absolute_include_path, os.path.dirname(self.template_path)
                )
                if not include_path.startswith('.'):
                    include_path = './' + include_path
            # swap the template extension for the configured one (may be empty)
            include_path = path.splitext(include_path)[0] + self.include_ext
            include_var_name = self._get_depencency_var_name(include_path)
            if not include_var_name:
                include_var_name = self._add_dependency(include_path)
        if self.js_module_format is None:
            # no module system: defer to the runtime's include() helper
            self.output.write('jinjaToJS.include("')
            self.output.write(include_path)
            self.output.write('");')
        else:
            # module system: call the imported template function with the context
            self.output.write(include_var_name)
            self.output.write('(')
            self.output.write(self.context_name)
            self.output.write(')')
def _process_add(self, node, **kwargs):
    # binary '+' (JS '+' also concatenates strings)
    self._process_math(node, math_operator=' + ', **kwargs)

def _process_sub(self, node, **kwargs):
    # binary '-'
    self._process_math(node, math_operator=' - ', **kwargs)

def _process_div(self, node, **kwargs):
    # true division
    self._process_math(node, math_operator=' / ', **kwargs)

def _process_floordiv(self, node, **kwargs):
    # '//' -> Math.floor(a / b)
    self._process_math(node, math_operator=' / ', function='Math.floor', **kwargs)

def _process_mul(self, node, **kwargs):
    # binary '*'
    self._process_math(node, math_operator=' * ', **kwargs)

def _process_mod(self, node, **kwargs):
    # '%' -- note JS remainder differs from Python modulo for negative operands
    self._process_math(node, math_operator=' % ', **kwargs)
def _process_math(self, node, math_operator=None, function=None, **kwargs):
    """
    Processes a math node e.g. `Div`, `Sub`, `Add`, `Mul` etc...
    If `function` is provided the expression is wrapped in a call to that function.
    """
    with self._interpolation():
        if function:
            self.output.write(function)
            self.output.write('(')
        self._process_node(node.left, **kwargs)
        self.output.write(math_operator)
        self._process_node(node.right, **kwargs)
        if function:
            self.output.write(')')
def _process_loop_helper(self, node, **kwargs):
    """
    Processes a loop helper e.g. {{ loop.first }} or {{ loop.index }}
    """
    # Inside the generated each() callback, arguments[1] is the index and
    # arguments[2] is the collection being iterated.
    translations = {
        LOOP_HELPER_INDEX: '(arguments[1] + 1)',
        LOOP_HELPER_INDEX_0: 'arguments[1]',
        LOOP_HELPER_FIRST: '(arguments[1] == 0)',
        LOOP_HELPER_LAST: '(arguments[1] == arguments[2].length - 1)',
        LOOP_HELPER_LENGTH: 'arguments[2].length',
    }
    if node.attr in translations:
        self.output.write(translations[node.attr])
def _process_args(self, node, **kwargs):
args = getattr(node, 'args', None)
if not args:
return
for i, item in enumerate(args):
self._process_node(item, **kwargs)
if i < len(node.args) - 1:
self.output.write(',')
@contextlib.contextmanager
def _execution(self):
    """
    Context manager for executing some JavaScript inside a template.
    """
    # Only flip the state if we are the outermost _execution(); nested
    # uses leave the flag handling to whichever call set it.
    did_start_executing = False
    if self.state == STATE_DEFAULT:
        did_start_executing = True
        self.state = STATE_EXECUTING

    def close():
        # Idempotent: restore the default state once, only if we set it.
        if did_start_executing and self.state == STATE_EXECUTING:
            self.state = STATE_DEFAULT

    # close is yielded so callers can end execution early (see
    # _process_if); calling it again on exit is a harmless no-op.
    yield close
    close()
@contextlib.contextmanager
def _interpolation(self, safe=False):
    """Context manager that appends the wrapped expression to ``__result``.

    When ``safe`` is falsy the value is HTML-escaped via ``__runtime.escape``;
    the ``__tmp`` assignment turns null/undefined results into "".
    """
    did_start_interpolating = False
    if self.state == STATE_DEFAULT:
        did_start_interpolating = True
        self.output.write('__result += "" + ')
        if safe is not True:
            self.output.write('__runtime.escape')
        self.output.write('((__tmp = (')
        self.state = STATE_INTERPOLATING

    def close():
        # Idempotent: only the outermost _interpolation() closes the expression.
        if did_start_interpolating and self.state == STATE_INTERPOLATING:
            self.output.write(')) == null ? "" : __tmp);')
            self.state = STATE_DEFAULT

    yield close
    close()
@contextlib.contextmanager
def _scoped_variables(self, nodes_list, **kwargs):
    """
    Context manager for creating scoped variables defined by the nodes in `nodes_list`.
    These variables will be added to the context, and when the context manager exits the
    context object will be restored to it's previous state.
    """
    tmp_vars = []
    for node in nodes_list:
        is_assign_node = isinstance(node, nodes.Assign)
        name = node.target.name if is_assign_node else node.name

        # create a temp variable name
        tmp_var = next(self.temp_var_names)

        # save previous context value
        with self._execution():

            # save the current value of this name
            self.output.write('var %s = %s.%s;' % (tmp_var, self.context_name, name))

            # add new value to context
            self.output.write('%s.%s = ' % (self.context_name, name))

            if is_assign_node:
                # {% set x = expr %} -- emit the assigned expression
                self._process_node(node.node, **kwargs)
            else:
                # loop target -- the JS callback parameter of the same name
                self.output.write(node.name)

            self.output.write(';')

        tmp_vars.append((tmp_var, name))

    yield

    # restore context
    for tmp_var, name in tmp_vars:
        with self._execution():
            self.output.write('%s.%s = %s;' % (self.context_name, name, tmp_var))
@contextlib.contextmanager
def _python_bool_wrapper(self, **kwargs):
    """Optionally wrap the yielded expression in ``__runtime.boolean(...)``
    so truthiness matches Python semantics."""
    use_python_bool_wrapper = kwargs.get('use_python_bool_wrapper')

    if use_python_bool_wrapper:
        self.output.write('__runtime.boolean(')

    # child nodes must not double-wrap, so the flag is cleared for them
    with option(kwargs, use_python_bool_wrapper=False):
        yield kwargs

    if use_python_bool_wrapper:
        self.output.write(')')
|
jonbretman/jinja-to-js
|
jinja_to_js/__init__.py
|
JinjaToJS._add_dependency
|
python
|
def _add_dependency(self, dependency, var_name=None):
if var_name is None:
var_name = next(self.temp_var_names)
# Don't add duplicate dependencies
if (dependency, var_name) not in self.dependencies:
self.dependencies.append((dependency, var_name))
return var_name
|
Adds the given dependency and returns the variable name to use to access it. If `var_name`
is not given then a random one will be created.
Args:
    dependency (str): the import path or module name being depended on.
    var_name (str, optional): an explicit variable name to bind the dependency to.
Returns:
    str: the variable name under which the dependency can be accessed.
|
train
|
https://github.com/jonbretman/jinja-to-js/blob/0a784b10a83d37a3171c5797547e9fc460c51289/jinja_to_js/__init__.py#L285-L302
| null |
class JinjaToJS(object):
def __init__(self,
             template_root,
             template_name,
             js_module_format=None,
             runtime_path='jinja-to-js',
             include_prefix='',
             include_ext='',
             child_blocks=None,
             dependencies=None,
             custom_filters=None):
    """
    Args:
        template_root (str): The path to where templates should be loaded from.
        template_name (str): The name of the template to compile (relative to `template_root`).
        js_module_format (str, optional): The JavaScript module format to use.
                                          One of ('amd', 'commonjs', 'es6')
        runtime_path (str, optional): If `js_module_format` is specified then the JavaScript
                                      runtime will be imported using the appropriate method.
                                      It defaults to assuming it will be imported from
                                      `node_modules` but you can change it using this option.
        include_prefix (str, optional): If using the `amd` module format you can use this option
                                        to add a prefix to every include path as AMD imports are
                                        generally relative to the main file, not the module
                                        importing.
        include_ext (str, optional): By default any includes will be references without an
                                     extension, as neither AMD, commonJS or ES6 require the
                                     '.js' extension. If you want to use an extension, say
                                     '.template' then set this option to a string including
                                     the leading '.'
        child_blocks (dict, optional): Used internally when handling templates that extend
                                       other templates.
        dependencies (list of tuple, optional): Used internally when handling templates that
                                                extend other templates.
        custom_filters (list of str, optional): List of custom filters which should be allowed.
                                                These may be filters supported by Jinja but not
                                                supported by jinja-to-js. These filters MUST be
                                                registered with the jinja-to-js JS runtime.
    """
    self.environment = Environment(loader=FileSystemLoader(template_root),
                                   autoescape=True,
                                   extensions=['jinja2.ext.with_', 'jinja2.ext.autoescape'])
    self.output = six.StringIO()
    self.stored_names = set()
    self.temp_var_names = temp_var_names_generator()
    self.state = STATE_DEFAULT
    self.child_blocks = child_blocks or {}
    self.dependencies = dependencies or []
    self._runtime_function_cache = []
    self.js_module_format = js_module_format
    self.runtime_path = runtime_path
    self.include_prefix = include_prefix
    self.include_ext = include_ext
    self.template_root = template_root
    self.template_name = template_name
    self.custom_filters = custom_filters or []

    # The name of the JavaScript function that will output this template. By using a named
    # function the template can call itself which is required to support recursive includes.
    self.js_function_name = 'template' + ''.join(
        x.title() for x in re.split(r'[^\w]|_', path.splitext(self.template_name)[0])
    )

    self.context_name = 'context'

    # the runtime is always the first dependency, bound as `jinjaToJS`
    self._add_dependency(self.runtime_path, 'jinjaToJS')

    template_string, template_path, _ = self.environment.loader.get_source(
        self.environment, self.template_name
    )

    # It is assumed that this will be the absolute path to the template. It is used to work out
    # related paths for includes.
    self.template_path = template_path

    if self.js_module_format not in JS_MODULE_FORMATS.keys():
        raise ValueError(
            'The js_module_format option must be one of: %s' % JS_MODULE_FORMATS.keys()
        )

    self.ast = self.environment.parse(template_string)

    try:
        for node in self.ast.body:
            self._process_node(node)
    except ExtendsException:
        # raised by _process_extends once the parent template has been
        # fully emitted -- stop processing this template's own body
        pass
def get_output(self):
    """
    Returns the generated JavaScript code.

    Returns:
        str
    """
    # generate the JS function string
    template_function = TEMPLATE_WRAPPER.format(
        function_name=self.js_function_name,
        template_code=self.output.getvalue()
    ).strip()

    # get the correct module format template
    module_format = JS_MODULE_FORMATS[self.js_module_format]

    # generate the module code
    return module_format(self.dependencies, template_function)
def _get_depencency_var_name(self, dependency):
    """
    Returns the variable name assigned to the given dependency or None if the dependency has
    not yet been registered.

    Args:
        dependency (str): The dependency that needs to be imported.

    Returns:
        str or None
    """
    # falls through to an implicit `return None` when not found
    for dep_path, var_name in self.dependencies:
        if dep_path == dependency:
            return var_name
def _process_node(self, node, **kwargs):
node_name = node.__class__.__name__.lower()
handler = getattr(self, '_process_' + node_name, None)
if callable(handler):
handler(node, **kwargs)
else:
raise Exception('Unknown node %s' % node)
def _process_extends(self, node, **kwargs):
    """
    Processes an extends block e.g. `{% extends "some/template.jinja" %}`
    """
    # find all the blocks in this template
    for b in self.ast.find_all(nodes.Block):

        # if not already in `child_blocks` then this is the first time a
        # block with this name has been encountered.
        if b.name not in self.child_blocks:
            self.child_blocks[b.name] = b
        else:

            # otherwise we have seen this block before, so we need to find the last
            # super_block and add the block from this template to the end.
            block = self.child_blocks.get(b.name)
            while hasattr(block, 'super_block'):
                block = block.super_block
            block.super_block = b

    # load the parent template
    parent_template = JinjaToJS(template_root=self.template_root,
                                template_name=node.template.value,
                                js_module_format=self.js_module_format,
                                runtime_path=self.runtime_path,
                                include_prefix=self.include_prefix,
                                include_ext=self.include_ext,
                                child_blocks=self.child_blocks,
                                dependencies=self.dependencies)

    # add the parent templates output to the current output
    self.output.write(parent_template.output.getvalue())

    # Raise an exception so we stop parsing this template
    raise ExtendsException
def _process_block(self, node, **kwargs):
"""
Processes a block e.g. `{% block my_block %}{% endblock %}`
"""
# check if this node already has a 'super_block' attribute
if not hasattr(node, 'super_block'):
# since it doesn't it must be the last block in the inheritance chain
node.super_block = None
# see if there has been a child block defined - if there is this
# will be the first block in the inheritance chain
child_block = self.child_blocks.get(node.name)
if child_block:
# we have child nodes so we need to set `node` as the
# super of the last one in the chain
last_block = child_block
while hasattr(last_block, 'super_block'):
last_block = child_block.super_block
# once we have found it, set this node as it's super block
last_block.super_block = node
# this is the node we want to process as it's the first in the inheritance chain
node = child_block
# process the block passing the it's super along, if this block
# calls super() it will be handled by `_process_call`
for n in node.body:
self._process_node(n, super_block=node.super_block, **kwargs)
def _process_output(self, node, **kwargs):
"""
Processes an output node, which will contain things like `Name` and `TemplateData` nodes.
"""
for n in node.nodes:
self._process_node(n, **kwargs)
def _process_templatedata(self, node, **_):
"""
Processes a `TemplateData` node, this is just a bit of as-is text
to be written to the output.
"""
# escape double quotes
value = re.sub('"', r'\\"', node.data)
# escape new lines
value = re.sub('\n', r'\\n', value)
# append value to the result
self.output.write('__result += "' + value + '";')
def _process_name(self, node, **kwargs):
    """
    Processes a `Name` node. Some examples of `Name` nodes:
        {{ foo }} -> 'foo' is a Name
        {% if foo }} -> 'foo' is a Name
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs):
            # Names not created inside the template (via set/for) live on
            # the context object, so qualify the lookup.
            if node.name not in self.stored_names and node.ctx != 'store':
                self.output.write(self.context_name)
                self.output.write('.')

            # Remember names being assigned so later reads stay unqualified.
            if node.ctx == 'store':
                self.stored_names.add(node.name)

            self.output.write(node.name)
def _process_getattr(self, node, **kwargs):
    """
    Processes a `GetAttr` node. e.g. {{ foo.bar }}
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            if is_loop_helper(node):
                # loop.index / loop.first / ... have dedicated translations
                self._process_loop_helper(node, **new_kwargs)
            else:
                self._process_node(node.node, **new_kwargs)
                self.output.write('.')
                self.output.write(node.attr)
def _process_getitem(self, node, **kwargs):
    """
    Processes a `GetItem` node e.g. {{ foo["bar"] }}
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.node, **new_kwargs)

            if isinstance(node.arg, nodes.Slice):
                # Python slices become Array.prototype.slice calls
                self.output.write('.slice(')

                if node.arg.step is not None:
                    raise Exception('The step argument is not supported when slicing.')

                if node.arg.start is None:
                    self.output.write('0')
                else:
                    self._process_node(node.arg.start, **new_kwargs)

                if node.arg.stop is None:
                    self.output.write(')')
                else:
                    self.output.write(',')
                    self._process_node(node.arg.stop, **new_kwargs)
                    self.output.write(')')
            else:
                # plain subscript access
                self.output.write('[')
                self._process_node(node.arg, **new_kwargs)
                self.output.write(']')
def _process_for(self, node, **kwargs):
    """
    Processes a for loop. e.g.
        {% for number in numbers %}
            {{ number }}
        {% endfor %}
        {% for key, value in somemap.items() %}
            {{ key }} -> {{ value }}
        {% %}
    """
    # since a for loop can introduce new names into the context
    # we need to remember the ones that existed outside the loop
    previous_stored_names = self.stored_names.copy()

    with self._execution():
        self.output.write('__runtime.each(')

        # iterating dict.keys() -> iterate Object.keys(obj) instead
        if is_method_call(node.iter, dict.keys.__name__):
            self.output.write('Object.keys(')

        self._process_node(node.iter, **kwargs)

        if is_method_call(node.iter, dict.keys.__name__):
            self.output.write(')')

        self.output.write(',')
        self.output.write('function')
        self.output.write('(')

        # javascript iterations put the value first, then the key
        if isinstance(node.target, nodes.Tuple):
            if len(node.target.items) > 2:
                raise Exception('De-structuring more than 2 items is not supported.')

            for i, item in enumerate(reversed(node.target.items)):
                self._process_node(item, **kwargs)
                if i < len(node.target.items) - 1:
                    self.output.write(',')
        else:
            self._process_node(node.target, **kwargs)

        self.output.write(')')
        self.output.write('{')

        if node.test:
            # {% for x in xs if cond %} -- skip items failing the condition
            self.output.write('if (!(')
            self._process_node(node.test, **kwargs)
            self.output.write(')) { return; }')

    assigns = node.target.items if isinstance(node.target, nodes.Tuple) else [node.target]

    # the loop targets are scoped onto the context for the body only
    with self._scoped_variables(assigns, **kwargs):
        for n in node.body:
            self._process_node(n, **kwargs)

    with self._execution():
        self.output.write('}')
        self.output.write(')')
        self.output.write(';')

    # restore the stored names
    self.stored_names = previous_stored_names
def _process_if(self, node, execute_end=None, **kwargs):
    """
    Processes an if block e.g. `{% if foo %} do something {% endif %}`
    """
    with self._execution():
        self.output.write('if')
        self.output.write('(')

        # the condition gets Python truthiness via __runtime.boolean
        with option(kwargs, use_python_bool_wrapper=True):
            self._process_node(node.test, **kwargs)

        self.output.write(')')
        self.output.write('{')

        # We accept an `execute_end` function as a keyword argument as this function is
        # recursive in the case of something like if-elif-elif-else. In these cases this
        # invocation of this function may have to close execution opened by a previous
        # invocation of this function.
        if execute_end:
            execute_end()

    # body
    for n in node.body:
        self._process_node(n, **kwargs)

    if not node.else_ and not node.elif_:
        # no else - just close the if
        with self._execution():
            self.output.write('}')
    else:
        # either an else or an elif
        with self._execution() as execute_end:
            self.output.write('}')
            self.output.write(' else ')

            # check for elif
            for n in node.elif_:
                self._process_node(n, execute_end=execute_end, **kwargs)

            if node.elif_ and node.else_:
                self.output.write(' else ')

            # open up the body
            self.output.write('{')

        # process the body of the else
        for n in node.else_:
            self._process_node(n, **kwargs)

        # close the body
        with self._execution():
            self.output.write('}')
def _process_condexpr(self, node, **kwargs):
    """Emit ``{{ a if cond else b }}`` as a parenthesised JS ternary."""
    with self._interpolation():
        self.output.write('(')

        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.test, **new_kwargs)

        self.output.write(' ? ')
        # NOTE(review): the branches are processed with the original kwargs
        # rather than new_kwargs like the test -- presumably so each branch
        # keeps the caller's bool-wrapping flag; confirm before changing.
        self._process_node(node.expr1, **kwargs)
        self.output.write(' : ')
        self._process_node(node.expr2, **kwargs)
        self.output.write(')')
def _process_not(self, node, **kwargs):
    """Emit a JS ``!`` negation; the operand is bool-wrapped first."""
    self.output.write('!')

    with self._python_bool_wrapper(**kwargs) as new_kwargs:
        self._process_node(node.node, **new_kwargs)
def _process_or(self, node, **kwargs):
    """Emit ``a or b`` as JS ``a || b``."""
    self._process_node(node.left, **kwargs)
    self.output.write(' || ')
    self._process_node(node.right, **kwargs)
def _process_and(self, node, **kwargs):
    """Emit ``a and b`` as JS ``a && b``."""
    self._process_node(node.left, **kwargs)
    self.output.write(' && ')
    self._process_node(node.right, **kwargs)
def _process_tuple(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
def _process_call(self, node, super_block=None, **kwargs):
    """Translate a call expression; dict-iteration methods and ``super()``
    get special handling."""
    if is_method_call(node, DICT_ITER_METHODS):
        # special case for dict methods -- emit the dict itself
        self._process_node(node.node.node, **kwargs)

    elif is_method_call(node, 'super'):
        # special case for the super() method which is available inside blocks
        if not super_block:
            raise Exception('super() called outside of a block with a parent.')
        self._process_node(super_block, **kwargs)

    else:
        # just a normal function call on a context variable
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self._process_node(node.node, **new_kwargs)
                self.output.write('(')
                self._process_args(node, **new_kwargs)
                self.output.write(')')

            # only output the semi-colon if we are not interpolating
            if self.state != STATE_INTERPOLATING:
                # NOTE(review): this writes an empty string, making the
                # branch a no-op; the comment above suggests ';' was
                # intended -- verify against the expected generated JS
                # before changing.
                self.output.write('')
def _process_filter(self, node, **kwargs):
    """Dispatch ``{{ x|filter }}`` to a built-in handler, or to a
    registered custom filter resolved at runtime; unknown filters raise."""
    method_name = getattr(self, '_process_filter_%s' % node.name, None)
    if callable(method_name):
        method_name(node, **kwargs)
    elif node.name in self.custom_filters:
        # custom filters are looked up on the runtime's __filters object
        with self._interpolation(safe=True):
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self.output.write('__filters.%s(' % node.name)
                self._process_node(node.node, **new_kwargs)
                if getattr(node, 'args', None):
                    self.output.write(',')
                    self._process_args(node, **new_kwargs)
                self.output.write(')')
    else:
        raise Exception('Unsupported filter: %s' % node.name)
def _process_filter_safe(self, node, **kwargs):
    # |safe -- interpolate without HTML escaping
    with self._interpolation(safe=True):
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.node, **new_kwargs)

def _process_filter_capitalize(self, node, **kwargs):
    # |capitalize -> __filters.capitalize(value)
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.capitalize(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(')')

def _process_filter_abs(self, node, **kwargs):
    # |abs -> Math.abs(value)
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('Math.abs(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(')')

def _process_filter_attr(self, node, **kwargs):
    # |attr('name') -> value[name]
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.node, **new_kwargs)
            self.output.write('[')
            self._process_node(node.args[0], **new_kwargs)
            self.output.write(']')

def _process_filter_batch(self, node, **kwargs):
    # |batch(n, fill) -> __filters.batch(value, n, fill)
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.batch(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(',')
            self._process_args(node, **new_kwargs)
            self.output.write(')')

def _process_filter_default(self, node, **kwargs):
    # |default(d) -> __filters.default(value[, d])
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.default(')
            self._process_node(node.node, **new_kwargs)
            if node.args:
                self.output.write(',')
                self._process_args(node, **new_kwargs)
            self.output.write(')')

def _process_filter_first(self, node, **kwargs):
    # |first -> __filters.first(value)
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.first(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(')')

def _process_filter_int(self, node, **kwargs):
    # |int(default) -> __filters.int(value[, default])
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.int(')
            self._process_node(node.node, **new_kwargs)
            if node.args:
                self.output.write(',')
                self._process_args(node, **new_kwargs)
            self.output.write(')')

def _process_filter_last(self, node, **kwargs):
    # |last -> __filters.last(value)
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.last(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(')')

def _process_filter_length(self, node, **kwargs):
    # |length -> __filters.size(value)
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.size(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(')')

def _process_filter_lower(self, node, **kwargs):
    # |lower -> (value + "").toLowerCase()
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(' + "").toLowerCase()')

def _process_filter_slice(self, node, **kwargs):
    # |slice(n) -> __filters.slice(value, n)
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.slice(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(',')
            self._process_args(node, **new_kwargs)
            self.output.write(')')

def _process_filter_title(self, node, **kwargs):
    # |title -> __filters.title(value)
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.title(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(')')

def _process_filter_trim(self, node, **kwargs):
    # |trim -> (value + "").trim()
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(' + "").trim()')

def _process_filter_upper(self, node, **kwargs):
    # |upper -> (value + "").toUpperCase()
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(' + "").toUpperCase()')

def _process_filter_truncate(self, node, **kwargs):
    # |truncate(n) -> __filters.truncate(value, n)
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.truncate(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(',')
            self._process_args(node, **new_kwargs)
            self.output.write(')')
def _process_assign(self, node, **kwargs):
    """Emit ``{% set x = expr %}`` as a JS ``var`` declaration."""
    with self._execution():
        self.output.write('var ')
        self._process_node(node.target, **kwargs)
        self.output.write(' = ')
        self._process_node(node.node, **kwargs)
        self.output.write(';')
def _process_with(self, node, **kwargs):
    """Emit a ``{% with %}`` scope as an immediately-invoked function."""
    # keep a copy of the stored names before the scope
    previous_stored_names = self.stored_names.copy()

    # assignments declared on the tag itself, e.g. {% with var = "something" %}
    assigns_in_tag = [nodes.Assign(target, value)
                      for target, value in zip(node.targets, node.values)]

    # assignments made inside the body, e.g. {% set name = 'John' %}
    assigns_in_body = [child for child in node.body if isinstance(child, nodes.Assign)]

    # strip the assignments out of the body before processing it
    node.body = [child for child in node.body if not isinstance(child, nodes.Assign)]

    # every assignment that must be scoped to this block,
    # both on the tag and within the body of the block
    all_assigns = assigns_in_tag + assigns_in_body

    with self._execution():
        self.output.write('(function () {')

    with self._scoped_variables(all_assigns, **kwargs):
        for child in node.body:
            self._process_node(child, **kwargs)

    with self._execution():
        self.output.write('})();')

    # restore previous stored names
    self.stored_names = previous_stored_names
def _process_compare(self, node, **kwargs):
    """Emit a single comparison; non-constant equality goes through the
    runtime's deep-equality helper."""
    if len(node.ops) > 1:
        raise Exception('Multiple operands are not supported.')

    operand = node.ops[0]
    is_equality = operand.op in ('eq', 'ne')
    left_hand_is_const = isinstance(node.expr, nodes.Const)
    right_hand_is_const = isinstance(operand.expr, nodes.Const)

    # If the operand is equality and neither the left or right hand side are constants then we
    # will need to use the JavaScript deep equals function. Ideally we want to avoid using this
    # as it is quite a big function.
    use_is_equal_function = is_equality and not (left_hand_is_const or right_hand_is_const)

    with option(kwargs, use_python_bool_wrapper=False):

        if use_is_equal_function:
            if operand.op == 'ne':
                self.output.write('!')
            self.output.write('__runtime.isEqual(')

        self._process_node(node.expr, **kwargs)

        if use_is_equal_function:
            self.output.write(',')
        else:
            # plain JS comparison operator from the OPERANDS table
            self.output.write(OPERANDS.get(operand.op))

        self._process_node(operand.expr, **kwargs)

        if use_is_equal_function:
            self.output.write(')')
def _process_operand(self, node, **kwargs):
    """Emit a comparison operand: the mapped JS operator then the expression."""
    self.output.write(OPERANDS.get(node.op))
    self._process_node(node.expr, **kwargs)
def _process_const(self, node, **_):
with self._interpolation():
self.output.write(json.dumps(node.value))
def _process_nonetype(self, node, **_):
with self._interpolation():
self.output.write('null')
def _process_neg(self, node, **kwargs):
with self._interpolation():
self.output.write('-')
self._process_node(node.node, **kwargs)
def _process_list(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
def _process_test(self, node, **kwargs):
with option(kwargs, use_python_bool_wrapper=False):
method_name = getattr(self, '_process_test_%s' % node.name, None)
if callable(method_name):
method_name(node, **kwargs)
else:
raise Exception('Unsupported test: %s' % node.name)
def _process_test_defined(self, node, **kwargs):
self.output.write('(typeof ')
self._process_node(node.node, **kwargs)
self.output.write(' !== "undefined")')
def _process_test_undefined(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' === undefined')
def _process_test_callable(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Function"')
def _process_test_divisibleby(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % ')
self._process_node(node.args[0], **kwargs)
self.output.write(' === 0')
def _process_test_even(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % 2 === 0')
def _process_test_odd(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % 2 === 1')
def _process_test_none(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' === null')
def _process_test_upper(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write('.toUpperCase() === ')
self._process_node(node.node, **kwargs)
def _process_test_lower(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write('.toLowerCase() === ')
self._process_node(node.node, **kwargs)
def _process_test_string(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "String"')
def _process_test_mapping(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Object"')
def _process_test_number(self, node, **kwargs):
self.output.write('(__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Number" && !isNaN(')
self._process_node(node.node, **kwargs)
self.output.write('))')
def _process_include(self, node, **kwargs):
with self._interpolation(safe=True):
include_path = node.template.value
if include_path == self.template_name:
# template is including itself
include_var_name = self.js_function_name
else:
if self.include_prefix:
include_path = self.include_prefix + node.template.value
elif self.js_module_format in ('es6', 'commonjs',) and self.template_name:
_, absolute_include_path, _ = self.environment.loader.get_source(
self.environment, node.template.value
)
include_path = os.path.relpath(
absolute_include_path, os.path.dirname(self.template_path)
)
if not include_path.startswith('.'):
include_path = './' + include_path
include_path = path.splitext(include_path)[0] + self.include_ext
include_var_name = self._get_depencency_var_name(include_path)
if not include_var_name:
include_var_name = self._add_dependency(include_path)
if self.js_module_format is None:
self.output.write('jinjaToJS.include("')
self.output.write(include_path)
self.output.write('");')
else:
self.output.write(include_var_name)
self.output.write('(')
self.output.write(self.context_name)
self.output.write(')')
def _process_add(self, node, **kwargs):
self._process_math(node, math_operator=' + ', **kwargs)
def _process_sub(self, node, **kwargs):
self._process_math(node, math_operator=' - ', **kwargs)
def _process_div(self, node, **kwargs):
self._process_math(node, math_operator=' / ', **kwargs)
def _process_floordiv(self, node, **kwargs):
self._process_math(node, math_operator=' / ', function='Math.floor', **kwargs)
def _process_mul(self, node, **kwargs):
self._process_math(node, math_operator=' * ', **kwargs)
def _process_mod(self, node, **kwargs):
self._process_math(node, math_operator=' % ', **kwargs)
def _process_math(self, node, math_operator=None, function=None, **kwargs):
"""
Processes a math node e.g. `Div`, `Sub`, `Add`, `Mul` etc...
If `function` is provided the expression is wrapped in a call to that function.
"""
with self._interpolation():
if function:
self.output.write(function)
self.output.write('(')
self._process_node(node.left, **kwargs)
self.output.write(math_operator)
self._process_node(node.right, **kwargs)
if function:
self.output.write(')')
def _process_loop_helper(self, node, **kwargs):
"""
Processes a loop helper e.g. {{ loop.first }} or {{ loop.index }}
"""
if node.attr == LOOP_HELPER_INDEX:
self.output.write('(arguments[1] + 1)')
elif node.attr == LOOP_HELPER_INDEX_0:
self.output.write('arguments[1]')
elif node.attr == LOOP_HELPER_FIRST:
self.output.write('(arguments[1] == 0)')
elif node.attr == LOOP_HELPER_LAST:
self.output.write('(arguments[1] == arguments[2].length - 1)')
elif node.attr == LOOP_HELPER_LENGTH:
self.output.write('arguments[2].length')
def _process_args(self, node, **kwargs):
args = getattr(node, 'args', None)
if not args:
return
for i, item in enumerate(args):
self._process_node(item, **kwargs)
if i < len(node.args) - 1:
self.output.write(',')
    @contextlib.contextmanager
    def _execution(self):
        """
        Context manager for executing some JavaScript inside a template.

        Yields a ``close`` callable so nested constructs (e.g. if/else
        chains) can end the execution region early themselves.
        """
        # Only the outermost entry flips the state machine; nested uses are
        # no-ops so the enter/exit transitions stay balanced.
        did_start_executing = False
        if self.state == STATE_DEFAULT:
            did_start_executing = True
            self.state = STATE_EXECUTING
        def close():
            # Restore the default state only if this invocation was the one
            # that entered STATE_EXECUTING and nothing has closed it already.
            if did_start_executing and self.state == STATE_EXECUTING:
                self.state = STATE_DEFAULT
        yield close
        # Idempotent: a second call after an early close() is a no-op.
        close()
    @contextlib.contextmanager
    def _interpolation(self, safe=False):
        """
        Context manager for interpolating an expression into the output.

        Wraps whatever the caller writes so the generated JS reads
        ``__result += "" + __runtime.escape((__tmp = (<expr>)) == null ? "" : __tmp);``
        — i.e. null/undefined render as the empty string. When ``safe`` is
        True the ``__runtime.escape`` call is omitted (|safe semantics).
        """
        # Only the outermost entry emits the prefix/suffix; nested
        # interpolations write their expression text directly.
        did_start_interpolating = False
        if self.state == STATE_DEFAULT:
            did_start_interpolating = True
            self.output.write('__result += "" + ')
            if safe is not True:
                # HTML-escape by default; skipped for the |safe filter.
                self.output.write('__runtime.escape')
            self.output.write('((__tmp = (')
            self.state = STATE_INTERPOLATING
        def close():
            # Emit the closing fragment only for the outermost interpolation.
            if did_start_interpolating and self.state == STATE_INTERPOLATING:
                self.output.write(')) == null ? "" : __tmp);')
                self.state = STATE_DEFAULT
        yield close
        close()
    @contextlib.contextmanager
    def _scoped_variables(self, nodes_list, **kwargs):
        """
        Context manager for creating scoped variables defined by the nodes in `nodes_list`.
        These variables will be added to the context, and when the context manager exits the
        context object will be restored to its previous state.

        Args:
            nodes_list: ``nodes.Assign`` nodes (``{% set x = ... %}``) and/or
                bare target nodes with a ``name`` (e.g. loop variables).
        """
        tmp_vars = []
        for node in nodes_list:
            is_assign_node = isinstance(node, nodes.Assign)
            name = node.target.name if is_assign_node else node.name
            # create a temp variable name
            tmp_var = next(self.temp_var_names)
            # save previous context value
            with self._execution():
                # save the current value of this name
                self.output.write('var %s = %s.%s;' % (tmp_var, self.context_name, name))
                # add new value to context
                self.output.write('%s.%s = ' % (self.context_name, name))
                if is_assign_node:
                    # {% set x = expr %} -> evaluate the assigned expression
                    self._process_node(node.node, **kwargs)
                else:
                    # bare target (e.g. loop variable) -> reference it directly
                    self.output.write(node.name)
                self.output.write(';')
            tmp_vars.append((tmp_var, name))
        yield
        # restore context
        for tmp_var, name in tmp_vars:
            with self._execution():
                self.output.write('%s.%s = %s;' % (self.context_name, name, tmp_var))
    @contextlib.contextmanager
    def _python_bool_wrapper(self, **kwargs):
        """
        Context manager that optionally wraps the emitted expression in
        ``__runtime.boolean(...)`` so the generated JavaScript applies
        Python-style truthiness (see ``boolean`` in the JS runtime).

        Yields the kwargs with ``use_python_bool_wrapper`` forced to False so
        nested sub-expressions are not double-wrapped.
        """
        use_python_bool_wrapper = kwargs.get('use_python_bool_wrapper')
        if use_python_bool_wrapper:
            self.output.write('__runtime.boolean(')
        with option(kwargs, use_python_bool_wrapper=False):
            yield kwargs
        if use_python_bool_wrapper:
            self.output.write(')')
|
jonbretman/jinja-to-js
|
jinja_to_js/__init__.py
|
JinjaToJS._process_extends
|
python
|
def _process_extends(self, node, **kwargs):
# find all the blocks in this template
for b in self.ast.find_all(nodes.Block):
# if not already in `child_blocks` then this is the first time a
# block with this name has been encountered.
if b.name not in self.child_blocks:
self.child_blocks[b.name] = b
else:
# otherwise we have seen this block before, so we need to find the last
# super_block and add the block from this template to the end.
block = self.child_blocks.get(b.name)
while hasattr(block, 'super_block'):
block = block.super_block
block.super_block = b
# load the parent template
parent_template = JinjaToJS(template_root=self.template_root,
template_name=node.template.value,
js_module_format=self.js_module_format,
runtime_path=self.runtime_path,
include_prefix=self.include_prefix,
include_ext=self.include_ext,
child_blocks=self.child_blocks,
dependencies=self.dependencies)
# add the parent templates output to the current output
self.output.write(parent_template.output.getvalue())
# Raise an exception so we stop parsing this template
raise ExtendsException
|
Processes an extends block e.g. `{% extends "some/template.jinja" %}`
|
train
|
https://github.com/jonbretman/jinja-to-js/blob/0a784b10a83d37a3171c5797547e9fc460c51289/jinja_to_js/__init__.py#L312-L347
| null |
class JinjaToJS(object):
def __init__(self,
template_root,
template_name,
js_module_format=None,
runtime_path='jinja-to-js',
include_prefix='',
include_ext='',
child_blocks=None,
dependencies=None,
custom_filters=None):
"""
Args:
template_root (str): The path to where templates should be loaded from.
template_name (str): The name of the template to compile (relative to `template_root`).
js_module_format (str, optional): The JavaScript module format to use.
One of ('amd', 'commonjs', 'es6')
runtime_path (str, optional): If `js_module_format` is specified then the JavaScript
runtime will be imported using the appropriate method.
It defaults to assuming it will be imported from
`node_modules` but you can change it using this option.
include_prefix (str, optional): If using the `amd` module format you can use this option
to add a prefix to every include path as AMD imports are
generally relative to the main file, not the module
importing.
include_ext (str, optional): By default any includes will be references without an
extension, as neither AMD, commonJS or ES6 require the
'.js' extension. If you want to use an extension, say
'.template' then set this option to a string including
the leading '.'
child_blocks (dict, optional): Used internally when handling templates that extend
other templates.
dependencies (list of tuple, optional): Used internally when handling templates that
extend other templates.
custom_filters (list of str, optional): List of custom filters which should be allowed.
These may be filters supported by Jinja but not
supported by jinja-to-js. These filters MUST be
registered with the jinja-to-js JS runtime.
"""
self.environment = Environment(loader=FileSystemLoader(template_root),
autoescape=True,
extensions=['jinja2.ext.with_', 'jinja2.ext.autoescape'])
self.output = six.StringIO()
self.stored_names = set()
self.temp_var_names = temp_var_names_generator()
self.state = STATE_DEFAULT
self.child_blocks = child_blocks or {}
self.dependencies = dependencies or []
self._runtime_function_cache = []
self.js_module_format = js_module_format
self.runtime_path = runtime_path
self.include_prefix = include_prefix
self.include_ext = include_ext
self.template_root = template_root
self.template_name = template_name
self.custom_filters = custom_filters or []
# The name of the JavaScript function that will output this template. By using a named
# function the template can call itself which is required to support recursive includes.
self.js_function_name = 'template' + ''.join(
x.title() for x in re.split(r'[^\w]|_', path.splitext(self.template_name)[0])
)
self.context_name = 'context'
self._add_dependency(self.runtime_path, 'jinjaToJS')
template_string, template_path, _ = self.environment.loader.get_source(
self.environment, self.template_name
)
# It is assumed that this will be the absolute path to the template. It is used to work out
# related paths for inclues.
self.template_path = template_path
if self.js_module_format not in JS_MODULE_FORMATS.keys():
raise ValueError(
'The js_module_format option must be one of: %s' % JS_MODULE_FORMATS.keys()
)
self.ast = self.environment.parse(template_string)
try:
for node in self.ast.body:
self._process_node(node)
except ExtendsException:
pass
    def get_output(self):
        """
        Returns the generated JavaScript code.

        Wraps the accumulated template body in a named JS function and then
        hands it to the emitter selected by ``self.js_module_format``
        (a key of ``JS_MODULE_FORMATS``; validated in ``__init__``).

        Returns:
            str: The complete JavaScript module source.
        """
        # generate the JS function string
        template_function = TEMPLATE_WRAPPER.format(
            function_name=self.js_function_name,
            template_code=self.output.getvalue()
        ).strip()
        # get the correct module format template
        module_format = JS_MODULE_FORMATS[self.js_module_format]
        # generate the module code
        return module_format(self.dependencies, template_function)
def _get_depencency_var_name(self, dependency):
"""
Returns the variable name assigned to the given dependency or None if the dependency has
not yet been registered.
Args:
dependency (str): Thet dependency that needs to be imported.
Returns:
str or None
"""
for dep_path, var_name in self.dependencies:
if dep_path == dependency:
return var_name
def _add_dependency(self, dependency, var_name=None):
"""
Adds the given dependency and returns the variable name to use to access it. If `var_name`
is not given then a random one will be created.
Args:
dependency (str):
var_name (str, optional):
Returns:
str
"""
if var_name is None:
var_name = next(self.temp_var_names)
# Don't add duplicate dependencies
if (dependency, var_name) not in self.dependencies:
self.dependencies.append((dependency, var_name))
return var_name
def _process_node(self, node, **kwargs):
node_name = node.__class__.__name__.lower()
handler = getattr(self, '_process_' + node_name, None)
if callable(handler):
handler(node, **kwargs)
else:
raise Exception('Unknown node %s' % node)
def _process_block(self, node, **kwargs):
"""
Processes a block e.g. `{% block my_block %}{% endblock %}`
"""
# check if this node already has a 'super_block' attribute
if not hasattr(node, 'super_block'):
# since it doesn't it must be the last block in the inheritance chain
node.super_block = None
# see if there has been a child block defined - if there is this
# will be the first block in the inheritance chain
child_block = self.child_blocks.get(node.name)
if child_block:
# we have child nodes so we need to set `node` as the
# super of the last one in the chain
last_block = child_block
while hasattr(last_block, 'super_block'):
last_block = child_block.super_block
# once we have found it, set this node as it's super block
last_block.super_block = node
# this is the node we want to process as it's the first in the inheritance chain
node = child_block
# process the block passing the it's super along, if this block
# calls super() it will be handled by `_process_call`
for n in node.body:
self._process_node(n, super_block=node.super_block, **kwargs)
def _process_output(self, node, **kwargs):
"""
Processes an output node, which will contain things like `Name` and `TemplateData` nodes.
"""
for n in node.nodes:
self._process_node(n, **kwargs)
def _process_templatedata(self, node, **_):
"""
Processes a `TemplateData` node, this is just a bit of as-is text
to be written to the output.
"""
# escape double quotes
value = re.sub('"', r'\\"', node.data)
# escape new lines
value = re.sub('\n', r'\\n', value)
# append value to the result
self.output.write('__result += "' + value + '";')
def _process_name(self, node, **kwargs):
"""
Processes a `Name` node. Some examples of `Name` nodes:
{{ foo }} -> 'foo' is a Name
{% if foo }} -> 'foo' is a Name
"""
with self._interpolation():
with self._python_bool_wrapper(**kwargs):
if node.name not in self.stored_names and node.ctx != 'store':
self.output.write(self.context_name)
self.output.write('.')
if node.ctx == 'store':
self.stored_names.add(node.name)
self.output.write(node.name)
def _process_getattr(self, node, **kwargs):
"""
Processes a `GetAttr` node. e.g. {{ foo.bar }}
"""
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
if is_loop_helper(node):
self._process_loop_helper(node, **new_kwargs)
else:
self._process_node(node.node, **new_kwargs)
self.output.write('.')
self.output.write(node.attr)
def _process_getitem(self, node, **kwargs):
"""
Processes a `GetItem` node e.g. {{ foo["bar"] }}
"""
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
if isinstance(node.arg, nodes.Slice):
self.output.write('.slice(')
if node.arg.step is not None:
raise Exception('The step argument is not supported when slicing.')
if node.arg.start is None:
self.output.write('0')
else:
self._process_node(node.arg.start, **new_kwargs)
if node.arg.stop is None:
self.output.write(')')
else:
self.output.write(',')
self._process_node(node.arg.stop, **new_kwargs)
self.output.write(')')
else:
self.output.write('[')
self._process_node(node.arg, **new_kwargs)
self.output.write(']')
def _process_for(self, node, **kwargs):
"""
Processes a for loop. e.g.
{% for number in numbers %}
{{ number }}
{% endfor %}
{% for key, value in somemap.items() %}
{{ key }} -> {{ value }}
{% %}
"""
# since a for loop can introduce new names into the context
# we need to remember the ones that existed outside the loop
previous_stored_names = self.stored_names.copy()
with self._execution():
self.output.write('__runtime.each(')
if is_method_call(node.iter, dict.keys.__name__):
self.output.write('Object.keys(')
self._process_node(node.iter, **kwargs)
if is_method_call(node.iter, dict.keys.__name__):
self.output.write(')')
self.output.write(',')
self.output.write('function')
self.output.write('(')
# javascript iterations put the value first, then the key
if isinstance(node.target, nodes.Tuple):
if len(node.target.items) > 2:
raise Exception('De-structuring more than 2 items is not supported.')
for i, item in enumerate(reversed(node.target.items)):
self._process_node(item, **kwargs)
if i < len(node.target.items) - 1:
self.output.write(',')
else:
self._process_node(node.target, **kwargs)
self.output.write(')')
self.output.write('{')
if node.test:
self.output.write('if (!(')
self._process_node(node.test, **kwargs)
self.output.write(')) { return; }')
assigns = node.target.items if isinstance(node.target, nodes.Tuple) else [node.target]
with self._scoped_variables(assigns, **kwargs):
for n in node.body:
self._process_node(n, **kwargs)
with self._execution():
self.output.write('}')
self.output.write(')')
self.output.write(';')
# restore the stored names
self.stored_names = previous_stored_names
def _process_if(self, node, execute_end=None, **kwargs):
"""
Processes an if block e.g. `{% if foo %} do something {% endif %}`
"""
with self._execution():
self.output.write('if')
self.output.write('(')
with option(kwargs, use_python_bool_wrapper=True):
self._process_node(node.test, **kwargs)
self.output.write(')')
self.output.write('{')
# We accept an `execute_end` function as a keyword argument as this function is
# recursive in the case of something like if-elif-elif-else. In these cases this
# invocation of this function may have to close execution opened by a previous
# invocation of this function.
if execute_end:
execute_end()
# body
for n in node.body:
self._process_node(n, **kwargs)
if not node.else_ and not node.elif_:
# no else - just close the if
with self._execution():
self.output.write('}')
else:
# either an else or an elif
with self._execution() as execute_end:
self.output.write('}')
self.output.write(' else ')
# check for elif
for n in node.elif_:
self._process_node(n, execute_end=execute_end, **kwargs)
if node.elif_ and node.else_:
self.output.write(' else ')
# open up the body
self.output.write('{')
# process the body of the else
for n in node.else_:
self._process_node(n, **kwargs)
# close the body
with self._execution():
self.output.write('}')
def _process_condexpr(self, node, **kwargs):
with self._interpolation():
self.output.write('(')
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.test, **new_kwargs)
self.output.write(' ? ')
self._process_node(node.expr1, **kwargs)
self.output.write(' : ')
self._process_node(node.expr2, **kwargs)
self.output.write(')')
def _process_not(self, node, **kwargs):
self.output.write('!')
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
def _process_or(self, node, **kwargs):
self._process_node(node.left, **kwargs)
self.output.write(' || ')
self._process_node(node.right, **kwargs)
def _process_and(self, node, **kwargs):
self._process_node(node.left, **kwargs)
self.output.write(' && ')
self._process_node(node.right, **kwargs)
def _process_tuple(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
def _process_call(self, node, super_block=None, **kwargs):
if is_method_call(node, DICT_ITER_METHODS):
# special case for dict methods
self._process_node(node.node.node, **kwargs)
elif is_method_call(node, 'super'):
# special case for the super() method which is available inside blocks
if not super_block:
raise Exception('super() called outside of a block with a parent.')
self._process_node(super_block, **kwargs)
else:
# just a normal function call on a context variable
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
self.output.write('(')
self._process_args(node, **new_kwargs)
self.output.write(')')
# only output the semi-colon if we are not interpolating
if self.state != STATE_INTERPOLATING:
self.output.write('')
def _process_filter(self, node, **kwargs):
method_name = getattr(self, '_process_filter_%s' % node.name, None)
if callable(method_name):
method_name(node, **kwargs)
elif node.name in self.custom_filters:
with self._interpolation(safe=True):
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.%s(' % node.name)
self._process_node(node.node, **new_kwargs)
if getattr(node, 'args', None):
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
else:
raise Exception('Unsupported filter: %s' % node.name)
def _process_filter_safe(self, node, **kwargs):
with self._interpolation(safe=True):
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
def _process_filter_capitalize(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.capitalize(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_abs(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('Math.abs(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_attr(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
self.output.write('[')
self._process_node(node.args[0], **new_kwargs)
self.output.write(']')
def _process_filter_batch(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.batch(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_default(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.default(')
self._process_node(node.node, **new_kwargs)
if node.args:
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_first(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.first(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_int(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.int(')
self._process_node(node.node, **new_kwargs)
if node.args:
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_last(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.last(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_length(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.size(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_lower(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").toLowerCase()')
def _process_filter_slice(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.slice(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_title(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.title(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_trim(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").trim()')
def _process_filter_upper(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").toUpperCase()')
def _process_filter_truncate(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.truncate(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_assign(self, node, **kwargs):
with self._execution():
self.output.write('var ')
self._process_node(node.target, **kwargs)
self.output.write(' = ')
self._process_node(node.node, **kwargs)
self.output.write(';')
def _process_with(self, node, **kwargs):
# keep a copy of the stored names before the scope
previous_stored_names = self.stored_names.copy()
# assigns in the with tag
# e.g. {% with var = "something %}
assigns_in_tag = [nodes.Assign(t, v) for t, v in zip(node.targets, node.values)]
# assigns in the with body
# e.g. {% set name = 'John' %}
assigns_in_body = [x for x in node.body if isinstance(x, nodes.Assign)]
# remove assigns from the body
node.body = [x for x in node.body if not isinstance(x, nodes.Assign)]
# get a list of all the assigns in this with block
# both on the tag, and within the body of the block
all_assigns = assigns_in_tag + assigns_in_body
with self._execution():
self.output.write('(function () {')
with self._scoped_variables(all_assigns, **kwargs):
for node in node.body:
self._process_node(node, **kwargs)
with self._execution():
self.output.write('})();')
# restore previous stored names
self.stored_names = previous_stored_names
def _process_compare(self, node, **kwargs):
if len(node.ops) > 1:
raise Exception('Multiple operands are not supported.')
operand = node.ops[0]
is_equality = operand.op in ('eq', 'ne')
left_hand_is_const = isinstance(node.expr, nodes.Const)
right_hand_is_const = isinstance(operand.expr, nodes.Const)
# If the operand is equality and neither the left or right hand side are constants then we
# will need to use the JavaScript deep equals function. Ideally we want to avoid using this
# as it is quite a big function.
use_is_equal_function = is_equality and not (left_hand_is_const or right_hand_is_const)
with option(kwargs, use_python_bool_wrapper=False):
if use_is_equal_function:
if operand.op == 'ne':
self.output.write('!')
self.output.write('__runtime.isEqual(')
self._process_node(node.expr, **kwargs)
if use_is_equal_function:
self.output.write(',')
else:
self.output.write(OPERANDS.get(operand.op))
self._process_node(operand.expr, **kwargs)
if use_is_equal_function:
self.output.write(')')
def _process_operand(self, node, **kwargs):
self.output.write(OPERANDS.get(node.op))
self._process_node(node.expr, **kwargs)
def _process_const(self, node, **_):
with self._interpolation():
self.output.write(json.dumps(node.value))
def _process_nonetype(self, node, **_):
with self._interpolation():
self.output.write('null')
def _process_neg(self, node, **kwargs):
with self._interpolation():
self.output.write('-')
self._process_node(node.node, **kwargs)
def _process_list(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
def _process_test(self, node, **kwargs):
with option(kwargs, use_python_bool_wrapper=False):
method_name = getattr(self, '_process_test_%s' % node.name, None)
if callable(method_name):
method_name(node, **kwargs)
else:
raise Exception('Unsupported test: %s' % node.name)
def _process_test_defined(self, node, **kwargs):
self.output.write('(typeof ')
self._process_node(node.node, **kwargs)
self.output.write(' !== "undefined")')
def _process_test_undefined(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' === undefined')
def _process_test_callable(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Function"')
def _process_test_divisibleby(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % ')
self._process_node(node.args[0], **kwargs)
self.output.write(' === 0')
def _process_test_even(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % 2 === 0')
def _process_test_odd(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % 2 === 1')
def _process_test_none(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' === null')
def _process_test_upper(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write('.toUpperCase() === ')
self._process_node(node.node, **kwargs)
def _process_test_lower(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write('.toLowerCase() === ')
self._process_node(node.node, **kwargs)
def _process_test_string(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "String"')
def _process_test_mapping(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Object"')
def _process_test_number(self, node, **kwargs):
self.output.write('(__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Number" && !isNaN(')
self._process_node(node.node, **kwargs)
self.output.write('))')
def _process_include(self, node, **kwargs):
with self._interpolation(safe=True):
include_path = node.template.value
if include_path == self.template_name:
# template is including itself
include_var_name = self.js_function_name
else:
if self.include_prefix:
include_path = self.include_prefix + node.template.value
elif self.js_module_format in ('es6', 'commonjs',) and self.template_name:
_, absolute_include_path, _ = self.environment.loader.get_source(
self.environment, node.template.value
)
include_path = os.path.relpath(
absolute_include_path, os.path.dirname(self.template_path)
)
if not include_path.startswith('.'):
include_path = './' + include_path
include_path = path.splitext(include_path)[0] + self.include_ext
include_var_name = self._get_depencency_var_name(include_path)
if not include_var_name:
include_var_name = self._add_dependency(include_path)
if self.js_module_format is None:
self.output.write('jinjaToJS.include("')
self.output.write(include_path)
self.output.write('");')
else:
self.output.write(include_var_name)
self.output.write('(')
self.output.write(self.context_name)
self.output.write(')')
def _process_add(self, node, **kwargs):
self._process_math(node, math_operator=' + ', **kwargs)
def _process_sub(self, node, **kwargs):
self._process_math(node, math_operator=' - ', **kwargs)
def _process_div(self, node, **kwargs):
self._process_math(node, math_operator=' / ', **kwargs)
def _process_floordiv(self, node, **kwargs):
self._process_math(node, math_operator=' / ', function='Math.floor', **kwargs)
def _process_mul(self, node, **kwargs):
self._process_math(node, math_operator=' * ', **kwargs)
def _process_mod(self, node, **kwargs):
self._process_math(node, math_operator=' % ', **kwargs)
def _process_math(self, node, math_operator=None, function=None, **kwargs):
"""
Processes a math node e.g. `Div`, `Sub`, `Add`, `Mul` etc...
If `function` is provided the expression is wrapped in a call to that function.
"""
with self._interpolation():
if function:
self.output.write(function)
self.output.write('(')
self._process_node(node.left, **kwargs)
self.output.write(math_operator)
self._process_node(node.right, **kwargs)
if function:
self.output.write(')')
def _process_loop_helper(self, node, **kwargs):
"""
Processes a loop helper e.g. {{ loop.first }} or {{ loop.index }}
"""
if node.attr == LOOP_HELPER_INDEX:
self.output.write('(arguments[1] + 1)')
elif node.attr == LOOP_HELPER_INDEX_0:
self.output.write('arguments[1]')
elif node.attr == LOOP_HELPER_FIRST:
self.output.write('(arguments[1] == 0)')
elif node.attr == LOOP_HELPER_LAST:
self.output.write('(arguments[1] == arguments[2].length - 1)')
elif node.attr == LOOP_HELPER_LENGTH:
self.output.write('arguments[2].length')
def _process_args(self, node, **kwargs):
args = getattr(node, 'args', None)
if not args:
return
for i, item in enumerate(args):
self._process_node(item, **kwargs)
if i < len(node.args) - 1:
self.output.write(',')
@contextlib.contextmanager
def _execution(self):
"""
Context manager for executing some JavaScript inside a template.
"""
did_start_executing = False
if self.state == STATE_DEFAULT:
did_start_executing = True
self.state = STATE_EXECUTING
def close():
if did_start_executing and self.state == STATE_EXECUTING:
self.state = STATE_DEFAULT
yield close
close()
@contextlib.contextmanager
def _interpolation(self, safe=False):
did_start_interpolating = False
if self.state == STATE_DEFAULT:
did_start_interpolating = True
self.output.write('__result += "" + ')
if safe is not True:
self.output.write('__runtime.escape')
self.output.write('((__tmp = (')
self.state = STATE_INTERPOLATING
def close():
if did_start_interpolating and self.state == STATE_INTERPOLATING:
self.output.write(')) == null ? "" : __tmp);')
self.state = STATE_DEFAULT
yield close
close()
@contextlib.contextmanager
def _scoped_variables(self, nodes_list, **kwargs):
"""
Context manager for creating scoped variables defined by the nodes in `nodes_list`.
These variables will be added to the context, and when the context manager exits the
context object will be restored to it's previous state.
"""
tmp_vars = []
for node in nodes_list:
is_assign_node = isinstance(node, nodes.Assign)
name = node.target.name if is_assign_node else node.name
# create a temp variable name
tmp_var = next(self.temp_var_names)
# save previous context value
with self._execution():
# save the current value of this name
self.output.write('var %s = %s.%s;' % (tmp_var, self.context_name, name))
# add new value to context
self.output.write('%s.%s = ' % (self.context_name, name))
if is_assign_node:
self._process_node(node.node, **kwargs)
else:
self.output.write(node.name)
self.output.write(';')
tmp_vars.append((tmp_var, name))
yield
# restore context
for tmp_var, name in tmp_vars:
with self._execution():
self.output.write('%s.%s = %s;' % (self.context_name, name, tmp_var))
@contextlib.contextmanager
def _python_bool_wrapper(self, **kwargs):
use_python_bool_wrapper = kwargs.get('use_python_bool_wrapper')
if use_python_bool_wrapper:
self.output.write('__runtime.boolean(')
with option(kwargs, use_python_bool_wrapper=False):
yield kwargs
if use_python_bool_wrapper:
self.output.write(')')
|
jonbretman/jinja-to-js
|
jinja_to_js/__init__.py
|
JinjaToJS._process_block
|
python
|
def _process_block(self, node, **kwargs):
# check if this node already has a 'super_block' attribute
if not hasattr(node, 'super_block'):
# since it doesn't it must be the last block in the inheritance chain
node.super_block = None
# see if there has been a child block defined - if there is this
# will be the first block in the inheritance chain
child_block = self.child_blocks.get(node.name)
if child_block:
# we have child nodes so we need to set `node` as the
# super of the last one in the chain
last_block = child_block
while hasattr(last_block, 'super_block'):
last_block = child_block.super_block
# once we have found it, set this node as it's super block
last_block.super_block = node
# this is the node we want to process as it's the first in the inheritance chain
node = child_block
# process the block passing the it's super along, if this block
# calls super() it will be handled by `_process_call`
for n in node.body:
self._process_node(n, super_block=node.super_block, **kwargs)
|
Processes a block e.g. `{% block my_block %}{% endblock %}`
|
train
|
https://github.com/jonbretman/jinja-to-js/blob/0a784b10a83d37a3171c5797547e9fc460c51289/jinja_to_js/__init__.py#L349-L381
| null |
class JinjaToJS(object):
def __init__(self,
             template_root,
             template_name,
             js_module_format=None,
             runtime_path='jinja-to-js',
             include_prefix='',
             include_ext='',
             child_blocks=None,
             dependencies=None,
             custom_filters=None):
    """
    Args:
        template_root (str): The path to where templates should be loaded from.
        template_name (str): The name of the template to compile (relative to `template_root`).
        js_module_format (str, optional): The JavaScript module format to use.
                                          One of ('amd', 'commonjs', 'es6')
        runtime_path (str, optional): If `js_module_format` is specified then the JavaScript
                                      runtime will be imported using the appropriate method.
                                      It defaults to assuming it will be imported from
                                      `node_modules` but you can change it using this option.
        include_prefix (str, optional): If using the `amd` module format you can use this option
                                        to add a prefix to every include path as AMD imports are
                                        generally relative to the main file, not the module
                                        importing.
        include_ext (str, optional): By default any includes will be references without an
                                     extension, as neither AMD, commonJS or ES6 require the
                                     '.js' extension. If you want to use an extension, say
                                     '.template' then set this option to a string including
                                     the leading '.'
        child_blocks (dict, optional): Used internally when handling templates that extend
                                       other templates.
        dependencies (list of tuple, optional): Used internally when handling templates that
                                                extend other templates.
        custom_filters (list of str, optional): List of custom filters which should be allowed.
                                                These may be filters supported by Jinja but not
                                                supported by jinja-to-js. These filters MUST be
                                                registered with the jinja-to-js JS runtime.
    """
    self.environment = Environment(loader=FileSystemLoader(template_root),
                                   autoescape=True,
                                   extensions=['jinja2.ext.with_', 'jinja2.ext.autoescape'])
    self.output = six.StringIO()
    self.stored_names = set()
    self.temp_var_names = temp_var_names_generator()
    self.state = STATE_DEFAULT
    self.child_blocks = child_blocks or {}
    self.dependencies = dependencies or []
    self._runtime_function_cache = []
    self.js_module_format = js_module_format
    self.runtime_path = runtime_path
    self.include_prefix = include_prefix
    self.include_ext = include_ext
    self.template_root = template_root
    self.template_name = template_name
    self.custom_filters = custom_filters or []
    # The name of the JavaScript function that will output this template. By using a named
    # function the template can call itself which is required to support recursive includes.
    self.js_function_name = 'template' + ''.join(
        x.title() for x in re.split(r'[^\w]|_', path.splitext(self.template_name)[0])
    )
    self.context_name = 'context'
    # The JS runtime is always the first dependency.
    self._add_dependency(self.runtime_path, 'jinjaToJS')
    template_string, template_path, _ = self.environment.loader.get_source(
        self.environment, self.template_name
    )
    # It is assumed that this will be the absolute path to the template. It is used to work out
    # related paths for includes.
    self.template_path = template_path
    if self.js_module_format not in JS_MODULE_FORMATS.keys():
        raise ValueError(
            'The js_module_format option must be one of: %s' % JS_MODULE_FORMATS.keys()
        )
    self.ast = self.environment.parse(template_string)
    # Compile immediately. An ExtendsException means a parent template has
    # been compiled in our place, so processing of this AST stops early.
    try:
        for node in self.ast.body:
            self._process_node(node)
    except ExtendsException:
        pass
def get_output(self):
    """Render the compiled template as a JavaScript module string.

    Returns:
        str
    """
    # Wrap the emitted template body in a named JS function so the
    # template can reference itself (recursive includes).
    function_source = TEMPLATE_WRAPPER.format(
        function_name=self.js_function_name,
        template_code=self.output.getvalue()
    ).strip()
    # Emit the function using the configured module system (amd/commonjs/es6).
    wrap_as_module = JS_MODULE_FORMATS[self.js_module_format]
    return wrap_as_module(self.dependencies, function_source)
def _get_depencency_var_name(self, dependency):
"""
Returns the variable name assigned to the given dependency or None if the dependency has
not yet been registered.
Args:
dependency (str): Thet dependency that needs to be imported.
Returns:
str or None
"""
for dep_path, var_name in self.dependencies:
if dep_path == dependency:
return var_name
def _add_dependency(self, dependency, var_name=None):
"""
Adds the given dependency and returns the variable name to use to access it. If `var_name`
is not given then a random one will be created.
Args:
dependency (str):
var_name (str, optional):
Returns:
str
"""
if var_name is None:
var_name = next(self.temp_var_names)
# Don't add duplicate dependencies
if (dependency, var_name) not in self.dependencies:
self.dependencies.append((dependency, var_name))
return var_name
def _process_node(self, node, **kwargs):
node_name = node.__class__.__name__.lower()
handler = getattr(self, '_process_' + node_name, None)
if callable(handler):
handler(node, **kwargs)
else:
raise Exception('Unknown node %s' % node)
def _process_extends(self, node, **kwargs):
"""
Processes an extends block e.g. `{% extends "some/template.jinja" %}`
"""
# find all the blocks in this template
for b in self.ast.find_all(nodes.Block):
# if not already in `child_blocks` then this is the first time a
# block with this name has been encountered.
if b.name not in self.child_blocks:
self.child_blocks[b.name] = b
else:
# otherwise we have seen this block before, so we need to find the last
# super_block and add the block from this template to the end.
block = self.child_blocks.get(b.name)
while hasattr(block, 'super_block'):
block = block.super_block
block.super_block = b
# load the parent template
parent_template = JinjaToJS(template_root=self.template_root,
template_name=node.template.value,
js_module_format=self.js_module_format,
runtime_path=self.runtime_path,
include_prefix=self.include_prefix,
include_ext=self.include_ext,
child_blocks=self.child_blocks,
dependencies=self.dependencies)
# add the parent templates output to the current output
self.output.write(parent_template.output.getvalue())
# Raise an exception so we stop parsing this template
raise ExtendsException
def _process_output(self, node, **kwargs):
"""
Processes an output node, which will contain things like `Name` and `TemplateData` nodes.
"""
for n in node.nodes:
self._process_node(n, **kwargs)
def _process_templatedata(self, node, **_):
"""
Processes a `TemplateData` node, this is just a bit of as-is text
to be written to the output.
"""
# escape double quotes
value = re.sub('"', r'\\"', node.data)
# escape new lines
value = re.sub('\n', r'\\n', value)
# append value to the result
self.output.write('__result += "' + value + '";')
def _process_name(self, node, **kwargs):
    """
    Processes a `Name` node. Some examples of `Name` nodes:
        {{ foo }} -> 'foo' is a Name
        {% if foo %} -> 'foo' is a Name
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs):
            # Names not created inside the template (via set / loop targets)
            # are read from the context object.
            if node.name not in self.stored_names and node.ctx != 'store':
                self.output.write(self.context_name)
                self.output.write('.')
            # Remember names being assigned so later reads skip the
            # context prefix.
            if node.ctx == 'store':
                self.stored_names.add(node.name)
            self.output.write(node.name)
def _process_getattr(self, node, **kwargs):
"""
Processes a `GetAttr` node. e.g. {{ foo.bar }}
"""
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
if is_loop_helper(node):
self._process_loop_helper(node, **new_kwargs)
else:
self._process_node(node.node, **new_kwargs)
self.output.write('.')
self.output.write(node.attr)
def _process_getitem(self, node, **kwargs):
"""
Processes a `GetItem` node e.g. {{ foo["bar"] }}
"""
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
if isinstance(node.arg, nodes.Slice):
self.output.write('.slice(')
if node.arg.step is not None:
raise Exception('The step argument is not supported when slicing.')
if node.arg.start is None:
self.output.write('0')
else:
self._process_node(node.arg.start, **new_kwargs)
if node.arg.stop is None:
self.output.write(')')
else:
self.output.write(',')
self._process_node(node.arg.stop, **new_kwargs)
self.output.write(')')
else:
self.output.write('[')
self._process_node(node.arg, **new_kwargs)
self.output.write(']')
def _process_for(self, node, **kwargs):
    """
    Processes a for loop. e.g.
        {% for number in numbers %}
            {{ number }}
        {% endfor %}
        {% for key, value in somemap.items() %}
            {{ key }} -> {{ value }}
        {% endfor %}
    """
    # since a for loop can introduce new names into the context
    # we need to remember the ones that existed outside the loop
    previous_stored_names = self.stored_names.copy()
    with self._execution():
        self.output.write('__runtime.each(')
        # iterating dict.keys() maps onto Object.keys(...) in JS
        if is_method_call(node.iter, dict.keys.__name__):
            self.output.write('Object.keys(')
        self._process_node(node.iter, **kwargs)
        if is_method_call(node.iter, dict.keys.__name__):
            self.output.write(')')
        self.output.write(',')
        self.output.write('function')
        self.output.write('(')
        # javascript iterations put the value first, then the key
        if isinstance(node.target, nodes.Tuple):
            if len(node.target.items) > 2:
                raise Exception('De-structuring more than 2 items is not supported.')
            for i, item in enumerate(reversed(node.target.items)):
                self._process_node(item, **kwargs)
                if i < len(node.target.items) - 1:
                    self.output.write(',')
        else:
            self._process_node(node.target, **kwargs)
        self.output.write(')')
        self.output.write('{')
        # a `{% for x in xs if cond %}` filter becomes a guard that skips
        # the iteration when the condition is falsy
        if node.test:
            self.output.write('if (!(')
            self._process_node(node.test, **kwargs)
            self.output.write(')) { return; }')
    assigns = node.target.items if isinstance(node.target, nodes.Tuple) else [node.target]
    # loop targets are scoped onto the context for the duration of the body
    with self._scoped_variables(assigns, **kwargs):
        for n in node.body:
            self._process_node(n, **kwargs)
    with self._execution():
        self.output.write('}')
        self.output.write(')')
        self.output.write(';')
    # restore the stored names
    self.stored_names = previous_stored_names
def _process_if(self, node, execute_end=None, **kwargs):
"""
Processes an if block e.g. `{% if foo %} do something {% endif %}`
"""
with self._execution():
self.output.write('if')
self.output.write('(')
with option(kwargs, use_python_bool_wrapper=True):
self._process_node(node.test, **kwargs)
self.output.write(')')
self.output.write('{')
# We accept an `execute_end` function as a keyword argument as this function is
# recursive in the case of something like if-elif-elif-else. In these cases this
# invocation of this function may have to close execution opened by a previous
# invocation of this function.
if execute_end:
execute_end()
# body
for n in node.body:
self._process_node(n, **kwargs)
if not node.else_ and not node.elif_:
# no else - just close the if
with self._execution():
self.output.write('}')
else:
# either an else or an elif
with self._execution() as execute_end:
self.output.write('}')
self.output.write(' else ')
# check for elif
for n in node.elif_:
self._process_node(n, execute_end=execute_end, **kwargs)
if node.elif_ and node.else_:
self.output.write(' else ')
# open up the body
self.output.write('{')
# process the body of the else
for n in node.else_:
self._process_node(n, **kwargs)
# close the body
with self._execution():
self.output.write('}')
def _process_condexpr(self, node, **kwargs):
with self._interpolation():
self.output.write('(')
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.test, **new_kwargs)
self.output.write(' ? ')
self._process_node(node.expr1, **kwargs)
self.output.write(' : ')
self._process_node(node.expr2, **kwargs)
self.output.write(')')
def _process_not(self, node, **kwargs):
self.output.write('!')
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
def _process_or(self, node, **kwargs):
self._process_node(node.left, **kwargs)
self.output.write(' || ')
self._process_node(node.right, **kwargs)
def _process_and(self, node, **kwargs):
self._process_node(node.left, **kwargs)
self.output.write(' && ')
self._process_node(node.right, **kwargs)
def _process_tuple(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
def _process_call(self, node, super_block=None, **kwargs):
    """Processes a function call.

    Handles three cases: dict iteration methods (keys/values/items), the
    block-local ``super()`` call, and ordinary calls on context variables.
    """
    if is_method_call(node, DICT_ITER_METHODS):
        # special case for dict methods
        self._process_node(node.node.node, **kwargs)
    elif is_method_call(node, 'super'):
        # special case for the super() method which is available inside blocks
        if not super_block:
            raise Exception('super() called outside of a block with a parent.')
        self._process_node(super_block, **kwargs)
    else:
        # just a normal function call on a context variable
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self._process_node(node.node, **new_kwargs)
                self.output.write('(')
                self._process_args(node, **new_kwargs)
                self.output.write(')')
            # only output the semi-colon if we are not interpolating
            # NOTE(review): despite the comment above, an empty string is
            # written here rather than ';' — confirm whether this was meant
            # to emit a semicolon in the executing state.
            if self.state != STATE_INTERPOLATING:
                self.output.write('')
def _process_filter(self, node, **kwargs):
method_name = getattr(self, '_process_filter_%s' % node.name, None)
if callable(method_name):
method_name(node, **kwargs)
elif node.name in self.custom_filters:
with self._interpolation(safe=True):
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.%s(' % node.name)
self._process_node(node.node, **new_kwargs)
if getattr(node, 'args', None):
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
else:
raise Exception('Unsupported filter: %s' % node.name)
def _process_filter_safe(self, node, **kwargs):
with self._interpolation(safe=True):
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
def _process_filter_capitalize(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.capitalize(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_abs(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('Math.abs(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_attr(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
self.output.write('[')
self._process_node(node.args[0], **new_kwargs)
self.output.write(']')
def _process_filter_batch(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.batch(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_default(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.default(')
self._process_node(node.node, **new_kwargs)
if node.args:
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_first(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.first(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_int(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.int(')
self._process_node(node.node, **new_kwargs)
if node.args:
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_last(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.last(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_length(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.size(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_lower(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").toLowerCase()')
def _process_filter_slice(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.slice(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_title(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.title(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_trim(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").trim()')
def _process_filter_upper(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").toUpperCase()')
def _process_filter_truncate(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.truncate(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_assign(self, node, **kwargs):
with self._execution():
self.output.write('var ')
self._process_node(node.target, **kwargs)
self.output.write(' = ')
self._process_node(node.node, **kwargs)
self.output.write(';')
def _process_with(self, node, **kwargs):
# keep a copy of the stored names before the scope
previous_stored_names = self.stored_names.copy()
# assigns in the with tag
# e.g. {% with var = "something %}
assigns_in_tag = [nodes.Assign(t, v) for t, v in zip(node.targets, node.values)]
# assigns in the with body
# e.g. {% set name = 'John' %}
assigns_in_body = [x for x in node.body if isinstance(x, nodes.Assign)]
# remove assigns from the body
node.body = [x for x in node.body if not isinstance(x, nodes.Assign)]
# get a list of all the assigns in this with block
# both on the tag, and within the body of the block
all_assigns = assigns_in_tag + assigns_in_body
with self._execution():
self.output.write('(function () {')
with self._scoped_variables(all_assigns, **kwargs):
for node in node.body:
self._process_node(node, **kwargs)
with self._execution():
self.output.write('})();')
# restore previous stored names
self.stored_names = previous_stored_names
def _process_compare(self, node, **kwargs):
if len(node.ops) > 1:
raise Exception('Multiple operands are not supported.')
operand = node.ops[0]
is_equality = operand.op in ('eq', 'ne')
left_hand_is_const = isinstance(node.expr, nodes.Const)
right_hand_is_const = isinstance(operand.expr, nodes.Const)
# If the operand is equality and neither the left or right hand side are constants then we
# will need to use the JavaScript deep equals function. Ideally we want to avoid using this
# as it is quite a big function.
use_is_equal_function = is_equality and not (left_hand_is_const or right_hand_is_const)
with option(kwargs, use_python_bool_wrapper=False):
if use_is_equal_function:
if operand.op == 'ne':
self.output.write('!')
self.output.write('__runtime.isEqual(')
self._process_node(node.expr, **kwargs)
if use_is_equal_function:
self.output.write(',')
else:
self.output.write(OPERANDS.get(operand.op))
self._process_node(operand.expr, **kwargs)
if use_is_equal_function:
self.output.write(')')
def _process_operand(self, node, **kwargs):
self.output.write(OPERANDS.get(node.op))
self._process_node(node.expr, **kwargs)
def _process_const(self, node, **_):
with self._interpolation():
self.output.write(json.dumps(node.value))
def _process_nonetype(self, node, **_):
with self._interpolation():
self.output.write('null')
def _process_neg(self, node, **kwargs):
with self._interpolation():
self.output.write('-')
self._process_node(node.node, **kwargs)
def _process_list(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
def _process_test(self, node, **kwargs):
with option(kwargs, use_python_bool_wrapper=False):
method_name = getattr(self, '_process_test_%s' % node.name, None)
if callable(method_name):
method_name(node, **kwargs)
else:
raise Exception('Unsupported test: %s' % node.name)
def _process_test_defined(self, node, **kwargs):
self.output.write('(typeof ')
self._process_node(node.node, **kwargs)
self.output.write(' !== "undefined")')
def _process_test_undefined(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' === undefined')
def _process_test_callable(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Function"')
def _process_test_divisibleby(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % ')
self._process_node(node.args[0], **kwargs)
self.output.write(' === 0')
def _process_test_even(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % 2 === 0')
def _process_test_odd(self, node, **kwargs):
    # Emit `<expr> % 2 === 1` for Jinja's `is odd` test.
    # NOTE(review): the JS `%` operator keeps the sign of the dividend, so a
    # negative odd number yields -1 and fails this `=== 1` check — confirm
    # whether negative operands need to be supported here.
    self._process_node(node.node, **kwargs)
    self.output.write(' % 2 === 1')
def _process_test_none(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' === null')
def _process_test_upper(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write('.toUpperCase() === ')
self._process_node(node.node, **kwargs)
def _process_test_lower(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write('.toLowerCase() === ')
self._process_node(node.node, **kwargs)
def _process_test_string(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "String"')
def _process_test_mapping(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Object"')
def _process_test_number(self, node, **kwargs):
self.output.write('(__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Number" && !isNaN(')
self._process_node(node.node, **kwargs)
self.output.write('))')
def _process_include(self, node, **kwargs):
with self._interpolation(safe=True):
include_path = node.template.value
if include_path == self.template_name:
# template is including itself
include_var_name = self.js_function_name
else:
if self.include_prefix:
include_path = self.include_prefix + node.template.value
elif self.js_module_format in ('es6', 'commonjs',) and self.template_name:
_, absolute_include_path, _ = self.environment.loader.get_source(
self.environment, node.template.value
)
include_path = os.path.relpath(
absolute_include_path, os.path.dirname(self.template_path)
)
if not include_path.startswith('.'):
include_path = './' + include_path
include_path = path.splitext(include_path)[0] + self.include_ext
include_var_name = self._get_depencency_var_name(include_path)
if not include_var_name:
include_var_name = self._add_dependency(include_path)
if self.js_module_format is None:
self.output.write('jinjaToJS.include("')
self.output.write(include_path)
self.output.write('");')
else:
self.output.write(include_var_name)
self.output.write('(')
self.output.write(self.context_name)
self.output.write(')')
def _process_add(self, node, **kwargs):
self._process_math(node, math_operator=' + ', **kwargs)
def _process_sub(self, node, **kwargs):
self._process_math(node, math_operator=' - ', **kwargs)
def _process_div(self, node, **kwargs):
self._process_math(node, math_operator=' / ', **kwargs)
def _process_floordiv(self, node, **kwargs):
self._process_math(node, math_operator=' / ', function='Math.floor', **kwargs)
def _process_mul(self, node, **kwargs):
self._process_math(node, math_operator=' * ', **kwargs)
def _process_mod(self, node, **kwargs):
self._process_math(node, math_operator=' % ', **kwargs)
def _process_math(self, node, math_operator=None, function=None, **kwargs):
"""
Processes a math node e.g. `Div`, `Sub`, `Add`, `Mul` etc...
If `function` is provided the expression is wrapped in a call to that function.
"""
with self._interpolation():
if function:
self.output.write(function)
self.output.write('(')
self._process_node(node.left, **kwargs)
self.output.write(math_operator)
self._process_node(node.right, **kwargs)
if function:
self.output.write(')')
def _process_loop_helper(self, node, **kwargs):
"""
Processes a loop helper e.g. {{ loop.first }} or {{ loop.index }}
"""
if node.attr == LOOP_HELPER_INDEX:
self.output.write('(arguments[1] + 1)')
elif node.attr == LOOP_HELPER_INDEX_0:
self.output.write('arguments[1]')
elif node.attr == LOOP_HELPER_FIRST:
self.output.write('(arguments[1] == 0)')
elif node.attr == LOOP_HELPER_LAST:
self.output.write('(arguments[1] == arguments[2].length - 1)')
elif node.attr == LOOP_HELPER_LENGTH:
self.output.write('arguments[2].length')
def _process_args(self, node, **kwargs):
args = getattr(node, 'args', None)
if not args:
return
for i, item in enumerate(args):
self._process_node(item, **kwargs)
if i < len(node.args) - 1:
self.output.write(',')
@contextlib.contextmanager
def _execution(self):
    """
    Context manager for executing some JavaScript inside a template.
    """
    # Only the outermost use flips the state; nested uses are no-ops.
    did_start_executing = False
    if self.state == STATE_DEFAULT:
        did_start_executing = True
        self.state = STATE_EXECUTING

    def close():
        # Only the invocation that entered the executing state may leave it.
        if did_start_executing and self.state == STATE_EXECUTING:
            self.state = STATE_DEFAULT

    yield close
    close()
@contextlib.contextmanager
def _interpolation(self, safe=False):
    """Context manager for writing an interpolated value into the output.

    The first entry from the default state opens a ``__result += ...``
    expression (escaped through ``__runtime.escape`` unless *safe* is True);
    nested entries are no-ops so only the outermost level writes the wrapper.
    Yields a ``close`` callable so callers may terminate the expression early.
    """
    did_start_interpolating = False
    if self.state == STATE_DEFAULT:
        did_start_interpolating = True
        self.output.write('__result += "" + ')
        if safe is not True:
            self.output.write('__runtime.escape')
        self.output.write('((__tmp = (')
        self.state = STATE_INTERPOLATING

    def close():
        # Only the invocation that opened the interpolation may close it.
        if did_start_interpolating and self.state == STATE_INTERPOLATING:
            self.output.write(')) == null ? "" : __tmp);')
            self.state = STATE_DEFAULT

    yield close
    close()
@contextlib.contextmanager
def _scoped_variables(self, nodes_list, **kwargs):
    """
    Context manager for creating scoped variables defined by the nodes in `nodes_list`.
    These variables will be added to the context, and when the context manager exits the
    context object will be restored to its previous state.
    """
    # (temp JS var name, context key) pairs, remembered so the original
    # context values can be restored after the scope ends.
    tmp_vars = []
    for node in nodes_list:
        is_assign_node = isinstance(node, nodes.Assign)
        name = node.target.name if is_assign_node else node.name
        # create a temp variable name
        tmp_var = next(self.temp_var_names)
        # save previous context value
        with self._execution():
            # save the current value of this name
            self.output.write('var %s = %s.%s;' % (tmp_var, self.context_name, name))
            # add new value to context
            self.output.write('%s.%s = ' % (self.context_name, name))
            if is_assign_node:
                # value comes from an expression, e.g. {% set x = a + 1 %}
                self._process_node(node.node, **kwargs)
            else:
                # plain name, e.g. a loop target
                self.output.write(node.name)
            self.output.write(';')
        tmp_vars.append((tmp_var, name))
    yield
    # restore context
    for tmp_var, name in tmp_vars:
        with self._execution():
            self.output.write('%s.%s = %s;' % (self.context_name, name, tmp_var))
@contextlib.contextmanager
def _python_bool_wrapper(self, **kwargs):
    """Optionally wrap the emitted expression in ``__runtime.boolean(...)``.

    Applied when ``use_python_bool_wrapper`` is set in *kwargs*, presumably so
    the generated JS follows Python truthiness rules — see the JS runtime.
    """
    use_python_bool_wrapper = kwargs.get('use_python_bool_wrapper')
    if use_python_bool_wrapper:
        self.output.write('__runtime.boolean(')
    # Clear the flag for nested expressions so they are not double-wrapped.
    with option(kwargs, use_python_bool_wrapper=False):
        yield kwargs
    if use_python_bool_wrapper:
        self.output.write(')')
|
jonbretman/jinja-to-js
|
jinja_to_js/__init__.py
|
JinjaToJS._process_output
|
python
|
def _process_output(self, node, **kwargs):
for n in node.nodes:
self._process_node(n, **kwargs)
|
Processes an output node, which will contain things like `Name` and `TemplateData` nodes.
|
train
|
https://github.com/jonbretman/jinja-to-js/blob/0a784b10a83d37a3171c5797547e9fc460c51289/jinja_to_js/__init__.py#L383-L388
| null |
class JinjaToJS(object):
def __init__(self,
template_root,
template_name,
js_module_format=None,
runtime_path='jinja-to-js',
include_prefix='',
include_ext='',
child_blocks=None,
dependencies=None,
custom_filters=None):
"""
Args:
template_root (str): The path to where templates should be loaded from.
template_name (str): The name of the template to compile (relative to `template_root`).
js_module_format (str, optional): The JavaScript module format to use.
One of ('amd', 'commonjs', 'es6')
runtime_path (str, optional): If `js_module_format` is specified then the JavaScript
runtime will be imported using the appropriate method.
It defaults to assuming it will be imported from
`node_modules` but you can change it using this option.
include_prefix (str, optional): If using the `amd` module format you can use this option
to add a prefix to every include path as AMD imports are
generally relative to the main file, not the module
importing.
include_ext (str, optional): By default any includes will be references without an
extension, as neither AMD, commonJS or ES6 require the
'.js' extension. If you want to use an extension, say
'.template' then set this option to a string including
the leading '.'
child_blocks (dict, optional): Used internally when handling templates that extend
other templates.
dependencies (list of tuple, optional): Used internally when handling templates that
extend other templates.
custom_filters (list of str, optional): List of custom filters which should be allowed.
These may be filters supported by Jinja but not
supported by jinja-to-js. These filters MUST be
registered with the jinja-to-js JS runtime.
"""
self.environment = Environment(loader=FileSystemLoader(template_root),
autoescape=True,
extensions=['jinja2.ext.with_', 'jinja2.ext.autoescape'])
self.output = six.StringIO()
self.stored_names = set()
self.temp_var_names = temp_var_names_generator()
self.state = STATE_DEFAULT
self.child_blocks = child_blocks or {}
self.dependencies = dependencies or []
self._runtime_function_cache = []
self.js_module_format = js_module_format
self.runtime_path = runtime_path
self.include_prefix = include_prefix
self.include_ext = include_ext
self.template_root = template_root
self.template_name = template_name
self.custom_filters = custom_filters or []
# The name of the JavaScript function that will output this template. By using a named
# function the template can call itself which is required to support recursive includes.
self.js_function_name = 'template' + ''.join(
x.title() for x in re.split(r'[^\w]|_', path.splitext(self.template_name)[0])
)
self.context_name = 'context'
self._add_dependency(self.runtime_path, 'jinjaToJS')
template_string, template_path, _ = self.environment.loader.get_source(
self.environment, self.template_name
)
# It is assumed that this will be the absolute path to the template. It is used to work out
# relative paths for includes.
self.template_path = template_path
if self.js_module_format not in JS_MODULE_FORMATS.keys():
raise ValueError(
'The js_module_format option must be one of: %s' % JS_MODULE_FORMATS.keys()
)
self.ast = self.environment.parse(template_string)
try:
for node in self.ast.body:
self._process_node(node)
except ExtendsException:
pass
def get_output(self):
    """
    Return the generated JavaScript module code as a string.

    The accumulated template body is wrapped in a named JS function and
    then emitted using the configured module format (amd/commonjs/es6).
    """
    function_src = TEMPLATE_WRAPPER.format(
        function_name=self.js_function_name,
        template_code=self.output.getvalue(),
    ).strip()
    formatter = JS_MODULE_FORMATS[self.js_module_format]
    return formatter(self.dependencies, function_src)
def _get_depencency_var_name(self, dependency):
"""
Returns the variable name assigned to the given dependency or None if the dependency has
not yet been registered.
Args:
dependency (str): Thet dependency that needs to be imported.
Returns:
str or None
"""
for dep_path, var_name in self.dependencies:
if dep_path == dependency:
return var_name
def _add_dependency(self, dependency, var_name=None):
"""
Adds the given dependency and returns the variable name to use to access it. If `var_name`
is not given then a random one will be created.
Args:
dependency (str):
var_name (str, optional):
Returns:
str
"""
if var_name is None:
var_name = next(self.temp_var_names)
# Don't add duplicate dependencies
if (dependency, var_name) not in self.dependencies:
self.dependencies.append((dependency, var_name))
return var_name
def _process_node(self, node, **kwargs):
node_name = node.__class__.__name__.lower()
handler = getattr(self, '_process_' + node_name, None)
if callable(handler):
handler(node, **kwargs)
else:
raise Exception('Unknown node %s' % node)
def _process_extends(self, node, **kwargs):
    """
    Processes an extends block e.g. `{% extends "some/template.jinja" %}`

    Collects this template's blocks into ``child_blocks`` (chaining repeated
    block names via ``super_block``), compiles the parent template with those
    blocks, writes the parent's output in place of this template's, and then
    raises ``ExtendsException`` to abort parsing of the current template.
    """
    # find all the blocks in this template
    for b in self.ast.find_all(nodes.Block):
        # if not already in `child_blocks` then this is the first time a
        # block with this name has been encountered.
        if b.name not in self.child_blocks:
            self.child_blocks[b.name] = b
        else:
            # otherwise we have seen this block before, so we need to find the last
            # super_block and add the block from this template to the end.
            block = self.child_blocks.get(b.name)
            while hasattr(block, 'super_block'):
                block = block.super_block
            block.super_block = b
    # load the parent template
    parent_template = JinjaToJS(template_root=self.template_root,
                                template_name=node.template.value,
                                js_module_format=self.js_module_format,
                                runtime_path=self.runtime_path,
                                include_prefix=self.include_prefix,
                                include_ext=self.include_ext,
                                child_blocks=self.child_blocks,
                                dependencies=self.dependencies)
    # add the parent templates output to the current output
    self.output.write(parent_template.output.getvalue())
    # Raise an exception so we stop parsing this template
    raise ExtendsException
def _process_block(self, node, **kwargs):
"""
Processes a block e.g. `{% block my_block %}{% endblock %}`
"""
# check if this node already has a 'super_block' attribute
if not hasattr(node, 'super_block'):
# since it doesn't it must be the last block in the inheritance chain
node.super_block = None
# see if there has been a child block defined - if there is this
# will be the first block in the inheritance chain
child_block = self.child_blocks.get(node.name)
if child_block:
# we have child nodes so we need to set `node` as the
# super of the last one in the chain
last_block = child_block
while hasattr(last_block, 'super_block'):
last_block = child_block.super_block
# once we have found it, set this node as it's super block
last_block.super_block = node
# this is the node we want to process as it's the first in the inheritance chain
node = child_block
# process the block passing the it's super along, if this block
# calls super() it will be handled by `_process_call`
for n in node.body:
self._process_node(n, super_block=node.super_block, **kwargs)
def _process_templatedata(self, node, **_):
"""
Processes a `TemplateData` node, this is just a bit of as-is text
to be written to the output.
"""
# escape double quotes
value = re.sub('"', r'\\"', node.data)
# escape new lines
value = re.sub('\n', r'\\n', value)
# append value to the result
self.output.write('__result += "' + value + '";')
def _process_name(self, node, **kwargs):
    """
    Processes a `Name` node. Some examples of `Name` nodes:
        {{ foo }} -> 'foo' is a Name
        {% if foo }} -> 'foo' is a Name
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs):
            # Names that are being read and were not introduced locally
            # (by {% set %} / a loop target) live on the context object.
            if node.name not in self.stored_names and node.ctx != 'store':
                self.output.write(self.context_name)
                self.output.write('.')
            # Remember assigned names so later reads skip the context prefix.
            if node.ctx == 'store':
                self.stored_names.add(node.name)
            self.output.write(node.name)
def _process_getattr(self, node, **kwargs):
    """
    Processes a `GetAttr` node. e.g. {{ foo.bar }}

    ``loop.*`` helpers are special-cased; everything else becomes a plain
    JavaScript property access.
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            if not is_loop_helper(node):
                self._process_node(node.node, **new_kwargs)
                self.output.write('.')
                self.output.write(node.attr)
            else:
                self._process_loop_helper(node, **new_kwargs)
def _process_getitem(self, node, **kwargs):
    """
    Processes a `GetItem` node e.g. {{ foo["bar"] }}

    Slice subscripts are emitted as JavaScript ``.slice(start[, stop])``
    calls (a slice step is not supported); plain subscripts become
    ``value[key]``.
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.node, **new_kwargs)
            if isinstance(node.arg, nodes.Slice):
                self.output.write('.slice(')
                if node.arg.step is not None:
                    raise Exception('The step argument is not supported when slicing.')
                if node.arg.start is None:
                    # no start given -> slice from the beginning
                    self.output.write('0')
                else:
                    self._process_node(node.arg.start, **new_kwargs)
                if node.arg.stop is None:
                    # open-ended slice -> JS slice() runs to the end
                    self.output.write(')')
                else:
                    self.output.write(',')
                    self._process_node(node.arg.stop, **new_kwargs)
                    self.output.write(')')
            else:
                self.output.write('[')
                self._process_node(node.arg, **new_kwargs)
                self.output.write(']')
def _process_for(self, node, **kwargs):
    """
    Processes a for loop. e.g.
        {% for number in numbers %}
            {{ number }}
        {% endfor %}
        {% for key, value in somemap.items() %}
            {{ key }} -> {{ value }}
        {% %}

    Emits a ``__runtime.each(iterable, function(value, key) {...})`` call.
    Loop targets are made visible inside the callback via
    `_scoped_variables`, and a loop filter (``{% for x in xs if cond %}``)
    becomes an early ``return`` inside the callback.
    """
    # since a for loop can introduce new names into the context
    # we need to remember the ones that existed outside the loop
    previous_stored_names = self.stored_names.copy()
    with self._execution():
        self.output.write('__runtime.each(')
        # dict.keys() iteration maps to Object.keys(...)
        if is_method_call(node.iter, dict.keys.__name__):
            self.output.write('Object.keys(')
        self._process_node(node.iter, **kwargs)
        if is_method_call(node.iter, dict.keys.__name__):
            self.output.write(')')
        self.output.write(',')
        self.output.write('function')
        self.output.write('(')
        # javascript iterations put the value first, then the key
        if isinstance(node.target, nodes.Tuple):
            if len(node.target.items) > 2:
                raise Exception('De-structuring more than 2 items is not supported.')
            for i, item in enumerate(reversed(node.target.items)):
                self._process_node(item, **kwargs)
                if i < len(node.target.items) - 1:
                    self.output.write(',')
        else:
            self._process_node(node.target, **kwargs)
        self.output.write(')')
        self.output.write('{')
        # a loop filter becomes an early return inside the callback
        if node.test:
            self.output.write('if (!(')
            self._process_node(node.test, **kwargs)
            self.output.write(')) { return; }')
    assigns = node.target.items if isinstance(node.target, nodes.Tuple) else [node.target]
    # expose the loop target(s) on the context while processing the body
    with self._scoped_variables(assigns, **kwargs):
        for n in node.body:
            self._process_node(n, **kwargs)
    with self._execution():
        self.output.write('}')
        self.output.write(')')
        self.output.write(';')
    # restore the stored names
    self.stored_names = previous_stored_names
def _process_if(self, node, execute_end=None, **kwargs):
    """
    Processes an if block e.g. `{% if foo %} do something {% endif %}`

    ``execute_end`` is the close function of an enclosing `_execution`
    context; it is passed along when this function recurses for
    if-elif-elif-else chains so an inner invocation can close execution
    state opened by an outer one.
    """
    with self._execution():
        self.output.write('if')
        self.output.write('(')
        # the if-test must use Python truthiness semantics
        with option(kwargs, use_python_bool_wrapper=True):
            self._process_node(node.test, **kwargs)
        self.output.write(')')
        self.output.write('{')
        # We accept an `execute_end` function as a keyword argument as this function is
        # recursive in the case of something like if-elif-elif-else. In these cases this
        # invocation of this function may have to close execution opened by a previous
        # invocation of this function.
        if execute_end:
            execute_end()
    # body
    for n in node.body:
        self._process_node(n, **kwargs)
    if not node.else_ and not node.elif_:
        # no else - just close the if
        with self._execution():
            self.output.write('}')
    else:
        # either an else or an elif
        with self._execution() as execute_end:
            self.output.write('}')
            self.output.write(' else ')
            # check for elif
            for n in node.elif_:
                self._process_node(n, execute_end=execute_end, **kwargs)
            if node.elif_ and node.else_:
                self.output.write(' else ')
            # open up the body
            self.output.write('{')
        # process the body of the else
        for n in node.else_:
            self._process_node(n, **kwargs)
        # close the body
        with self._execution():
            self.output.write('}')
def _process_condexpr(self, node, **kwargs):
    """
    Processes a conditional expression e.g. ``{{ a if cond else b }}``,
    emitted as a JavaScript ternary ``(cond ? a : b)``.

    NOTE(review): the test is processed with ``new_kwargs`` (bool wrapper
    consumed) but ``expr1``/``expr2`` are processed with the original
    ``kwargs`` — if the flag was set, the value branches could be wrapped
    in ``__runtime.boolean`` again. Looks intentional only for the common
    interpolation case; confirm before changing.
    """
    with self._interpolation():
        self.output.write('(')
        # only the test gets Python-truthiness wrapping
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.test, **new_kwargs)
        self.output.write(' ? ')
        self._process_node(node.expr1, **kwargs)
        self.output.write(' : ')
        self._process_node(node.expr2, **kwargs)
        self.output.write(')')
def _process_not(self, node, **kwargs):
    """Emit a JavaScript ``!`` negation for a Jinja ``not`` expression."""
    self.output.write('!')
    with self._python_bool_wrapper(**kwargs) as new_kwargs:
        self._process_node(node.node, **new_kwargs)
def _process_or(self, node, **kwargs):
self._process_node(node.left, **kwargs)
self.output.write(' || ')
self._process_node(node.right, **kwargs)
def _process_and(self, node, **kwargs):
self._process_node(node.left, **kwargs)
self.output.write(' && ')
self._process_node(node.right, **kwargs)
def _process_tuple(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
def _process_call(self, node, super_block=None, **kwargs):
    """
    Processes a call expression.

    Three cases: dict iteration methods (``.items()``/``.keys()``/
    ``.values()``) are unwrapped to the underlying object, ``super()``
    inside a block renders the parent block, and anything else becomes a
    plain JavaScript function call on a context value.

    Args:
        node: the Jinja ``Call`` node.
        super_block: the parent block, when processing inside a block that
            has one (supplied by `_process_block`).

    Raises:
        Exception: if ``super()`` is called outside a block with a parent.
    """
    if is_method_call(node, DICT_ITER_METHODS):
        # special case for dict methods - emit the object itself
        self._process_node(node.node.node, **kwargs)
    elif is_method_call(node, 'super'):
        # special case for the super() method which is available inside blocks
        if not super_block:
            raise Exception('super() called outside of a block with a parent.')
        self._process_node(super_block, **kwargs)
    else:
        # just a normal function call on a context variable
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self._process_node(node.node, **new_kwargs)
                self.output.write('(')
                self._process_args(node, **new_kwargs)
                self.output.write(')')
            # (removed) the original ended with a dead
            # `self.output.write('')` guarded by the interpolation state -
            # writing an empty string is a no-op, so dropping it is
            # behavior-identical.
def _process_filter(self, node, **kwargs):
method_name = getattr(self, '_process_filter_%s' % node.name, None)
if callable(method_name):
method_name(node, **kwargs)
elif node.name in self.custom_filters:
with self._interpolation(safe=True):
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.%s(' % node.name)
self._process_node(node.node, **new_kwargs)
if getattr(node, 'args', None):
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
else:
raise Exception('Unsupported filter: %s' % node.name)
def _process_filter_safe(self, node, **kwargs):
    """Jinja ``safe`` filter: emit the value without HTML-escaping it."""
    with self._interpolation(safe=True):
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.node, **new_kwargs)
def _process_filter_capitalize(self, node, **kwargs):
    """Jinja ``capitalize`` filter -> ``__filters.capitalize(...)`` runtime call."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.capitalize(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(')')
def _process_filter_abs(self, node, **kwargs):
    """Jinja ``abs`` filter -> JavaScript ``Math.abs(...)``."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('Math.abs(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(')')
def _process_filter_attr(self, node, **kwargs):
    """Jinja ``attr`` filter -> JavaScript bracket lookup ``value[name]``."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.node, **new_kwargs)
            self.output.write('[')
            self._process_node(node.args[0], **new_kwargs)
            self.output.write(']')
def _process_filter_batch(self, node, **kwargs):
    """Jinja ``batch`` filter -> ``__filters.batch(value, args...)``."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.batch(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(',')
            self._process_args(node, **new_kwargs)
            self.output.write(')')
def _process_filter_default(self, node, **kwargs):
    """Jinja ``default`` filter -> ``__filters.default(value[, args...])``."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.default(')
            self._process_node(node.node, **new_kwargs)
            # the default value / boolean flag are optional
            if node.args:
                self.output.write(',')
                self._process_args(node, **new_kwargs)
            self.output.write(')')
def _process_filter_first(self, node, **kwargs):
    """Jinja ``first`` filter -> ``__filters.first(...)``."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.first(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(')')
def _process_filter_int(self, node, **kwargs):
    """Jinja ``int`` filter -> ``__filters.int(value[, default])``."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.int(')
            self._process_node(node.node, **new_kwargs)
            # optional fallback for unparseable values
            if node.args:
                self.output.write(',')
                self._process_args(node, **new_kwargs)
            self.output.write(')')
def _process_filter_last(self, node, **kwargs):
    """Jinja ``last`` filter -> ``__filters.last(...)``."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.last(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(')')
def _process_filter_length(self, node, **kwargs):
    """Jinja ``length`` filter -> ``__filters.size(...)``."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.size(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(')')
def _process_filter_lower(self, node, **kwargs):
    """Jinja ``lower`` filter -> ``(value + "").toLowerCase()`` (coerces to string)."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(' + "").toLowerCase()')
def _process_filter_slice(self, node, **kwargs):
    """Jinja ``slice`` filter -> ``__filters.slice(value, args...)``."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.slice(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(',')
            self._process_args(node, **new_kwargs)
            self.output.write(')')
def _process_filter_title(self, node, **kwargs):
    """Jinja ``title`` filter -> ``__filters.title(...)``."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.title(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(')')
def _process_filter_trim(self, node, **kwargs):
    """Jinja ``trim`` filter -> ``(value + "").trim()`` (coerces to string)."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(' + "").trim()')
def _process_filter_upper(self, node, **kwargs):
    """Jinja ``upper`` filter -> ``(value + "").toUpperCase()`` (coerces to string)."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(' + "").toUpperCase()')
def _process_filter_truncate(self, node, **kwargs):
    """Jinja ``truncate`` filter -> ``__filters.truncate(value, args...)``."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.truncate(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(',')
            self._process_args(node, **new_kwargs)
            self.output.write(')')
def _process_assign(self, node, **kwargs):
    """Emit ``{% set x = expr %}`` as JavaScript ``var x = expr;``."""
    with self._execution():
        for prefix, child in (('var ', node.target), (' = ', node.node)):
            self.output.write(prefix)
            self._process_node(child, **kwargs)
        self.output.write(';')
def _process_with(self, node, **kwargs):
    """
    Processes a with block e.g. ``{% with var = "something" %}...{% endwith %}``.

    The body is wrapped in a JavaScript IIFE and all assignments (both on
    the tag and ``{% set %}`` statements inside the body) are made visible
    via `_scoped_variables`, then the outer name set is restored.
    """
    # keep a copy of the stored names before the scope
    previous_stored_names = self.stored_names.copy()

    # assigns in the with tag
    # e.g. {% with var = "something %}
    assigns_in_tag = [nodes.Assign(t, v) for t, v in zip(node.targets, node.values)]

    # assigns in the with body
    # e.g. {% set name = 'John' %}
    assigns_in_body = [x for x in node.body if isinstance(x, nodes.Assign)]

    # remove assigns from the body
    node.body = [x for x in node.body if not isinstance(x, nodes.Assign)]

    # get a list of all the assigns in this with block
    # both on the tag, and within the body of the block
    all_assigns = assigns_in_tag + assigns_in_body

    with self._execution():
        self.output.write('(function () {')

    with self._scoped_variables(all_assigns, **kwargs):
        # BUGFIX(cleanup): the original reused the name `node` for the loop
        # variable, shadowing the parameter mid-function; renamed for
        # clarity - behavior is unchanged.
        for body_node in node.body:
            self._process_node(body_node, **kwargs)

    with self._execution():
        self.output.write('})();')

    # restore previous stored names
    self.stored_names = previous_stored_names
def _process_compare(self, node, **kwargs):
    """
    Processes a comparison e.g. ``{{ a == b }}``.

    Equality between two non-constant operands is emitted as a
    ``__runtime.isEqual(a, b)`` deep-equality call (negated with ``!`` for
    ``!=``); every other case uses the plain JS operator from ``OPERANDS``.
    Chained comparisons (``a < b < c``) are not supported.
    """
    if len(node.ops) > 1:
        raise Exception('Multiple operands are not supported.')
    operand = node.ops[0]
    is_equality = operand.op in ('eq', 'ne')
    left_hand_is_const = isinstance(node.expr, nodes.Const)
    right_hand_is_const = isinstance(operand.expr, nodes.Const)
    # If the operand is equality and neither the left or right hand side are constants then we
    # will need to use the JavaScript deep equals function. Ideally we want to avoid using this
    # as it is quite a big function.
    use_is_equal_function = is_equality and not (left_hand_is_const or right_hand_is_const)
    with option(kwargs, use_python_bool_wrapper=False):
        if use_is_equal_function:
            if operand.op == 'ne':
                self.output.write('!')
            self.output.write('__runtime.isEqual(')
        self._process_node(node.expr, **kwargs)
        if use_is_equal_function:
            self.output.write(',')
        else:
            self.output.write(OPERANDS.get(operand.op))
        self._process_node(operand.expr, **kwargs)
        if use_is_equal_function:
            self.output.write(')')
def _process_operand(self, node, **kwargs):
    """Emit a comparison operator (looked up in ``OPERANDS``) and its operand."""
    self.output.write(OPERANDS.get(node.op))
    self._process_node(node.expr, **kwargs)
def _process_const(self, node, **_):
    """Emit a constant as its JSON literal representation."""
    with self._interpolation():
        self.output.write(json.dumps(node.value))
def _process_nonetype(self, node, **_):
    """Emit Python ``None`` as JavaScript ``null``."""
    with self._interpolation():
        self.output.write('null')
def _process_neg(self, node, **kwargs):
    """Emit a unary minus before the operand."""
    with self._interpolation():
        self.output.write('-')
        self._process_node(node.node, **kwargs)
def _process_list(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
def _process_test(self, node, **kwargs):
    """
    Dispatch a Jinja test (e.g. ``is defined``) to ``_process_test_<name>``.
    Raises for tests with no handler.
    """
    with option(kwargs, use_python_bool_wrapper=False):
        method_name = getattr(self, '_process_test_%s' % node.name, None)
        if callable(method_name):
            method_name(node, **kwargs)
        else:
            raise Exception('Unsupported test: %s' % node.name)
def _process_test_defined(self, node, **kwargs):
    """``is defined`` -> ``(typeof x !== "undefined")``."""
    self.output.write('(typeof ')
    self._process_node(node.node, **kwargs)
    self.output.write(' !== "undefined")')
def _process_test_undefined(self, node, **kwargs):
    """``is undefined`` -> ``x === undefined``.

    NOTE(review): unlike `_process_test_defined` this emits no ``typeof``
    guard or parentheses — confirm the asymmetry is intentional.
    """
    self._process_node(node.node, **kwargs)
    self.output.write(' === undefined')
def _process_test_callable(self, node, **kwargs):
    """``is callable`` -> ``__runtime.type(x) === "Function"``."""
    self.output.write('__runtime.type(')
    self._process_node(node.node, **kwargs)
    self.output.write(') === "Function"')
def _process_test_divisibleby(self, node, **kwargs):
    """``is divisibleby(n)`` -> ``x % n === 0``."""
    self._process_node(node.node, **kwargs)
    self.output.write(' % ')
    self._process_node(node.args[0], **kwargs)
    self.output.write(' === 0')
def _process_test_even(self, node, **kwargs):
    """``is even`` -> ``x % 2 === 0``."""
    self._process_node(node.node, **kwargs)
    self.output.write(' % 2 === 0')
def _process_test_odd(self, node, **kwargs):
    """``is odd`` -> ``x % 2 === 1``.

    NOTE(review): JS ``%`` is a remainder, so negative odd numbers give
    ``-1`` and fail this test (Python's ``-3 % 2 == 1`` passes) — confirm
    whether negative inputs matter here.
    """
    self._process_node(node.node, **kwargs)
    self.output.write(' % 2 === 1')
def _process_test_none(self, node, **kwargs):
    """``is none`` -> ``x === null``."""
    self._process_node(node.node, **kwargs)
    self.output.write(' === null')
def _process_test_upper(self, node, **kwargs):
    """``is upper`` -> ``x.toUpperCase() === x`` (the node is emitted twice)."""
    self._process_node(node.node, **kwargs)
    self.output.write('.toUpperCase() === ')
    self._process_node(node.node, **kwargs)
def _process_test_lower(self, node, **kwargs):
    """``is lower`` -> ``x.toLowerCase() === x`` (the node is emitted twice)."""
    self._process_node(node.node, **kwargs)
    self.output.write('.toLowerCase() === ')
    self._process_node(node.node, **kwargs)
def _process_test_string(self, node, **kwargs):
    """``is string`` -> ``__runtime.type(x) === "String"``."""
    self.output.write('__runtime.type(')
    self._process_node(node.node, **kwargs)
    self.output.write(') === "String"')
def _process_test_mapping(self, node, **kwargs):
    """``is mapping`` -> ``__runtime.type(x) === "Object"``."""
    self.output.write('__runtime.type(')
    self._process_node(node.node, **kwargs)
    self.output.write(') === "Object"')
def _process_test_number(self, node, **kwargs):
    """``is number`` -> Number type check that also excludes ``NaN``."""
    self.output.write('(__runtime.type(')
    self._process_node(node.node, **kwargs)
    self.output.write(') === "Number" && !isNaN(')
    self._process_node(node.node, **kwargs)
    self.output.write('))')
def _process_include(self, node, **kwargs):
    """
    Processes ``{% include "template" %}``.

    A self-include calls this template's own named function (which is why
    the template function is named). Otherwise the include path is made
    relative/prefixed according to the module format, registered as a
    dependency, and emitted as a call passing the current context. With no
    module format, a runtime ``jinjaToJS.include("path")`` call is emitted
    instead.
    """
    with self._interpolation(safe=True):
        include_path = node.template.value
        if include_path == self.template_name:
            # template is including itself
            include_var_name = self.js_function_name
        else:
            if self.include_prefix:
                include_path = self.include_prefix + node.template.value
            elif self.js_module_format in ('es6', 'commonjs',) and self.template_name:
                # work out the path of the included template relative to this one
                _, absolute_include_path, _ = self.environment.loader.get_source(
                    self.environment, node.template.value
                )
                include_path = os.path.relpath(
                    absolute_include_path, os.path.dirname(self.template_path)
                )
                if not include_path.startswith('.'):
                    include_path = './' + include_path
            # swap the extension for the configured one (may be empty)
            include_path = path.splitext(include_path)[0] + self.include_ext
            include_var_name = self._get_depencency_var_name(include_path)
            if not include_var_name:
                include_var_name = self._add_dependency(include_path)
        if self.js_module_format is None:
            self.output.write('jinjaToJS.include("')
            self.output.write(include_path)
            self.output.write('");')
        else:
            self.output.write(include_var_name)
            self.output.write('(')
            self.output.write(self.context_name)
            self.output.write(')')
def _process_add(self, node, **kwargs):
    """Jinja ``+`` -> JavaScript ``+``."""
    self._process_math(node, math_operator=' + ', **kwargs)
def _process_sub(self, node, **kwargs):
    """Jinja ``-`` -> JavaScript ``-``."""
    self._process_math(node, math_operator=' - ', **kwargs)
def _process_div(self, node, **kwargs):
    """Jinja ``/`` (true division) -> JavaScript ``/``."""
    self._process_math(node, math_operator=' / ', **kwargs)
def _process_floordiv(self, node, **kwargs):
    """Jinja ``//`` -> ``Math.floor(a / b)``."""
    self._process_math(node, math_operator=' / ', function='Math.floor', **kwargs)
def _process_mul(self, node, **kwargs):
    """Jinja ``*`` -> JavaScript ``*``."""
    self._process_math(node, math_operator=' * ', **kwargs)
def _process_mod(self, node, **kwargs):
    """Jinja ``%`` -> JavaScript ``%``.

    NOTE(review): JS ``%`` is a remainder, Python ``%`` is a modulo — the
    results differ in sign for negative operands; confirm acceptable.
    """
    self._process_math(node, math_operator=' % ', **kwargs)
def _process_math(self, node, math_operator=None, function=None, **kwargs):
    """
    Processes a math node e.g. `Div`, `Sub`, `Add`, `Mul` etc...
    If `function` is provided the expression is wrapped in a call to that function.
    """
    with self._interpolation():
        if function:
            self.output.write(function)
            self.output.write('(')
        self._process_node(node.left, **kwargs)
        self.output.write(math_operator)
        self._process_node(node.right, **kwargs)
        if function:
            self.output.write(')')
def _process_loop_helper(self, node, **kwargs):
    """
    Processes a loop helper e.g. {{ loop.first }} or {{ loop.index }}

    Inside the generated ``__runtime.each`` callback ``arguments[1]`` is
    the zero-based index and ``arguments[2]`` is the iterable, so helpers
    are emitted as expressions over those.
    """
    expressions = {
        LOOP_HELPER_INDEX: '(arguments[1] + 1)',
        LOOP_HELPER_INDEX_0: 'arguments[1]',
        LOOP_HELPER_FIRST: '(arguments[1] == 0)',
        LOOP_HELPER_LAST: '(arguments[1] == arguments[2].length - 1)',
        LOOP_HELPER_LENGTH: 'arguments[2].length',
    }
    # unknown helpers write nothing (same as the original if/elif chain)
    if node.attr in expressions:
        self.output.write(expressions[node.attr])
def _process_args(self, node, **kwargs):
args = getattr(node, 'args', None)
if not args:
return
for i, item in enumerate(args):
self._process_node(item, **kwargs)
if i < len(node.args) - 1:
self.output.write(',')
@contextlib.contextmanager
def _execution(self):
    """
    Context manager for executing some JavaScript inside a template.

    Re-entrant: only the outermost use flips the state to
    ``STATE_EXECUTING`` and back. Yields a ``close`` function so a callee
    (see `_process_if`) can end the execution state early.
    """
    did_start_executing = False
    if self.state == STATE_DEFAULT:
        did_start_executing = True
        self.state = STATE_EXECUTING

    def close():
        # only the opener may reset the state, and only if nothing else
        # has changed it in the meantime
        if did_start_executing and self.state == STATE_EXECUTING:
            self.state = STATE_DEFAULT

    yield close
    # idempotent - a no-op if close() was already called by the callee
    close()
@contextlib.contextmanager
def _interpolation(self, safe=False):
    """
    Context manager for writing an interpolated value into the result.

    The outermost use emits the ``__result += ...`` prefix (with
    ``__runtime.escape`` unless *safe*) and the null-guarding suffix;
    nested uses are no-ops. Yields a ``close`` function mirroring
    `_execution`.
    """
    did_start_interpolating = False
    if self.state == STATE_DEFAULT:
        did_start_interpolating = True
        self.output.write('__result += "" + ')
        if safe is not True:
            # HTML-escape by default (autoescape)
            self.output.write('__runtime.escape')
        # null/undefined values render as the empty string
        self.output.write('((__tmp = (')
        self.state = STATE_INTERPOLATING

    def close():
        if did_start_interpolating and self.state == STATE_INTERPOLATING:
            self.output.write(')) == null ? "" : __tmp);')
            self.state = STATE_DEFAULT

    yield close
    close()
@contextlib.contextmanager
def _scoped_variables(self, nodes_list, **kwargs):
    """
    Context manager for creating scoped variables defined by the nodes in `nodes_list`.
    These variables will be added to the context, and when the context manager exits the
    context object will be restored to it's previous state.

    Each variable's previous context value is saved into a generated temp
    variable in the emitted JavaScript and restored on exit.
    """
    tmp_vars = []
    for node in nodes_list:
        is_assign_node = isinstance(node, nodes.Assign)
        name = node.target.name if is_assign_node else node.name

        # create a temp variable name
        tmp_var = next(self.temp_var_names)

        # save previous context value
        with self._execution():

            # save the current value of this name
            self.output.write('var %s = %s.%s;' % (tmp_var, self.context_name, name))

            # add new value to context
            self.output.write('%s.%s = ' % (self.context_name, name))

            if is_assign_node:
                # {% set x = expr %} - evaluate the expression
                self.output.write(node.name)
            else:
                # loop target etc. - the name already holds the value
                self.output.write(node.name)

            self.output.write(';')

        tmp_vars.append((tmp_var, name))

    yield

    # restore context
    for tmp_var, name in tmp_vars:
        with self._execution():
            self.output.write('%s.%s = %s;' % (self.context_name, name, tmp_var))
@contextlib.contextmanager
def _python_bool_wrapper(self, **kwargs):
    """
    When ``use_python_bool_wrapper`` is set in *kwargs*, wrap the emitted
    expression in ``__runtime.boolean(...)`` so JS truthiness matches
    Python's (empty list/object are falsy). Yields *kwargs* with the flag
    cleared so nested expressions are not double-wrapped.
    """
    use_python_bool_wrapper = kwargs.get('use_python_bool_wrapper')

    if use_python_bool_wrapper:
        self.output.write('__runtime.boolean(')

    with option(kwargs, use_python_bool_wrapper=False):
        yield kwargs

    if use_python_bool_wrapper:
        self.output.write(')')
|
jonbretman/jinja-to-js
|
jinja_to_js/__init__.py
|
JinjaToJS._process_templatedata
|
python
|
def _process_templatedata(self, node, **_):
# escape double quotes
value = re.sub('"', r'\\"', node.data)
# escape new lines
value = re.sub('\n', r'\\n', value)
# append value to the result
self.output.write('__result += "' + value + '";')
|
Processes a `TemplateData` node, this is just a bit of as-is text
to be written to the output.
|
train
|
https://github.com/jonbretman/jinja-to-js/blob/0a784b10a83d37a3171c5797547e9fc460c51289/jinja_to_js/__init__.py#L390-L403
| null |
class JinjaToJS(object):
def __init__(self,
template_root,
template_name,
js_module_format=None,
runtime_path='jinja-to-js',
include_prefix='',
include_ext='',
child_blocks=None,
dependencies=None,
custom_filters=None):
"""
Args:
template_root (str): The path to where templates should be loaded from.
template_name (str): The name of the template to compile (relative to `template_root`).
js_module_format (str, optional): The JavaScript module format to use.
One of ('amd', 'commonjs', 'es6')
runtime_path (str, optional): If `js_module_format` is specified then the JavaScript
runtime will be imported using the appropriate method.
It defaults to assuming it will be imported from
`node_modules` but you can change it using this option.
include_prefix (str, optional): If using the `amd` module format you can use this option
to add a prefix to every include path as AMD imports are
generally relative to the main file, not the module
importing.
include_ext (str, optional): By default any includes will be references without an
extension, as neither AMD, commonJS or ES6 require the
'.js' extension. If you want to use an extension, say
'.template' then set this option to a string including
the leading '.'
child_blocks (dict, optional): Used internally when handling templates that extend
other templates.
dependencies (list of tuple, optional): Used internally when handling templates that
extend other templates.
custom_filters (list of str, optional): List of custom filters which should be allowed.
These may be filters supported by Jinja but not
supported by jinja-to-js. These filters MUST be
registered with the jinja-to-js JS runtime.
"""
self.environment = Environment(loader=FileSystemLoader(template_root),
autoescape=True,
extensions=['jinja2.ext.with_', 'jinja2.ext.autoescape'])
self.output = six.StringIO()
self.stored_names = set()
self.temp_var_names = temp_var_names_generator()
self.state = STATE_DEFAULT
self.child_blocks = child_blocks or {}
self.dependencies = dependencies or []
self._runtime_function_cache = []
self.js_module_format = js_module_format
self.runtime_path = runtime_path
self.include_prefix = include_prefix
self.include_ext = include_ext
self.template_root = template_root
self.template_name = template_name
self.custom_filters = custom_filters or []
# The name of the JavaScript function that will output this template. By using a named
# function the template can call itself which is required to support recursive includes.
self.js_function_name = 'template' + ''.join(
x.title() for x in re.split(r'[^\w]|_', path.splitext(self.template_name)[0])
)
self.context_name = 'context'
self._add_dependency(self.runtime_path, 'jinjaToJS')
template_string, template_path, _ = self.environment.loader.get_source(
self.environment, self.template_name
)
# It is assumed that this will be the absolute path to the template. It is used to work out
# relative paths for includes.
self.template_path = template_path
if self.js_module_format not in JS_MODULE_FORMATS.keys():
raise ValueError(
'The js_module_format option must be one of: %s' % JS_MODULE_FORMATS.keys()
)
self.ast = self.environment.parse(template_string)
try:
for node in self.ast.body:
self._process_node(node)
except ExtendsException:
pass
def get_output(self):
"""
Returns the generated JavaScript code.
Returns:
str
"""
# generate the JS function string
template_function = TEMPLATE_WRAPPER.format(
function_name=self.js_function_name,
template_code=self.output.getvalue()
).strip()
# get the correct module format template
module_format = JS_MODULE_FORMATS[self.js_module_format]
# generate the module code
return module_format(self.dependencies, template_function)
def _get_depencency_var_name(self, dependency):
"""
Returns the variable name assigned to the given dependency or None if the dependency has
not yet been registered.
Args:
dependency (str): Thet dependency that needs to be imported.
Returns:
str or None
"""
for dep_path, var_name in self.dependencies:
if dep_path == dependency:
return var_name
def _add_dependency(self, dependency, var_name=None):
"""
Adds the given dependency and returns the variable name to use to access it. If `var_name`
is not given then a random one will be created.
Args:
dependency (str):
var_name (str, optional):
Returns:
str
"""
if var_name is None:
var_name = next(self.temp_var_names)
# Don't add duplicate dependencies
if (dependency, var_name) not in self.dependencies:
self.dependencies.append((dependency, var_name))
return var_name
def _process_node(self, node, **kwargs):
node_name = node.__class__.__name__.lower()
handler = getattr(self, '_process_' + node_name, None)
if callable(handler):
handler(node, **kwargs)
else:
raise Exception('Unknown node %s' % node)
def _process_extends(self, node, **kwargs):
"""
Processes an extends block e.g. `{% extends "some/template.jinja" %}`
"""
# find all the blocks in this template
for b in self.ast.find_all(nodes.Block):
# if not already in `child_blocks` then this is the first time a
# block with this name has been encountered.
if b.name not in self.child_blocks:
self.child_blocks[b.name] = b
else:
# otherwise we have seen this block before, so we need to find the last
# super_block and add the block from this template to the end.
block = self.child_blocks.get(b.name)
while hasattr(block, 'super_block'):
block = block.super_block
block.super_block = b
# load the parent template
parent_template = JinjaToJS(template_root=self.template_root,
template_name=node.template.value,
js_module_format=self.js_module_format,
runtime_path=self.runtime_path,
include_prefix=self.include_prefix,
include_ext=self.include_ext,
child_blocks=self.child_blocks,
dependencies=self.dependencies)
# add the parent templates output to the current output
self.output.write(parent_template.output.getvalue())
# Raise an exception so we stop parsing this template
raise ExtendsException
def _process_block(self, node, **kwargs):
"""
Processes a block e.g. `{% block my_block %}{% endblock %}`
"""
# check if this node already has a 'super_block' attribute
if not hasattr(node, 'super_block'):
# since it doesn't it must be the last block in the inheritance chain
node.super_block = None
# see if there has been a child block defined - if there is this
# will be the first block in the inheritance chain
child_block = self.child_blocks.get(node.name)
if child_block:
# we have child nodes so we need to set `node` as the
# super of the last one in the chain
last_block = child_block
while hasattr(last_block, 'super_block'):
last_block = child_block.super_block
# once we have found it, set this node as it's super block
last_block.super_block = node
# this is the node we want to process as it's the first in the inheritance chain
node = child_block
# process the block passing the it's super along, if this block
# calls super() it will be handled by `_process_call`
for n in node.body:
self._process_node(n, super_block=node.super_block, **kwargs)
def _process_output(self, node, **kwargs):
"""
Processes an output node, which will contain things like `Name` and `TemplateData` nodes.
"""
for n in node.nodes:
self._process_node(n, **kwargs)
def _process_name(self, node, **kwargs):
    """
    Processes a `Name` node. Some examples of `Name` nodes:
        {{ foo }} -> 'foo' is a Name
        {% if foo %} -> 'foo' is a Name

    Names not known to be local variables are read off the context object.
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs):
            is_local = node.name in self.stored_names
            if node.ctx == 'store':
                # remember that this name is now a local JS variable
                self.stored_names.add(node.name)
            elif not is_local:
                self.output.write(self.context_name)
                self.output.write('.')
            self.output.write(node.name)
def _process_getattr(self, node, **kwargs):
    """Processes a `GetAttr` node, e.g. `{{ foo.bar }}`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as child_kwargs:
            if is_loop_helper(node):
                # `loop.<helper>` attributes are special-cased
                self._process_loop_helper(node, **child_kwargs)
            else:
                self._process_node(node.node, **child_kwargs)
                self.output.write('.' + node.attr)
def _process_getitem(self, node, **kwargs):
    """
    Processes a `GetItem` node e.g. {{ foo["bar"] }}

    Python slices (`foo[1:2]`) are translated to `Array.prototype.slice`
    calls; plain subscripts become JS bracket access.

    Raises:
        Exception: if the slice uses a step, which has no JS equivalent.
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.node, **new_kwargs)
            if isinstance(node.arg, nodes.Slice):
                self.output.write('.slice(')
                if node.arg.step is not None:
                    raise Exception('The step argument is not supported when slicing.')
                if node.arg.start is None:
                    # no start -> slice from the beginning
                    self.output.write('0')
                else:
                    self._process_node(node.arg.start, **new_kwargs)
                if node.arg.stop is None:
                    # no stop -> slice to the end; close the call here
                    self.output.write(')')
                else:
                    self.output.write(',')
                    self._process_node(node.arg.stop, **new_kwargs)
                    self.output.write(')')
            else:
                self.output.write('[')
                self._process_node(node.arg, **new_kwargs)
                self.output.write(']')
def _process_for(self, node, **kwargs):
    """
    Processes a for loop. e.g.

        {% for number in numbers %}
            {{ number }}
        {% endfor %}

        {% for key, value in somemap.items() %}
            {{ key }} -> {{ value }}
        {% endfor %}

    The loop is emitted as `__runtime.each(iterable, function (...) {...})`;
    loop targets are placed on the context via `_scoped_variables` and
    restored afterwards.
    """
    # since a for loop can introduce new names into the context
    # we need to remember the ones that existed outside the loop
    previous_stored_names = self.stored_names.copy()
    with self._execution():
        self.output.write('__runtime.each(')
        if is_method_call(node.iter, dict.keys.__name__):
            # iterating `dict.keys()` -> iterate `Object.keys(obj)` in JS
            self.output.write('Object.keys(')
        self._process_node(node.iter, **kwargs)
        if is_method_call(node.iter, dict.keys.__name__):
            self.output.write(')')
        self.output.write(',')
        self.output.write('function')
        self.output.write('(')
        # javascript iterations put the value first, then the key
        if isinstance(node.target, nodes.Tuple):
            if len(node.target.items) > 2:
                raise Exception('De-structuring more than 2 items is not supported.')
            for i, item in enumerate(reversed(node.target.items)):
                self._process_node(item, **kwargs)
                if i < len(node.target.items) - 1:
                    self.output.write(',')
        else:
            self._process_node(node.target, **kwargs)
        self.output.write(')')
        self.output.write('{')
        if node.test:
            # `{% for x in xs if cond %}` -> skip items failing the test
            self.output.write('if (!(')
            self._process_node(node.test, **kwargs)
            self.output.write(')) { return; }')
    assigns = node.target.items if isinstance(node.target, nodes.Tuple) else [node.target]
    with self._scoped_variables(assigns, **kwargs):
        for n in node.body:
            self._process_node(n, **kwargs)
    with self._execution():
        self.output.write('}')
        self.output.write(')')
        self.output.write(';')
    # restore the stored names
    self.stored_names = previous_stored_names
def _process_if(self, node, execute_end=None, **kwargs):
    """
    Processes an if block e.g. `{% if foo %} do something {% endif %}`

    Args:
        node: the `If` node to render.
        execute_end (callable, optional): closes an execution context opened
            by an outer `_process_if`; passed when this node is an `elif`.
    """
    with self._execution():
        self.output.write('if')
        self.output.write('(')
        with option(kwargs, use_python_bool_wrapper=True):
            self._process_node(node.test, **kwargs)
        self.output.write(')')
        self.output.write('{')
        # We accept an `execute_end` function as a keyword argument as this function is
        # recursive in the case of something like if-elif-elif-else. In these cases this
        # invocation of this function may have to close execution opened by a previous
        # invocation of this function.
        if execute_end:
            execute_end()
    # body
    for n in node.body:
        self._process_node(n, **kwargs)
    if not node.else_ and not node.elif_:
        # no else - just close the if
        with self._execution():
            self.output.write('}')
    else:
        # either an else or an elif
        with self._execution() as execute_end:
            self.output.write('}')
            self.output.write(' else ')
            # check for elif
            for n in node.elif_:
                self._process_node(n, execute_end=execute_end, **kwargs)
            if node.elif_ and node.else_:
                self.output.write(' else ')
            # open up the body
            self.output.write('{')
        # process the body of the else
        for n in node.else_:
            self._process_node(n, **kwargs)
        # close the body
        with self._execution():
            self.output.write('}')
def _process_condexpr(self, node, **kwargs):
    """Processes `{{ a if b else c }}` as a JS ternary expression."""
    with self._interpolation():
        write = self.output.write
        write('(')
        with self._python_bool_wrapper(**kwargs) as test_kwargs:
            self._process_node(node.test, **test_kwargs)
        write(' ? ')
        self._process_node(node.expr1, **kwargs)
        write(' : ')
        self._process_node(node.expr2, **kwargs)
        write(')')
def _process_not(self, node, **kwargs):
    """Processes a `not` expression by emitting a JS `!`."""
    self.output.write('!')
    with self._python_bool_wrapper(**kwargs) as operand_kwargs:
        self._process_node(node.node, **operand_kwargs)
def _process_or(self, node, **kwargs):
    """Processes an `or` expression as JS `||`."""
    self._process_node(node.left, **kwargs)
    self.output.write(' || ')
    self._process_node(node.right, **kwargs)
def _process_and(self, node, **kwargs):
    """Processes an `and` expression as JS `&&`."""
    self._process_node(node.left, **kwargs)
    self.output.write(' && ')
    self._process_node(node.right, **kwargs)
def _process_tuple(self, node, **kwargs):
    """Processes a tuple literal by emitting a JS array."""
    self.output.write('[')
    last = len(node.items) - 1
    for i, item in enumerate(node.items):
        self._process_node(item, **kwargs)
        if i != last:
            self.output.write(',')
    self.output.write(']')
def _process_call(self, node, super_block=None, **kwargs):
    """
    Processes a `Call` node, e.g. `{{ foo() }}`.

    Dict iteration methods and `super()` inside blocks are special-cased;
    everything else is emitted as a plain JS function call.

    Raises:
        Exception: if `super()` is used outside a block with a parent.
    """
    if is_method_call(node, DICT_ITER_METHODS):
        # special case for dict methods
        self._process_node(node.node.node, **kwargs)
    elif is_method_call(node, 'super'):
        # special case for the super() method which is available inside blocks
        if not super_block:
            raise Exception('super() called outside of a block with a parent.')
        self._process_node(super_block, **kwargs)
    else:
        # just a normal function call on a context variable
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self._process_node(node.node, **new_kwargs)
                self.output.write('(')
                self._process_args(node, **new_kwargs)
                self.output.write(')')
            # only output the semi-colon if we are not interpolating
            if self.state != STATE_INTERPOLATING:
                # BUG FIX: previously wrote '' (a no-op) so calls made in an
                # executing context were never statement-terminated.
                self.output.write(';')
def _process_filter(self, node, **kwargs):
    """
    Processes a filter, e.g. `{{ foo|title }}`.

    Built-in filters are handled by `_process_filter_<name>` methods;
    registered custom filters become `__filters.<name>(...)` calls.

    Raises:
        Exception: for filters that are neither built-in nor custom.
    """
    handler = getattr(self, '_process_filter_%s' % node.name, None)
    if callable(handler):
        handler(node, **kwargs)
    elif node.name in self.custom_filters:
        with self._interpolation(safe=True):
            with self._python_bool_wrapper(**kwargs) as filter_kwargs:
                self.output.write('__filters.%s(' % node.name)
                self._process_node(node.node, **filter_kwargs)
                if getattr(node, 'args', None):
                    self.output.write(',')
                    self._process_args(node, **filter_kwargs)
                self.output.write(')')
    else:
        raise Exception('Unsupported filter: %s' % node.name)
def _process_filter_safe(self, node, **kwargs):
    """Translates the `safe` filter: interpolate the value without escaping."""
    with self._interpolation(safe=True):
        with self._python_bool_wrapper(**kwargs) as fk:
            self._process_node(node.node, **fk)
def _process_filter_capitalize(self, node, **kwargs):
    """Translates the `capitalize` filter to `__filters.capitalize(...)`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as fk:
            write = self.output.write
            write('__filters.capitalize(')
            self._process_node(node.node, **fk)
            write(')')
def _process_filter_abs(self, node, **kwargs):
    """Translates the `abs` filter to JS `Math.abs(...)`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as fk:
            write = self.output.write
            write('Math.abs(')
            self._process_node(node.node, **fk)
            write(')')
def _process_filter_attr(self, node, **kwargs):
    """Translates the `attr` filter to JS bracket access."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as fk:
            self._process_node(node.node, **fk)
            self.output.write('[')
            self._process_node(node.args[0], **fk)
            self.output.write(']')
def _process_filter_batch(self, node, **kwargs):
    """Translates the `batch` filter to `__filters.batch(value, ...)`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as fk:
            write = self.output.write
            write('__filters.batch(')
            self._process_node(node.node, **fk)
            write(',')
            self._process_args(node, **fk)
            write(')')
def _process_filter_default(self, node, **kwargs):
    """Translates the `default` filter to `__filters.default(value[, default])`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as fk:
            write = self.output.write
            write('__filters.default(')
            self._process_node(node.node, **fk)
            if node.args:
                write(',')
                self._process_args(node, **fk)
            write(')')
def _process_filter_first(self, node, **kwargs):
    """Translates the `first` filter to `__filters.first(...)`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as fk:
            write = self.output.write
            write('__filters.first(')
            self._process_node(node.node, **fk)
            write(')')
def _process_filter_int(self, node, **kwargs):
    """Translates the `int` filter to `__filters.int(value[, default])`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as fk:
            write = self.output.write
            write('__filters.int(')
            self._process_node(node.node, **fk)
            if node.args:
                write(',')
                self._process_args(node, **fk)
            write(')')
def _process_filter_last(self, node, **kwargs):
    """Translates the `last` filter to `__filters.last(...)`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as fk:
            write = self.output.write
            write('__filters.last(')
            self._process_node(node.node, **fk)
            write(')')
def _process_filter_length(self, node, **kwargs):
    """Translates the `length` filter to `__filters.size(...)`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as fk:
            write = self.output.write
            write('__filters.size(')
            self._process_node(node.node, **fk)
            write(')')
def _process_filter_lower(self, node, **kwargs):
    """Translates the `lower` filter using JS `toLowerCase` (coercing to string first)."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as fk:
            self.output.write('(')
            self._process_node(node.node, **fk)
            self.output.write(' + "").toLowerCase()')
def _process_filter_slice(self, node, **kwargs):
    """Translates the `slice` filter to `__filters.slice(value, ...)`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as fk:
            write = self.output.write
            write('__filters.slice(')
            self._process_node(node.node, **fk)
            write(',')
            self._process_args(node, **fk)
            write(')')
def _process_filter_title(self, node, **kwargs):
    """Translates the `title` filter to `__filters.title(...)`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as fk:
            write = self.output.write
            write('__filters.title(')
            self._process_node(node.node, **fk)
            write(')')
def _process_filter_trim(self, node, **kwargs):
    """Translates the `trim` filter using JS `trim` (coercing to string first)."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as fk:
            self.output.write('(')
            self._process_node(node.node, **fk)
            self.output.write(' + "").trim()')
def _process_filter_upper(self, node, **kwargs):
    """Translates the `upper` filter using JS `toUpperCase` (coercing to string first)."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as fk:
            self.output.write('(')
            self._process_node(node.node, **fk)
            self.output.write(' + "").toUpperCase()')
def _process_filter_truncate(self, node, **kwargs):
    """Translates the `truncate` filter to `__filters.truncate(value, ...)`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as fk:
            write = self.output.write
            write('__filters.truncate(')
            self._process_node(node.node, **fk)
            write(',')
            self._process_args(node, **fk)
            write(')')
def _process_assign(self, node, **kwargs):
    """Processes `{% set x = ... %}` as a JS `var` declaration."""
    with self._execution():
        write = self.output.write
        write('var ')
        self._process_node(node.target, **kwargs)
        write(' = ')
        self._process_node(node.node, **kwargs)
        write(';')
def _process_with(self, node, **kwargs):
    """
    Processes a `{% with %}` block as an immediately invoked JS function so
    the assigned names are scoped to the block.
    """
    # keep a copy of the stored names before the scope
    previous_stored_names = self.stored_names.copy()
    # assignments made on the tag itself e.g. {% with var = "something" %}
    tag_assigns = [nodes.Assign(t, v) for t, v in zip(node.targets, node.values)]
    # assignments made inside the body e.g. {% set name = 'John' %}
    body_assigns = [x for x in node.body if isinstance(x, nodes.Assign)]
    # the body keeps only the non-assign nodes
    node.body = [x for x in node.body if not isinstance(x, nodes.Assign)]
    # every assignment visible within this with block
    all_assigns = tag_assigns + body_assigns
    with self._execution():
        self.output.write('(function () {')
    with self._scoped_variables(all_assigns, **kwargs):
        for body_node in node.body:
            self._process_node(body_node, **kwargs)
    with self._execution():
        self.output.write('})();')
    # restore previous stored names
    self.stored_names = previous_stored_names
def _process_compare(self, node, **kwargs):
    """
    Processes a comparison e.g. `{% if a == b %}`.

    Equality between two non-constant operands uses the runtime's deep
    equality helper; all other comparisons map directly to a JS operator.

    Raises:
        Exception: if the comparison chains more than one operator.
    """
    if len(node.ops) > 1:
        raise Exception('Multiple operands are not supported.')
    operand = node.ops[0]
    is_equality = operand.op in ('eq', 'ne')
    left_hand_is_const = isinstance(node.expr, nodes.Const)
    right_hand_is_const = isinstance(operand.expr, nodes.Const)
    # If the operand is equality and neither the left or right hand side are constants then we
    # will need to use the JavaScript deep equals function. Ideally we want to avoid using this
    # as it is quite a big function.
    use_is_equal_function = is_equality and not (left_hand_is_const or right_hand_is_const)
    with option(kwargs, use_python_bool_wrapper=False):
        if use_is_equal_function:
            if operand.op == 'ne':
                # `a != b` -> `!__runtime.isEqual(a, b)`
                self.output.write('!')
            self.output.write('__runtime.isEqual(')
        self._process_node(node.expr, **kwargs)
        if use_is_equal_function:
            self.output.write(',')
        else:
            self.output.write(OPERANDS.get(operand.op))
        self._process_node(operand.expr, **kwargs)
        if use_is_equal_function:
            self.output.write(')')
def _process_operand(self, node, **kwargs):
    """Writes a comparison operand: the mapped JS operator, then the expression."""
    self.output.write(OPERANDS.get(node.op))
    self._process_node(node.expr, **kwargs)
def _process_const(self, node, **_):
    """Processes a constant by interpolating its JSON representation."""
    with self._interpolation():
        self.output.write(json.dumps(node.value))
def _process_nonetype(self, node, **_):
    """Processes Python `None` as JS `null`."""
    with self._interpolation():
        self.output.write('null')
def _process_neg(self, node, **kwargs):
    """Processes unary minus."""
    with self._interpolation():
        self.output.write('-')
        self._process_node(node.node, **kwargs)
def _process_list(self, node, **kwargs):
    """Processes a list literal as a JS array."""
    self.output.write('[')
    last = len(node.items) - 1
    for i, item in enumerate(node.items):
        self._process_node(item, **kwargs)
        if i != last:
            self.output.write(',')
    self.output.write(']')
def _process_test(self, node, **kwargs):
    """
    Dispatches a Jinja test, e.g. `{% if x is defined %}`, to the matching
    `_process_test_<name>` handler.

    Raises:
        Exception: for tests with no handler.
    """
    with option(kwargs, use_python_bool_wrapper=False):
        handler = getattr(self, '_process_test_%s' % node.name, None)
        if not callable(handler):
            raise Exception('Unsupported test: %s' % node.name)
        handler(node, **kwargs)
def _process_test_defined(self, node, **kwargs):
    """Translates `is defined` to a JS `typeof` check."""
    write = self.output.write
    write('(typeof ')
    self._process_node(node.node, **kwargs)
    write(' !== "undefined")')
def _process_test_undefined(self, node, **kwargs):
    """Translates `is undefined` to a strict comparison with `undefined`."""
    self._process_node(node.node, **kwargs)
    self.output.write(' === undefined')
def _process_test_callable(self, node, **kwargs):
    """Translates `is callable` to a runtime type check for `Function`."""
    write = self.output.write
    write('__runtime.type(')
    self._process_node(node.node, **kwargs)
    write(') === "Function"')
def _process_test_divisibleby(self, node, **kwargs):
    """Translates `is divisibleby(n)` to a JS modulo check."""
    self._process_node(node.node, **kwargs)
    self.output.write(' % ')
    self._process_node(node.args[0], **kwargs)
    self.output.write(' === 0')
def _process_test_even(self, node, **kwargs):
    """Translates `is even` to a JS modulo-2 check."""
    self._process_node(node.node, **kwargs)
    self.output.write(' % 2 === 0')
def _process_test_odd(self, node, **kwargs):
    """Translates `is odd` to a JS modulo-2 check."""
    self._process_node(node.node, **kwargs)
    self.output.write(' % 2 === 1')
def _process_test_none(self, node, **kwargs):
    """Translates `is none` to a strict comparison with `null`."""
    self._process_node(node.node, **kwargs)
    self.output.write(' === null')
def _process_test_upper(self, node, **kwargs):
    """Translates `is upper`: the value equals its upper-cased form."""
    self._process_node(node.node, **kwargs)
    self.output.write('.toUpperCase() === ')
    self._process_node(node.node, **kwargs)
def _process_test_lower(self, node, **kwargs):
    """Translates `is lower`: the value equals its lower-cased form."""
    self._process_node(node.node, **kwargs)
    self.output.write('.toLowerCase() === ')
    self._process_node(node.node, **kwargs)
def _process_test_string(self, node, **kwargs):
    """Translates `is string` to a runtime type check for `String`."""
    write = self.output.write
    write('__runtime.type(')
    self._process_node(node.node, **kwargs)
    write(') === "String"')
def _process_test_mapping(self, node, **kwargs):
    """Translates `is mapping` to a runtime type check for `Object`."""
    write = self.output.write
    write('__runtime.type(')
    self._process_node(node.node, **kwargs)
    write(') === "Object"')
def _process_test_number(self, node, **kwargs):
    """Translates `is number`: a `Number` that is not `NaN`."""
    write = self.output.write
    write('(__runtime.type(')
    self._process_node(node.node, **kwargs)
    write(') === "Number" && !isNaN(')
    self._process_node(node.node, **kwargs)
    write('))')
def _process_include(self, node, **kwargs):
    """
    Processes an include tag e.g. `{% include "template.html" %}`.

    The included template becomes a module dependency (except for the
    no-module format, which defers to `jinjaToJS.include` at runtime);
    a template including itself calls its own function recursively.
    """
    with self._interpolation(safe=True):
        include_path = node.template.value
        if include_path == self.template_name:
            # template is including itself
            include_var_name = self.js_function_name
        else:
            if self.include_prefix:
                include_path = self.include_prefix + node.template.value
            elif self.js_module_format in ('es6', 'commonjs',) and self.template_name:
                # work out a path to the included template relative to this one
                _, absolute_include_path, _ = self.environment.loader.get_source(
                    self.environment, node.template.value
                )
                include_path = os.path.relpath(
                    absolute_include_path, os.path.dirname(self.template_path)
                )
                if not include_path.startswith('.'):
                    include_path = './' + include_path
            include_path = path.splitext(include_path)[0] + self.include_ext
            include_var_name = self._get_depencency_var_name(include_path)
            if not include_var_name:
                # first time this template is included - register it
                include_var_name = self._add_dependency(include_path)
        if self.js_module_format is None:
            # no module system - resolve the include at runtime
            self.output.write('jinjaToJS.include("')
            self.output.write(include_path)
            self.output.write('");')
        else:
            # call the imported template function with the current context
            self.output.write(include_var_name)
            self.output.write('(')
            self.output.write(self.context_name)
            self.output.write(')')
def _process_add(self, node, **kwargs):
    """Processes `+` as JS addition."""
    self._process_math(node, math_operator=' + ', **kwargs)
def _process_sub(self, node, **kwargs):
    """Processes `-` as JS subtraction."""
    self._process_math(node, math_operator=' - ', **kwargs)
def _process_div(self, node, **kwargs):
    """Processes `/` as JS division."""
    self._process_math(node, math_operator=' / ', **kwargs)
def _process_floordiv(self, node, **kwargs):
    """Processes `//` as JS division wrapped in `Math.floor`."""
    self._process_math(node, math_operator=' / ', function='Math.floor', **kwargs)
def _process_mul(self, node, **kwargs):
    """Processes `*` as JS multiplication."""
    self._process_math(node, math_operator=' * ', **kwargs)
def _process_mod(self, node, **kwargs):
    """Processes `%` as the JS remainder operator."""
    self._process_math(node, math_operator=' % ', **kwargs)
def _process_math(self, node, math_operator=None, function=None, **kwargs):
    """
    Emits a binary math expression (`Div`, `Sub`, `Add`, `Mul`, ...),
    optionally wrapped in a call to `function` (e.g. `Math.floor` for
    floor division).
    """
    with self._interpolation():
        write = self.output.write
        if function:
            write(function)
            write('(')
        self._process_node(node.left, **kwargs)
        write(math_operator)
        self._process_node(node.right, **kwargs)
        if function:
            write(')')
def _process_loop_helper(self, node, **kwargs):
    """
    Processes a loop helper e.g. `{{ loop.first }}` or `{{ loop.index }}`.

    The JS iteration callback receives (value, index, iterable), so the
    helpers are expressed in terms of `arguments[1]` / `arguments[2]`.
    """
    helpers = {
        LOOP_HELPER_INDEX: '(arguments[1] + 1)',
        LOOP_HELPER_INDEX_0: 'arguments[1]',
        LOOP_HELPER_FIRST: '(arguments[1] == 0)',
        LOOP_HELPER_LAST: '(arguments[1] == arguments[2].length - 1)',
        LOOP_HELPER_LENGTH: 'arguments[2].length',
    }
    expression = helpers.get(node.attr)
    if expression is not None:
        self.output.write(expression)
def _process_args(self, node, **kwargs):
    """Processes a call's positional arguments as a comma separated list."""
    args = getattr(node, 'args', None) or []
    for i, arg in enumerate(args):
        if i:
            self.output.write(',')
        self._process_node(arg, **kwargs)
@contextlib.contextmanager
def _execution(self):
    """
    Context manager for executing some JavaScript inside a template.

    Only the outermost use actually switches state, so nested uses are
    no-ops. Yields a `close` function that resets the state early (used by
    `_process_if` for else/elif chains); exiting closes automatically.
    """
    did_start_executing = False
    if self.state == STATE_DEFAULT:
        did_start_executing = True
        self.state = STATE_EXECUTING
    def close():
        # only the manager that entered the executing state may leave it,
        # and only if nothing else has changed the state in between
        if did_start_executing and self.state == STATE_EXECUTING:
            self.state = STATE_DEFAULT
    yield close
    close()
@contextlib.contextmanager
def _interpolation(self, safe=False):
    """
    Context manager for interpolating a value into the rendered output.

    Opens an `__result += ...` expression (HTML-escaped via the runtime
    unless `safe` is True) and closes it on exit; nested uses are no-ops so
    a single expression is produced. Yields a `close` function for early
    termination.
    """
    did_start_interpolating = False
    if self.state == STATE_DEFAULT:
        did_start_interpolating = True
        self.output.write('__result += "" + ')
        if safe is not True:
            self.output.write('__runtime.escape')
        self.output.write('((__tmp = (')
        self.state = STATE_INTERPOLATING
    def close():
        if did_start_interpolating and self.state == STATE_INTERPOLATING:
            # null / undefined interpolate as the empty string
            self.output.write(')) == null ? "" : __tmp);')
            self.state = STATE_DEFAULT
    yield close
    close()
@contextlib.contextmanager
def _scoped_variables(self, nodes_list, **kwargs):
    """
    Context manager for creating scoped variables defined by the nodes in `nodes_list`.
    These variables will be added to the context, and when the context manager exits the
    context object will be restored to it's previous state.

    Each name's previous context value is stashed in a generated JS temp
    variable so shadowed values survive the scope.
    """
    tmp_vars = []
    for node in nodes_list:
        is_assign_node = isinstance(node, nodes.Assign)
        name = node.target.name if is_assign_node else node.name
        # create a temp variable name
        tmp_var = next(self.temp_var_names)
        # save previous context value
        with self._execution():
            # save the current value of this name
            self.output.write('var %s = %s.%s;' % (tmp_var, self.context_name, name))
            # add new value to context
            self.output.write('%s.%s = ' % (self.context_name, name))
            if is_assign_node:
                # `{% set %}` / `{% with x = ... %}` - emit the assigned expression
                self._process_node(node.node, **kwargs)
            else:
                # a plain loop target - the name refers to itself
                self.output.write(node.name)
            self.output.write(';')
        tmp_vars.append((tmp_var, name))
    yield
    # restore context
    for tmp_var, name in tmp_vars:
        with self._execution():
            self.output.write('%s.%s = %s;' % (self.context_name, name, tmp_var))
@contextlib.contextmanager
def _python_bool_wrapper(self, **kwargs):
    """
    Context manager that optionally wraps the emitted expression in
    `__runtime.boolean(...)` so JS truthiness matches Python's. The
    yielded kwargs have `use_python_bool_wrapper` switched off so nested
    sub-expressions are not double-wrapped.
    """
    use_python_bool_wrapper = kwargs.get('use_python_bool_wrapper')
    if use_python_bool_wrapper:
        self.output.write('__runtime.boolean(')
    with option(kwargs, use_python_bool_wrapper=False):
        yield kwargs
    if use_python_bool_wrapper:
        self.output.write(')')
|
jonbretman/jinja-to-js
|
jinja_to_js/__init__.py
|
JinjaToJS._process_name
|
python
|
def _process_name(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs):
if node.name not in self.stored_names and node.ctx != 'store':
self.output.write(self.context_name)
self.output.write('.')
if node.ctx == 'store':
self.stored_names.add(node.name)
self.output.write(node.name)
|
Processes a `Name` node. Some examples of `Name` nodes:
{{ foo }} -> 'foo' is a Name
{% if foo }} -> 'foo' is a Name
|
train
|
https://github.com/jonbretman/jinja-to-js/blob/0a784b10a83d37a3171c5797547e9fc460c51289/jinja_to_js/__init__.py#L405-L422
| null |
class JinjaToJS(object):
def __init__(self,
             template_root,
             template_name,
             js_module_format=None,
             runtime_path='jinja-to-js',
             include_prefix='',
             include_ext='',
             child_blocks=None,
             dependencies=None,
             custom_filters=None):
    """
    Args:
        template_root (str): The path to where templates should be loaded from.
        template_name (str): The name of the template to compile (relative to `template_root`).
        js_module_format (str, optional): The JavaScript module format to use.
                                          One of ('amd', 'commonjs', 'es6')
        runtime_path (str, optional): If `js_module_format` is specified then the JavaScript
                                      runtime will be imported using the appropriate method.
                                      It defaults to assuming it will be imported from
                                      `node_modules` but you can change it using this option.
        include_prefix (str, optional): If using the `amd` module format you can use this option
                                        to add a prefix to every include path as AMD imports are
                                        generally relative to the main file, not the module
                                        importing.
        include_ext (str, optional): By default any includes will be references without an
                                     extension, as neither AMD, commonJS or ES6 require the
                                     '.js' extension. If you want to use an extension, say
                                     '.template' then set this option to a string including
                                     the leading '.'
        child_blocks (dict, optional): Used internally when handling templates that extend
                                       other templates.
        dependencies (list of tuple, optional): Used internally when handling templates that
                                                extend other templates.
        custom_filters (list of str, optional): List of custom filters which should be allowed.
                                                These may be filters supported by Jinja but not
                                                supported by jinja-to-js. These filters MUST be
                                                registered with the jinja-to-js JS runtime.
    """
    self.environment = Environment(loader=FileSystemLoader(template_root),
                                   autoescape=True,
                                   extensions=['jinja2.ext.with_', 'jinja2.ext.autoescape'])
    self.output = six.StringIO()
    self.stored_names = set()
    self.temp_var_names = temp_var_names_generator()
    self.state = STATE_DEFAULT
    self.child_blocks = child_blocks or {}
    self.dependencies = dependencies or []
    self._runtime_function_cache = []
    self.js_module_format = js_module_format
    self.runtime_path = runtime_path
    self.include_prefix = include_prefix
    self.include_ext = include_ext
    self.template_root = template_root
    self.template_name = template_name
    self.custom_filters = custom_filters or []
    # The name of the JavaScript function that will output this template. By using a named
    # function the template can call itself which is required to support recursive includes.
    self.js_function_name = 'template' + ''.join(
        x.title() for x in re.split(r'[^\w]|_', path.splitext(self.template_name)[0])
    )
    self.context_name = 'context'
    # the runtime is always a dependency
    self._add_dependency(self.runtime_path, 'jinjaToJS')
    template_string, template_path, _ = self.environment.loader.get_source(
        self.environment, self.template_name
    )
    # It is assumed that this will be the absolute path to the template. It is used to work out
    # related paths for includes.
    self.template_path = template_path
    if self.js_module_format not in JS_MODULE_FORMATS.keys():
        raise ValueError(
            'The js_module_format option must be one of: %s' % JS_MODULE_FORMATS.keys()
        )
    self.ast = self.environment.parse(template_string)
    try:
        # compilation happens eagerly in the constructor; the result is
        # collected from `self.output` via `get_output()`
        for node in self.ast.body:
            self._process_node(node)
    except ExtendsException:
        # raised by `_process_extends` once the parent template has been
        # rendered - processing of this (child) template stops there
        pass
def get_output(self):
    """
    Returns the generated JavaScript code.

    Returns:
        str
    """
    # wrap the accumulated template code in a named JS function
    template_function = TEMPLATE_WRAPPER.format(
        function_name=self.js_function_name,
        template_code=self.output.getvalue()
    ).strip()
    # emit it in the configured module format together with its dependencies
    return JS_MODULE_FORMATS[self.js_module_format](self.dependencies, template_function)
def _get_depencency_var_name(self, dependency):
"""
Returns the variable name assigned to the given dependency or None if the dependency has
not yet been registered.
Args:
dependency (str): Thet dependency that needs to be imported.
Returns:
str or None
"""
for dep_path, var_name in self.dependencies:
if dep_path == dependency:
return var_name
def _add_dependency(self, dependency, var_name=None):
"""
Adds the given dependency and returns the variable name to use to access it. If `var_name`
is not given then a random one will be created.
Args:
dependency (str):
var_name (str, optional):
Returns:
str
"""
if var_name is None:
var_name = next(self.temp_var_names)
# Don't add duplicate dependencies
if (dependency, var_name) not in self.dependencies:
self.dependencies.append((dependency, var_name))
return var_name
def _process_node(self, node, **kwargs):
node_name = node.__class__.__name__.lower()
handler = getattr(self, '_process_' + node_name, None)
if callable(handler):
handler(node, **kwargs)
else:
raise Exception('Unknown node %s' % node)
def _process_extends(self, node, **kwargs):
    """
    Processes an extends block e.g. `{% extends "some/template.jinja" %}`

    Collects this template's blocks into `child_blocks`, compiles the parent
    template (which renders those child blocks in place of its own), and then
    aborts parsing of the current template.

    Raises:
        ExtendsException: always, to stop processing the child template.
    """
    # find all the blocks in this template
    for b in self.ast.find_all(nodes.Block):
        # if not already in `child_blocks` then this is the first time a
        # block with this name has been encountered.
        if b.name not in self.child_blocks:
            self.child_blocks[b.name] = b
        else:
            # otherwise we have seen this block before, so we need to find the last
            # super_block and add the block from this template to the end.
            block = self.child_blocks.get(b.name)
            while hasattr(block, 'super_block'):
                block = block.super_block
            block.super_block = b
    # load the parent template
    # NOTE: `child_blocks` and `dependencies` are shared (not copied), so the
    # parent compiler sees this template's overrides and records its imports
    # into the same dependency list.
    parent_template = JinjaToJS(template_root=self.template_root,
                                template_name=node.template.value,
                                js_module_format=self.js_module_format,
                                runtime_path=self.runtime_path,
                                include_prefix=self.include_prefix,
                                include_ext=self.include_ext,
                                child_blocks=self.child_blocks,
                                dependencies=self.dependencies)
    # add the parent templates output to the current output
    self.output.write(parent_template.output.getvalue())
    # Raise an exception so we stop parsing this template
    raise ExtendsException
def _process_block(self, node, **kwargs):
"""
Processes a block e.g. `{% block my_block %}{% endblock %}`
"""
# check if this node already has a 'super_block' attribute
if not hasattr(node, 'super_block'):
# since it doesn't it must be the last block in the inheritance chain
node.super_block = None
# see if there has been a child block defined - if there is this
# will be the first block in the inheritance chain
child_block = self.child_blocks.get(node.name)
if child_block:
# we have child nodes so we need to set `node` as the
# super of the last one in the chain
last_block = child_block
while hasattr(last_block, 'super_block'):
last_block = child_block.super_block
# once we have found it, set this node as it's super block
last_block.super_block = node
# this is the node we want to process as it's the first in the inheritance chain
node = child_block
# process the block passing the it's super along, if this block
# calls super() it will be handled by `_process_call`
for n in node.body:
self._process_node(n, super_block=node.super_block, **kwargs)
def _process_output(self, node, **kwargs):
"""
Processes an output node, which will contain things like `Name` and `TemplateData` nodes.
"""
for n in node.nodes:
self._process_node(n, **kwargs)
def _process_templatedata(self, node, **_):
"""
Processes a `TemplateData` node, this is just a bit of as-is text
to be written to the output.
"""
# escape double quotes
value = re.sub('"', r'\\"', node.data)
# escape new lines
value = re.sub('\n', r'\\n', value)
# append value to the result
self.output.write('__result += "' + value + '";')
def _process_getattr(self, node, **kwargs):
    """Processes a `GetAttr` node, e.g. `{{ foo.bar }}`."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as attr_kwargs:
            if is_loop_helper(node):
                # special-case `loop.<helper>` attributes
                self._process_loop_helper(node, **attr_kwargs)
            else:
                self._process_node(node.node, **attr_kwargs)
                self.output.write('.')
                self.output.write(node.attr)
def _process_getitem(self, node, **kwargs):
    """
    Processes a `GetItem` node e.g. {{ foo["bar"] }}

    Python slices (`foo[1:2]`) are translated to `Array.prototype.slice`
    calls; plain subscripts become JS bracket access.

    Raises:
        Exception: if the slice uses a step, which has no JS equivalent.
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.node, **new_kwargs)
            if isinstance(node.arg, nodes.Slice):
                self.output.write('.slice(')
                if node.arg.step is not None:
                    raise Exception('The step argument is not supported when slicing.')
                if node.arg.start is None:
                    # no start -> slice from the beginning
                    self.output.write('0')
                else:
                    self._process_node(node.arg.start, **new_kwargs)
                if node.arg.stop is None:
                    # no stop -> slice to the end; close the call here
                    self.output.write(')')
                else:
                    self.output.write(',')
                    self._process_node(node.arg.stop, **new_kwargs)
                    self.output.write(')')
            else:
                self.output.write('[')
                self._process_node(node.arg, **new_kwargs)
                self.output.write(']')
def _process_for(self, node, **kwargs):
    """
    Processes a for loop. e.g.
        {% for number in numbers %}
            {{ number }}
        {% endfor %}
        {% for key, value in somemap.items() %}
            {{ key }} -> {{ value }}
        {% endfor %}

    The loop is emitted as a call to the runtime's ``each`` helper with an
    inline callback function.
    """
    # since a for loop can introduce new names into the context
    # we need to remember the ones that existed outside the loop
    previous_stored_names = self.stored_names.copy()
    with self._execution():
        self.output.write('__runtime.each(')
        # iterating dict.keys() maps onto Object.keys(...) in JS
        if is_method_call(node.iter, dict.keys.__name__):
            self.output.write('Object.keys(')
        self._process_node(node.iter, **kwargs)
        if is_method_call(node.iter, dict.keys.__name__):
            self.output.write(')')
        self.output.write(',')
        self.output.write('function')
        self.output.write('(')
        # javascript iterations put the value first, then the key
        if isinstance(node.target, nodes.Tuple):
            if len(node.target.items) > 2:
                raise Exception('De-structuring more than 2 items is not supported.')
            for i, item in enumerate(reversed(node.target.items)):
                self._process_node(item, **kwargs)
                if i < len(node.target.items) - 1:
                    self.output.write(',')
        else:
            self._process_node(node.target, **kwargs)
        self.output.write(')')
        self.output.write('{')
        if node.test:
            # {% for x in xs if cond %} -> skip items that fail the test
            self.output.write('if (!(')
            self._process_node(node.test, **kwargs)
            self.output.write(')) { return; }')
    assigns = node.target.items if isinstance(node.target, nodes.Tuple) else [node.target]
    # expose the loop variable(s) on the context while the body renders
    with self._scoped_variables(assigns, **kwargs):
        for n in node.body:
            self._process_node(n, **kwargs)
    with self._execution():
        self.output.write('}')
        self.output.write(')')
        self.output.write(';')
    # restore the stored names
    self.stored_names = previous_stored_names
def _process_if(self, node, execute_end=None, **kwargs):
    """
    Processes an if block e.g. `{% if foo %} do something {% endif %}`

    Args:
        node: the Jinja ``If`` node.
        execute_end (callable, optional): closes an execution block opened by
            a previous (recursive) invocation when handling elif chains.
    """
    with self._execution():
        self.output.write('if')
        self.output.write('(')
        with option(kwargs, use_python_bool_wrapper=True):
            self._process_node(node.test, **kwargs)
        self.output.write(')')
        self.output.write('{')
        # We accept an `execute_end` function as a keyword argument as this function is
        # recursive in the case of something like if-elif-elif-else. In these cases this
        # invocation of this function may have to close execution opened by a previous
        # invocation of this function.
        if execute_end:
            execute_end()
    # body
    for n in node.body:
        self._process_node(n, **kwargs)
    if not node.else_ and not node.elif_:
        # no else - just close the if
        with self._execution():
            self.output.write('}')
    else:
        # either an else or an elif
        with self._execution() as execute_end:
            self.output.write('}')
            self.output.write(' else ')
            # check for elif
            for n in node.elif_:
                self._process_node(n, execute_end=execute_end, **kwargs)
            if node.elif_ and node.else_:
                self.output.write(' else ')
            # open up the body
            self.output.write('{')
        # process the body of the else
        for n in node.else_:
            self._process_node(n, **kwargs)
        # close the body
        with self._execution():
            self.output.write('}')
def _process_condexpr(self, node, **kwargs):
with self._interpolation():
self.output.write('(')
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.test, **new_kwargs)
self.output.write(' ? ')
self._process_node(node.expr1, **kwargs)
self.output.write(' : ')
self._process_node(node.expr2, **kwargs)
self.output.write(')')
def _process_not(self, node, **kwargs):
self.output.write('!')
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
def _process_or(self, node, **kwargs):
self._process_node(node.left, **kwargs)
self.output.write(' || ')
self._process_node(node.right, **kwargs)
def _process_and(self, node, **kwargs):
self._process_node(node.left, **kwargs)
self.output.write(' && ')
self._process_node(node.right, **kwargs)
def _process_tuple(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
def _process_call(self, node, super_block=None, **kwargs):
    """
    Processes a call node e.g. {{ foo() }} or {{ super() }}.

    Dict iteration methods (keys/values/items) are unwrapped so the loop
    machinery can handle the underlying mapping; ``super()`` renders the
    parent block; any other call is emitted as a JS function call.

    Raises:
        Exception: if super() is used outside a block with a parent.
    """
    if is_method_call(node, DICT_ITER_METHODS):
        # special case for dict methods
        self._process_node(node.node.node, **kwargs)
    elif is_method_call(node, 'super'):
        # special case for the super() method which is available inside blocks
        if not super_block:
            raise Exception('super() called outside of a block with a parent.')
        self._process_node(super_block, **kwargs)
    else:
        # just a normal function call on a context variable
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self._process_node(node.node, **new_kwargs)
                self.output.write('(')
                self._process_args(node, **new_kwargs)
                self.output.write(')')
        # only output the semi-colon if we are not interpolating
        if self.state != STATE_INTERPOLATING:
            # BUG FIX: previously wrote '' (a no-op), contradicting the
            # comment above; actually terminate the statement.
            self.output.write(';')
def _process_filter(self, node, **kwargs):
method_name = getattr(self, '_process_filter_%s' % node.name, None)
if callable(method_name):
method_name(node, **kwargs)
elif node.name in self.custom_filters:
with self._interpolation(safe=True):
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.%s(' % node.name)
self._process_node(node.node, **new_kwargs)
if getattr(node, 'args', None):
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
else:
raise Exception('Unsupported filter: %s' % node.name)
def _process_filter_safe(self, node, **kwargs):
with self._interpolation(safe=True):
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
def _process_filter_capitalize(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.capitalize(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_abs(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('Math.abs(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_attr(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
self.output.write('[')
self._process_node(node.args[0], **new_kwargs)
self.output.write(']')
def _process_filter_batch(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.batch(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_default(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.default(')
self._process_node(node.node, **new_kwargs)
if node.args:
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_first(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.first(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_int(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.int(')
self._process_node(node.node, **new_kwargs)
if node.args:
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_last(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.last(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_length(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.size(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_lower(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").toLowerCase()')
def _process_filter_slice(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.slice(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_title(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.title(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_trim(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").trim()')
def _process_filter_upper(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").toUpperCase()')
def _process_filter_truncate(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.truncate(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_assign(self, node, **kwargs):
with self._execution():
self.output.write('var ')
self._process_node(node.target, **kwargs)
self.output.write(' = ')
self._process_node(node.node, **kwargs)
self.output.write(';')
def _process_with(self, node, **kwargs):
    """
    Processes a `with` block e.g. {% with foo = 'bar' %}...{% endwith %}.

    The body is wrapped in an immediately-invoked JS function so that the
    scoped assignments do not leak into the surrounding template.
    """
    # keep a copy of the stored names before the scope
    previous_stored_names = self.stored_names.copy()
    # assigns in the with tag
    # e.g. {% with var = "something %}
    assigns_in_tag = [nodes.Assign(t, v) for t, v in zip(node.targets, node.values)]
    # assigns in the with body
    # e.g. {% set name = 'John' %}
    assigns_in_body = [x for x in node.body if isinstance(x, nodes.Assign)]
    # remove assigns from the body
    node.body = [x for x in node.body if not isinstance(x, nodes.Assign)]
    # get a list of all the assigns in this with block
    # both on the tag, and within the body of the block
    all_assigns = assigns_in_tag + assigns_in_body
    with self._execution():
        self.output.write('(function () {')
    with self._scoped_variables(all_assigns, **kwargs):
        # BUG FIX (idiom): use a distinct loop variable; the original
        # `for node in node.body` shadowed the `node` parameter.
        for body_node in node.body:
            self._process_node(body_node, **kwargs)
    with self._execution():
        self.output.write('})();')
    # restore previous stored names
    self.stored_names = previous_stored_names
def _process_compare(self, node, **kwargs):
if len(node.ops) > 1:
raise Exception('Multiple operands are not supported.')
operand = node.ops[0]
is_equality = operand.op in ('eq', 'ne')
left_hand_is_const = isinstance(node.expr, nodes.Const)
right_hand_is_const = isinstance(operand.expr, nodes.Const)
# If the operand is equality and neither the left or right hand side are constants then we
# will need to use the JavaScript deep equals function. Ideally we want to avoid using this
# as it is quite a big function.
use_is_equal_function = is_equality and not (left_hand_is_const or right_hand_is_const)
with option(kwargs, use_python_bool_wrapper=False):
if use_is_equal_function:
if operand.op == 'ne':
self.output.write('!')
self.output.write('__runtime.isEqual(')
self._process_node(node.expr, **kwargs)
if use_is_equal_function:
self.output.write(',')
else:
self.output.write(OPERANDS.get(operand.op))
self._process_node(operand.expr, **kwargs)
if use_is_equal_function:
self.output.write(')')
def _process_operand(self, node, **kwargs):
self.output.write(OPERANDS.get(node.op))
self._process_node(node.expr, **kwargs)
def _process_const(self, node, **_):
with self._interpolation():
self.output.write(json.dumps(node.value))
def _process_nonetype(self, node, **_):
with self._interpolation():
self.output.write('null')
def _process_neg(self, node, **kwargs):
with self._interpolation():
self.output.write('-')
self._process_node(node.node, **kwargs)
def _process_list(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
def _process_test(self, node, **kwargs):
with option(kwargs, use_python_bool_wrapper=False):
method_name = getattr(self, '_process_test_%s' % node.name, None)
if callable(method_name):
method_name(node, **kwargs)
else:
raise Exception('Unsupported test: %s' % node.name)
def _process_test_defined(self, node, **kwargs):
self.output.write('(typeof ')
self._process_node(node.node, **kwargs)
self.output.write(' !== "undefined")')
def _process_test_undefined(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' === undefined')
def _process_test_callable(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Function"')
def _process_test_divisibleby(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % ')
self._process_node(node.args[0], **kwargs)
self.output.write(' === 0')
def _process_test_even(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % 2 === 0')
def _process_test_odd(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % 2 === 1')
def _process_test_none(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' === null')
def _process_test_upper(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write('.toUpperCase() === ')
self._process_node(node.node, **kwargs)
def _process_test_lower(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write('.toLowerCase() === ')
self._process_node(node.node, **kwargs)
def _process_test_string(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "String"')
def _process_test_mapping(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Object"')
def _process_test_number(self, node, **kwargs):
self.output.write('(__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Number" && !isNaN(')
self._process_node(node.node, **kwargs)
self.output.write('))')
def _process_include(self, node, **kwargs):
with self._interpolation(safe=True):
include_path = node.template.value
if include_path == self.template_name:
# template is including itself
include_var_name = self.js_function_name
else:
if self.include_prefix:
include_path = self.include_prefix + node.template.value
elif self.js_module_format in ('es6', 'commonjs',) and self.template_name:
_, absolute_include_path, _ = self.environment.loader.get_source(
self.environment, node.template.value
)
include_path = os.path.relpath(
absolute_include_path, os.path.dirname(self.template_path)
)
if not include_path.startswith('.'):
include_path = './' + include_path
include_path = path.splitext(include_path)[0] + self.include_ext
include_var_name = self._get_depencency_var_name(include_path)
if not include_var_name:
include_var_name = self._add_dependency(include_path)
if self.js_module_format is None:
self.output.write('jinjaToJS.include("')
self.output.write(include_path)
self.output.write('");')
else:
self.output.write(include_var_name)
self.output.write('(')
self.output.write(self.context_name)
self.output.write(')')
def _process_add(self, node, **kwargs):
self._process_math(node, math_operator=' + ', **kwargs)
def _process_sub(self, node, **kwargs):
self._process_math(node, math_operator=' - ', **kwargs)
def _process_div(self, node, **kwargs):
self._process_math(node, math_operator=' / ', **kwargs)
def _process_floordiv(self, node, **kwargs):
self._process_math(node, math_operator=' / ', function='Math.floor', **kwargs)
def _process_mul(self, node, **kwargs):
self._process_math(node, math_operator=' * ', **kwargs)
def _process_mod(self, node, **kwargs):
self._process_math(node, math_operator=' % ', **kwargs)
def _process_math(self, node, math_operator=None, function=None, **kwargs):
"""
Processes a math node e.g. `Div`, `Sub`, `Add`, `Mul` etc...
If `function` is provided the expression is wrapped in a call to that function.
"""
with self._interpolation():
if function:
self.output.write(function)
self.output.write('(')
self._process_node(node.left, **kwargs)
self.output.write(math_operator)
self._process_node(node.right, **kwargs)
if function:
self.output.write(')')
def _process_loop_helper(self, node, **kwargs):
"""
Processes a loop helper e.g. {{ loop.first }} or {{ loop.index }}
"""
if node.attr == LOOP_HELPER_INDEX:
self.output.write('(arguments[1] + 1)')
elif node.attr == LOOP_HELPER_INDEX_0:
self.output.write('arguments[1]')
elif node.attr == LOOP_HELPER_FIRST:
self.output.write('(arguments[1] == 0)')
elif node.attr == LOOP_HELPER_LAST:
self.output.write('(arguments[1] == arguments[2].length - 1)')
elif node.attr == LOOP_HELPER_LENGTH:
self.output.write('arguments[2].length')
def _process_args(self, node, **kwargs):
    """Emit a comma-separated argument list for a call/filter node (no parens)."""
    args = getattr(node, 'args', None)
    if not args:
        return
    # CONSISTENCY FIX: use the local `args` throughout (the original mixed
    # `args` and `node.args`) and hoist the length computation out of the loop.
    last = len(args) - 1
    for i, item in enumerate(args):
        self._process_node(item, **kwargs)
        if i < last:
            self.output.write(',')
@contextlib.contextmanager
def _execution(self):
    """
    Context manager for executing some JavaScript inside a template.

    Tracks whether this invocation is the outermost execution block; only
    the invocation that entered STATE_EXECUTING resets it back to
    STATE_DEFAULT, so nested uses are no-ops.
    """
    did_start_executing = False
    if self.state == STATE_DEFAULT:
        did_start_executing = True
        self.state = STATE_EXECUTING
    def close():
        # only the outermost invocation may leave the executing state
        if did_start_executing and self.state == STATE_EXECUTING:
            self.state = STATE_DEFAULT
    # yield close so callers (e.g. _process_if) can end execution early
    yield close
    close()
@contextlib.contextmanager
def _interpolation(self, safe=False):
did_start_interpolating = False
if self.state == STATE_DEFAULT:
did_start_interpolating = True
self.output.write('__result += "" + ')
if safe is not True:
self.output.write('__runtime.escape')
self.output.write('((__tmp = (')
self.state = STATE_INTERPOLATING
def close():
if did_start_interpolating and self.state == STATE_INTERPOLATING:
self.output.write(')) == null ? "" : __tmp);')
self.state = STATE_DEFAULT
yield close
close()
@contextlib.contextmanager
def _scoped_variables(self, nodes_list, **kwargs):
"""
Context manager for creating scoped variables defined by the nodes in `nodes_list`.
These variables will be added to the context, and when the context manager exits the
context object will be restored to it's previous state.
"""
tmp_vars = []
for node in nodes_list:
is_assign_node = isinstance(node, nodes.Assign)
name = node.target.name if is_assign_node else node.name
# create a temp variable name
tmp_var = next(self.temp_var_names)
# save previous context value
with self._execution():
# save the current value of this name
self.output.write('var %s = %s.%s;' % (tmp_var, self.context_name, name))
# add new value to context
self.output.write('%s.%s = ' % (self.context_name, name))
if is_assign_node:
self._process_node(node.node, **kwargs)
else:
self.output.write(node.name)
self.output.write(';')
tmp_vars.append((tmp_var, name))
yield
# restore context
for tmp_var, name in tmp_vars:
with self._execution():
self.output.write('%s.%s = %s;' % (self.context_name, name, tmp_var))
@contextlib.contextmanager
def _python_bool_wrapper(self, **kwargs):
use_python_bool_wrapper = kwargs.get('use_python_bool_wrapper')
if use_python_bool_wrapper:
self.output.write('__runtime.boolean(')
with option(kwargs, use_python_bool_wrapper=False):
yield kwargs
if use_python_bool_wrapper:
self.output.write(')')
|
jonbretman/jinja-to-js
|
jinja_to_js/__init__.py
|
JinjaToJS._process_getattr
|
python
|
def _process_getattr(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
if is_loop_helper(node):
self._process_loop_helper(node, **new_kwargs)
else:
self._process_node(node.node, **new_kwargs)
self.output.write('.')
self.output.write(node.attr)
|
Processes a `GetAttr` node. e.g. {{ foo.bar }}
|
train
|
https://github.com/jonbretman/jinja-to-js/blob/0a784b10a83d37a3171c5797547e9fc460c51289/jinja_to_js/__init__.py#L424-L436
| null |
class JinjaToJS(object):
def __init__(self,
template_root,
template_name,
js_module_format=None,
runtime_path='jinja-to-js',
include_prefix='',
include_ext='',
child_blocks=None,
dependencies=None,
custom_filters=None):
"""
Args:
template_root (str): The path to where templates should be loaded from.
template_name (str): The name of the template to compile (relative to `template_root`).
js_module_format (str, optional): The JavaScript module format to use.
One of ('amd', 'commonjs', 'es6')
runtime_path (str, optional): If `js_module_format` is specified then the JavaScript
runtime will be imported using the appropriate method.
It defaults to assuming it will be imported from
`node_modules` but you can change it using this option.
include_prefix (str, optional): If using the `amd` module format you can use this option
to add a prefix to every include path as AMD imports are
generally relative to the main file, not the module
importing.
include_ext (str, optional): By default any includes will be references without an
extension, as neither AMD, commonJS or ES6 require the
'.js' extension. If you want to use an extension, say
'.template' then set this option to a string including
the leading '.'
child_blocks (dict, optional): Used internally when handling templates that extend
other templates.
dependencies (list of tuple, optional): Used internally when handling templates that
extend other templates.
custom_filters (list of str, optional): List of custom filters which should be allowed.
These may be filters supported by Jinja but not
supported by jinja-to-js. These filters MUST be
registered with the jinja-to-js JS runtime.
"""
self.environment = Environment(loader=FileSystemLoader(template_root),
autoescape=True,
extensions=['jinja2.ext.with_', 'jinja2.ext.autoescape'])
self.output = six.StringIO()
self.stored_names = set()
self.temp_var_names = temp_var_names_generator()
self.state = STATE_DEFAULT
self.child_blocks = child_blocks or {}
self.dependencies = dependencies or []
self._runtime_function_cache = []
self.js_module_format = js_module_format
self.runtime_path = runtime_path
self.include_prefix = include_prefix
self.include_ext = include_ext
self.template_root = template_root
self.template_name = template_name
self.custom_filters = custom_filters or []
# The name of the JavaScript function that will output this template. By using a named
# function the template can call itself which is required to support recursive includes.
self.js_function_name = 'template' + ''.join(
x.title() for x in re.split(r'[^\w]|_', path.splitext(self.template_name)[0])
)
self.context_name = 'context'
self._add_dependency(self.runtime_path, 'jinjaToJS')
template_string, template_path, _ = self.environment.loader.get_source(
self.environment, self.template_name
)
# It is assumed that this will be the absolute path to the template. It is used to work out
# related paths for inclues.
self.template_path = template_path
if self.js_module_format not in JS_MODULE_FORMATS.keys():
raise ValueError(
'The js_module_format option must be one of: %s' % JS_MODULE_FORMATS.keys()
)
self.ast = self.environment.parse(template_string)
try:
for node in self.ast.body:
self._process_node(node)
except ExtendsException:
pass
def get_output(self):
"""
Returns the generated JavaScript code.
Returns:
str
"""
# generate the JS function string
template_function = TEMPLATE_WRAPPER.format(
function_name=self.js_function_name,
template_code=self.output.getvalue()
).strip()
# get the correct module format template
module_format = JS_MODULE_FORMATS[self.js_module_format]
# generate the module code
return module_format(self.dependencies, template_function)
def _get_depencency_var_name(self, dependency):
"""
Returns the variable name assigned to the given dependency or None if the dependency has
not yet been registered.
Args:
dependency (str): Thet dependency that needs to be imported.
Returns:
str or None
"""
for dep_path, var_name in self.dependencies:
if dep_path == dependency:
return var_name
def _add_dependency(self, dependency, var_name=None):
"""
Adds the given dependency and returns the variable name to use to access it. If `var_name`
is not given then a random one will be created.
Args:
dependency (str):
var_name (str, optional):
Returns:
str
"""
if var_name is None:
var_name = next(self.temp_var_names)
# Don't add duplicate dependencies
if (dependency, var_name) not in self.dependencies:
self.dependencies.append((dependency, var_name))
return var_name
def _process_node(self, node, **kwargs):
node_name = node.__class__.__name__.lower()
handler = getattr(self, '_process_' + node_name, None)
if callable(handler):
handler(node, **kwargs)
else:
raise Exception('Unknown node %s' % node)
def _process_extends(self, node, **kwargs):
"""
Processes an extends block e.g. `{% extends "some/template.jinja" %}`
"""
# find all the blocks in this template
for b in self.ast.find_all(nodes.Block):
# if not already in `child_blocks` then this is the first time a
# block with this name has been encountered.
if b.name not in self.child_blocks:
self.child_blocks[b.name] = b
else:
# otherwise we have seen this block before, so we need to find the last
# super_block and add the block from this template to the end.
block = self.child_blocks.get(b.name)
while hasattr(block, 'super_block'):
block = block.super_block
block.super_block = b
# load the parent template
parent_template = JinjaToJS(template_root=self.template_root,
template_name=node.template.value,
js_module_format=self.js_module_format,
runtime_path=self.runtime_path,
include_prefix=self.include_prefix,
include_ext=self.include_ext,
child_blocks=self.child_blocks,
dependencies=self.dependencies)
# add the parent templates output to the current output
self.output.write(parent_template.output.getvalue())
# Raise an exception so we stop parsing this template
raise ExtendsException
def _process_block(self, node, **kwargs):
"""
Processes a block e.g. `{% block my_block %}{% endblock %}`
"""
# check if this node already has a 'super_block' attribute
if not hasattr(node, 'super_block'):
# since it doesn't it must be the last block in the inheritance chain
node.super_block = None
# see if there has been a child block defined - if there is this
# will be the first block in the inheritance chain
child_block = self.child_blocks.get(node.name)
if child_block:
# we have child nodes so we need to set `node` as the
# super of the last one in the chain
last_block = child_block
while hasattr(last_block, 'super_block'):
last_block = child_block.super_block
# once we have found it, set this node as it's super block
last_block.super_block = node
# this is the node we want to process as it's the first in the inheritance chain
node = child_block
# process the block passing the it's super along, if this block
# calls super() it will be handled by `_process_call`
for n in node.body:
self._process_node(n, super_block=node.super_block, **kwargs)
def _process_output(self, node, **kwargs):
"""
Processes an output node, which will contain things like `Name` and `TemplateData` nodes.
"""
for n in node.nodes:
self._process_node(n, **kwargs)
def _process_templatedata(self, node, **_):
"""
Processes a `TemplateData` node, this is just a bit of as-is text
to be written to the output.
"""
# escape double quotes
value = re.sub('"', r'\\"', node.data)
# escape new lines
value = re.sub('\n', r'\\n', value)
# append value to the result
self.output.write('__result += "' + value + '";')
def _process_name(self, node, **kwargs):
"""
Processes a `Name` node. Some examples of `Name` nodes:
{{ foo }} -> 'foo' is a Name
{% if foo }} -> 'foo' is a Name
"""
with self._interpolation():
with self._python_bool_wrapper(**kwargs):
if node.name not in self.stored_names and node.ctx != 'store':
self.output.write(self.context_name)
self.output.write('.')
if node.ctx == 'store':
self.stored_names.add(node.name)
self.output.write(node.name)
def _process_getitem(self, node, **kwargs):
"""
Processes a `GetItem` node e.g. {{ foo["bar"] }}
"""
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
if isinstance(node.arg, nodes.Slice):
self.output.write('.slice(')
if node.arg.step is not None:
raise Exception('The step argument is not supported when slicing.')
if node.arg.start is None:
self.output.write('0')
else:
self._process_node(node.arg.start, **new_kwargs)
if node.arg.stop is None:
self.output.write(')')
else:
self.output.write(',')
self._process_node(node.arg.stop, **new_kwargs)
self.output.write(')')
else:
self.output.write('[')
self._process_node(node.arg, **new_kwargs)
self.output.write(']')
def _process_for(self, node, **kwargs):
"""
Processes a for loop. e.g.
{% for number in numbers %}
{{ number }}
{% endfor %}
{% for key, value in somemap.items() %}
{{ key }} -> {{ value }}
{% %}
"""
# since a for loop can introduce new names into the context
# we need to remember the ones that existed outside the loop
previous_stored_names = self.stored_names.copy()
with self._execution():
self.output.write('__runtime.each(')
if is_method_call(node.iter, dict.keys.__name__):
self.output.write('Object.keys(')
self._process_node(node.iter, **kwargs)
if is_method_call(node.iter, dict.keys.__name__):
self.output.write(')')
self.output.write(',')
self.output.write('function')
self.output.write('(')
# javascript iterations put the value first, then the key
if isinstance(node.target, nodes.Tuple):
if len(node.target.items) > 2:
raise Exception('De-structuring more than 2 items is not supported.')
for i, item in enumerate(reversed(node.target.items)):
self._process_node(item, **kwargs)
if i < len(node.target.items) - 1:
self.output.write(',')
else:
self._process_node(node.target, **kwargs)
self.output.write(')')
self.output.write('{')
if node.test:
self.output.write('if (!(')
self._process_node(node.test, **kwargs)
self.output.write(')) { return; }')
assigns = node.target.items if isinstance(node.target, nodes.Tuple) else [node.target]
with self._scoped_variables(assigns, **kwargs):
for n in node.body:
self._process_node(n, **kwargs)
with self._execution():
self.output.write('}')
self.output.write(')')
self.output.write(';')
# restore the stored names
self.stored_names = previous_stored_names
def _process_if(self, node, execute_end=None, **kwargs):
"""
Processes an if block e.g. `{% if foo %} do something {% endif %}`
"""
with self._execution():
self.output.write('if')
self.output.write('(')
with option(kwargs, use_python_bool_wrapper=True):
self._process_node(node.test, **kwargs)
self.output.write(')')
self.output.write('{')
# We accept an `execute_end` function as a keyword argument as this function is
# recursive in the case of something like if-elif-elif-else. In these cases this
# invocation of this function may have to close execution opened by a previous
# invocation of this function.
if execute_end:
execute_end()
# body
for n in node.body:
self._process_node(n, **kwargs)
if not node.else_ and not node.elif_:
# no else - just close the if
with self._execution():
self.output.write('}')
else:
# either an else or an elif
with self._execution() as execute_end:
self.output.write('}')
self.output.write(' else ')
# check for elif
for n in node.elif_:
self._process_node(n, execute_end=execute_end, **kwargs)
if node.elif_ and node.else_:
self.output.write(' else ')
# open up the body
self.output.write('{')
# process the body of the else
for n in node.else_:
self._process_node(n, **kwargs)
# close the body
with self._execution():
self.output.write('}')
def _process_condexpr(self, node, **kwargs):
with self._interpolation():
self.output.write('(')
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.test, **new_kwargs)
self.output.write(' ? ')
self._process_node(node.expr1, **kwargs)
self.output.write(' : ')
self._process_node(node.expr2, **kwargs)
self.output.write(')')
# `not x` -> JavaScript `!x` (with Python truthiness applied to x).
def _process_not(self, node, **kwargs):
self.output.write('!')
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
# `a or b` -> JavaScript `a || b`.
def _process_or(self, node, **kwargs):
self._process_node(node.left, **kwargs)
self.output.write(' || ')
self._process_node(node.right, **kwargs)
# `a and b` -> JavaScript `a && b`.
def _process_and(self, node, **kwargs):
self._process_node(node.left, **kwargs)
self.output.write(' && ')
self._process_node(node.right, **kwargs)
# A Jinja tuple literal -> a JavaScript array literal.
def _process_tuple(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
def _process_call(self, node, super_block=None, **kwargs):
    """
    Processes a `Call` node e.g. `{{ foo() }}`.

    Three cases are handled:
    * dict iteration methods (keys/values/items) - the call itself is dropped
      and only the underlying object is emitted,
    * `super()` inside a block - the parent block is processed in place,
    * any other call - emitted as a JavaScript call on a context variable.
    """
    if is_method_call(node, DICT_ITER_METHODS):
        # special case for dict methods - only emit the object the method was
        # called on; the generated iteration handles the rest
        self._process_node(node.node.node, **kwargs)
    elif is_method_call(node, 'super'):
        # special case for the super() method which is available inside blocks
        if not super_block:
            raise Exception('super() called outside of a block with a parent.')
        self._process_node(super_block, **kwargs)
    else:
        # just a normal function call on a context variable
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self._process_node(node.node, **new_kwargs)
                self.output.write('(')
                self._process_args(node, **new_kwargs)
                self.output.write(')')

                # only output the semi-colon if we are not interpolating
                # (BUGFIX: this previously wrote an empty string, which is a
                # no-op and left call statements unterminated)
                if self.state != STATE_INTERPOLATING:
                    self.output.write(';')
def _process_filter(self, node, **kwargs):
    """
    Processes a `Filter` node e.g. `{{ foo|bar }}`.

    Built-in filters dispatch to a `_process_filter_<name>` method; registered
    custom filters are emitted as `__filters.<name>(...)` calls; anything else
    raises.
    """
    handler = getattr(self, '_process_filter_%s' % node.name, None)
    if callable(handler):
        handler(node, **kwargs)
    elif node.name in self.custom_filters:
        # custom filters are trusted to escape their own output
        with self._interpolation(safe=True):
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self.output.write('__filters.%s(' % node.name)
                self._process_node(node.node, **new_kwargs)
                if getattr(node, 'args', None):
                    self.output.write(',')
                    self._process_args(node, **new_kwargs)
                self.output.write(')')
    else:
        raise Exception('Unsupported filter: %s' % node.name)
# Each `_process_filter_<name>` method below emits the JavaScript
# equivalent of the corresponding built-in Jinja filter.

# `|safe` - interpolate without HTML escaping.
def _process_filter_safe(self, node, **kwargs):
with self._interpolation(safe=True):
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
# `|capitalize` -> __filters.capitalize(x)
def _process_filter_capitalize(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.capitalize(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
# `|abs` -> Math.abs(x)
def _process_filter_abs(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('Math.abs(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
# `|attr(name)` -> x[name]
def _process_filter_attr(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
self.output.write('[')
self._process_node(node.args[0], **new_kwargs)
self.output.write(']')
# `|batch(...)` -> __filters.batch(x, ...)
def _process_filter_batch(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.batch(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
# `|default(value)` -> __filters.default(x[, value])
def _process_filter_default(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.default(')
self._process_node(node.node, **new_kwargs)
if node.args:
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
# `|first` -> __filters.first(x)
def _process_filter_first(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.first(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
# `|int` -> __filters.int(x[, default])
def _process_filter_int(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.int(')
self._process_node(node.node, **new_kwargs)
if node.args:
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
# `|last` -> __filters.last(x)
def _process_filter_last(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.last(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
# `|length` -> __filters.size(x)
def _process_filter_length(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.size(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
# `|lower` -> (x + "").toLowerCase(); the string concat coerces non-strings
def _process_filter_lower(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").toLowerCase()')
# `|slice(...)` -> __filters.slice(x, ...)
def _process_filter_slice(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.slice(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
# `|title` -> __filters.title(x)
def _process_filter_title(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.title(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
# `|trim` -> (x + "").trim()
def _process_filter_trim(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").trim()')
# `|upper` -> (x + "").toUpperCase()
def _process_filter_upper(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").toUpperCase()')
# `|truncate(...)` -> __filters.truncate(x, ...)
def _process_filter_truncate(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.truncate(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_assign(self, node, **kwargs):
    """Emit a `{% set x = value %}` tag as a JavaScript `var` statement."""
    with self._execution():
        out = self.output.write
        out('var ')
        self._process_node(node.target, **kwargs)
        out(' = ')
        self._process_node(node.node, **kwargs)
        out(';')
def _process_with(self, node, **kwargs):
    """
    Processes a `{% with %}` block.

    Assignments made on the tag itself (`{% with a = b %}`) and `{% set %}`
    assignments inside the body are scoped to the block: the body is wrapped
    in an immediately invoked function expression and the previous context
    values are restored on exit.
    """
    # keep a copy of the stored names before the scope
    previous_stored_names = self.stored_names.copy()

    # assigns in the with tag
    # e.g. {% with var = "something %}
    assigns_in_tag = [nodes.Assign(t, v) for t, v in zip(node.targets, node.values)]

    # assigns in the with body
    # e.g. {% set name = 'John' %}
    assigns_in_body = [x for x in node.body if isinstance(x, nodes.Assign)]

    # remove assigns from the body
    node.body = [x for x in node.body if not isinstance(x, nodes.Assign)]

    # get a list of all the assigns in this with block
    # both on the tag, and within the body of the block
    all_assigns = assigns_in_tag + assigns_in_body

    with self._execution():
        self.output.write('(function () {')

    with self._scoped_variables(all_assigns, **kwargs):
        # FIX: use a distinct loop variable; the original iterated with
        # `for node in node.body`, shadowing the `node` parameter
        for body_node in node.body:
            self._process_node(body_node, **kwargs)

    with self._execution():
        self.output.write('})();')

    # restore previous stored names
    self.stored_names = previous_stored_names
def _process_compare(self, node, **kwargs):
"""
Processes a comparison e.g. `{{ a == b }}`.

Equality between two non-constant operands is emitted as a call to the
runtime's deep-equals helper; every other comparison uses the plain
JavaScript operator from the OPERANDS map.
"""
if len(node.ops) > 1:
raise Exception('Multiple operands are not supported.')
operand = node.ops[0]
is_equality = operand.op in ('eq', 'ne')
left_hand_is_const = isinstance(node.expr, nodes.Const)
right_hand_is_const = isinstance(operand.expr, nodes.Const)
# If the operand is equality and neither the left or right hand side are constants then we
# will need to use the JavaScript deep equals function. Ideally we want to avoid using this
# as it is quite a big function.
use_is_equal_function = is_equality and not (left_hand_is_const or right_hand_is_const)
with option(kwargs, use_python_bool_wrapper=False):
if use_is_equal_function:
if operand.op == 'ne':
# `!=` becomes `!__runtime.isEqual(a, b)`
self.output.write('!')
self.output.write('__runtime.isEqual(')
self._process_node(node.expr, **kwargs)
if use_is_equal_function:
self.output.write(',')
else:
self.output.write(OPERANDS.get(operand.op))
self._process_node(operand.expr, **kwargs)
if use_is_equal_function:
self.output.write(')')
# Emits a comparison operand: its JS operator followed by its expression.
def _process_operand(self, node, **kwargs):
self.output.write(OPERANDS.get(node.op))
self._process_node(node.expr, **kwargs)
# Emits a constant (string/number/bool) as a JSON literal.
def _process_const(self, node, **_):
with self._interpolation():
self.output.write(json.dumps(node.value))
# Emits Python `None` as JavaScript `null`.
def _process_nonetype(self, node, **_):
with self._interpolation():
self.output.write('null')
# Emits unary minus e.g. `{{ -foo }}`.
def _process_neg(self, node, **kwargs):
with self._interpolation():
self.output.write('-')
self._process_node(node.node, **kwargs)
# Emits a Jinja list literal as a JavaScript array literal.
def _process_list(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
# Dispatches `{{ x is <test> }}` to the matching `_process_test_<name>` method.
def _process_test(self, node, **kwargs):
with option(kwargs, use_python_bool_wrapper=False):
method_name = getattr(self, '_process_test_%s' % node.name, None)
if callable(method_name):
method_name(node, **kwargs)
else:
raise Exception('Unsupported test: %s' % node.name)
# `is defined` -> (typeof x !== "undefined")
def _process_test_defined(self, node, **kwargs):
self.output.write('(typeof ')
self._process_node(node.node, **kwargs)
self.output.write(' !== "undefined")')
# `is undefined` -> x === undefined
def _process_test_undefined(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' === undefined')
# `is callable` -> __runtime.type(x) === "Function"
def _process_test_callable(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Function"')
# `is divisibleby(n)` -> x % n === 0
def _process_test_divisibleby(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % ')
self._process_node(node.args[0], **kwargs)
self.output.write(' === 0')
# `is even` -> x % 2 === 0
def _process_test_even(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % 2 === 0')
# `is odd` -> x % 2 === 1
def _process_test_odd(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % 2 === 1')
# `is none` -> x === null
def _process_test_none(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' === null')
# `is upper` -> x.toUpperCase() === x (true when already all upper-case)
def _process_test_upper(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write('.toUpperCase() === ')
self._process_node(node.node, **kwargs)
# `is lower` -> x.toLowerCase() === x
def _process_test_lower(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write('.toLowerCase() === ')
self._process_node(node.node, **kwargs)
# `is string` -> __runtime.type(x) === "String"
def _process_test_string(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "String"')
# `is mapping` -> __runtime.type(x) === "Object"
def _process_test_mapping(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Object"')
# `is number` -> numeric type check that also rejects NaN
def _process_test_number(self, node, **kwargs):
self.output.write('(__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Number" && !isNaN(')
self._process_node(node.node, **kwargs)
self.output.write('))')
def _process_include(self, node, **kwargs):
"""
Processes an `{% include %}` tag, emitting a call to the compiled
template that the include refers to.
"""
with self._interpolation(safe=True):
include_path = node.template.value
if include_path == self.template_name:
# template is including itself
include_var_name = self.js_function_name
else:
if self.include_prefix:
include_path = self.include_prefix + node.template.value
elif self.js_module_format in ('es6', 'commonjs',) and self.template_name:
# compute a path relative to the including template so the
# generated import/require resolves correctly
_, absolute_include_path, _ = self.environment.loader.get_source(
self.environment, node.template.value
)
include_path = os.path.relpath(
absolute_include_path, os.path.dirname(self.template_path)
)
if not include_path.startswith('.'):
include_path = './' + include_path
# swap the template extension for the configured JS extension
include_path = path.splitext(include_path)[0] + self.include_ext
include_var_name = self._get_depencency_var_name(include_path)
if not include_var_name:
include_var_name = self._add_dependency(include_path)
if self.js_module_format is None:
# no module system - fall back to a runtime include by path
self.output.write('jinjaToJS.include("')
self.output.write(include_path)
self.output.write('");')
else:
self.output.write(include_var_name)
self.output.write('(')
self.output.write(self.context_name)
self.output.write(')')
# Binary arithmetic nodes all delegate to _process_math with the matching
# JavaScript operator.
def _process_add(self, node, **kwargs):
self._process_math(node, math_operator=' + ', **kwargs)
def _process_sub(self, node, **kwargs):
self._process_math(node, math_operator=' - ', **kwargs)
def _process_div(self, node, **kwargs):
self._process_math(node, math_operator=' / ', **kwargs)
# floor division has no JS operator, so the division is wrapped in Math.floor
def _process_floordiv(self, node, **kwargs):
self._process_math(node, math_operator=' / ', function='Math.floor', **kwargs)
def _process_mul(self, node, **kwargs):
self._process_math(node, math_operator=' * ', **kwargs)
def _process_mod(self, node, **kwargs):
self._process_math(node, math_operator=' % ', **kwargs)
def _process_math(self, node, math_operator=None, function=None, **kwargs):
"""
Processes a math node e.g. `Div`, `Sub`, `Add`, `Mul` etc...
If `function` is provided the expression is wrapped in a call to that function.
"""
with self._interpolation():
if function:
self.output.write(function)
self.output.write('(')
self._process_node(node.left, **kwargs)
self.output.write(math_operator)
self._process_node(node.right, **kwargs)
if function:
self.output.write(')')
def _process_loop_helper(self, node, **kwargs):
    """
    Processes a loop helper e.g. {{ loop.first }} or {{ loop.index }}
    """
    # Inside the generated each() callback: arguments[1] is the index and
    # arguments[2] is the collection being iterated.
    helpers = {
        LOOP_HELPER_INDEX: '(arguments[1] + 1)',
        LOOP_HELPER_INDEX_0: 'arguments[1]',
        LOOP_HELPER_FIRST: '(arguments[1] == 0)',
        LOOP_HELPER_LAST: '(arguments[1] == arguments[2].length - 1)',
        LOOP_HELPER_LENGTH: 'arguments[2].length',
    }
    expression = helpers.get(node.attr)
    if expression is not None:
        self.output.write(expression)
def _process_args(self, node, **kwargs):
    """Emit the call arguments of *node* as a comma-separated list."""
    call_args = getattr(node, 'args', None)
    if not call_args:
        return
    first = True
    for arg in call_args:
        if not first:
            self.output.write(',')
        self._process_node(arg, **kwargs)
        first = False
@contextlib.contextmanager
def _execution(self):
"""
Context manager for executing some JavaScript inside a template.
"""
did_start_executing = False
if self.state == STATE_DEFAULT:
did_start_executing = True
self.state = STATE_EXECUTING
def close():
# only the invocation that opened the executing state may close it,
# so nested uses of _execution() are no-ops
if did_start_executing and self.state == STATE_EXECUTING:
self.state = STATE_DEFAULT
# the close() function is yielded so callers can end the execution
# state early (see _process_if); calling it twice is harmless
yield close
close()
@contextlib.contextmanager
def _interpolation(self, safe=False):
# Context manager for interpolating a value into the rendered output.
# Opens `__result += ...` and, unless `safe` is True, routes the value
# through __runtime.escape. Nested uses are no-ops; only the outermost
# one writes the prologue/epilogue.
did_start_interpolating = False
if self.state == STATE_DEFAULT:
did_start_interpolating = True
self.output.write('__result += "" + ')
if safe is not True:
self.output.write('__runtime.escape')
self.output.write('((__tmp = (')
self.state = STATE_INTERPOLATING
def close():
if did_start_interpolating and self.state == STATE_INTERPOLATING:
# `== null` also matches undefined, so both render as ""
self.output.write(')) == null ? "" : __tmp);')
self.state = STATE_DEFAULT
yield close
close()
@contextlib.contextmanager
def _scoped_variables(self, nodes_list, **kwargs):
"""
Context manager for creating scoped variables defined by the nodes in `nodes_list`.
These variables will be added to the context, and when the context manager exits the
context object will be restored to it's previous state.
"""
tmp_vars = []
for node in nodes_list:
is_assign_node = isinstance(node, nodes.Assign)
name = node.target.name if is_assign_node else node.name
# create a temp variable name
tmp_var = next(self.temp_var_names)
# save previous context value
with self._execution():
# save the current value of this name
self.output.write('var %s = %s.%s;' % (tmp_var, self.context_name, name))
# add new value to context
self.output.write('%s.%s = ' % (self.context_name, name))
if is_assign_node:
self._process_node(node.node, **kwargs)
else:
# a bare Name node (e.g. a loop variable) - its current JS value
# is already bound to `name` in the generated scope
self.output.write(node.name)
self.output.write(';')
tmp_vars.append((tmp_var, name))
yield
# restore context
for tmp_var, name in tmp_vars:
with self._execution():
self.output.write('%s.%s = %s;' % (self.context_name, name, tmp_var))
@contextlib.contextmanager
def _python_bool_wrapper(self, **kwargs):
# Optionally wraps the emitted expression in __runtime.boolean(...) so
# JS truthiness matches Python's. The flag is cleared in the yielded
# kwargs so nested expressions are not double-wrapped.
use_python_bool_wrapper = kwargs.get('use_python_bool_wrapper')
if use_python_bool_wrapper:
self.output.write('__runtime.boolean(')
with option(kwargs, use_python_bool_wrapper=False):
yield kwargs
if use_python_bool_wrapper:
self.output.write(')')
|
jonbretman/jinja-to-js
|
jinja_to_js/__init__.py
|
JinjaToJS._process_getitem
|
python
|
def _process_getitem(self, node, **kwargs):
"""
Processes a `GetItem` node e.g. `{{ foo["bar"] }}` or `{{ foo[1:2] }}`.
"""
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
if isinstance(node.arg, nodes.Slice):
# Jinja slices map onto JavaScript's Array.prototype.slice
self.output.write('.slice(')
if node.arg.step is not None:
raise Exception('The step argument is not supported when slicing.')
if node.arg.start is None:
# no start given - slice from the beginning
self.output.write('0')
else:
self._process_node(node.arg.start, **new_kwargs)
if node.arg.stop is None:
self.output.write(')')
else:
self.output.write(',')
self._process_node(node.arg.stop, **new_kwargs)
self.output.write(')')
else:
# plain subscript -> bracket access
self.output.write('[')
self._process_node(node.arg, **new_kwargs)
self.output.write(']')
|
Processes a `GetItem` node e.g. {{ foo["bar"] }}
|
train
|
https://github.com/jonbretman/jinja-to-js/blob/0a784b10a83d37a3171c5797547e9fc460c51289/jinja_to_js/__init__.py#L438-L467
| null |
class JinjaToJS(object):
def __init__(self,
template_root,
template_name,
js_module_format=None,
runtime_path='jinja-to-js',
include_prefix='',
include_ext='',
child_blocks=None,
dependencies=None,
custom_filters=None):
"""
Args:
template_root (str): The path to where templates should be loaded from.
template_name (str): The name of the template to compile (relative to `template_root`).
js_module_format (str, optional): The JavaScript module format to use.
One of ('amd', 'commonjs', 'es6')
runtime_path (str, optional): If `js_module_format` is specified then the JavaScript
runtime will be imported using the appropriate method.
It defaults to assuming it will be imported from
`node_modules` but you can change it using this option.
include_prefix (str, optional): If using the `amd` module format you can use this option
to add a prefix to every include path as AMD imports are
generally relative to the main file, not the module
importing.
include_ext (str, optional): By default any includes will be references without an
extension, as neither AMD, commonJS or ES6 require the
'.js' extension. If you want to use an extension, say
'.template' then set this option to a string including
the leading '.'
child_blocks (dict, optional): Used internally when handling templates that extend
other templates.
dependencies (list of tuple, optional): Used internally when handling templates that
extend other templates.
custom_filters (list of str, optional): List of custom filters which should be allowed.
These may be filters supported by Jinja but not
supported by jinja-to-js. These filters MUST be
registered with the jinja-to-js JS runtime.
"""
self.environment = Environment(loader=FileSystemLoader(template_root),
autoescape=True,
extensions=['jinja2.ext.with_', 'jinja2.ext.autoescape'])
self.output = six.StringIO()
self.stored_names = set()
self.temp_var_names = temp_var_names_generator()
self.state = STATE_DEFAULT
self.child_blocks = child_blocks or {}
self.dependencies = dependencies or []
self._runtime_function_cache = []
self.js_module_format = js_module_format
self.runtime_path = runtime_path
self.include_prefix = include_prefix
self.include_ext = include_ext
self.template_root = template_root
self.template_name = template_name
self.custom_filters = custom_filters or []
# The name of the JavaScript function that will output this template. By using a named
# function the template can call itself which is required to support recursive includes.
self.js_function_name = 'template' + ''.join(
x.title() for x in re.split(r'[^\w]|_', path.splitext(self.template_name)[0])
)
self.context_name = 'context'
self._add_dependency(self.runtime_path, 'jinjaToJS')
template_string, template_path, _ = self.environment.loader.get_source(
self.environment, self.template_name
)
# It is assumed that this will be the absolute path to the template. It is used to work out
# related paths for inclues.
self.template_path = template_path
if self.js_module_format not in JS_MODULE_FORMATS.keys():
raise ValueError(
'The js_module_format option must be one of: %s' % JS_MODULE_FORMATS.keys()
)
self.ast = self.environment.parse(template_string)
try:
for node in self.ast.body:
self._process_node(node)
except ExtendsException:
pass
def get_output(self):
"""
Returns the generated JavaScript code.
Returns:
str
"""
# generate the JS function string
template_function = TEMPLATE_WRAPPER.format(
function_name=self.js_function_name,
template_code=self.output.getvalue()
).strip()
# get the correct module format template
module_format = JS_MODULE_FORMATS[self.js_module_format]
# generate the module code
return module_format(self.dependencies, template_function)
def _get_depencency_var_name(self, dependency):
"""
Returns the variable name assigned to the given dependency or None if the dependency has
not yet been registered.
Args:
dependency (str): Thet dependency that needs to be imported.
Returns:
str or None
"""
for dep_path, var_name in self.dependencies:
if dep_path == dependency:
return var_name
def _add_dependency(self, dependency, var_name=None):
"""
Adds the given dependency and returns the variable name to use to access it. If `var_name`
is not given then a random one will be created.
Args:
dependency (str):
var_name (str, optional):
Returns:
str
"""
if var_name is None:
var_name = next(self.temp_var_names)
# Don't add duplicate dependencies
if (dependency, var_name) not in self.dependencies:
self.dependencies.append((dependency, var_name))
return var_name
def _process_node(self, node, **kwargs):
node_name = node.__class__.__name__.lower()
handler = getattr(self, '_process_' + node_name, None)
if callable(handler):
handler(node, **kwargs)
else:
raise Exception('Unknown node %s' % node)
def _process_extends(self, node, **kwargs):
"""
Processes an extends block e.g. `{% extends "some/template.jinja" %}`
"""
# find all the blocks in this template
for b in self.ast.find_all(nodes.Block):
# if not already in `child_blocks` then this is the first time a
# block with this name has been encountered.
if b.name not in self.child_blocks:
self.child_blocks[b.name] = b
else:
# otherwise we have seen this block before, so we need to find the last
# super_block and add the block from this template to the end.
block = self.child_blocks.get(b.name)
while hasattr(block, 'super_block'):
block = block.super_block
block.super_block = b
# load the parent template
parent_template = JinjaToJS(template_root=self.template_root,
template_name=node.template.value,
js_module_format=self.js_module_format,
runtime_path=self.runtime_path,
include_prefix=self.include_prefix,
include_ext=self.include_ext,
child_blocks=self.child_blocks,
dependencies=self.dependencies)
# add the parent templates output to the current output
self.output.write(parent_template.output.getvalue())
# Raise an exception so we stop parsing this template
raise ExtendsException
def _process_block(self, node, **kwargs):
"""
Processes a block e.g. `{% block my_block %}{% endblock %}`
"""
# check if this node already has a 'super_block' attribute
if not hasattr(node, 'super_block'):
# since it doesn't it must be the last block in the inheritance chain
node.super_block = None
# see if there has been a child block defined - if there is this
# will be the first block in the inheritance chain
child_block = self.child_blocks.get(node.name)
if child_block:
# we have child nodes so we need to set `node` as the
# super of the last one in the chain
last_block = child_block
while hasattr(last_block, 'super_block'):
last_block = child_block.super_block
# once we have found it, set this node as it's super block
last_block.super_block = node
# this is the node we want to process as it's the first in the inheritance chain
node = child_block
# process the block passing the it's super along, if this block
# calls super() it will be handled by `_process_call`
for n in node.body:
self._process_node(n, super_block=node.super_block, **kwargs)
def _process_output(self, node, **kwargs):
"""
Processes an output node, which will contain things like `Name` and `TemplateData` nodes.
"""
for n in node.nodes:
self._process_node(n, **kwargs)
def _process_templatedata(self, node, **_):
"""
Processes a `TemplateData` node, this is just a bit of as-is text
to be written to the output.
"""
# escape double quotes
value = re.sub('"', r'\\"', node.data)
# escape new lines
value = re.sub('\n', r'\\n', value)
# append value to the result
self.output.write('__result += "' + value + '";')
def _process_name(self, node, **kwargs):
"""
Processes a `Name` node. Some examples of `Name` nodes:
{{ foo }} -> 'foo' is a Name
{% if foo }} -> 'foo' is a Name
"""
with self._interpolation():
with self._python_bool_wrapper(**kwargs):
if node.name not in self.stored_names and node.ctx != 'store':
self.output.write(self.context_name)
self.output.write('.')
if node.ctx == 'store':
self.stored_names.add(node.name)
self.output.write(node.name)
def _process_getattr(self, node, **kwargs):
"""
Processes a `GetAttr` node. e.g. {{ foo.bar }}
"""
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
if is_loop_helper(node):
self._process_loop_helper(node, **new_kwargs)
else:
self._process_node(node.node, **new_kwargs)
self.output.write('.')
self.output.write(node.attr)
def _process_for(self, node, **kwargs):
"""
Processes a for loop. e.g.
{% for number in numbers %}
{{ number }}
{% endfor %}
{% for key, value in somemap.items() %}
{{ key }} -> {{ value }}
{% %}
"""
# since a for loop can introduce new names into the context
# we need to remember the ones that existed outside the loop
previous_stored_names = self.stored_names.copy()
with self._execution():
self.output.write('__runtime.each(')
if is_method_call(node.iter, dict.keys.__name__):
self.output.write('Object.keys(')
self._process_node(node.iter, **kwargs)
if is_method_call(node.iter, dict.keys.__name__):
self.output.write(')')
self.output.write(',')
self.output.write('function')
self.output.write('(')
# javascript iterations put the value first, then the key
if isinstance(node.target, nodes.Tuple):
if len(node.target.items) > 2:
raise Exception('De-structuring more than 2 items is not supported.')
for i, item in enumerate(reversed(node.target.items)):
self._process_node(item, **kwargs)
if i < len(node.target.items) - 1:
self.output.write(',')
else:
self._process_node(node.target, **kwargs)
self.output.write(')')
self.output.write('{')
if node.test:
self.output.write('if (!(')
self._process_node(node.test, **kwargs)
self.output.write(')) { return; }')
assigns = node.target.items if isinstance(node.target, nodes.Tuple) else [node.target]
with self._scoped_variables(assigns, **kwargs):
for n in node.body:
self._process_node(n, **kwargs)
with self._execution():
self.output.write('}')
self.output.write(')')
self.output.write(';')
# restore the stored names
self.stored_names = previous_stored_names
def _process_if(self, node, execute_end=None, **kwargs):
"""
Processes an if block e.g. `{% if foo %} do something {% endif %}`
"""
with self._execution():
self.output.write('if')
self.output.write('(')
with option(kwargs, use_python_bool_wrapper=True):
self._process_node(node.test, **kwargs)
self.output.write(')')
self.output.write('{')
# We accept an `execute_end` function as a keyword argument as this function is
# recursive in the case of something like if-elif-elif-else. In these cases this
# invocation of this function may have to close execution opened by a previous
# invocation of this function.
if execute_end:
execute_end()
# body
for n in node.body:
self._process_node(n, **kwargs)
if not node.else_ and not node.elif_:
# no else - just close the if
with self._execution():
self.output.write('}')
else:
# either an else or an elif
with self._execution() as execute_end:
self.output.write('}')
self.output.write(' else ')
# check for elif
for n in node.elif_:
self._process_node(n, execute_end=execute_end, **kwargs)
if node.elif_ and node.else_:
self.output.write(' else ')
# open up the body
self.output.write('{')
# process the body of the else
for n in node.else_:
self._process_node(n, **kwargs)
# close the body
with self._execution():
self.output.write('}')
def _process_condexpr(self, node, **kwargs):
with self._interpolation():
self.output.write('(')
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.test, **new_kwargs)
self.output.write(' ? ')
self._process_node(node.expr1, **kwargs)
self.output.write(' : ')
self._process_node(node.expr2, **kwargs)
self.output.write(')')
def _process_not(self, node, **kwargs):
self.output.write('!')
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
def _process_or(self, node, **kwargs):
self._process_node(node.left, **kwargs)
self.output.write(' || ')
self._process_node(node.right, **kwargs)
def _process_and(self, node, **kwargs):
self._process_node(node.left, **kwargs)
self.output.write(' && ')
self._process_node(node.right, **kwargs)
def _process_tuple(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
def _process_call(self, node, super_block=None, **kwargs):
if is_method_call(node, DICT_ITER_METHODS):
# special case for dict methods
self._process_node(node.node.node, **kwargs)
elif is_method_call(node, 'super'):
# special case for the super() method which is available inside blocks
if not super_block:
raise Exception('super() called outside of a block with a parent.')
self._process_node(super_block, **kwargs)
else:
# just a normal function call on a context variable
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
self.output.write('(')
self._process_args(node, **new_kwargs)
self.output.write(')')
# only output the semi-colon if we are not interpolating
if self.state != STATE_INTERPOLATING:
self.output.write('')
def _process_filter(self, node, **kwargs):
method_name = getattr(self, '_process_filter_%s' % node.name, None)
if callable(method_name):
method_name(node, **kwargs)
elif node.name in self.custom_filters:
with self._interpolation(safe=True):
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.%s(' % node.name)
self._process_node(node.node, **new_kwargs)
if getattr(node, 'args', None):
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
else:
raise Exception('Unsupported filter: %s' % node.name)
def _process_filter_safe(self, node, **kwargs):
with self._interpolation(safe=True):
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
def _process_filter_capitalize(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.capitalize(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_abs(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('Math.abs(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_attr(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
self.output.write('[')
self._process_node(node.args[0], **new_kwargs)
self.output.write(']')
def _process_filter_batch(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.batch(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_default(self, node, **kwargs):
    """{{ x|default(y) }} -> __filters.default(x[, y])"""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as fkw:
            emit = self.output.write
            emit('__filters.default(')
            self._process_node(node.node, **fkw)
            if node.args:
                emit(',')
                self._process_args(node, **fkw)
            emit(')')
def _process_filter_first(self, node, **kwargs):
    """{{ x|first }} -> __filters.first(x)"""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as fkw:
            emit = self.output.write
            emit('__filters.first(')
            self._process_node(node.node, **fkw)
            emit(')')
def _process_filter_int(self, node, **kwargs):
    """{{ x|int }} -> __filters.int(x[, default])"""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as fkw:
            emit = self.output.write
            emit('__filters.int(')
            self._process_node(node.node, **fkw)
            if node.args:
                emit(',')
                self._process_args(node, **fkw)
            emit(')')
def _process_filter_last(self, node, **kwargs):
    """{{ x|last }} -> __filters.last(x)"""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as fkw:
            emit = self.output.write
            emit('__filters.last(')
            self._process_node(node.node, **fkw)
            emit(')')
def _process_filter_length(self, node, **kwargs):
    """{{ x|length }} -> __filters.size(x)"""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as fkw:
            emit = self.output.write
            emit('__filters.size(')
            self._process_node(node.node, **fkw)
            emit(')')
def _process_filter_lower(self, node, **kwargs):
    """{{ x|lower }} -> (x + "").toLowerCase()"""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as fkw:
            emit = self.output.write
            emit('(')
            self._process_node(node.node, **fkw)
            emit(' + "").toLowerCase()')
def _process_filter_slice(self, node, **kwargs):
    """{{ x|slice(n) }} -> __filters.slice(x, n)"""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as fkw:
            emit = self.output.write
            emit('__filters.slice(')
            self._process_node(node.node, **fkw)
            emit(',')
            self._process_args(node, **fkw)
            emit(')')
def _process_filter_title(self, node, **kwargs):
    """{{ x|title }} -> __filters.title(x)"""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as fkw:
            emit = self.output.write
            emit('__filters.title(')
            self._process_node(node.node, **fkw)
            emit(')')
def _process_filter_trim(self, node, **kwargs):
    """{{ x|trim }} -> (x + "").trim()"""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as fkw:
            emit = self.output.write
            emit('(')
            self._process_node(node.node, **fkw)
            emit(' + "").trim()')
def _process_filter_upper(self, node, **kwargs):
    """{{ x|upper }} -> (x + "").toUpperCase()"""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as fkw:
            emit = self.output.write
            emit('(')
            self._process_node(node.node, **fkw)
            emit(' + "").toUpperCase()')
def _process_filter_truncate(self, node, **kwargs):
    """{{ x|truncate(n) }} -> __filters.truncate(x, n)"""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as fkw:
            emit = self.output.write
            emit('__filters.truncate(')
            self._process_node(node.node, **fkw)
            emit(',')
            self._process_args(node, **fkw)
            emit(')')
def _process_assign(self, node, **kwargs):
    """{% set x = expr %} -> var x = expr;"""
    with self._execution():
        emit = self.output.write
        emit('var ')
        self._process_node(node.target, **kwargs)
        emit(' = ')
        self._process_node(node.node, **kwargs)
        emit(';')
def _process_with(self, node, **kwargs):
    """{% with %}: render the body inside an IIFE with scoped assignments."""
    # names bound outside the scope, restored once the scope closes
    outer_names = self.stored_names.copy()

    # assignments made on the tag itself, e.g. {% with var = "something" %}
    tag_assigns = [nodes.Assign(t, v) for t, v in zip(node.targets, node.values)]

    # assignments made inside the body, e.g. {% set name = 'John' %}
    body_assigns = [child for child in node.body if isinstance(child, nodes.Assign)]

    # strip the assigns out of the body; they are handled by _scoped_variables
    node.body = [child for child in node.body if not isinstance(child, nodes.Assign)]

    with self._execution():
        self.output.write('(function () {')

    with self._scoped_variables(tag_assigns + body_assigns, **kwargs):
        for child in node.body:
            self._process_node(child, **kwargs)

    with self._execution():
        self.output.write('})();')

    # restore previous stored names
    self.stored_names = outer_names
def _process_compare(self, node, **kwargs):
    """Emit a comparison, e.g. {{ a == b }}.  Equality between two
    non-constant operands goes through __runtime.isEqual (deep equality)."""
    if len(node.ops) > 1:
        raise Exception('Multiple operands are not supported.')

    operand = node.ops[0]
    is_equality = operand.op in ('eq', 'ne')
    left_hand_is_const = isinstance(node.expr, nodes.Const)
    right_hand_is_const = isinstance(operand.expr, nodes.Const)

    # If the operand is equality and neither the left or right hand side are constants then we
    # will need to use the JavaScript deep equals function. Ideally we want to avoid using this
    # as it is quite a big function.
    use_is_equal_function = is_equality and not (left_hand_is_const or right_hand_is_const)

    with option(kwargs, use_python_bool_wrapper=False):

        if use_is_equal_function:
            if operand.op == 'ne':
                # `a != b` -> `!__runtime.isEqual(a, b)`
                self.output.write('!')
            self.output.write('__runtime.isEqual(')

        self._process_node(node.expr, **kwargs)

        if use_is_equal_function:
            self.output.write(',')
        else:
            self.output.write(OPERANDS.get(operand.op))

        self._process_node(operand.expr, **kwargs)

        if use_is_equal_function:
            self.output.write(')')
def _process_operand(self, node, **kwargs):
    # emit the JS comparison operator followed by its right-hand expression
    self.output.write(OPERANDS.get(node.op))
    self._process_node(node.expr, **kwargs)
def _process_const(self, node, **_):
    # literal constants are emitted as their JSON representation
    with self._interpolation():
        self.output.write(json.dumps(node.value))
def _process_nonetype(self, node, **_):
    # Python None -> JS null
    with self._interpolation():
        self.output.write('null')
def _process_neg(self, node, **kwargs):
    # unary minus, e.g. {{ -x }}
    with self._interpolation():
        self.output.write('-')
        self._process_node(node.node, **kwargs)
def _process_list(self, node, **kwargs):
    """Emit a list literal, e.g. [1,2,3]."""
    emit = self.output.write
    emit('[')
    last = len(node.items) - 1
    for idx, entry in enumerate(node.items):
        self._process_node(entry, **kwargs)
        if idx < last:
            emit(',')
    emit(']')
def _process_test(self, node, **kwargs):
with option(kwargs, use_python_bool_wrapper=False):
method_name = getattr(self, '_process_test_%s' % node.name, None)
if callable(method_name):
method_name(node, **kwargs)
else:
raise Exception('Unsupported test: %s' % node.name)
def _process_test_defined(self, node, **kwargs):
    """`is defined` -> (typeof x !== "undefined")"""
    emit = self.output.write
    emit('(typeof ')
    self._process_node(node.node, **kwargs)
    emit(' !== "undefined")')
def _process_test_undefined(self, node, **kwargs):
    # `is undefined` -> x === undefined
    self._process_node(node.node, **kwargs)
    self.output.write(' === undefined')
def _process_test_callable(self, node, **kwargs):
    """`is callable` -> __runtime.type(x) === "Function" """
    emit = self.output.write
    emit('__runtime.type(')
    self._process_node(node.node, **kwargs)
    emit(') === "Function"')
def _process_test_divisibleby(self, node, **kwargs):
    """`is divisibleby(n)` -> x % n === 0"""
    emit = self.output.write
    self._process_node(node.node, **kwargs)
    emit(' % ')
    self._process_node(node.args[0], **kwargs)
    emit(' === 0')
def _process_test_even(self, node, **kwargs):
    # `is even` -> x % 2 === 0
    self._process_node(node.node, **kwargs)
    self.output.write(' % 2 === 0')
def _process_test_odd(self, node, **kwargs):
    # `is odd` -> x % 2 === 1
    self._process_node(node.node, **kwargs)
    self.output.write(' % 2 === 1')
def _process_test_none(self, node, **kwargs):
    # `is none` -> x === null
    self._process_node(node.node, **kwargs)
    self.output.write(' === null')
def _process_test_upper(self, node, **kwargs):
    """`is upper`: compare the value against its upper-cased form."""
    emit = self.output.write
    self._process_node(node.node, **kwargs)
    emit('.toUpperCase() === ')
    self._process_node(node.node, **kwargs)
def _process_test_lower(self, node, **kwargs):
    """`is lower`: compare the value against its lower-cased form."""
    emit = self.output.write
    self._process_node(node.node, **kwargs)
    emit('.toLowerCase() === ')
    self._process_node(node.node, **kwargs)
def _process_test_string(self, node, **kwargs):
    """`is string` -> __runtime.type(x) === "String" """
    emit = self.output.write
    emit('__runtime.type(')
    self._process_node(node.node, **kwargs)
    emit(') === "String"')
def _process_test_mapping(self, node, **kwargs):
    """`is mapping` -> __runtime.type(x) === "Object" """
    emit = self.output.write
    emit('__runtime.type(')
    self._process_node(node.node, **kwargs)
    emit(') === "Object"')
def _process_test_number(self, node, **kwargs):
    """`is number`: a Number type check plus a NaN guard."""
    emit = self.output.write
    emit('(__runtime.type(')
    self._process_node(node.node, **kwargs)
    emit(') === "Number" && !isNaN(')
    self._process_node(node.node, **kwargs)
    emit('))')
def _process_include(self, node, **kwargs):
    """Emit a call to an included template, registering it as a dependency
    and resolving its path according to the module format in use."""
    with self._interpolation(safe=True):
        include_path = node.template.value
        if include_path == self.template_name:
            # template is including itself
            include_var_name = self.js_function_name
        else:
            if self.include_prefix:
                include_path = self.include_prefix + node.template.value
            elif self.js_module_format in ('es6', 'commonjs',) and self.template_name:
                # make the include path relative to the including template
                _, absolute_include_path, _ = self.environment.loader.get_source(
                    self.environment, node.template.value
                )
                include_path = os.path.relpath(
                    absolute_include_path, os.path.dirname(self.template_path)
                )
                if not include_path.startswith('.'):
                    include_path = './' + include_path
            # swap the template extension for the configured JS extension (may be '')
            include_path = path.splitext(include_path)[0] + self.include_ext
            include_var_name = self._get_depencency_var_name(include_path)
            if not include_var_name:
                include_var_name = self._add_dependency(include_path)
        if self.js_module_format is None:
            # no module system: defer to the runtime's include registry
            self.output.write('jinjaToJS.include("')
            self.output.write(include_path)
            self.output.write('");')
        else:
            # call the imported template function with the current context
            self.output.write(include_var_name)
            self.output.write('(')
            self.output.write(self.context_name)
            self.output.write(')')
# Thin wrappers mapping Jinja arithmetic nodes onto _process_math.
def _process_add(self, node, **kwargs):
    self._process_math(node, math_operator=' + ', **kwargs)

def _process_sub(self, node, **kwargs):
    self._process_math(node, math_operator=' - ', **kwargs)

def _process_div(self, node, **kwargs):
    self._process_math(node, math_operator=' / ', **kwargs)

def _process_floordiv(self, node, **kwargs):
    # Python floor division -> Math.floor(a / b)
    self._process_math(node, math_operator=' / ', function='Math.floor', **kwargs)

def _process_mul(self, node, **kwargs):
    self._process_math(node, math_operator=' * ', **kwargs)

def _process_mod(self, node, **kwargs):
    self._process_math(node, math_operator=' % ', **kwargs)
def _process_math(self, node, math_operator=None, function=None, **kwargs):
    """
    Processes a math node e.g. `Div`, `Sub`, `Add`, `Mul` etc...
    If `function` is provided the expression is wrapped in a call to that function,
    e.g. Math.floor(a / b) for floor division.
    """
    with self._interpolation():
        if function:
            self.output.write(function)
            self.output.write('(')
        self._process_node(node.left, **kwargs)
        self.output.write(math_operator)
        self._process_node(node.right, **kwargs)
        if function:
            self.output.write(')')
def _process_loop_helper(self, node, **kwargs):
    """
    Processes a loop helper e.g. {{ loop.first }} or {{ loop.index }}.
    Inside __runtime.each callbacks, arguments[1] is the index and
    arguments[2] the iterable.
    """
    helper_js = {
        LOOP_HELPER_INDEX: '(arguments[1] + 1)',
        LOOP_HELPER_INDEX_0: 'arguments[1]',
        LOOP_HELPER_FIRST: '(arguments[1] == 0)',
        LOOP_HELPER_LAST: '(arguments[1] == arguments[2].length - 1)',
        LOOP_HELPER_LENGTH: 'arguments[2].length',
    }
    if node.attr in helper_js:
        self.output.write(helper_js[node.attr])
def _process_args(self, node, **kwargs):
    """Emit the node's call arguments as a comma separated list, if any."""
    args = getattr(node, 'args', None)
    if not args:
        return
    last = len(args) - 1
    for idx, arg in enumerate(args):
        self._process_node(arg, **kwargs)
        if idx < last:
            self.output.write(',')
@contextlib.contextmanager
def _execution(self):
    """
    Context manager for executing some JavaScript inside a template.
    Nested uses are no-ops: only the outermost call changes state.
    Yields a `close` callable so a caller can end execution early.
    """
    did_start_executing = False

    if self.state == STATE_DEFAULT:
        did_start_executing = True
        self.state = STATE_EXECUTING

    def close():
        # only the call that opened the state resets it, and only once
        if did_start_executing and self.state == STATE_EXECUTING:
            self.state = STATE_DEFAULT

    yield close
    # NOTE(review): close() is skipped if the body raises; presumably fine
    # since compilation aborts, but try/finally would be safer — confirm.
    close()
@contextlib.contextmanager
def _interpolation(self, safe=False):
    """
    Context manager that wraps emitted JS in `__result += ...`, escaping the
    value unless `safe` is True.  Nested uses are no-ops: only the outermost
    call opens and closes the interpolation.  Yields a `close` callable.
    """
    did_start_interpolating = False

    if self.state == STATE_DEFAULT:
        did_start_interpolating = True
        # `__tmp` captures the value once so null/undefined render as ""
        self.output.write('__result += "" + ')
        if safe is not True:
            self.output.write('__runtime.escape')
        self.output.write('((__tmp = (')
        self.state = STATE_INTERPOLATING

    def close():
        # only the call that opened interpolation closes it, and only once
        if did_start_interpolating and self.state == STATE_INTERPOLATING:
            self.output.write(')) == null ? "" : __tmp);')
            self.state = STATE_DEFAULT

    yield close
    close()
@contextlib.contextmanager
def _scoped_variables(self, nodes_list, **kwargs):
    """
    Context manager for creating scoped variables defined by the nodes in `nodes_list`.
    These variables will be added to the context, and when the context manager exits the
    context object will be restored to it's previous state.
    """
    tmp_vars = []
    for node in nodes_list:
        is_assign_node = isinstance(node, nodes.Assign)
        name = node.target.name if is_assign_node else node.name

        # create a temp variable name
        tmp_var = next(self.temp_var_names)

        # save previous context value
        with self._execution():

            # save the current value of this name
            self.output.write('var %s = %s.%s;' % (tmp_var, self.context_name, name))

            # add new value to context
            self.output.write('%s.%s = ' % (self.context_name, name))

            if is_assign_node:
                self._process_node(node.node, **kwargs)
            else:
                # loop targets etc. already exist as JS locals of the same name
                self.output.write(node.name)

            self.output.write(';')

        tmp_vars.append((tmp_var, name))

    yield

    # restore context
    # NOTE(review): restoration is skipped if the body raises — confirm acceptable.
    for tmp_var, name in tmp_vars:
        with self._execution():
            self.output.write('%s.%s = %s;' % (self.context_name, name, tmp_var))
@contextlib.contextmanager
def _python_bool_wrapper(self, **kwargs):
    """Optionally wrap the emitted expression in __runtime.boolean(...) so JS
    truthiness matches Python's (e.g. empty arrays/objects are falsy)."""
    use_python_bool_wrapper = kwargs.get('use_python_bool_wrapper')

    if use_python_bool_wrapper:
        self.output.write('__runtime.boolean(')

    # child nodes must not re-wrap, so clear the flag for the duration
    with option(kwargs, use_python_bool_wrapper=False):
        yield kwargs

    if use_python_bool_wrapper:
        self.output.write(')')
|
jonbretman/jinja-to-js
|
jinja_to_js/__init__.py
|
JinjaToJS._process_for
|
python
|
def _process_for(self, node, **kwargs):
    """
    Processes a for loop, e.g.

        {% for number in numbers %}{{ number }}{% endfor %}
        {% for key, value in somemap.items() %}{{ key }} -> {{ value }}{% endfor %}

    Compiles to a __runtime.each(iterable, function (value[, key]) { ... }) call.
    """
    # since a for loop can introduce new names into the context
    # we need to remember the ones that existed outside the loop
    previous_stored_names = self.stored_names.copy()
    with self._execution():
        self.output.write('__runtime.each(')
        if is_method_call(node.iter, dict.keys.__name__):
            # dict.keys() iterates the object's keys in JS
            self.output.write('Object.keys(')
        self._process_node(node.iter, **kwargs)
        if is_method_call(node.iter, dict.keys.__name__):
            self.output.write(')')
        self.output.write(',')
        self.output.write('function')
        self.output.write('(')
        # javascript iterations put the value first, then the key
        if isinstance(node.target, nodes.Tuple):
            if len(node.target.items) > 2:
                raise Exception('De-structuring more than 2 items is not supported.')
            for i, item in enumerate(reversed(node.target.items)):
                self._process_node(item, **kwargs)
                if i < len(node.target.items) - 1:
                    self.output.write(',')
        else:
            self._process_node(node.target, **kwargs)
        self.output.write(')')
        self.output.write('{')
        if node.test:
            # {% for x in xs if cond %}: skip items failing the condition
            self.output.write('if (!(')
            self._process_node(node.test, **kwargs)
            self.output.write(')) { return; }')
    # expose the loop target(s) on the context for the duration of the body
    assigns = node.target.items if isinstance(node.target, nodes.Tuple) else [node.target]
    with self._scoped_variables(assigns, **kwargs):
        for n in node.body:
            self._process_node(n, **kwargs)
    with self._execution():
        self.output.write('}')
        self.output.write(')')
        self.output.write(';')
    # restore the stored names
    self.stored_names = previous_stored_names
|
Processes a for loop. e.g.
{% for number in numbers %}
{{ number }}
{% endfor %}
{% for key, value in somemap.items() %}
{{ key }} -> {{ value }}
{% endfor %}
|
train
|
https://github.com/jonbretman/jinja-to-js/blob/0a784b10a83d37a3171c5797547e9fc460c51289/jinja_to_js/__init__.py#L469-L531
| null |
class JinjaToJS(object):
def __init__(self,
             template_root,
             template_name,
             js_module_format=None,
             runtime_path='jinja-to-js',
             include_prefix='',
             include_ext='',
             child_blocks=None,
             dependencies=None,
             custom_filters=None):
    """
    Args:
        template_root (str): The path to where templates should be loaded from.
        template_name (str): The name of the template to compile (relative to `template_root`).
        js_module_format (str, optional): The JavaScript module format to use.
                                          One of ('amd', 'commonjs', 'es6')
        runtime_path (str, optional): If `js_module_format` is specified then the JavaScript
                                      runtime will be imported using the appropriate method.
                                      It defaults to assuming it will be imported from
                                      `node_modules` but you can change it using this option.
        include_prefix (str, optional): If using the `amd` module format you can use this option
                                        to add a prefix to every include path as AMD imports are
                                        generally relative to the main file, not the module
                                        importing.
        include_ext (str, optional): By default any includes will be references without an
                                     extension, as neither AMD, commonJS or ES6 require the
                                     '.js' extension. If you want to use an extension, say
                                     '.template' then set this option to a string including
                                     the leading '.'
        child_blocks (dict, optional): Used internally when handling templates that extend
                                       other templates.
        dependencies (list of tuple, optional): Used internally when handling templates that
                                                extend other templates.
        custom_filters (list of str, optional): List of custom filters which should be allowed.
                                                These may be filters supported by Jinja but not
                                                supported by jinja-to-js. These filters MUST be
                                                registered with the jinja-to-js JS runtime.
    """
    self.environment = Environment(loader=FileSystemLoader(template_root),
                                   autoescape=True,
                                   extensions=['jinja2.ext.with_', 'jinja2.ext.autoescape'])
    self.output = six.StringIO()
    self.stored_names = set()
    self.temp_var_names = temp_var_names_generator()
    self.state = STATE_DEFAULT
    self.child_blocks = child_blocks or {}
    self.dependencies = dependencies or []
    self._runtime_function_cache = []
    self.js_module_format = js_module_format
    self.runtime_path = runtime_path
    self.include_prefix = include_prefix
    self.include_ext = include_ext
    self.template_root = template_root
    self.template_name = template_name
    self.custom_filters = custom_filters or []

    # The name of the JavaScript function that will output this template. By using a named
    # function the template can call itself which is required to support recursive includes.
    self.js_function_name = 'template' + ''.join(
        x.title() for x in re.split(r'[^\w]|_', path.splitext(self.template_name)[0])
    )

    self.context_name = 'context'

    # the JS runtime is always the first dependency
    self._add_dependency(self.runtime_path, 'jinjaToJS')

    template_string, template_path, _ = self.environment.loader.get_source(
        self.environment, self.template_name
    )

    # It is assumed that this will be the absolute path to the template. It is used to work out
    # relative paths for includes.
    self.template_path = template_path

    if self.js_module_format not in JS_MODULE_FORMATS.keys():
        raise ValueError(
            'The js_module_format option must be one of: %s' % JS_MODULE_FORMATS.keys()
        )

    self.ast = self.environment.parse(template_string)

    # compile immediately; an extending template raises ExtendsException to
    # stop processing once the parent has been compiled in its place
    try:
        for node in self.ast.body:
            self._process_node(node)
    except ExtendsException:
        pass
def get_output(self):
    """
    Returns the generated JavaScript code.

    Returns:
        str
    """
    rendered_function = TEMPLATE_WRAPPER.format(
        function_name=self.js_function_name,
        template_code=self.output.getvalue()
    ).strip()
    # wrap the template function in the requested module format (amd/commonjs/es6/None)
    wrap_module = JS_MODULE_FORMATS[self.js_module_format]
    return wrap_module(self.dependencies, rendered_function)
def _get_depencency_var_name(self, dependency):
    """
    Return the variable name already assigned to `dependency`, or None if it
    has not been registered yet.  (Method name kept as-is, misspelling
    included, for backward compatibility.)
    """
    for registered_path, var_name in self.dependencies:
        if registered_path == dependency:
            return var_name
    return None
def _add_dependency(self, dependency, var_name=None):
    """
    Register `dependency` and return the variable name used to access it.
    A fresh temp name is generated when `var_name` is not supplied.
    Duplicate (dependency, var_name) pairs are not added twice.
    """
    if var_name is None:
        var_name = next(self.temp_var_names)
    entry = (dependency, var_name)
    if entry not in self.dependencies:
        self.dependencies.append(entry)
    return var_name
def _process_node(self, node, **kwargs):
    """Dispatch `node` to the `_process_<classname>` handler for its type."""
    handler_name = '_process_' + node.__class__.__name__.lower()
    handler = getattr(self, handler_name, None)
    if not callable(handler):
        raise Exception('Unknown node %s' % node)
    handler(node, **kwargs)
def _process_extends(self, node, **kwargs):
    """
    Processes an extends block e.g. `{% extends "some/template.jinja" %}`.
    Collects this template's blocks, compiles the parent template in this
    instance's place, then aborts further processing of this template.
    """
    # find all the blocks in this template
    for b in self.ast.find_all(nodes.Block):

        # if not already in `child_blocks` then this is the first time a
        # block with this name has been encountered.
        if b.name not in self.child_blocks:
            self.child_blocks[b.name] = b
        else:

            # otherwise we have seen this block before, so we need to find the last
            # super_block and add the block from this template to the end.
            block = self.child_blocks.get(b.name)
            while hasattr(block, 'super_block'):
                block = block.super_block
            block.super_block = b

    # load the parent template; it shares this instance's child_blocks and
    # dependencies so the inheritance chain is visible while it compiles
    parent_template = JinjaToJS(template_root=self.template_root,
                                template_name=node.template.value,
                                js_module_format=self.js_module_format,
                                runtime_path=self.runtime_path,
                                include_prefix=self.include_prefix,
                                include_ext=self.include_ext,
                                child_blocks=self.child_blocks,
                                dependencies=self.dependencies)

    # add the parent templates output to the current output
    self.output.write(parent_template.output.getvalue())

    # Raise an exception so we stop parsing this template
    raise ExtendsException
def _process_block(self, node, **kwargs):
    """
    Processes a block e.g. `{% block my_block %}{% endblock %}`.

    Walks the inheritance chain built up by `_process_extends` so the
    youngest child block is rendered, with each block's parent reachable
    via its `super_block` attribute (used by `super()` calls).
    """
    # check if this node already has a 'super_block' attribute
    if not hasattr(node, 'super_block'):

        # since it doesn't it must be the last block in the inheritance chain
        node.super_block = None

        # see if there has been a child block defined - if there is this
        # will be the first block in the inheritance chain
        child_block = self.child_blocks.get(node.name)

        if child_block:

            # we have child nodes so we need to set `node` as the
            # super of the last one in the chain
            last_block = child_block
            while hasattr(last_block, 'super_block'):
                # FIX: advance along the chain; previously this re-read
                # `child_block.super_block` every iteration, which never
                # progressed and looped forever on chains of three or more.
                last_block = last_block.super_block

            # once we have found it, set this node as it's super block
            last_block.super_block = node

            # this is the node we want to process as it's the first in the inheritance chain
            node = child_block

    # process the block passing the it's super along, if this block
    # calls super() it will be handled by `_process_call`
    for n in node.body:
        self._process_node(n, super_block=node.super_block, **kwargs)
def _process_output(self, node, **kwargs):
    """Process each child of an `Output` node (literal text and interpolations)."""
    for child in node.nodes:
        self._process_node(child, **kwargs)
def _process_templatedata(self, node, **_):
    """
    Processes a `TemplateData` node — literal template text appended to the
    output as a JS double-quoted string literal.
    """
    value = node.data
    # FIX: escape backslashes first, otherwise template text containing `\`
    # (or the escapes added below) produced an invalid JS string literal
    value = value.replace('\\', '\\\\')
    # escape double quotes
    value = value.replace('"', '\\"')
    # FIX: normalise CR/LF line endings too — a raw \r is a JS line
    # terminator and would break the string literal
    value = value.replace('\r\n', '\n').replace('\r', '\n')
    # escape new lines
    value = value.replace('\n', '\\n')
    # append value to the result
    self.output.write('__result += "' + value + '";')
def _process_name(self, node, **kwargs):
    """
    Processes a `Name` node. Some examples of `Name` nodes:
        {{ foo }} -> 'foo' is a Name
        {% if foo }} -> 'foo' is a Name
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs):
            # names not bound locally (e.g. by {% set %}) are read off the context object
            if node.name not in self.stored_names and node.ctx != 'store':
                self.output.write(self.context_name)
                self.output.write('.')

            # remember assigned names so later reads skip the context prefix
            if node.ctx == 'store':
                self.stored_names.add(node.name)

            self.output.write(node.name)
def _process_getattr(self, node, **kwargs):
    """Emit attribute access, e.g. {{ foo.bar }} (`loop.*` handled specially)."""
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as attr_kwargs:
            if is_loop_helper(node):
                self._process_loop_helper(node, **attr_kwargs)
                return
            self._process_node(node.node, **attr_kwargs)
            self.output.write('.')
            self.output.write(node.attr)
def _process_getitem(self, node, **kwargs):
    """
    Processes a `GetItem` node e.g. {{ foo["bar"] }}, including slices
    such as {{ foo[1:2] }} which become JS Array.prototype.slice calls.
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.node, **new_kwargs)

            if isinstance(node.arg, nodes.Slice):
                self.output.write('.slice(')

                if node.arg.step is not None:
                    raise Exception('The step argument is not supported when slicing.')

                # omitted start defaults to the beginning of the array
                if node.arg.start is None:
                    self.output.write('0')
                else:
                    self._process_node(node.arg.start, **new_kwargs)

                if node.arg.stop is None:
                    # no stop value -> slice to the end, e.g. x.slice(0)
                    self.output.write(')')
                else:
                    self.output.write(',')
                    self._process_node(node.arg.stop, **new_kwargs)
                    self.output.write(')')
            else:
                self.output.write('[')
                self._process_node(node.arg, **new_kwargs)
                self.output.write(']')
def _process_if(self, node, execute_end=None, **kwargs):
    """
    Processes an if block e.g. `{% if foo %} do something {% endif %}`.
    `execute_end` closes an execution scope opened by an outer invocation
    when this function recurses for elif branches.
    """
    with self._execution():
        self.output.write('if')
        self.output.write('(')

        with option(kwargs, use_python_bool_wrapper=True):
            self._process_node(node.test, **kwargs)

        self.output.write(')')
        self.output.write('{')

    # We accept an `execute_end` function as a keyword argument as this function is
    # recursive in the case of something like if-elif-elif-else. In these cases this
    # invocation of this function may have to close execution opened by a previous
    # invocation of this function.
    if execute_end:
        execute_end()

    # body
    for n in node.body:
        self._process_node(n, **kwargs)

    if not node.else_ and not node.elif_:
        # no else - just close the if
        with self._execution():
            self.output.write('}')

    else:
        # either an else or an elif
        with self._execution() as execute_end:
            self.output.write('}')
            self.output.write(' else ')

            # check for elif
            for n in node.elif_:
                self._process_node(n, execute_end=execute_end, **kwargs)

            if node.elif_ and node.else_:
                self.output.write(' else ')

            # open up the body
            self.output.write('{')

        # process the body of the else
        for n in node.else_:
            self._process_node(n, **kwargs)

        # close the body
        with self._execution():
            self.output.write('}')
def _process_condexpr(self, node, **kwargs):
    # {{ a if cond else b }} -> (cond ? a : b)
    with self._interpolation():
        self.output.write('(')

        # only the condition gets the Python-truthiness wrapper
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.test, **new_kwargs)

        self.output.write(' ? ')
        self._process_node(node.expr1, **kwargs)
        self.output.write(' : ')
        self._process_node(node.expr2, **kwargs)
        self.output.write(')')
def _process_not(self, node, **kwargs):
    """Emit logical negation with Python truthiness semantics."""
    self.output.write('!')
    with self._python_bool_wrapper(**kwargs) as inner_kwargs:
        self._process_node(node.node, **inner_kwargs)
def _process_or(self, node, **kwargs):
    """Emit `left || right`."""
    emit = self.output.write
    self._process_node(node.left, **kwargs)
    emit(' || ')
    self._process_node(node.right, **kwargs)
def _process_and(self, node, **kwargs):
    """Emit `left && right`."""
    emit = self.output.write
    self._process_node(node.left, **kwargs)
    emit(' && ')
    self._process_node(node.right, **kwargs)
def _process_tuple(self, node, **kwargs):
    """Emit a tuple as a JS array literal, e.g. [1,2,3]."""
    emit = self.output.write
    emit('[')
    last = len(node.items) - 1
    for idx, entry in enumerate(node.items):
        self._process_node(entry, **kwargs)
        if idx < last:
            emit(',')
    emit(']')
def _process_call(self, node, super_block=None, **kwargs):
    """
    Processes a function call.  Dict iteration methods and `super()` get
    special handling; anything else is emitted as a plain JS call on a
    context variable.
    """
    if is_method_call(node, DICT_ITER_METHODS):
        # special case for dict methods
        self._process_node(node.node.node, **kwargs)

    elif is_method_call(node, 'super'):
        # special case for the super() method which is available inside blocks
        if not super_block:
            raise Exception('super() called outside of a block with a parent.')
        self._process_node(super_block, **kwargs)

    else:
        # just a normal function call on a context variable
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self._process_node(node.node, **new_kwargs)
                self.output.write('(')
                self._process_args(node, **new_kwargs)
                self.output.write(')')

        # only output the semi-colon if we are not interpolating
        # NOTE(review): this writes an empty string, not ';' — the comment and
        # the code disagree; presumably the semi-colon was dropped because
        # _interpolation already terminates the statement. Confirm upstream.
        if self.state != STATE_INTERPOLATING:
            self.output.write('')
def _process_filter(self, node, **kwargs):
method_name = getattr(self, '_process_filter_%s' % node.name, None)
if callable(method_name):
method_name(node, **kwargs)
elif node.name in self.custom_filters:
with self._interpolation(safe=True):
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.%s(' % node.name)
self._process_node(node.node, **new_kwargs)
if getattr(node, 'args', None):
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
else:
raise Exception('Unsupported filter: %s' % node.name)
def _process_filter_safe(self, node, **kwargs):
with self._interpolation(safe=True):
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
def _process_filter_capitalize(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.capitalize(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_abs(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('Math.abs(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_attr(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
self.output.write('[')
self._process_node(node.args[0], **new_kwargs)
self.output.write(']')
def _process_filter_batch(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.batch(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_default(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.default(')
self._process_node(node.node, **new_kwargs)
if node.args:
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_first(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.first(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_int(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.int(')
self._process_node(node.node, **new_kwargs)
if node.args:
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_last(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.last(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_length(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.size(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_lower(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").toLowerCase()')
def _process_filter_slice(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.slice(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_title(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.title(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_trim(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").trim()')
def _process_filter_upper(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").toUpperCase()')
def _process_filter_truncate(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.truncate(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_assign(self, node, **kwargs):
with self._execution():
self.output.write('var ')
self._process_node(node.target, **kwargs)
self.output.write(' = ')
self._process_node(node.node, **kwargs)
self.output.write(';')
def _process_with(self, node, **kwargs):
# keep a copy of the stored names before the scope
previous_stored_names = self.stored_names.copy()
# assigns in the with tag
# e.g. {% with var = "something %}
assigns_in_tag = [nodes.Assign(t, v) for t, v in zip(node.targets, node.values)]
# assigns in the with body
# e.g. {% set name = 'John' %}
assigns_in_body = [x for x in node.body if isinstance(x, nodes.Assign)]
# remove assigns from the body
node.body = [x for x in node.body if not isinstance(x, nodes.Assign)]
# get a list of all the assigns in this with block
# both on the tag, and within the body of the block
all_assigns = assigns_in_tag + assigns_in_body
with self._execution():
self.output.write('(function () {')
with self._scoped_variables(all_assigns, **kwargs):
for node in node.body:
self._process_node(node, **kwargs)
with self._execution():
self.output.write('})();')
# restore previous stored names
self.stored_names = previous_stored_names
def _process_compare(self, node, **kwargs):
if len(node.ops) > 1:
raise Exception('Multiple operands are not supported.')
operand = node.ops[0]
is_equality = operand.op in ('eq', 'ne')
left_hand_is_const = isinstance(node.expr, nodes.Const)
right_hand_is_const = isinstance(operand.expr, nodes.Const)
# If the operand is equality and neither the left or right hand side are constants then we
# will need to use the JavaScript deep equals function. Ideally we want to avoid using this
# as it is quite a big function.
use_is_equal_function = is_equality and not (left_hand_is_const or right_hand_is_const)
with option(kwargs, use_python_bool_wrapper=False):
if use_is_equal_function:
if operand.op == 'ne':
self.output.write('!')
self.output.write('__runtime.isEqual(')
self._process_node(node.expr, **kwargs)
if use_is_equal_function:
self.output.write(',')
else:
self.output.write(OPERANDS.get(operand.op))
self._process_node(operand.expr, **kwargs)
if use_is_equal_function:
self.output.write(')')
def _process_operand(self, node, **kwargs):
self.output.write(OPERANDS.get(node.op))
self._process_node(node.expr, **kwargs)
def _process_const(self, node, **_):
with self._interpolation():
self.output.write(json.dumps(node.value))
def _process_nonetype(self, node, **_):
with self._interpolation():
self.output.write('null')
def _process_neg(self, node, **kwargs):
with self._interpolation():
self.output.write('-')
self._process_node(node.node, **kwargs)
def _process_list(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
def _process_test(self, node, **kwargs):
with option(kwargs, use_python_bool_wrapper=False):
method_name = getattr(self, '_process_test_%s' % node.name, None)
if callable(method_name):
method_name(node, **kwargs)
else:
raise Exception('Unsupported test: %s' % node.name)
def _process_test_defined(self, node, **kwargs):
self.output.write('(typeof ')
self._process_node(node.node, **kwargs)
self.output.write(' !== "undefined")')
def _process_test_undefined(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' === undefined')
def _process_test_callable(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Function"')
def _process_test_divisibleby(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % ')
self._process_node(node.args[0], **kwargs)
self.output.write(' === 0')
def _process_test_even(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % 2 === 0')
def _process_test_odd(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % 2 === 1')
def _process_test_none(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' === null')
def _process_test_upper(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write('.toUpperCase() === ')
self._process_node(node.node, **kwargs)
def _process_test_lower(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write('.toLowerCase() === ')
self._process_node(node.node, **kwargs)
def _process_test_string(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "String"')
def _process_test_mapping(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Object"')
def _process_test_number(self, node, **kwargs):
self.output.write('(__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Number" && !isNaN(')
self._process_node(node.node, **kwargs)
self.output.write('))')
def _process_include(self, node, **kwargs):
with self._interpolation(safe=True):
include_path = node.template.value
if include_path == self.template_name:
# template is including itself
include_var_name = self.js_function_name
else:
if self.include_prefix:
include_path = self.include_prefix + node.template.value
elif self.js_module_format in ('es6', 'commonjs',) and self.template_name:
_, absolute_include_path, _ = self.environment.loader.get_source(
self.environment, node.template.value
)
include_path = os.path.relpath(
absolute_include_path, os.path.dirname(self.template_path)
)
if not include_path.startswith('.'):
include_path = './' + include_path
include_path = path.splitext(include_path)[0] + self.include_ext
include_var_name = self._get_depencency_var_name(include_path)
if not include_var_name:
include_var_name = self._add_dependency(include_path)
if self.js_module_format is None:
self.output.write('jinjaToJS.include("')
self.output.write(include_path)
self.output.write('");')
else:
self.output.write(include_var_name)
self.output.write('(')
self.output.write(self.context_name)
self.output.write(')')
def _process_add(self, node, **kwargs):
self._process_math(node, math_operator=' + ', **kwargs)
def _process_sub(self, node, **kwargs):
self._process_math(node, math_operator=' - ', **kwargs)
def _process_div(self, node, **kwargs):
self._process_math(node, math_operator=' / ', **kwargs)
def _process_floordiv(self, node, **kwargs):
self._process_math(node, math_operator=' / ', function='Math.floor', **kwargs)
def _process_mul(self, node, **kwargs):
self._process_math(node, math_operator=' * ', **kwargs)
def _process_mod(self, node, **kwargs):
self._process_math(node, math_operator=' % ', **kwargs)
def _process_math(self, node, math_operator=None, function=None, **kwargs):
"""
Processes a math node e.g. `Div`, `Sub`, `Add`, `Mul` etc...
If `function` is provided the expression is wrapped in a call to that function.
"""
with self._interpolation():
if function:
self.output.write(function)
self.output.write('(')
self._process_node(node.left, **kwargs)
self.output.write(math_operator)
self._process_node(node.right, **kwargs)
if function:
self.output.write(')')
def _process_loop_helper(self, node, **kwargs):
"""
Processes a loop helper e.g. {{ loop.first }} or {{ loop.index }}
"""
if node.attr == LOOP_HELPER_INDEX:
self.output.write('(arguments[1] + 1)')
elif node.attr == LOOP_HELPER_INDEX_0:
self.output.write('arguments[1]')
elif node.attr == LOOP_HELPER_FIRST:
self.output.write('(arguments[1] == 0)')
elif node.attr == LOOP_HELPER_LAST:
self.output.write('(arguments[1] == arguments[2].length - 1)')
elif node.attr == LOOP_HELPER_LENGTH:
self.output.write('arguments[2].length')
def _process_args(self, node, **kwargs):
args = getattr(node, 'args', None)
if not args:
return
for i, item in enumerate(args):
self._process_node(item, **kwargs)
if i < len(node.args) - 1:
self.output.write(',')
@contextlib.contextmanager
def _execution(self):
"""
Context manager for executing some JavaScript inside a template.
"""
did_start_executing = False
if self.state == STATE_DEFAULT:
did_start_executing = True
self.state = STATE_EXECUTING
def close():
if did_start_executing and self.state == STATE_EXECUTING:
self.state = STATE_DEFAULT
yield close
close()
@contextlib.contextmanager
def _interpolation(self, safe=False):
did_start_interpolating = False
if self.state == STATE_DEFAULT:
did_start_interpolating = True
self.output.write('__result += "" + ')
if safe is not True:
self.output.write('__runtime.escape')
self.output.write('((__tmp = (')
self.state = STATE_INTERPOLATING
def close():
if did_start_interpolating and self.state == STATE_INTERPOLATING:
self.output.write(')) == null ? "" : __tmp);')
self.state = STATE_DEFAULT
yield close
close()
@contextlib.contextmanager
def _scoped_variables(self, nodes_list, **kwargs):
"""
Context manager for creating scoped variables defined by the nodes in `nodes_list`.
These variables will be added to the context, and when the context manager exits the
context object will be restored to it's previous state.
"""
tmp_vars = []
for node in nodes_list:
is_assign_node = isinstance(node, nodes.Assign)
name = node.target.name if is_assign_node else node.name
# create a temp variable name
tmp_var = next(self.temp_var_names)
# save previous context value
with self._execution():
# save the current value of this name
self.output.write('var %s = %s.%s;' % (tmp_var, self.context_name, name))
# add new value to context
self.output.write('%s.%s = ' % (self.context_name, name))
if is_assign_node:
self._process_node(node.node, **kwargs)
else:
self.output.write(node.name)
self.output.write(';')
tmp_vars.append((tmp_var, name))
yield
# restore context
for tmp_var, name in tmp_vars:
with self._execution():
self.output.write('%s.%s = %s;' % (self.context_name, name, tmp_var))
@contextlib.contextmanager
def _python_bool_wrapper(self, **kwargs):
use_python_bool_wrapper = kwargs.get('use_python_bool_wrapper')
if use_python_bool_wrapper:
self.output.write('__runtime.boolean(')
with option(kwargs, use_python_bool_wrapper=False):
yield kwargs
if use_python_bool_wrapper:
self.output.write(')')
|
jonbretman/jinja-to-js
|
jinja_to_js/__init__.py
|
JinjaToJS._process_if
|
python
|
def _process_if(self, node, execute_end=None, **kwargs):
with self._execution():
self.output.write('if')
self.output.write('(')
with option(kwargs, use_python_bool_wrapper=True):
self._process_node(node.test, **kwargs)
self.output.write(')')
self.output.write('{')
# We accept an `execute_end` function as a keyword argument as this function is
# recursive in the case of something like if-elif-elif-else. In these cases this
# invocation of this function may have to close execution opened by a previous
# invocation of this function.
if execute_end:
execute_end()
# body
for n in node.body:
self._process_node(n, **kwargs)
if not node.else_ and not node.elif_:
# no else - just close the if
with self._execution():
self.output.write('}')
else:
# either an else or an elif
with self._execution() as execute_end:
self.output.write('}')
self.output.write(' else ')
# check for elif
for n in node.elif_:
self._process_node(n, execute_end=execute_end, **kwargs)
if node.elif_ and node.else_:
self.output.write(' else ')
# open up the body
self.output.write('{')
# process the body of the else
for n in node.else_:
self._process_node(n, **kwargs)
# close the body
with self._execution():
self.output.write('}')
|
Processes an if block e.g. `{% if foo %} do something {% endif %}`
|
train
|
https://github.com/jonbretman/jinja-to-js/blob/0a784b10a83d37a3171c5797547e9fc460c51289/jinja_to_js/__init__.py#L533-L586
| null |
class JinjaToJS(object):
def __init__(self,
template_root,
template_name,
js_module_format=None,
runtime_path='jinja-to-js',
include_prefix='',
include_ext='',
child_blocks=None,
dependencies=None,
custom_filters=None):
"""
Args:
template_root (str): The path to where templates should be loaded from.
template_name (str): The name of the template to compile (relative to `template_root`).
js_module_format (str, optional): The JavaScript module format to use.
One of ('amd', 'commonjs', 'es6')
runtime_path (str, optional): If `js_module_format` is specified then the JavaScript
runtime will be imported using the appropriate method.
It defaults to assuming it will be imported from
`node_modules` but you can change it using this option.
include_prefix (str, optional): If using the `amd` module format you can use this option
to add a prefix to every include path as AMD imports are
generally relative to the main file, not the module
importing.
include_ext (str, optional): By default any includes will be references without an
extension, as neither AMD, commonJS or ES6 require the
'.js' extension. If you want to use an extension, say
'.template' then set this option to a string including
the leading '.'
child_blocks (dict, optional): Used internally when handling templates that extend
other templates.
dependencies (list of tuple, optional): Used internally when handling templates that
extend other templates.
custom_filters (list of str, optional): List of custom filters which should be allowed.
These may be filters supported by Jinja but not
supported by jinja-to-js. These filters MUST be
registered with the jinja-to-js JS runtime.
"""
self.environment = Environment(loader=FileSystemLoader(template_root),
autoescape=True,
extensions=['jinja2.ext.with_', 'jinja2.ext.autoescape'])
self.output = six.StringIO()
self.stored_names = set()
self.temp_var_names = temp_var_names_generator()
self.state = STATE_DEFAULT
self.child_blocks = child_blocks or {}
self.dependencies = dependencies or []
self._runtime_function_cache = []
self.js_module_format = js_module_format
self.runtime_path = runtime_path
self.include_prefix = include_prefix
self.include_ext = include_ext
self.template_root = template_root
self.template_name = template_name
self.custom_filters = custom_filters or []
# The name of the JavaScript function that will output this template. By using a named
# function the template can call itself which is required to support recursive includes.
self.js_function_name = 'template' + ''.join(
x.title() for x in re.split(r'[^\w]|_', path.splitext(self.template_name)[0])
)
self.context_name = 'context'
self._add_dependency(self.runtime_path, 'jinjaToJS')
template_string, template_path, _ = self.environment.loader.get_source(
self.environment, self.template_name
)
# It is assumed that this will be the absolute path to the template. It is used to work out
# related paths for inclues.
self.template_path = template_path
if self.js_module_format not in JS_MODULE_FORMATS.keys():
raise ValueError(
'The js_module_format option must be one of: %s' % JS_MODULE_FORMATS.keys()
)
self.ast = self.environment.parse(template_string)
try:
for node in self.ast.body:
self._process_node(node)
except ExtendsException:
pass
def get_output(self):
"""
Returns the generated JavaScript code.
Returns:
str
"""
# generate the JS function string
template_function = TEMPLATE_WRAPPER.format(
function_name=self.js_function_name,
template_code=self.output.getvalue()
).strip()
# get the correct module format template
module_format = JS_MODULE_FORMATS[self.js_module_format]
# generate the module code
return module_format(self.dependencies, template_function)
def _get_depencency_var_name(self, dependency):
"""
Returns the variable name assigned to the given dependency or None if the dependency has
not yet been registered.
Args:
dependency (str): Thet dependency that needs to be imported.
Returns:
str or None
"""
for dep_path, var_name in self.dependencies:
if dep_path == dependency:
return var_name
def _add_dependency(self, dependency, var_name=None):
"""
Adds the given dependency and returns the variable name to use to access it. If `var_name`
is not given then a random one will be created.
Args:
dependency (str):
var_name (str, optional):
Returns:
str
"""
if var_name is None:
var_name = next(self.temp_var_names)
# Don't add duplicate dependencies
if (dependency, var_name) not in self.dependencies:
self.dependencies.append((dependency, var_name))
return var_name
def _process_node(self, node, **kwargs):
node_name = node.__class__.__name__.lower()
handler = getattr(self, '_process_' + node_name, None)
if callable(handler):
handler(node, **kwargs)
else:
raise Exception('Unknown node %s' % node)
def _process_extends(self, node, **kwargs):
"""
Processes an extends block e.g. `{% extends "some/template.jinja" %}`
"""
# find all the blocks in this template
for b in self.ast.find_all(nodes.Block):
# if not already in `child_blocks` then this is the first time a
# block with this name has been encountered.
if b.name not in self.child_blocks:
self.child_blocks[b.name] = b
else:
# otherwise we have seen this block before, so we need to find the last
# super_block and add the block from this template to the end.
block = self.child_blocks.get(b.name)
while hasattr(block, 'super_block'):
block = block.super_block
block.super_block = b
# load the parent template
parent_template = JinjaToJS(template_root=self.template_root,
template_name=node.template.value,
js_module_format=self.js_module_format,
runtime_path=self.runtime_path,
include_prefix=self.include_prefix,
include_ext=self.include_ext,
child_blocks=self.child_blocks,
dependencies=self.dependencies)
# add the parent templates output to the current output
self.output.write(parent_template.output.getvalue())
# Raise an exception so we stop parsing this template
raise ExtendsException
def _process_block(self, node, **kwargs):
"""
Processes a block e.g. `{% block my_block %}{% endblock %}`
"""
# check if this node already has a 'super_block' attribute
if not hasattr(node, 'super_block'):
# since it doesn't it must be the last block in the inheritance chain
node.super_block = None
# see if there has been a child block defined - if there is this
# will be the first block in the inheritance chain
child_block = self.child_blocks.get(node.name)
if child_block:
# we have child nodes so we need to set `node` as the
# super of the last one in the chain
last_block = child_block
while hasattr(last_block, 'super_block'):
last_block = child_block.super_block
# once we have found it, set this node as it's super block
last_block.super_block = node
# this is the node we want to process as it's the first in the inheritance chain
node = child_block
# process the block passing the it's super along, if this block
# calls super() it will be handled by `_process_call`
for n in node.body:
self._process_node(n, super_block=node.super_block, **kwargs)
def _process_output(self, node, **kwargs):
"""
Processes an output node, which will contain things like `Name` and `TemplateData` nodes.
"""
for n in node.nodes:
self._process_node(n, **kwargs)
def _process_templatedata(self, node, **_):
"""
Processes a `TemplateData` node, this is just a bit of as-is text
to be written to the output.
"""
# escape double quotes
value = re.sub('"', r'\\"', node.data)
# escape new lines
value = re.sub('\n', r'\\n', value)
# append value to the result
self.output.write('__result += "' + value + '";')
def _process_name(self, node, **kwargs):
"""
Processes a `Name` node. Some examples of `Name` nodes:
{{ foo }} -> 'foo' is a Name
{% if foo }} -> 'foo' is a Name
"""
with self._interpolation():
with self._python_bool_wrapper(**kwargs):
if node.name not in self.stored_names and node.ctx != 'store':
self.output.write(self.context_name)
self.output.write('.')
if node.ctx == 'store':
self.stored_names.add(node.name)
self.output.write(node.name)
def _process_getattr(self, node, **kwargs):
"""
Processes a `GetAttr` node. e.g. {{ foo.bar }}
"""
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
if is_loop_helper(node):
self._process_loop_helper(node, **new_kwargs)
else:
self._process_node(node.node, **new_kwargs)
self.output.write('.')
self.output.write(node.attr)
def _process_getitem(self, node, **kwargs):
"""
Processes a `GetItem` node e.g. {{ foo["bar"] }}
"""
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
if isinstance(node.arg, nodes.Slice):
self.output.write('.slice(')
if node.arg.step is not None:
raise Exception('The step argument is not supported when slicing.')
if node.arg.start is None:
self.output.write('0')
else:
self._process_node(node.arg.start, **new_kwargs)
if node.arg.stop is None:
self.output.write(')')
else:
self.output.write(',')
self._process_node(node.arg.stop, **new_kwargs)
self.output.write(')')
else:
self.output.write('[')
self._process_node(node.arg, **new_kwargs)
self.output.write(']')
def _process_for(self, node, **kwargs):
"""
Processes a for loop. e.g.
{% for number in numbers %}
{{ number }}
{% endfor %}
{% for key, value in somemap.items() %}
{{ key }} -> {{ value }}
{% %}
"""
# since a for loop can introduce new names into the context
# we need to remember the ones that existed outside the loop
previous_stored_names = self.stored_names.copy()
with self._execution():
self.output.write('__runtime.each(')
if is_method_call(node.iter, dict.keys.__name__):
self.output.write('Object.keys(')
self._process_node(node.iter, **kwargs)
if is_method_call(node.iter, dict.keys.__name__):
self.output.write(')')
self.output.write(',')
self.output.write('function')
self.output.write('(')
# javascript iterations put the value first, then the key
if isinstance(node.target, nodes.Tuple):
if len(node.target.items) > 2:
raise Exception('De-structuring more than 2 items is not supported.')
for i, item in enumerate(reversed(node.target.items)):
self._process_node(item, **kwargs)
if i < len(node.target.items) - 1:
self.output.write(',')
else:
self._process_node(node.target, **kwargs)
self.output.write(')')
self.output.write('{')
if node.test:
self.output.write('if (!(')
self._process_node(node.test, **kwargs)
self.output.write(')) { return; }')
assigns = node.target.items if isinstance(node.target, nodes.Tuple) else [node.target]
with self._scoped_variables(assigns, **kwargs):
for n in node.body:
self._process_node(n, **kwargs)
with self._execution():
self.output.write('}')
self.output.write(')')
self.output.write(';')
# restore the stored names
self.stored_names = previous_stored_names
def _process_condexpr(self, node, **kwargs):
with self._interpolation():
self.output.write('(')
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.test, **new_kwargs)
self.output.write(' ? ')
self._process_node(node.expr1, **kwargs)
self.output.write(' : ')
self._process_node(node.expr2, **kwargs)
self.output.write(')')
def _process_not(self, node, **kwargs):
self.output.write('!')
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
def _process_or(self, node, **kwargs):
self._process_node(node.left, **kwargs)
self.output.write(' || ')
self._process_node(node.right, **kwargs)
def _process_and(self, node, **kwargs):
self._process_node(node.left, **kwargs)
self.output.write(' && ')
self._process_node(node.right, **kwargs)
def _process_tuple(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
def _process_call(self, node, super_block=None, **kwargs):
if is_method_call(node, DICT_ITER_METHODS):
# special case for dict methods
self._process_node(node.node.node, **kwargs)
elif is_method_call(node, 'super'):
# special case for the super() method which is available inside blocks
if not super_block:
raise Exception('super() called outside of a block with a parent.')
self._process_node(super_block, **kwargs)
else:
# just a normal function call on a context variable
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
self.output.write('(')
self._process_args(node, **new_kwargs)
self.output.write(')')
# only output the semi-colon if we are not interpolating
if self.state != STATE_INTERPOLATING:
self.output.write('')
def _process_filter(self, node, **kwargs):
method_name = getattr(self, '_process_filter_%s' % node.name, None)
if callable(method_name):
method_name(node, **kwargs)
elif node.name in self.custom_filters:
with self._interpolation(safe=True):
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.%s(' % node.name)
self._process_node(node.node, **new_kwargs)
if getattr(node, 'args', None):
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
else:
raise Exception('Unsupported filter: %s' % node.name)
def _process_filter_safe(self, node, **kwargs):
with self._interpolation(safe=True):
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
def _process_filter_capitalize(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.capitalize(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_abs(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('Math.abs(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_attr(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
self.output.write('[')
self._process_node(node.args[0], **new_kwargs)
self.output.write(']')
def _process_filter_batch(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.batch(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_default(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.default(')
self._process_node(node.node, **new_kwargs)
if node.args:
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_first(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.first(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_int(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.int(')
self._process_node(node.node, **new_kwargs)
if node.args:
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_last(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.last(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_length(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.size(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_lower(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").toLowerCase()')
def _process_filter_slice(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.slice(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_title(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.title(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_trim(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").trim()')
def _process_filter_upper(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").toUpperCase()')
def _process_filter_truncate(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.truncate(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_assign(self, node, **kwargs):
with self._execution():
self.output.write('var ')
self._process_node(node.target, **kwargs)
self.output.write(' = ')
self._process_node(node.node, **kwargs)
self.output.write(';')
def _process_with(self, node, **kwargs):
# keep a copy of the stored names before the scope
previous_stored_names = self.stored_names.copy()
# assigns in the with tag
# e.g. {% with var = "something %}
assigns_in_tag = [nodes.Assign(t, v) for t, v in zip(node.targets, node.values)]
# assigns in the with body
# e.g. {% set name = 'John' %}
assigns_in_body = [x for x in node.body if isinstance(x, nodes.Assign)]
# remove assigns from the body
node.body = [x for x in node.body if not isinstance(x, nodes.Assign)]
# get a list of all the assigns in this with block
# both on the tag, and within the body of the block
all_assigns = assigns_in_tag + assigns_in_body
with self._execution():
self.output.write('(function () {')
with self._scoped_variables(all_assigns, **kwargs):
for node in node.body:
self._process_node(node, **kwargs)
with self._execution():
self.output.write('})();')
# restore previous stored names
self.stored_names = previous_stored_names
def _process_compare(self, node, **kwargs):
if len(node.ops) > 1:
raise Exception('Multiple operands are not supported.')
operand = node.ops[0]
is_equality = operand.op in ('eq', 'ne')
left_hand_is_const = isinstance(node.expr, nodes.Const)
right_hand_is_const = isinstance(operand.expr, nodes.Const)
# If the operand is equality and neither the left or right hand side are constants then we
# will need to use the JavaScript deep equals function. Ideally we want to avoid using this
# as it is quite a big function.
use_is_equal_function = is_equality and not (left_hand_is_const or right_hand_is_const)
with option(kwargs, use_python_bool_wrapper=False):
if use_is_equal_function:
if operand.op == 'ne':
self.output.write('!')
self.output.write('__runtime.isEqual(')
self._process_node(node.expr, **kwargs)
if use_is_equal_function:
self.output.write(',')
else:
self.output.write(OPERANDS.get(operand.op))
self._process_node(operand.expr, **kwargs)
if use_is_equal_function:
self.output.write(')')
def _process_operand(self, node, **kwargs):
self.output.write(OPERANDS.get(node.op))
self._process_node(node.expr, **kwargs)
def _process_const(self, node, **_):
with self._interpolation():
self.output.write(json.dumps(node.value))
def _process_nonetype(self, node, **_):
with self._interpolation():
self.output.write('null')
def _process_neg(self, node, **kwargs):
with self._interpolation():
self.output.write('-')
self._process_node(node.node, **kwargs)
def _process_list(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
def _process_test(self, node, **kwargs):
    # Dispatch `x is <test>` expressions to a `_process_test_<name>` handler.
    with option(kwargs, use_python_bool_wrapper=False):
        method_name = getattr(self, '_process_test_%s' % node.name, None)
        if callable(method_name):
            method_name(node, **kwargs)
        else:
            raise Exception('Unsupported test: %s' % node.name)

def _process_test_defined(self, node, **kwargs):
    # {{ x is defined }} -> (typeof x !== "undefined")
    self.output.write('(typeof ')
    self._process_node(node.node, **kwargs)
    self.output.write(' !== "undefined")')

def _process_test_undefined(self, node, **kwargs):
    self._process_node(node.node, **kwargs)
    self.output.write(' === undefined')

def _process_test_callable(self, node, **kwargs):
    self.output.write('__runtime.type(')
    self._process_node(node.node, **kwargs)
    self.output.write(') === "Function"')

def _process_test_divisibleby(self, node, **kwargs):
    # {{ x is divisibleby(n) }} -> x % n === 0
    self._process_node(node.node, **kwargs)
    self.output.write(' % ')
    self._process_node(node.args[0], **kwargs)
    self.output.write(' === 0')

def _process_test_even(self, node, **kwargs):
    self._process_node(node.node, **kwargs)
    self.output.write(' % 2 === 0')

def _process_test_odd(self, node, **kwargs):
    # NOTE(review): JS `%` keeps the dividend's sign, so this is false for
    # negative odd numbers - confirm whether negative inputs can occur.
    self._process_node(node.node, **kwargs)
    self.output.write(' % 2 === 1')

def _process_test_none(self, node, **kwargs):
    self._process_node(node.node, **kwargs)
    self.output.write(' === null')

def _process_test_upper(self, node, **kwargs):
    # {{ x is upper }} -> x.toUpperCase() === x
    self._process_node(node.node, **kwargs)
    self.output.write('.toUpperCase() === ')
    self._process_node(node.node, **kwargs)

def _process_test_lower(self, node, **kwargs):
    self._process_node(node.node, **kwargs)
    self.output.write('.toLowerCase() === ')
    self._process_node(node.node, **kwargs)

def _process_test_string(self, node, **kwargs):
    self.output.write('__runtime.type(')
    self._process_node(node.node, **kwargs)
    self.output.write(') === "String"')

def _process_test_mapping(self, node, **kwargs):
    self.output.write('__runtime.type(')
    self._process_node(node.node, **kwargs)
    self.output.write(') === "Object"')

def _process_test_number(self, node, **kwargs):
    # NaN is typed "Number" in JS, so it is excluded explicitly.
    self.output.write('(__runtime.type(')
    self._process_node(node.node, **kwargs)
    self.output.write(') === "Number" && !isNaN(')
    self._process_node(node.node, **kwargs)
    self.output.write('))')
def _process_include(self, node, **kwargs):
    """
    Processes an include e.g. `{% include "template.jinja" %}`.

    Resolves the include path (applying `include_prefix`/`include_ext`,
    or a relative path for es6/commonjs), registers it as a dependency,
    and emits a call to the included template function with the current
    context.
    """
    # The include's output is already escaped by the included template.
    with self._interpolation(safe=True):
        include_path = node.template.value
        if include_path == self.template_name:
            # template is including itself - call its own named function.
            include_var_name = self.js_function_name
        else:
            if self.include_prefix:
                include_path = self.include_prefix + node.template.value
            elif self.js_module_format in ('es6', 'commonjs',) and self.template_name:
                # Resolve the include relative to this template's file.
                _, absolute_include_path, _ = self.environment.loader.get_source(
                    self.environment, node.template.value
                )
                include_path = os.path.relpath(
                    absolute_include_path, os.path.dirname(self.template_path)
                )
                if not include_path.startswith('.'):
                    include_path = './' + include_path
            # Swap the template extension for the configured one (if any).
            include_path = path.splitext(include_path)[0] + self.include_ext
            include_var_name = self._get_depencency_var_name(include_path)
            if not include_var_name:
                include_var_name = self._add_dependency(include_path)
        if self.js_module_format is None:
            # No module system - defer to the runtime's include helper.
            self.output.write('jinjaToJS.include("')
            self.output.write(include_path)
            self.output.write('");')
        else:
            # Call the imported template function with the current context.
            self.output.write(include_var_name)
            self.output.write('(')
            self.output.write(self.context_name)
            self.output.write(')')
def _process_add(self, node, **kwargs):
    # {{ a + b }}
    self._process_math(node, math_operator=' + ', **kwargs)

def _process_sub(self, node, **kwargs):
    # {{ a - b }}
    self._process_math(node, math_operator=' - ', **kwargs)

def _process_div(self, node, **kwargs):
    # {{ a / b }}
    self._process_math(node, math_operator=' / ', **kwargs)

def _process_floordiv(self, node, **kwargs):
    # {{ a // b }} - JS has no floor division, so wrap in Math.floor.
    self._process_math(node, math_operator=' / ', function='Math.floor', **kwargs)

def _process_mul(self, node, **kwargs):
    # {{ a * b }}
    self._process_math(node, math_operator=' * ', **kwargs)

def _process_mod(self, node, **kwargs):
    # {{ a % b }}
    self._process_math(node, math_operator=' % ', **kwargs)
def _process_math(self, node, math_operator=None, function=None, **kwargs):
    """
    Processes a math node e.g. `Div`, `Sub`, `Add`, `Mul` etc...
    If `function` is provided the expression is wrapped in a call to that function.
    """
    emit = self.output.write
    wrap = bool(function)
    with self._interpolation():
        if wrap:
            # e.g. Math.floor( left / right )
            emit(function)
            emit('(')
        self._process_node(node.left, **kwargs)
        emit(math_operator)
        self._process_node(node.right, **kwargs)
        if wrap:
            emit(')')
def _process_loop_helper(self, node, **kwargs):
    """
    Processes a loop helper e.g. {{ loop.first }} or {{ loop.index }}
    """
    # Inside the generated `__runtime.each` callback: arguments[1] is the
    # zero-based index and arguments[2] is the iterable, mirroring Jinja's
    # `loop` object.
    if node.attr == LOOP_HELPER_INDEX:
        self.output.write('(arguments[1] + 1)')
    elif node.attr == LOOP_HELPER_INDEX_0:
        self.output.write('arguments[1]')
    elif node.attr == LOOP_HELPER_FIRST:
        self.output.write('(arguments[1] == 0)')
    elif node.attr == LOOP_HELPER_LAST:
        self.output.write('(arguments[1] == arguments[2].length - 1)')
    elif node.attr == LOOP_HELPER_LENGTH:
        self.output.write('arguments[2].length')
def _process_args(self, node, **kwargs):
    """
    Processes the arguments of a call or filter node, writing them as a
    comma separated list (no trailing comma).
    """
    args = getattr(node, 'args', None)
    if not args:
        # No arguments e.g. `{{ foo() }}` - nothing to emit.
        return
    # Consistency fix: use the guarded local `args` instead of re-reading
    # `node.args` inside the loop.
    last_index = len(args) - 1
    for i, item in enumerate(args):
        self._process_node(item, **kwargs)
        if i < last_index:
            self.output.write(',')
@contextlib.contextmanager
def _execution(self):
    """
    Context manager for executing some JavaScript inside a template.

    Yields a `close` callable that ends the execution state early; it is
    also invoked automatically on exit. Nested uses are no-ops: only the
    outermost call flips the state machine.
    """
    did_start_executing = False
    if self.state == STATE_DEFAULT:
        did_start_executing = True
        self.state = STATE_EXECUTING
    def close():
        # Only the invocation that started executing may end it.
        if did_start_executing and self.state == STATE_EXECUTING:
            self.state = STATE_DEFAULT
    yield close
    close()
@contextlib.contextmanager
def _interpolation(self, safe=False):
    """
    Context manager for interpolating a value into the rendered output,
    e.g. `{{ foo }}`.

    Opens `__result += "" + __runtime.escape((__tmp = (` (the escape call
    is skipped when `safe` is True) and closes it with a null-guard on
    exit. Nested uses are no-ops: only the outermost call writes the
    wrapper and flips the state machine.
    """
    did_start_interpolating = False
    if self.state == STATE_DEFAULT:
        did_start_interpolating = True
        self.output.write('__result += "" + ')
        if safe is not True:
            self.output.write('__runtime.escape')
        self.output.write('((__tmp = (')
        self.state = STATE_INTERPOLATING
    def close():
        # Only the invocation that started interpolating may close it;
        # null/undefined values render as the empty string.
        if did_start_interpolating and self.state == STATE_INTERPOLATING:
            self.output.write(')) == null ? "" : __tmp);')
            self.state = STATE_DEFAULT
    yield close
    close()
@contextlib.contextmanager
def _scoped_variables(self, nodes_list, **kwargs):
    """
    Context manager for creating scoped variables defined by the nodes in `nodes_list`.
    These variables will be added to the context, and when the context manager exits the
    context object will be restored to it's previous state.
    """
    tmp_vars = []
    for node in nodes_list:
        # Each entry is either an `Assign` node ({% set x = ... %}) or a
        # plain name node (e.g. a loop target).
        is_assign_node = isinstance(node, nodes.Assign)
        name = node.target.name if is_assign_node else node.name
        # create a temp variable name
        tmp_var = next(self.temp_var_names)
        # save previous context value
        with self._execution():
            # save the current value of this name
            self.output.write('var %s = %s.%s;' % (tmp_var, self.context_name, name))
            # add new value to context
            self.output.write('%s.%s = ' % (self.context_name, name))
            if is_assign_node:
                self._process_node(node.node, **kwargs)
            else:
                # The value is already bound to a local JS variable of the
                # same name (e.g. the each() callback parameter).
                self.output.write(node.name)
            self.output.write(';')
        tmp_vars.append((tmp_var, name))
    yield
    # restore context
    for tmp_var, name in tmp_vars:
        with self._execution():
            self.output.write('%s.%s = %s;' % (self.context_name, name, tmp_var))
@contextlib.contextmanager
def _python_bool_wrapper(self, **kwargs):
    """
    Context manager that optionally wraps the enclosed expression in
    `__runtime.boolean(...)` so it is evaluated with Python truthiness
    (empty list/dict/string are falsy) rather than JS truthiness.

    Yields `kwargs` with `use_python_bool_wrapper` forced to False so
    nested expressions are not double-wrapped.
    """
    use_python_bool_wrapper = kwargs.get('use_python_bool_wrapper')
    if use_python_bool_wrapper:
        self.output.write('__runtime.boolean(')
    with option(kwargs, use_python_bool_wrapper=False):
        yield kwargs
    if use_python_bool_wrapper:
        self.output.write(')')
|
jonbretman/jinja-to-js
|
jinja_to_js/__init__.py
|
JinjaToJS._process_math
|
python
|
def _process_math(self, node, math_operator=None, function=None, **kwargs):
with self._interpolation():
if function:
self.output.write(function)
self.output.write('(')
self._process_node(node.left, **kwargs)
self.output.write(math_operator)
self._process_node(node.right, **kwargs)
if function:
self.output.write(')')
|
Processes a math node e.g. `Div`, `Sub`, `Add`, `Mul` etc...
If `function` is provided the expression is wrapped in a call to that function.
|
train
|
https://github.com/jonbretman/jinja-to-js/blob/0a784b10a83d37a3171c5797547e9fc460c51289/jinja_to_js/__init__.py#L1010-L1026
| null |
class JinjaToJS(object):
def __init__(self,
             template_root,
             template_name,
             js_module_format=None,
             runtime_path='jinja-to-js',
             include_prefix='',
             include_ext='',
             child_blocks=None,
             dependencies=None,
             custom_filters=None):
    """
    Compiles the named Jinja template into a JavaScript module.

    Args:
        template_root (str): The path to where templates should be loaded from.
        template_name (str): The name of the template to compile (relative to `template_root`).
        js_module_format (str, optional): The JavaScript module format to use.
            One of ('amd', 'commonjs', 'es6')
        runtime_path (str, optional): If `js_module_format` is specified then the JavaScript
            runtime will be imported using the appropriate method. It defaults to assuming
            it will be imported from `node_modules` but you can change it using this option.
        include_prefix (str, optional): If using the `amd` module format you can use this option
            to add a prefix to every include path as AMD imports are generally relative to the
            main file, not the module importing.
        include_ext (str, optional): By default any includes will be references without an
            extension, as neither AMD, commonJS or ES6 require the '.js' extension. If you want
            to use an extension, say '.template' then set this option to a string including
            the leading '.'
        child_blocks (dict, optional): Used internally when handling templates that extend
            other templates.
        dependencies (list of tuple, optional): Used internally when handling templates that
            extend other templates.
        custom_filters (list of str, optional): List of custom filters which should be allowed.
            These may be filters supported by Jinja but not supported by jinja-to-js. These
            filters MUST be registered with the jinja-to-js JS runtime.
    """
    self.environment = Environment(loader=FileSystemLoader(template_root),
                                   autoescape=True,
                                   extensions=['jinja2.ext.with_', 'jinja2.ext.autoescape'])
    # All generated JavaScript is accumulated in this buffer.
    self.output = six.StringIO()
    # Names assigned inside the template (lookups for these skip the context object).
    self.stored_names = set()
    self.temp_var_names = temp_var_names_generator()
    # Output state machine: default / executing / interpolating.
    self.state = STATE_DEFAULT
    # Shared across parent templates when handling {% extends %}.
    self.child_blocks = child_blocks or {}
    # (path, var_name) pairs for every module this template imports.
    self.dependencies = dependencies or []
    self._runtime_function_cache = []
    self.js_module_format = js_module_format
    self.runtime_path = runtime_path
    self.include_prefix = include_prefix
    self.include_ext = include_ext
    self.template_root = template_root
    self.template_name = template_name
    self.custom_filters = custom_filters or []
    # The name of the JavaScript function that will output this template. By using a named
    # function the template can call itself which is required to support recursive includes.
    self.js_function_name = 'template' + ''.join(
        x.title() for x in re.split(r'[^\w]|_', path.splitext(self.template_name)[0])
    )
    self.context_name = 'context'
    # The runtime is always the first dependency.
    self._add_dependency(self.runtime_path, 'jinjaToJS')
    template_string, template_path, _ = self.environment.loader.get_source(
        self.environment, self.template_name
    )
    # It is assumed that this will be the absolute path to the template. It is used to work out
    # relative paths for includes.
    self.template_path = template_path
    if self.js_module_format not in JS_MODULE_FORMATS.keys():
        raise ValueError(
            'The js_module_format option must be one of: %s' % JS_MODULE_FORMATS.keys()
        )
    self.ast = self.environment.parse(template_string)
    try:
        # Walk the template AST; each node type has a `_process_*` handler.
        for node in self.ast.body:
            self._process_node(node)
    except ExtendsException:
        # Raised by `_process_extends` once the parent template has been
        # written out - nothing more to do for this template.
        pass
def get_output(self):
    """
    Returns the generated JavaScript code.

    Returns:
        str
    """
    # Wrap the accumulated template body in a named JS function so the
    # template can reference itself (recursive includes).
    rendered_function = TEMPLATE_WRAPPER.format(
        function_name=self.js_function_name,
        template_code=self.output.getvalue()
    ).strip()
    # Emit it through the configured module system (amd/commonjs/es6).
    wrap_as_module = JS_MODULE_FORMATS[self.js_module_format]
    return wrap_as_module(self.dependencies, rendered_function)
def _get_depencency_var_name(self, dependency):
    """
    Returns the variable name assigned to the given dependency, or None if
    the dependency has not yet been registered.

    Args:
        dependency (str): The dependency path to look up.

    Returns:
        str or None
    """
    # `dependencies` holds (path, var_name) pairs.
    matches = (var for dep, var in self.dependencies if dep == dependency)
    return next(matches, None)
def _add_dependency(self, dependency, var_name=None):
    """
    Registers the given dependency and returns the variable name used to
    access it.

    Args:
        dependency (str): The dependency path to register.
        var_name (str, optional): Variable name to use; a generated
            temporary name is used when omitted.

    Returns:
        str
    """
    if var_name is None:
        # No explicit name requested - generate a fresh temporary one.
        var_name = next(self.temp_var_names)
    entry = (dependency, var_name)
    # Only record each (path, name) pair once.
    if entry not in self.dependencies:
        self.dependencies.append(entry)
    return var_name
def _process_node(self, node, **kwargs):
    """
    Dispatches `node` to the `_process_<nodename>` handler for its class.

    Raises:
        Exception: If no handler exists for the node's class.
    """
    handler_name = '_process_' + node.__class__.__name__.lower()
    handler = getattr(self, handler_name, None)
    if not callable(handler):
        raise Exception('Unknown node %s' % node)
    handler(node, **kwargs)
def _process_extends(self, node, **kwargs):
    """
    Processes an extends block e.g. `{% extends "some/template.jinja" %}`.

    Records this template's blocks in `child_blocks` (chained via each
    block's `super_block` attribute), compiles the parent template with
    the shared state, then aborts parsing of this template by raising
    ExtendsException.
    """
    # find all the blocks in this template
    for b in self.ast.find_all(nodes.Block):
        # if not already in `child_blocks` then this is the first time a
        # block with this name has been encountered.
        if b.name not in self.child_blocks:
            self.child_blocks[b.name] = b
        else:
            # otherwise we have seen this block before, so we need to find the last
            # super_block and add the block from this template to the end.
            block = self.child_blocks.get(b.name)
            while hasattr(block, 'super_block'):
                block = block.super_block
            block.super_block = b
    # load the parent template, sharing `child_blocks` and `dependencies`
    # so the parent renders the most-derived blocks.
    parent_template = JinjaToJS(template_root=self.template_root,
                                template_name=node.template.value,
                                js_module_format=self.js_module_format,
                                runtime_path=self.runtime_path,
                                include_prefix=self.include_prefix,
                                include_ext=self.include_ext,
                                child_blocks=self.child_blocks,
                                dependencies=self.dependencies)
    # add the parent templates output to the current output
    self.output.write(parent_template.output.getvalue())
    # Raise an exception so we stop parsing this template
    raise ExtendsException
def _process_block(self, node, **kwargs):
    """
    Processes a block e.g. `{% block my_block %}{% endblock %}`.

    Renders the most-derived child block (if a child template overrode
    this block), chaining parents via the `super_block` attribute so that
    `super()` calls can be resolved by `_process_call`.
    """
    # check if this node already has a 'super_block' attribute
    if not hasattr(node, 'super_block'):
        # since it doesn't it must be the last block in the inheritance chain
        node.super_block = None
        # see if there has been a child block defined - if there is this
        # will be the first block in the inheritance chain
        child_block = self.child_blocks.get(node.name)
        if child_block:
            # we have child nodes so we need to set `node` as the
            # super of the last one in the chain
            last_block = child_block
            while hasattr(last_block, 'super_block'):
                # BUG FIX: advance along the chain. This previously read
                # `child_block.super_block`, which never advances past the
                # first link and loops forever once the inheritance chain
                # is three or more blocks long.
                last_block = last_block.super_block
            # once we have found it, set this node as it's super block
            last_block.super_block = node
            # this is the node we want to process as it's the first in the inheritance chain
            node = child_block
    # process the block passing the it's super along, if this block
    # calls super() it will be handled by `_process_call`
    for n in node.body:
        self._process_node(n, super_block=node.super_block, **kwargs)
def _process_output(self, node, **kwargs):
    """
    Processes an `Output` node by dispatching each of its child nodes
    (e.g. `Name` and `TemplateData` nodes) in order.
    """
    for child in node.nodes:
        self._process_node(child, **kwargs)
def _process_templatedata(self, node, **_):
    """
    Processes a `TemplateData` node - literal template text written to the
    output as-is (with quotes and newlines escaped for JavaScript).
    """
    # Escape double quotes, then newlines, so the text can be embedded in
    # a double-quoted JavaScript string literal.
    escaped = node.data.replace('"', '\\"').replace('\n', '\\n')
    self.output.write('__result += "' + escaped + '";')
def _process_name(self, node, **kwargs):
    """
    Processes a `Name` node. Some examples of `Name` nodes:
        {{ foo }} -> 'foo' is a Name
        {% if foo }} -> 'foo' is a Name
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs):
            # Names assigned inside the template live as plain JS variables;
            # everything else is looked up on the context object.
            if node.name not in self.stored_names and node.ctx != 'store':
                self.output.write(self.context_name)
                self.output.write('.')
            if node.ctx == 'store':
                # Remember assignment targets so later reads skip the context.
                self.stored_names.add(node.name)
            self.output.write(node.name)
def _process_getattr(self, node, **kwargs):
    """
    Processes a `GetAttr` node. e.g. {{ foo.bar }}
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            if is_loop_helper(node):
                # `loop.index` and friends have dedicated handling.
                self._process_loop_helper(node, **new_kwargs)
            else:
                self._process_node(node.node, **new_kwargs)
                self.output.write('.')
                self.output.write(node.attr)
def _process_getitem(self, node, **kwargs):
    """
    Processes a `GetItem` node e.g. {{ foo["bar"] }} or {{ foo[1:2] }}.
    Slices are translated to `Array.prototype.slice`; plain subscripts to
    bracket access.
    """
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.node, **new_kwargs)
            if isinstance(node.arg, nodes.Slice):
                self.output.write('.slice(')
                if node.arg.step is not None:
                    # JS slice has no step argument.
                    raise Exception('The step argument is not supported when slicing.')
                if node.arg.start is None:
                    # Missing start defaults to the beginning.
                    self.output.write('0')
                else:
                    self._process_node(node.arg.start, **new_kwargs)
                if node.arg.stop is None:
                    # Missing stop - slice to the end.
                    self.output.write(')')
                else:
                    self.output.write(',')
                    self._process_node(node.arg.stop, **new_kwargs)
                    self.output.write(')')
            else:
                self.output.write('[')
                self._process_node(node.arg, **new_kwargs)
                self.output.write(']')
def _process_for(self, node, **kwargs):
    """
    Processes a for loop. e.g.
        {% for number in numbers %}
            {{ number }}
        {% endfor %}
        {% for key, value in somemap.items() %}
            {{ key }} -> {{ value }}
        {% %}

    Emits a `__runtime.each(iterable, function(value, ...) {...})` call;
    loop targets are scoped onto the context for the body and restored
    afterwards.
    """
    # since a for loop can introduce new names into the context
    # we need to remember the ones that existed outside the loop
    previous_stored_names = self.stored_names.copy()
    with self._execution():
        self.output.write('__runtime.each(')
        if is_method_call(node.iter, dict.keys.__name__):
            # Iterating `.keys()` - iterate Object.keys() of the mapping.
            self.output.write('Object.keys(')
        self._process_node(node.iter, **kwargs)
        if is_method_call(node.iter, dict.keys.__name__):
            self.output.write(')')
        self.output.write(',')
        self.output.write('function')
        self.output.write('(')
        # javascript iterations put the value first, then the key
        if isinstance(node.target, nodes.Tuple):
            if len(node.target.items) > 2:
                raise Exception('De-structuring more than 2 items is not supported.')
            for i, item in enumerate(reversed(node.target.items)):
                self._process_node(item, **kwargs)
                if i < len(node.target.items) - 1:
                    self.output.write(',')
        else:
            self._process_node(node.target, **kwargs)
        self.output.write(')')
        self.output.write('{')
        if node.test:
            # {% for x in xs if cond %} - skip items failing the test.
            self.output.write('if (!(')
            self._process_node(node.test, **kwargs)
            self.output.write(')) { return; }')
    # Make the loop target(s) available on the context for the body.
    assigns = node.target.items if isinstance(node.target, nodes.Tuple) else [node.target]
    with self._scoped_variables(assigns, **kwargs):
        for n in node.body:
            self._process_node(n, **kwargs)
    with self._execution():
        # Close the callback function and the each() call.
        self.output.write('}')
        self.output.write(')')
        self.output.write(';')
    # restore the stored names
    self.stored_names = previous_stored_names
def _process_if(self, node, execute_end=None, **kwargs):
    """
    Processes an if block e.g. `{% if foo %} do something {% endif %}`,
    including any `elif` and `else` branches (handled recursively).
    """
    with self._execution():
        self.output.write('if')
        self.output.write('(')
        # The condition is evaluated with Python truthiness semantics.
        with option(kwargs, use_python_bool_wrapper=True):
            self._process_node(node.test, **kwargs)
        self.output.write(')')
        self.output.write('{')
        # We accept an `execute_end` function as a keyword argument as this function is
        # recursive in the case of something like if-elif-elif-else. In these cases this
        # invocation of this function may have to close execution opened by a previous
        # invocation of this function.
        if execute_end:
            execute_end()
    # body
    for n in node.body:
        self._process_node(n, **kwargs)
    if not node.else_ and not node.elif_:
        # no else - just close the if
        with self._execution():
            self.output.write('}')
    else:
        # either an else or an elif
        with self._execution() as execute_end:
            self.output.write('}')
            self.output.write(' else ')
            # check for elif
            for n in node.elif_:
                self._process_node(n, execute_end=execute_end, **kwargs)
            if node.elif_ and node.else_:
                self.output.write(' else ')
            # open up the body
            self.output.write('{')
        # process the body of the else
        for n in node.else_:
            self._process_node(n, **kwargs)
        # close the body
        with self._execution():
            self.output.write('}')
def _process_condexpr(self, node, **kwargs):
    """
    Processes a conditional expression e.g. `{{ 'a' if cond else 'b' }}`,
    emitting a JavaScript ternary.
    """
    emit = self.output.write
    with self._interpolation():
        emit('(')
        # The test uses Python truthiness semantics.
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.test, **new_kwargs)
        emit(' ? ')
        self._process_node(node.expr1, **kwargs)
        emit(' : ')
        self._process_node(node.expr2, **kwargs)
        emit(')')
def _process_not(self, node, **kwargs):
    """Processes a `not` expression, emitting a JavaScript `!` negation."""
    self.output.write('!')
    # Negate the Python-truthiness of the wrapped expression.
    with self._python_bool_wrapper(**kwargs) as inner_kwargs:
        self._process_node(node.node, **inner_kwargs)
def _process_or(self, node, **kwargs):
    """Processes an `or` expression, emitting JavaScript `||`."""
    for operand, separator in ((node.left, ' || '), (node.right, None)):
        self._process_node(operand, **kwargs)
        if separator:
            self.output.write(separator)
def _process_and(self, node, **kwargs):
    """Processes an `and` expression, emitting JavaScript `&&`."""
    for operand, separator in ((node.left, ' && '), (node.right, None)):
        self._process_node(operand, **kwargs)
        if separator:
            self.output.write(separator)
def _process_tuple(self, node, **kwargs):
    """Processes a `Tuple` node, emitting a JavaScript array literal."""
    self.output.write('[')
    last = len(node.items) - 1
    for index, element in enumerate(node.items):
        self._process_node(element, **kwargs)
        # Comma separated, no trailing comma.
        if index != last:
            self.output.write(',')
    self.output.write(']')
def _process_call(self, node, super_block=None, **kwargs):
    """
    Processes a `Call` node: dict iteration methods, `super()` inside a
    block, or a plain function call on a context variable.
    """
    if is_method_call(node, DICT_ITER_METHODS):
        # special case for dict methods - iterate the mapping itself.
        self._process_node(node.node.node, **kwargs)
    elif is_method_call(node, 'super'):
        # special case for the super() method which is available inside blocks
        if not super_block:
            raise Exception('super() called outside of a block with a parent.')
        self._process_node(super_block, **kwargs)
    else:
        # just a normal function call on a context variable
        with self._interpolation():
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self._process_node(node.node, **new_kwargs)
                self.output.write('(')
                self._process_args(node, **new_kwargs)
                self.output.write(')')
        # only output the semi-colon if we are not interpolating
        if self.state != STATE_INTERPOLATING:
            # NOTE(review): this writes an empty string although the comment
            # above says "semi-colon"; presumably it should be ';' - confirm
            # against the expected generated JS before changing.
            self.output.write('')
def _process_filter(self, node, **kwargs):
    """
    Processes a filter e.g. `{{ foo|title }}`: built-in handlers first,
    then registered custom filters, otherwise an error.
    """
    # Prefer a dedicated `_process_filter_<name>` handler.
    method_name = getattr(self, '_process_filter_%s' % node.name, None)
    if callable(method_name):
        method_name(node, **kwargs)
    elif node.name in self.custom_filters:
        # Custom filters are resolved at runtime from the __filters registry;
        # their output is trusted (safe=True).
        with self._interpolation(safe=True):
            with self._python_bool_wrapper(**kwargs) as new_kwargs:
                self.output.write('__filters.%s(' % node.name)
                self._process_node(node.node, **new_kwargs)
                if getattr(node, 'args', None):
                    self.output.write(',')
                    self._process_args(node, **new_kwargs)
                self.output.write(')')
    else:
        raise Exception('Unsupported filter: %s' % node.name)
# Built-in filter handlers. Each emits the JavaScript equivalent of a Jinja
# filter, delegating to the runtime's `__filters` helpers where needed.

def _process_filter_safe(self, node, **kwargs):
    # `|safe` - interpolate without HTML-escaping.
    with self._interpolation(safe=True):
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.node, **new_kwargs)

def _process_filter_capitalize(self, node, **kwargs):
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.capitalize(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(')')

def _process_filter_abs(self, node, **kwargs):
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('Math.abs(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(')')

def _process_filter_attr(self, node, **kwargs):
    # `|attr(name)` - dynamic attribute access via subscript.
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self._process_node(node.node, **new_kwargs)
            self.output.write('[')
            self._process_node(node.args[0], **new_kwargs)
            self.output.write(']')

def _process_filter_batch(self, node, **kwargs):
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.batch(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(',')
            self._process_args(node, **new_kwargs)
            self.output.write(')')

def _process_filter_default(self, node, **kwargs):
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.default(')
            self._process_node(node.node, **new_kwargs)
            if node.args:
                # Optional default value argument.
                self.output.write(',')
                self._process_args(node, **new_kwargs)
            self.output.write(')')

def _process_filter_first(self, node, **kwargs):
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.first(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(')')

def _process_filter_int(self, node, **kwargs):
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.int(')
            self._process_node(node.node, **new_kwargs)
            if node.args:
                # Optional fallback-value argument.
                self.output.write(',')
                self._process_args(node, **new_kwargs)
            self.output.write(')')

def _process_filter_last(self, node, **kwargs):
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.last(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(')')

def _process_filter_length(self, node, **kwargs):
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.size(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(')')

def _process_filter_lower(self, node, **kwargs):
    # `+ ""` coerces the value to a string before calling the method.
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(' + "").toLowerCase()')

def _process_filter_slice(self, node, **kwargs):
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.slice(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(',')
            self._process_args(node, **new_kwargs)
            self.output.write(')')

def _process_filter_title(self, node, **kwargs):
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.title(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(')')

def _process_filter_trim(self, node, **kwargs):
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(' + "").trim()')

def _process_filter_upper(self, node, **kwargs):
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(' + "").toUpperCase()')

def _process_filter_truncate(self, node, **kwargs):
    with self._interpolation():
        with self._python_bool_wrapper(**kwargs) as new_kwargs:
            self.output.write('__filters.truncate(')
            self._process_node(node.node, **new_kwargs)
            self.output.write(',')
            self._process_args(node, **new_kwargs)
            self.output.write(')')
def _process_assign(self, node, **kwargs):
    """
    Processes an assignment e.g. `{% set x = 1 %}`, emitting a JavaScript
    `var` declaration.
    """
    emit = self.output.write
    with self._execution():
        emit('var ')
        self._process_node(node.target, **kwargs)
        emit(' = ')
        self._process_node(node.node, **kwargs)
        emit(';')
def _process_with(self, node, **kwargs):
    """
    Processes a with block e.g. `{% with foo = 'bar' %}`, emitting an
    immediately-invoked function so the scoped variables are restored
    when the block ends.
    """
    # keep a copy of the stored names before the scope
    previous_stored_names = self.stored_names.copy()
    # assigns in the with tag
    # e.g. {% with var = "something %}
    assigns_in_tag = [nodes.Assign(t, v) for t, v in zip(node.targets, node.values)]
    # assigns in the with body
    # e.g. {% set name = 'John' %}
    assigns_in_body = [x for x in node.body if isinstance(x, nodes.Assign)]
    # remove assigns from the body
    node.body = [x for x in node.body if not isinstance(x, nodes.Assign)]
    # get a list of all the assigns in this with block
    # both on the tag, and within the body of the block
    all_assigns = assigns_in_tag + assigns_in_body
    with self._execution():
        self.output.write('(function () {')
    with self._scoped_variables(all_assigns, **kwargs):
        # NOTE(review): the loop variable shadows the `node` parameter;
        # harmless here (node.body is evaluated once) but worth renaming.
        for node in node.body:
            self._process_node(node, **kwargs)
    with self._execution():
        self.output.write('})();')
    # restore previous stored names
    self.stored_names = previous_stored_names
def _process_compare(self, node, **kwargs):
    """
    Processes a comparison e.g. `{{ a == b }}`. Equality between two
    non-constant operands uses the runtime's deep-equals helper; anything
    else maps directly to a JS operator from OPERANDS.
    """
    if len(node.ops) > 1:
        # Chained comparisons like `a < b < c` are not supported.
        raise Exception('Multiple operands are not supported.')
    operand = node.ops[0]
    is_equality = operand.op in ('eq', 'ne')
    left_hand_is_const = isinstance(node.expr, nodes.Const)
    right_hand_is_const = isinstance(operand.expr, nodes.Const)
    # If the operand is equality and neither the left or right hand side are constants then we
    # will need to use the JavaScript deep equals function. Ideally we want to avoid using this
    # as it is quite a big function.
    use_is_equal_function = is_equality and not (left_hand_is_const or right_hand_is_const)
    with option(kwargs, use_python_bool_wrapper=False):
        if use_is_equal_function:
            if operand.op == 'ne':
                # `!=` becomes a negated deep-equals call.
                self.output.write('!')
            self.output.write('__runtime.isEqual(')
        self._process_node(node.expr, **kwargs)
        if use_is_equal_function:
            self.output.write(',')
        else:
            self.output.write(OPERANDS.get(operand.op))
        self._process_node(operand.expr, **kwargs)
        if use_is_equal_function:
            self.output.write(')')
def _process_operand(self, node, **kwargs):
    # One side of a comparison: the JS operator (from OPERANDS) then the expression.
    self.output.write(OPERANDS.get(node.op))
    self._process_node(node.expr, **kwargs)

def _process_const(self, node, **_):
    # Constant literal; json.dumps produces a valid JavaScript literal.
    with self._interpolation():
        self.output.write(json.dumps(node.value))

def _process_nonetype(self, node, **_):
    # Python's None maps to JavaScript's null.
    with self._interpolation():
        self.output.write('null')

def _process_neg(self, node, **kwargs):
    # Unary minus e.g. {{ -foo }}.
    with self._interpolation():
        self.output.write('-')
        self._process_node(node.node, **kwargs)

def _process_list(self, node, **kwargs):
    # List literal -> JavaScript array literal, comma separated, no trailing comma.
    self.output.write('[')
    for i, item in enumerate(node.items):
        self._process_node(item, **kwargs)
        if i < len(node.items) - 1:
            self.output.write(',')
    self.output.write(']')
def _process_test(self, node, **kwargs):
    # Dispatch `x is <test>` expressions to a `_process_test_<name>` handler.
    with option(kwargs, use_python_bool_wrapper=False):
        method_name = getattr(self, '_process_test_%s' % node.name, None)
        if callable(method_name):
            method_name(node, **kwargs)
        else:
            raise Exception('Unsupported test: %s' % node.name)

def _process_test_defined(self, node, **kwargs):
    # {{ x is defined }} -> (typeof x !== "undefined")
    self.output.write('(typeof ')
    self._process_node(node.node, **kwargs)
    self.output.write(' !== "undefined")')

def _process_test_undefined(self, node, **kwargs):
    self._process_node(node.node, **kwargs)
    self.output.write(' === undefined')

def _process_test_callable(self, node, **kwargs):
    self.output.write('__runtime.type(')
    self._process_node(node.node, **kwargs)
    self.output.write(') === "Function"')

def _process_test_divisibleby(self, node, **kwargs):
    # {{ x is divisibleby(n) }} -> x % n === 0
    self._process_node(node.node, **kwargs)
    self.output.write(' % ')
    self._process_node(node.args[0], **kwargs)
    self.output.write(' === 0')

def _process_test_even(self, node, **kwargs):
    self._process_node(node.node, **kwargs)
    self.output.write(' % 2 === 0')

def _process_test_odd(self, node, **kwargs):
    # NOTE(review): JS `%` keeps the dividend's sign, so this is false for
    # negative odd numbers - confirm whether negative inputs can occur.
    self._process_node(node.node, **kwargs)
    self.output.write(' % 2 === 1')

def _process_test_none(self, node, **kwargs):
    self._process_node(node.node, **kwargs)
    self.output.write(' === null')

def _process_test_upper(self, node, **kwargs):
    # {{ x is upper }} -> x.toUpperCase() === x
    self._process_node(node.node, **kwargs)
    self.output.write('.toUpperCase() === ')
    self._process_node(node.node, **kwargs)

def _process_test_lower(self, node, **kwargs):
    self._process_node(node.node, **kwargs)
    self.output.write('.toLowerCase() === ')
    self._process_node(node.node, **kwargs)

def _process_test_string(self, node, **kwargs):
    self.output.write('__runtime.type(')
    self._process_node(node.node, **kwargs)
    self.output.write(') === "String"')

def _process_test_mapping(self, node, **kwargs):
    self.output.write('__runtime.type(')
    self._process_node(node.node, **kwargs)
    self.output.write(') === "Object"')

def _process_test_number(self, node, **kwargs):
    # NaN is typed "Number" in JS, so it is excluded explicitly.
    self.output.write('(__runtime.type(')
    self._process_node(node.node, **kwargs)
    self.output.write(') === "Number" && !isNaN(')
    self._process_node(node.node, **kwargs)
    self.output.write('))')
def _process_include(self, node, **kwargs):
    """
    Processes an include e.g. `{% include "template.jinja" %}`.

    Resolves the include path (applying `include_prefix`/`include_ext`,
    or a relative path for es6/commonjs), registers it as a dependency,
    and emits a call to the included template function with the current
    context.
    """
    # The include's output is already escaped by the included template.
    with self._interpolation(safe=True):
        include_path = node.template.value
        if include_path == self.template_name:
            # template is including itself - call its own named function.
            include_var_name = self.js_function_name
        else:
            if self.include_prefix:
                include_path = self.include_prefix + node.template.value
            elif self.js_module_format in ('es6', 'commonjs',) and self.template_name:
                # Resolve the include relative to this template's file.
                _, absolute_include_path, _ = self.environment.loader.get_source(
                    self.environment, node.template.value
                )
                include_path = os.path.relpath(
                    absolute_include_path, os.path.dirname(self.template_path)
                )
                if not include_path.startswith('.'):
                    include_path = './' + include_path
            # Swap the template extension for the configured one (if any).
            include_path = path.splitext(include_path)[0] + self.include_ext
            include_var_name = self._get_depencency_var_name(include_path)
            if not include_var_name:
                include_var_name = self._add_dependency(include_path)
        if self.js_module_format is None:
            # No module system - defer to the runtime's include helper.
            self.output.write('jinjaToJS.include("')
            self.output.write(include_path)
            self.output.write('");')
        else:
            # Call the imported template function with the current context.
            self.output.write(include_var_name)
            self.output.write('(')
            self.output.write(self.context_name)
            self.output.write(')')
def _process_add(self, node, **kwargs):
self._process_math(node, math_operator=' + ', **kwargs)
def _process_sub(self, node, **kwargs):
self._process_math(node, math_operator=' - ', **kwargs)
def _process_div(self, node, **kwargs):
self._process_math(node, math_operator=' / ', **kwargs)
def _process_floordiv(self, node, **kwargs):
self._process_math(node, math_operator=' / ', function='Math.floor', **kwargs)
def _process_mul(self, node, **kwargs):
self._process_math(node, math_operator=' * ', **kwargs)
def _process_mod(self, node, **kwargs):
self._process_math(node, math_operator=' % ', **kwargs)
def _process_loop_helper(self, node, **kwargs):
"""
Processes a loop helper e.g. {{ loop.first }} or {{ loop.index }}
"""
if node.attr == LOOP_HELPER_INDEX:
self.output.write('(arguments[1] + 1)')
elif node.attr == LOOP_HELPER_INDEX_0:
self.output.write('arguments[1]')
elif node.attr == LOOP_HELPER_FIRST:
self.output.write('(arguments[1] == 0)')
elif node.attr == LOOP_HELPER_LAST:
self.output.write('(arguments[1] == arguments[2].length - 1)')
elif node.attr == LOOP_HELPER_LENGTH:
self.output.write('arguments[2].length')
def _process_args(self, node, **kwargs):
args = getattr(node, 'args', None)
if not args:
return
for i, item in enumerate(args):
self._process_node(item, **kwargs)
if i < len(node.args) - 1:
self.output.write(',')
@contextlib.contextmanager
def _execution(self):
"""
Context manager for executing some JavaScript inside a template.
"""
did_start_executing = False
if self.state == STATE_DEFAULT:
did_start_executing = True
self.state = STATE_EXECUTING
def close():
if did_start_executing and self.state == STATE_EXECUTING:
self.state = STATE_DEFAULT
yield close
close()
@contextlib.contextmanager
def _interpolation(self, safe=False):
did_start_interpolating = False
if self.state == STATE_DEFAULT:
did_start_interpolating = True
self.output.write('__result += "" + ')
if safe is not True:
self.output.write('__runtime.escape')
self.output.write('((__tmp = (')
self.state = STATE_INTERPOLATING
def close():
if did_start_interpolating and self.state == STATE_INTERPOLATING:
self.output.write(')) == null ? "" : __tmp);')
self.state = STATE_DEFAULT
yield close
close()
@contextlib.contextmanager
def _scoped_variables(self, nodes_list, **kwargs):
"""
Context manager for creating scoped variables defined by the nodes in `nodes_list`.
These variables will be added to the context, and when the context manager exits the
context object will be restored to it's previous state.
"""
tmp_vars = []
for node in nodes_list:
is_assign_node = isinstance(node, nodes.Assign)
name = node.target.name if is_assign_node else node.name
# create a temp variable name
tmp_var = next(self.temp_var_names)
# save previous context value
with self._execution():
# save the current value of this name
self.output.write('var %s = %s.%s;' % (tmp_var, self.context_name, name))
# add new value to context
self.output.write('%s.%s = ' % (self.context_name, name))
if is_assign_node:
self._process_node(node.node, **kwargs)
else:
self.output.write(node.name)
self.output.write(';')
tmp_vars.append((tmp_var, name))
yield
# restore context
for tmp_var, name in tmp_vars:
with self._execution():
self.output.write('%s.%s = %s;' % (self.context_name, name, tmp_var))
@contextlib.contextmanager
def _python_bool_wrapper(self, **kwargs):
use_python_bool_wrapper = kwargs.get('use_python_bool_wrapper')
if use_python_bool_wrapper:
self.output.write('__runtime.boolean(')
with option(kwargs, use_python_bool_wrapper=False):
yield kwargs
if use_python_bool_wrapper:
self.output.write(')')
|
jonbretman/jinja-to-js
|
jinja_to_js/__init__.py
|
JinjaToJS._process_loop_helper
|
python
|
def _process_loop_helper(self, node, **kwargs):
if node.attr == LOOP_HELPER_INDEX:
self.output.write('(arguments[1] + 1)')
elif node.attr == LOOP_HELPER_INDEX_0:
self.output.write('arguments[1]')
elif node.attr == LOOP_HELPER_FIRST:
self.output.write('(arguments[1] == 0)')
elif node.attr == LOOP_HELPER_LAST:
self.output.write('(arguments[1] == arguments[2].length - 1)')
elif node.attr == LOOP_HELPER_LENGTH:
self.output.write('arguments[2].length')
|
Processes a loop helper e.g. {{ loop.first }} or {{ loop.index }}
|
train
|
https://github.com/jonbretman/jinja-to-js/blob/0a784b10a83d37a3171c5797547e9fc460c51289/jinja_to_js/__init__.py#L1028-L1042
| null |
class JinjaToJS(object):
def __init__(self,
template_root,
template_name,
js_module_format=None,
runtime_path='jinja-to-js',
include_prefix='',
include_ext='',
child_blocks=None,
dependencies=None,
custom_filters=None):
"""
Args:
template_root (str): The path to where templates should be loaded from.
template_name (str): The name of the template to compile (relative to `template_root`).
js_module_format (str, optional): The JavaScript module format to use.
One of ('amd', 'commonjs', 'es6')
runtime_path (str, optional): If `js_module_format` is specified then the JavaScript
runtime will be imported using the appropriate method.
It defaults to assuming it will be imported from
`node_modules` but you can change it using this option.
include_prefix (str, optional): If using the `amd` module format you can use this option
to add a prefix to every include path as AMD imports are
generally relative to the main file, not the module
importing.
include_ext (str, optional): By default any includes will be references without an
extension, as neither AMD, commonJS or ES6 require the
'.js' extension. If you want to use an extension, say
'.template' then set this option to a string including
the leading '.'
child_blocks (dict, optional): Used internally when handling templates that extend
other templates.
dependencies (list of tuple, optional): Used internally when handling templates that
extend other templates.
custom_filters (list of str, optional): List of custom filters which should be allowed.
These may be filters supported by Jinja but not
supported by jinja-to-js. These filters MUST be
registered with the jinja-to-js JS runtime.
"""
self.environment = Environment(loader=FileSystemLoader(template_root),
autoescape=True,
extensions=['jinja2.ext.with_', 'jinja2.ext.autoescape'])
self.output = six.StringIO()
self.stored_names = set()
self.temp_var_names = temp_var_names_generator()
self.state = STATE_DEFAULT
self.child_blocks = child_blocks or {}
self.dependencies = dependencies or []
self._runtime_function_cache = []
self.js_module_format = js_module_format
self.runtime_path = runtime_path
self.include_prefix = include_prefix
self.include_ext = include_ext
self.template_root = template_root
self.template_name = template_name
self.custom_filters = custom_filters or []
# The name of the JavaScript function that will output this template. By using a named
# function the template can call itself which is required to support recursive includes.
self.js_function_name = 'template' + ''.join(
x.title() for x in re.split(r'[^\w]|_', path.splitext(self.template_name)[0])
)
self.context_name = 'context'
self._add_dependency(self.runtime_path, 'jinjaToJS')
template_string, template_path, _ = self.environment.loader.get_source(
self.environment, self.template_name
)
# It is assumed that this will be the absolute path to the template. It is used to work out
# related paths for inclues.
self.template_path = template_path
if self.js_module_format not in JS_MODULE_FORMATS.keys():
raise ValueError(
'The js_module_format option must be one of: %s' % JS_MODULE_FORMATS.keys()
)
self.ast = self.environment.parse(template_string)
try:
for node in self.ast.body:
self._process_node(node)
except ExtendsException:
pass
def get_output(self):
"""
Returns the generated JavaScript code.
Returns:
str
"""
# generate the JS function string
template_function = TEMPLATE_WRAPPER.format(
function_name=self.js_function_name,
template_code=self.output.getvalue()
).strip()
# get the correct module format template
module_format = JS_MODULE_FORMATS[self.js_module_format]
# generate the module code
return module_format(self.dependencies, template_function)
def _get_depencency_var_name(self, dependency):
"""
Returns the variable name assigned to the given dependency or None if the dependency has
not yet been registered.
Args:
dependency (str): Thet dependency that needs to be imported.
Returns:
str or None
"""
for dep_path, var_name in self.dependencies:
if dep_path == dependency:
return var_name
def _add_dependency(self, dependency, var_name=None):
"""
Adds the given dependency and returns the variable name to use to access it. If `var_name`
is not given then a random one will be created.
Args:
dependency (str):
var_name (str, optional):
Returns:
str
"""
if var_name is None:
var_name = next(self.temp_var_names)
# Don't add duplicate dependencies
if (dependency, var_name) not in self.dependencies:
self.dependencies.append((dependency, var_name))
return var_name
def _process_node(self, node, **kwargs):
node_name = node.__class__.__name__.lower()
handler = getattr(self, '_process_' + node_name, None)
if callable(handler):
handler(node, **kwargs)
else:
raise Exception('Unknown node %s' % node)
def _process_extends(self, node, **kwargs):
"""
Processes an extends block e.g. `{% extends "some/template.jinja" %}`
"""
# find all the blocks in this template
for b in self.ast.find_all(nodes.Block):
# if not already in `child_blocks` then this is the first time a
# block with this name has been encountered.
if b.name not in self.child_blocks:
self.child_blocks[b.name] = b
else:
# otherwise we have seen this block before, so we need to find the last
# super_block and add the block from this template to the end.
block = self.child_blocks.get(b.name)
while hasattr(block, 'super_block'):
block = block.super_block
block.super_block = b
# load the parent template
parent_template = JinjaToJS(template_root=self.template_root,
template_name=node.template.value,
js_module_format=self.js_module_format,
runtime_path=self.runtime_path,
include_prefix=self.include_prefix,
include_ext=self.include_ext,
child_blocks=self.child_blocks,
dependencies=self.dependencies)
# add the parent templates output to the current output
self.output.write(parent_template.output.getvalue())
# Raise an exception so we stop parsing this template
raise ExtendsException
def _process_block(self, node, **kwargs):
"""
Processes a block e.g. `{% block my_block %}{% endblock %}`
"""
# check if this node already has a 'super_block' attribute
if not hasattr(node, 'super_block'):
# since it doesn't it must be the last block in the inheritance chain
node.super_block = None
# see if there has been a child block defined - if there is this
# will be the first block in the inheritance chain
child_block = self.child_blocks.get(node.name)
if child_block:
# we have child nodes so we need to set `node` as the
# super of the last one in the chain
last_block = child_block
while hasattr(last_block, 'super_block'):
last_block = child_block.super_block
# once we have found it, set this node as it's super block
last_block.super_block = node
# this is the node we want to process as it's the first in the inheritance chain
node = child_block
# process the block passing the it's super along, if this block
# calls super() it will be handled by `_process_call`
for n in node.body:
self._process_node(n, super_block=node.super_block, **kwargs)
def _process_output(self, node, **kwargs):
"""
Processes an output node, which will contain things like `Name` and `TemplateData` nodes.
"""
for n in node.nodes:
self._process_node(n, **kwargs)
def _process_templatedata(self, node, **_):
"""
Processes a `TemplateData` node, this is just a bit of as-is text
to be written to the output.
"""
# escape double quotes
value = re.sub('"', r'\\"', node.data)
# escape new lines
value = re.sub('\n', r'\\n', value)
# append value to the result
self.output.write('__result += "' + value + '";')
def _process_name(self, node, **kwargs):
"""
Processes a `Name` node. Some examples of `Name` nodes:
{{ foo }} -> 'foo' is a Name
{% if foo }} -> 'foo' is a Name
"""
with self._interpolation():
with self._python_bool_wrapper(**kwargs):
if node.name not in self.stored_names and node.ctx != 'store':
self.output.write(self.context_name)
self.output.write('.')
if node.ctx == 'store':
self.stored_names.add(node.name)
self.output.write(node.name)
def _process_getattr(self, node, **kwargs):
"""
Processes a `GetAttr` node. e.g. {{ foo.bar }}
"""
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
if is_loop_helper(node):
self._process_loop_helper(node, **new_kwargs)
else:
self._process_node(node.node, **new_kwargs)
self.output.write('.')
self.output.write(node.attr)
def _process_getitem(self, node, **kwargs):
"""
Processes a `GetItem` node e.g. {{ foo["bar"] }}
"""
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
if isinstance(node.arg, nodes.Slice):
self.output.write('.slice(')
if node.arg.step is not None:
raise Exception('The step argument is not supported when slicing.')
if node.arg.start is None:
self.output.write('0')
else:
self._process_node(node.arg.start, **new_kwargs)
if node.arg.stop is None:
self.output.write(')')
else:
self.output.write(',')
self._process_node(node.arg.stop, **new_kwargs)
self.output.write(')')
else:
self.output.write('[')
self._process_node(node.arg, **new_kwargs)
self.output.write(']')
def _process_for(self, node, **kwargs):
"""
Processes a for loop. e.g.
{% for number in numbers %}
{{ number }}
{% endfor %}
{% for key, value in somemap.items() %}
{{ key }} -> {{ value }}
{% %}
"""
# since a for loop can introduce new names into the context
# we need to remember the ones that existed outside the loop
previous_stored_names = self.stored_names.copy()
with self._execution():
self.output.write('__runtime.each(')
if is_method_call(node.iter, dict.keys.__name__):
self.output.write('Object.keys(')
self._process_node(node.iter, **kwargs)
if is_method_call(node.iter, dict.keys.__name__):
self.output.write(')')
self.output.write(',')
self.output.write('function')
self.output.write('(')
# javascript iterations put the value first, then the key
if isinstance(node.target, nodes.Tuple):
if len(node.target.items) > 2:
raise Exception('De-structuring more than 2 items is not supported.')
for i, item in enumerate(reversed(node.target.items)):
self._process_node(item, **kwargs)
if i < len(node.target.items) - 1:
self.output.write(',')
else:
self._process_node(node.target, **kwargs)
self.output.write(')')
self.output.write('{')
if node.test:
self.output.write('if (!(')
self._process_node(node.test, **kwargs)
self.output.write(')) { return; }')
assigns = node.target.items if isinstance(node.target, nodes.Tuple) else [node.target]
with self._scoped_variables(assigns, **kwargs):
for n in node.body:
self._process_node(n, **kwargs)
with self._execution():
self.output.write('}')
self.output.write(')')
self.output.write(';')
# restore the stored names
self.stored_names = previous_stored_names
def _process_if(self, node, execute_end=None, **kwargs):
"""
Processes an if block e.g. `{% if foo %} do something {% endif %}`
"""
with self._execution():
self.output.write('if')
self.output.write('(')
with option(kwargs, use_python_bool_wrapper=True):
self._process_node(node.test, **kwargs)
self.output.write(')')
self.output.write('{')
# We accept an `execute_end` function as a keyword argument as this function is
# recursive in the case of something like if-elif-elif-else. In these cases this
# invocation of this function may have to close execution opened by a previous
# invocation of this function.
if execute_end:
execute_end()
# body
for n in node.body:
self._process_node(n, **kwargs)
if not node.else_ and not node.elif_:
# no else - just close the if
with self._execution():
self.output.write('}')
else:
# either an else or an elif
with self._execution() as execute_end:
self.output.write('}')
self.output.write(' else ')
# check for elif
for n in node.elif_:
self._process_node(n, execute_end=execute_end, **kwargs)
if node.elif_ and node.else_:
self.output.write(' else ')
# open up the body
self.output.write('{')
# process the body of the else
for n in node.else_:
self._process_node(n, **kwargs)
# close the body
with self._execution():
self.output.write('}')
def _process_condexpr(self, node, **kwargs):
with self._interpolation():
self.output.write('(')
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.test, **new_kwargs)
self.output.write(' ? ')
self._process_node(node.expr1, **kwargs)
self.output.write(' : ')
self._process_node(node.expr2, **kwargs)
self.output.write(')')
def _process_not(self, node, **kwargs):
self.output.write('!')
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
def _process_or(self, node, **kwargs):
self._process_node(node.left, **kwargs)
self.output.write(' || ')
self._process_node(node.right, **kwargs)
def _process_and(self, node, **kwargs):
self._process_node(node.left, **kwargs)
self.output.write(' && ')
self._process_node(node.right, **kwargs)
def _process_tuple(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
def _process_call(self, node, super_block=None, **kwargs):
if is_method_call(node, DICT_ITER_METHODS):
# special case for dict methods
self._process_node(node.node.node, **kwargs)
elif is_method_call(node, 'super'):
# special case for the super() method which is available inside blocks
if not super_block:
raise Exception('super() called outside of a block with a parent.')
self._process_node(super_block, **kwargs)
else:
# just a normal function call on a context variable
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
self.output.write('(')
self._process_args(node, **new_kwargs)
self.output.write(')')
# only output the semi-colon if we are not interpolating
if self.state != STATE_INTERPOLATING:
self.output.write('')
def _process_filter(self, node, **kwargs):
method_name = getattr(self, '_process_filter_%s' % node.name, None)
if callable(method_name):
method_name(node, **kwargs)
elif node.name in self.custom_filters:
with self._interpolation(safe=True):
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.%s(' % node.name)
self._process_node(node.node, **new_kwargs)
if getattr(node, 'args', None):
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
else:
raise Exception('Unsupported filter: %s' % node.name)
def _process_filter_safe(self, node, **kwargs):
with self._interpolation(safe=True):
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
def _process_filter_capitalize(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.capitalize(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_abs(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('Math.abs(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_attr(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
self.output.write('[')
self._process_node(node.args[0], **new_kwargs)
self.output.write(']')
def _process_filter_batch(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.batch(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_default(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.default(')
self._process_node(node.node, **new_kwargs)
if node.args:
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_first(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.first(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_int(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.int(')
self._process_node(node.node, **new_kwargs)
if node.args:
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_last(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.last(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_length(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.size(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_lower(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").toLowerCase()')
def _process_filter_slice(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.slice(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_title(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.title(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_trim(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").trim()')
def _process_filter_upper(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").toUpperCase()')
def _process_filter_truncate(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.truncate(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_assign(self, node, **kwargs):
with self._execution():
self.output.write('var ')
self._process_node(node.target, **kwargs)
self.output.write(' = ')
self._process_node(node.node, **kwargs)
self.output.write(';')
def _process_with(self, node, **kwargs):
# keep a copy of the stored names before the scope
previous_stored_names = self.stored_names.copy()
# assigns in the with tag
# e.g. {% with var = "something %}
assigns_in_tag = [nodes.Assign(t, v) for t, v in zip(node.targets, node.values)]
# assigns in the with body
# e.g. {% set name = 'John' %}
assigns_in_body = [x for x in node.body if isinstance(x, nodes.Assign)]
# remove assigns from the body
node.body = [x for x in node.body if not isinstance(x, nodes.Assign)]
# get a list of all the assigns in this with block
# both on the tag, and within the body of the block
all_assigns = assigns_in_tag + assigns_in_body
with self._execution():
self.output.write('(function () {')
with self._scoped_variables(all_assigns, **kwargs):
for node in node.body:
self._process_node(node, **kwargs)
with self._execution():
self.output.write('})();')
# restore previous stored names
self.stored_names = previous_stored_names
def _process_compare(self, node, **kwargs):
if len(node.ops) > 1:
raise Exception('Multiple operands are not supported.')
operand = node.ops[0]
is_equality = operand.op in ('eq', 'ne')
left_hand_is_const = isinstance(node.expr, nodes.Const)
right_hand_is_const = isinstance(operand.expr, nodes.Const)
# If the operand is equality and neither the left or right hand side are constants then we
# will need to use the JavaScript deep equals function. Ideally we want to avoid using this
# as it is quite a big function.
use_is_equal_function = is_equality and not (left_hand_is_const or right_hand_is_const)
with option(kwargs, use_python_bool_wrapper=False):
if use_is_equal_function:
if operand.op == 'ne':
self.output.write('!')
self.output.write('__runtime.isEqual(')
self._process_node(node.expr, **kwargs)
if use_is_equal_function:
self.output.write(',')
else:
self.output.write(OPERANDS.get(operand.op))
self._process_node(operand.expr, **kwargs)
if use_is_equal_function:
self.output.write(')')
def _process_operand(self, node, **kwargs):
self.output.write(OPERANDS.get(node.op))
self._process_node(node.expr, **kwargs)
def _process_const(self, node, **_):
with self._interpolation():
self.output.write(json.dumps(node.value))
def _process_nonetype(self, node, **_):
with self._interpolation():
self.output.write('null')
def _process_neg(self, node, **kwargs):
with self._interpolation():
self.output.write('-')
self._process_node(node.node, **kwargs)
def _process_list(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
def _process_test(self, node, **kwargs):
with option(kwargs, use_python_bool_wrapper=False):
method_name = getattr(self, '_process_test_%s' % node.name, None)
if callable(method_name):
method_name(node, **kwargs)
else:
raise Exception('Unsupported test: %s' % node.name)
def _process_test_defined(self, node, **kwargs):
self.output.write('(typeof ')
self._process_node(node.node, **kwargs)
self.output.write(' !== "undefined")')
def _process_test_undefined(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' === undefined')
def _process_test_callable(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Function"')
def _process_test_divisibleby(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % ')
self._process_node(node.args[0], **kwargs)
self.output.write(' === 0')
def _process_test_even(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % 2 === 0')
def _process_test_odd(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % 2 === 1')
def _process_test_none(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' === null')
def _process_test_upper(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write('.toUpperCase() === ')
self._process_node(node.node, **kwargs)
def _process_test_lower(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write('.toLowerCase() === ')
self._process_node(node.node, **kwargs)
def _process_test_string(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "String"')
def _process_test_mapping(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Object"')
def _process_test_number(self, node, **kwargs):
self.output.write('(__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Number" && !isNaN(')
self._process_node(node.node, **kwargs)
self.output.write('))')
def _process_include(self, node, **kwargs):
with self._interpolation(safe=True):
include_path = node.template.value
if include_path == self.template_name:
# template is including itself
include_var_name = self.js_function_name
else:
if self.include_prefix:
include_path = self.include_prefix + node.template.value
elif self.js_module_format in ('es6', 'commonjs',) and self.template_name:
_, absolute_include_path, _ = self.environment.loader.get_source(
self.environment, node.template.value
)
include_path = os.path.relpath(
absolute_include_path, os.path.dirname(self.template_path)
)
if not include_path.startswith('.'):
include_path = './' + include_path
include_path = path.splitext(include_path)[0] + self.include_ext
include_var_name = self._get_depencency_var_name(include_path)
if not include_var_name:
include_var_name = self._add_dependency(include_path)
if self.js_module_format is None:
self.output.write('jinjaToJS.include("')
self.output.write(include_path)
self.output.write('");')
else:
self.output.write(include_var_name)
self.output.write('(')
self.output.write(self.context_name)
self.output.write(')')
def _process_add(self, node, **kwargs):
self._process_math(node, math_operator=' + ', **kwargs)
def _process_sub(self, node, **kwargs):
self._process_math(node, math_operator=' - ', **kwargs)
def _process_div(self, node, **kwargs):
self._process_math(node, math_operator=' / ', **kwargs)
def _process_floordiv(self, node, **kwargs):
self._process_math(node, math_operator=' / ', function='Math.floor', **kwargs)
def _process_mul(self, node, **kwargs):
self._process_math(node, math_operator=' * ', **kwargs)
def _process_mod(self, node, **kwargs):
self._process_math(node, math_operator=' % ', **kwargs)
def _process_math(self, node, math_operator=None, function=None, **kwargs):
"""
Processes a math node e.g. `Div`, `Sub`, `Add`, `Mul` etc...
If `function` is provided the expression is wrapped in a call to that function.
"""
with self._interpolation():
if function:
self.output.write(function)
self.output.write('(')
self._process_node(node.left, **kwargs)
self.output.write(math_operator)
self._process_node(node.right, **kwargs)
if function:
self.output.write(')')
def _process_args(self, node, **kwargs):
args = getattr(node, 'args', None)
if not args:
return
for i, item in enumerate(args):
self._process_node(item, **kwargs)
if i < len(node.args) - 1:
self.output.write(',')
@contextlib.contextmanager
def _execution(self):
"""
Context manager for executing some JavaScript inside a template.
"""
did_start_executing = False
if self.state == STATE_DEFAULT:
did_start_executing = True
self.state = STATE_EXECUTING
def close():
if did_start_executing and self.state == STATE_EXECUTING:
self.state = STATE_DEFAULT
yield close
close()
@contextlib.contextmanager
def _interpolation(self, safe=False):
did_start_interpolating = False
if self.state == STATE_DEFAULT:
did_start_interpolating = True
self.output.write('__result += "" + ')
if safe is not True:
self.output.write('__runtime.escape')
self.output.write('((__tmp = (')
self.state = STATE_INTERPOLATING
def close():
if did_start_interpolating and self.state == STATE_INTERPOLATING:
self.output.write(')) == null ? "" : __tmp);')
self.state = STATE_DEFAULT
yield close
close()
@contextlib.contextmanager
def _scoped_variables(self, nodes_list, **kwargs):
"""
Context manager for creating scoped variables defined by the nodes in `nodes_list`.
These variables will be added to the context, and when the context manager exits the
context object will be restored to it's previous state.
"""
tmp_vars = []
for node in nodes_list:
is_assign_node = isinstance(node, nodes.Assign)
name = node.target.name if is_assign_node else node.name
# create a temp variable name
tmp_var = next(self.temp_var_names)
# save previous context value
with self._execution():
# save the current value of this name
self.output.write('var %s = %s.%s;' % (tmp_var, self.context_name, name))
# add new value to context
self.output.write('%s.%s = ' % (self.context_name, name))
if is_assign_node:
self._process_node(node.node, **kwargs)
else:
self.output.write(node.name)
self.output.write(';')
tmp_vars.append((tmp_var, name))
yield
# restore context
for tmp_var, name in tmp_vars:
with self._execution():
self.output.write('%s.%s = %s;' % (self.context_name, name, tmp_var))
@contextlib.contextmanager
def _python_bool_wrapper(self, **kwargs):
use_python_bool_wrapper = kwargs.get('use_python_bool_wrapper')
if use_python_bool_wrapper:
self.output.write('__runtime.boolean(')
with option(kwargs, use_python_bool_wrapper=False):
yield kwargs
if use_python_bool_wrapper:
self.output.write(')')
|
jonbretman/jinja-to-js
|
jinja_to_js/__init__.py
|
JinjaToJS._execution
|
python
|
def _execution(self):
did_start_executing = False
if self.state == STATE_DEFAULT:
did_start_executing = True
self.state = STATE_EXECUTING
def close():
if did_start_executing and self.state == STATE_EXECUTING:
self.state = STATE_DEFAULT
yield close
close()
|
Context manager for executing some JavaScript inside a template.
|
train
|
https://github.com/jonbretman/jinja-to-js/blob/0a784b10a83d37a3171c5797547e9fc460c51289/jinja_to_js/__init__.py#L1054-L1070
| null |
class JinjaToJS(object):
def __init__(self,
template_root,
template_name,
js_module_format=None,
runtime_path='jinja-to-js',
include_prefix='',
include_ext='',
child_blocks=None,
dependencies=None,
custom_filters=None):
"""
Args:
template_root (str): The path to where templates should be loaded from.
template_name (str): The name of the template to compile (relative to `template_root`).
js_module_format (str, optional): The JavaScript module format to use.
One of ('amd', 'commonjs', 'es6')
runtime_path (str, optional): If `js_module_format` is specified then the JavaScript
runtime will be imported using the appropriate method.
It defaults to assuming it will be imported from
`node_modules` but you can change it using this option.
include_prefix (str, optional): If using the `amd` module format you can use this option
to add a prefix to every include path as AMD imports are
generally relative to the main file, not the module
importing.
include_ext (str, optional): By default any includes will be references without an
extension, as neither AMD, commonJS or ES6 require the
'.js' extension. If you want to use an extension, say
'.template' then set this option to a string including
the leading '.'
child_blocks (dict, optional): Used internally when handling templates that extend
other templates.
dependencies (list of tuple, optional): Used internally when handling templates that
extend other templates.
custom_filters (list of str, optional): List of custom filters which should be allowed.
These may be filters supported by Jinja but not
supported by jinja-to-js. These filters MUST be
registered with the jinja-to-js JS runtime.
"""
self.environment = Environment(loader=FileSystemLoader(template_root),
autoescape=True,
extensions=['jinja2.ext.with_', 'jinja2.ext.autoescape'])
self.output = six.StringIO()
self.stored_names = set()
self.temp_var_names = temp_var_names_generator()
self.state = STATE_DEFAULT
self.child_blocks = child_blocks or {}
self.dependencies = dependencies or []
self._runtime_function_cache = []
self.js_module_format = js_module_format
self.runtime_path = runtime_path
self.include_prefix = include_prefix
self.include_ext = include_ext
self.template_root = template_root
self.template_name = template_name
self.custom_filters = custom_filters or []
# The name of the JavaScript function that will output this template. By using a named
# function the template can call itself which is required to support recursive includes.
self.js_function_name = 'template' + ''.join(
x.title() for x in re.split(r'[^\w]|_', path.splitext(self.template_name)[0])
)
self.context_name = 'context'
self._add_dependency(self.runtime_path, 'jinjaToJS')
template_string, template_path, _ = self.environment.loader.get_source(
self.environment, self.template_name
)
# It is assumed that this will be the absolute path to the template. It is used to work out
# related paths for inclues.
self.template_path = template_path
if self.js_module_format not in JS_MODULE_FORMATS.keys():
raise ValueError(
'The js_module_format option must be one of: %s' % JS_MODULE_FORMATS.keys()
)
self.ast = self.environment.parse(template_string)
try:
for node in self.ast.body:
self._process_node(node)
except ExtendsException:
pass
def get_output(self):
"""
Returns the generated JavaScript code.
Returns:
str
"""
# generate the JS function string
template_function = TEMPLATE_WRAPPER.format(
function_name=self.js_function_name,
template_code=self.output.getvalue()
).strip()
# get the correct module format template
module_format = JS_MODULE_FORMATS[self.js_module_format]
# generate the module code
return module_format(self.dependencies, template_function)
def _get_depencency_var_name(self, dependency):
"""
Returns the variable name assigned to the given dependency or None if the dependency has
not yet been registered.
Args:
dependency (str): Thet dependency that needs to be imported.
Returns:
str or None
"""
for dep_path, var_name in self.dependencies:
if dep_path == dependency:
return var_name
def _add_dependency(self, dependency, var_name=None):
"""
Adds the given dependency and returns the variable name to use to access it. If `var_name`
is not given then a random one will be created.
Args:
dependency (str):
var_name (str, optional):
Returns:
str
"""
if var_name is None:
var_name = next(self.temp_var_names)
# Don't add duplicate dependencies
if (dependency, var_name) not in self.dependencies:
self.dependencies.append((dependency, var_name))
return var_name
def _process_node(self, node, **kwargs):
node_name = node.__class__.__name__.lower()
handler = getattr(self, '_process_' + node_name, None)
if callable(handler):
handler(node, **kwargs)
else:
raise Exception('Unknown node %s' % node)
def _process_extends(self, node, **kwargs):
"""
Processes an extends block e.g. `{% extends "some/template.jinja" %}`
"""
# find all the blocks in this template
for b in self.ast.find_all(nodes.Block):
# if not already in `child_blocks` then this is the first time a
# block with this name has been encountered.
if b.name not in self.child_blocks:
self.child_blocks[b.name] = b
else:
# otherwise we have seen this block before, so we need to find the last
# super_block and add the block from this template to the end.
block = self.child_blocks.get(b.name)
while hasattr(block, 'super_block'):
block = block.super_block
block.super_block = b
# load the parent template
parent_template = JinjaToJS(template_root=self.template_root,
template_name=node.template.value,
js_module_format=self.js_module_format,
runtime_path=self.runtime_path,
include_prefix=self.include_prefix,
include_ext=self.include_ext,
child_blocks=self.child_blocks,
dependencies=self.dependencies)
# add the parent templates output to the current output
self.output.write(parent_template.output.getvalue())
# Raise an exception so we stop parsing this template
raise ExtendsException
def _process_block(self, node, **kwargs):
"""
Processes a block e.g. `{% block my_block %}{% endblock %}`
"""
# check if this node already has a 'super_block' attribute
if not hasattr(node, 'super_block'):
# since it doesn't it must be the last block in the inheritance chain
node.super_block = None
# see if there has been a child block defined - if there is this
# will be the first block in the inheritance chain
child_block = self.child_blocks.get(node.name)
if child_block:
# we have child nodes so we need to set `node` as the
# super of the last one in the chain
last_block = child_block
while hasattr(last_block, 'super_block'):
last_block = child_block.super_block
# once we have found it, set this node as it's super block
last_block.super_block = node
# this is the node we want to process as it's the first in the inheritance chain
node = child_block
# process the block passing the it's super along, if this block
# calls super() it will be handled by `_process_call`
for n in node.body:
self._process_node(n, super_block=node.super_block, **kwargs)
def _process_output(self, node, **kwargs):
"""
Processes an output node, which will contain things like `Name` and `TemplateData` nodes.
"""
for n in node.nodes:
self._process_node(n, **kwargs)
def _process_templatedata(self, node, **_):
"""
Processes a `TemplateData` node, this is just a bit of as-is text
to be written to the output.
"""
# escape double quotes
value = re.sub('"', r'\\"', node.data)
# escape new lines
value = re.sub('\n', r'\\n', value)
# append value to the result
self.output.write('__result += "' + value + '";')
def _process_name(self, node, **kwargs):
"""
Processes a `Name` node. Some examples of `Name` nodes:
{{ foo }} -> 'foo' is a Name
{% if foo }} -> 'foo' is a Name
"""
with self._interpolation():
with self._python_bool_wrapper(**kwargs):
if node.name not in self.stored_names and node.ctx != 'store':
self.output.write(self.context_name)
self.output.write('.')
if node.ctx == 'store':
self.stored_names.add(node.name)
self.output.write(node.name)
def _process_getattr(self, node, **kwargs):
"""
Processes a `GetAttr` node. e.g. {{ foo.bar }}
"""
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
if is_loop_helper(node):
self._process_loop_helper(node, **new_kwargs)
else:
self._process_node(node.node, **new_kwargs)
self.output.write('.')
self.output.write(node.attr)
def _process_getitem(self, node, **kwargs):
"""
Processes a `GetItem` node e.g. {{ foo["bar"] }}
"""
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
if isinstance(node.arg, nodes.Slice):
self.output.write('.slice(')
if node.arg.step is not None:
raise Exception('The step argument is not supported when slicing.')
if node.arg.start is None:
self.output.write('0')
else:
self._process_node(node.arg.start, **new_kwargs)
if node.arg.stop is None:
self.output.write(')')
else:
self.output.write(',')
self._process_node(node.arg.stop, **new_kwargs)
self.output.write(')')
else:
self.output.write('[')
self._process_node(node.arg, **new_kwargs)
self.output.write(']')
def _process_for(self, node, **kwargs):
"""
Processes a for loop. e.g.
{% for number in numbers %}
{{ number }}
{% endfor %}
{% for key, value in somemap.items() %}
{{ key }} -> {{ value }}
{% %}
"""
# since a for loop can introduce new names into the context
# we need to remember the ones that existed outside the loop
previous_stored_names = self.stored_names.copy()
with self._execution():
self.output.write('__runtime.each(')
if is_method_call(node.iter, dict.keys.__name__):
self.output.write('Object.keys(')
self._process_node(node.iter, **kwargs)
if is_method_call(node.iter, dict.keys.__name__):
self.output.write(')')
self.output.write(',')
self.output.write('function')
self.output.write('(')
# javascript iterations put the value first, then the key
if isinstance(node.target, nodes.Tuple):
if len(node.target.items) > 2:
raise Exception('De-structuring more than 2 items is not supported.')
for i, item in enumerate(reversed(node.target.items)):
self._process_node(item, **kwargs)
if i < len(node.target.items) - 1:
self.output.write(',')
else:
self._process_node(node.target, **kwargs)
self.output.write(')')
self.output.write('{')
if node.test:
self.output.write('if (!(')
self._process_node(node.test, **kwargs)
self.output.write(')) { return; }')
assigns = node.target.items if isinstance(node.target, nodes.Tuple) else [node.target]
with self._scoped_variables(assigns, **kwargs):
for n in node.body:
self._process_node(n, **kwargs)
with self._execution():
self.output.write('}')
self.output.write(')')
self.output.write(';')
# restore the stored names
self.stored_names = previous_stored_names
def _process_if(self, node, execute_end=None, **kwargs):
"""
Processes an if block e.g. `{% if foo %} do something {% endif %}`
"""
with self._execution():
self.output.write('if')
self.output.write('(')
with option(kwargs, use_python_bool_wrapper=True):
self._process_node(node.test, **kwargs)
self.output.write(')')
self.output.write('{')
# We accept an `execute_end` function as a keyword argument as this function is
# recursive in the case of something like if-elif-elif-else. In these cases this
# invocation of this function may have to close execution opened by a previous
# invocation of this function.
if execute_end:
execute_end()
# body
for n in node.body:
self._process_node(n, **kwargs)
if not node.else_ and not node.elif_:
# no else - just close the if
with self._execution():
self.output.write('}')
else:
# either an else or an elif
with self._execution() as execute_end:
self.output.write('}')
self.output.write(' else ')
# check for elif
for n in node.elif_:
self._process_node(n, execute_end=execute_end, **kwargs)
if node.elif_ and node.else_:
self.output.write(' else ')
# open up the body
self.output.write('{')
# process the body of the else
for n in node.else_:
self._process_node(n, **kwargs)
# close the body
with self._execution():
self.output.write('}')
def _process_condexpr(self, node, **kwargs):
with self._interpolation():
self.output.write('(')
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.test, **new_kwargs)
self.output.write(' ? ')
self._process_node(node.expr1, **kwargs)
self.output.write(' : ')
self._process_node(node.expr2, **kwargs)
self.output.write(')')
def _process_not(self, node, **kwargs):
self.output.write('!')
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
def _process_or(self, node, **kwargs):
self._process_node(node.left, **kwargs)
self.output.write(' || ')
self._process_node(node.right, **kwargs)
def _process_and(self, node, **kwargs):
self._process_node(node.left, **kwargs)
self.output.write(' && ')
self._process_node(node.right, **kwargs)
def _process_tuple(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
def _process_call(self, node, super_block=None, **kwargs):
if is_method_call(node, DICT_ITER_METHODS):
# special case for dict methods
self._process_node(node.node.node, **kwargs)
elif is_method_call(node, 'super'):
# special case for the super() method which is available inside blocks
if not super_block:
raise Exception('super() called outside of a block with a parent.')
self._process_node(super_block, **kwargs)
else:
# just a normal function call on a context variable
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
self.output.write('(')
self._process_args(node, **new_kwargs)
self.output.write(')')
# only output the semi-colon if we are not interpolating
if self.state != STATE_INTERPOLATING:
self.output.write('')
def _process_filter(self, node, **kwargs):
method_name = getattr(self, '_process_filter_%s' % node.name, None)
if callable(method_name):
method_name(node, **kwargs)
elif node.name in self.custom_filters:
with self._interpolation(safe=True):
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.%s(' % node.name)
self._process_node(node.node, **new_kwargs)
if getattr(node, 'args', None):
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
else:
raise Exception('Unsupported filter: %s' % node.name)
def _process_filter_safe(self, node, **kwargs):
with self._interpolation(safe=True):
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
def _process_filter_capitalize(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.capitalize(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_abs(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('Math.abs(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_attr(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
self.output.write('[')
self._process_node(node.args[0], **new_kwargs)
self.output.write(']')
def _process_filter_batch(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.batch(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_default(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.default(')
self._process_node(node.node, **new_kwargs)
if node.args:
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_first(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.first(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_int(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.int(')
self._process_node(node.node, **new_kwargs)
if node.args:
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_last(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.last(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_length(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.size(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_lower(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").toLowerCase()')
def _process_filter_slice(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.slice(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_title(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.title(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_trim(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").trim()')
def _process_filter_upper(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").toUpperCase()')
def _process_filter_truncate(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.truncate(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_assign(self, node, **kwargs):
with self._execution():
self.output.write('var ')
self._process_node(node.target, **kwargs)
self.output.write(' = ')
self._process_node(node.node, **kwargs)
self.output.write(';')
def _process_with(self, node, **kwargs):
# keep a copy of the stored names before the scope
previous_stored_names = self.stored_names.copy()
# assigns in the with tag
# e.g. {% with var = "something %}
assigns_in_tag = [nodes.Assign(t, v) for t, v in zip(node.targets, node.values)]
# assigns in the with body
# e.g. {% set name = 'John' %}
assigns_in_body = [x for x in node.body if isinstance(x, nodes.Assign)]
# remove assigns from the body
node.body = [x for x in node.body if not isinstance(x, nodes.Assign)]
# get a list of all the assigns in this with block
# both on the tag, and within the body of the block
all_assigns = assigns_in_tag + assigns_in_body
with self._execution():
self.output.write('(function () {')
with self._scoped_variables(all_assigns, **kwargs):
for node in node.body:
self._process_node(node, **kwargs)
with self._execution():
self.output.write('})();')
# restore previous stored names
self.stored_names = previous_stored_names
def _process_compare(self, node, **kwargs):
if len(node.ops) > 1:
raise Exception('Multiple operands are not supported.')
operand = node.ops[0]
is_equality = operand.op in ('eq', 'ne')
left_hand_is_const = isinstance(node.expr, nodes.Const)
right_hand_is_const = isinstance(operand.expr, nodes.Const)
# If the operand is equality and neither the left or right hand side are constants then we
# will need to use the JavaScript deep equals function. Ideally we want to avoid using this
# as it is quite a big function.
use_is_equal_function = is_equality and not (left_hand_is_const or right_hand_is_const)
with option(kwargs, use_python_bool_wrapper=False):
if use_is_equal_function:
if operand.op == 'ne':
self.output.write('!')
self.output.write('__runtime.isEqual(')
self._process_node(node.expr, **kwargs)
if use_is_equal_function:
self.output.write(',')
else:
self.output.write(OPERANDS.get(operand.op))
self._process_node(operand.expr, **kwargs)
if use_is_equal_function:
self.output.write(')')
def _process_operand(self, node, **kwargs):
self.output.write(OPERANDS.get(node.op))
self._process_node(node.expr, **kwargs)
def _process_const(self, node, **_):
with self._interpolation():
self.output.write(json.dumps(node.value))
def _process_nonetype(self, node, **_):
with self._interpolation():
self.output.write('null')
def _process_neg(self, node, **kwargs):
with self._interpolation():
self.output.write('-')
self._process_node(node.node, **kwargs)
def _process_list(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
def _process_test(self, node, **kwargs):
with option(kwargs, use_python_bool_wrapper=False):
method_name = getattr(self, '_process_test_%s' % node.name, None)
if callable(method_name):
method_name(node, **kwargs)
else:
raise Exception('Unsupported test: %s' % node.name)
def _process_test_defined(self, node, **kwargs):
self.output.write('(typeof ')
self._process_node(node.node, **kwargs)
self.output.write(' !== "undefined")')
def _process_test_undefined(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' === undefined')
def _process_test_callable(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Function"')
def _process_test_divisibleby(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % ')
self._process_node(node.args[0], **kwargs)
self.output.write(' === 0')
def _process_test_even(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % 2 === 0')
def _process_test_odd(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % 2 === 1')
def _process_test_none(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' === null')
def _process_test_upper(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write('.toUpperCase() === ')
self._process_node(node.node, **kwargs)
def _process_test_lower(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write('.toLowerCase() === ')
self._process_node(node.node, **kwargs)
def _process_test_string(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "String"')
def _process_test_mapping(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Object"')
def _process_test_number(self, node, **kwargs):
self.output.write('(__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Number" && !isNaN(')
self._process_node(node.node, **kwargs)
self.output.write('))')
def _process_include(self, node, **kwargs):
with self._interpolation(safe=True):
include_path = node.template.value
if include_path == self.template_name:
# template is including itself
include_var_name = self.js_function_name
else:
if self.include_prefix:
include_path = self.include_prefix + node.template.value
elif self.js_module_format in ('es6', 'commonjs',) and self.template_name:
_, absolute_include_path, _ = self.environment.loader.get_source(
self.environment, node.template.value
)
include_path = os.path.relpath(
absolute_include_path, os.path.dirname(self.template_path)
)
if not include_path.startswith('.'):
include_path = './' + include_path
include_path = path.splitext(include_path)[0] + self.include_ext
include_var_name = self._get_depencency_var_name(include_path)
if not include_var_name:
include_var_name = self._add_dependency(include_path)
if self.js_module_format is None:
self.output.write('jinjaToJS.include("')
self.output.write(include_path)
self.output.write('");')
else:
self.output.write(include_var_name)
self.output.write('(')
self.output.write(self.context_name)
self.output.write(')')
def _process_add(self, node, **kwargs):
self._process_math(node, math_operator=' + ', **kwargs)
def _process_sub(self, node, **kwargs):
self._process_math(node, math_operator=' - ', **kwargs)
def _process_div(self, node, **kwargs):
self._process_math(node, math_operator=' / ', **kwargs)
def _process_floordiv(self, node, **kwargs):
self._process_math(node, math_operator=' / ', function='Math.floor', **kwargs)
def _process_mul(self, node, **kwargs):
self._process_math(node, math_operator=' * ', **kwargs)
def _process_mod(self, node, **kwargs):
self._process_math(node, math_operator=' % ', **kwargs)
def _process_math(self, node, math_operator=None, function=None, **kwargs):
"""
Processes a math node e.g. `Div`, `Sub`, `Add`, `Mul` etc...
If `function` is provided the expression is wrapped in a call to that function.
"""
with self._interpolation():
if function:
self.output.write(function)
self.output.write('(')
self._process_node(node.left, **kwargs)
self.output.write(math_operator)
self._process_node(node.right, **kwargs)
if function:
self.output.write(')')
def _process_loop_helper(self, node, **kwargs):
"""
Processes a loop helper e.g. {{ loop.first }} or {{ loop.index }}
"""
if node.attr == LOOP_HELPER_INDEX:
self.output.write('(arguments[1] + 1)')
elif node.attr == LOOP_HELPER_INDEX_0:
self.output.write('arguments[1]')
elif node.attr == LOOP_HELPER_FIRST:
self.output.write('(arguments[1] == 0)')
elif node.attr == LOOP_HELPER_LAST:
self.output.write('(arguments[1] == arguments[2].length - 1)')
elif node.attr == LOOP_HELPER_LENGTH:
self.output.write('arguments[2].length')
def _process_args(self, node, **kwargs):
args = getattr(node, 'args', None)
if not args:
return
for i, item in enumerate(args):
self._process_node(item, **kwargs)
if i < len(node.args) - 1:
self.output.write(',')
@contextlib.contextmanager
@contextlib.contextmanager
def _interpolation(self, safe=False):
did_start_interpolating = False
if self.state == STATE_DEFAULT:
did_start_interpolating = True
self.output.write('__result += "" + ')
if safe is not True:
self.output.write('__runtime.escape')
self.output.write('((__tmp = (')
self.state = STATE_INTERPOLATING
def close():
if did_start_interpolating and self.state == STATE_INTERPOLATING:
self.output.write(')) == null ? "" : __tmp);')
self.state = STATE_DEFAULT
yield close
close()
@contextlib.contextmanager
def _scoped_variables(self, nodes_list, **kwargs):
"""
Context manager for creating scoped variables defined by the nodes in `nodes_list`.
These variables will be added to the context, and when the context manager exits the
context object will be restored to it's previous state.
"""
tmp_vars = []
for node in nodes_list:
is_assign_node = isinstance(node, nodes.Assign)
name = node.target.name if is_assign_node else node.name
# create a temp variable name
tmp_var = next(self.temp_var_names)
# save previous context value
with self._execution():
# save the current value of this name
self.output.write('var %s = %s.%s;' % (tmp_var, self.context_name, name))
# add new value to context
self.output.write('%s.%s = ' % (self.context_name, name))
if is_assign_node:
self._process_node(node.node, **kwargs)
else:
self.output.write(node.name)
self.output.write(';')
tmp_vars.append((tmp_var, name))
yield
# restore context
for tmp_var, name in tmp_vars:
with self._execution():
self.output.write('%s.%s = %s;' % (self.context_name, name, tmp_var))
@contextlib.contextmanager
def _python_bool_wrapper(self, **kwargs):
use_python_bool_wrapper = kwargs.get('use_python_bool_wrapper')
if use_python_bool_wrapper:
self.output.write('__runtime.boolean(')
with option(kwargs, use_python_bool_wrapper=False):
yield kwargs
if use_python_bool_wrapper:
self.output.write(')')
|
jonbretman/jinja-to-js
|
jinja_to_js/__init__.py
|
JinjaToJS._scoped_variables
|
python
|
def _scoped_variables(self, nodes_list, **kwargs):
tmp_vars = []
for node in nodes_list:
is_assign_node = isinstance(node, nodes.Assign)
name = node.target.name if is_assign_node else node.name
# create a temp variable name
tmp_var = next(self.temp_var_names)
# save previous context value
with self._execution():
# save the current value of this name
self.output.write('var %s = %s.%s;' % (tmp_var, self.context_name, name))
# add new value to context
self.output.write('%s.%s = ' % (self.context_name, name))
if is_assign_node:
self._process_node(node.node, **kwargs)
else:
self.output.write(node.name)
self.output.write(';')
tmp_vars.append((tmp_var, name))
yield
# restore context
for tmp_var, name in tmp_vars:
with self._execution():
self.output.write('%s.%s = %s;' % (self.context_name, name, tmp_var))
|
Context manager for creating scoped variables defined by the nodes in `nodes_list`.
These variables will be added to the context, and when the context manager exits the
context object will be restored to it's previous state.
|
train
|
https://github.com/jonbretman/jinja-to-js/blob/0a784b10a83d37a3171c5797547e9fc460c51289/jinja_to_js/__init__.py#L1094-L1133
| null |
class JinjaToJS(object):
def __init__(self,
template_root,
template_name,
js_module_format=None,
runtime_path='jinja-to-js',
include_prefix='',
include_ext='',
child_blocks=None,
dependencies=None,
custom_filters=None):
"""
Args:
template_root (str): The path to where templates should be loaded from.
template_name (str): The name of the template to compile (relative to `template_root`).
js_module_format (str, optional): The JavaScript module format to use.
One of ('amd', 'commonjs', 'es6')
runtime_path (str, optional): If `js_module_format` is specified then the JavaScript
runtime will be imported using the appropriate method.
It defaults to assuming it will be imported from
`node_modules` but you can change it using this option.
include_prefix (str, optional): If using the `amd` module format you can use this option
to add a prefix to every include path as AMD imports are
generally relative to the main file, not the module
importing.
include_ext (str, optional): By default any includes will be references without an
extension, as neither AMD, commonJS or ES6 require the
'.js' extension. If you want to use an extension, say
'.template' then set this option to a string including
the leading '.'
child_blocks (dict, optional): Used internally when handling templates that extend
other templates.
dependencies (list of tuple, optional): Used internally when handling templates that
extend other templates.
custom_filters (list of str, optional): List of custom filters which should be allowed.
These may be filters supported by Jinja but not
supported by jinja-to-js. These filters MUST be
registered with the jinja-to-js JS runtime.
"""
self.environment = Environment(loader=FileSystemLoader(template_root),
autoescape=True,
extensions=['jinja2.ext.with_', 'jinja2.ext.autoescape'])
self.output = six.StringIO()
self.stored_names = set()
self.temp_var_names = temp_var_names_generator()
self.state = STATE_DEFAULT
self.child_blocks = child_blocks or {}
self.dependencies = dependencies or []
self._runtime_function_cache = []
self.js_module_format = js_module_format
self.runtime_path = runtime_path
self.include_prefix = include_prefix
self.include_ext = include_ext
self.template_root = template_root
self.template_name = template_name
self.custom_filters = custom_filters or []
# The name of the JavaScript function that will output this template. By using a named
# function the template can call itself which is required to support recursive includes.
self.js_function_name = 'template' + ''.join(
x.title() for x in re.split(r'[^\w]|_', path.splitext(self.template_name)[0])
)
self.context_name = 'context'
self._add_dependency(self.runtime_path, 'jinjaToJS')
template_string, template_path, _ = self.environment.loader.get_source(
self.environment, self.template_name
)
# It is assumed that this will be the absolute path to the template. It is used to work out
# related paths for inclues.
self.template_path = template_path
if self.js_module_format not in JS_MODULE_FORMATS.keys():
raise ValueError(
'The js_module_format option must be one of: %s' % JS_MODULE_FORMATS.keys()
)
self.ast = self.environment.parse(template_string)
try:
for node in self.ast.body:
self._process_node(node)
except ExtendsException:
pass
def get_output(self):
"""
Returns the generated JavaScript code.
Returns:
str
"""
# generate the JS function string
template_function = TEMPLATE_WRAPPER.format(
function_name=self.js_function_name,
template_code=self.output.getvalue()
).strip()
# get the correct module format template
module_format = JS_MODULE_FORMATS[self.js_module_format]
# generate the module code
return module_format(self.dependencies, template_function)
def _get_depencency_var_name(self, dependency):
"""
Returns the variable name assigned to the given dependency or None if the dependency has
not yet been registered.
Args:
dependency (str): Thet dependency that needs to be imported.
Returns:
str or None
"""
for dep_path, var_name in self.dependencies:
if dep_path == dependency:
return var_name
def _add_dependency(self, dependency, var_name=None):
"""
Adds the given dependency and returns the variable name to use to access it. If `var_name`
is not given then a random one will be created.
Args:
dependency (str):
var_name (str, optional):
Returns:
str
"""
if var_name is None:
var_name = next(self.temp_var_names)
# Don't add duplicate dependencies
if (dependency, var_name) not in self.dependencies:
self.dependencies.append((dependency, var_name))
return var_name
def _process_node(self, node, **kwargs):
node_name = node.__class__.__name__.lower()
handler = getattr(self, '_process_' + node_name, None)
if callable(handler):
handler(node, **kwargs)
else:
raise Exception('Unknown node %s' % node)
def _process_extends(self, node, **kwargs):
"""
Processes an extends block e.g. `{% extends "some/template.jinja" %}`
"""
# find all the blocks in this template
for b in self.ast.find_all(nodes.Block):
# if not already in `child_blocks` then this is the first time a
# block with this name has been encountered.
if b.name not in self.child_blocks:
self.child_blocks[b.name] = b
else:
# otherwise we have seen this block before, so we need to find the last
# super_block and add the block from this template to the end.
block = self.child_blocks.get(b.name)
while hasattr(block, 'super_block'):
block = block.super_block
block.super_block = b
# load the parent template
parent_template = JinjaToJS(template_root=self.template_root,
template_name=node.template.value,
js_module_format=self.js_module_format,
runtime_path=self.runtime_path,
include_prefix=self.include_prefix,
include_ext=self.include_ext,
child_blocks=self.child_blocks,
dependencies=self.dependencies)
# add the parent templates output to the current output
self.output.write(parent_template.output.getvalue())
# Raise an exception so we stop parsing this template
raise ExtendsException
def _process_block(self, node, **kwargs):
"""
Processes a block e.g. `{% block my_block %}{% endblock %}`
"""
# check if this node already has a 'super_block' attribute
if not hasattr(node, 'super_block'):
# since it doesn't it must be the last block in the inheritance chain
node.super_block = None
# see if there has been a child block defined - if there is this
# will be the first block in the inheritance chain
child_block = self.child_blocks.get(node.name)
if child_block:
# we have child nodes so we need to set `node` as the
# super of the last one in the chain
last_block = child_block
while hasattr(last_block, 'super_block'):
last_block = child_block.super_block
# once we have found it, set this node as it's super block
last_block.super_block = node
# this is the node we want to process as it's the first in the inheritance chain
node = child_block
# process the block passing the it's super along, if this block
# calls super() it will be handled by `_process_call`
for n in node.body:
self._process_node(n, super_block=node.super_block, **kwargs)
def _process_output(self, node, **kwargs):
"""
Processes an output node, which will contain things like `Name` and `TemplateData` nodes.
"""
for n in node.nodes:
self._process_node(n, **kwargs)
def _process_templatedata(self, node, **_):
"""
Processes a `TemplateData` node, this is just a bit of as-is text
to be written to the output.
"""
# escape double quotes
value = re.sub('"', r'\\"', node.data)
# escape new lines
value = re.sub('\n', r'\\n', value)
# append value to the result
self.output.write('__result += "' + value + '";')
def _process_name(self, node, **kwargs):
"""
Processes a `Name` node. Some examples of `Name` nodes:
{{ foo }} -> 'foo' is a Name
{% if foo }} -> 'foo' is a Name
"""
with self._interpolation():
with self._python_bool_wrapper(**kwargs):
if node.name not in self.stored_names and node.ctx != 'store':
self.output.write(self.context_name)
self.output.write('.')
if node.ctx == 'store':
self.stored_names.add(node.name)
self.output.write(node.name)
def _process_getattr(self, node, **kwargs):
"""
Processes a `GetAttr` node. e.g. {{ foo.bar }}
"""
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
if is_loop_helper(node):
self._process_loop_helper(node, **new_kwargs)
else:
self._process_node(node.node, **new_kwargs)
self.output.write('.')
self.output.write(node.attr)
def _process_getitem(self, node, **kwargs):
"""
Processes a `GetItem` node e.g. {{ foo["bar"] }}
"""
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
if isinstance(node.arg, nodes.Slice):
self.output.write('.slice(')
if node.arg.step is not None:
raise Exception('The step argument is not supported when slicing.')
if node.arg.start is None:
self.output.write('0')
else:
self._process_node(node.arg.start, **new_kwargs)
if node.arg.stop is None:
self.output.write(')')
else:
self.output.write(',')
self._process_node(node.arg.stop, **new_kwargs)
self.output.write(')')
else:
self.output.write('[')
self._process_node(node.arg, **new_kwargs)
self.output.write(']')
def _process_for(self, node, **kwargs):
"""
Processes a for loop. e.g.
{% for number in numbers %}
{{ number }}
{% endfor %}
{% for key, value in somemap.items() %}
{{ key }} -> {{ value }}
{% %}
"""
# since a for loop can introduce new names into the context
# we need to remember the ones that existed outside the loop
previous_stored_names = self.stored_names.copy()
with self._execution():
self.output.write('__runtime.each(')
if is_method_call(node.iter, dict.keys.__name__):
self.output.write('Object.keys(')
self._process_node(node.iter, **kwargs)
if is_method_call(node.iter, dict.keys.__name__):
self.output.write(')')
self.output.write(',')
self.output.write('function')
self.output.write('(')
# javascript iterations put the value first, then the key
if isinstance(node.target, nodes.Tuple):
if len(node.target.items) > 2:
raise Exception('De-structuring more than 2 items is not supported.')
for i, item in enumerate(reversed(node.target.items)):
self._process_node(item, **kwargs)
if i < len(node.target.items) - 1:
self.output.write(',')
else:
self._process_node(node.target, **kwargs)
self.output.write(')')
self.output.write('{')
if node.test:
self.output.write('if (!(')
self._process_node(node.test, **kwargs)
self.output.write(')) { return; }')
assigns = node.target.items if isinstance(node.target, nodes.Tuple) else [node.target]
with self._scoped_variables(assigns, **kwargs):
for n in node.body:
self._process_node(n, **kwargs)
with self._execution():
self.output.write('}')
self.output.write(')')
self.output.write(';')
# restore the stored names
self.stored_names = previous_stored_names
def _process_if(self, node, execute_end=None, **kwargs):
"""
Processes an if block e.g. `{% if foo %} do something {% endif %}`
"""
with self._execution():
self.output.write('if')
self.output.write('(')
with option(kwargs, use_python_bool_wrapper=True):
self._process_node(node.test, **kwargs)
self.output.write(')')
self.output.write('{')
# We accept an `execute_end` function as a keyword argument as this function is
# recursive in the case of something like if-elif-elif-else. In these cases this
# invocation of this function may have to close execution opened by a previous
# invocation of this function.
if execute_end:
execute_end()
# body
for n in node.body:
self._process_node(n, **kwargs)
if not node.else_ and not node.elif_:
# no else - just close the if
with self._execution():
self.output.write('}')
else:
# either an else or an elif
with self._execution() as execute_end:
self.output.write('}')
self.output.write(' else ')
# check for elif
for n in node.elif_:
self._process_node(n, execute_end=execute_end, **kwargs)
if node.elif_ and node.else_:
self.output.write(' else ')
# open up the body
self.output.write('{')
# process the body of the else
for n in node.else_:
self._process_node(n, **kwargs)
# close the body
with self._execution():
self.output.write('}')
def _process_condexpr(self, node, **kwargs):
with self._interpolation():
self.output.write('(')
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.test, **new_kwargs)
self.output.write(' ? ')
self._process_node(node.expr1, **kwargs)
self.output.write(' : ')
self._process_node(node.expr2, **kwargs)
self.output.write(')')
def _process_not(self, node, **kwargs):
self.output.write('!')
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
def _process_or(self, node, **kwargs):
self._process_node(node.left, **kwargs)
self.output.write(' || ')
self._process_node(node.right, **kwargs)
def _process_and(self, node, **kwargs):
self._process_node(node.left, **kwargs)
self.output.write(' && ')
self._process_node(node.right, **kwargs)
def _process_tuple(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
def _process_call(self, node, super_block=None, **kwargs):
if is_method_call(node, DICT_ITER_METHODS):
# special case for dict methods
self._process_node(node.node.node, **kwargs)
elif is_method_call(node, 'super'):
# special case for the super() method which is available inside blocks
if not super_block:
raise Exception('super() called outside of a block with a parent.')
self._process_node(super_block, **kwargs)
else:
# just a normal function call on a context variable
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
self.output.write('(')
self._process_args(node, **new_kwargs)
self.output.write(')')
# only output the semi-colon if we are not interpolating
if self.state != STATE_INTERPOLATING:
self.output.write('')
def _process_filter(self, node, **kwargs):
method_name = getattr(self, '_process_filter_%s' % node.name, None)
if callable(method_name):
method_name(node, **kwargs)
elif node.name in self.custom_filters:
with self._interpolation(safe=True):
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.%s(' % node.name)
self._process_node(node.node, **new_kwargs)
if getattr(node, 'args', None):
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
else:
raise Exception('Unsupported filter: %s' % node.name)
def _process_filter_safe(self, node, **kwargs):
with self._interpolation(safe=True):
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
def _process_filter_capitalize(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.capitalize(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_abs(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('Math.abs(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_attr(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self._process_node(node.node, **new_kwargs)
self.output.write('[')
self._process_node(node.args[0], **new_kwargs)
self.output.write(']')
def _process_filter_batch(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.batch(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_default(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.default(')
self._process_node(node.node, **new_kwargs)
if node.args:
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_first(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.first(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_int(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.int(')
self._process_node(node.node, **new_kwargs)
if node.args:
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_last(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.last(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_length(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.size(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_lower(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").toLowerCase()')
def _process_filter_slice(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.slice(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_filter_title(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.title(')
self._process_node(node.node, **new_kwargs)
self.output.write(')')
def _process_filter_trim(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").trim()')
def _process_filter_upper(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('(')
self._process_node(node.node, **new_kwargs)
self.output.write(' + "").toUpperCase()')
def _process_filter_truncate(self, node, **kwargs):
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
self.output.write('__filters.truncate(')
self._process_node(node.node, **new_kwargs)
self.output.write(',')
self._process_args(node, **new_kwargs)
self.output.write(')')
def _process_assign(self, node, **kwargs):
with self._execution():
self.output.write('var ')
self._process_node(node.target, **kwargs)
self.output.write(' = ')
self._process_node(node.node, **kwargs)
self.output.write(';')
def _process_with(self, node, **kwargs):
# keep a copy of the stored names before the scope
previous_stored_names = self.stored_names.copy()
# assigns in the with tag
# e.g. {% with var = "something %}
assigns_in_tag = [nodes.Assign(t, v) for t, v in zip(node.targets, node.values)]
# assigns in the with body
# e.g. {% set name = 'John' %}
assigns_in_body = [x for x in node.body if isinstance(x, nodes.Assign)]
# remove assigns from the body
node.body = [x for x in node.body if not isinstance(x, nodes.Assign)]
# get a list of all the assigns in this with block
# both on the tag, and within the body of the block
all_assigns = assigns_in_tag + assigns_in_body
with self._execution():
self.output.write('(function () {')
with self._scoped_variables(all_assigns, **kwargs):
for node in node.body:
self._process_node(node, **kwargs)
with self._execution():
self.output.write('})();')
# restore previous stored names
self.stored_names = previous_stored_names
def _process_compare(self, node, **kwargs):
if len(node.ops) > 1:
raise Exception('Multiple operands are not supported.')
operand = node.ops[0]
is_equality = operand.op in ('eq', 'ne')
left_hand_is_const = isinstance(node.expr, nodes.Const)
right_hand_is_const = isinstance(operand.expr, nodes.Const)
# If the operand is equality and neither the left or right hand side are constants then we
# will need to use the JavaScript deep equals function. Ideally we want to avoid using this
# as it is quite a big function.
use_is_equal_function = is_equality and not (left_hand_is_const or right_hand_is_const)
with option(kwargs, use_python_bool_wrapper=False):
if use_is_equal_function:
if operand.op == 'ne':
self.output.write('!')
self.output.write('__runtime.isEqual(')
self._process_node(node.expr, **kwargs)
if use_is_equal_function:
self.output.write(',')
else:
self.output.write(OPERANDS.get(operand.op))
self._process_node(operand.expr, **kwargs)
if use_is_equal_function:
self.output.write(')')
def _process_operand(self, node, **kwargs):
self.output.write(OPERANDS.get(node.op))
self._process_node(node.expr, **kwargs)
def _process_const(self, node, **_):
with self._interpolation():
self.output.write(json.dumps(node.value))
def _process_nonetype(self, node, **_):
with self._interpolation():
self.output.write('null')
def _process_neg(self, node, **kwargs):
with self._interpolation():
self.output.write('-')
self._process_node(node.node, **kwargs)
def _process_list(self, node, **kwargs):
self.output.write('[')
for i, item in enumerate(node.items):
self._process_node(item, **kwargs)
if i < len(node.items) - 1:
self.output.write(',')
self.output.write(']')
def _process_test(self, node, **kwargs):
with option(kwargs, use_python_bool_wrapper=False):
method_name = getattr(self, '_process_test_%s' % node.name, None)
if callable(method_name):
method_name(node, **kwargs)
else:
raise Exception('Unsupported test: %s' % node.name)
def _process_test_defined(self, node, **kwargs):
self.output.write('(typeof ')
self._process_node(node.node, **kwargs)
self.output.write(' !== "undefined")')
def _process_test_undefined(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' === undefined')
def _process_test_callable(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Function"')
def _process_test_divisibleby(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % ')
self._process_node(node.args[0], **kwargs)
self.output.write(' === 0')
def _process_test_even(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % 2 === 0')
def _process_test_odd(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' % 2 === 1')
def _process_test_none(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write(' === null')
def _process_test_upper(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write('.toUpperCase() === ')
self._process_node(node.node, **kwargs)
def _process_test_lower(self, node, **kwargs):
self._process_node(node.node, **kwargs)
self.output.write('.toLowerCase() === ')
self._process_node(node.node, **kwargs)
def _process_test_string(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "String"')
def _process_test_mapping(self, node, **kwargs):
self.output.write('__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Object"')
def _process_test_number(self, node, **kwargs):
self.output.write('(__runtime.type(')
self._process_node(node.node, **kwargs)
self.output.write(') === "Number" && !isNaN(')
self._process_node(node.node, **kwargs)
self.output.write('))')
def _process_include(self, node, **kwargs):
with self._interpolation(safe=True):
include_path = node.template.value
if include_path == self.template_name:
# template is including itself
include_var_name = self.js_function_name
else:
if self.include_prefix:
include_path = self.include_prefix + node.template.value
elif self.js_module_format in ('es6', 'commonjs',) and self.template_name:
_, absolute_include_path, _ = self.environment.loader.get_source(
self.environment, node.template.value
)
include_path = os.path.relpath(
absolute_include_path, os.path.dirname(self.template_path)
)
if not include_path.startswith('.'):
include_path = './' + include_path
include_path = path.splitext(include_path)[0] + self.include_ext
include_var_name = self._get_depencency_var_name(include_path)
if not include_var_name:
include_var_name = self._add_dependency(include_path)
if self.js_module_format is None:
self.output.write('jinjaToJS.include("')
self.output.write(include_path)
self.output.write('");')
else:
self.output.write(include_var_name)
self.output.write('(')
self.output.write(self.context_name)
self.output.write(')')
def _process_add(self, node, **kwargs):
self._process_math(node, math_operator=' + ', **kwargs)
def _process_sub(self, node, **kwargs):
self._process_math(node, math_operator=' - ', **kwargs)
def _process_div(self, node, **kwargs):
self._process_math(node, math_operator=' / ', **kwargs)
def _process_floordiv(self, node, **kwargs):
self._process_math(node, math_operator=' / ', function='Math.floor', **kwargs)
def _process_mul(self, node, **kwargs):
self._process_math(node, math_operator=' * ', **kwargs)
def _process_mod(self, node, **kwargs):
self._process_math(node, math_operator=' % ', **kwargs)
def _process_math(self, node, math_operator=None, function=None, **kwargs):
"""
Processes a math node e.g. `Div`, `Sub`, `Add`, `Mul` etc...
If `function` is provided the expression is wrapped in a call to that function.
"""
with self._interpolation():
if function:
self.output.write(function)
self.output.write('(')
self._process_node(node.left, **kwargs)
self.output.write(math_operator)
self._process_node(node.right, **kwargs)
if function:
self.output.write(')')
def _process_loop_helper(self, node, **kwargs):
"""
Processes a loop helper e.g. {{ loop.first }} or {{ loop.index }}
"""
if node.attr == LOOP_HELPER_INDEX:
self.output.write('(arguments[1] + 1)')
elif node.attr == LOOP_HELPER_INDEX_0:
self.output.write('arguments[1]')
elif node.attr == LOOP_HELPER_FIRST:
self.output.write('(arguments[1] == 0)')
elif node.attr == LOOP_HELPER_LAST:
self.output.write('(arguments[1] == arguments[2].length - 1)')
elif node.attr == LOOP_HELPER_LENGTH:
self.output.write('arguments[2].length')
def _process_args(self, node, **kwargs):
args = getattr(node, 'args', None)
if not args:
return
for i, item in enumerate(args):
self._process_node(item, **kwargs)
if i < len(node.args) - 1:
self.output.write(',')
@contextlib.contextmanager
def _execution(self):
"""
Context manager for executing some JavaScript inside a template.
"""
did_start_executing = False
if self.state == STATE_DEFAULT:
did_start_executing = True
self.state = STATE_EXECUTING
def close():
if did_start_executing and self.state == STATE_EXECUTING:
self.state = STATE_DEFAULT
yield close
close()
@contextlib.contextmanager
def _interpolation(self, safe=False):
did_start_interpolating = False
if self.state == STATE_DEFAULT:
did_start_interpolating = True
self.output.write('__result += "" + ')
if safe is not True:
self.output.write('__runtime.escape')
self.output.write('((__tmp = (')
self.state = STATE_INTERPOLATING
def close():
if did_start_interpolating and self.state == STATE_INTERPOLATING:
self.output.write(')) == null ? "" : __tmp);')
self.state = STATE_DEFAULT
yield close
close()
@contextlib.contextmanager
@contextlib.contextmanager
def _python_bool_wrapper(self, **kwargs):
use_python_bool_wrapper = kwargs.get('use_python_bool_wrapper')
if use_python_bool_wrapper:
self.output.write('__runtime.boolean(')
with option(kwargs, use_python_bool_wrapper=False):
yield kwargs
if use_python_bool_wrapper:
self.output.write(')')
|
michaelbrooks/twitter-monitor
|
twitter_monitor/listener.py
|
JsonStreamListener.on_status_withheld
|
python
|
def on_status_withheld(self, status_id, user_id, countries):
logger.info('Status %s withheld for user %s', status_id, user_id)
return True
|
Called when a status is withheld
|
train
|
https://github.com/michaelbrooks/twitter-monitor/blob/3f99cea8492d3bdaa16f28a038bc8cf6022222ba/twitter_monitor/listener.py#L91-L94
| null |
class JsonStreamListener(StreamListener):
"""
This extends the Tweepy StreamListener to avoid
closing the streaming connection when certain bad events occur.
Also skips construction of Tweepy's "Status" object since you might
use your own class anyway. Just leaves it a parsed JSON object.
Extending this would allow more conscientious handling of rate
limit messages or other errors, for example.
"""
def __init__(self, api=None):
super(JsonStreamListener, self).__init__(api)
self.streaming_exception = None
self.error = False
def on_data(self, data):
try:
entity = json.loads(data)
if not isinstance(entity, dict):
logger.error("Non-object received: %s", data, exc_info=True)
return True
except ValueError:
logger.error("Invalid data received: %s", data, exc_info=True)
return True
if 'delete' in entity:
status = entity['delete']['status']
return self.on_delete(status['id'], status['user_id'])
elif 'scrub_geo' in entity:
scrub_geo = entity['scrub_geo']
return self.on_scrub_geo(scrub_geo['user_id'], scrub_geo['up_to_status_id'])
elif 'limit' in entity:
limit = entity['limit']
return self.on_limit(limit['track'])
elif 'status_withheld' in entity:
status = entity['status_withheld']
return self.on_status_withheld(status['id'], status['user_id'], status['withheld_in_countries'])
elif 'user_withheld' in entity:
user = entity['user_withheld']
return self.on_user_withheld(user['id'], user['withheld_in_countries'])
elif 'disconnect' in entity:
disconnect = entity['disconnect']
return self.on_disconnect(disconnect['code'], disconnect['stream_name'], disconnect['reason'])
elif 'warning' in entity:
warning = entity['warning']
return self.on_stall_warning(warning['code'], warning['message'], warning['percent_full'])
elif 'in_reply_to_status_id' in entity:
return self.on_status(entity)
else:
return self.on_unknown(entity)
def on_status(self, status):
"""Called when a new status arrives"""
logger.info("Status %s received", status['id'])
return True
def on_delete(self, status_id, user_id):
"""Called when a delete notice arrives for a status"""
logger.info("Delete %s received", status_id)
return True
def on_scrub_geo(self, user_id, up_to_status_id):
"""Called when geolocated data must be stripped for user_id for statuses before up_to_status_id"""
logger.info("Scrub_geo received for user %s", user_id)
return True
def on_limit(self, track):
"""Called when a limitation notice arrvies"""
logger.info('Limit received for %s', track)
return True
def on_user_withheld(self, user_id, countries):
"""Called when a user is withheld"""
logger.info('User %s withheld', user_id)
return True
def on_disconnect(self, code, stream_name, reason):
"""Called when a disconnect is received"""
logger.error('Disconnect message: %s %s %s', code, stream_name, reason)
return True
def on_stall_warning(self, code, message, percent_full):
logger.warning("Stall warning (%s): %s (%s%% full)", code, message, percent_full)
return True
def on_error(self, status_code):
"""Called when a non-200 status code is returned"""
logger.error('Twitter returned error code %s', status_code)
self.error = status_code
return False
def on_unknown(self, entity):
"""Called when an unrecognized object arrives"""
logger.error('Unknown object received: %s', repr(entity))
return True
def on_exception(self, exception):
"""An exception occurred in the streaming thread"""
logger.error('Exception from stream!', exc_info=True)
self.streaming_exception = exception
|
michaelbrooks/twitter-monitor
|
twitter_monitor/listener.py
|
JsonStreamListener.on_disconnect
|
python
|
def on_disconnect(self, code, stream_name, reason):
logger.error('Disconnect message: %s %s %s', code, stream_name, reason)
return True
|
Called when a disconnect is received
|
train
|
https://github.com/michaelbrooks/twitter-monitor/blob/3f99cea8492d3bdaa16f28a038bc8cf6022222ba/twitter_monitor/listener.py#L101-L104
| null |
class JsonStreamListener(StreamListener):
"""
This extends the Tweepy StreamListener to avoid
closing the streaming connection when certain bad events occur.
Also skips construction of Tweepy's "Status" object since you might
use your own class anyway. Just leaves it a parsed JSON object.
Extending this would allow more conscientious handling of rate
limit messages or other errors, for example.
"""
def __init__(self, api=None):
super(JsonStreamListener, self).__init__(api)
self.streaming_exception = None
self.error = False
def on_data(self, data):
try:
entity = json.loads(data)
if not isinstance(entity, dict):
logger.error("Non-object received: %s", data, exc_info=True)
return True
except ValueError:
logger.error("Invalid data received: %s", data, exc_info=True)
return True
if 'delete' in entity:
status = entity['delete']['status']
return self.on_delete(status['id'], status['user_id'])
elif 'scrub_geo' in entity:
scrub_geo = entity['scrub_geo']
return self.on_scrub_geo(scrub_geo['user_id'], scrub_geo['up_to_status_id'])
elif 'limit' in entity:
limit = entity['limit']
return self.on_limit(limit['track'])
elif 'status_withheld' in entity:
status = entity['status_withheld']
return self.on_status_withheld(status['id'], status['user_id'], status['withheld_in_countries'])
elif 'user_withheld' in entity:
user = entity['user_withheld']
return self.on_user_withheld(user['id'], user['withheld_in_countries'])
elif 'disconnect' in entity:
disconnect = entity['disconnect']
return self.on_disconnect(disconnect['code'], disconnect['stream_name'], disconnect['reason'])
elif 'warning' in entity:
warning = entity['warning']
return self.on_stall_warning(warning['code'], warning['message'], warning['percent_full'])
elif 'in_reply_to_status_id' in entity:
return self.on_status(entity)
else:
return self.on_unknown(entity)
def on_status(self, status):
"""Called when a new status arrives"""
logger.info("Status %s received", status['id'])
return True
def on_delete(self, status_id, user_id):
"""Called when a delete notice arrives for a status"""
logger.info("Delete %s received", status_id)
return True
def on_scrub_geo(self, user_id, up_to_status_id):
"""Called when geolocated data must be stripped for user_id for statuses before up_to_status_id"""
logger.info("Scrub_geo received for user %s", user_id)
return True
def on_limit(self, track):
"""Called when a limitation notice arrvies"""
logger.info('Limit received for %s', track)
return True
def on_status_withheld(self, status_id, user_id, countries):
"""Called when a status is withheld"""
logger.info('Status %s withheld for user %s', status_id, user_id)
return True
def on_user_withheld(self, user_id, countries):
"""Called when a user is withheld"""
logger.info('User %s withheld', user_id)
return True
def on_stall_warning(self, code, message, percent_full):
logger.warning("Stall warning (%s): %s (%s%% full)", code, message, percent_full)
return True
def on_error(self, status_code):
"""Called when a non-200 status code is returned"""
logger.error('Twitter returned error code %s', status_code)
self.error = status_code
return False
def on_unknown(self, entity):
"""Called when an unrecognized object arrives"""
logger.error('Unknown object received: %s', repr(entity))
return True
def on_exception(self, exception):
"""An exception occurred in the streaming thread"""
logger.error('Exception from stream!', exc_info=True)
self.streaming_exception = exception
|
michaelbrooks/twitter-monitor
|
twitter_monitor/listener.py
|
JsonStreamListener.on_error
|
python
|
def on_error(self, status_code):
logger.error('Twitter returned error code %s', status_code)
self.error = status_code
return False
|
Called when a non-200 status code is returned
|
train
|
https://github.com/michaelbrooks/twitter-monitor/blob/3f99cea8492d3bdaa16f28a038bc8cf6022222ba/twitter_monitor/listener.py#L110-L114
| null |
class JsonStreamListener(StreamListener):
"""
This extends the Tweepy StreamListener to avoid
closing the streaming connection when certain bad events occur.
Also skips construction of Tweepy's "Status" object since you might
use your own class anyway. Just leaves it a parsed JSON object.
Extending this would allow more conscientious handling of rate
limit messages or other errors, for example.
"""
def __init__(self, api=None):
super(JsonStreamListener, self).__init__(api)
self.streaming_exception = None
self.error = False
def on_data(self, data):
try:
entity = json.loads(data)
if not isinstance(entity, dict):
logger.error("Non-object received: %s", data, exc_info=True)
return True
except ValueError:
logger.error("Invalid data received: %s", data, exc_info=True)
return True
if 'delete' in entity:
status = entity['delete']['status']
return self.on_delete(status['id'], status['user_id'])
elif 'scrub_geo' in entity:
scrub_geo = entity['scrub_geo']
return self.on_scrub_geo(scrub_geo['user_id'], scrub_geo['up_to_status_id'])
elif 'limit' in entity:
limit = entity['limit']
return self.on_limit(limit['track'])
elif 'status_withheld' in entity:
status = entity['status_withheld']
return self.on_status_withheld(status['id'], status['user_id'], status['withheld_in_countries'])
elif 'user_withheld' in entity:
user = entity['user_withheld']
return self.on_user_withheld(user['id'], user['withheld_in_countries'])
elif 'disconnect' in entity:
disconnect = entity['disconnect']
return self.on_disconnect(disconnect['code'], disconnect['stream_name'], disconnect['reason'])
elif 'warning' in entity:
warning = entity['warning']
return self.on_stall_warning(warning['code'], warning['message'], warning['percent_full'])
elif 'in_reply_to_status_id' in entity:
return self.on_status(entity)
else:
return self.on_unknown(entity)
def on_status(self, status):
"""Called when a new status arrives"""
logger.info("Status %s received", status['id'])
return True
def on_delete(self, status_id, user_id):
"""Called when a delete notice arrives for a status"""
logger.info("Delete %s received", status_id)
return True
def on_scrub_geo(self, user_id, up_to_status_id):
"""Called when geolocated data must be stripped for user_id for statuses before up_to_status_id"""
logger.info("Scrub_geo received for user %s", user_id)
return True
def on_limit(self, track):
"""Called when a limitation notice arrvies"""
logger.info('Limit received for %s', track)
return True
def on_status_withheld(self, status_id, user_id, countries):
"""Called when a status is withheld"""
logger.info('Status %s withheld for user %s', status_id, user_id)
return True
def on_user_withheld(self, user_id, countries):
"""Called when a user is withheld"""
logger.info('User %s withheld', user_id)
return True
def on_disconnect(self, code, stream_name, reason):
"""Called when a disconnect is received"""
logger.error('Disconnect message: %s %s %s', code, stream_name, reason)
return True
def on_stall_warning(self, code, message, percent_full):
logger.warning("Stall warning (%s): %s (%s%% full)", code, message, percent_full)
return True
def on_unknown(self, entity):
"""Called when an unrecognized object arrives"""
logger.error('Unknown object received: %s', repr(entity))
return True
def on_exception(self, exception):
"""An exception occurred in the streaming thread"""
logger.error('Exception from stream!', exc_info=True)
self.streaming_exception = exception
|
michaelbrooks/twitter-monitor
|
twitter_monitor/listener.py
|
JsonStreamListener.on_exception
|
python
|
def on_exception(self, exception):
logger.error('Exception from stream!', exc_info=True)
self.streaming_exception = exception
|
An exception occurred in the streaming thread
|
train
|
https://github.com/michaelbrooks/twitter-monitor/blob/3f99cea8492d3bdaa16f28a038bc8cf6022222ba/twitter_monitor/listener.py#L121-L124
| null |
class JsonStreamListener(StreamListener):
"""
This extends the Tweepy StreamListener to avoid
closing the streaming connection when certain bad events occur.
Also skips construction of Tweepy's "Status" object since you might
use your own class anyway. Just leaves it a parsed JSON object.
Extending this would allow more conscientious handling of rate
limit messages or other errors, for example.
"""
def __init__(self, api=None):
super(JsonStreamListener, self).__init__(api)
self.streaming_exception = None
self.error = False
def on_data(self, data):
try:
entity = json.loads(data)
if not isinstance(entity, dict):
logger.error("Non-object received: %s", data, exc_info=True)
return True
except ValueError:
logger.error("Invalid data received: %s", data, exc_info=True)
return True
if 'delete' in entity:
status = entity['delete']['status']
return self.on_delete(status['id'], status['user_id'])
elif 'scrub_geo' in entity:
scrub_geo = entity['scrub_geo']
return self.on_scrub_geo(scrub_geo['user_id'], scrub_geo['up_to_status_id'])
elif 'limit' in entity:
limit = entity['limit']
return self.on_limit(limit['track'])
elif 'status_withheld' in entity:
status = entity['status_withheld']
return self.on_status_withheld(status['id'], status['user_id'], status['withheld_in_countries'])
elif 'user_withheld' in entity:
user = entity['user_withheld']
return self.on_user_withheld(user['id'], user['withheld_in_countries'])
elif 'disconnect' in entity:
disconnect = entity['disconnect']
return self.on_disconnect(disconnect['code'], disconnect['stream_name'], disconnect['reason'])
elif 'warning' in entity:
warning = entity['warning']
return self.on_stall_warning(warning['code'], warning['message'], warning['percent_full'])
elif 'in_reply_to_status_id' in entity:
return self.on_status(entity)
else:
return self.on_unknown(entity)
def on_status(self, status):
"""Called when a new status arrives"""
logger.info("Status %s received", status['id'])
return True
def on_delete(self, status_id, user_id):
"""Called when a delete notice arrives for a status"""
logger.info("Delete %s received", status_id)
return True
def on_scrub_geo(self, user_id, up_to_status_id):
"""Called when geolocated data must be stripped for user_id for statuses before up_to_status_id"""
logger.info("Scrub_geo received for user %s", user_id)
return True
def on_limit(self, track):
"""Called when a limitation notice arrvies"""
logger.info('Limit received for %s', track)
return True
def on_status_withheld(self, status_id, user_id, countries):
"""Called when a status is withheld"""
logger.info('Status %s withheld for user %s', status_id, user_id)
return True
def on_user_withheld(self, user_id, countries):
"""Called when a user is withheld"""
logger.info('User %s withheld', user_id)
return True
def on_disconnect(self, code, stream_name, reason):
"""Called when a disconnect is received"""
logger.error('Disconnect message: %s %s %s', code, stream_name, reason)
return True
def on_stall_warning(self, code, message, percent_full):
logger.warning("Stall warning (%s): %s (%s%% full)", code, message, percent_full)
return True
def on_error(self, status_code):
"""Called when a non-200 status code is returned"""
logger.error('Twitter returned error code %s', status_code)
self.error = status_code
return False
def on_unknown(self, entity):
"""Called when an unrecognized object arrives"""
logger.error('Unknown object received: %s', repr(entity))
return True
|
michaelbrooks/twitter-monitor
|
twitter_monitor/checker.py
|
TermChecker.check
|
python
|
def check(self):
    """
    Checks if the list of tracked terms has changed.
    Returns True if changed, otherwise False.

    The freshly fetched set is stored for the next comparison
    regardless of whether it changed.
    """
    new_tracking_terms = self.update_tracking_terms()

    removed = self._tracking_terms_set - new_tracking_terms
    added = new_tracking_terms - self._tracking_terms_set
    if removed:
        logging.debug("Some tracking terms removed")
    if added:
        logging.debug("Some tracking terms added")

    # Use inequality rather than superset/subset tests: a simultaneous
    # add + remove (a replaced term) satisfies neither ">" nor "<" and
    # would previously have gone undetected.
    terms_changed = self._tracking_terms_set != new_tracking_terms

    # Go ahead and store for later
    self._tracking_terms_set = new_tracking_terms

    # If the terms changed, we need to restart the stream
    return terms_changed
|
Checks if the list of tracked terms has changed.
Returns True if changed, otherwise False.
|
train
|
https://github.com/michaelbrooks/twitter-monitor/blob/3f99cea8492d3bdaa16f28a038bc8cf6022222ba/twitter_monitor/checker.py#L33-L57
|
[
"def update_tracking_terms(self):\n \"\"\"\n Retrieve the current set of tracked terms from wherever it is stored.\n Subclasses may check in files, databases, etc...\n\n Should return a set of strings.\n \"\"\"\n return set(['#afakehashtag'])\n"
] |
class TermChecker(object):
    """
    Keeps the current set of tracked terms and knows how to refresh it.

    Intended to be subclassed: override update_tracking_terms() to pull
    terms from a file, database, or other store.
    """

    def __init__(self):
        # Start with no tracked terms; reset() gives us an empty set.
        self.reset()

    def update_tracking_terms(self):
        """
        Fetch the current set of tracked terms from wherever it is kept.
        Subclasses may read files, query databases, etc...

        Must return a set of strings.
        """
        return {'#afakehashtag'}

    def reset(self):
        """
        Forget every tracked term.
        """
        self._tracking_terms_set = set()

    def tracking_terms(self):
        """
        Return the currently tracked terms as a list.
        """
        return list(self._tracking_terms_set)
|
michaelbrooks/twitter-monitor
|
twitter_monitor/checker.py
|
FileTermChecker.update_tracking_terms
|
python
|
def update_tracking_terms(self):
    """
    Read tracked terms from the file, one per line.

    Blank lines (and surrounding whitespace) are skipped.
    """
    import codecs
    with codecs.open(self.filename, "r", encoding='utf8') as term_file:
        # Strip each raw line, keep only the non-empty results.
        stripped = (raw.strip() for raw in term_file.readlines())
        return {term for term in stripped if term}
|
Terms must be one-per-line.
Blank lines will be skipped.
|
train
|
https://github.com/michaelbrooks/twitter-monitor/blob/3f99cea8492d3bdaa16f28a038bc8cf6022222ba/twitter_monitor/checker.py#L75-L92
| null |
class FileTermChecker(TermChecker):
    """
    A TermChecker whose tracked terms live in a text file.
    """

    def __init__(self, filename):
        """Remember which file to read terms from."""
        super(FileTermChecker, self).__init__()
        self.filename = filename
|
michaelbrooks/twitter-monitor
|
twitter_monitor/basic_stream.py
|
launch_debugger
|
python
|
def launch_debugger(frame, stream=None):
    """
    Interrupt running process, and provide a python prompt for
    interactive debugging.
    """
    import code, traceback

    namespace = {'_frame': frame}      # Allow access to frame object.
    namespace.update(frame.f_globals)  # Unless shadowed by global
    namespace.update(frame.f_locals)   # Locals take precedence.

    banner = "Signal received : entering python shell.\nTraceback:\n"
    banner += ''.join(traceback.format_stack(frame))
    code.InteractiveConsole(namespace).interact(banner)
|
Interrupt running process, and provide a python prompt for
interactive debugging.
|
train
|
https://github.com/michaelbrooks/twitter-monitor/blob/3f99cea8492d3bdaa16f28a038bc8cf6022222ba/twitter_monitor/basic_stream.py#L75-L90
| null |
"""
A simple streaming helper that takes
minimal configuration as arguments and starts
a stream to stdout.
"""
import os
import signal
import logging
import time
import json
import tweepy
from .listener import JsonStreamListener
from .checker import FileTermChecker
from .stream import DynamicTwitterStream
logger = logging.getLogger(__name__)
__all__ = ['start']
class PrintingListener(JsonStreamListener):
"""A listener that writes to a file or stdout"""
def __init__(self, api=None, out=None):
super(PrintingListener, self).__init__(api)
if out is None:
import sys
out = sys.stdout
self.out = out
self.terminate = False
self.received = 0
self.since = time.time()
def on_status(self, status):
"""Print out some tweets"""
self.out.write(json.dumps(status))
self.out.write(os.linesep)
self.received += 1
return not self.terminate
def set_terminate(self):
"""Notify the tweepy stream that it should quit"""
self.terminate = True
def print_status(self):
"""Print out the current tweet rate and reset the counter"""
tweets = self.received
now = time.time()
diff = now - self.since
self.since = now
self.received = 0
if diff > 0:
logger.info("Receiving tweets at %s tps", tweets / diff)
class BasicFileTermChecker(FileTermChecker):
"""Modified to print out status periodically"""
def __init__(self, filename, listener):
logger.info("Monitoring track file %s", filename)
super(BasicFileTermChecker, self).__init__(filename)
self.listener = listener
def update_tracking_terms(self):
self.listener.print_status()
return super(BasicFileTermChecker, self).update_tracking_terms()
def set_debug_listener(stream):
"""Break into a debugger if receives the SIGUSR1 signal"""
def debugger(sig, frame):
launch_debugger(frame, stream)
if hasattr(signal, 'SIGUSR1'):
signal.signal(signal.SIGUSR1, debugger)
else:
logger.warn("Cannot set SIGUSR1 signal for debug mode.")
def terminate(listener):
"""
Exit cleanly.
"""
logger.info("Stopping because of signal")
# Let the tweet listener know it should be quitting asap
listener.set_terminate()
raise SystemExit()
def set_terminate_listeners(stream):
"""Die on SIGTERM or SIGINT"""
def stop(signum, frame):
terminate(stream.listener)
# Installs signal handlers for handling SIGINT and SIGTERM
# gracefully.
signal.signal(signal.SIGINT, stop)
signal.signal(signal.SIGTERM, stop)
def get_tweepy_auth(twitter_api_key,
twitter_api_secret,
twitter_access_token,
twitter_access_token_secret):
"""Make a tweepy auth object"""
auth = tweepy.OAuthHandler(twitter_api_key, twitter_api_secret)
auth.set_access_token(twitter_access_token, twitter_access_token_secret)
return auth
def construct_listener(outfile=None):
"""Create the listener that prints tweets"""
if outfile is not None:
if os.path.exists(outfile):
raise IOError("File %s already exists" % outfile)
outfile = open(outfile, 'wb')
return PrintingListener(out=outfile)
def should_continue():
return True
def begin_stream_loop(stream, poll_interval):
"""Start and maintain the streaming connection..."""
while should_continue():
try:
stream.start_polling(poll_interval)
except Exception as e:
# Infinite restart
logger.error("Exception while polling. Restarting in 1 second.", exc_info=True)
time.sleep(1) # to avoid craziness
def start(track_file,
twitter_api_key,
twitter_api_secret,
twitter_access_token,
twitter_access_token_secret,
poll_interval=15,
unfiltered=False,
languages=None,
debug=False,
outfile=None):
"""Start the stream."""
listener = construct_listener(outfile)
checker = BasicFileTermChecker(track_file, listener)
auth = get_tweepy_auth(twitter_api_key,
twitter_api_secret,
twitter_access_token,
twitter_access_token_secret)
stream = DynamicTwitterStream(auth, listener, checker, unfiltered=unfiltered, languages=languages)
set_terminate_listeners(stream)
if debug:
set_debug_listener(stream)
begin_stream_loop(stream, poll_interval)
|
michaelbrooks/twitter-monitor
|
twitter_monitor/basic_stream.py
|
set_debug_listener
|
python
|
def set_debug_listener(stream):
    """Break into a debugger if receives the SIGUSR1 signal.

    On platforms without SIGUSR1 (e.g. Windows) a warning is logged and
    no handler is installed.
    """
    def debugger(sig, frame):
        # Runs inside the signal handler: drop into an interactive console.
        launch_debugger(frame, stream)

    if hasattr(signal, 'SIGUSR1'):
        signal.signal(signal.SIGUSR1, debugger)
    else:
        # Logger.warn is deprecated; warning() is the supported spelling.
        logger.warning("Cannot set SIGUSR1 signal for debug mode.")
|
Break into a debugger if receives the SIGUSR1 signal
|
train
|
https://github.com/michaelbrooks/twitter-monitor/blob/3f99cea8492d3bdaa16f28a038bc8cf6022222ba/twitter_monitor/basic_stream.py#L93-L102
| null |
"""
A simple streaming helper that takes
minimal configuration as arguments and starts
a stream to stdout.
"""
import os
import signal
import logging
import time
import json
import tweepy
from .listener import JsonStreamListener
from .checker import FileTermChecker
from .stream import DynamicTwitterStream
logger = logging.getLogger(__name__)
__all__ = ['start']
class PrintingListener(JsonStreamListener):
"""A listener that writes to a file or stdout"""
def __init__(self, api=None, out=None):
super(PrintingListener, self).__init__(api)
if out is None:
import sys
out = sys.stdout
self.out = out
self.terminate = False
self.received = 0
self.since = time.time()
def on_status(self, status):
"""Print out some tweets"""
self.out.write(json.dumps(status))
self.out.write(os.linesep)
self.received += 1
return not self.terminate
def set_terminate(self):
"""Notify the tweepy stream that it should quit"""
self.terminate = True
def print_status(self):
"""Print out the current tweet rate and reset the counter"""
tweets = self.received
now = time.time()
diff = now - self.since
self.since = now
self.received = 0
if diff > 0:
logger.info("Receiving tweets at %s tps", tweets / diff)
class BasicFileTermChecker(FileTermChecker):
"""Modified to print out status periodically"""
def __init__(self, filename, listener):
logger.info("Monitoring track file %s", filename)
super(BasicFileTermChecker, self).__init__(filename)
self.listener = listener
def update_tracking_terms(self):
self.listener.print_status()
return super(BasicFileTermChecker, self).update_tracking_terms()
def launch_debugger(frame, stream=None):
"""
Interrupt running process, and provide a python prompt for
interactive debugging.
"""
d = {'_frame': frame} # Allow access to frame object.
d.update(frame.f_globals) # Unless shadowed by global
d.update(frame.f_locals)
import code, traceback
i = code.InteractiveConsole(d)
message = "Signal received : entering python shell.\nTraceback:\n"
message += ''.join(traceback.format_stack(frame))
i.interact(message)
def terminate(listener):
"""
Exit cleanly.
"""
logger.info("Stopping because of signal")
# Let the tweet listener know it should be quitting asap
listener.set_terminate()
raise SystemExit()
def set_terminate_listeners(stream):
"""Die on SIGTERM or SIGINT"""
def stop(signum, frame):
terminate(stream.listener)
# Installs signal handlers for handling SIGINT and SIGTERM
# gracefully.
signal.signal(signal.SIGINT, stop)
signal.signal(signal.SIGTERM, stop)
def get_tweepy_auth(twitter_api_key,
twitter_api_secret,
twitter_access_token,
twitter_access_token_secret):
"""Make a tweepy auth object"""
auth = tweepy.OAuthHandler(twitter_api_key, twitter_api_secret)
auth.set_access_token(twitter_access_token, twitter_access_token_secret)
return auth
def construct_listener(outfile=None):
"""Create the listener that prints tweets"""
if outfile is not None:
if os.path.exists(outfile):
raise IOError("File %s already exists" % outfile)
outfile = open(outfile, 'wb')
return PrintingListener(out=outfile)
def should_continue():
return True
def begin_stream_loop(stream, poll_interval):
"""Start and maintain the streaming connection..."""
while should_continue():
try:
stream.start_polling(poll_interval)
except Exception as e:
# Infinite restart
logger.error("Exception while polling. Restarting in 1 second.", exc_info=True)
time.sleep(1) # to avoid craziness
def start(track_file,
twitter_api_key,
twitter_api_secret,
twitter_access_token,
twitter_access_token_secret,
poll_interval=15,
unfiltered=False,
languages=None,
debug=False,
outfile=None):
"""Start the stream."""
listener = construct_listener(outfile)
checker = BasicFileTermChecker(track_file, listener)
auth = get_tweepy_auth(twitter_api_key,
twitter_api_secret,
twitter_access_token,
twitter_access_token_secret)
stream = DynamicTwitterStream(auth, listener, checker, unfiltered=unfiltered, languages=languages)
set_terminate_listeners(stream)
if debug:
set_debug_listener(stream)
begin_stream_loop(stream, poll_interval)
|
michaelbrooks/twitter-monitor
|
twitter_monitor/basic_stream.py
|
set_terminate_listeners
|
python
|
def set_terminate_listeners(stream):
    """Die on SIGTERM or SIGINT"""

    def stop(signum, frame):
        # Ask the stream's listener to shut everything down cleanly.
        terminate(stream.listener)

    # Install the same graceful-shutdown handler for both signals.
    for sig in (signal.SIGINT, signal.SIGTERM):
        signal.signal(sig, stop)
|
Die on SIGTERM or SIGINT
|
train
|
https://github.com/michaelbrooks/twitter-monitor/blob/3f99cea8492d3bdaa16f28a038bc8cf6022222ba/twitter_monitor/basic_stream.py#L115-L124
| null |
"""
A simple streaming helper that takes
minimal configuration as arguments and starts
a stream to stdout.
"""
import os
import signal
import logging
import time
import json
import tweepy
from .listener import JsonStreamListener
from .checker import FileTermChecker
from .stream import DynamicTwitterStream
logger = logging.getLogger(__name__)
__all__ = ['start']
class PrintingListener(JsonStreamListener):
"""A listener that writes to a file or stdout"""
def __init__(self, api=None, out=None):
super(PrintingListener, self).__init__(api)
if out is None:
import sys
out = sys.stdout
self.out = out
self.terminate = False
self.received = 0
self.since = time.time()
def on_status(self, status):
"""Print out some tweets"""
self.out.write(json.dumps(status))
self.out.write(os.linesep)
self.received += 1
return not self.terminate
def set_terminate(self):
"""Notify the tweepy stream that it should quit"""
self.terminate = True
def print_status(self):
"""Print out the current tweet rate and reset the counter"""
tweets = self.received
now = time.time()
diff = now - self.since
self.since = now
self.received = 0
if diff > 0:
logger.info("Receiving tweets at %s tps", tweets / diff)
class BasicFileTermChecker(FileTermChecker):
"""Modified to print out status periodically"""
def __init__(self, filename, listener):
logger.info("Monitoring track file %s", filename)
super(BasicFileTermChecker, self).__init__(filename)
self.listener = listener
def update_tracking_terms(self):
self.listener.print_status()
return super(BasicFileTermChecker, self).update_tracking_terms()
def launch_debugger(frame, stream=None):
"""
Interrupt running process, and provide a python prompt for
interactive debugging.
"""
d = {'_frame': frame} # Allow access to frame object.
d.update(frame.f_globals) # Unless shadowed by global
d.update(frame.f_locals)
import code, traceback
i = code.InteractiveConsole(d)
message = "Signal received : entering python shell.\nTraceback:\n"
message += ''.join(traceback.format_stack(frame))
i.interact(message)
def set_debug_listener(stream):
"""Break into a debugger if receives the SIGUSR1 signal"""
def debugger(sig, frame):
launch_debugger(frame, stream)
if hasattr(signal, 'SIGUSR1'):
signal.signal(signal.SIGUSR1, debugger)
else:
logger.warn("Cannot set SIGUSR1 signal for debug mode.")
def terminate(listener):
"""
Exit cleanly.
"""
logger.info("Stopping because of signal")
# Let the tweet listener know it should be quitting asap
listener.set_terminate()
raise SystemExit()
def get_tweepy_auth(twitter_api_key,
twitter_api_secret,
twitter_access_token,
twitter_access_token_secret):
"""Make a tweepy auth object"""
auth = tweepy.OAuthHandler(twitter_api_key, twitter_api_secret)
auth.set_access_token(twitter_access_token, twitter_access_token_secret)
return auth
def construct_listener(outfile=None):
"""Create the listener that prints tweets"""
if outfile is not None:
if os.path.exists(outfile):
raise IOError("File %s already exists" % outfile)
outfile = open(outfile, 'wb')
return PrintingListener(out=outfile)
def should_continue():
return True
def begin_stream_loop(stream, poll_interval):
"""Start and maintain the streaming connection..."""
while should_continue():
try:
stream.start_polling(poll_interval)
except Exception as e:
# Infinite restart
logger.error("Exception while polling. Restarting in 1 second.", exc_info=True)
time.sleep(1) # to avoid craziness
def start(track_file,
twitter_api_key,
twitter_api_secret,
twitter_access_token,
twitter_access_token_secret,
poll_interval=15,
unfiltered=False,
languages=None,
debug=False,
outfile=None):
"""Start the stream."""
listener = construct_listener(outfile)
checker = BasicFileTermChecker(track_file, listener)
auth = get_tweepy_auth(twitter_api_key,
twitter_api_secret,
twitter_access_token,
twitter_access_token_secret)
stream = DynamicTwitterStream(auth, listener, checker, unfiltered=unfiltered, languages=languages)
set_terminate_listeners(stream)
if debug:
set_debug_listener(stream)
begin_stream_loop(stream, poll_interval)
|
michaelbrooks/twitter-monitor
|
twitter_monitor/basic_stream.py
|
get_tweepy_auth
|
python
|
def get_tweepy_auth(twitter_api_key,
                    twitter_api_secret,
                    twitter_access_token,
                    twitter_access_token_secret):
    """Make a tweepy auth object.

    Builds an OAuth handler from the application key/secret and
    attaches the user access token/secret to it.
    """
    auth = tweepy.OAuthHandler(twitter_api_key, twitter_api_secret)
    auth.set_access_token(twitter_access_token, twitter_access_token_secret)
    return auth
|
Make a tweepy auth object
|
train
|
https://github.com/michaelbrooks/twitter-monitor/blob/3f99cea8492d3bdaa16f28a038bc8cf6022222ba/twitter_monitor/basic_stream.py#L127-L134
| null |
"""
A simple streaming helper that takes
minimal configuration as arguments and starts
a stream to stdout.
"""
import os
import signal
import logging
import time
import json
import tweepy
from .listener import JsonStreamListener
from .checker import FileTermChecker
from .stream import DynamicTwitterStream
logger = logging.getLogger(__name__)
__all__ = ['start']
class PrintingListener(JsonStreamListener):
"""A listener that writes to a file or stdout"""
def __init__(self, api=None, out=None):
super(PrintingListener, self).__init__(api)
if out is None:
import sys
out = sys.stdout
self.out = out
self.terminate = False
self.received = 0
self.since = time.time()
def on_status(self, status):
"""Print out some tweets"""
self.out.write(json.dumps(status))
self.out.write(os.linesep)
self.received += 1
return not self.terminate
def set_terminate(self):
"""Notify the tweepy stream that it should quit"""
self.terminate = True
def print_status(self):
"""Print out the current tweet rate and reset the counter"""
tweets = self.received
now = time.time()
diff = now - self.since
self.since = now
self.received = 0
if diff > 0:
logger.info("Receiving tweets at %s tps", tweets / diff)
class BasicFileTermChecker(FileTermChecker):
"""Modified to print out status periodically"""
def __init__(self, filename, listener):
logger.info("Monitoring track file %s", filename)
super(BasicFileTermChecker, self).__init__(filename)
self.listener = listener
def update_tracking_terms(self):
self.listener.print_status()
return super(BasicFileTermChecker, self).update_tracking_terms()
def launch_debugger(frame, stream=None):
"""
Interrupt running process, and provide a python prompt for
interactive debugging.
"""
d = {'_frame': frame} # Allow access to frame object.
d.update(frame.f_globals) # Unless shadowed by global
d.update(frame.f_locals)
import code, traceback
i = code.InteractiveConsole(d)
message = "Signal received : entering python shell.\nTraceback:\n"
message += ''.join(traceback.format_stack(frame))
i.interact(message)
def set_debug_listener(stream):
"""Break into a debugger if receives the SIGUSR1 signal"""
def debugger(sig, frame):
launch_debugger(frame, stream)
if hasattr(signal, 'SIGUSR1'):
signal.signal(signal.SIGUSR1, debugger)
else:
logger.warn("Cannot set SIGUSR1 signal for debug mode.")
def terminate(listener):
"""
Exit cleanly.
"""
logger.info("Stopping because of signal")
# Let the tweet listener know it should be quitting asap
listener.set_terminate()
raise SystemExit()
def set_terminate_listeners(stream):
"""Die on SIGTERM or SIGINT"""
def stop(signum, frame):
terminate(stream.listener)
# Installs signal handlers for handling SIGINT and SIGTERM
# gracefully.
signal.signal(signal.SIGINT, stop)
signal.signal(signal.SIGTERM, stop)
def construct_listener(outfile=None):
"""Create the listener that prints tweets"""
if outfile is not None:
if os.path.exists(outfile):
raise IOError("File %s already exists" % outfile)
outfile = open(outfile, 'wb')
return PrintingListener(out=outfile)
def should_continue():
return True
def begin_stream_loop(stream, poll_interval):
"""Start and maintain the streaming connection..."""
while should_continue():
try:
stream.start_polling(poll_interval)
except Exception as e:
# Infinite restart
logger.error("Exception while polling. Restarting in 1 second.", exc_info=True)
time.sleep(1) # to avoid craziness
def start(track_file,
twitter_api_key,
twitter_api_secret,
twitter_access_token,
twitter_access_token_secret,
poll_interval=15,
unfiltered=False,
languages=None,
debug=False,
outfile=None):
"""Start the stream."""
listener = construct_listener(outfile)
checker = BasicFileTermChecker(track_file, listener)
auth = get_tweepy_auth(twitter_api_key,
twitter_api_secret,
twitter_access_token,
twitter_access_token_secret)
stream = DynamicTwitterStream(auth, listener, checker, unfiltered=unfiltered, languages=languages)
set_terminate_listeners(stream)
if debug:
set_debug_listener(stream)
begin_stream_loop(stream, poll_interval)
|
michaelbrooks/twitter-monitor
|
twitter_monitor/basic_stream.py
|
construct_listener
|
python
|
def construct_listener(outfile=None):
    """Create the listener that prints tweets.

    If outfile is given, tweets are written to that file; otherwise the
    listener writes to stdout. Refuses to clobber an existing file.

    Raises:
        IOError: if outfile already exists.
    """
    if outfile is not None:
        if os.path.exists(outfile):
            raise IOError("File %s already exists" % outfile)
        # PrintingListener writes str (json.dumps + os.linesep), so the
        # file must be opened in text mode; 'wb' would raise TypeError
        # on the first write under Python 3.
        outfile = open(outfile, 'w', encoding='utf-8')
    return PrintingListener(out=outfile)
|
Create the listener that prints tweets
|
train
|
https://github.com/michaelbrooks/twitter-monitor/blob/3f99cea8492d3bdaa16f28a038bc8cf6022222ba/twitter_monitor/basic_stream.py#L137-L145
| null |
"""
A simple streaming helper that takes
minimal configuration as arguments and starts
a stream to stdout.
"""
import os
import signal
import logging
import time
import json
import tweepy
from .listener import JsonStreamListener
from .checker import FileTermChecker
from .stream import DynamicTwitterStream
logger = logging.getLogger(__name__)
__all__ = ['start']
class PrintingListener(JsonStreamListener):
    """Stream listener that serialises each tweet as JSON to a file-like object.

    Writes to sys.stdout when no output is supplied, and keeps a simple
    received-tweet counter so the current throughput can be reported.
    """

    def __init__(self, api=None, out=None):
        super(PrintingListener, self).__init__(api)
        if out is None:
            import sys
            out = sys.stdout
        self.out = out
        self.terminate = False  # flipped by set_terminate() to stop tweepy
        self.received = 0       # tweets seen since the last print_status()
        self.since = time.time()

    def on_status(self, status):
        """Write one tweet as JSON followed by a line separator."""
        self.out.write(json.dumps(status) + os.linesep)
        self.received += 1
        # tweepy keeps streaming while this returns True
        return not self.terminate

    def set_terminate(self):
        """Notify the tweepy stream that it should quit"""
        self.terminate = True

    def print_status(self):
        """Log the current tweets-per-second rate and reset the counter."""
        count = self.received
        current = time.time()
        elapsed = current - self.since
        self.since = current
        self.received = 0
        if elapsed > 0:
            logger.info("Receiving tweets at %s tps", count / elapsed)
class BasicFileTermChecker(FileTermChecker):
    """FileTermChecker variant that reports listener throughput on each poll."""

    def __init__(self, filename, listener):
        logger.info("Monitoring track file %s", filename)
        super(BasicFileTermChecker, self).__init__(filename)
        self.listener = listener

    def update_tracking_terms(self):
        # Emit the tweets-per-second status line before refreshing terms.
        self.listener.print_status()
        return super(BasicFileTermChecker, self).update_tracking_terms()
def launch_debugger(frame, stream=None):
    """
    Interrupt running process, and provide a python prompt for
    interactive debugging.

    The console namespace exposes the interrupted frame's globals and
    locals, plus the frame itself as ``_frame``.
    """
    import code
    import traceback

    namespace = {'_frame': frame}       # Allow access to frame object.
    namespace.update(frame.f_globals)   # Unless shadowed by global
    namespace.update(frame.f_locals)

    banner = "Signal received : entering python shell.\nTraceback:\n"
    banner += ''.join(traceback.format_stack(frame))
    code.InteractiveConsole(namespace).interact(banner)
def set_debug_listener(stream):
    """Install a SIGUSR1 handler that drops into an interactive debugger.

    On platforms without SIGUSR1 (e.g. Windows) a warning is logged and
    debug mode is unavailable.

    Args:
        stream: The DynamicTwitterStream handed to the debugger session.
    """
    def debugger(sig, frame):
        launch_debugger(frame, stream)

    if hasattr(signal, 'SIGUSR1'):
        signal.signal(signal.SIGUSR1, debugger)
    else:
        # Logger.warn() is deprecated; warning() is the supported spelling.
        logger.warning("Cannot set SIGUSR1 signal for debug mode.")
def terminate(listener):
    """
    Exit cleanly.

    Asks the tweet listener to stop as soon as possible, then exits the
    process by raising SystemExit.
    """
    logger.info("Stopping because of signal")
    listener.set_terminate()  # let the listener know it should quit asap
    raise SystemExit()
def set_terminate_listeners(stream):
    """Die on SIGTERM or SIGINT: register handlers that shut the stream down."""

    def stop(signum, frame):
        terminate(stream.listener)

    # Handle SIGINT and SIGTERM gracefully with the same shutdown path.
    for sig in (signal.SIGINT, signal.SIGTERM):
        signal.signal(sig, stop)
def get_tweepy_auth(twitter_api_key,
                    twitter_api_secret,
                    twitter_access_token,
                    twitter_access_token_secret):
    """Build and return a tweepy OAuthHandler configured with app and user creds."""
    handler = tweepy.OAuthHandler(twitter_api_key, twitter_api_secret)
    handler.set_access_token(twitter_access_token, twitter_access_token_secret)
    return handler
def construct_listener(outfile=None):
"""Create the listener that prints tweets"""
if outfile is not None:
if os.path.exists(outfile):
raise IOError("File %s already exists" % outfile)
outfile = open(outfile, 'wb')
return PrintingListener(out=outfile)
def should_continue():
return True
def begin_stream_loop(stream, poll_interval):
"""Start and maintain the streaming connection..."""
while should_continue():
try:
stream.start_polling(poll_interval)
except Exception as e:
# Infinite restart
logger.error("Exception while polling. Restarting in 1 second.", exc_info=True)
time.sleep(1) # to avoid craziness
def start(track_file,
twitter_api_key,
twitter_api_secret,
twitter_access_token,
twitter_access_token_secret,
poll_interval=15,
unfiltered=False,
languages=None,
debug=False,
outfile=None):
"""Start the stream."""
listener = construct_listener(outfile)
checker = BasicFileTermChecker(track_file, listener)
auth = get_tweepy_auth(twitter_api_key,
twitter_api_secret,
twitter_access_token,
twitter_access_token_secret)
stream = DynamicTwitterStream(auth, listener, checker, unfiltered=unfiltered, languages=languages)
set_terminate_listeners(stream)
if debug:
set_debug_listener(stream)
begin_stream_loop(stream, poll_interval)
|
michaelbrooks/twitter-monitor
|
twitter_monitor/basic_stream.py
|
begin_stream_loop
|
python
|
def begin_stream_loop(stream, poll_interval):
while should_continue():
try:
stream.start_polling(poll_interval)
except Exception as e:
# Infinite restart
logger.error("Exception while polling. Restarting in 1 second.", exc_info=True)
time.sleep(1)
|
Start and maintain the streaming connection...
|
train
|
https://github.com/michaelbrooks/twitter-monitor/blob/3f99cea8492d3bdaa16f28a038bc8cf6022222ba/twitter_monitor/basic_stream.py#L150-L158
|
[
"def should_continue():\n return True\n",
"def start_polling(self, interval):\n \"\"\"\n Start polling for term updates and streaming.\n \"\"\"\n interval = float(interval)\n\n self.polling = True\n\n # clear the stored list of terms - we aren't tracking any\n self.term_checker.reset()\n\n logger.info(\"Starting polling for changes to the track list\")\n while self.polling:\n\n loop_start = time()\n\n self.update_stream()\n self.handle_exceptions()\n\n # wait for the interval unless interrupted, compensating for time elapsed in the loop\n elapsed = time() - loop_start\n sleep(max(0.1, interval - elapsed))\n\n logger.warning(\"Term poll ceased!\")\n"
] |
"""
A simple streaming helper that takes
minimal configuration as arguments and starts
a stream to stdout.
"""
import os
import signal
import logging
import time
import json
import tweepy
from .listener import JsonStreamListener
from .checker import FileTermChecker
from .stream import DynamicTwitterStream
logger = logging.getLogger(__name__)
__all__ = ['start']
class PrintingListener(JsonStreamListener):
"""A listener that writes to a file or stdout"""
def __init__(self, api=None, out=None):
super(PrintingListener, self).__init__(api)
if out is None:
import sys
out = sys.stdout
self.out = out
self.terminate = False
self.received = 0
self.since = time.time()
def on_status(self, status):
"""Print out some tweets"""
self.out.write(json.dumps(status))
self.out.write(os.linesep)
self.received += 1
return not self.terminate
def set_terminate(self):
"""Notify the tweepy stream that it should quit"""
self.terminate = True
def print_status(self):
"""Print out the current tweet rate and reset the counter"""
tweets = self.received
now = time.time()
diff = now - self.since
self.since = now
self.received = 0
if diff > 0:
logger.info("Receiving tweets at %s tps", tweets / diff)
class BasicFileTermChecker(FileTermChecker):
"""Modified to print out status periodically"""
def __init__(self, filename, listener):
logger.info("Monitoring track file %s", filename)
super(BasicFileTermChecker, self).__init__(filename)
self.listener = listener
def update_tracking_terms(self):
self.listener.print_status()
return super(BasicFileTermChecker, self).update_tracking_terms()
def launch_debugger(frame, stream=None):
"""
Interrupt running process, and provide a python prompt for
interactive debugging.
"""
d = {'_frame': frame} # Allow access to frame object.
d.update(frame.f_globals) # Unless shadowed by global
d.update(frame.f_locals)
import code, traceback
i = code.InteractiveConsole(d)
message = "Signal received : entering python shell.\nTraceback:\n"
message += ''.join(traceback.format_stack(frame))
i.interact(message)
def set_debug_listener(stream):
"""Break into a debugger if receives the SIGUSR1 signal"""
def debugger(sig, frame):
launch_debugger(frame, stream)
if hasattr(signal, 'SIGUSR1'):
signal.signal(signal.SIGUSR1, debugger)
else:
logger.warn("Cannot set SIGUSR1 signal for debug mode.")
def terminate(listener):
"""
Exit cleanly.
"""
logger.info("Stopping because of signal")
# Let the tweet listener know it should be quitting asap
listener.set_terminate()
raise SystemExit()
def set_terminate_listeners(stream):
"""Die on SIGTERM or SIGINT"""
def stop(signum, frame):
terminate(stream.listener)
# Installs signal handlers for handling SIGINT and SIGTERM
# gracefully.
signal.signal(signal.SIGINT, stop)
signal.signal(signal.SIGTERM, stop)
def get_tweepy_auth(twitter_api_key,
twitter_api_secret,
twitter_access_token,
twitter_access_token_secret):
"""Make a tweepy auth object"""
auth = tweepy.OAuthHandler(twitter_api_key, twitter_api_secret)
auth.set_access_token(twitter_access_token, twitter_access_token_secret)
return auth
def construct_listener(outfile=None):
"""Create the listener that prints tweets"""
if outfile is not None:
if os.path.exists(outfile):
raise IOError("File %s already exists" % outfile)
outfile = open(outfile, 'wb')
return PrintingListener(out=outfile)
def should_continue():
return True
# to avoid craziness
def start(track_file,
twitter_api_key,
twitter_api_secret,
twitter_access_token,
twitter_access_token_secret,
poll_interval=15,
unfiltered=False,
languages=None,
debug=False,
outfile=None):
"""Start the stream."""
listener = construct_listener(outfile)
checker = BasicFileTermChecker(track_file, listener)
auth = get_tweepy_auth(twitter_api_key,
twitter_api_secret,
twitter_access_token,
twitter_access_token_secret)
stream = DynamicTwitterStream(auth, listener, checker, unfiltered=unfiltered, languages=languages)
set_terminate_listeners(stream)
if debug:
set_debug_listener(stream)
begin_stream_loop(stream, poll_interval)
|
michaelbrooks/twitter-monitor
|
twitter_monitor/basic_stream.py
|
start
|
python
|
def start(track_file,
twitter_api_key,
twitter_api_secret,
twitter_access_token,
twitter_access_token_secret,
poll_interval=15,
unfiltered=False,
languages=None,
debug=False,
outfile=None):
listener = construct_listener(outfile)
checker = BasicFileTermChecker(track_file, listener)
auth = get_tweepy_auth(twitter_api_key,
twitter_api_secret,
twitter_access_token,
twitter_access_token_secret)
stream = DynamicTwitterStream(auth, listener, checker, unfiltered=unfiltered, languages=languages)
set_terminate_listeners(stream)
if debug:
set_debug_listener(stream)
begin_stream_loop(stream, poll_interval)
|
Start the stream.
|
train
|
https://github.com/michaelbrooks/twitter-monitor/blob/3f99cea8492d3bdaa16f28a038bc8cf6022222ba/twitter_monitor/basic_stream.py#L161-L186
|
[
"def set_debug_listener(stream):\n \"\"\"Break into a debugger if receives the SIGUSR1 signal\"\"\"\n\n def debugger(sig, frame):\n launch_debugger(frame, stream)\n\n if hasattr(signal, 'SIGUSR1'):\n signal.signal(signal.SIGUSR1, debugger)\n else:\n logger.warn(\"Cannot set SIGUSR1 signal for debug mode.\")\n",
"def set_terminate_listeners(stream):\n \"\"\"Die on SIGTERM or SIGINT\"\"\"\n\n def stop(signum, frame):\n terminate(stream.listener)\n\n # Installs signal handlers for handling SIGINT and SIGTERM\n # gracefully.\n signal.signal(signal.SIGINT, stop)\n signal.signal(signal.SIGTERM, stop)\n",
"def get_tweepy_auth(twitter_api_key,\n twitter_api_secret,\n twitter_access_token,\n twitter_access_token_secret):\n \"\"\"Make a tweepy auth object\"\"\"\n auth = tweepy.OAuthHandler(twitter_api_key, twitter_api_secret)\n auth.set_access_token(twitter_access_token, twitter_access_token_secret)\n return auth\n",
"def construct_listener(outfile=None):\n \"\"\"Create the listener that prints tweets\"\"\"\n if outfile is not None:\n if os.path.exists(outfile):\n raise IOError(\"File %s already exists\" % outfile)\n\n outfile = open(outfile, 'wb')\n\n return PrintingListener(out=outfile)\n",
"def begin_stream_loop(stream, poll_interval):\n \"\"\"Start and maintain the streaming connection...\"\"\"\n while should_continue():\n try:\n stream.start_polling(poll_interval)\n except Exception as e:\n # Infinite restart\n logger.error(\"Exception while polling. Restarting in 1 second.\", exc_info=True)\n time.sleep(1) # to avoid craziness\n"
] |
"""
A simple streaming helper that takes
minimal configuration as arguments and starts
a stream to stdout.
"""
import os
import signal
import logging
import time
import json
import tweepy
from .listener import JsonStreamListener
from .checker import FileTermChecker
from .stream import DynamicTwitterStream
logger = logging.getLogger(__name__)
__all__ = ['start']
class PrintingListener(JsonStreamListener):
"""A listener that writes to a file or stdout"""
def __init__(self, api=None, out=None):
super(PrintingListener, self).__init__(api)
if out is None:
import sys
out = sys.stdout
self.out = out
self.terminate = False
self.received = 0
self.since = time.time()
def on_status(self, status):
"""Print out some tweets"""
self.out.write(json.dumps(status))
self.out.write(os.linesep)
self.received += 1
return not self.terminate
def set_terminate(self):
"""Notify the tweepy stream that it should quit"""
self.terminate = True
def print_status(self):
"""Print out the current tweet rate and reset the counter"""
tweets = self.received
now = time.time()
diff = now - self.since
self.since = now
self.received = 0
if diff > 0:
logger.info("Receiving tweets at %s tps", tweets / diff)
class BasicFileTermChecker(FileTermChecker):
"""Modified to print out status periodically"""
def __init__(self, filename, listener):
logger.info("Monitoring track file %s", filename)
super(BasicFileTermChecker, self).__init__(filename)
self.listener = listener
def update_tracking_terms(self):
self.listener.print_status()
return super(BasicFileTermChecker, self).update_tracking_terms()
def launch_debugger(frame, stream=None):
"""
Interrupt running process, and provide a python prompt for
interactive debugging.
"""
d = {'_frame': frame} # Allow access to frame object.
d.update(frame.f_globals) # Unless shadowed by global
d.update(frame.f_locals)
import code, traceback
i = code.InteractiveConsole(d)
message = "Signal received : entering python shell.\nTraceback:\n"
message += ''.join(traceback.format_stack(frame))
i.interact(message)
def set_debug_listener(stream):
"""Break into a debugger if receives the SIGUSR1 signal"""
def debugger(sig, frame):
launch_debugger(frame, stream)
if hasattr(signal, 'SIGUSR1'):
signal.signal(signal.SIGUSR1, debugger)
else:
logger.warn("Cannot set SIGUSR1 signal for debug mode.")
def terminate(listener):
"""
Exit cleanly.
"""
logger.info("Stopping because of signal")
# Let the tweet listener know it should be quitting asap
listener.set_terminate()
raise SystemExit()
def set_terminate_listeners(stream):
"""Die on SIGTERM or SIGINT"""
def stop(signum, frame):
terminate(stream.listener)
# Installs signal handlers for handling SIGINT and SIGTERM
# gracefully.
signal.signal(signal.SIGINT, stop)
signal.signal(signal.SIGTERM, stop)
def get_tweepy_auth(twitter_api_key,
twitter_api_secret,
twitter_access_token,
twitter_access_token_secret):
"""Make a tweepy auth object"""
auth = tweepy.OAuthHandler(twitter_api_key, twitter_api_secret)
auth.set_access_token(twitter_access_token, twitter_access_token_secret)
return auth
def construct_listener(outfile=None):
"""Create the listener that prints tweets"""
if outfile is not None:
if os.path.exists(outfile):
raise IOError("File %s already exists" % outfile)
outfile = open(outfile, 'wb')
return PrintingListener(out=outfile)
def should_continue():
return True
def begin_stream_loop(stream, poll_interval):
"""Start and maintain the streaming connection..."""
while should_continue():
try:
stream.start_polling(poll_interval)
except Exception as e:
# Infinite restart
logger.error("Exception while polling. Restarting in 1 second.", exc_info=True)
time.sleep(1) # to avoid craziness
|
michaelbrooks/twitter-monitor
|
twitter_monitor/basic_stream.py
|
PrintingListener.on_status
|
python
|
def on_status(self, status):
self.out.write(json.dumps(status))
self.out.write(os.linesep)
self.received += 1
return not self.terminate
|
Print out some tweets
|
train
|
https://github.com/michaelbrooks/twitter-monitor/blob/3f99cea8492d3bdaa16f28a038bc8cf6022222ba/twitter_monitor/basic_stream.py#L38-L44
| null |
class PrintingListener(JsonStreamListener):
"""A listener that writes to a file or stdout"""
def __init__(self, api=None, out=None):
super(PrintingListener, self).__init__(api)
if out is None:
import sys
out = sys.stdout
self.out = out
self.terminate = False
self.received = 0
self.since = time.time()
def set_terminate(self):
"""Notify the tweepy stream that it should quit"""
self.terminate = True
def print_status(self):
"""Print out the current tweet rate and reset the counter"""
tweets = self.received
now = time.time()
diff = now - self.since
self.since = now
self.received = 0
if diff > 0:
logger.info("Receiving tweets at %s tps", tweets / diff)
|
michaelbrooks/twitter-monitor
|
twitter_monitor/basic_stream.py
|
PrintingListener.print_status
|
python
|
def print_status(self):
tweets = self.received
now = time.time()
diff = now - self.since
self.since = now
self.received = 0
if diff > 0:
logger.info("Receiving tweets at %s tps", tweets / diff)
|
Print out the current tweet rate and reset the counter
|
train
|
https://github.com/michaelbrooks/twitter-monitor/blob/3f99cea8492d3bdaa16f28a038bc8cf6022222ba/twitter_monitor/basic_stream.py#L50-L58
| null |
class PrintingListener(JsonStreamListener):
"""A listener that writes to a file or stdout"""
def __init__(self, api=None, out=None):
super(PrintingListener, self).__init__(api)
if out is None:
import sys
out = sys.stdout
self.out = out
self.terminate = False
self.received = 0
self.since = time.time()
def on_status(self, status):
"""Print out some tweets"""
self.out.write(json.dumps(status))
self.out.write(os.linesep)
self.received += 1
return not self.terminate
def set_terminate(self):
"""Notify the tweepy stream that it should quit"""
self.terminate = True
|
michaelbrooks/twitter-monitor
|
twitter_monitor/stream.py
|
DynamicTwitterStream.start_polling
|
python
|
def start_polling(self, interval):
interval = float(interval)
self.polling = True
# clear the stored list of terms - we aren't tracking any
self.term_checker.reset()
logger.info("Starting polling for changes to the track list")
while self.polling:
loop_start = time()
self.update_stream()
self.handle_exceptions()
# wait for the interval unless interrupted, compensating for time elapsed in the loop
elapsed = time() - loop_start
sleep(max(0.1, interval - elapsed))
logger.warning("Term poll ceased!")
|
Start polling for term updates and streaming.
|
train
|
https://github.com/michaelbrooks/twitter-monitor/blob/3f99cea8492d3bdaa16f28a038bc8cf6022222ba/twitter_monitor/stream.py#L33-L56
|
[
"def update_stream(self):\n \"\"\"\n Restarts the stream with the current list of tracking terms.\n \"\"\"\n\n need_to_restart = False\n\n # If we think we are running, but something has gone wrong in the streaming thread\n # Restart it.\n if self.stream is not None and not self.stream.running:\n logger.warning(\"Stream exists but isn't running\")\n self.listener.error = False\n self.listener.streaming_exception = None\n need_to_restart = True\n\n # Check if the tracking list has changed\n if self.term_checker.check():\n logger.info(\"Terms have changed\")\n need_to_restart = True\n\n # If we aren't running and we are allowing unfiltered streams\n if self.stream is None and self.unfiltered:\n need_to_restart = True\n\n if not need_to_restart:\n return\n\n logger.info(\"Restarting stream...\")\n\n # Stop any old stream\n self.stop_stream()\n\n # Start a new stream\n self.start_stream()\n",
"def handle_exceptions(self):\n # check to see if an exception was raised in the streaming thread\n if self.listener.streaming_exception is not None:\n\n # Clear the exception\n exc = self.listener.streaming_exception\n self.listener.streaming_exception = None\n\n logger.warning(\"Streaming exception: %s\", exc)\n # propagate outward\n raise exc\n"
] |
class DynamicTwitterStream(object):
"""
A wrapper around Tweepy's Stream class that causes
streaming to be executed in a secondary thread.
Meanwhile the primary thread sleeps for an interval between checking for
term list updates.
"""
# Number of seconds to wait for the stream to stop
STOP_TIMEOUT = 1
def __init__(self, auth, listener, term_checker, **options):
self.auth = auth
self.listener = listener
self.term_checker = term_checker
self.polling = False
self.stream = None
self.retry_count = options.get("retry_count", 5)
self.unfiltered = options.get('unfiltered', False)
self.languages = options.get('languages', None)
def stop_polling(self):
"""Halts the polling loop and streaming"""
logger.info("Stopping polling loop")
self.polling = False
self.stop_stream()
def update_stream(self):
"""
Restarts the stream with the current list of tracking terms.
"""
need_to_restart = False
# If we think we are running, but something has gone wrong in the streaming thread
# Restart it.
if self.stream is not None and not self.stream.running:
logger.warning("Stream exists but isn't running")
self.listener.error = False
self.listener.streaming_exception = None
need_to_restart = True
# Check if the tracking list has changed
if self.term_checker.check():
logger.info("Terms have changed")
need_to_restart = True
# If we aren't running and we are allowing unfiltered streams
if self.stream is None and self.unfiltered:
need_to_restart = True
if not need_to_restart:
return
logger.info("Restarting stream...")
# Stop any old stream
self.stop_stream()
# Start a new stream
self.start_stream()
def start_stream(self):
"""Starts a stream with teh current tracking terms"""
tracking_terms = self.term_checker.tracking_terms()
if len(tracking_terms) > 0 or self.unfiltered:
# we have terms to track, so build a new stream
self.stream = tweepy.Stream(self.auth, self.listener,
stall_warnings=True,
timeout=90,
retry_count=self.retry_count)
if len(tracking_terms) > 0:
logger.info("Starting new twitter stream with %s terms:", len(tracking_terms))
logger.info(" %s", repr(tracking_terms))
# Launch it in a new thread
self.stream.filter(track=tracking_terms, async=True, languages=self.languages)
else:
logger.info("Starting new unfiltered stream")
self.stream.sample(async=True, languages=self.languages)
def stop_stream(self):
"""
Stops the current stream. Blocks until this is done.
"""
if self.stream is not None:
# There is a streaming thread
logger.warning("Stopping twitter stream...")
self.stream.disconnect()
self.stream = None
# wait a few seconds to allow the streaming to actually stop
sleep(self.STOP_TIMEOUT)
def handle_exceptions(self):
# check to see if an exception was raised in the streaming thread
if self.listener.streaming_exception is not None:
# Clear the exception
exc = self.listener.streaming_exception
self.listener.streaming_exception = None
logger.warning("Streaming exception: %s", exc)
# propagate outward
raise exc
|
michaelbrooks/twitter-monitor
|
twitter_monitor/stream.py
|
DynamicTwitterStream.update_stream
|
python
|
def update_stream(self):
need_to_restart = False
# If we think we are running, but something has gone wrong in the streaming thread
# Restart it.
if self.stream is not None and not self.stream.running:
logger.warning("Stream exists but isn't running")
self.listener.error = False
self.listener.streaming_exception = None
need_to_restart = True
# Check if the tracking list has changed
if self.term_checker.check():
logger.info("Terms have changed")
need_to_restart = True
# If we aren't running and we are allowing unfiltered streams
if self.stream is None and self.unfiltered:
need_to_restart = True
if not need_to_restart:
return
logger.info("Restarting stream...")
# Stop any old stream
self.stop_stream()
# Start a new stream
self.start_stream()
|
Restarts the stream with the current list of tracking terms.
|
train
|
https://github.com/michaelbrooks/twitter-monitor/blob/3f99cea8492d3bdaa16f28a038bc8cf6022222ba/twitter_monitor/stream.py#L65-L98
|
[
"def start_stream(self):\n \"\"\"Starts a stream with teh current tracking terms\"\"\"\n\n tracking_terms = self.term_checker.tracking_terms()\n\n if len(tracking_terms) > 0 or self.unfiltered:\n # we have terms to track, so build a new stream\n self.stream = tweepy.Stream(self.auth, self.listener,\n stall_warnings=True,\n timeout=90,\n retry_count=self.retry_count)\n\n if len(tracking_terms) > 0:\n logger.info(\"Starting new twitter stream with %s terms:\", len(tracking_terms))\n logger.info(\" %s\", repr(tracking_terms))\n\n # Launch it in a new thread\n self.stream.filter(track=tracking_terms, async=True, languages=self.languages)\n else:\n logger.info(\"Starting new unfiltered stream\")\n self.stream.sample(async=True, languages=self.languages)\n",
"def stop_stream(self):\n \"\"\"\n Stops the current stream. Blocks until this is done.\n \"\"\"\n\n if self.stream is not None:\n # There is a streaming thread\n\n logger.warning(\"Stopping twitter stream...\")\n self.stream.disconnect()\n\n self.stream = None\n\n # wait a few seconds to allow the streaming to actually stop\n sleep(self.STOP_TIMEOUT)\n"
] |
class DynamicTwitterStream(object):
    """
    A wrapper around Tweepy's Stream class that causes
    streaming to be executed in a secondary thread.
    Meanwhile the primary thread sleeps for an interval between checking for
    term list updates.
    """

    # Number of seconds to wait for the stream to stop
    STOP_TIMEOUT = 1

    def __init__(self, auth, listener, term_checker, **options):
        """Store auth/listener/term-checker and read optional settings.

        Options: retry_count (default 5), unfiltered (default False),
        languages (default None).
        """
        self.auth = auth
        self.listener = listener
        self.term_checker = term_checker
        self.polling = False
        self.stream = None
        self.retry_count = options.get("retry_count", 5)
        self.unfiltered = options.get('unfiltered', False)
        self.languages = options.get('languages', None)

    def start_polling(self, interval):
        """
        Start polling for term updates and streaming.

        Blocks until stop_polling() clears ``self.polling``.
        """
        interval = float(interval)
        self.polling = True

        # clear the stored list of terms - we aren't tracking any
        self.term_checker.reset()

        logger.info("Starting polling for changes to the track list")
        while self.polling:
            loop_start = time()

            self.update_stream()
            self.handle_exceptions()

            # wait for the interval unless interrupted, compensating for
            # time elapsed in the loop
            elapsed = time() - loop_start
            sleep(max(0.1, interval - elapsed))

        logger.warning("Term poll ceased!")

    def stop_polling(self):
        """Halts the polling loop and streaming"""
        logger.info("Stopping polling loop")
        self.polling = False
        self.stop_stream()

    def update_stream(self):
        """
        Restarts the stream with the current list of tracking terms.
        """
        need_to_restart = False

        # If we think we are running, but something has gone wrong in the
        # streaming thread, restart it.
        if self.stream is not None and not self.stream.running:
            logger.warning("Stream exists but isn't running")
            self.listener.error = False
            self.listener.streaming_exception = None
            need_to_restart = True

        # Check if the tracking list has changed
        if self.term_checker.check():
            logger.info("Terms have changed")
            need_to_restart = True

        # If we aren't running and we are allowing unfiltered streams
        if self.stream is None and self.unfiltered:
            need_to_restart = True

        if not need_to_restart:
            return

        logger.info("Restarting stream...")

        # Stop any old stream, then start a new one
        self.stop_stream()
        self.start_stream()

    def start_stream(self):
        """Starts a stream with the current tracking terms"""
        tracking_terms = self.term_checker.tracking_terms()

        if len(tracking_terms) > 0 or self.unfiltered:
            # we have terms to track, so build a new stream
            self.stream = tweepy.Stream(self.auth, self.listener,
                                        stall_warnings=True,
                                        timeout=90,
                                        retry_count=self.retry_count)

            if len(tracking_terms) > 0:
                logger.info("Starting new twitter stream with %s terms:", len(tracking_terms))
                logger.info(" %s", repr(tracking_terms))

                # Launch it in a new thread. NOTE: the keyword was renamed
                # from 'async' to 'is_async' in tweepy 3.7 because 'async'
                # became reserved in Python 3.7 ('async=True' is a
                # SyntaxError on modern interpreters).
                self.stream.filter(track=tracking_terms, is_async=True, languages=self.languages)
            else:
                logger.info("Starting new unfiltered stream")
                self.stream.sample(is_async=True, languages=self.languages)

    def stop_stream(self):
        """
        Stops the current stream. Blocks until this is done.
        """
        if self.stream is not None:
            # There is a streaming thread
            logger.warning("Stopping twitter stream...")
            self.stream.disconnect()
            self.stream = None

            # wait a few seconds to allow the streaming to actually stop
            sleep(self.STOP_TIMEOUT)

    def handle_exceptions(self):
        """Re-raise any exception captured by the listener's streaming thread."""
        # check to see if an exception was raised in the streaming thread
        if self.listener.streaming_exception is not None:
            # Clear the stored exception before propagating so a single
            # failure is reported exactly once.
            exc = self.listener.streaming_exception
            self.listener.streaming_exception = None

            logger.warning("Streaming exception: %s", exc)
            raise exc
|
michaelbrooks/twitter-monitor
|
twitter_monitor/stream.py
|
DynamicTwitterStream.start_stream
|
python
|
def start_stream(self):
tracking_terms = self.term_checker.tracking_terms()
if len(tracking_terms) > 0 or self.unfiltered:
# we have terms to track, so build a new stream
self.stream = tweepy.Stream(self.auth, self.listener,
stall_warnings=True,
timeout=90,
retry_count=self.retry_count)
if len(tracking_terms) > 0:
logger.info("Starting new twitter stream with %s terms:", len(tracking_terms))
logger.info(" %s", repr(tracking_terms))
# Launch it in a new thread
self.stream.filter(track=tracking_terms, async=True, languages=self.languages)
else:
logger.info("Starting new unfiltered stream")
self.stream.sample(async=True, languages=self.languages)
|
Starts a stream with the current tracking terms
|
train
|
https://github.com/michaelbrooks/twitter-monitor/blob/3f99cea8492d3bdaa16f28a038bc8cf6022222ba/twitter_monitor/stream.py#L100-L120
| null |
class DynamicTwitterStream(object):
"""
A wrapper around Tweepy's Stream class that causes
streaming to be executed in a secondary thread.
Meanwhile the primary thread sleeps for an interval between checking for
term list updates.
"""
# Number of seconds to wait for the stream to stop
STOP_TIMEOUT = 1
def __init__(self, auth, listener, term_checker, **options):
self.auth = auth
self.listener = listener
self.term_checker = term_checker
self.polling = False
self.stream = None
self.retry_count = options.get("retry_count", 5)
self.unfiltered = options.get('unfiltered', False)
self.languages = options.get('languages', None)
def start_polling(self, interval):
"""
Start polling for term updates and streaming.
"""
interval = float(interval)
self.polling = True
# clear the stored list of terms - we aren't tracking any
self.term_checker.reset()
logger.info("Starting polling for changes to the track list")
while self.polling:
loop_start = time()
self.update_stream()
self.handle_exceptions()
# wait for the interval unless interrupted, compensating for time elapsed in the loop
elapsed = time() - loop_start
sleep(max(0.1, interval - elapsed))
logger.warning("Term poll ceased!")
def stop_polling(self):
"""Halts the polling loop and streaming"""
logger.info("Stopping polling loop")
self.polling = False
self.stop_stream()
def update_stream(self):
"""
Restarts the stream with the current list of tracking terms.
"""
need_to_restart = False
# If we think we are running, but something has gone wrong in the streaming thread
# Restart it.
if self.stream is not None and not self.stream.running:
logger.warning("Stream exists but isn't running")
self.listener.error = False
self.listener.streaming_exception = None
need_to_restart = True
# Check if the tracking list has changed
if self.term_checker.check():
logger.info("Terms have changed")
need_to_restart = True
# If we aren't running and we are allowing unfiltered streams
if self.stream is None and self.unfiltered:
need_to_restart = True
if not need_to_restart:
return
logger.info("Restarting stream...")
# Stop any old stream
self.stop_stream()
# Start a new stream
self.start_stream()
def start_stream(self):
"""Starts a stream with teh current tracking terms"""
tracking_terms = self.term_checker.tracking_terms()
if len(tracking_terms) > 0 or self.unfiltered:
# we have terms to track, so build a new stream
self.stream = tweepy.Stream(self.auth, self.listener,
stall_warnings=True,
timeout=90,
retry_count=self.retry_count)
if len(tracking_terms) > 0:
logger.info("Starting new twitter stream with %s terms:", len(tracking_terms))
logger.info(" %s", repr(tracking_terms))
# Launch it in a new thread
self.stream.filter(track=tracking_terms, async=True, languages=self.languages)
else:
logger.info("Starting new unfiltered stream")
self.stream.sample(async=True, languages=self.languages)
def stop_stream(self):
    """
    Stops the current stream. Blocks until this is done.
    """
    if self.stream is None:
        return
    # A streaming thread exists -- ask it to disconnect and forget it.
    logger.warning("Stopping twitter stream...")
    self.stream.disconnect()
    self.stream = None
    # Give the background thread a moment to actually terminate.
    sleep(self.STOP_TIMEOUT)
def handle_exceptions(self):
    """Re-raise any exception captured by the streaming thread."""
    pending = self.listener.streaming_exception
    if pending is None:
        return
    # Clear the stored exception so it is only raised once.
    self.listener.streaming_exception = None
    logger.warning("Streaming exception: %s", pending)
    # propagate outward
    raise pending
|
michaelbrooks/twitter-monitor
|
twitter_monitor/stream.py
|
DynamicTwitterStream.stop_stream
|
python
|
def stop_stream(self):
    """Stop the current stream, if any; blocks until this is done."""
    if self.stream is not None:
        # There is a streaming thread
        logger.warning("Stopping twitter stream...")
        self.stream.disconnect()
        self.stream = None
        # wait a few seconds to allow the streaming to actually stop
        sleep(self.STOP_TIMEOUT)
|
Stops the current stream. Blocks until this is done.
|
train
|
https://github.com/michaelbrooks/twitter-monitor/blob/3f99cea8492d3bdaa16f28a038bc8cf6022222ba/twitter_monitor/stream.py#L122-L136
| null |
class DynamicTwitterStream(object):
    """
    A wrapper around Tweepy's Stream class that causes
    streaming to be executed in a secondary thread.
    Meanwhile the primary thread sleeps for an interval between checking
    for term list updates.
    """

    # Number of seconds to wait for the stream to stop
    STOP_TIMEOUT = 1

    def __init__(self, auth, listener, term_checker, **options):
        """Store collaborators and read options.

        Recognized options: ``retry_count`` (default 5), ``unfiltered``
        (default False) and ``languages`` (default None).
        """
        self.auth = auth
        self.listener = listener
        self.term_checker = term_checker
        self.polling = False
        self.stream = None
        self.retry_count = options.get("retry_count", 5)
        self.unfiltered = options.get('unfiltered', False)
        self.languages = options.get('languages', None)

    def start_polling(self, interval):
        """
        Start polling for term updates and streaming.
        Blocks until stop_polling() flips the ``polling`` flag.
        """
        interval = float(interval)
        self.polling = True
        # clear the stored list of terms - we aren't tracking any
        self.term_checker.reset()
        logger.info("Starting polling for changes to the track list")
        while self.polling:
            loop_start = time()
            self.update_stream()
            self.handle_exceptions()
            # wait for the interval unless interrupted, compensating for time elapsed in the loop
            elapsed = time() - loop_start
            sleep(max(0.1, interval - elapsed))
        logger.warning("Term poll ceased!")

    def stop_polling(self):
        """Halts the polling loop and streaming"""
        logger.info("Stopping polling loop")
        self.polling = False
        self.stop_stream()

    def update_stream(self):
        """
        Restarts the stream with the current list of tracking terms.
        A restart happens when the stream thread died, the term list
        changed, or an unfiltered stream should be running but is not.
        """
        need_to_restart = False
        # If we think we are running, but something has gone wrong in the streaming thread
        # Restart it.
        if self.stream is not None and not self.stream.running:
            logger.warning("Stream exists but isn't running")
            self.listener.error = False
            self.listener.streaming_exception = None
            need_to_restart = True
        # Check if the tracking list has changed
        if self.term_checker.check():
            logger.info("Terms have changed")
            need_to_restart = True
        # If we aren't running and we are allowing unfiltered streams
        if self.stream is None and self.unfiltered:
            need_to_restart = True
        if not need_to_restart:
            return
        logger.info("Restarting stream...")
        # Stop any old stream
        self.stop_stream()
        # Start a new stream
        self.start_stream()

    def start_stream(self):
        """Starts a stream with the current tracking terms"""
        tracking_terms = self.term_checker.tracking_terms()
        if len(tracking_terms) > 0 or self.unfiltered:
            # we have terms to track, so build a new stream
            self.stream = tweepy.Stream(self.auth, self.listener,
                                        stall_warnings=True,
                                        timeout=90,
                                        retry_count=self.retry_count)
            # BUG FIX: 'async' is a reserved word since Python 3.7, so the
            # literal keyword "async=True" is a SyntaxError there.  The
            # ** expansion passes the identical argument on all versions.
            if len(tracking_terms) > 0:
                logger.info("Starting new twitter stream with %s terms:", len(tracking_terms))
                logger.info(" %s", repr(tracking_terms))
                # Launch it in a new thread
                self.stream.filter(track=tracking_terms, languages=self.languages,
                                   **{'async': True})
            else:
                logger.info("Starting new unfiltered stream")
                self.stream.sample(languages=self.languages, **{'async': True})

    def stop_stream(self):
        """
        Stops the current stream. Blocks until this is done.
        """
        if self.stream is not None:
            # There is a streaming thread
            logger.warning("Stopping twitter stream...")
            self.stream.disconnect()
            self.stream = None
            # wait a few seconds to allow the streaming to actually stop
            sleep(self.STOP_TIMEOUT)

    def handle_exceptions(self):
        """Re-raise any exception recorded by the streaming thread."""
        # check to see if an exception was raised in the streaming thread
        if self.listener.streaming_exception is not None:
            # Clear the exception
            exc = self.listener.streaming_exception
            self.listener.streaming_exception = None
            logger.warning("Streaming exception: %s", exc)
            # propagate outward
            raise exc
|
ourway/auth
|
auth/CAS/authorization.py
|
Authorization.roles
|
python
|
def roles(self):
    """Return every role belonging to this client as a list of dicts."""
    result = AuthGroup.objects(creator=self.client).only('role')
    return json.loads(result.to_json())
|
gets user groups
|
train
|
https://github.com/ourway/auth/blob/f0d9676854dcec494add4fa086a9b2a3e4d8cea5/auth/CAS/authorization.py#L25-L28
| null |
class Authorization(object):
    """Role-based access control backed by mongoengine documents.

    AuthGroup stores roles, AuthMembership links users to roles and
    AuthPermission links roles to named permissions.  Every query is
    scoped to ``self.client`` so several applications can share one
    database without seeing each other's data.
    """

    def __init__(self, client):
        """Initialize Authorization with a client
        @type client: String
        """
        self.client = client
        make_db_connection()

    # BUG FIX: this method was decorated with @property although it takes a
    # required ``role`` argument, so attribute access raised TypeError.  The
    # decorator (which belongs on the zero-argument ``roles`` accessor, not
    # here) has been removed, restoring the callable method.
    def get_permissions(self, role):
        """Return the permissions of *role* as a list of dicts.

        NOTE(review): returns the *string* '[]' (not an empty list) for an
        unknown role -- kept as-is for backward compatibility.
        """
        target_role = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_role:
            return '[]'
        targets = AuthPermission.objects(groups=target_role, creator=self.client).only('name')
        return json.loads(targets.to_json())

    def get_user_permissions(self, user):
        """Return all permissions of *user*, collected over the user's roles."""
        membership_records = AuthMembership.objects(creator=self.client, user=user).only('groups')
        results = []
        for record in membership_records:
            for group in record.groups:
                permission_records = AuthPermission.objects(creator=self.client,
                                                            groups=group).only('name')
                for permission in permission_records:
                    results.append({'name': permission.name})
        return results

    def get_user_roles(self, user):
        """Return the roles *user* belongs to, as a list of dicts."""
        membership_records = AuthMembership.objects(creator=self.client, user=user).only('groups')
        return [{'role': group.role}
                for record in membership_records
                for group in record.groups]

    def get_role_members(self, role):
        """Return the members of *role*, as a list of dicts."""
        target_role_db = AuthGroup.objects(creator=self.client, role=role)
        members = AuthMembership.objects(groups__in=target_role_db).only('user')
        return json.loads(members.to_json())

    def which_roles_can(self, name):
        """Return the roles holding permission *name* (e.g. SendMail)."""
        permission_record = AuthPermission.objects(creator=self.client, name=name).first()
        return [{'role': group.role} for group in permission_record.groups]

    def which_users_can(self, name):
        """Return the users holding permission *name*, via their roles."""
        _roles = self.which_roles_can(name)
        return [self.get_role_members(item.get('role')) for item in _roles]

    def get_role(self, role):
        """Return the AuthGroup object for *role*, or None if unknown."""
        return AuthGroup.objects(role=role, creator=self.client).first()

    def add_role(self, role, description=None):
        """Create a new role; returns False when it already exists."""
        new_group = AuthGroup(role=role, creator=self.client)
        try:
            new_group.save()
            return True
        except NotUniqueError:
            return False

    def del_role(self, role):
        """Delete a role; returns False when it does not exist."""
        target = AuthGroup.objects(role=role, creator=self.client).first()
        if not target:
            return False
        target.delete()
        return True

    def add_membership(self, user, role):
        """Make *user* a member of *role*; returns False for unknown roles."""
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_group:
            return False
        target = AuthMembership.objects(user=user, creator=self.client).first()
        if not target:
            target = AuthMembership(user=user, creator=self.client)
        if role not in (group.role for group in target.groups):
            target.groups.append(target_group)
            target.save()
        return True

    def del_membership(self, user, role):
        """Remove *user* from *role*; always returns True."""
        if not self.has_membership(user, role):
            return True
        target_record = AuthMembership.objects(creator=self.client, user=user).first()
        if not target_record:
            return True
        # BUG FIX: iterate over a copy -- the original removed elements from
        # the very list it was iterating, which can skip entries.
        for group in list(target_record.groups):
            if group.role == role:
                target_record.groups.remove(group)
        target_record.save()
        return True

    def has_membership(self, user, role):
        """Return True when *user* is a member of *role*."""
        target_record = AuthMembership.objects(creator=self.client, user=user).first()
        if target_record:
            return role in [group.role for group in target_record.groups]
        return False

    def add_permission(self, role, name):
        """Grant permission *name* to *role*; returns False for unknown roles."""
        if self.has_permission(role, name):
            return True
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_group:
            return False
        # Create the permission document or add the group to an existing one
        # (upsert); the update's return value is deliberately ignored.
        AuthPermission.objects(name=name).update(
            add_to_set__groups=[target_group], creator=self.client, upsert=True
        )
        return True

    def del_permission(self, role, name):
        """Revoke permission *name* from *role*; always returns True."""
        if not self.has_permission(role, name):
            return True
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        target = AuthPermission.objects(groups=target_group, name=name, creator=self.client).first()
        if target:
            target.delete()
        return True

    def has_permission(self, role, name):
        """Return True when *role* holds permission *name*."""
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_group:
            return False
        target = AuthPermission.objects(groups=target_group, name=name, creator=self.client).first()
        return target is not None

    def user_has_permission(self, user, name):
        """Return True when any of *user*'s roles holds permission *name*."""
        target_record = AuthMembership.objects(creator=self.client, user=user).first()
        if not target_record:
            return False
        return any(self.has_permission(group.role, name)
                   for group in target_record.groups)
|
ourway/auth
|
auth/CAS/authorization.py
|
Authorization.get_permissions
|
python
|
def get_permissions(self, role):
    """Return the permissions of *role* as a list of dicts.

    NOTE(review): returns the string '[]' (not a list) for an unknown
    role -- callers should be aware of the inconsistent type.
    """
    target_role = AuthGroup.objects(role=role, creator=self.client).first()
    if not target_role:
        return '[]'
    targets = AuthPermission.objects(groups=target_role, creator=self.client).only('name')
    return json.loads(targets.to_json())
|
gets permissions of role
|
train
|
https://github.com/ourway/auth/blob/f0d9676854dcec494add4fa086a9b2a3e4d8cea5/auth/CAS/authorization.py#L30-L36
| null |
class Authorization(object):
    """ Main Authorization class

    Role-based access control on top of mongoengine documents: AuthGroup
    holds roles, AuthMembership links users to roles, and AuthPermission
    links roles to named permissions.  All queries are scoped to
    ``self.client`` so several applications can share one database.
    """

    def __init__(self, client):
        """Initialize Authorization with a client
        @type client: String
        """
        self.client = client
        make_db_connection()

    @property
    def roles(self):
        """Return every role of this client as a list of dicts."""
        result = AuthGroup.objects(creator=self.client).only('role')
        return json.loads(result.to_json())

    def get_user_permissions(self, user):
        """Return all permissions of *user*, collected over the user's roles."""
        memberShipRecords = AuthMembership.objects(creator=self.client, user=user).only('groups')
        results = []
        for each in memberShipRecords:
            for group in each.groups:
                targetPermissionRecords = AuthPermission.objects(creator=self.client,
                                                                 groups=group).only('name')
                for each_permission in targetPermissionRecords:
                    results.append({'name': each_permission.name})
        return results

    def get_user_roles(self, user):
        """Return the roles *user* belongs to, as a list of dicts."""
        memberShipRecords = AuthMembership.objects(creator=self.client, user=user).only('groups')
        results = []
        for each in memberShipRecords:
            for group in each.groups:
                results.append({'role': group.role})
        return results

    def get_role_members(self, role):
        """Return the members of *role*, as a list of dicts."""
        targetRoleDb = AuthGroup.objects(creator=self.client, role=role)
        members = AuthMembership.objects(groups__in=targetRoleDb).only('user')
        return json.loads(members.to_json())

    def which_roles_can(self, name):
        """Which roles can *name*? (e.g. which roles can SendMail)"""
        targetPermissionRecords = AuthPermission.objects(creator=self.client, name=name).first()
        return [{'role': group.role} for group in targetPermissionRecords.groups]

    def which_users_can(self, name):
        """Which users can *name*? Resolved through which_roles_can()."""
        _roles = self.which_roles_can(name)
        result = [self.get_role_members(i.get('role')) for i in _roles]
        return result

    def get_role(self, role):
        """Return the AuthGroup object for *role*, or None if unknown."""
        role = AuthGroup.objects(role=role, creator=self.client).first()
        return role

    def add_role(self, role, description=None):
        """Create a new role; returns False when it already exists."""
        new_group = AuthGroup(role=role, creator=self.client)
        try:
            new_group.save()
            return True
        except NotUniqueError:
            return False

    def del_role(self, role):
        """Delete a role; returns False when it does not exist."""
        target = AuthGroup.objects(role=role, creator=self.client).first()
        if target:
            target.delete()
            return True
        else:
            return False

    def add_membership(self, user, role):
        """Make *user* a member of *role*; returns False for unknown roles."""
        targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
        if not targetGroup:
            return False
        target = AuthMembership.objects(user=user, creator=self.client).first()
        if not target:
            target = AuthMembership(user=user, creator=self.client)
        if not role in [i.role for i in target.groups]:
            target.groups.append(targetGroup)
            target.save()
        return True

    def del_membership(self, user, role):
        """Remove *user* from *role*; always returns True.

        NOTE(review): removes elements from ``targetRecord.groups`` while
        iterating over it -- harmless with unique roles, but fragile.
        """
        if not self.has_membership(user, role):
            return True
        targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
        if not targetRecord:
            return True
        for group in targetRecord.groups:
            if group.role == role:
                targetRecord.groups.remove(group)
                targetRecord.save()
        return True

    def has_membership(self, user, role):
        """Return True when *user* is a member of *role*."""
        targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
        if targetRecord:
            return role in [i.role for i in targetRecord.groups]
        return False

    def add_permission(self, role, name):
        """Grant permission *name* to *role*; returns False for unknown roles."""
        if self.has_permission(role, name):
            return True
        targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
        if not targetGroup:
            return False
        # Create or update (upsert); the return value is unused.
        permission = AuthPermission.objects(name=name).update(
            add_to_set__groups=[targetGroup], creator=self.client, upsert=True
        )
        return True

    def del_permission(self, role, name):
        """Revoke permission *name* from *role*; always returns True."""
        if not self.has_permission(role, name):
            return True
        targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
        target = AuthPermission.objects(groups=targetGroup, name=name, creator=self.client).first()
        if not target:
            return True
        target.delete()
        return True

    def has_permission(self, role, name):
        """Return True when *role* holds permission *name*."""
        targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
        if not targetGroup:
            return False
        target = AuthPermission.objects(groups=targetGroup, name=name, creator=self.client).first()
        if target:
            return True
        return False

    def user_has_permission(self, user, name):
        """Return True when any of *user*'s roles holds permission *name*."""
        targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
        if not targetRecord:
            return False
        for group in targetRecord.groups:
            if self.has_permission(group.role, name):
                return True
        return False
|
ourway/auth
|
auth/CAS/authorization.py
|
Authorization.get_user_permissions
|
python
|
def get_user_permissions(self, user):
    """Return all permissions of *user*, collected over the user's roles."""
    memberShipRecords = AuthMembership.objects(creator=self.client, user=user).only('groups')
    results = []
    for each in memberShipRecords:
        for group in each.groups:
            targetPermissionRecords = AuthPermission.objects(creator=self.client,
                                                             groups=group).only('name')
            for each_permission in targetPermissionRecords:
                results.append({'name': each_permission.name})
    return results
|
get permissions of a user
|
train
|
https://github.com/ourway/auth/blob/f0d9676854dcec494add4fa086a9b2a3e4d8cea5/auth/CAS/authorization.py#L39-L50
| null |
class Authorization(object):
    """ Main Authorization class

    Role-based access control on top of mongoengine documents: AuthGroup
    holds roles, AuthMembership links users to roles, and AuthPermission
    links roles to named permissions.  All queries are scoped to
    ``self.client`` so several applications can share one database.
    """

    def __init__(self, client):
        """Initialize Authorization with a client
        @type client: String
        """
        self.client = client
        make_db_connection()

    @property
    def roles(self):
        """Return every role of this client as a list of dicts."""
        result = AuthGroup.objects(creator=self.client).only('role')
        return json.loads(result.to_json())

    def get_permissions(self, role):
        """Return the permissions of *role* as a list of dicts.

        NOTE(review): returns the string '[]' (not a list) for an unknown
        role -- callers should be aware of the inconsistent type.
        """
        target_role = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_role:
            return '[]'
        targets = AuthPermission.objects(groups=target_role, creator=self.client).only('name')
        return json.loads(targets.to_json())

    def get_user_roles(self, user):
        """Return the roles *user* belongs to, as a list of dicts."""
        memberShipRecords = AuthMembership.objects(creator=self.client, user=user).only('groups')
        results = []
        for each in memberShipRecords:
            for group in each.groups:
                results.append({'role': group.role})
        return results

    def get_role_members(self, role):
        """Return the members of *role*, as a list of dicts."""
        targetRoleDb = AuthGroup.objects(creator=self.client, role=role)
        members = AuthMembership.objects(groups__in=targetRoleDb).only('user')
        return json.loads(members.to_json())

    def which_roles_can(self, name):
        """Which roles can *name*? (e.g. which roles can SendMail)"""
        targetPermissionRecords = AuthPermission.objects(creator=self.client, name=name).first()
        return [{'role': group.role} for group in targetPermissionRecords.groups]

    def which_users_can(self, name):
        """Which users can *name*? Resolved through which_roles_can()."""
        _roles = self.which_roles_can(name)
        result = [self.get_role_members(i.get('role')) for i in _roles]
        return result

    def get_role(self, role):
        """Return the AuthGroup object for *role*, or None if unknown."""
        role = AuthGroup.objects(role=role, creator=self.client).first()
        return role

    def add_role(self, role, description=None):
        """Create a new role; returns False when it already exists."""
        new_group = AuthGroup(role=role, creator=self.client)
        try:
            new_group.save()
            return True
        except NotUniqueError:
            return False

    def del_role(self, role):
        """Delete a role; returns False when it does not exist."""
        target = AuthGroup.objects(role=role, creator=self.client).first()
        if target:
            target.delete()
            return True
        else:
            return False

    def add_membership(self, user, role):
        """Make *user* a member of *role*; returns False for unknown roles."""
        targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
        if not targetGroup:
            return False
        target = AuthMembership.objects(user=user, creator=self.client).first()
        if not target:
            target = AuthMembership(user=user, creator=self.client)
        if not role in [i.role for i in target.groups]:
            target.groups.append(targetGroup)
            target.save()
        return True

    def del_membership(self, user, role):
        """Remove *user* from *role*; always returns True.

        NOTE(review): removes elements from ``targetRecord.groups`` while
        iterating over it -- harmless with unique roles, but fragile.
        """
        if not self.has_membership(user, role):
            return True
        targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
        if not targetRecord:
            return True
        for group in targetRecord.groups:
            if group.role == role:
                targetRecord.groups.remove(group)
                targetRecord.save()
        return True

    def has_membership(self, user, role):
        """Return True when *user* is a member of *role*."""
        targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
        if targetRecord:
            return role in [i.role for i in targetRecord.groups]
        return False

    def add_permission(self, role, name):
        """Grant permission *name* to *role*; returns False for unknown roles."""
        if self.has_permission(role, name):
            return True
        targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
        if not targetGroup:
            return False
        # Create or update (upsert); the return value is unused.
        permission = AuthPermission.objects(name=name).update(
            add_to_set__groups=[targetGroup], creator=self.client, upsert=True
        )
        return True

    def del_permission(self, role, name):
        """Revoke permission *name* from *role*; always returns True."""
        if not self.has_permission(role, name):
            return True
        targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
        target = AuthPermission.objects(groups=targetGroup, name=name, creator=self.client).first()
        if not target:
            return True
        target.delete()
        return True

    def has_permission(self, role, name):
        """Return True when *role* holds permission *name*."""
        targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
        if not targetGroup:
            return False
        target = AuthPermission.objects(groups=targetGroup, name=name, creator=self.client).first()
        if target:
            return True
        return False

    def user_has_permission(self, user, name):
        """Return True when any of *user*'s roles holds permission *name*."""
        targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
        if not targetRecord:
            return False
        for group in targetRecord.groups:
            if self.has_permission(group.role, name):
                return True
        return False
|
ourway/auth
|
auth/CAS/authorization.py
|
Authorization.get_user_roles
|
python
|
def get_user_roles(self, user):
    """Return the roles *user* belongs to, as a list of dicts."""
    memberShipRecords = AuthMembership.objects(creator=self.client, user=user).only('groups')
    results = []
    for each in memberShipRecords:
        for group in each.groups:
            results.append({'role': group.role})
    return results
|
get permissions of a user
|
train
|
https://github.com/ourway/auth/blob/f0d9676854dcec494add4fa086a9b2a3e4d8cea5/auth/CAS/authorization.py#L52-L59
| null |
class Authorization(object):
    """ Main Authorization class

    Role-based access control on top of mongoengine documents: AuthGroup
    holds roles, AuthMembership links users to roles, and AuthPermission
    links roles to named permissions.  All queries are scoped to
    ``self.client`` so several applications can share one database.
    """

    def __init__(self, client):
        """Initialize Authorization with a client
        @type client: String
        """
        self.client = client
        make_db_connection()

    @property
    def roles(self):
        """Return every role of this client as a list of dicts."""
        result = AuthGroup.objects(creator=self.client).only('role')
        return json.loads(result.to_json())

    def get_permissions(self, role):
        """Return the permissions of *role* as a list of dicts.

        NOTE(review): returns the string '[]' (not a list) for an unknown
        role -- callers should be aware of the inconsistent type.
        """
        target_role = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_role:
            return '[]'
        targets = AuthPermission.objects(groups=target_role, creator=self.client).only('name')
        return json.loads(targets.to_json())

    def get_user_permissions(self, user):
        """Return all permissions of *user*, collected over the user's roles."""
        memberShipRecords = AuthMembership.objects(creator=self.client, user=user).only('groups')
        results = []
        for each in memberShipRecords:
            for group in each.groups:
                targetPermissionRecords = AuthPermission.objects(creator=self.client,
                                                                 groups=group).only('name')
                for each_permission in targetPermissionRecords:
                    results.append({'name': each_permission.name})
        return results

    def get_role_members(self, role):
        """Return the members of *role*, as a list of dicts."""
        targetRoleDb = AuthGroup.objects(creator=self.client, role=role)
        members = AuthMembership.objects(groups__in=targetRoleDb).only('user')
        return json.loads(members.to_json())

    def which_roles_can(self, name):
        """Which roles can *name*? (e.g. which roles can SendMail)"""
        targetPermissionRecords = AuthPermission.objects(creator=self.client, name=name).first()
        return [{'role': group.role} for group in targetPermissionRecords.groups]

    def which_users_can(self, name):
        """Which users can *name*? Resolved through which_roles_can()."""
        _roles = self.which_roles_can(name)
        result = [self.get_role_members(i.get('role')) for i in _roles]
        return result

    def get_role(self, role):
        """Return the AuthGroup object for *role*, or None if unknown."""
        role = AuthGroup.objects(role=role, creator=self.client).first()
        return role

    def add_role(self, role, description=None):
        """Create a new role; returns False when it already exists."""
        new_group = AuthGroup(role=role, creator=self.client)
        try:
            new_group.save()
            return True
        except NotUniqueError:
            return False

    def del_role(self, role):
        """Delete a role; returns False when it does not exist."""
        target = AuthGroup.objects(role=role, creator=self.client).first()
        if target:
            target.delete()
            return True
        else:
            return False

    def add_membership(self, user, role):
        """Make *user* a member of *role*; returns False for unknown roles."""
        targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
        if not targetGroup:
            return False
        target = AuthMembership.objects(user=user, creator=self.client).first()
        if not target:
            target = AuthMembership(user=user, creator=self.client)
        if not role in [i.role for i in target.groups]:
            target.groups.append(targetGroup)
            target.save()
        return True

    def del_membership(self, user, role):
        """Remove *user* from *role*; always returns True.

        NOTE(review): removes elements from ``targetRecord.groups`` while
        iterating over it -- harmless with unique roles, but fragile.
        """
        if not self.has_membership(user, role):
            return True
        targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
        if not targetRecord:
            return True
        for group in targetRecord.groups:
            if group.role == role:
                targetRecord.groups.remove(group)
                targetRecord.save()
        return True

    def has_membership(self, user, role):
        """Return True when *user* is a member of *role*."""
        targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
        if targetRecord:
            return role in [i.role for i in targetRecord.groups]
        return False

    def add_permission(self, role, name):
        """Grant permission *name* to *role*; returns False for unknown roles."""
        if self.has_permission(role, name):
            return True
        targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
        if not targetGroup:
            return False
        # Create or update (upsert); the return value is unused.
        permission = AuthPermission.objects(name=name).update(
            add_to_set__groups=[targetGroup], creator=self.client, upsert=True
        )
        return True

    def del_permission(self, role, name):
        """Revoke permission *name* from *role*; always returns True."""
        if not self.has_permission(role, name):
            return True
        targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
        target = AuthPermission.objects(groups=targetGroup, name=name, creator=self.client).first()
        if not target:
            return True
        target.delete()
        return True

    def has_permission(self, role, name):
        """Return True when *role* holds permission *name*."""
        targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
        if not targetGroup:
            return False
        target = AuthPermission.objects(groups=targetGroup, name=name, creator=self.client).first()
        if target:
            return True
        return False

    def user_has_permission(self, user, name):
        """Return True when any of *user*'s roles holds permission *name*."""
        targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
        if not targetRecord:
            return False
        for group in targetRecord.groups:
            if self.has_permission(group.role, name):
                return True
        return False
|
ourway/auth
|
auth/CAS/authorization.py
|
Authorization.get_role_members
|
python
|
def get_role_members(self, role):
    """Return the members of *role*, as a list of dicts."""
    targetRoleDb = AuthGroup.objects(creator=self.client, role=role)
    members = AuthMembership.objects(groups__in=targetRoleDb).only('user')
    return json.loads(members.to_json())
|
get permissions of a user
|
train
|
https://github.com/ourway/auth/blob/f0d9676854dcec494add4fa086a9b2a3e4d8cea5/auth/CAS/authorization.py#L62-L66
| null |
class Authorization(object):
    """ Main Authorization class

    Role-based access control on top of mongoengine documents: AuthGroup
    holds roles, AuthMembership links users to roles, and AuthPermission
    links roles to named permissions.  All queries are scoped to
    ``self.client`` so several applications can share one database.
    """

    def __init__(self, client):
        """Initialize Authorization with a client
        @type client: String
        """
        self.client = client
        make_db_connection()

    @property
    def roles(self):
        """Return every role of this client as a list of dicts."""
        result = AuthGroup.objects(creator=self.client).only('role')
        return json.loads(result.to_json())

    def get_permissions(self, role):
        """Return the permissions of *role* as a list of dicts.

        NOTE(review): returns the string '[]' (not a list) for an unknown
        role -- callers should be aware of the inconsistent type.
        """
        target_role = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_role:
            return '[]'
        targets = AuthPermission.objects(groups=target_role, creator=self.client).only('name')
        return json.loads(targets.to_json())

    def get_user_permissions(self, user):
        """Return all permissions of *user*, collected over the user's roles."""
        memberShipRecords = AuthMembership.objects(creator=self.client, user=user).only('groups')
        results = []
        for each in memberShipRecords:
            for group in each.groups:
                targetPermissionRecords = AuthPermission.objects(creator=self.client,
                                                                 groups=group).only('name')
                for each_permission in targetPermissionRecords:
                    results.append({'name': each_permission.name})
        return results

    def get_user_roles(self, user):
        """Return the roles *user* belongs to, as a list of dicts."""
        memberShipRecords = AuthMembership.objects(creator=self.client, user=user).only('groups')
        results = []
        for each in memberShipRecords:
            for group in each.groups:
                results.append({'role': group.role})
        return results

    def which_roles_can(self, name):
        """Which roles can *name*? (e.g. which roles can SendMail)"""
        targetPermissionRecords = AuthPermission.objects(creator=self.client, name=name).first()
        return [{'role': group.role} for group in targetPermissionRecords.groups]

    def which_users_can(self, name):
        """Which users can *name*? Resolved through which_roles_can()."""
        _roles = self.which_roles_can(name)
        result = [self.get_role_members(i.get('role')) for i in _roles]
        return result

    def get_role(self, role):
        """Return the AuthGroup object for *role*, or None if unknown."""
        role = AuthGroup.objects(role=role, creator=self.client).first()
        return role

    def add_role(self, role, description=None):
        """Create a new role; returns False when it already exists."""
        new_group = AuthGroup(role=role, creator=self.client)
        try:
            new_group.save()
            return True
        except NotUniqueError:
            return False

    def del_role(self, role):
        """Delete a role; returns False when it does not exist."""
        target = AuthGroup.objects(role=role, creator=self.client).first()
        if target:
            target.delete()
            return True
        else:
            return False

    def add_membership(self, user, role):
        """Make *user* a member of *role*; returns False for unknown roles."""
        targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
        if not targetGroup:
            return False
        target = AuthMembership.objects(user=user, creator=self.client).first()
        if not target:
            target = AuthMembership(user=user, creator=self.client)
        if not role in [i.role for i in target.groups]:
            target.groups.append(targetGroup)
            target.save()
        return True

    def del_membership(self, user, role):
        """Remove *user* from *role*; always returns True.

        NOTE(review): removes elements from ``targetRecord.groups`` while
        iterating over it -- harmless with unique roles, but fragile.
        """
        if not self.has_membership(user, role):
            return True
        targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
        if not targetRecord:
            return True
        for group in targetRecord.groups:
            if group.role == role:
                targetRecord.groups.remove(group)
                targetRecord.save()
        return True

    def has_membership(self, user, role):
        """Return True when *user* is a member of *role*."""
        targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
        if targetRecord:
            return role in [i.role for i in targetRecord.groups]
        return False

    def add_permission(self, role, name):
        """Grant permission *name* to *role*; returns False for unknown roles."""
        if self.has_permission(role, name):
            return True
        targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
        if not targetGroup:
            return False
        # Create or update (upsert); the return value is unused.
        permission = AuthPermission.objects(name=name).update(
            add_to_set__groups=[targetGroup], creator=self.client, upsert=True
        )
        return True

    def del_permission(self, role, name):
        """Revoke permission *name* from *role*; always returns True."""
        if not self.has_permission(role, name):
            return True
        targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
        target = AuthPermission.objects(groups=targetGroup, name=name, creator=self.client).first()
        if not target:
            return True
        target.delete()
        return True

    def has_permission(self, role, name):
        """Return True when *role* holds permission *name*."""
        targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
        if not targetGroup:
            return False
        target = AuthPermission.objects(groups=targetGroup, name=name, creator=self.client).first()
        if target:
            return True
        return False

    def user_has_permission(self, user, name):
        """Return True when any of *user*'s roles holds permission *name*."""
        targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
        if not targetRecord:
            return False
        for group in targetRecord.groups:
            if self.has_permission(group.role, name):
                return True
        return False
|
ourway/auth
|
auth/CAS/authorization.py
|
Authorization.which_roles_can
|
python
|
def which_roles_can(self, name):
    """Which roles can *name*? (e.g. which roles can SendMail)"""
    targetPermissionRecords = AuthPermission.objects(creator=self.client, name=name).first()
    return [{'role': group.role} for group in targetPermissionRecords.groups]
|
Which role can SendMail?
|
train
|
https://github.com/ourway/auth/blob/f0d9676854dcec494add4fa086a9b2a3e4d8cea5/auth/CAS/authorization.py#L68-L71
| null |
class Authorization(object):
    """Role-based access control backed by MongoDB via mongoengine.

    Every query is scoped to ``self.client`` so several tenants can share
    the same collections without seeing each other's roles, memberships
    or permissions.
    """
    def __init__(self, client):
        """Initialize Authorization for one tenant.

        :param client: identifier of the owning client/tenant (string).
        """
        self.client = client
        make_db_connection()
    @property
    def roles(self):
        """Return all roles of this client as a list of ``{'role': ...}`` dicts."""
        groups = AuthGroup.objects(creator=self.client).only('role')
        return json.loads(groups.to_json())
    def get_permissions(self, role):
        """Return the permissions granted to *role* as a list of dicts.

        Bug fix: the original returned the *string* ``'[]'`` for an unknown
        role, inconsistent with the list produced by ``json.loads`` on the
        success path; callers now always receive a list.
        """
        target_role = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_role:
            return []
        perms = AuthPermission.objects(groups=target_role, creator=self.client).only('name')
        return json.loads(perms.to_json())
    def get_user_permissions(self, user):
        """Return every permission *user* holds through group membership."""
        membership_records = AuthMembership.objects(creator=self.client, user=user).only('groups')
        results = []
        for record in membership_records:
            for group in record.groups:
                permissions = AuthPermission.objects(creator=self.client,
                                                     groups=group).only('name')
                for permission in permissions:
                    results.append({'name': permission.name})
        return results
    def get_user_roles(self, user):
        """Return the roles *user* belongs to as a list of ``{'role': ...}`` dicts."""
        membership_records = AuthMembership.objects(creator=self.client, user=user).only('groups')
        return [{'role': group.role}
                for record in membership_records
                for group in record.groups]
    def get_role_members(self, role):
        """Return the users that are members of *role*."""
        role_groups = AuthGroup.objects(creator=self.client, role=role)
        members = AuthMembership.objects(groups__in=role_groups).only('user')
        return json.loads(members.to_json())
    def which_users_can(self, name):
        """Return, per role, the users that hold the permission *name*."""
        return [self.get_role_members(item.get('role'))
                for item in self.which_roles_can(name)]
    def get_role(self, role):
        """Return the AuthGroup document for *role*, or ``None`` if absent."""
        return AuthGroup.objects(role=role, creator=self.client).first()
    def add_role(self, role, description=None):
        """Create a new role; return ``False`` if it already exists.

        ``description`` is accepted for backward compatibility but is not
        stored by the current AuthGroup model.
        """
        new_group = AuthGroup(role=role, creator=self.client)
        try:
            new_group.save()
            return True
        except NotUniqueError:
            return False
    def del_role(self, role):
        """Delete a role; return ``True`` only when it existed and was removed."""
        target = AuthGroup.objects(role=role, creator=self.client).first()
        if not target:
            return False
        target.delete()
        return True
    def add_membership(self, user, role):
        """Make *user* a member of *role*; return ``False`` if the role is unknown."""
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_group:
            return False
        membership = AuthMembership.objects(user=user, creator=self.client).first()
        if not membership:
            membership = AuthMembership(user=user, creator=self.client)
        if role not in [group.role for group in membership.groups]:
            membership.groups.append(target_group)
            membership.save()
        return True
    def del_membership(self, user, role):
        """Remove *user* from *role*; idempotent, always returns ``True``."""
        if not self.has_membership(user, role):
            return True
        record = AuthMembership.objects(creator=self.client, user=user).first()
        if not record:
            return True
        # Rebuild instead of remove()-ing while iterating: mutating a list
        # during iteration skips elements and misses duplicate entries.
        record.groups = [group for group in record.groups if group.role != role]
        record.save()
        return True
    def has_membership(self, user, role):
        """Return ``True`` when *user* is a member of *role*."""
        record = AuthMembership.objects(creator=self.client, user=user).first()
        if record:
            return role in [group.role for group in record.groups]
        return False
    def add_permission(self, role, name):
        """Grant permission *name* to *role*; return ``False`` if role unknown."""
        if self.has_permission(role, name):
            return True
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_group:
            return False
        # Scope the upsert query to this client so two clients with a
        # permission of the same name do not share one document.
        AuthPermission.objects(name=name, creator=self.client).update(
            add_to_set__groups=[target_group], upsert=True
        )
        return True
    def del_permission(self, role, name):
        """Revoke permission *name* from *role*; idempotent.

        NOTE(review): this deletes the whole permission document, revoking
        it from every group that shares it — confirm that is intended.
        """
        if not self.has_permission(role, name):
            return True
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        target = AuthPermission.objects(groups=target_group, name=name, creator=self.client).first()
        if not target:
            return True
        target.delete()
        return True
    def has_permission(self, role, name):
        """Return ``True`` when *role* holds permission *name*."""
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_group:
            return False
        target = AuthPermission.objects(groups=target_group, name=name, creator=self.client).first()
        return target is not None
    def user_has_permission(self, user, name):
        """Return ``True`` when any of *user*'s roles holds permission *name*."""
        record = AuthMembership.objects(creator=self.client, user=user).first()
        if not record:
            return False
        return any(self.has_permission(group.role, name) for group in record.groups)
|
ourway/auth
|
auth/CAS/authorization.py
|
Authorization.which_users_can
|
python
|
def which_users_can(self, name):
_roles = self.which_roles_can(name)
result = [self.get_role_members(i.get('role')) for i in _roles]
return result
|
Which role can SendMail?
|
train
|
https://github.com/ourway/auth/blob/f0d9676854dcec494add4fa086a9b2a3e4d8cea5/auth/CAS/authorization.py#L73-L77
|
[
"def which_roles_can(self, name):\n \"\"\"Which role can SendMail? \"\"\"\n targetPermissionRecords = AuthPermission.objects(creator=self.client, name=name).first()\n return [{'role': group.role} for group in targetPermissionRecords.groups]\n"
] |
class Authorization(object):
    """Role-based access control backed by MongoDB via mongoengine.

    Every query is scoped to ``self.client`` so several tenants can share
    the same collections without seeing each other's roles, memberships
    or permissions.
    """
    def __init__(self, client):
        """Initialize Authorization for one tenant.

        :param client: identifier of the owning client/tenant (string).
        """
        self.client = client
        make_db_connection()
    @property
    def roles(self):
        """Return all roles of this client as a list of ``{'role': ...}`` dicts."""
        groups = AuthGroup.objects(creator=self.client).only('role')
        return json.loads(groups.to_json())
    def get_permissions(self, role):
        """Return the permissions granted to *role* as a list of dicts.

        Bug fix: the original returned the *string* ``'[]'`` for an unknown
        role, inconsistent with the list produced by ``json.loads`` on the
        success path; callers now always receive a list.
        """
        target_role = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_role:
            return []
        perms = AuthPermission.objects(groups=target_role, creator=self.client).only('name')
        return json.loads(perms.to_json())
    def get_user_permissions(self, user):
        """Return every permission *user* holds through group membership."""
        membership_records = AuthMembership.objects(creator=self.client, user=user).only('groups')
        results = []
        for record in membership_records:
            for group in record.groups:
                permissions = AuthPermission.objects(creator=self.client,
                                                     groups=group).only('name')
                for permission in permissions:
                    results.append({'name': permission.name})
        return results
    def get_user_roles(self, user):
        """Return the roles *user* belongs to as a list of ``{'role': ...}`` dicts."""
        membership_records = AuthMembership.objects(creator=self.client, user=user).only('groups')
        return [{'role': group.role}
                for record in membership_records
                for group in record.groups]
    def get_role_members(self, role):
        """Return the users that are members of *role*."""
        role_groups = AuthGroup.objects(creator=self.client, role=role)
        members = AuthMembership.objects(groups__in=role_groups).only('user')
        return json.loads(members.to_json())
    def which_roles_can(self, name):
        """Return the roles that hold the permission *name*.

        Bug fix: the original dereferenced ``.groups`` on the result of
        ``.first()`` without a ``None`` check and raised AttributeError
        for an unknown permission; now returns an empty list instead.
        """
        record = AuthPermission.objects(creator=self.client, name=name).first()
        if not record:
            return []
        return [{'role': group.role} for group in record.groups]
    def get_role(self, role):
        """Return the AuthGroup document for *role*, or ``None`` if absent."""
        return AuthGroup.objects(role=role, creator=self.client).first()
    def add_role(self, role, description=None):
        """Create a new role; return ``False`` if it already exists.

        ``description`` is accepted for backward compatibility but is not
        stored by the current AuthGroup model.
        """
        new_group = AuthGroup(role=role, creator=self.client)
        try:
            new_group.save()
            return True
        except NotUniqueError:
            return False
    def del_role(self, role):
        """Delete a role; return ``True`` only when it existed and was removed."""
        target = AuthGroup.objects(role=role, creator=self.client).first()
        if not target:
            return False
        target.delete()
        return True
    def add_membership(self, user, role):
        """Make *user* a member of *role*; return ``False`` if the role is unknown."""
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_group:
            return False
        membership = AuthMembership.objects(user=user, creator=self.client).first()
        if not membership:
            membership = AuthMembership(user=user, creator=self.client)
        if role not in [group.role for group in membership.groups]:
            membership.groups.append(target_group)
            membership.save()
        return True
    def del_membership(self, user, role):
        """Remove *user* from *role*; idempotent, always returns ``True``."""
        if not self.has_membership(user, role):
            return True
        record = AuthMembership.objects(creator=self.client, user=user).first()
        if not record:
            return True
        # Rebuild instead of remove()-ing while iterating: mutating a list
        # during iteration skips elements and misses duplicate entries.
        record.groups = [group for group in record.groups if group.role != role]
        record.save()
        return True
    def has_membership(self, user, role):
        """Return ``True`` when *user* is a member of *role*."""
        record = AuthMembership.objects(creator=self.client, user=user).first()
        if record:
            return role in [group.role for group in record.groups]
        return False
    def add_permission(self, role, name):
        """Grant permission *name* to *role*; return ``False`` if role unknown."""
        if self.has_permission(role, name):
            return True
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_group:
            return False
        # Scope the upsert query to this client so two clients with a
        # permission of the same name do not share one document.
        AuthPermission.objects(name=name, creator=self.client).update(
            add_to_set__groups=[target_group], upsert=True
        )
        return True
    def del_permission(self, role, name):
        """Revoke permission *name* from *role*; idempotent.

        NOTE(review): this deletes the whole permission document, revoking
        it from every group that shares it — confirm that is intended.
        """
        if not self.has_permission(role, name):
            return True
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        target = AuthPermission.objects(groups=target_group, name=name, creator=self.client).first()
        if not target:
            return True
        target.delete()
        return True
    def has_permission(self, role, name):
        """Return ``True`` when *role* holds permission *name*."""
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_group:
            return False
        target = AuthPermission.objects(groups=target_group, name=name, creator=self.client).first()
        return target is not None
    def user_has_permission(self, user, name):
        """Return ``True`` when any of *user*'s roles holds permission *name*."""
        record = AuthMembership.objects(creator=self.client, user=user).first()
        if not record:
            return False
        return any(self.has_permission(group.role, name) for group in record.groups)
|
ourway/auth
|
auth/CAS/authorization.py
|
Authorization.get_role
|
python
|
def get_role(self, role):
role = AuthGroup.objects(role=role, creator=self.client).first()
return role
|
Returns a role object
|
train
|
https://github.com/ourway/auth/blob/f0d9676854dcec494add4fa086a9b2a3e4d8cea5/auth/CAS/authorization.py#L79-L83
| null |
class Authorization(object):
    """Role-based access control backed by MongoDB via mongoengine.

    Every query is scoped to ``self.client`` so several tenants can share
    the same collections without seeing each other's roles, memberships
    or permissions.
    """
    def __init__(self, client):
        """Initialize Authorization for one tenant.

        :param client: identifier of the owning client/tenant (string).
        """
        self.client = client
        make_db_connection()
    @property
    def roles(self):
        """Return all roles of this client as a list of ``{'role': ...}`` dicts."""
        groups = AuthGroup.objects(creator=self.client).only('role')
        return json.loads(groups.to_json())
    def get_permissions(self, role):
        """Return the permissions granted to *role* as a list of dicts.

        Bug fix: the original returned the *string* ``'[]'`` for an unknown
        role, inconsistent with the list produced by ``json.loads`` on the
        success path; callers now always receive a list.
        """
        target_role = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_role:
            return []
        perms = AuthPermission.objects(groups=target_role, creator=self.client).only('name')
        return json.loads(perms.to_json())
    def get_user_permissions(self, user):
        """Return every permission *user* holds through group membership."""
        membership_records = AuthMembership.objects(creator=self.client, user=user).only('groups')
        results = []
        for record in membership_records:
            for group in record.groups:
                permissions = AuthPermission.objects(creator=self.client,
                                                     groups=group).only('name')
                for permission in permissions:
                    results.append({'name': permission.name})
        return results
    def get_user_roles(self, user):
        """Return the roles *user* belongs to as a list of ``{'role': ...}`` dicts."""
        membership_records = AuthMembership.objects(creator=self.client, user=user).only('groups')
        return [{'role': group.role}
                for record in membership_records
                for group in record.groups]
    def get_role_members(self, role):
        """Return the users that are members of *role*."""
        role_groups = AuthGroup.objects(creator=self.client, role=role)
        members = AuthMembership.objects(groups__in=role_groups).only('user')
        return json.loads(members.to_json())
    def which_roles_can(self, name):
        """Return the roles that hold the permission *name*.

        Bug fix: the original dereferenced ``.groups`` on the result of
        ``.first()`` without a ``None`` check and raised AttributeError
        for an unknown permission; now returns an empty list instead.
        """
        record = AuthPermission.objects(creator=self.client, name=name).first()
        if not record:
            return []
        return [{'role': group.role} for group in record.groups]
    def which_users_can(self, name):
        """Return, per role, the users that hold the permission *name*."""
        return [self.get_role_members(item.get('role'))
                for item in self.which_roles_can(name)]
    def add_role(self, role, description=None):
        """Create a new role; return ``False`` if it already exists.

        ``description`` is accepted for backward compatibility but is not
        stored by the current AuthGroup model.
        """
        new_group = AuthGroup(role=role, creator=self.client)
        try:
            new_group.save()
            return True
        except NotUniqueError:
            return False
    def del_role(self, role):
        """Delete a role; return ``True`` only when it existed and was removed."""
        target = AuthGroup.objects(role=role, creator=self.client).first()
        if not target:
            return False
        target.delete()
        return True
    def add_membership(self, user, role):
        """Make *user* a member of *role*; return ``False`` if the role is unknown."""
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_group:
            return False
        membership = AuthMembership.objects(user=user, creator=self.client).first()
        if not membership:
            membership = AuthMembership(user=user, creator=self.client)
        if role not in [group.role for group in membership.groups]:
            membership.groups.append(target_group)
            membership.save()
        return True
    def del_membership(self, user, role):
        """Remove *user* from *role*; idempotent, always returns ``True``."""
        if not self.has_membership(user, role):
            return True
        record = AuthMembership.objects(creator=self.client, user=user).first()
        if not record:
            return True
        # Rebuild instead of remove()-ing while iterating: mutating a list
        # during iteration skips elements and misses duplicate entries.
        record.groups = [group for group in record.groups if group.role != role]
        record.save()
        return True
    def has_membership(self, user, role):
        """Return ``True`` when *user* is a member of *role*."""
        record = AuthMembership.objects(creator=self.client, user=user).first()
        if record:
            return role in [group.role for group in record.groups]
        return False
    def add_permission(self, role, name):
        """Grant permission *name* to *role*; return ``False`` if role unknown."""
        if self.has_permission(role, name):
            return True
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_group:
            return False
        # Scope the upsert query to this client so two clients with a
        # permission of the same name do not share one document.
        AuthPermission.objects(name=name, creator=self.client).update(
            add_to_set__groups=[target_group], upsert=True
        )
        return True
    def del_permission(self, role, name):
        """Revoke permission *name* from *role*; idempotent.

        NOTE(review): this deletes the whole permission document, revoking
        it from every group that shares it — confirm that is intended.
        """
        if not self.has_permission(role, name):
            return True
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        target = AuthPermission.objects(groups=target_group, name=name, creator=self.client).first()
        if not target:
            return True
        target.delete()
        return True
    def has_permission(self, role, name):
        """Return ``True`` when *role* holds permission *name*."""
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_group:
            return False
        target = AuthPermission.objects(groups=target_group, name=name, creator=self.client).first()
        return target is not None
    def user_has_permission(self, user, name):
        """Return ``True`` when any of *user*'s roles holds permission *name*."""
        record = AuthMembership.objects(creator=self.client, user=user).first()
        if not record:
            return False
        return any(self.has_permission(group.role, name) for group in record.groups)
|
ourway/auth
|
auth/CAS/authorization.py
|
Authorization.add_role
|
python
|
def add_role(self, role, description=None):
new_group = AuthGroup(role=role, creator=self.client)
try:
new_group.save()
return True
except NotUniqueError:
return False
|
Creates a new group
|
train
|
https://github.com/ourway/auth/blob/f0d9676854dcec494add4fa086a9b2a3e4d8cea5/auth/CAS/authorization.py#L85-L92
| null |
class Authorization(object):
    """Role-based access control backed by MongoDB via mongoengine.

    Every query is scoped to ``self.client`` so several tenants can share
    the same collections without seeing each other's roles, memberships
    or permissions.
    """
    def __init__(self, client):
        """Initialize Authorization for one tenant.

        :param client: identifier of the owning client/tenant (string).
        """
        self.client = client
        make_db_connection()
    @property
    def roles(self):
        """Return all roles of this client as a list of ``{'role': ...}`` dicts."""
        groups = AuthGroup.objects(creator=self.client).only('role')
        return json.loads(groups.to_json())
    def get_permissions(self, role):
        """Return the permissions granted to *role* as a list of dicts.

        Bug fix: the original returned the *string* ``'[]'`` for an unknown
        role, inconsistent with the list produced by ``json.loads`` on the
        success path; callers now always receive a list.
        """
        target_role = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_role:
            return []
        perms = AuthPermission.objects(groups=target_role, creator=self.client).only('name')
        return json.loads(perms.to_json())
    def get_user_permissions(self, user):
        """Return every permission *user* holds through group membership."""
        membership_records = AuthMembership.objects(creator=self.client, user=user).only('groups')
        results = []
        for record in membership_records:
            for group in record.groups:
                permissions = AuthPermission.objects(creator=self.client,
                                                     groups=group).only('name')
                for permission in permissions:
                    results.append({'name': permission.name})
        return results
    def get_user_roles(self, user):
        """Return the roles *user* belongs to as a list of ``{'role': ...}`` dicts."""
        membership_records = AuthMembership.objects(creator=self.client, user=user).only('groups')
        return [{'role': group.role}
                for record in membership_records
                for group in record.groups]
    def get_role_members(self, role):
        """Return the users that are members of *role*."""
        role_groups = AuthGroup.objects(creator=self.client, role=role)
        members = AuthMembership.objects(groups__in=role_groups).only('user')
        return json.loads(members.to_json())
    def which_roles_can(self, name):
        """Return the roles that hold the permission *name*.

        Bug fix: the original dereferenced ``.groups`` on the result of
        ``.first()`` without a ``None`` check and raised AttributeError
        for an unknown permission; now returns an empty list instead.
        """
        record = AuthPermission.objects(creator=self.client, name=name).first()
        if not record:
            return []
        return [{'role': group.role} for group in record.groups]
    def which_users_can(self, name):
        """Return, per role, the users that hold the permission *name*."""
        return [self.get_role_members(item.get('role'))
                for item in self.which_roles_can(name)]
    def get_role(self, role):
        """Return the AuthGroup document for *role*, or ``None`` if absent."""
        return AuthGroup.objects(role=role, creator=self.client).first()
    def del_role(self, role):
        """Delete a role; return ``True`` only when it existed and was removed."""
        target = AuthGroup.objects(role=role, creator=self.client).first()
        if not target:
            return False
        target.delete()
        return True
    def add_membership(self, user, role):
        """Make *user* a member of *role*; return ``False`` if the role is unknown."""
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_group:
            return False
        membership = AuthMembership.objects(user=user, creator=self.client).first()
        if not membership:
            membership = AuthMembership(user=user, creator=self.client)
        if role not in [group.role for group in membership.groups]:
            membership.groups.append(target_group)
            membership.save()
        return True
    def del_membership(self, user, role):
        """Remove *user* from *role*; idempotent, always returns ``True``."""
        if not self.has_membership(user, role):
            return True
        record = AuthMembership.objects(creator=self.client, user=user).first()
        if not record:
            return True
        # Rebuild instead of remove()-ing while iterating: mutating a list
        # during iteration skips elements and misses duplicate entries.
        record.groups = [group for group in record.groups if group.role != role]
        record.save()
        return True
    def has_membership(self, user, role):
        """Return ``True`` when *user* is a member of *role*."""
        record = AuthMembership.objects(creator=self.client, user=user).first()
        if record:
            return role in [group.role for group in record.groups]
        return False
    def add_permission(self, role, name):
        """Grant permission *name* to *role*; return ``False`` if role unknown."""
        if self.has_permission(role, name):
            return True
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_group:
            return False
        # Scope the upsert query to this client so two clients with a
        # permission of the same name do not share one document.
        AuthPermission.objects(name=name, creator=self.client).update(
            add_to_set__groups=[target_group], upsert=True
        )
        return True
    def del_permission(self, role, name):
        """Revoke permission *name* from *role*; idempotent.

        NOTE(review): this deletes the whole permission document, revoking
        it from every group that shares it — confirm that is intended.
        """
        if not self.has_permission(role, name):
            return True
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        target = AuthPermission.objects(groups=target_group, name=name, creator=self.client).first()
        if not target:
            return True
        target.delete()
        return True
    def has_permission(self, role, name):
        """Return ``True`` when *role* holds permission *name*."""
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_group:
            return False
        target = AuthPermission.objects(groups=target_group, name=name, creator=self.client).first()
        return target is not None
    def user_has_permission(self, user, name):
        """Return ``True`` when any of *user*'s roles holds permission *name*."""
        record = AuthMembership.objects(creator=self.client, user=user).first()
        if not record:
            return False
        return any(self.has_permission(group.role, name) for group in record.groups)
|
ourway/auth
|
auth/CAS/authorization.py
|
Authorization.del_role
|
python
|
def del_role(self, role):
target = AuthGroup.objects(role=role, creator=self.client).first()
if target:
target.delete()
return True
else:
return False
|
deletes a group
|
train
|
https://github.com/ourway/auth/blob/f0d9676854dcec494add4fa086a9b2a3e4d8cea5/auth/CAS/authorization.py#L94-L101
| null |
class Authorization(object):
    """Role-based access control backed by MongoDB via mongoengine.

    Every query is scoped to ``self.client`` so several tenants can share
    the same collections without seeing each other's roles, memberships
    or permissions.
    """
    def __init__(self, client):
        """Initialize Authorization for one tenant.

        :param client: identifier of the owning client/tenant (string).
        """
        self.client = client
        make_db_connection()
    @property
    def roles(self):
        """Return all roles of this client as a list of ``{'role': ...}`` dicts."""
        groups = AuthGroup.objects(creator=self.client).only('role')
        return json.loads(groups.to_json())
    def get_permissions(self, role):
        """Return the permissions granted to *role* as a list of dicts.

        Bug fix: the original returned the *string* ``'[]'`` for an unknown
        role, inconsistent with the list produced by ``json.loads`` on the
        success path; callers now always receive a list.
        """
        target_role = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_role:
            return []
        perms = AuthPermission.objects(groups=target_role, creator=self.client).only('name')
        return json.loads(perms.to_json())
    def get_user_permissions(self, user):
        """Return every permission *user* holds through group membership."""
        membership_records = AuthMembership.objects(creator=self.client, user=user).only('groups')
        results = []
        for record in membership_records:
            for group in record.groups:
                permissions = AuthPermission.objects(creator=self.client,
                                                     groups=group).only('name')
                for permission in permissions:
                    results.append({'name': permission.name})
        return results
    def get_user_roles(self, user):
        """Return the roles *user* belongs to as a list of ``{'role': ...}`` dicts."""
        membership_records = AuthMembership.objects(creator=self.client, user=user).only('groups')
        return [{'role': group.role}
                for record in membership_records
                for group in record.groups]
    def get_role_members(self, role):
        """Return the users that are members of *role*."""
        role_groups = AuthGroup.objects(creator=self.client, role=role)
        members = AuthMembership.objects(groups__in=role_groups).only('user')
        return json.loads(members.to_json())
    def which_roles_can(self, name):
        """Return the roles that hold the permission *name*.

        Bug fix: the original dereferenced ``.groups`` on the result of
        ``.first()`` without a ``None`` check and raised AttributeError
        for an unknown permission; now returns an empty list instead.
        """
        record = AuthPermission.objects(creator=self.client, name=name).first()
        if not record:
            return []
        return [{'role': group.role} for group in record.groups]
    def which_users_can(self, name):
        """Return, per role, the users that hold the permission *name*."""
        return [self.get_role_members(item.get('role'))
                for item in self.which_roles_can(name)]
    def get_role(self, role):
        """Return the AuthGroup document for *role*, or ``None`` if absent."""
        return AuthGroup.objects(role=role, creator=self.client).first()
    def add_role(self, role, description=None):
        """Create a new role; return ``False`` if it already exists.

        ``description`` is accepted for backward compatibility but is not
        stored by the current AuthGroup model.
        """
        new_group = AuthGroup(role=role, creator=self.client)
        try:
            new_group.save()
            return True
        except NotUniqueError:
            return False
    def add_membership(self, user, role):
        """Make *user* a member of *role*; return ``False`` if the role is unknown."""
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_group:
            return False
        membership = AuthMembership.objects(user=user, creator=self.client).first()
        if not membership:
            membership = AuthMembership(user=user, creator=self.client)
        if role not in [group.role for group in membership.groups]:
            membership.groups.append(target_group)
            membership.save()
        return True
    def del_membership(self, user, role):
        """Remove *user* from *role*; idempotent, always returns ``True``."""
        if not self.has_membership(user, role):
            return True
        record = AuthMembership.objects(creator=self.client, user=user).first()
        if not record:
            return True
        # Rebuild instead of remove()-ing while iterating: mutating a list
        # during iteration skips elements and misses duplicate entries.
        record.groups = [group for group in record.groups if group.role != role]
        record.save()
        return True
    def has_membership(self, user, role):
        """Return ``True`` when *user* is a member of *role*."""
        record = AuthMembership.objects(creator=self.client, user=user).first()
        if record:
            return role in [group.role for group in record.groups]
        return False
    def add_permission(self, role, name):
        """Grant permission *name* to *role*; return ``False`` if role unknown."""
        if self.has_permission(role, name):
            return True
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_group:
            return False
        # Scope the upsert query to this client so two clients with a
        # permission of the same name do not share one document.
        AuthPermission.objects(name=name, creator=self.client).update(
            add_to_set__groups=[target_group], upsert=True
        )
        return True
    def del_permission(self, role, name):
        """Revoke permission *name* from *role*; idempotent.

        NOTE(review): this deletes the whole permission document, revoking
        it from every group that shares it — confirm that is intended.
        """
        if not self.has_permission(role, name):
            return True
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        target = AuthPermission.objects(groups=target_group, name=name, creator=self.client).first()
        if not target:
            return True
        target.delete()
        return True
    def has_permission(self, role, name):
        """Return ``True`` when *role* holds permission *name*."""
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_group:
            return False
        target = AuthPermission.objects(groups=target_group, name=name, creator=self.client).first()
        return target is not None
    def user_has_permission(self, user, name):
        """Return ``True`` when any of *user*'s roles holds permission *name*."""
        record = AuthMembership.objects(creator=self.client, user=user).first()
        if not record:
            return False
        return any(self.has_permission(group.role, name) for group in record.groups)
|
ourway/auth
|
auth/CAS/authorization.py
|
Authorization.add_membership
|
python
|
def add_membership(self, user, role):
targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
if not targetGroup:
return False
target = AuthMembership.objects(user=user, creator=self.client).first()
if not target:
target = AuthMembership(user=user, creator=self.client)
if not role in [i.role for i in target.groups]:
target.groups.append(targetGroup)
target.save()
return True
|
make user a member of a group
|
train
|
https://github.com/ourway/auth/blob/f0d9676854dcec494add4fa086a9b2a3e4d8cea5/auth/CAS/authorization.py#L103-L116
| null |
class Authorization(object):
    """Role-based access control backed by MongoDB via mongoengine.

    Every query is scoped to ``self.client`` so several tenants can share
    the same collections without seeing each other's roles, memberships
    or permissions.
    """
    def __init__(self, client):
        """Initialize Authorization for one tenant.

        :param client: identifier of the owning client/tenant (string).
        """
        self.client = client
        make_db_connection()
    @property
    def roles(self):
        """Return all roles of this client as a list of ``{'role': ...}`` dicts."""
        groups = AuthGroup.objects(creator=self.client).only('role')
        return json.loads(groups.to_json())
    def get_permissions(self, role):
        """Return the permissions granted to *role* as a list of dicts.

        Bug fix: the original returned the *string* ``'[]'`` for an unknown
        role, inconsistent with the list produced by ``json.loads`` on the
        success path; callers now always receive a list.
        """
        target_role = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_role:
            return []
        perms = AuthPermission.objects(groups=target_role, creator=self.client).only('name')
        return json.loads(perms.to_json())
    def get_user_permissions(self, user):
        """Return every permission *user* holds through group membership."""
        membership_records = AuthMembership.objects(creator=self.client, user=user).only('groups')
        results = []
        for record in membership_records:
            for group in record.groups:
                permissions = AuthPermission.objects(creator=self.client,
                                                     groups=group).only('name')
                for permission in permissions:
                    results.append({'name': permission.name})
        return results
    def get_user_roles(self, user):
        """Return the roles *user* belongs to as a list of ``{'role': ...}`` dicts."""
        membership_records = AuthMembership.objects(creator=self.client, user=user).only('groups')
        return [{'role': group.role}
                for record in membership_records
                for group in record.groups]
    def get_role_members(self, role):
        """Return the users that are members of *role*."""
        role_groups = AuthGroup.objects(creator=self.client, role=role)
        members = AuthMembership.objects(groups__in=role_groups).only('user')
        return json.loads(members.to_json())
    def which_roles_can(self, name):
        """Return the roles that hold the permission *name*.

        Bug fix: the original dereferenced ``.groups`` on the result of
        ``.first()`` without a ``None`` check and raised AttributeError
        for an unknown permission; now returns an empty list instead.
        """
        record = AuthPermission.objects(creator=self.client, name=name).first()
        if not record:
            return []
        return [{'role': group.role} for group in record.groups]
    def which_users_can(self, name):
        """Return, per role, the users that hold the permission *name*."""
        return [self.get_role_members(item.get('role'))
                for item in self.which_roles_can(name)]
    def get_role(self, role):
        """Return the AuthGroup document for *role*, or ``None`` if absent."""
        return AuthGroup.objects(role=role, creator=self.client).first()
    def add_role(self, role, description=None):
        """Create a new role; return ``False`` if it already exists.

        ``description`` is accepted for backward compatibility but is not
        stored by the current AuthGroup model.
        """
        new_group = AuthGroup(role=role, creator=self.client)
        try:
            new_group.save()
            return True
        except NotUniqueError:
            return False
    def del_role(self, role):
        """Delete a role; return ``True`` only when it existed and was removed."""
        target = AuthGroup.objects(role=role, creator=self.client).first()
        if not target:
            return False
        target.delete()
        return True
    def del_membership(self, user, role):
        """Remove *user* from *role*; idempotent, always returns ``True``."""
        if not self.has_membership(user, role):
            return True
        record = AuthMembership.objects(creator=self.client, user=user).first()
        if not record:
            return True
        # Rebuild instead of remove()-ing while iterating: mutating a list
        # during iteration skips elements and misses duplicate entries.
        record.groups = [group for group in record.groups if group.role != role]
        record.save()
        return True
    def has_membership(self, user, role):
        """Return ``True`` when *user* is a member of *role*."""
        record = AuthMembership.objects(creator=self.client, user=user).first()
        if record:
            return role in [group.role for group in record.groups]
        return False
    def add_permission(self, role, name):
        """Grant permission *name* to *role*; return ``False`` if role unknown."""
        if self.has_permission(role, name):
            return True
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_group:
            return False
        # Scope the upsert query to this client so two clients with a
        # permission of the same name do not share one document.
        AuthPermission.objects(name=name, creator=self.client).update(
            add_to_set__groups=[target_group], upsert=True
        )
        return True
    def del_permission(self, role, name):
        """Revoke permission *name* from *role*; idempotent.

        NOTE(review): this deletes the whole permission document, revoking
        it from every group that shares it — confirm that is intended.
        """
        if not self.has_permission(role, name):
            return True
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        target = AuthPermission.objects(groups=target_group, name=name, creator=self.client).first()
        if not target:
            return True
        target.delete()
        return True
    def has_permission(self, role, name):
        """Return ``True`` when *role* holds permission *name*."""
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_group:
            return False
        target = AuthPermission.objects(groups=target_group, name=name, creator=self.client).first()
        return target is not None
    def user_has_permission(self, user, name):
        """Return ``True`` when any of *user*'s roles holds permission *name*."""
        record = AuthMembership.objects(creator=self.client, user=user).first()
        if not record:
            return False
        return any(self.has_permission(group.role, name) for group in record.groups)
|
ourway/auth
|
auth/CAS/authorization.py
|
Authorization.del_membership
|
python
|
def del_membership(self, user, role):
if not self.has_membership(user, role):
return True
targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
if not targetRecord:
return True
for group in targetRecord.groups:
if group.role==role:
targetRecord.groups.remove(group)
targetRecord.save()
return True
|
remove a user's membership from a group
|
train
|
https://github.com/ourway/auth/blob/f0d9676854dcec494add4fa086a9b2a3e4d8cea5/auth/CAS/authorization.py#L119-L130
|
[
"def has_membership(self, user, role):\n \"\"\" checks if user is member of a group\"\"\"\n targetRecord = AuthMembership.objects(creator=self.client, user=user).first()\n if targetRecord:\n return role in [i.role for i in targetRecord.groups]\n return False\n"
] |
class Authorization(object):
    """Role-based access control backed by MongoDB via mongoengine.

    Every query is scoped to ``self.client`` so several tenants can share
    the same collections without seeing each other's roles, memberships
    or permissions.
    """
    def __init__(self, client):
        """Initialize Authorization for one tenant.

        :param client: identifier of the owning client/tenant (string).
        """
        self.client = client
        make_db_connection()
    @property
    def roles(self):
        """Return all roles of this client as a list of ``{'role': ...}`` dicts."""
        groups = AuthGroup.objects(creator=self.client).only('role')
        return json.loads(groups.to_json())
    def get_permissions(self, role):
        """Return the permissions granted to *role* as a list of dicts.

        Bug fix: the original returned the *string* ``'[]'`` for an unknown
        role, inconsistent with the list produced by ``json.loads`` on the
        success path; callers now always receive a list.
        """
        target_role = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_role:
            return []
        perms = AuthPermission.objects(groups=target_role, creator=self.client).only('name')
        return json.loads(perms.to_json())
    def get_user_permissions(self, user):
        """Return every permission *user* holds through group membership."""
        membership_records = AuthMembership.objects(creator=self.client, user=user).only('groups')
        results = []
        for record in membership_records:
            for group in record.groups:
                permissions = AuthPermission.objects(creator=self.client,
                                                     groups=group).only('name')
                for permission in permissions:
                    results.append({'name': permission.name})
        return results
    def get_user_roles(self, user):
        """Return the roles *user* belongs to as a list of ``{'role': ...}`` dicts."""
        membership_records = AuthMembership.objects(creator=self.client, user=user).only('groups')
        return [{'role': group.role}
                for record in membership_records
                for group in record.groups]
    def get_role_members(self, role):
        """Return the users that are members of *role*."""
        role_groups = AuthGroup.objects(creator=self.client, role=role)
        members = AuthMembership.objects(groups__in=role_groups).only('user')
        return json.loads(members.to_json())
    def which_roles_can(self, name):
        """Return the roles that hold the permission *name*.

        Bug fix: the original dereferenced ``.groups`` on the result of
        ``.first()`` without a ``None`` check and raised AttributeError
        for an unknown permission; now returns an empty list instead.
        """
        record = AuthPermission.objects(creator=self.client, name=name).first()
        if not record:
            return []
        return [{'role': group.role} for group in record.groups]
    def which_users_can(self, name):
        """Return, per role, the users that hold the permission *name*."""
        return [self.get_role_members(item.get('role'))
                for item in self.which_roles_can(name)]
    def get_role(self, role):
        """Return the AuthGroup document for *role*, or ``None`` if absent."""
        return AuthGroup.objects(role=role, creator=self.client).first()
    def add_role(self, role, description=None):
        """Create a new role; return ``False`` if it already exists.

        ``description`` is accepted for backward compatibility but is not
        stored by the current AuthGroup model.
        """
        new_group = AuthGroup(role=role, creator=self.client)
        try:
            new_group.save()
            return True
        except NotUniqueError:
            return False
    def del_role(self, role):
        """Delete a role; return ``True`` only when it existed and was removed."""
        target = AuthGroup.objects(role=role, creator=self.client).first()
        if not target:
            return False
        target.delete()
        return True
    def add_membership(self, user, role):
        """Make *user* a member of *role*; return ``False`` if the role is unknown."""
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_group:
            return False
        membership = AuthMembership.objects(user=user, creator=self.client).first()
        if not membership:
            membership = AuthMembership(user=user, creator=self.client)
        if role not in [group.role for group in membership.groups]:
            membership.groups.append(target_group)
            membership.save()
        return True
    def has_membership(self, user, role):
        """Return ``True`` when *user* is a member of *role*."""
        record = AuthMembership.objects(creator=self.client, user=user).first()
        if record:
            return role in [group.role for group in record.groups]
        return False
    def add_permission(self, role, name):
        """Grant permission *name* to *role*; return ``False`` if role unknown."""
        if self.has_permission(role, name):
            return True
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_group:
            return False
        # Scope the upsert query to this client so two clients with a
        # permission of the same name do not share one document.
        AuthPermission.objects(name=name, creator=self.client).update(
            add_to_set__groups=[target_group], upsert=True
        )
        return True
    def del_permission(self, role, name):
        """Revoke permission *name* from *role*; idempotent.

        NOTE(review): this deletes the whole permission document, revoking
        it from every group that shares it — confirm that is intended.
        """
        if not self.has_permission(role, name):
            return True
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        target = AuthPermission.objects(groups=target_group, name=name, creator=self.client).first()
        if not target:
            return True
        target.delete()
        return True
    def has_permission(self, role, name):
        """Return ``True`` when *role* holds permission *name*."""
        target_group = AuthGroup.objects(role=role, creator=self.client).first()
        if not target_group:
            return False
        target = AuthPermission.objects(groups=target_group, name=name, creator=self.client).first()
        return target is not None
    def user_has_permission(self, user, name):
        """Return ``True`` when any of *user*'s roles holds permission *name*."""
        record = AuthMembership.objects(creator=self.client, user=user).first()
        if not record:
            return False
        return any(self.has_permission(group.role, name) for group in record.groups)
|
ourway/auth
|
auth/CAS/authorization.py
|
Authorization.has_membership
|
python
|
def has_membership(self, user, role):
targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
if targetRecord:
return role in [i.role for i in targetRecord.groups]
return False
|
checks if user is member of a group
|
train
|
https://github.com/ourway/auth/blob/f0d9676854dcec494add4fa086a9b2a3e4d8cea5/auth/CAS/authorization.py#L132-L137
| null |
class Authorization(object):
""" Main Authorization class """
def __init__(self, client):
"""Initialize Authorization with a client
@type client: String
"""
self.client = client
make_db_connection()
@property
def roles(self):
"""gets user groups"""
result = AuthGroup.objects(creator=self.client).only('role')
return json.loads(result.to_json())
def get_permissions(self, role):
"""gets permissions of role"""
target_role = AuthGroup.objects(role=role, creator=self.client).first()
if not target_role:
return '[]'
targets = AuthPermission.objects(groups=target_role, creator=self.client).only('name')
return json.loads(targets.to_json())
def get_user_permissions(self, user):
"""get permissions of a user"""
memberShipRecords = AuthMembership.objects(creator=self.client, user=user).only('groups')
results = []
for each in memberShipRecords:
for group in each.groups:
targetPermissionRecords = AuthPermission.objects(creator=self.client,
groups=group).only('name')
for each_permission in targetPermissionRecords:
results.append({'name':each_permission.name})
return results
def get_user_roles(self, user):
"""get permissions of a user"""
memberShipRecords = AuthMembership.objects(creator=self.client, user=user).only('groups')
results = []
for each in memberShipRecords:
for group in each.groups:
results.append({'role':group.role})
return results
def get_role_members(self, role):
"""get permissions of a user"""
targetRoleDb = AuthGroup.objects(creator=self.client, role=role)
members = AuthMembership.objects(groups__in=targetRoleDb).only('user')
return json.loads(members.to_json())
def which_roles_can(self, name):
"""Which role can SendMail? """
targetPermissionRecords = AuthPermission.objects(creator=self.client, name=name).first()
return [{'role': group.role} for group in targetPermissionRecords.groups]
def which_users_can(self, name):
"""Which role can SendMail? """
_roles = self.which_roles_can(name)
result = [self.get_role_members(i.get('role')) for i in _roles]
return result
def get_role(self, role):
"""Returns a role object
"""
role = AuthGroup.objects(role=role, creator=self.client).first()
return role
def add_role(self, role, description=None):
""" Creates a new group """
new_group = AuthGroup(role=role, creator=self.client)
try:
new_group.save()
return True
except NotUniqueError:
return False
def del_role(self, role):
""" deletes a group """
target = AuthGroup.objects(role=role, creator=self.client).first()
if target:
target.delete()
return True
else:
return False
def add_membership(self, user, role):
""" make user a member of a group """
targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
if not targetGroup:
return False
target = AuthMembership.objects(user=user, creator=self.client).first()
if not target:
target = AuthMembership(user=user, creator=self.client)
if not role in [i.role for i in target.groups]:
target.groups.append(targetGroup)
target.save()
return True
def del_membership(self, user, role):
""" dismember user from a group """
if not self.has_membership(user, role):
return True
targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
if not targetRecord:
return True
for group in targetRecord.groups:
if group.role==role:
targetRecord.groups.remove(group)
targetRecord.save()
return True
def add_permission(self, role, name):
""" authorize a group for something """
if self.has_permission(role, name):
return True
targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
if not targetGroup:
return False
# Create or update
permission = AuthPermission.objects(name=name).update(
add_to_set__groups=[targetGroup], creator=self.client, upsert=True
)
return True
def del_permission(self, role, name):
""" revoke authorization of a group """
if not self.has_permission(role, name):
return True
targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
target = AuthPermission.objects(groups=targetGroup, name=name, creator=self.client).first()
if not target:
return True
target.delete()
return True
def has_permission(self, role, name):
""" verify groups authorization """
targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
if not targetGroup:
return False
target = AuthPermission.objects(groups=targetGroup, name=name, creator=self.client).first()
if target:
return True
return False
def user_has_permission(self, user, name):
""" verify user has permission """
targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
if not targetRecord:
return False
for group in targetRecord.groups:
if self.has_permission(group.role, name):
return True
return False
|
ourway/auth
|
auth/CAS/authorization.py
|
Authorization.add_permission
|
python
|
def add_permission(self, role, name):
if self.has_permission(role, name):
return True
targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
if not targetGroup:
return False
# Create or update
permission = AuthPermission.objects(name=name).update(
add_to_set__groups=[targetGroup], creator=self.client, upsert=True
)
return True
|
authorize a group for something
|
train
|
https://github.com/ourway/auth/blob/f0d9676854dcec494add4fa086a9b2a3e4d8cea5/auth/CAS/authorization.py#L140-L151
|
[
"def has_permission(self, role, name):\n \"\"\" verify groups authorization \"\"\"\n targetGroup = AuthGroup.objects(role=role, creator=self.client).first()\n if not targetGroup:\n return False\n target = AuthPermission.objects(groups=targetGroup, name=name, creator=self.client).first()\n if target:\n return True\n return False\n"
] |
class Authorization(object):
""" Main Authorization class """
def __init__(self, client):
"""Initialize Authorization with a client
@type client: String
"""
self.client = client
make_db_connection()
@property
def roles(self):
"""gets user groups"""
result = AuthGroup.objects(creator=self.client).only('role')
return json.loads(result.to_json())
def get_permissions(self, role):
"""gets permissions of role"""
target_role = AuthGroup.objects(role=role, creator=self.client).first()
if not target_role:
return '[]'
targets = AuthPermission.objects(groups=target_role, creator=self.client).only('name')
return json.loads(targets.to_json())
def get_user_permissions(self, user):
"""get permissions of a user"""
memberShipRecords = AuthMembership.objects(creator=self.client, user=user).only('groups')
results = []
for each in memberShipRecords:
for group in each.groups:
targetPermissionRecords = AuthPermission.objects(creator=self.client,
groups=group).only('name')
for each_permission in targetPermissionRecords:
results.append({'name':each_permission.name})
return results
def get_user_roles(self, user):
"""get permissions of a user"""
memberShipRecords = AuthMembership.objects(creator=self.client, user=user).only('groups')
results = []
for each in memberShipRecords:
for group in each.groups:
results.append({'role':group.role})
return results
def get_role_members(self, role):
"""get permissions of a user"""
targetRoleDb = AuthGroup.objects(creator=self.client, role=role)
members = AuthMembership.objects(groups__in=targetRoleDb).only('user')
return json.loads(members.to_json())
def which_roles_can(self, name):
"""Which role can SendMail? """
targetPermissionRecords = AuthPermission.objects(creator=self.client, name=name).first()
return [{'role': group.role} for group in targetPermissionRecords.groups]
def which_users_can(self, name):
"""Which role can SendMail? """
_roles = self.which_roles_can(name)
result = [self.get_role_members(i.get('role')) for i in _roles]
return result
def get_role(self, role):
"""Returns a role object
"""
role = AuthGroup.objects(role=role, creator=self.client).first()
return role
def add_role(self, role, description=None):
""" Creates a new group """
new_group = AuthGroup(role=role, creator=self.client)
try:
new_group.save()
return True
except NotUniqueError:
return False
def del_role(self, role):
""" deletes a group """
target = AuthGroup.objects(role=role, creator=self.client).first()
if target:
target.delete()
return True
else:
return False
def add_membership(self, user, role):
""" make user a member of a group """
targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
if not targetGroup:
return False
target = AuthMembership.objects(user=user, creator=self.client).first()
if not target:
target = AuthMembership(user=user, creator=self.client)
if not role in [i.role for i in target.groups]:
target.groups.append(targetGroup)
target.save()
return True
def del_membership(self, user, role):
""" dismember user from a group """
if not self.has_membership(user, role):
return True
targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
if not targetRecord:
return True
for group in targetRecord.groups:
if group.role==role:
targetRecord.groups.remove(group)
targetRecord.save()
return True
def has_membership(self, user, role):
""" checks if user is member of a group"""
targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
if targetRecord:
return role in [i.role for i in targetRecord.groups]
return False
def del_permission(self, role, name):
""" revoke authorization of a group """
if not self.has_permission(role, name):
return True
targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
target = AuthPermission.objects(groups=targetGroup, name=name, creator=self.client).first()
if not target:
return True
target.delete()
return True
def has_permission(self, role, name):
""" verify groups authorization """
targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
if not targetGroup:
return False
target = AuthPermission.objects(groups=targetGroup, name=name, creator=self.client).first()
if target:
return True
return False
def user_has_permission(self, user, name):
""" verify user has permission """
targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
if not targetRecord:
return False
for group in targetRecord.groups:
if self.has_permission(group.role, name):
return True
return False
|
ourway/auth
|
auth/CAS/authorization.py
|
Authorization.del_permission
|
python
|
def del_permission(self, role, name):
if not self.has_permission(role, name):
return True
targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
target = AuthPermission.objects(groups=targetGroup, name=name, creator=self.client).first()
if not target:
return True
target.delete()
return True
|
revoke authorization of a group
|
train
|
https://github.com/ourway/auth/blob/f0d9676854dcec494add4fa086a9b2a3e4d8cea5/auth/CAS/authorization.py#L153-L162
|
[
"def has_permission(self, role, name):\n \"\"\" verify groups authorization \"\"\"\n targetGroup = AuthGroup.objects(role=role, creator=self.client).first()\n if not targetGroup:\n return False\n target = AuthPermission.objects(groups=targetGroup, name=name, creator=self.client).first()\n if target:\n return True\n return False\n"
] |
class Authorization(object):
""" Main Authorization class """
def __init__(self, client):
"""Initialize Authorization with a client
@type client: String
"""
self.client = client
make_db_connection()
@property
def roles(self):
"""gets user groups"""
result = AuthGroup.objects(creator=self.client).only('role')
return json.loads(result.to_json())
def get_permissions(self, role):
"""gets permissions of role"""
target_role = AuthGroup.objects(role=role, creator=self.client).first()
if not target_role:
return '[]'
targets = AuthPermission.objects(groups=target_role, creator=self.client).only('name')
return json.loads(targets.to_json())
def get_user_permissions(self, user):
"""get permissions of a user"""
memberShipRecords = AuthMembership.objects(creator=self.client, user=user).only('groups')
results = []
for each in memberShipRecords:
for group in each.groups:
targetPermissionRecords = AuthPermission.objects(creator=self.client,
groups=group).only('name')
for each_permission in targetPermissionRecords:
results.append({'name':each_permission.name})
return results
def get_user_roles(self, user):
"""get permissions of a user"""
memberShipRecords = AuthMembership.objects(creator=self.client, user=user).only('groups')
results = []
for each in memberShipRecords:
for group in each.groups:
results.append({'role':group.role})
return results
def get_role_members(self, role):
"""get permissions of a user"""
targetRoleDb = AuthGroup.objects(creator=self.client, role=role)
members = AuthMembership.objects(groups__in=targetRoleDb).only('user')
return json.loads(members.to_json())
def which_roles_can(self, name):
"""Which role can SendMail? """
targetPermissionRecords = AuthPermission.objects(creator=self.client, name=name).first()
return [{'role': group.role} for group in targetPermissionRecords.groups]
def which_users_can(self, name):
"""Which role can SendMail? """
_roles = self.which_roles_can(name)
result = [self.get_role_members(i.get('role')) for i in _roles]
return result
def get_role(self, role):
"""Returns a role object
"""
role = AuthGroup.objects(role=role, creator=self.client).first()
return role
def add_role(self, role, description=None):
""" Creates a new group """
new_group = AuthGroup(role=role, creator=self.client)
try:
new_group.save()
return True
except NotUniqueError:
return False
def del_role(self, role):
""" deletes a group """
target = AuthGroup.objects(role=role, creator=self.client).first()
if target:
target.delete()
return True
else:
return False
def add_membership(self, user, role):
""" make user a member of a group """
targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
if not targetGroup:
return False
target = AuthMembership.objects(user=user, creator=self.client).first()
if not target:
target = AuthMembership(user=user, creator=self.client)
if not role in [i.role for i in target.groups]:
target.groups.append(targetGroup)
target.save()
return True
def del_membership(self, user, role):
""" dismember user from a group """
if not self.has_membership(user, role):
return True
targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
if not targetRecord:
return True
for group in targetRecord.groups:
if group.role==role:
targetRecord.groups.remove(group)
targetRecord.save()
return True
def has_membership(self, user, role):
""" checks if user is member of a group"""
targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
if targetRecord:
return role in [i.role for i in targetRecord.groups]
return False
def add_permission(self, role, name):
""" authorize a group for something """
if self.has_permission(role, name):
return True
targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
if not targetGroup:
return False
# Create or update
permission = AuthPermission.objects(name=name).update(
add_to_set__groups=[targetGroup], creator=self.client, upsert=True
)
return True
def has_permission(self, role, name):
""" verify groups authorization """
targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
if not targetGroup:
return False
target = AuthPermission.objects(groups=targetGroup, name=name, creator=self.client).first()
if target:
return True
return False
def user_has_permission(self, user, name):
""" verify user has permission """
targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
if not targetRecord:
return False
for group in targetRecord.groups:
if self.has_permission(group.role, name):
return True
return False
|
ourway/auth
|
auth/CAS/authorization.py
|
Authorization.user_has_permission
|
python
|
def user_has_permission(self, user, name):
targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
if not targetRecord:
return False
for group in targetRecord.groups:
if self.has_permission(group.role, name):
return True
return False
|
verify user has permission
|
train
|
https://github.com/ourway/auth/blob/f0d9676854dcec494add4fa086a9b2a3e4d8cea5/auth/CAS/authorization.py#L174-L182
| null |
class Authorization(object):
""" Main Authorization class """
def __init__(self, client):
"""Initialize Authorization with a client
@type client: String
"""
self.client = client
make_db_connection()
@property
def roles(self):
"""gets user groups"""
result = AuthGroup.objects(creator=self.client).only('role')
return json.loads(result.to_json())
def get_permissions(self, role):
"""gets permissions of role"""
target_role = AuthGroup.objects(role=role, creator=self.client).first()
if not target_role:
return '[]'
targets = AuthPermission.objects(groups=target_role, creator=self.client).only('name')
return json.loads(targets.to_json())
def get_user_permissions(self, user):
"""get permissions of a user"""
memberShipRecords = AuthMembership.objects(creator=self.client, user=user).only('groups')
results = []
for each in memberShipRecords:
for group in each.groups:
targetPermissionRecords = AuthPermission.objects(creator=self.client,
groups=group).only('name')
for each_permission in targetPermissionRecords:
results.append({'name':each_permission.name})
return results
def get_user_roles(self, user):
"""get permissions of a user"""
memberShipRecords = AuthMembership.objects(creator=self.client, user=user).only('groups')
results = []
for each in memberShipRecords:
for group in each.groups:
results.append({'role':group.role})
return results
def get_role_members(self, role):
"""get permissions of a user"""
targetRoleDb = AuthGroup.objects(creator=self.client, role=role)
members = AuthMembership.objects(groups__in=targetRoleDb).only('user')
return json.loads(members.to_json())
def which_roles_can(self, name):
"""Which role can SendMail? """
targetPermissionRecords = AuthPermission.objects(creator=self.client, name=name).first()
return [{'role': group.role} for group in targetPermissionRecords.groups]
def which_users_can(self, name):
"""Which role can SendMail? """
_roles = self.which_roles_can(name)
result = [self.get_role_members(i.get('role')) for i in _roles]
return result
def get_role(self, role):
"""Returns a role object
"""
role = AuthGroup.objects(role=role, creator=self.client).first()
return role
def add_role(self, role, description=None):
""" Creates a new group """
new_group = AuthGroup(role=role, creator=self.client)
try:
new_group.save()
return True
except NotUniqueError:
return False
def del_role(self, role):
""" deletes a group """
target = AuthGroup.objects(role=role, creator=self.client).first()
if target:
target.delete()
return True
else:
return False
def add_membership(self, user, role):
""" make user a member of a group """
targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
if not targetGroup:
return False
target = AuthMembership.objects(user=user, creator=self.client).first()
if not target:
target = AuthMembership(user=user, creator=self.client)
if not role in [i.role for i in target.groups]:
target.groups.append(targetGroup)
target.save()
return True
def del_membership(self, user, role):
""" dismember user from a group """
if not self.has_membership(user, role):
return True
targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
if not targetRecord:
return True
for group in targetRecord.groups:
if group.role==role:
targetRecord.groups.remove(group)
targetRecord.save()
return True
def has_membership(self, user, role):
""" checks if user is member of a group"""
targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
if targetRecord:
return role in [i.role for i in targetRecord.groups]
return False
def add_permission(self, role, name):
""" authorize a group for something """
if self.has_permission(role, name):
return True
targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
if not targetGroup:
return False
# Create or update
permission = AuthPermission.objects(name=name).update(
add_to_set__groups=[targetGroup], creator=self.client, upsert=True
)
return True
def del_permission(self, role, name):
""" revoke authorization of a group """
if not self.has_permission(role, name):
return True
targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
target = AuthPermission.objects(groups=targetGroup, name=name, creator=self.client).first()
if not target:
return True
target.delete()
return True
def has_permission(self, role, name):
""" verify groups authorization """
targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
if not targetGroup:
return False
target = AuthPermission.objects(groups=targetGroup, name=name, creator=self.client).first()
if target:
return True
return False
|
ourway/auth
|
auth/CAS/models/db.py
|
handler
|
python
|
def handler(event):
def decorator(fn):
def apply(cls):
event.connect(fn, sender=cls)
return cls
fn.apply = apply
return fn
return decorator
|
Signal decorator to allow use of callback functions as class decorators.
|
train
|
https://github.com/ourway/auth/blob/f0d9676854dcec494add4fa086a9b2a3e4d8cea5/auth/CAS/models/db.py#L47-L56
| null |
#!/usr/bin/env python
"""
Workflow:
Creating permissions
First we need some groups, so API user must crete some groups
data = {role:'editors', creator:'my_secret'}
POST /api/authorization/group data
Then client can assign user:
data = {group:'editors', user='rodmena', creator='my_secret'}
POST /api/authorization/membership
Now we can add some permissions to editors group:
data = {name:'can_read_my_posts', creator:'my_secret', group:'editors'}
POST /api/authorization/permission data
Using permissions:
Request:
GET /api/authorization/has_permission/rodmena?name=can_read_my_posts
Response:
{
"result":true
}
"""
__all__ = ['make_db_connection', 'AuthMembership', 'AuthGroup', 'AuthPermission']
import os
import datetime
from mongoengine import *
from mongoengine import signals
def make_db_connection():
mongo_host = os.getenv('MONGO_HOST') or '127.0.0.1'
_mongo_port = os.getenv('MONGO_PORT') or 27017
mongo_port = int(_mongo_port)
connect('Authorization_0x0199', host=mongo_host, port=mongo_port)
@handler(signals.pre_save)
def update_modified(sender, document):
document.modified = datetime.datetime.now()
@update_modified.apply
class AuthGroup(Document):
creator = StringField(max_length=64, required=True)
role = StringField(max_length=32, unique_with='creator', required=True)
description = StringField(max_length=256)
is_active = BooleanField(default=True)
date_created = DateTimeField(default=datetime.datetime.now())
modified = DateTimeField()
def __repr__(self):
return '{}: <{}>'.format(
self.__class__.__name__,
self.role
)
@update_modified.apply
class AuthMembership(Document):
user = StringField(max_length=64, unique_with='creator', required=True)
creator = StringField(max_length=64, required=True)
groups = ListField(ReferenceField(AuthGroup))
is_active = BooleanField(default=True)
date_created = DateTimeField(default=datetime.datetime.now())
modified = DateTimeField()
def __repr__(self):
return '{}: <{}>'.format(
self.__class__.__name__,
self.user
)
'''
AuthPermission:
name: can_read_asset_09a8sd08asd09as8d0as
group: reference to group
existance of a record means there is permission.
'''
@update_modified.apply
class AuthPermission(Document):
name = StringField(max_length=64, unique_with='creator', required=True)
creator = StringField(max_length=64, required=True)
groups = ListField(ReferenceField(AuthGroup, required=True))
is_active = BooleanField(default=True)
date_created = DateTimeField(default=datetime.datetime.now())
modified = DateTimeField()
def __repr__(self):
return '{}: <{}>'.format(
self.__class__.__name__,
self.name
)
|
ourway/auth
|
auth/CAS/REST/service.py
|
stringify
|
python
|
def stringify(req, resp):
if isinstance(resp.body, dict):
try:
resp.body = json.dumps(resp.body)
except(nameError):
resp.status = falcon.HTTP_500
|
dumps all valid jsons
This is the latest after hook
|
train
|
https://github.com/ourway/auth/blob/f0d9676854dcec494add4fa086a9b2a3e4d8cea5/auth/CAS/REST/service.py#L71-L80
| null |
#try:
# import eventlet
# eventlet.monkey_patch()
#except:
# pass
import falcon
import json
try:
import ujson as json
except ImportError:
pass
from auth.CAS.authorization import Authorization
class AuthComponent(object):
def process_request(self, req, resp):
"""Process the request before routing it.
Args:
req: Request object that will eventually be
routed to an on_* responder method.
resp: Response object that will be routed to
the on_* responder.
"""
def process_resource(self, req, resp, resource, params):
"""Process the request after routing.
Note:
This method is only called when the request matches
a route to a resource.
Args:
req: Request object that will be passed to the
routed responder.
resp: Response object that will be passed to the
responder.
resource: Resource object to which the request was
routed.
params: A dict-like object representing any additional
params derived from the route's URI template fields,
that will be passed to the resource's responder
method as keyword arguments.
"""
def process_response(self, req, resp, resource):
"""Post-processing of the response (after routing).
Args:
req: Request object.
resp: Response object.
resource: Resource object to which the request was
routed. May be None if no route was found
for the request.
"""
if isinstance(resp.body, dict):
try:
resp.body = json.dumps(resp.body)
except(nameError):
resp.status = falcon.HTTP_500
class Ping:
def on_get(self, req, resp):
"""Handles GET requests"""
resp.body = {'message':'PONG'}
class Membership:
def on_get(self, req, resp, client, user, group):
cas = Authorization(client)
resp.body={'result':False}
if cas.has_membership(user, group):
resp.body={'result':True}
def on_post(self, req, resp, client, user, group):
cas = Authorization(client)
resp.body={'result':False}
if cas.add_membership(user, group):
resp.body={'result':True}
def on_delete(self, req, resp, client, user, group):
cas = Authorization(client)
resp.body={'result':False}
if cas.del_membership(user, group):
resp.body={'result':True}
class Permission:
def on_get(self, req, resp, client, group, name):
cas = Authorization(client)
resp.body={'result':False}
if cas.has_permission(group, name):
resp.body={'result':True}
def on_post(self, req, resp, client, group, name):
cas = Authorization(client)
resp.body={'result':False}
if cas.add_permission(group, name):
resp.body={'result':True}
def on_delete(self, req, resp, client, group, name):
cas = Authorization(client)
resp.body={'result':False}
if cas.del_permission(group, name):
resp.body={'result':True}
class UserPermission:
def on_get(self, req, resp, client, user, name):
cas = Authorization(client)
resp.body={'result':False}
if cas.user_has_permission(user,name):
resp.body={'result':True}
class GetUserPermissions:
def on_get(self, req, resp, client, user):
cas = Authorization(client)
resp.body = {'results': cas.get_user_permissions(user)}
class GetRolePermissions:
def on_get(self, req, resp, client, role):
cas = Authorization(client)
resp.body = {'results': cas.get_permissions(role)}
class GetRoleMembers:
def on_get(self, req, resp, client, role):
cas = Authorization(client)
resp.body = {'result': cas.get_role_members(role)}
class GetUserRoles:
def on_get(self, req, resp, client, user):
cas = Authorization(client)
resp.body = {'result': cas.get_user_roles(user)}
class ListRoles:
def on_get(self, req, resp, client):
cas = Authorization(client)
resp.body = {'result':cas.roles}
class WhichRolesCan:
def on_get(self, req, resp, client, name):
cas = Authorization(client)
resp.body = {'result':cas.which_roles_can(name)}
class WhichUsersCan:
def on_get(self, req, resp, client, name):
cas = Authorization(client)
resp.body = {'result':cas.which_users_can(name)}
class Role:
def on_post(self, req, resp, client, role):
cas = Authorization(client)
resp.body={'result':False}
if cas.add_role(role):
resp.body={'result':True}
def on_delete(self, req, resp, client, group):
cas = Authorization(client)
resp.body={'result':False}
if cas.del_role(group):
resp.body={'result':True}
api = falcon.API(middleware=[AuthComponent()])
api.add_route('/ping', Ping())
api.add_route('/api/membership/{client}/{user}/{group}', Membership()) ## POST DELETE GET
api.add_route('/api/permission/{client}/{group}/{name}', Permission()) ## POST DELETE GET
api.add_route('/api/has_permission/{client}/{user}/{name}', UserPermission()) ## GET
api.add_route('/api/user_permissions/{client}/{user}', GetUserPermissions()) ## GET
api.add_route('/api/role_permissions/{client}/{role}', GetRolePermissions()) ## GET
api.add_route('/api/user_roles/{client}/{user}', GetUserRoles()) ## GET
api.add_route('/api/members/{client}/{role}', GetRoleMembers()) ## GET
api.add_route('/api/role/{client}/{role}', Role()) ## POST DELETE
api.add_route('/api/roles/{client}', ListRoles()) ## GET
api.add_route('/api/which_roles_can/{client}/{name}', WhichRolesCan()) ## GET
api.add_route('/api/which_users_can/{client}/{name}', WhichUsersCan()) ## GET
|
ourway/auth
|
auth/CAS/REST/service.py
|
AuthComponent.process_response
|
python
|
def process_response(self, req, resp, resource):
if isinstance(resp.body, dict):
try:
resp.body = json.dumps(resp.body)
except(nameError):
resp.status = falcon.HTTP_500
|
Post-processing of the response (after routing).
Args:
req: Request object.
resp: Response object.
resource: Resource object to which the request was
routed. May be None if no route was found
for the request.
|
train
|
https://github.com/ourway/auth/blob/f0d9676854dcec494add4fa086a9b2a3e4d8cea5/auth/CAS/REST/service.py#L49-L63
| null |
class AuthComponent(object):
def process_request(self, req, resp):
"""Process the request before routing it.
Args:
req: Request object that will eventually be
routed to an on_* responder method.
resp: Response object that will be routed to
the on_* responder.
"""
def process_resource(self, req, resp, resource, params):
"""Process the request after routing.
Note:
This method is only called when the request matches
a route to a resource.
Args:
req: Request object that will be passed to the
routed responder.
resp: Response object that will be passed to the
responder.
resource: Resource object to which the request was
routed.
params: A dict-like object representing any additional
params derived from the route's URI template fields,
that will be passed to the resource's responder
method as keyword arguments.
"""
|
storax/jinjaapidoc
|
src/jinjaapidoc/gendoc.py
|
prepare_dir
|
python
|
def prepare_dir(app, directory, delete=False):
logger.info("Preparing output directories for jinjaapidoc.")
if os.path.exists(directory):
if delete:
logger.debug("Deleting dir %s", directory)
shutil.rmtree(directory)
logger.debug("Creating dir %s", directory)
os.mkdir(directory)
else:
logger.debug("Creating %s", directory)
os.mkdir(directory)
|
Create apidoc dir, delete contents if delete is True.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param directory: the apidoc directory. you can use relative paths here
:type directory: str
:param delete: if True, deletes the contents of apidoc. This acts like an override switch.
:type delete: bool
:returns: None
:rtype: None
:raises: None
|
train
|
https://github.com/storax/jinjaapidoc/blob/f1eeb6ab5bd1a96c4130306718c6423f37c76856/src/jinjaapidoc/gendoc.py#L38-L60
| null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This is a modification of sphinx.apidoc by David.Zuber.
It uses jinja templates to render the rst files.
Parses a directory tree looking for Python modules and packages and creates
ReST files appropriately to create code documentation with Sphinx.
This is derived form the "sphinx-apidoc" script, which is:
Copyright 2007-2014 by the Sphinx team, see http://sphinx-doc.org/latest/authors.html.
"""
import os
import inspect
import pkgutil
import pkg_resources
import shutil
import jinja2
from sphinx.util.osutil import walk
from sphinx.util import logging
from sphinx.ext import autosummary
logger = logging.getLogger(__name__)
INITPY = '__init__.py'
PY_SUFFIXES = set(['.py', '.pyx'])
TEMPLATE_DIR = 'templates'
"""Built-in template dir for jinjaapi rendering"""
AUTOSUMMARYTEMPLATE_DIR = 'autosummarytemplates'
"""Templates for autosummary"""
MODULE_TEMPLATE_NAME = 'jinjaapi_module.rst'
"""Name of the template that is used for rendering modules."""
PACKAGE_TEMPLATE_NAME = 'jinjaapi_package.rst'
"""Name of the template that is used for rendering packages."""
def make_loader(template_dirs):
"""Return a new :class:`jinja2.FileSystemLoader` that uses the template_dirs
:param template_dirs: directories to search for templates
:type template_dirs: None | :class:`list`
:returns: a new loader
:rtype: :class:`jinja2.FileSystemLoader`
:raises: None
"""
return jinja2.FileSystemLoader(searchpath=template_dirs)
def make_environment(loader):
"""Return a new :class:`jinja2.Environment` with the given loader
:param loader: a jinja2 loader
:type loader: :class:`jinja2.BaseLoader`
:returns: a new environment
:rtype: :class:`jinja2.Environment`
:raises: None
"""
return jinja2.Environment(loader=loader)
def makename(package, module):
"""Join package and module with a dot.
Package or Module can be empty.
:param package: the package name
:type package: :class:`str`
:param module: the module name
:type module: :class:`str`
:returns: the joined name
:rtype: :class:`str`
:raises: :class:`AssertionError`, if both package and module are empty
"""
# Both package and module can be None/empty.
assert package or module, "Specify either package or module"
if package:
name = package
if module:
name += '.' + module
else:
name = module
return name
def write_file(app, name, text, dest, suffix, dryrun, force):
"""Write the output file for module/package <name>.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param name: the file name without file extension
:type name: :class:`str`
:param text: the content of the file
:type text: :class:`str`
:param dest: the output directory
:type dest: :class:`str`
:param suffix: the file extension
:type suffix: :class:`str`
:param dryrun: If True, do not create any files, just log the potential location.
:type dryrun: :class:`bool`
:param force: Overwrite existing files
:type force: :class:`bool`
:returns: None
:raises: None
"""
fname = os.path.join(dest, '%s.%s' % (name, suffix))
if dryrun:
logger.info('Would create file %s.' % fname)
return
if not force and os.path.isfile(fname):
logger.info('File %s already exists, skipping.' % fname)
else:
logger.info('Creating file %s.' % fname)
f = open(fname, 'w')
try:
f.write(text)
relpath = os.path.relpath(fname, start=app.env.srcdir)
abspath = os.sep + relpath
docpath = app.env.relfn2path(abspath)[0]
docpath = docpath.rsplit(os.path.extsep, 1)[0]
logger.debug('Adding document %s' % docpath)
app.env.found_docs.add(docpath)
finally:
f.close()
def import_name(app, name):
"""Import the given name and return name, obj, parent, mod_name
:param name: name to import
:type name: str
:returns: the imported object or None
:rtype: object | None
:raises: None
"""
try:
logger.debug('Importing %r', name)
name, obj = autosummary.import_by_name(name)[:2]
logger.debug('Imported %s', obj)
return obj
except ImportError as e:
logger.warn("Jinjapidoc failed to import %r: %s", name, e)
def get_members(app, mod, typ, include_public=None):
"""Return the members of mod of the given type
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param mod: the module with members
:type mod: module
:param typ: the typ, ``'class'``, ``'function'``, ``'exception'``, ``'data'``, ``'members'``
:type typ: str
:param include_public: list of private members to include to publics
:type include_public: list | None
:returns: None
:rtype: None
:raises: None
"""
def include_here(x):
"""Return true if the member should be included in mod.
A member will be included if it is declared in this module or package.
If the `jinjaapidoc_include_from_all` option is `True` then the member
can also be included if it is listed in `__all__`.
:param x: The member
:type x: A class, exception, or function.
:returns: True if the member should be included in mod. False otherwise.
:rtype: bool
"""
return (x.__module__ == mod.__name__ or (include_from_all and x.__name__ in all_list))
all_list = getattr(mod, '__all__', [])
include_from_all = app.config.jinjaapi_include_from_all
include_public = include_public or []
tests = {'class': lambda x: inspect.isclass(x) and not issubclass(x, BaseException) and include_here(x),
'function': lambda x: inspect.isfunction(x) and include_here(x),
'exception': lambda x: inspect.isclass(x) and issubclass(x, BaseException) and include_here(x),
'data': lambda x: not inspect.ismodule(x) and not inspect.isclass(x) and not inspect.isfunction(x),
'members': lambda x: True}
items = []
for name in dir(mod):
i = getattr(mod, name)
inspect.ismodule(i)
if tests.get(typ, lambda x: False)(i):
items.append(name)
public = [x for x in items
if x in include_public or not x.startswith('_')]
logger.debug('Got members of %s of type %s: public %s and %s', mod, typ, public, items)
return public, items
def _get_submodules(app, module):
"""Get all submodules for the given module/package
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module to query or module path
:type module: module | str
:returns: list of module names and boolean whether its a package
:rtype: list
:raises: TypeError
"""
if inspect.ismodule(module):
if hasattr(module, '__path__'):
p = module.__path__
else:
return []
elif isinstance(module, str):
p = module
else:
raise TypeError("Only Module or String accepted. %s given." % type(module))
logger.debug('Getting submodules of %s', p)
submodules = [(name, ispkg) for loader, name, ispkg in pkgutil.iter_modules(p)]
logger.debug('Found submodules of %s: %s', module, submodules)
return submodules
def get_submodules(app, module):
"""Get all submodules without packages for the given module/package
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module to query or module path
:type module: module | str
:returns: list of module names excluding packages
:rtype: list
:raises: TypeError
"""
submodules = _get_submodules(app, module)
return [name for name, ispkg in submodules if not ispkg]
def get_subpackages(app, module):
"""Get all subpackages for the given module/package
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module to query or module path
:type module: module | str
:returns: list of packages names
:rtype: list
:raises: TypeError
"""
submodules = _get_submodules(app, module)
return [name for name, ispkg in submodules if ispkg]
def get_context(app, package, module, fullname):
"""Return a dict for template rendering
Variables:
* :package: The top package
* :module: the module
* :fullname: package.module
* :subpkgs: packages beneath module
* :submods: modules beneath module
* :classes: public classes in module
* :allclasses: public and private classes in module
* :exceptions: public exceptions in module
* :allexceptions: public and private exceptions in module
* :functions: public functions in module
* :allfunctions: public and private functions in module
* :data: public data in module
* :alldata: public and private data in module
* :members: dir(module)
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param package: the parent package name
:type package: str
:param module: the module name
:type module: str
:param fullname: package.module
:type fullname: str
:returns: a dict with variables for template rendering
:rtype: :class:`dict`
:raises: None
"""
var = {'package': package,
'module': module,
'fullname': fullname}
logger.debug('Creating context for: package %s, module %s, fullname %s', package, module, fullname)
obj = import_name(app, fullname)
if not obj:
for k in ('subpkgs', 'submods', 'classes', 'allclasses',
'exceptions', 'allexceptions', 'functions', 'allfunctions',
'data', 'alldata', 'memebers'):
var[k] = []
return var
var['subpkgs'] = get_subpackages(app, obj)
var['submods'] = get_submodules(app, obj)
var['classes'], var['allclasses'] = get_members(app, obj, 'class')
var['exceptions'], var['allexceptions'] = get_members(app, obj, 'exception')
var['functions'], var['allfunctions'] = get_members(app, obj, 'function')
var['data'], var['alldata'] = get_members(app, obj, 'data')
var['members'] = get_members(app, obj, 'members')
logger.debug('Created context: %s', var)
return var
def create_module_file(app, env, package, module, dest, suffix, dryrun, force):
"""Build the text of the file and write the file.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param env: the jinja environment for the templates
:type env: :class:`jinja2.Environment`
:param package: the package name
:type package: :class:`str`
:param module: the module name
:type module: :class:`str`
:param dest: the output directory
:type dest: :class:`str`
:param suffix: the file extension
:type suffix: :class:`str`
:param dryrun: If True, do not create any files, just log the potential location.
:type dryrun: :class:`bool`
:param force: Overwrite existing files
:type force: :class:`bool`
:returns: None
:raises: None
"""
logger.debug('Create module file: package %s, module %s', package, module)
template_file = MODULE_TEMPLATE_NAME
template = env.get_template(template_file)
fn = makename(package, module)
var = get_context(app, package, module, fn)
var['ispkg'] = False
rendered = template.render(var)
write_file(app, makename(package, module), rendered, dest, suffix, dryrun, force)
def create_package_file(app, env, root_package, sub_package, private,
dest, suffix, dryrun, force):
"""Build the text of the file and write the file.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param env: the jinja environment for the templates
:type env: :class:`jinja2.Environment`
:param root_package: the parent package
:type root_package: :class:`str`
:param sub_package: the package name without root
:type sub_package: :class:`str`
:param private: Include \"_private\" modules
:type private: :class:`bool`
:param dest: the output directory
:type dest: :class:`str`
:param suffix: the file extension
:type suffix: :class:`str`
:param dryrun: If True, do not create any files, just log the potential location.
:type dryrun: :class:`bool`
:param force: Overwrite existing files
:type force: :class:`bool`
:returns: None
:raises: None
"""
logger.debug('Create package file: rootpackage %s, sub_package %s', root_package, sub_package)
template_file = PACKAGE_TEMPLATE_NAME
template = env.get_template(template_file)
fn = makename(root_package, sub_package)
var = get_context(app, root_package, sub_package, fn)
var['ispkg'] = True
for submod in var['submods']:
if shall_skip(app, submod, private):
continue
create_module_file(app, env, fn, submod, dest, suffix, dryrun, force)
rendered = template.render(var)
write_file(app, fn, rendered, dest, suffix, dryrun, force)
def shall_skip(app, module, private):
"""Check if we want to skip this module.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module name
:type module: :class:`str`
:param private: True, if privates are allowed
:type private: :class:`bool`
"""
logger.debug('Testing if %s should be skipped.', module)
# skip if it has a "private" name and this is selected
if module != '__init__.py' and module.startswith('_') and \
not private:
logger.debug('Skip %s because its either private or __init__.', module)
return True
logger.debug('Do not skip %s', module)
return False
def recurse_tree(app, env, src, dest, excludes, followlinks, force, dryrun, private, suffix):
"""Look for every file in the directory tree and create the corresponding
ReST files.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param env: the jinja environment
:type env: :class:`jinja2.Environment`
:param src: the path to the python source files
:type src: :class:`str`
:param dest: the output directory
:type dest: :class:`str`
:param excludes: the paths to exclude
:type excludes: :class:`list`
:param followlinks: follow symbolic links
:type followlinks: :class:`bool`
:param force: overwrite existing files
:type force: :class:`bool`
:param dryrun: do not generate files
:type dryrun: :class:`bool`
:param private: include "_private" modules
:type private: :class:`bool`
:param suffix: the file extension
:type suffix: :class:`str`
"""
# check if the base directory is a package and get its name
if INITPY in os.listdir(src):
root_package = src.split(os.path.sep)[-1]
else:
# otherwise, the base is a directory with packages
root_package = None
toplevels = []
for root, subs, files in walk(src, followlinks=followlinks):
# document only Python module files (that aren't excluded)
py_files = sorted(f for f in files
if os.path.splitext(f)[1] in PY_SUFFIXES and # noqa: W504
not is_excluded(os.path.join(root, f), excludes))
is_pkg = INITPY in py_files
if is_pkg:
py_files.remove(INITPY)
py_files.insert(0, INITPY)
elif root != src:
# only accept non-package at toplevel
del subs[:]
continue
# remove hidden ('.') and private ('_') directories, as well as
# excluded dirs
if private:
exclude_prefixes = ('.',)
else:
exclude_prefixes = ('.', '_')
subs[:] = sorted(sub for sub in subs if not sub.startswith(exclude_prefixes) and not
is_excluded(os.path.join(root, sub), excludes))
if is_pkg:
# we are in a package with something to document
if subs or len(py_files) > 1 or not \
shall_skip(app, os.path.join(root, INITPY), private):
subpackage = root[len(src):].lstrip(os.path.sep).\
replace(os.path.sep, '.')
create_package_file(app, env, root_package, subpackage,
private, dest, suffix, dryrun, force)
toplevels.append(makename(root_package, subpackage))
else:
# if we are at the root level, we don't require it to be a package
assert root == src and root_package is None
for py_file in py_files:
if not shall_skip(app, os.path.join(src, py_file), private):
module = os.path.splitext(py_file)[0]
create_module_file(app, env, root_package, module, dest, suffix, dryrun, force)
toplevels.append(module)
return toplevels
def normalize_excludes(excludes):
"""Normalize the excluded directory list."""
return [os.path.normpath(os.path.abspath(exclude)) for exclude in excludes]
def is_excluded(root, excludes):
"""Check if the directory is in the exclude list.
Note: by having trailing slashes, we avoid common prefix issues, like
e.g. an exlude "foo" also accidentally excluding "foobar".
"""
root = os.path.normpath(root)
for exclude in excludes:
if root == exclude:
return True
return False
def generate(app, src, dest, exclude=[], followlinks=False,
force=False, dryrun=False, private=False, suffix='rst',
template_dirs=None):
"""Generage the rst files
Raises an :class:`OSError` if the source path is not a directory.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param src: path to python source files
:type src: :class:`str`
:param dest: output directory
:type dest: :class:`str`
:param exclude: list of paths to exclude
:type exclude: :class:`list`
:param followlinks: follow symbolic links
:type followlinks: :class:`bool`
:param force: overwrite existing files
:type force: :class:`bool`
:param dryrun: do not create any files
:type dryrun: :class:`bool`
:param private: include \"_private\" modules
:type private: :class:`bool`
:param suffix: file suffix
:type suffix: :class:`str`
:param template_dirs: directories to search for user templates
:type template_dirs: None | :class:`list`
:returns: None
:rtype: None
:raises: OSError
"""
suffix = suffix.strip('.')
if not os.path.isdir(src):
raise OSError("%s is not a directory" % src)
if not os.path.isdir(dest) and not dryrun:
os.makedirs(dest)
src = os.path.normpath(os.path.abspath(src))
exclude = normalize_excludes(exclude)
loader = make_loader(template_dirs)
env = make_environment(loader)
recurse_tree(app, env, src, dest, exclude, followlinks, force, dryrun, private, suffix)
def main(app):
"""Parse the config of the app and initiate the generation process
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:returns: None
:rtype: None
:raises: None
"""
c = app.config
src = c.jinjaapi_srcdir
if not src:
return
suffix = "rst"
out = c.jinjaapi_outputdir or app.env.srcdir
if c.jinjaapi_addsummarytemplate:
tpath = pkg_resources.resource_filename(__package__, AUTOSUMMARYTEMPLATE_DIR)
c.templates_path.append(tpath)
tpath = pkg_resources.resource_filename(__package__, TEMPLATE_DIR)
c.templates_path.append(tpath)
prepare_dir(app, out, not c.jinjaapi_nodelete)
generate(app, src, out,
exclude=c.jinjaapi_exclude_paths,
force=c.jinjaapi_force,
followlinks=c.jinjaapi_followlinks,
dryrun=c.jinjaapi_dryrun,
private=c.jinjaapi_includeprivate,
suffix=suffix,
template_dirs=c.templates_path)
|
storax/jinjaapidoc
|
src/jinjaapidoc/gendoc.py
|
makename
|
python
|
def makename(package, module):
# Both package and module can be None/empty.
assert package or module, "Specify either package or module"
if package:
name = package
if module:
name += '.' + module
else:
name = module
return name
|
Join package and module with a dot.
Package or Module can be empty.
:param package: the package name
:type package: :class:`str`
:param module: the module name
:type module: :class:`str`
:returns: the joined name
:rtype: :class:`str`
:raises: :class:`AssertionError`, if both package and module are empty
|
train
|
https://github.com/storax/jinjaapidoc/blob/f1eeb6ab5bd1a96c4130306718c6423f37c76856/src/jinjaapidoc/gendoc.py#L87-L108
| null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This is a modification of sphinx.apidoc by David.Zuber.
It uses jinja templates to render the rst files.
Parses a directory tree looking for Python modules and packages and creates
ReST files appropriately to create code documentation with Sphinx.
This is derived form the "sphinx-apidoc" script, which is:
Copyright 2007-2014 by the Sphinx team, see http://sphinx-doc.org/latest/authors.html.
"""
import os
import inspect
import pkgutil
import pkg_resources
import shutil
import jinja2
from sphinx.util.osutil import walk
from sphinx.util import logging
from sphinx.ext import autosummary
logger = logging.getLogger(__name__)
INITPY = '__init__.py'
PY_SUFFIXES = set(['.py', '.pyx'])
TEMPLATE_DIR = 'templates'
"""Built-in template dir for jinjaapi rendering"""
AUTOSUMMARYTEMPLATE_DIR = 'autosummarytemplates'
"""Templates for autosummary"""
MODULE_TEMPLATE_NAME = 'jinjaapi_module.rst'
"""Name of the template that is used for rendering modules."""
PACKAGE_TEMPLATE_NAME = 'jinjaapi_package.rst'
"""Name of the template that is used for rendering packages."""
def prepare_dir(app, directory, delete=False):
"""Create apidoc dir, delete contents if delete is True.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param directory: the apidoc directory. you can use relative paths here
:type directory: str
:param delete: if True, deletes the contents of apidoc. This acts like an override switch.
:type delete: bool
:returns: None
:rtype: None
:raises: None
"""
logger.info("Preparing output directories for jinjaapidoc.")
if os.path.exists(directory):
if delete:
logger.debug("Deleting dir %s", directory)
shutil.rmtree(directory)
logger.debug("Creating dir %s", directory)
os.mkdir(directory)
else:
logger.debug("Creating %s", directory)
os.mkdir(directory)
def make_loader(template_dirs):
"""Return a new :class:`jinja2.FileSystemLoader` that uses the template_dirs
:param template_dirs: directories to search for templates
:type template_dirs: None | :class:`list`
:returns: a new loader
:rtype: :class:`jinja2.FileSystemLoader`
:raises: None
"""
return jinja2.FileSystemLoader(searchpath=template_dirs)
def make_environment(loader):
"""Return a new :class:`jinja2.Environment` with the given loader
:param loader: a jinja2 loader
:type loader: :class:`jinja2.BaseLoader`
:returns: a new environment
:rtype: :class:`jinja2.Environment`
:raises: None
"""
return jinja2.Environment(loader=loader)
def write_file(app, name, text, dest, suffix, dryrun, force):
"""Write the output file for module/package <name>.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param name: the file name without file extension
:type name: :class:`str`
:param text: the content of the file
:type text: :class:`str`
:param dest: the output directory
:type dest: :class:`str`
:param suffix: the file extension
:type suffix: :class:`str`
:param dryrun: If True, do not create any files, just log the potential location.
:type dryrun: :class:`bool`
:param force: Overwrite existing files
:type force: :class:`bool`
:returns: None
:raises: None
"""
fname = os.path.join(dest, '%s.%s' % (name, suffix))
if dryrun:
logger.info('Would create file %s.' % fname)
return
if not force and os.path.isfile(fname):
logger.info('File %s already exists, skipping.' % fname)
else:
logger.info('Creating file %s.' % fname)
f = open(fname, 'w')
try:
f.write(text)
relpath = os.path.relpath(fname, start=app.env.srcdir)
abspath = os.sep + relpath
docpath = app.env.relfn2path(abspath)[0]
docpath = docpath.rsplit(os.path.extsep, 1)[0]
logger.debug('Adding document %s' % docpath)
app.env.found_docs.add(docpath)
finally:
f.close()
def import_name(app, name):
"""Import the given name and return name, obj, parent, mod_name
:param name: name to import
:type name: str
:returns: the imported object or None
:rtype: object | None
:raises: None
"""
try:
logger.debug('Importing %r', name)
name, obj = autosummary.import_by_name(name)[:2]
logger.debug('Imported %s', obj)
return obj
except ImportError as e:
logger.warn("Jinjapidoc failed to import %r: %s", name, e)
def get_members(app, mod, typ, include_public=None):
"""Return the members of mod of the given type
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param mod: the module with members
:type mod: module
:param typ: the typ, ``'class'``, ``'function'``, ``'exception'``, ``'data'``, ``'members'``
:type typ: str
:param include_public: list of private members to include to publics
:type include_public: list | None
:returns: None
:rtype: None
:raises: None
"""
def include_here(x):
"""Return true if the member should be included in mod.
A member will be included if it is declared in this module or package.
If the `jinjaapidoc_include_from_all` option is `True` then the member
can also be included if it is listed in `__all__`.
:param x: The member
:type x: A class, exception, or function.
:returns: True if the member should be included in mod. False otherwise.
:rtype: bool
"""
return (x.__module__ == mod.__name__ or (include_from_all and x.__name__ in all_list))
all_list = getattr(mod, '__all__', [])
include_from_all = app.config.jinjaapi_include_from_all
include_public = include_public or []
tests = {'class': lambda x: inspect.isclass(x) and not issubclass(x, BaseException) and include_here(x),
'function': lambda x: inspect.isfunction(x) and include_here(x),
'exception': lambda x: inspect.isclass(x) and issubclass(x, BaseException) and include_here(x),
'data': lambda x: not inspect.ismodule(x) and not inspect.isclass(x) and not inspect.isfunction(x),
'members': lambda x: True}
items = []
for name in dir(mod):
i = getattr(mod, name)
inspect.ismodule(i)
if tests.get(typ, lambda x: False)(i):
items.append(name)
public = [x for x in items
if x in include_public or not x.startswith('_')]
logger.debug('Got members of %s of type %s: public %s and %s', mod, typ, public, items)
return public, items
def _get_submodules(app, module):
"""Get all submodules for the given module/package
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module to query or module path
:type module: module | str
:returns: list of module names and boolean whether its a package
:rtype: list
:raises: TypeError
"""
if inspect.ismodule(module):
if hasattr(module, '__path__'):
p = module.__path__
else:
return []
elif isinstance(module, str):
p = module
else:
raise TypeError("Only Module or String accepted. %s given." % type(module))
logger.debug('Getting submodules of %s', p)
submodules = [(name, ispkg) for loader, name, ispkg in pkgutil.iter_modules(p)]
logger.debug('Found submodules of %s: %s', module, submodules)
return submodules
def get_submodules(app, module):
"""Get all submodules without packages for the given module/package
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module to query or module path
:type module: module | str
:returns: list of module names excluding packages
:rtype: list
:raises: TypeError
"""
submodules = _get_submodules(app, module)
return [name for name, ispkg in submodules if not ispkg]
def get_subpackages(app, module):
"""Get all subpackages for the given module/package
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module to query or module path
:type module: module | str
:returns: list of packages names
:rtype: list
:raises: TypeError
"""
submodules = _get_submodules(app, module)
return [name for name, ispkg in submodules if ispkg]
def get_context(app, package, module, fullname):
"""Return a dict for template rendering
Variables:
* :package: The top package
* :module: the module
* :fullname: package.module
* :subpkgs: packages beneath module
* :submods: modules beneath module
* :classes: public classes in module
* :allclasses: public and private classes in module
* :exceptions: public exceptions in module
* :allexceptions: public and private exceptions in module
* :functions: public functions in module
* :allfunctions: public and private functions in module
* :data: public data in module
* :alldata: public and private data in module
* :members: dir(module)
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param package: the parent package name
:type package: str
:param module: the module name
:type module: str
:param fullname: package.module
:type fullname: str
:returns: a dict with variables for template rendering
:rtype: :class:`dict`
:raises: None
"""
var = {'package': package,
'module': module,
'fullname': fullname}
logger.debug('Creating context for: package %s, module %s, fullname %s', package, module, fullname)
obj = import_name(app, fullname)
if not obj:
for k in ('subpkgs', 'submods', 'classes', 'allclasses',
'exceptions', 'allexceptions', 'functions', 'allfunctions',
'data', 'alldata', 'memebers'):
var[k] = []
return var
var['subpkgs'] = get_subpackages(app, obj)
var['submods'] = get_submodules(app, obj)
var['classes'], var['allclasses'] = get_members(app, obj, 'class')
var['exceptions'], var['allexceptions'] = get_members(app, obj, 'exception')
var['functions'], var['allfunctions'] = get_members(app, obj, 'function')
var['data'], var['alldata'] = get_members(app, obj, 'data')
var['members'] = get_members(app, obj, 'members')
logger.debug('Created context: %s', var)
return var
def create_module_file(app, env, package, module, dest, suffix, dryrun, force):
"""Build the text of the file and write the file.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param env: the jinja environment for the templates
:type env: :class:`jinja2.Environment`
:param package: the package name
:type package: :class:`str`
:param module: the module name
:type module: :class:`str`
:param dest: the output directory
:type dest: :class:`str`
:param suffix: the file extension
:type suffix: :class:`str`
:param dryrun: If True, do not create any files, just log the potential location.
:type dryrun: :class:`bool`
:param force: Overwrite existing files
:type force: :class:`bool`
:returns: None
:raises: None
"""
logger.debug('Create module file: package %s, module %s', package, module)
template_file = MODULE_TEMPLATE_NAME
template = env.get_template(template_file)
fn = makename(package, module)
var = get_context(app, package, module, fn)
var['ispkg'] = False
rendered = template.render(var)
write_file(app, makename(package, module), rendered, dest, suffix, dryrun, force)
def create_package_file(app, env, root_package, sub_package, private,
dest, suffix, dryrun, force):
"""Build the text of the file and write the file.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param env: the jinja environment for the templates
:type env: :class:`jinja2.Environment`
:param root_package: the parent package
:type root_package: :class:`str`
:param sub_package: the package name without root
:type sub_package: :class:`str`
:param private: Include \"_private\" modules
:type private: :class:`bool`
:param dest: the output directory
:type dest: :class:`str`
:param suffix: the file extension
:type suffix: :class:`str`
:param dryrun: If True, do not create any files, just log the potential location.
:type dryrun: :class:`bool`
:param force: Overwrite existing files
:type force: :class:`bool`
:returns: None
:raises: None
"""
logger.debug('Create package file: rootpackage %s, sub_package %s', root_package, sub_package)
template_file = PACKAGE_TEMPLATE_NAME
template = env.get_template(template_file)
fn = makename(root_package, sub_package)
var = get_context(app, root_package, sub_package, fn)
var['ispkg'] = True
for submod in var['submods']:
if shall_skip(app, submod, private):
continue
create_module_file(app, env, fn, submod, dest, suffix, dryrun, force)
rendered = template.render(var)
write_file(app, fn, rendered, dest, suffix, dryrun, force)
def shall_skip(app, module, private):
    """Check if we want to skip this module.

    ``module`` may be a bare module name or a file path; only the final
    path component is inspected.  This fixes the previous behavior where
    a private module given as a path (e.g. ``pkg/_private.py``) was never
    detected because the *whole path* was tested with ``startswith('_')``.

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :param module: the module name or path to a module file
    :type module: :class:`str`
    :param private: True, if privates are allowed
    :type private: :class:`bool`
    :returns: True if the module should be skipped
    :rtype: :class:`bool`
    """
    logger.debug('Testing if %s should be skipped.', module)
    basename = os.path.basename(module)
    # skip modules with a "private" name unless privates were requested;
    # __init__.py is never treated as private so packages stay documented
    if basename != '__init__.py' and basename.startswith('_') and \
            not private:
        logger.debug('Skip %s because its either private or __init__.', module)
        return True
    logger.debug('Do not skip %s', module)
    return False
def recurse_tree(app, env, src, dest, excludes, followlinks, force, dryrun, private, suffix):
    """Look for every file in the directory tree and create the corresponding
    ReST files.

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :param env: the jinja environment
    :type env: :class:`jinja2.Environment`
    :param src: the path to the python source files
    :type src: :class:`str`
    :param dest: the output directory
    :type dest: :class:`str`
    :param excludes: the paths to exclude
    :type excludes: :class:`list`
    :param followlinks: follow symbolic links
    :type followlinks: :class:`bool`
    :param force: overwrite existing files
    :type force: :class:`bool`
    :param dryrun: do not generate files
    :type dryrun: :class:`bool`
    :param private: include "_private" modules
    :type private: :class:`bool`
    :param suffix: the file extension
    :type suffix: :class:`str`
    :returns: the names of the documented top-level modules/packages
    :rtype: :class:`list`
    """
    # check if the base directory is a package and get its name
    if INITPY in os.listdir(src):
        root_package = src.split(os.path.sep)[-1]
    else:
        # otherwise, the base is a directory with packages
        root_package = None
    toplevels = []
    for root, subs, files in walk(src, followlinks=followlinks):
        # document only Python module files (that aren't excluded)
        py_files = sorted(f for f in files
                          if os.path.splitext(f)[1] in PY_SUFFIXES and  # noqa: W504
                          not is_excluded(os.path.join(root, f), excludes))
        is_pkg = INITPY in py_files
        if is_pkg:
            # move __init__.py to the front so the package is handled first
            py_files.remove(INITPY)
            py_files.insert(0, INITPY)
        elif root != src:
            # only accept non-package at toplevel
            del subs[:]
            continue
        # remove hidden ('.') and private ('_') directories, as well as
        # excluded dirs
        if private:
            exclude_prefixes = ('.',)
        else:
            exclude_prefixes = ('.', '_')
        # mutate subs in place so os.walk does not descend into pruned dirs
        subs[:] = sorted(sub for sub in subs if not sub.startswith(exclude_prefixes) and not
                         is_excluded(os.path.join(root, sub), excludes))
        if is_pkg:
            # we are in a package with something to document
            if subs or len(py_files) > 1 or not \
                    shall_skip(app, os.path.join(root, INITPY), private):
                # dotted package path relative to src, e.g. "pkg.sub"
                subpackage = root[len(src):].lstrip(os.path.sep).\
                    replace(os.path.sep, '.')
                create_package_file(app, env, root_package, subpackage,
                                    private, dest, suffix, dryrun, force)
                toplevels.append(makename(root_package, subpackage))
        else:
            # if we are at the root level, we don't require it to be a package
            assert root == src and root_package is None
            for py_file in py_files:
                if not shall_skip(app, os.path.join(src, py_file), private):
                    module = os.path.splitext(py_file)[0]
                    create_module_file(app, env, root_package, module, dest, suffix, dryrun, force)
                    toplevels.append(module)
    return toplevels
def normalize_excludes(excludes):
    """Return absolute, normalized versions of the excluded paths."""
    normalized = []
    for exclude in excludes:
        normalized.append(os.path.normpath(os.path.abspath(exclude)))
    return normalized
def is_excluded(root, excludes):
    """Return True if *root* matches one of the excluded paths exactly.

    *root* is normalized before the comparison, so only an exact match
    excludes a directory: an exclude of "foo" does not accidentally
    exclude "foobar".
    """
    return os.path.normpath(root) in excludes
def generate(app, src, dest, exclude=None, followlinks=False,
             force=False, dryrun=False, private=False, suffix='rst',
             template_dirs=None):
    """Generate the rst files.

    Raises an :class:`OSError` if the source path is not a directory.

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :param src: path to python source files
    :type src: :class:`str`
    :param dest: output directory
    :type dest: :class:`str`
    :param exclude: list of paths to exclude; None means no excludes
    :type exclude: :class:`list` | None
    :param followlinks: follow symbolic links
    :type followlinks: :class:`bool`
    :param force: overwrite existing files
    :type force: :class:`bool`
    :param dryrun: do not create any files
    :type dryrun: :class:`bool`
    :param private: include "_private" modules
    :type private: :class:`bool`
    :param suffix: file suffix
    :type suffix: :class:`str`
    :param template_dirs: directories to search for user templates
    :type template_dirs: None | :class:`list`
    :returns: None
    :rtype: None
    :raises: OSError
    """
    # None instead of a mutable [] default; behavior is unchanged for callers
    if exclude is None:
        exclude = []
    suffix = suffix.strip('.')
    if not os.path.isdir(src):
        raise OSError("%s is not a directory" % src)
    if not os.path.isdir(dest) and not dryrun:
        os.makedirs(dest)
    src = os.path.normpath(os.path.abspath(src))
    exclude = normalize_excludes(exclude)
    loader = make_loader(template_dirs)
    env = make_environment(loader)
    recurse_tree(app, env, src, dest, exclude, followlinks, force, dryrun, private, suffix)
def main(app):
    """Parse the config of the app and initiate the generation process

    Reads the ``jinjaapi_*`` config values, registers the bundled template
    directories, prepares the output directory and then runs
    :func:`generate`.  Does nothing if ``jinjaapi_srcdir`` is not set.

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :returns: None
    :rtype: None
    :raises: None
    """
    c = app.config
    src = c.jinjaapi_srcdir
    if not src:
        # extension not configured for this project; nothing to do
        return
    suffix = "rst"
    # fall back to the sphinx source dir when no output dir is configured
    out = c.jinjaapi_outputdir or app.env.srcdir
    if c.jinjaapi_addsummarytemplate:
        # register the bundled autosummary templates with sphinx
        tpath = pkg_resources.resource_filename(__package__, AUTOSUMMARYTEMPLATE_DIR)
        c.templates_path.append(tpath)
    # the built-in jinjaapi templates are always registered
    tpath = pkg_resources.resource_filename(__package__, TEMPLATE_DIR)
    c.templates_path.append(tpath)
    prepare_dir(app, out, not c.jinjaapi_nodelete)
    generate(app, src, out,
             exclude=c.jinjaapi_exclude_paths,
             force=c.jinjaapi_force,
             followlinks=c.jinjaapi_followlinks,
             dryrun=c.jinjaapi_dryrun,
             private=c.jinjaapi_includeprivate,
             suffix=suffix,
             template_dirs=c.templates_path)
|
storax/jinjaapidoc
|
src/jinjaapidoc/gendoc.py
|
write_file
|
python
|
def write_file(app, name, text, dest, suffix, dryrun, force):
    """Write the output file for module/package ``name``.

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :param name: the file name without file extension
    :type name: :class:`str`
    :param text: the content of the file
    :type text: :class:`str`
    :param dest: the output directory
    :type dest: :class:`str`
    :param suffix: the file extension
    :type suffix: :class:`str`
    :param dryrun: If True, do not create any files, just log the potential location.
    :type dryrun: :class:`bool`
    :param force: Overwrite existing files
    :type force: :class:`bool`
    :returns: None
    :raises: None
    """
    fname = os.path.join(dest, '%s.%s' % (name, suffix))
    if dryrun:
        logger.info('Would create file %s.' % fname)
        return
    if not force and os.path.isfile(fname):
        logger.info('File %s already exists, skipping.' % fname)
    else:
        logger.info('Creating file %s.' % fname)
        f = open(fname, 'w')
        try:
            f.write(text)
            # register the generated document with the sphinx environment so
            # it is picked up by the build
            relpath = os.path.relpath(fname, start=app.env.srcdir)
            abspath = os.sep + relpath
            docpath = app.env.relfn2path(abspath)[0]
            # strip the file extension to get the sphinx doc name
            docpath = docpath.rsplit(os.path.extsep, 1)[0]
            logger.debug('Adding document %s' % docpath)
            app.env.found_docs.add(docpath)
        finally:
            f.close()
|
Write the output file for module/package <name>.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param name: the file name without file extension
:type name: :class:`str`
:param text: the content of the file
:type text: :class:`str`
:param dest: the output directory
:type dest: :class:`str`
:param suffix: the file extension
:type suffix: :class:`str`
:param dryrun: If True, do not create any files, just log the potential location.
:type dryrun: :class:`bool`
:param force: Overwrite existing files
:type force: :class:`bool`
:returns: None
:raises: None
|
train
|
https://github.com/storax/jinjaapidoc/blob/f1eeb6ab5bd1a96c4130306718c6423f37c76856/src/jinjaapidoc/gendoc.py#L111-L149
| null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This is a modification of sphinx.apidoc by David.Zuber.
It uses jinja templates to render the rst files.
Parses a directory tree looking for Python modules and packages and creates
ReST files appropriately to create code documentation with Sphinx.
This is derived form the "sphinx-apidoc" script, which is:
Copyright 2007-2014 by the Sphinx team, see http://sphinx-doc.org/latest/authors.html.
"""
import os
import inspect
import pkgutil
import pkg_resources
import shutil
import jinja2
from sphinx.util.osutil import walk
from sphinx.util import logging
from sphinx.ext import autosummary
logger = logging.getLogger(__name__)
INITPY = '__init__.py'
PY_SUFFIXES = set(['.py', '.pyx'])
TEMPLATE_DIR = 'templates'
"""Built-in template dir for jinjaapi rendering"""
AUTOSUMMARYTEMPLATE_DIR = 'autosummarytemplates'
"""Templates for autosummary"""
MODULE_TEMPLATE_NAME = 'jinjaapi_module.rst'
"""Name of the template that is used for rendering modules."""
PACKAGE_TEMPLATE_NAME = 'jinjaapi_package.rst'
"""Name of the template that is used for rendering packages."""
def prepare_dir(app, directory, delete=False):
    """Create the apidoc dir; optionally wipe an existing one first.

    An existing directory is left untouched unless *delete* is True.

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :param directory: the apidoc directory. you can use relative paths here
    :type directory: str
    :param delete: if True, deletes the contents of apidoc. This acts like an override switch.
    :type delete: bool
    :returns: None
    :rtype: None
    :raises: None
    """
    logger.info("Preparing output directories for jinjaapidoc.")
    exists = os.path.exists(directory)
    if exists and delete:
        logger.debug("Deleting dir %s", directory)
        shutil.rmtree(directory)
        logger.debug("Creating dir %s", directory)
        os.mkdir(directory)
    elif not exists:
        logger.debug("Creating %s", directory)
        os.mkdir(directory)
def make_loader(template_dirs):
    """Build a :class:`jinja2.FileSystemLoader` over the given directories.

    :param template_dirs: directories to search for templates
    :type template_dirs: None | :class:`list`
    :returns: a new loader
    :rtype: :class:`jinja2.FileSystemLoader`
    :raises: None
    """
    loader = jinja2.FileSystemLoader(searchpath=template_dirs)
    return loader
def make_environment(loader):
    """Build a :class:`jinja2.Environment` that renders via *loader*.

    :param loader: a jinja2 loader
    :type loader: :class:`jinja2.BaseLoader`
    :returns: a new environment
    :rtype: :class:`jinja2.Environment`
    :raises: None
    """
    env = jinja2.Environment(loader=loader)
    return env
def makename(package, module):
    """Join package and module with a dot.

    Either part may be empty, but not both.

    :param package: the package name
    :type package: :class:`str`
    :param module: the module name
    :type module: :class:`str`
    :returns: the joined name
    :rtype: :class:`str`
    :raises: :class:`AssertionError`, if both package and module are empty
    """
    assert package or module, "Specify either package or module"
    if not package:
        return module
    if not module:
        return package
    return '.'.join((package, module))
def import_name(app, name):
    """Import ``name`` and return the imported object.

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :param name: name to import
    :type name: str
    :returns: the imported object, or None if the import failed
    :rtype: object | None
    :raises: None
    """
    try:
        logger.debug('Importing %r', name)
        # import_by_name returns (name, obj, parent, mod_name); only the
        # object itself is needed here
        name, obj = autosummary.import_by_name(name)[:2]
        logger.debug('Imported %s', obj)
        return obj
    except ImportError as e:
        # Logger.warn is a deprecated alias of Logger.warning
        logger.warning("Jinjapidoc failed to import %r: %s", name, e)
def get_members(app, mod, typ, include_public=None):
    """Return the members of mod of the given type

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :param mod: the module with members
    :type mod: module
    :param typ: the typ, ``'class'``, ``'function'``, ``'exception'``, ``'data'``, ``'members'``
    :type typ: str
    :param include_public: list of private members to include to publics
    :type include_public: list | None
    :returns: tuple of (public member names, all matching member names)
    :rtype: tuple
    :raises: None
    """
    def include_here(x):
        """Return true if the member should be included in mod.

        A member will be included if it is declared in this module or package.
        If the `jinjaapidoc_include_from_all` option is `True` then the member
        can also be included if it is listed in `__all__`.

        :param x: The member
        :type x: A class, exception, or function.
        :returns: True if the member should be included in mod. False otherwise.
        :rtype: bool
        """
        return (x.__module__ == mod.__name__ or (include_from_all and x.__name__ in all_list))
    all_list = getattr(mod, '__all__', [])
    include_from_all = app.config.jinjaapi_include_from_all
    include_public = include_public or []
    # predicate per member type; unknown types match nothing
    tests = {'class': lambda x: inspect.isclass(x) and not issubclass(x, BaseException) and include_here(x),
             'function': lambda x: inspect.isfunction(x) and include_here(x),
             'exception': lambda x: inspect.isclass(x) and issubclass(x, BaseException) and include_here(x),
             'data': lambda x: not inspect.ismodule(x) and not inspect.isclass(x) and not inspect.isfunction(x),
             'members': lambda x: True}
    items = []
    for name in dir(mod):
        i = getattr(mod, name)
        # (a stray no-op "inspect.ismodule(i)" call was removed here)
        if tests.get(typ, lambda x: False)(i):
            items.append(name)
    public = [x for x in items
              if x in include_public or not x.startswith('_')]
    logger.debug('Got members of %s of type %s: public %s and %s', mod, typ, public, items)
    return public, items
def _get_submodules(app, module):
    """Get all submodules for the given module/package

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :param module: the module to query or module path
    :type module: module | str
    :returns: list of (module name, ispkg) tuples
    :rtype: list
    :raises: TypeError
    """
    if inspect.ismodule(module):
        if hasattr(module, '__path__'):
            p = module.__path__
        else:
            # a plain module has no submodules
            return []
    elif isinstance(module, str):
        # pkgutil.iter_modules expects a *list* of paths; passing the bare
        # string would iterate it character by character
        p = [module]
    else:
        raise TypeError("Only Module or String accepted. %s given." % type(module))
    logger.debug('Getting submodules of %s', p)
    submodules = [(name, ispkg) for loader, name, ispkg in pkgutil.iter_modules(p)]
    logger.debug('Found submodules of %s: %s', module, submodules)
    return submodules
def get_submodules(app, module):
    """List the plain (non-package) submodules of the given module/package.

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :param module: the module to query or module path
    :type module: module | str
    :returns: list of module names excluding packages
    :rtype: list
    :raises: TypeError
    """
    return [name for name, ispkg in _get_submodules(app, module) if not ispkg]
def get_subpackages(app, module):
    """List the subpackages of the given module/package.

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :param module: the module to query or module path
    :type module: module | str
    :returns: list of packages names
    :rtype: list
    :raises: TypeError
    """
    return [name for name, ispkg in _get_submodules(app, module) if ispkg]
def get_context(app, package, module, fullname):
    """Return a dict for template rendering

    Variables:

      * :package: The top package
      * :module: the module
      * :fullname: package.module
      * :subpkgs: packages beneath module
      * :submods: modules beneath module
      * :classes: public classes in module
      * :allclasses: public and private classes in module
      * :exceptions: public exceptions in module
      * :allexceptions: public and private exceptions in module
      * :functions: public functions in module
      * :allfunctions: public and private functions in module
      * :data: public data in module
      * :alldata: public and private data in module
      * :members: dir(module)

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :param package: the parent package name
    :type package: str
    :param module: the module name
    :type module: str
    :param fullname: package.module
    :type fullname: str
    :returns: a dict with variables for template rendering
    :rtype: :class:`dict`
    :raises: None
    """
    var = {'package': package,
           'module': module,
           'fullname': fullname}
    logger.debug('Creating context for: package %s, module %s, fullname %s', package, module, fullname)
    obj = import_name(app, fullname)
    if not obj:
        # Import failed: fill every template variable with an empty list so
        # the templates still render.  BUGFIX: the last key used to be
        # misspelled 'memebers', leaving 'members' unset on import failure.
        for k in ('subpkgs', 'submods', 'classes', 'allclasses',
                  'exceptions', 'allexceptions', 'functions', 'allfunctions',
                  'data', 'alldata', 'members'):
            var[k] = []
        return var
    var['subpkgs'] = get_subpackages(app, obj)
    var['submods'] = get_submodules(app, obj)
    var['classes'], var['allclasses'] = get_members(app, obj, 'class')
    var['exceptions'], var['allexceptions'] = get_members(app, obj, 'exception')
    var['functions'], var['allfunctions'] = get_members(app, obj, 'function')
    var['data'], var['alldata'] = get_members(app, obj, 'data')
    var['members'] = get_members(app, obj, 'members')
    logger.debug('Created context: %s', var)
    return var
def create_module_file(app, env, package, module, dest, suffix, dryrun, force):
"""Build the text of the file and write the file.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param env: the jinja environment for the templates
:type env: :class:`jinja2.Environment`
:param package: the package name
:type package: :class:`str`
:param module: the module name
:type module: :class:`str`
:param dest: the output directory
:type dest: :class:`str`
:param suffix: the file extension
:type suffix: :class:`str`
:param dryrun: If True, do not create any files, just log the potential location.
:type dryrun: :class:`bool`
:param force: Overwrite existing files
:type force: :class:`bool`
:returns: None
:raises: None
"""
logger.debug('Create module file: package %s, module %s', package, module)
template_file = MODULE_TEMPLATE_NAME
template = env.get_template(template_file)
fn = makename(package, module)
var = get_context(app, package, module, fn)
var['ispkg'] = False
rendered = template.render(var)
write_file(app, makename(package, module), rendered, dest, suffix, dryrun, force)
def create_package_file(app, env, root_package, sub_package, private,
dest, suffix, dryrun, force):
"""Build the text of the file and write the file.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param env: the jinja environment for the templates
:type env: :class:`jinja2.Environment`
:param root_package: the parent package
:type root_package: :class:`str`
:param sub_package: the package name without root
:type sub_package: :class:`str`
:param private: Include \"_private\" modules
:type private: :class:`bool`
:param dest: the output directory
:type dest: :class:`str`
:param suffix: the file extension
:type suffix: :class:`str`
:param dryrun: If True, do not create any files, just log the potential location.
:type dryrun: :class:`bool`
:param force: Overwrite existing files
:type force: :class:`bool`
:returns: None
:raises: None
"""
logger.debug('Create package file: rootpackage %s, sub_package %s', root_package, sub_package)
template_file = PACKAGE_TEMPLATE_NAME
template = env.get_template(template_file)
fn = makename(root_package, sub_package)
var = get_context(app, root_package, sub_package, fn)
var['ispkg'] = True
for submod in var['submods']:
if shall_skip(app, submod, private):
continue
create_module_file(app, env, fn, submod, dest, suffix, dryrun, force)
rendered = template.render(var)
write_file(app, fn, rendered, dest, suffix, dryrun, force)
def shall_skip(app, module, private):
"""Check if we want to skip this module.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module name
:type module: :class:`str`
:param private: True, if privates are allowed
:type private: :class:`bool`
"""
logger.debug('Testing if %s should be skipped.', module)
# skip if it has a "private" name and this is selected
if module != '__init__.py' and module.startswith('_') and \
not private:
logger.debug('Skip %s because its either private or __init__.', module)
return True
logger.debug('Do not skip %s', module)
return False
def recurse_tree(app, env, src, dest, excludes, followlinks, force, dryrun, private, suffix):
"""Look for every file in the directory tree and create the corresponding
ReST files.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param env: the jinja environment
:type env: :class:`jinja2.Environment`
:param src: the path to the python source files
:type src: :class:`str`
:param dest: the output directory
:type dest: :class:`str`
:param excludes: the paths to exclude
:type excludes: :class:`list`
:param followlinks: follow symbolic links
:type followlinks: :class:`bool`
:param force: overwrite existing files
:type force: :class:`bool`
:param dryrun: do not generate files
:type dryrun: :class:`bool`
:param private: include "_private" modules
:type private: :class:`bool`
:param suffix: the file extension
:type suffix: :class:`str`
"""
# check if the base directory is a package and get its name
if INITPY in os.listdir(src):
root_package = src.split(os.path.sep)[-1]
else:
# otherwise, the base is a directory with packages
root_package = None
toplevels = []
for root, subs, files in walk(src, followlinks=followlinks):
# document only Python module files (that aren't excluded)
py_files = sorted(f for f in files
if os.path.splitext(f)[1] in PY_SUFFIXES and # noqa: W504
not is_excluded(os.path.join(root, f), excludes))
is_pkg = INITPY in py_files
if is_pkg:
py_files.remove(INITPY)
py_files.insert(0, INITPY)
elif root != src:
# only accept non-package at toplevel
del subs[:]
continue
# remove hidden ('.') and private ('_') directories, as well as
# excluded dirs
if private:
exclude_prefixes = ('.',)
else:
exclude_prefixes = ('.', '_')
subs[:] = sorted(sub for sub in subs if not sub.startswith(exclude_prefixes) and not
is_excluded(os.path.join(root, sub), excludes))
if is_pkg:
# we are in a package with something to document
if subs or len(py_files) > 1 or not \
shall_skip(app, os.path.join(root, INITPY), private):
subpackage = root[len(src):].lstrip(os.path.sep).\
replace(os.path.sep, '.')
create_package_file(app, env, root_package, subpackage,
private, dest, suffix, dryrun, force)
toplevels.append(makename(root_package, subpackage))
else:
# if we are at the root level, we don't require it to be a package
assert root == src and root_package is None
for py_file in py_files:
if not shall_skip(app, os.path.join(src, py_file), private):
module = os.path.splitext(py_file)[0]
create_module_file(app, env, root_package, module, dest, suffix, dryrun, force)
toplevels.append(module)
return toplevels
def normalize_excludes(excludes):
"""Normalize the excluded directory list."""
return [os.path.normpath(os.path.abspath(exclude)) for exclude in excludes]
def is_excluded(root, excludes):
"""Check if the directory is in the exclude list.
Note: by having trailing slashes, we avoid common prefix issues, like
e.g. an exlude "foo" also accidentally excluding "foobar".
"""
root = os.path.normpath(root)
for exclude in excludes:
if root == exclude:
return True
return False
def generate(app, src, dest, exclude=[], followlinks=False,
force=False, dryrun=False, private=False, suffix='rst',
template_dirs=None):
"""Generage the rst files
Raises an :class:`OSError` if the source path is not a directory.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param src: path to python source files
:type src: :class:`str`
:param dest: output directory
:type dest: :class:`str`
:param exclude: list of paths to exclude
:type exclude: :class:`list`
:param followlinks: follow symbolic links
:type followlinks: :class:`bool`
:param force: overwrite existing files
:type force: :class:`bool`
:param dryrun: do not create any files
:type dryrun: :class:`bool`
:param private: include \"_private\" modules
:type private: :class:`bool`
:param suffix: file suffix
:type suffix: :class:`str`
:param template_dirs: directories to search for user templates
:type template_dirs: None | :class:`list`
:returns: None
:rtype: None
:raises: OSError
"""
suffix = suffix.strip('.')
if not os.path.isdir(src):
raise OSError("%s is not a directory" % src)
if not os.path.isdir(dest) and not dryrun:
os.makedirs(dest)
src = os.path.normpath(os.path.abspath(src))
exclude = normalize_excludes(exclude)
loader = make_loader(template_dirs)
env = make_environment(loader)
recurse_tree(app, env, src, dest, exclude, followlinks, force, dryrun, private, suffix)
def main(app):
"""Parse the config of the app and initiate the generation process
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:returns: None
:rtype: None
:raises: None
"""
c = app.config
src = c.jinjaapi_srcdir
if not src:
return
suffix = "rst"
out = c.jinjaapi_outputdir or app.env.srcdir
if c.jinjaapi_addsummarytemplate:
tpath = pkg_resources.resource_filename(__package__, AUTOSUMMARYTEMPLATE_DIR)
c.templates_path.append(tpath)
tpath = pkg_resources.resource_filename(__package__, TEMPLATE_DIR)
c.templates_path.append(tpath)
prepare_dir(app, out, not c.jinjaapi_nodelete)
generate(app, src, out,
exclude=c.jinjaapi_exclude_paths,
force=c.jinjaapi_force,
followlinks=c.jinjaapi_followlinks,
dryrun=c.jinjaapi_dryrun,
private=c.jinjaapi_includeprivate,
suffix=suffix,
template_dirs=c.templates_path)
|
storax/jinjaapidoc
|
src/jinjaapidoc/gendoc.py
|
import_name
|
python
|
def import_name(app, name):
try:
logger.debug('Importing %r', name)
name, obj = autosummary.import_by_name(name)[:2]
logger.debug('Imported %s', obj)
return obj
except ImportError as e:
logger.warn("Jinjapidoc failed to import %r: %s", name, e)
|
Import the given name and return the imported object, or None if the import fails
:param name: name to import
:type name: str
:returns: the imported object or None
:rtype: object | None
:raises: None
|
train
|
https://github.com/storax/jinjaapidoc/blob/f1eeb6ab5bd1a96c4130306718c6423f37c76856/src/jinjaapidoc/gendoc.py#L152-L167
| null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This is a modification of sphinx.apidoc by David.Zuber.
It uses jinja templates to render the rst files.
Parses a directory tree looking for Python modules and packages and creates
ReST files appropriately to create code documentation with Sphinx.
This is derived form the "sphinx-apidoc" script, which is:
Copyright 2007-2014 by the Sphinx team, see http://sphinx-doc.org/latest/authors.html.
"""
import os
import inspect
import pkgutil
import pkg_resources
import shutil
import jinja2
from sphinx.util.osutil import walk
from sphinx.util import logging
from sphinx.ext import autosummary
logger = logging.getLogger(__name__)
INITPY = '__init__.py'
PY_SUFFIXES = set(['.py', '.pyx'])
TEMPLATE_DIR = 'templates'
"""Built-in template dir for jinjaapi rendering"""
AUTOSUMMARYTEMPLATE_DIR = 'autosummarytemplates'
"""Templates for autosummary"""
MODULE_TEMPLATE_NAME = 'jinjaapi_module.rst'
"""Name of the template that is used for rendering modules."""
PACKAGE_TEMPLATE_NAME = 'jinjaapi_package.rst'
"""Name of the template that is used for rendering packages."""
def prepare_dir(app, directory, delete=False):
"""Create apidoc dir, delete contents if delete is True.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param directory: the apidoc directory. you can use relative paths here
:type directory: str
:param delete: if True, deletes the contents of apidoc. This acts like an override switch.
:type delete: bool
:returns: None
:rtype: None
:raises: None
"""
logger.info("Preparing output directories for jinjaapidoc.")
if os.path.exists(directory):
if delete:
logger.debug("Deleting dir %s", directory)
shutil.rmtree(directory)
logger.debug("Creating dir %s", directory)
os.mkdir(directory)
else:
logger.debug("Creating %s", directory)
os.mkdir(directory)
def make_loader(template_dirs):
"""Return a new :class:`jinja2.FileSystemLoader` that uses the template_dirs
:param template_dirs: directories to search for templates
:type template_dirs: None | :class:`list`
:returns: a new loader
:rtype: :class:`jinja2.FileSystemLoader`
:raises: None
"""
return jinja2.FileSystemLoader(searchpath=template_dirs)
def make_environment(loader):
"""Return a new :class:`jinja2.Environment` with the given loader
:param loader: a jinja2 loader
:type loader: :class:`jinja2.BaseLoader`
:returns: a new environment
:rtype: :class:`jinja2.Environment`
:raises: None
"""
return jinja2.Environment(loader=loader)
def makename(package, module):
"""Join package and module with a dot.
Package or Module can be empty.
:param package: the package name
:type package: :class:`str`
:param module: the module name
:type module: :class:`str`
:returns: the joined name
:rtype: :class:`str`
:raises: :class:`AssertionError`, if both package and module are empty
"""
# Both package and module can be None/empty.
assert package or module, "Specify either package or module"
if package:
name = package
if module:
name += '.' + module
else:
name = module
return name
def write_file(app, name, text, dest, suffix, dryrun, force):
    """Write the output file for module/package <name>.

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :param name: the file name without file extension
    :type name: :class:`str`
    :param text: the content of the file
    :type text: :class:`str`
    :param dest: the output directory
    :type dest: :class:`str`
    :param suffix: the file extension
    :type suffix: :class:`str`
    :param dryrun: If True, do not create any files, just log the potential location.
    :type dryrun: :class:`bool`
    :param force: Overwrite existing files
    :type force: :class:`bool`
    :returns: None
    :raises: None
    """
    fname = os.path.join(dest, '%s.%s' % (name, suffix))
    if dryrun:
        logger.info('Would create file %s.' % fname)
        return
    if not force and os.path.isfile(fname):
        logger.info('File %s already exists, skipping.' % fname)
    else:
        logger.info('Creating file %s.' % fname)
        # context manager replaces the manual try/finally close()
        with open(fname, 'w') as f:
            f.write(text)
            # register the generated document with the sphinx environment
            relpath = os.path.relpath(fname, start=app.env.srcdir)
            abspath = os.sep + relpath
            docpath = app.env.relfn2path(abspath)[0]
            # strip the file extension to get the sphinx doc name
            docpath = docpath.rsplit(os.path.extsep, 1)[0]
            logger.debug('Adding document %s' % docpath)
            app.env.found_docs.add(docpath)
def get_members(app, mod, typ, include_public=None):
"""Return the members of mod of the given type
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param mod: the module with members
:type mod: module
:param typ: the typ, ``'class'``, ``'function'``, ``'exception'``, ``'data'``, ``'members'``
:type typ: str
:param include_public: list of private members to include to publics
:type include_public: list | None
:returns: None
:rtype: None
:raises: None
"""
def include_here(x):
"""Return true if the member should be included in mod.
A member will be included if it is declared in this module or package.
If the `jinjaapidoc_include_from_all` option is `True` then the member
can also be included if it is listed in `__all__`.
:param x: The member
:type x: A class, exception, or function.
:returns: True if the member should be included in mod. False otherwise.
:rtype: bool
"""
return (x.__module__ == mod.__name__ or (include_from_all and x.__name__ in all_list))
all_list = getattr(mod, '__all__', [])
include_from_all = app.config.jinjaapi_include_from_all
include_public = include_public or []
tests = {'class': lambda x: inspect.isclass(x) and not issubclass(x, BaseException) and include_here(x),
'function': lambda x: inspect.isfunction(x) and include_here(x),
'exception': lambda x: inspect.isclass(x) and issubclass(x, BaseException) and include_here(x),
'data': lambda x: not inspect.ismodule(x) and not inspect.isclass(x) and not inspect.isfunction(x),
'members': lambda x: True}
items = []
for name in dir(mod):
i = getattr(mod, name)
inspect.ismodule(i)
if tests.get(typ, lambda x: False)(i):
items.append(name)
public = [x for x in items
if x in include_public or not x.startswith('_')]
logger.debug('Got members of %s of type %s: public %s and %s', mod, typ, public, items)
return public, items
def _get_submodules(app, module):
"""Get all submodules for the given module/package
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module to query or module path
:type module: module | str
:returns: list of module names and boolean whether its a package
:rtype: list
:raises: TypeError
"""
if inspect.ismodule(module):
if hasattr(module, '__path__'):
p = module.__path__
else:
return []
elif isinstance(module, str):
p = module
else:
raise TypeError("Only Module or String accepted. %s given." % type(module))
logger.debug('Getting submodules of %s', p)
submodules = [(name, ispkg) for loader, name, ispkg in pkgutil.iter_modules(p)]
logger.debug('Found submodules of %s: %s', module, submodules)
return submodules
def get_submodules(app, module):
"""Get all submodules without packages for the given module/package
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module to query or module path
:type module: module | str
:returns: list of module names excluding packages
:rtype: list
:raises: TypeError
"""
submodules = _get_submodules(app, module)
return [name for name, ispkg in submodules if not ispkg]
def get_subpackages(app, module):
"""Get all subpackages for the given module/package
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module to query or module path
:type module: module | str
:returns: list of packages names
:rtype: list
:raises: TypeError
"""
submodules = _get_submodules(app, module)
return [name for name, ispkg in submodules if ispkg]
def get_context(app, package, module, fullname):
"""Return a dict for template rendering
Variables:
* :package: The top package
* :module: the module
* :fullname: package.module
* :subpkgs: packages beneath module
* :submods: modules beneath module
* :classes: public classes in module
* :allclasses: public and private classes in module
* :exceptions: public exceptions in module
* :allexceptions: public and private exceptions in module
* :functions: public functions in module
* :allfunctions: public and private functions in module
* :data: public data in module
* :alldata: public and private data in module
* :members: dir(module)
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param package: the parent package name
:type package: str
:param module: the module name
:type module: str
:param fullname: package.module
:type fullname: str
:returns: a dict with variables for template rendering
:rtype: :class:`dict`
:raises: None
"""
var = {'package': package,
'module': module,
'fullname': fullname}
logger.debug('Creating context for: package %s, module %s, fullname %s', package, module, fullname)
obj = import_name(app, fullname)
if not obj:
for k in ('subpkgs', 'submods', 'classes', 'allclasses',
'exceptions', 'allexceptions', 'functions', 'allfunctions',
'data', 'alldata', 'memebers'):
var[k] = []
return var
var['subpkgs'] = get_subpackages(app, obj)
var['submods'] = get_submodules(app, obj)
var['classes'], var['allclasses'] = get_members(app, obj, 'class')
var['exceptions'], var['allexceptions'] = get_members(app, obj, 'exception')
var['functions'], var['allfunctions'] = get_members(app, obj, 'function')
var['data'], var['alldata'] = get_members(app, obj, 'data')
var['members'] = get_members(app, obj, 'members')
logger.debug('Created context: %s', var)
return var
def create_module_file(app, env, package, module, dest, suffix, dryrun, force):
"""Build the text of the file and write the file.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param env: the jinja environment for the templates
:type env: :class:`jinja2.Environment`
:param package: the package name
:type package: :class:`str`
:param module: the module name
:type module: :class:`str`
:param dest: the output directory
:type dest: :class:`str`
:param suffix: the file extension
:type suffix: :class:`str`
:param dryrun: If True, do not create any files, just log the potential location.
:type dryrun: :class:`bool`
:param force: Overwrite existing files
:type force: :class:`bool`
:returns: None
:raises: None
"""
logger.debug('Create module file: package %s, module %s', package, module)
template_file = MODULE_TEMPLATE_NAME
template = env.get_template(template_file)
fn = makename(package, module)
var = get_context(app, package, module, fn)
var['ispkg'] = False
rendered = template.render(var)
write_file(app, makename(package, module), rendered, dest, suffix, dryrun, force)
def create_package_file(app, env, root_package, sub_package, private,
dest, suffix, dryrun, force):
"""Build the text of the file and write the file.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param env: the jinja environment for the templates
:type env: :class:`jinja2.Environment`
:param root_package: the parent package
:type root_package: :class:`str`
:param sub_package: the package name without root
:type sub_package: :class:`str`
:param private: Include \"_private\" modules
:type private: :class:`bool`
:param dest: the output directory
:type dest: :class:`str`
:param suffix: the file extension
:type suffix: :class:`str`
:param dryrun: If True, do not create any files, just log the potential location.
:type dryrun: :class:`bool`
:param force: Overwrite existing files
:type force: :class:`bool`
:returns: None
:raises: None
"""
logger.debug('Create package file: rootpackage %s, sub_package %s', root_package, sub_package)
template_file = PACKAGE_TEMPLATE_NAME
template = env.get_template(template_file)
fn = makename(root_package, sub_package)
var = get_context(app, root_package, sub_package, fn)
var['ispkg'] = True
for submod in var['submods']:
if shall_skip(app, submod, private):
continue
create_module_file(app, env, fn, submod, dest, suffix, dryrun, force)
rendered = template.render(var)
write_file(app, fn, rendered, dest, suffix, dryrun, force)
def shall_skip(app, module, private):
"""Check if we want to skip this module.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module name
:type module: :class:`str`
:param private: True, if privates are allowed
:type private: :class:`bool`
"""
logger.debug('Testing if %s should be skipped.', module)
# skip if it has a "private" name and this is selected
if module != '__init__.py' and module.startswith('_') and \
not private:
logger.debug('Skip %s because its either private or __init__.', module)
return True
logger.debug('Do not skip %s', module)
return False
def recurse_tree(app, env, src, dest, excludes, followlinks, force, dryrun, private, suffix):
"""Look for every file in the directory tree and create the corresponding
ReST files.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param env: the jinja environment
:type env: :class:`jinja2.Environment`
:param src: the path to the python source files
:type src: :class:`str`
:param dest: the output directory
:type dest: :class:`str`
:param excludes: the paths to exclude
:type excludes: :class:`list`
:param followlinks: follow symbolic links
:type followlinks: :class:`bool`
:param force: overwrite existing files
:type force: :class:`bool`
:param dryrun: do not generate files
:type dryrun: :class:`bool`
:param private: include "_private" modules
:type private: :class:`bool`
:param suffix: the file extension
:type suffix: :class:`str`
"""
# check if the base directory is a package and get its name
if INITPY in os.listdir(src):
root_package = src.split(os.path.sep)[-1]
else:
# otherwise, the base is a directory with packages
root_package = None
toplevels = []
for root, subs, files in walk(src, followlinks=followlinks):
# document only Python module files (that aren't excluded)
py_files = sorted(f for f in files
if os.path.splitext(f)[1] in PY_SUFFIXES and # noqa: W504
not is_excluded(os.path.join(root, f), excludes))
is_pkg = INITPY in py_files
if is_pkg:
py_files.remove(INITPY)
py_files.insert(0, INITPY)
elif root != src:
# only accept non-package at toplevel
del subs[:]
continue
# remove hidden ('.') and private ('_') directories, as well as
# excluded dirs
if private:
exclude_prefixes = ('.',)
else:
exclude_prefixes = ('.', '_')
subs[:] = sorted(sub for sub in subs if not sub.startswith(exclude_prefixes) and not
is_excluded(os.path.join(root, sub), excludes))
if is_pkg:
# we are in a package with something to document
if subs or len(py_files) > 1 or not \
shall_skip(app, os.path.join(root, INITPY), private):
subpackage = root[len(src):].lstrip(os.path.sep).\
replace(os.path.sep, '.')
create_package_file(app, env, root_package, subpackage,
private, dest, suffix, dryrun, force)
toplevels.append(makename(root_package, subpackage))
else:
# if we are at the root level, we don't require it to be a package
assert root == src and root_package is None
for py_file in py_files:
if not shall_skip(app, os.path.join(src, py_file), private):
module = os.path.splitext(py_file)[0]
create_module_file(app, env, root_package, module, dest, suffix, dryrun, force)
toplevels.append(module)
return toplevels
def normalize_excludes(excludes):
"""Normalize the excluded directory list."""
return [os.path.normpath(os.path.abspath(exclude)) for exclude in excludes]
def is_excluded(root, excludes):
"""Check if the directory is in the exclude list.
Note: by having trailing slashes, we avoid common prefix issues, like
e.g. an exlude "foo" also accidentally excluding "foobar".
"""
root = os.path.normpath(root)
for exclude in excludes:
if root == exclude:
return True
return False
def generate(app, src, dest, exclude=[], followlinks=False,
force=False, dryrun=False, private=False, suffix='rst',
template_dirs=None):
"""Generage the rst files
Raises an :class:`OSError` if the source path is not a directory.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param src: path to python source files
:type src: :class:`str`
:param dest: output directory
:type dest: :class:`str`
:param exclude: list of paths to exclude
:type exclude: :class:`list`
:param followlinks: follow symbolic links
:type followlinks: :class:`bool`
:param force: overwrite existing files
:type force: :class:`bool`
:param dryrun: do not create any files
:type dryrun: :class:`bool`
:param private: include \"_private\" modules
:type private: :class:`bool`
:param suffix: file suffix
:type suffix: :class:`str`
:param template_dirs: directories to search for user templates
:type template_dirs: None | :class:`list`
:returns: None
:rtype: None
:raises: OSError
"""
suffix = suffix.strip('.')
if not os.path.isdir(src):
raise OSError("%s is not a directory" % src)
if not os.path.isdir(dest) and not dryrun:
os.makedirs(dest)
src = os.path.normpath(os.path.abspath(src))
exclude = normalize_excludes(exclude)
loader = make_loader(template_dirs)
env = make_environment(loader)
recurse_tree(app, env, src, dest, exclude, followlinks, force, dryrun, private, suffix)
def main(app):
"""Parse the config of the app and initiate the generation process
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:returns: None
:rtype: None
:raises: None
"""
c = app.config
src = c.jinjaapi_srcdir
if not src:
return
suffix = "rst"
out = c.jinjaapi_outputdir or app.env.srcdir
if c.jinjaapi_addsummarytemplate:
tpath = pkg_resources.resource_filename(__package__, AUTOSUMMARYTEMPLATE_DIR)
c.templates_path.append(tpath)
tpath = pkg_resources.resource_filename(__package__, TEMPLATE_DIR)
c.templates_path.append(tpath)
prepare_dir(app, out, not c.jinjaapi_nodelete)
generate(app, src, out,
exclude=c.jinjaapi_exclude_paths,
force=c.jinjaapi_force,
followlinks=c.jinjaapi_followlinks,
dryrun=c.jinjaapi_dryrun,
private=c.jinjaapi_includeprivate,
suffix=suffix,
template_dirs=c.templates_path)
|
storax/jinjaapidoc
|
src/jinjaapidoc/gendoc.py
|
get_members
|
python
|
def get_members(app, mod, typ, include_public=None):
def include_here(x):
"""Return true if the member should be included in mod.
A member will be included if it is declared in this module or package.
If the `jinjaapidoc_include_from_all` option is `True` then the member
can also be included if it is listed in `__all__`.
:param x: The member
:type x: A class, exception, or function.
:returns: True if the member should be included in mod. False otherwise.
:rtype: bool
"""
return (x.__module__ == mod.__name__ or (include_from_all and x.__name__ in all_list))
all_list = getattr(mod, '__all__', [])
include_from_all = app.config.jinjaapi_include_from_all
include_public = include_public or []
tests = {'class': lambda x: inspect.isclass(x) and not issubclass(x, BaseException) and include_here(x),
'function': lambda x: inspect.isfunction(x) and include_here(x),
'exception': lambda x: inspect.isclass(x) and issubclass(x, BaseException) and include_here(x),
'data': lambda x: not inspect.ismodule(x) and not inspect.isclass(x) and not inspect.isfunction(x),
'members': lambda x: True}
items = []
for name in dir(mod):
i = getattr(mod, name)
inspect.ismodule(i)
if tests.get(typ, lambda x: False)(i):
items.append(name)
public = [x for x in items
if x in include_public or not x.startswith('_')]
logger.debug('Got members of %s of type %s: public %s and %s', mod, typ, public, items)
return public, items
|
Return the members of mod of the given type
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param mod: the module with members
:type mod: module
:param typ: the typ, ``'class'``, ``'function'``, ``'exception'``, ``'data'``, ``'members'``
:type typ: str
:param include_public: list of private members to include to publics
:type include_public: list | None
:returns: None
:rtype: None
:raises: None
|
train
|
https://github.com/storax/jinjaapidoc/blob/f1eeb6ab5bd1a96c4130306718c6423f37c76856/src/jinjaapidoc/gendoc.py#L170-L218
| null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This is a modification of sphinx.apidoc by David.Zuber.
It uses jinja templates to render the rst files.
Parses a directory tree looking for Python modules and packages and creates
ReST files appropriately to create code documentation with Sphinx.
This is derived form the "sphinx-apidoc" script, which is:
Copyright 2007-2014 by the Sphinx team, see http://sphinx-doc.org/latest/authors.html.
"""
import os
import inspect
import pkgutil
import pkg_resources
import shutil
import jinja2
from sphinx.util.osutil import walk
from sphinx.util import logging
from sphinx.ext import autosummary
logger = logging.getLogger(__name__)
INITPY = '__init__.py'
PY_SUFFIXES = set(['.py', '.pyx'])
TEMPLATE_DIR = 'templates'
"""Built-in template dir for jinjaapi rendering"""
AUTOSUMMARYTEMPLATE_DIR = 'autosummarytemplates'
"""Templates for autosummary"""
MODULE_TEMPLATE_NAME = 'jinjaapi_module.rst'
"""Name of the template that is used for rendering modules."""
PACKAGE_TEMPLATE_NAME = 'jinjaapi_package.rst'
"""Name of the template that is used for rendering packages."""
def prepare_dir(app, directory, delete=False):
"""Create apidoc dir, delete contents if delete is True.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param directory: the apidoc directory. you can use relative paths here
:type directory: str
:param delete: if True, deletes the contents of apidoc. This acts like an override switch.
:type delete: bool
:returns: None
:rtype: None
:raises: None
"""
logger.info("Preparing output directories for jinjaapidoc.")
if os.path.exists(directory):
if delete:
logger.debug("Deleting dir %s", directory)
shutil.rmtree(directory)
logger.debug("Creating dir %s", directory)
os.mkdir(directory)
else:
logger.debug("Creating %s", directory)
os.mkdir(directory)
def make_loader(template_dirs):
"""Return a new :class:`jinja2.FileSystemLoader` that uses the template_dirs
:param template_dirs: directories to search for templates
:type template_dirs: None | :class:`list`
:returns: a new loader
:rtype: :class:`jinja2.FileSystemLoader`
:raises: None
"""
return jinja2.FileSystemLoader(searchpath=template_dirs)
def make_environment(loader):
"""Return a new :class:`jinja2.Environment` with the given loader
:param loader: a jinja2 loader
:type loader: :class:`jinja2.BaseLoader`
:returns: a new environment
:rtype: :class:`jinja2.Environment`
:raises: None
"""
return jinja2.Environment(loader=loader)
def makename(package, module):
"""Join package and module with a dot.
Package or Module can be empty.
:param package: the package name
:type package: :class:`str`
:param module: the module name
:type module: :class:`str`
:returns: the joined name
:rtype: :class:`str`
:raises: :class:`AssertionError`, if both package and module are empty
"""
# Both package and module can be None/empty.
assert package or module, "Specify either package or module"
if package:
name = package
if module:
name += '.' + module
else:
name = module
return name
def write_file(app, name, text, dest, suffix, dryrun, force):
"""Write the output file for module/package <name>.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param name: the file name without file extension
:type name: :class:`str`
:param text: the content of the file
:type text: :class:`str`
:param dest: the output directory
:type dest: :class:`str`
:param suffix: the file extension
:type suffix: :class:`str`
:param dryrun: If True, do not create any files, just log the potential location.
:type dryrun: :class:`bool`
:param force: Overwrite existing files
:type force: :class:`bool`
:returns: None
:raises: None
"""
fname = os.path.join(dest, '%s.%s' % (name, suffix))
if dryrun:
logger.info('Would create file %s.' % fname)
return
if not force and os.path.isfile(fname):
logger.info('File %s already exists, skipping.' % fname)
else:
logger.info('Creating file %s.' % fname)
f = open(fname, 'w')
try:
f.write(text)
relpath = os.path.relpath(fname, start=app.env.srcdir)
abspath = os.sep + relpath
docpath = app.env.relfn2path(abspath)[0]
docpath = docpath.rsplit(os.path.extsep, 1)[0]
logger.debug('Adding document %s' % docpath)
app.env.found_docs.add(docpath)
finally:
f.close()
def import_name(app, name):
"""Import the given name and return name, obj, parent, mod_name
:param name: name to import
:type name: str
:returns: the imported object or None
:rtype: object | None
:raises: None
"""
try:
logger.debug('Importing %r', name)
name, obj = autosummary.import_by_name(name)[:2]
logger.debug('Imported %s', obj)
return obj
except ImportError as e:
logger.warn("Jinjapidoc failed to import %r: %s", name, e)
def _get_submodules(app, module):
"""Get all submodules for the given module/package
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module to query or module path
:type module: module | str
:returns: list of module names and boolean whether its a package
:rtype: list
:raises: TypeError
"""
if inspect.ismodule(module):
if hasattr(module, '__path__'):
p = module.__path__
else:
return []
elif isinstance(module, str):
p = module
else:
raise TypeError("Only Module or String accepted. %s given." % type(module))
logger.debug('Getting submodules of %s', p)
submodules = [(name, ispkg) for loader, name, ispkg in pkgutil.iter_modules(p)]
logger.debug('Found submodules of %s: %s', module, submodules)
return submodules
def get_submodules(app, module):
"""Get all submodules without packages for the given module/package
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module to query or module path
:type module: module | str
:returns: list of module names excluding packages
:rtype: list
:raises: TypeError
"""
submodules = _get_submodules(app, module)
return [name for name, ispkg in submodules if not ispkg]
def get_subpackages(app, module):
"""Get all subpackages for the given module/package
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module to query or module path
:type module: module | str
:returns: list of packages names
:rtype: list
:raises: TypeError
"""
submodules = _get_submodules(app, module)
return [name for name, ispkg in submodules if ispkg]
def get_context(app, package, module, fullname):
"""Return a dict for template rendering
Variables:
* :package: The top package
* :module: the module
* :fullname: package.module
* :subpkgs: packages beneath module
* :submods: modules beneath module
* :classes: public classes in module
* :allclasses: public and private classes in module
* :exceptions: public exceptions in module
* :allexceptions: public and private exceptions in module
* :functions: public functions in module
* :allfunctions: public and private functions in module
* :data: public data in module
* :alldata: public and private data in module
* :members: dir(module)
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param package: the parent package name
:type package: str
:param module: the module name
:type module: str
:param fullname: package.module
:type fullname: str
:returns: a dict with variables for template rendering
:rtype: :class:`dict`
:raises: None
"""
var = {'package': package,
'module': module,
'fullname': fullname}
logger.debug('Creating context for: package %s, module %s, fullname %s', package, module, fullname)
obj = import_name(app, fullname)
if not obj:
for k in ('subpkgs', 'submods', 'classes', 'allclasses',
'exceptions', 'allexceptions', 'functions', 'allfunctions',
'data', 'alldata', 'memebers'):
var[k] = []
return var
var['subpkgs'] = get_subpackages(app, obj)
var['submods'] = get_submodules(app, obj)
var['classes'], var['allclasses'] = get_members(app, obj, 'class')
var['exceptions'], var['allexceptions'] = get_members(app, obj, 'exception')
var['functions'], var['allfunctions'] = get_members(app, obj, 'function')
var['data'], var['alldata'] = get_members(app, obj, 'data')
var['members'] = get_members(app, obj, 'members')
logger.debug('Created context: %s', var)
return var
def create_module_file(app, env, package, module, dest, suffix, dryrun, force):
"""Build the text of the file and write the file.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param env: the jinja environment for the templates
:type env: :class:`jinja2.Environment`
:param package: the package name
:type package: :class:`str`
:param module: the module name
:type module: :class:`str`
:param dest: the output directory
:type dest: :class:`str`
:param suffix: the file extension
:type suffix: :class:`str`
:param dryrun: If True, do not create any files, just log the potential location.
:type dryrun: :class:`bool`
:param force: Overwrite existing files
:type force: :class:`bool`
:returns: None
:raises: None
"""
logger.debug('Create module file: package %s, module %s', package, module)
template_file = MODULE_TEMPLATE_NAME
template = env.get_template(template_file)
fn = makename(package, module)
var = get_context(app, package, module, fn)
var['ispkg'] = False
rendered = template.render(var)
write_file(app, makename(package, module), rendered, dest, suffix, dryrun, force)
def create_package_file(app, env, root_package, sub_package, private,
dest, suffix, dryrun, force):
"""Build the text of the file and write the file.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param env: the jinja environment for the templates
:type env: :class:`jinja2.Environment`
:param root_package: the parent package
:type root_package: :class:`str`
:param sub_package: the package name without root
:type sub_package: :class:`str`
:param private: Include \"_private\" modules
:type private: :class:`bool`
:param dest: the output directory
:type dest: :class:`str`
:param suffix: the file extension
:type suffix: :class:`str`
:param dryrun: If True, do not create any files, just log the potential location.
:type dryrun: :class:`bool`
:param force: Overwrite existing files
:type force: :class:`bool`
:returns: None
:raises: None
"""
logger.debug('Create package file: rootpackage %s, sub_package %s', root_package, sub_package)
template_file = PACKAGE_TEMPLATE_NAME
template = env.get_template(template_file)
fn = makename(root_package, sub_package)
var = get_context(app, root_package, sub_package, fn)
var['ispkg'] = True
for submod in var['submods']:
if shall_skip(app, submod, private):
continue
create_module_file(app, env, fn, submod, dest, suffix, dryrun, force)
rendered = template.render(var)
write_file(app, fn, rendered, dest, suffix, dryrun, force)
def shall_skip(app, module, private):
"""Check if we want to skip this module.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module name
:type module: :class:`str`
:param private: True, if privates are allowed
:type private: :class:`bool`
"""
logger.debug('Testing if %s should be skipped.', module)
# skip if it has a "private" name and this is selected
if module != '__init__.py' and module.startswith('_') and \
not private:
logger.debug('Skip %s because its either private or __init__.', module)
return True
logger.debug('Do not skip %s', module)
return False
def recurse_tree(app, env, src, dest, excludes, followlinks, force, dryrun, private, suffix):
"""Look for every file in the directory tree and create the corresponding
ReST files.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param env: the jinja environment
:type env: :class:`jinja2.Environment`
:param src: the path to the python source files
:type src: :class:`str`
:param dest: the output directory
:type dest: :class:`str`
:param excludes: the paths to exclude
:type excludes: :class:`list`
:param followlinks: follow symbolic links
:type followlinks: :class:`bool`
:param force: overwrite existing files
:type force: :class:`bool`
:param dryrun: do not generate files
:type dryrun: :class:`bool`
:param private: include "_private" modules
:type private: :class:`bool`
:param suffix: the file extension
:type suffix: :class:`str`
"""
# check if the base directory is a package and get its name
if INITPY in os.listdir(src):
root_package = src.split(os.path.sep)[-1]
else:
# otherwise, the base is a directory with packages
root_package = None
toplevels = []
for root, subs, files in walk(src, followlinks=followlinks):
# document only Python module files (that aren't excluded)
py_files = sorted(f for f in files
if os.path.splitext(f)[1] in PY_SUFFIXES and # noqa: W504
not is_excluded(os.path.join(root, f), excludes))
is_pkg = INITPY in py_files
if is_pkg:
py_files.remove(INITPY)
py_files.insert(0, INITPY)
elif root != src:
# only accept non-package at toplevel
del subs[:]
continue
# remove hidden ('.') and private ('_') directories, as well as
# excluded dirs
if private:
exclude_prefixes = ('.',)
else:
exclude_prefixes = ('.', '_')
subs[:] = sorted(sub for sub in subs if not sub.startswith(exclude_prefixes) and not
is_excluded(os.path.join(root, sub), excludes))
if is_pkg:
# we are in a package with something to document
if subs or len(py_files) > 1 or not \
shall_skip(app, os.path.join(root, INITPY), private):
subpackage = root[len(src):].lstrip(os.path.sep).\
replace(os.path.sep, '.')
create_package_file(app, env, root_package, subpackage,
private, dest, suffix, dryrun, force)
toplevels.append(makename(root_package, subpackage))
else:
# if we are at the root level, we don't require it to be a package
assert root == src and root_package is None
for py_file in py_files:
if not shall_skip(app, os.path.join(src, py_file), private):
module = os.path.splitext(py_file)[0]
create_module_file(app, env, root_package, module, dest, suffix, dryrun, force)
toplevels.append(module)
return toplevels
def normalize_excludes(excludes):
"""Normalize the excluded directory list."""
return [os.path.normpath(os.path.abspath(exclude)) for exclude in excludes]
def is_excluded(root, excludes):
"""Check if the directory is in the exclude list.
Note: by having trailing slashes, we avoid common prefix issues, like
e.g. an exlude "foo" also accidentally excluding "foobar".
"""
root = os.path.normpath(root)
for exclude in excludes:
if root == exclude:
return True
return False
def generate(app, src, dest, exclude=[], followlinks=False,
force=False, dryrun=False, private=False, suffix='rst',
template_dirs=None):
"""Generage the rst files
Raises an :class:`OSError` if the source path is not a directory.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param src: path to python source files
:type src: :class:`str`
:param dest: output directory
:type dest: :class:`str`
:param exclude: list of paths to exclude
:type exclude: :class:`list`
:param followlinks: follow symbolic links
:type followlinks: :class:`bool`
:param force: overwrite existing files
:type force: :class:`bool`
:param dryrun: do not create any files
:type dryrun: :class:`bool`
:param private: include \"_private\" modules
:type private: :class:`bool`
:param suffix: file suffix
:type suffix: :class:`str`
:param template_dirs: directories to search for user templates
:type template_dirs: None | :class:`list`
:returns: None
:rtype: None
:raises: OSError
"""
suffix = suffix.strip('.')
if not os.path.isdir(src):
raise OSError("%s is not a directory" % src)
if not os.path.isdir(dest) and not dryrun:
os.makedirs(dest)
src = os.path.normpath(os.path.abspath(src))
exclude = normalize_excludes(exclude)
loader = make_loader(template_dirs)
env = make_environment(loader)
recurse_tree(app, env, src, dest, exclude, followlinks, force, dryrun, private, suffix)
def main(app):
"""Parse the config of the app and initiate the generation process
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:returns: None
:rtype: None
:raises: None
"""
c = app.config
src = c.jinjaapi_srcdir
if not src:
return
suffix = "rst"
out = c.jinjaapi_outputdir or app.env.srcdir
if c.jinjaapi_addsummarytemplate:
tpath = pkg_resources.resource_filename(__package__, AUTOSUMMARYTEMPLATE_DIR)
c.templates_path.append(tpath)
tpath = pkg_resources.resource_filename(__package__, TEMPLATE_DIR)
c.templates_path.append(tpath)
prepare_dir(app, out, not c.jinjaapi_nodelete)
generate(app, src, out,
exclude=c.jinjaapi_exclude_paths,
force=c.jinjaapi_force,
followlinks=c.jinjaapi_followlinks,
dryrun=c.jinjaapi_dryrun,
private=c.jinjaapi_includeprivate,
suffix=suffix,
template_dirs=c.templates_path)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.