| code (string, length 2–1.04M) | repo_path (string, length 5–517) | parsed_code (string, length 0–1.04M) | quality_prob (float64, 0.02–0.95) | learning_prob (float64, 0.02–0.93) |
|---|---|---|---|---|
import os
import numpy as np
class Graph:
    """Graph read from a text description file.

    File format: the first line holds the number of vertices; every other
    line describes one edge as "u v" (or "u v w" when ``weighted=True``).
    Vertex indices in the file are 1-based; they are stored 0-based.

    ``mem_mode`` selects the in-memory layout: 'mtx' keeps a dense
    adjacency matrix, 'lst' keeps per-vertex sorted neighbor lists.
    """

    def __init__(self, filename, mem_mode='lst', weighted=False, directed=False):
        # The path to the description file must be a string.
        if not isinstance(filename, str):
            raise TypeError('Graph expects a string with the path to the text file that describes the graph')
        # Validate the requested memory layout.
        self.mem_mode = mem_mode.lower()
        if self.mem_mode not in ('mtx', 'lst'):
            raise ValueError('memory_mode must be either "mtx" (matrix) or "lst" (list)')
        # Fail early -- and report WHICH file -- when the path does not exist.
        if not os.path.isfile(filename):
            raise FileNotFoundError(f'file not found "{filename}"')
        # Graph name = base file name without directory and extension.
        self.name = os.path.splitext(os.path.split(filename)[1])[0]
        # -----------------------------------------------
        self.shape = {'v': 0, 'e': 0}  # number of vertices (v) and edges (e)
        self.adjac = np.array(0)       # adjacency matrix/list, filled by the readers below
        self.degrees = np.array(0)     # degrees[v]: degree of vertex v
        # -----------------------------------------------
        self.weighted = weighted
        self.has_neg_weights = False
        self.directed = directed
        # -----------------------------------------------
        if self.mem_mode == 'mtx':
            self.read_as_matrix(filename)
        else:
            self.read_as_list(filename)

    # -----------------------------------------------
    def read_as_matrix(self, filename):
        """Load the graph into a dense adjacency matrix (``self.adjac``)."""
        with open(filename) as f:
            lines = f.readlines()
        self.shape['v'] = int(lines[0].split()[0])  # number of vertices
        if self.weighted:
            self.adjac = np.zeros((self.shape['v'], self.shape['v']))
        else:
            self.adjac = np.zeros((self.shape['v'], self.shape['v']), dtype=np.uint8)
        self.degrees = np.zeros(self.shape['v'])
        self.shape['e'] = 0
        for edge in lines[1:]:  # each remaining line is one edge
            edge = edge.split()
            # NOTE: vertex indices in the text file start at 1.
            edge[0] = int(edge[0]) - 1
            edge[1] = int(edge[1]) - 1
            if self.weighted:
                edge[2] = float(edge[2])  # edge weight
                if edge[2] < 0:
                    self.has_neg_weights = True
            # NOTE(review): a weight of exactly 0 is indistinguishable from
            # "no edge" in matrix mode -- zero-weight edges are dropped.
            if not self.adjac[edge[0]][edge[1]]:  # skip duplicated edges
                self.adjac[edge[0]][edge[1]] = True if not self.weighted else edge[2]
                self.degrees[edge[0]] += 1
                self.shape['e'] += 1
                if not self.directed:
                    # undirected graph: edge (u, v) == (v, u)
                    self.adjac[edge[1]][edge[0]] = True if not self.weighted else edge[2]
                    self.degrees[edge[1]] += 1
        return self.adjac

    # -----------------------------------------------
    def read_as_list(self, filename):
        """Load the graph into per-vertex neighbor lists (``self.adjac``).

        Unweighted: ``adjac[v]`` is a sorted array of neighbor indices.
        Weighted: ``adjac[v]`` is an array of ``[u, w]`` pairs sorted by
        neighbor index ``u``.
        """
        with open(filename) as f:
            lines = f.readlines()
        self.shape['v'] = int(lines[0])  # number of vertices
        self.adjac = [[] for _ in range(self.shape['v'])]
        self.degrees = np.zeros(self.shape['v'])
        self.shape['e'] = 0
        for edge in lines[1:]:  # each remaining line is one edge
            edge = edge.split()
            # NOTE: vertex indices in the text file start at 1.
            edge[0] = int(edge[0]) - 1
            edge[1] = int(edge[1]) - 1
            if self.weighted:
                edge[2] = float(edge[2])
                if edge[2] < 0:
                    self.has_neg_weights = True
                # BUGFIX: compare against the stored neighbor index (entries
                # are [u, w] pairs); `edge[1] in list_of_pairs` never matched,
                # so duplicated weighted edges were silently double-counted.
                seen = any(pair[0] == edge[1] for pair in self.adjac[edge[0]])
            else:
                seen = edge[1] in self.adjac[edge[0]]
            if not seen:  # skip edges that were already registered
                if not self.weighted:
                    self.adjac[edge[0]].append(edge[1])
                else:
                    self.adjac[edge[0]].append([edge[1], edge[2]])
                self.degrees[edge[0]] += 1
                self.shape['e'] += 1
                if not self.directed:
                    # undirected graph: register the edge on both endpoints
                    if not self.weighted:
                        self.adjac[edge[1]].append(edge[0])
                    else:
                        self.adjac[edge[1]].append([edge[0], edge[2]])
                    self.degrees[edge[1]] += 1
        self.adjac = np.array(self.adjac, dtype=object)
        # Sort every neighbor list by increasing vertex index (weights are
        # reordered together with their neighbors).
        for v in range(len(self.adjac)):
            if not self.weighted:
                self.adjac[v] = np.sort(self.adjac[v], axis=0)
            else:
                self.adjac[v] = np.array(self.adjac[v])
                if len(self.adjac[v]) > 0:
                    # vertices without neighbors have nothing to sort
                    neighbors = self.adjac[v][:, 0]
                    weights = self.adjac[v][:, 1]
                    # [REF] https://numpy.org/doc/stable/reference/generated/numpy.argsort.html
                    order = np.argsort(neighbors)
                    self.adjac[v][:, 0] = neighbors[order]
                    self.adjac[v][:, 1] = weights[order]
        return self.adjac

    # -----------------------------------------------
    def neighbors(self, vert):
        """Return a numpy array with the indices of ``vert``'s neighbors."""
        if self.mem_mode == 'mtx':
            # non-zero entries of the vertex row are its neighbors
            return np.where(self.adjac[vert] != 0)[0]
        if not self.weighted:
            return self.adjac[vert]
        if len(self.adjac[vert]) == 0:
            # no neighbors: column-slicing an empty array would raise
            return np.int32([])
        return np.int32(self.adjac[vert][:, 0])  # first column: neighbor index

    # -----------------------------------------------
    def weights(self, vert):
        """Return the weights of the edges incident to ``vert``.

        The order matches ``neighbors(vert)`` (increasing neighbor index).
        For an unweighted graph every edge weighs 1.
        """
        if self.mem_mode == 'mtx':
            return self.adjac[vert][self.adjac[vert] != 0]
        if len(self.adjac[vert]) == 0:
            return np.int32([])
        if not self.weighted:
            # BUGFIX: np.ones() takes the COUNT of neighbors, not the
            # neighbor list itself (the old call raised TypeError).
            return np.ones(len(self.adjac[vert]))
        return self.adjac[vert][:, 1]  # second column: edge weight

    # -----------------------------------------------
    def edge(self, v1, v2):
        """Return the weight of edge (v1, v2), or 0 when there is none.

        For unweighted graphs an existing edge is reported as 1.
        """
        if self.mem_mode == 'mtx':
            return self.adjac[v1][v2]
        if len(self.adjac[v1]) == 0:
            # v1 has no neighbors at all
            return 0
        if not self.weighted:
            # The neighbor list is sorted, so np.searchsorted() finds v2
            # in O(log n) instead of a linear scan.
            where = np.searchsorted(self.adjac[v1], v2)
            if (where == len(self.adjac[v1])) or (v2 != self.adjac[v1][where]):
                return 0  # v2 is not among v1's neighbors
            # BUGFIX: return 1 instead of the stored neighbor index, which
            # was falsy (0) whenever the neighbor happened to be vertex 0.
            return 1
        where = np.searchsorted(self.adjac[v1][:, 0], v2)
        if (where == len(self.adjac[v1])) or (v2 != self.adjac[v1][where, 0]):
            return 0  # v2 is not among v1's neighbors
        return self.adjac[v1][where, 1]

    # -----------------------------------------------
    @property
    def n(self):
        """Number of vertices."""
        return self.shape['v']

    @property
    def m(self):
        """Number of edges."""
        return self.shape['e']

    # -----------------------------------------------
    def dg_min(self):
        """Smallest vertex degree."""
        return self.degrees.min()

    def dg_max(self):
        """Largest vertex degree."""
        return self.degrees.max()

    def dg_avg(self):
        """Average degree, 2 * m / n (lecture 3, slide 10)."""
        return 2 * (self.m / self.n)

    def dg_median(self):
        """Median vertex degree."""
        return np.median(self.degrees)

    def __len__(self):
        # len(graph) == number of vertices
        return self.shape['v']

    def __repr__(self):
        s = f'Graph "{self.name}" shape {self.shape}\n'
        s += f'(mem_mode: {self.mem_mode}, weighted: {self.weighted}, has_neg_weights: {self.has_neg_weights}, directed: {self.directed})'
        return s

    def __getitem__(self, key):
        # graph[v] -> adjacency row / neighbor list of v
        return self.adjac[key]

    def __iter__(self):
        # iterating a graph yields its vertex indices 0..n-1
        return iter(range(self.n))
# ----------------------------------------------- | guara/graph.py | import os
import numpy as np
class Graph:
    """Graph read from a text description file.

    File format: the first line holds the number of vertices; every other
    line describes one edge as "u v" (or "u v w" when ``weighted=True``).
    Vertex indices in the file are 1-based; they are stored 0-based.

    ``mem_mode`` selects the in-memory layout: 'mtx' keeps a dense
    adjacency matrix, 'lst' keeps per-vertex sorted neighbor lists.
    """

    def __init__(self, filename, mem_mode='lst', weighted=False, directed=False):
        # The path to the description file must be a string.
        if not isinstance(filename, str):
            raise TypeError('Graph expects a string with the path to the text file that describes the graph')
        # Validate the requested memory layout.
        self.mem_mode = mem_mode.lower()
        if self.mem_mode not in ('mtx', 'lst'):
            raise ValueError('memory_mode must be either "mtx" (matrix) or "lst" (list)')
        # Fail early -- and report WHICH file -- when the path does not exist.
        if not os.path.isfile(filename):
            raise FileNotFoundError(f'file not found "{filename}"')
        # Graph name = base file name without directory and extension.
        self.name = os.path.splitext(os.path.split(filename)[1])[0]
        # -----------------------------------------------
        self.shape = {'v': 0, 'e': 0}  # number of vertices (v) and edges (e)
        self.adjac = np.array(0)       # adjacency matrix/list, filled by the readers below
        self.degrees = np.array(0)     # degrees[v]: degree of vertex v
        # -----------------------------------------------
        self.weighted = weighted
        self.has_neg_weights = False
        self.directed = directed
        # -----------------------------------------------
        if self.mem_mode == 'mtx':
            self.read_as_matrix(filename)
        else:
            self.read_as_list(filename)

    # -----------------------------------------------
    def read_as_matrix(self, filename):
        """Load the graph into a dense adjacency matrix (``self.adjac``)."""
        with open(filename) as f:
            lines = f.readlines()
        self.shape['v'] = int(lines[0].split()[0])  # number of vertices
        if self.weighted:
            self.adjac = np.zeros((self.shape['v'], self.shape['v']))
        else:
            self.adjac = np.zeros((self.shape['v'], self.shape['v']), dtype=np.uint8)
        self.degrees = np.zeros(self.shape['v'])
        self.shape['e'] = 0
        for edge in lines[1:]:  # each remaining line is one edge
            edge = edge.split()
            # NOTE: vertex indices in the text file start at 1.
            edge[0] = int(edge[0]) - 1
            edge[1] = int(edge[1]) - 1
            if self.weighted:
                edge[2] = float(edge[2])  # edge weight
                if edge[2] < 0:
                    self.has_neg_weights = True
            # NOTE(review): a weight of exactly 0 is indistinguishable from
            # "no edge" in matrix mode -- zero-weight edges are dropped.
            if not self.adjac[edge[0]][edge[1]]:  # skip duplicated edges
                self.adjac[edge[0]][edge[1]] = True if not self.weighted else edge[2]
                self.degrees[edge[0]] += 1
                self.shape['e'] += 1
                if not self.directed:
                    # undirected graph: edge (u, v) == (v, u)
                    self.adjac[edge[1]][edge[0]] = True if not self.weighted else edge[2]
                    self.degrees[edge[1]] += 1
        return self.adjac

    # -----------------------------------------------
    def read_as_list(self, filename):
        """Load the graph into per-vertex neighbor lists (``self.adjac``).

        Unweighted: ``adjac[v]`` is a sorted array of neighbor indices.
        Weighted: ``adjac[v]`` is an array of ``[u, w]`` pairs sorted by
        neighbor index ``u``.
        """
        with open(filename) as f:
            lines = f.readlines()
        self.shape['v'] = int(lines[0])  # number of vertices
        self.adjac = [[] for _ in range(self.shape['v'])]
        self.degrees = np.zeros(self.shape['v'])
        self.shape['e'] = 0
        for edge in lines[1:]:  # each remaining line is one edge
            edge = edge.split()
            # NOTE: vertex indices in the text file start at 1.
            edge[0] = int(edge[0]) - 1
            edge[1] = int(edge[1]) - 1
            if self.weighted:
                edge[2] = float(edge[2])
                if edge[2] < 0:
                    self.has_neg_weights = True
                # BUGFIX: compare against the stored neighbor index (entries
                # are [u, w] pairs); `edge[1] in list_of_pairs` never matched,
                # so duplicated weighted edges were silently double-counted.
                seen = any(pair[0] == edge[1] for pair in self.adjac[edge[0]])
            else:
                seen = edge[1] in self.adjac[edge[0]]
            if not seen:  # skip edges that were already registered
                if not self.weighted:
                    self.adjac[edge[0]].append(edge[1])
                else:
                    self.adjac[edge[0]].append([edge[1], edge[2]])
                self.degrees[edge[0]] += 1
                self.shape['e'] += 1
                if not self.directed:
                    # undirected graph: register the edge on both endpoints
                    if not self.weighted:
                        self.adjac[edge[1]].append(edge[0])
                    else:
                        self.adjac[edge[1]].append([edge[0], edge[2]])
                    self.degrees[edge[1]] += 1
        self.adjac = np.array(self.adjac, dtype=object)
        # Sort every neighbor list by increasing vertex index (weights are
        # reordered together with their neighbors).
        for v in range(len(self.adjac)):
            if not self.weighted:
                self.adjac[v] = np.sort(self.adjac[v], axis=0)
            else:
                self.adjac[v] = np.array(self.adjac[v])
                if len(self.adjac[v]) > 0:
                    # vertices without neighbors have nothing to sort
                    neighbors = self.adjac[v][:, 0]
                    weights = self.adjac[v][:, 1]
                    # [REF] https://numpy.org/doc/stable/reference/generated/numpy.argsort.html
                    order = np.argsort(neighbors)
                    self.adjac[v][:, 0] = neighbors[order]
                    self.adjac[v][:, 1] = weights[order]
        return self.adjac

    # -----------------------------------------------
    def neighbors(self, vert):
        """Return a numpy array with the indices of ``vert``'s neighbors."""
        if self.mem_mode == 'mtx':
            # non-zero entries of the vertex row are its neighbors
            return np.where(self.adjac[vert] != 0)[0]
        if not self.weighted:
            return self.adjac[vert]
        if len(self.adjac[vert]) == 0:
            # no neighbors: column-slicing an empty array would raise
            return np.int32([])
        return np.int32(self.adjac[vert][:, 0])  # first column: neighbor index

    # -----------------------------------------------
    def weights(self, vert):
        """Return the weights of the edges incident to ``vert``.

        The order matches ``neighbors(vert)`` (increasing neighbor index).
        For an unweighted graph every edge weighs 1.
        """
        if self.mem_mode == 'mtx':
            return self.adjac[vert][self.adjac[vert] != 0]
        if len(self.adjac[vert]) == 0:
            return np.int32([])
        if not self.weighted:
            # BUGFIX: np.ones() takes the COUNT of neighbors, not the
            # neighbor list itself (the old call raised TypeError).
            return np.ones(len(self.adjac[vert]))
        return self.adjac[vert][:, 1]  # second column: edge weight

    # -----------------------------------------------
    def edge(self, v1, v2):
        """Return the weight of edge (v1, v2), or 0 when there is none.

        For unweighted graphs an existing edge is reported as 1.
        """
        if self.mem_mode == 'mtx':
            return self.adjac[v1][v2]
        if len(self.adjac[v1]) == 0:
            # v1 has no neighbors at all
            return 0
        if not self.weighted:
            # The neighbor list is sorted, so np.searchsorted() finds v2
            # in O(log n) instead of a linear scan.
            where = np.searchsorted(self.adjac[v1], v2)
            if (where == len(self.adjac[v1])) or (v2 != self.adjac[v1][where]):
                return 0  # v2 is not among v1's neighbors
            # BUGFIX: return 1 instead of the stored neighbor index, which
            # was falsy (0) whenever the neighbor happened to be vertex 0.
            return 1
        where = np.searchsorted(self.adjac[v1][:, 0], v2)
        if (where == len(self.adjac[v1])) or (v2 != self.adjac[v1][where, 0]):
            return 0  # v2 is not among v1's neighbors
        return self.adjac[v1][where, 1]

    # -----------------------------------------------
    @property
    def n(self):
        """Number of vertices."""
        return self.shape['v']

    @property
    def m(self):
        """Number of edges."""
        return self.shape['e']

    # -----------------------------------------------
    def dg_min(self):
        """Smallest vertex degree."""
        return self.degrees.min()

    def dg_max(self):
        """Largest vertex degree."""
        return self.degrees.max()

    def dg_avg(self):
        """Average degree, 2 * m / n (lecture 3, slide 10)."""
        return 2 * (self.m / self.n)

    def dg_median(self):
        """Median vertex degree."""
        return np.median(self.degrees)

    def __len__(self):
        # len(graph) == number of vertices
        return self.shape['v']

    def __repr__(self):
        s = f'Graph "{self.name}" shape {self.shape}\n'
        s += f'(mem_mode: {self.mem_mode}, weighted: {self.weighted}, has_neg_weights: {self.has_neg_weights}, directed: {self.directed})'
        return s

    def __getitem__(self, key):
        # graph[v] -> adjacency row / neighbor list of v
        return self.adjac[key]

    def __iter__(self):
        # iterating a graph yields its vertex indices 0..n-1
        return iter(range(self.n))
# ----------------------------------------------- | 0.422505 | 0.409811 |
from byteplay import Code
from byteplay import LOAD_FAST
from byteplay import LOAD_ATTR
from byteplay import LOAD_CONST
from byteplay import BINARY_SUBSCR
from byteplay import STORE_FAST
import inspect
_ANY_VALUE = object()  # wildcard sentinel: matches any operand in a comparison


class _MatchingTuple(tuple):
    """Tuple whose equality treats ``_ANY_VALUE`` entries as wildcards."""

    def __eq__(self, other):
        # Equality holds when every compared slot is either the wildcard
        # sentinel or genuinely equal; like the original, zip() silently
        # truncates the comparison to the shorter operand.
        return all(s is _ANY_VALUE or not (s != o) for s, o in zip(self, other))
def _accumulate_contained(small, big):
    """Return every start index at which `small` occurs as a contiguous
    sub-sequence of `big`.

    The element comparison is written as ``small[j] == big[i + j]`` so the
    left operand's custom ``__eq__`` (e.g. _MatchingTuple wildcards) drives
    the match.  Variation on stackoverflow #3847386.
    """
    # BUGFIX/portability: range() replaces the Python-2-only xrange();
    # behaviour is identical on both interpreters.
    matches = []
    for start in range(len(big) - len(small) + 1):
        if all(small[off] == big[start + off] for off in range(len(small))):
            matches.append(start)
    return matches
def _get_accumulated(func, usage_matcher, target_offset):
    # Disassemble `func` with byteplay, locate every occurrence of the
    # `usage_matcher` opcode pattern, and collect the operand of the
    # instruction found `target_offset` positions after each match start.
    # NOTE(review): Python 2 only -- relies on `func.func_code` (renamed
    # `__code__` in Python 3) and on the byteplay package.
    # what should we name this func??
    accumulator = []
    disassembled_code = Code.from_code(func.func_code).code
    contained = _accumulate_contained(usage_matcher, disassembled_code)
    for contained_index in contained:
        accumulator.append(disassembled_code[contained_index + target_offset][1])
    return accumulator
def get_getitem_accesses(obj, attr):
    """
    For whatever member function of obj, return a set containing all the keys
    for which obj.attr is accessed through __getitem__.
    """
    # Bytecode pattern for a direct `self.<attr>[<const>]` access; the
    # LOAD_CONST operand is a wildcard so any constant key matches.
    usage_matcher = [(LOAD_FAST, 'self'), (LOAD_ATTR, attr),
                     _MatchingTuple((LOAD_CONST, _ANY_VALUE)), (BINARY_SUBSCR, None)]
    funcs = inspect.getmembers(obj, inspect.ismethod)
    used_keys = set()
    # directly accessed self.attr __getitem__
    for name, func in funcs:
        used_keys.update(_get_accumulated(func, usage_matcher, 2))
    # accessed after saving props somewhere: `x = self.<attr>` followed,
    # anywhere in the same function, by `x[<const>]`
    usage_matcher = [(LOAD_FAST, 'self'), (LOAD_ATTR, attr),
                     _MatchingTuple((STORE_FAST, _ANY_VALUE))]
    for name, func in funcs:
        stored_attributes = _get_accumulated(func, usage_matcher, 2)
        for stored_attr in stored_attributes:
            attr_usage_matcher = [(LOAD_FAST, stored_attr),
                                  _MatchingTuple((LOAD_CONST, _ANY_VALUE)), (BINARY_SUBSCR, None)]
            used_keys.update(_get_accumulated(func, attr_usage_matcher, 1))
    return used_keys
from byteplay import Code
from byteplay import LOAD_FAST
from byteplay import LOAD_ATTR
from byteplay import LOAD_CONST
from byteplay import BINARY_SUBSCR
from byteplay import STORE_FAST
import inspect
_ANY_VALUE = object()  # wildcard sentinel: matches any operand in a comparison


class _MatchingTuple(tuple):
    """Tuple whose equality treats ``_ANY_VALUE`` entries as wildcards."""

    def __eq__(self, other):
        # Equality holds when every compared slot is either the wildcard
        # sentinel or genuinely equal; like the original, zip() silently
        # truncates the comparison to the shorter operand.
        return all(s is _ANY_VALUE or not (s != o) for s, o in zip(self, other))
def _accumulate_contained(small, big):
    """Return every start index at which `small` occurs as a contiguous
    sub-sequence of `big`.

    The element comparison is written as ``small[j] == big[i + j]`` so the
    left operand's custom ``__eq__`` (e.g. _MatchingTuple wildcards) drives
    the match.  Variation on stackoverflow #3847386.
    """
    # BUGFIX/portability: range() replaces the Python-2-only xrange();
    # behaviour is identical on both interpreters.
    matches = []
    for start in range(len(big) - len(small) + 1):
        if all(small[off] == big[start + off] for off in range(len(small))):
            matches.append(start)
    return matches
def _get_accumulated(func, usage_matcher, target_offset):
    # Disassemble `func` with byteplay, locate every occurrence of the
    # `usage_matcher` opcode pattern, and collect the operand of the
    # instruction found `target_offset` positions after each match start.
    # NOTE(review): Python 2 only -- relies on `func.func_code` (renamed
    # `__code__` in Python 3) and on the byteplay package.
    # what should we name this func??
    accumulator = []
    disassembled_code = Code.from_code(func.func_code).code
    contained = _accumulate_contained(usage_matcher, disassembled_code)
    for contained_index in contained:
        accumulator.append(disassembled_code[contained_index + target_offset][1])
    return accumulator
def get_getitem_accesses(obj, attr):
    """
    For whatever member function of obj, return a set containing all the keys
    for which obj.attr is accessed through __getitem__.
    """
    # Bytecode pattern for a direct `self.<attr>[<const>]` access; the
    # LOAD_CONST operand is a wildcard so any constant key matches.
    usage_matcher = [(LOAD_FAST, 'self'), (LOAD_ATTR, attr),
                     _MatchingTuple((LOAD_CONST, _ANY_VALUE)), (BINARY_SUBSCR, None)]
    funcs = inspect.getmembers(obj, inspect.ismethod)
    used_keys = set()
    # directly accessed self.attr __getitem__
    for name, func in funcs:
        used_keys.update(_get_accumulated(func, usage_matcher, 2))
    # accessed after saving props somewhere: `x = self.<attr>` followed,
    # anywhere in the same function, by `x[<const>]`
    usage_matcher = [(LOAD_FAST, 'self'), (LOAD_ATTR, attr),
                     _MatchingTuple((STORE_FAST, _ANY_VALUE))]
    for name, func in funcs:
        stored_attributes = _get_accumulated(func, usage_matcher, 2)
        for stored_attr in stored_attributes:
            attr_usage_matcher = [(LOAD_FAST, stored_attr),
                                  _MatchingTuple((LOAD_CONST, _ANY_VALUE)), (BINARY_SUBSCR, None)]
            used_keys.update(_get_accumulated(func, attr_usage_matcher, 1))
    return used_keys
from pygears import alternative, TypeMatchError, gear
from pygears.typing import Union
from pygears.lib import fmap as common_fmap
from pygears.lib.mux import mux
from pygears.lib.demux import demux_ctrl
from pygears.lib.ccat import ccat
from pygears.lib.shred import shred
def unionmap_check(dtype, f, mapping):
    """Enablement predicate for the Union fmap: `dtype` must be a Union and
    `f` a sequence with exactly one entry per (mapped) subtype."""
    if not issubclass(dtype, Union):
        return False
    try:
        fmap_count = len(f)
    except TypeError:
        raise TypeMatchError(
            f'Union fmap argument "f" needs to be a sequence, received {f}')
    # With an explicit mapping the branch count is the highest target + 1;
    # otherwise there is one branch per Union subtype.
    subtype_count = (len(list(dtype.types)) if mapping is None
                     else max(mapping.values()) + 1)
    if subtype_count != fmap_count:
        raise TypeMatchError(
            'Number of union types different from the number of fmap functions'
        )
    return True
@alternative(common_fmap)
@gear(enablement=b'unionmap_check(din, f, mapping)')
def unionmap(din,
             *,
             f,
             fdemux=demux_ctrl,
             fmux=mux,
             balance=None,
             mapping=None,
             use_dflt=True):
    """Apply one function per Union subtype: demux `din` on its control
    field, run f[i] on branch i (None = passthrough), then mux the branch
    outputs back together.  `balance` (if given) is inserted on passthrough
    branches and on the control path; `mapping` routes several subtypes to
    one branch, optionally with a default branch."""
    if mapping:
        fdemux = fdemux(mapping=mapping)
        fmux = fmux(mapping=mapping)
    demux_dout = din | fdemux
    ctrl = demux_dout[0]        # control (subtype selector) stream
    branches = demux_dout[1:]   # one data stream per branch
    dout = []
    for i, fd in enumerate(f):
        if fd is None:
            # None means "pass this branch through unchanged"
            if balance is None:
                dout.append(branches[i])
            else:
                dout.append(branches[i] | balance)
        else:
            dout.append(fd(branches[i]))
            if dout[-1] is None or isinstance(dout[-1], tuple):
                ret = 'none' if dout[-1] is None else f'{len(dout[-1])} outputs'
                raise TypeMatchError(
                    f'Gear "{fd}" passed to the unionmap should have a single output, but returned {ret}'
                )
    # Situation where there is a default branch because of mapping
    if len(branches) == len(dout) + 1 and mapping is not None:
        if use_dflt:
            dout.append(branches[-1])
        else:
            branches[-1] | shred
    elif len(branches) > len(dout):
        # NOTE(review): bare Exception with no message -- deserves a
        # descriptive error type/message.
        raise Exception
    if balance is not None:
        ctrl = ctrl | balance
    if len(dout) == 1:
        # single branch: re-attach the control field to rebuild the Union
        return ccat(*dout, ctrl) | Union
    else:
        return fmux(ctrl, *dout)
from pygears.typing import Union
from pygears.lib import fmap as common_fmap
from pygears.lib.mux import mux
from pygears.lib.demux import demux_ctrl
from pygears.lib.ccat import ccat
from pygears.lib.shred import shred
def unionmap_check(dtype, f, mapping):
    """Enablement predicate for the Union fmap: `dtype` must be a Union and
    `f` a sequence with exactly one entry per (mapped) subtype."""
    if not issubclass(dtype, Union):
        return False
    try:
        fmap_count = len(f)
    except TypeError:
        raise TypeMatchError(
            f'Union fmap argument "f" needs to be a sequence, received {f}')
    # With an explicit mapping the branch count is the highest target + 1;
    # otherwise there is one branch per Union subtype.
    subtype_count = (len(list(dtype.types)) if mapping is None
                     else max(mapping.values()) + 1)
    if subtype_count != fmap_count:
        raise TypeMatchError(
            'Number of union types different from the number of fmap functions'
        )
    return True
@alternative(common_fmap)
@gear(enablement=b'unionmap_check(din, f, mapping)')
def unionmap(din,
             *,
             f,
             fdemux=demux_ctrl,
             fmux=mux,
             balance=None,
             mapping=None,
             use_dflt=True):
    """Apply one function per Union subtype: demux `din` on its control
    field, run f[i] on branch i (None = passthrough), then mux the branch
    outputs back together.  `balance` (if given) is inserted on passthrough
    branches and on the control path; `mapping` routes several subtypes to
    one branch, optionally with a default branch."""
    if mapping:
        fdemux = fdemux(mapping=mapping)
        fmux = fmux(mapping=mapping)
    demux_dout = din | fdemux
    ctrl = demux_dout[0]        # control (subtype selector) stream
    branches = demux_dout[1:]   # one data stream per branch
    dout = []
    for i, fd in enumerate(f):
        if fd is None:
            # None means "pass this branch through unchanged"
            if balance is None:
                dout.append(branches[i])
            else:
                dout.append(branches[i] | balance)
        else:
            dout.append(fd(branches[i]))
            if dout[-1] is None or isinstance(dout[-1], tuple):
                ret = 'none' if dout[-1] is None else f'{len(dout[-1])} outputs'
                raise TypeMatchError(
                    f'Gear "{fd}" passed to the unionmap should have a single output, but returned {ret}'
                )
    # Situation where there is a default branch because of mapping
    if len(branches) == len(dout) + 1 and mapping is not None:
        if use_dflt:
            dout.append(branches[-1])
        else:
            branches[-1] | shred
    elif len(branches) > len(dout):
        # NOTE(review): bare Exception with no message -- deserves a
        # descriptive error type/message.
        raise Exception
    if balance is not None:
        ctrl = ctrl | balance
    if len(dout) == 1:
        # single branch: re-attach the control field to rebuild the Union
        return ccat(*dout, ctrl) | Union
    else:
        return fmux(ctrl, *dout)
import datetime
from toolium.utils.dataset import replace_param
# Unit tests for toolium.utils.dataset.replace_param.
# Each test feeds one "[PATTERN]" string to replace_param and checks the
# generated value and (where relevant) its inferred Python type.


# --- basic passthrough / unknown patterns ---

def test_replace_param_no_string():
    param = replace_param(1234)
    assert param == 1234

def test_replace_param_no_pattern():
    param = replace_param('my param')
    assert param == 'my param'

def test_replace_param_incomplete_pattern():
    # missing closing bracket: the string is returned untouched
    param = replace_param('[INTEGER_WITH_LENGTH_4')
    assert param == '[INTEGER_WITH_LENGTH_4'


# --- [<TYPE>_WITH_LENGTH_<N>] generators ---

def test_replace_param_string_with_length():
    param = replace_param('[STRING_WITH_LENGTH_5]')
    assert param == 'aaaaa'

def test_replace_param_string_array_with_length():
    param = replace_param('[STRING_ARRAY_WITH_LENGTH_5]')
    assert param == ['a', 'a', 'a', 'a', 'a']

def test_replace_param_integer_with_length():
    param = replace_param('[INTEGER_WITH_LENGTH_4]')
    assert param == 1111

def test_replace_param_integer_array_with_length():
    param = replace_param('[INTEGER_ARRAY_WITH_LENGTH_4]')
    assert param == [1, 1, 1, 1]

def test_replace_param_float_with_length():
    # FLOAT is not a supported WITH_LENGTH type: returned untouched
    param = replace_param('[FLOAT_WITH_LENGTH_4]')
    assert param == '[FLOAT_WITH_LENGTH_4]'

def test_replace_param_float_array_with_length():
    param = replace_param('[FLOAT_ARRAY_WITH_LENGTH_4]')
    assert param == '[FLOAT_ARRAY_WITH_LENGTH_4]'

def test_replace_param_json_with_length():
    param = replace_param('[JSON_WITH_LENGTH_3]')
    assert param == {'0': '0', '1': '1', '2': '2'}

def test_replace_param_incomplete_integer_with_length():
    param = replace_param('[INTEGER_WITH_LENGTH_4')
    assert param == '[INTEGER_WITH_LENGTH_4'


# --- literal sentinel patterns ---

def test_replace_param_missing_param():
    param = replace_param('[MISSING_PARAM]')
    assert param is None

def test_replace_param_null():
    param = replace_param('[NULL]')
    assert param is None

def test_replace_param_true():
    param = replace_param('[TRUE]')
    assert param is True

def test_replace_param_false():
    param = replace_param('[FALSE]')
    assert param is False

def test_replace_param_empty():
    param = replace_param('[EMPTY]')
    assert param == ''

def test_replace_param_blank():
    param = replace_param('[B]')
    assert param == ' '


# --- random generators ---

def test_replace_param_random():
    param = replace_param('[RANDOM]')
    assert len(param) == 8
    assert type(param) == str

def test_replace_param_random_phone_number_with_type_inference():
    # with inference the leading '+' is stripped and the value becomes int
    param = replace_param('[RANDOM_PHONE_NUMBER]')
    assert type(param) == int
    assert len(str(param)) == 11

def test_replace_param_random_phone_number_without_type_inference():
    param = replace_param('[RANDOM_PHONE_NUMBER]', infer_param_type=False)
    assert type(param) == str
    assert len(param) == 12
    assert param.startswith('+34654')

def test_replace_param_random_phone_number_with_type_inference_forcing_str():
    # [STR:...] overrides the inferred int type
    param = replace_param('[STR:[RANDOM_PHONE_NUMBER]]')
    assert type(param) == str
    assert len(param) == 12
    assert param.startswith('+34654')


# --- timestamps and dates ---

def test_replace_param_timestamp_with_type_inference():
    param = replace_param('[TIMESTAMP]')
    assert type(param) == int
    assert datetime.datetime.strptime(str(datetime.datetime.fromtimestamp(param)), '%Y-%m-%d %H:%M:%S')

def test_replace_param_timestamp_without_type_inference():
    param = replace_param('[TIMESTAMP]', infer_param_type=False)
    assert type(param) == str
    assert len(param) == 10
    assert datetime.datetime.strptime(str(datetime.datetime.fromtimestamp(int(param))), '%Y-%m-%d %H:%M:%S')

def test_replace_param_timestamp_with_type_inference_forcing_str():
    param = replace_param('[STR:[TIMESTAMP]]')
    assert type(param) == str
    assert len(param) == 10
    assert datetime.datetime.strptime(str(datetime.datetime.fromtimestamp(int(param))), '%Y-%m-%d %H:%M:%S')

def test_replace_param_datetime():
    param = replace_param('[DATETIME]')
    assert datetime.datetime.strptime(param, '%Y-%m-%d %H:%M:%S.%f')

def test_replace_param_datetime_language_ignored():
    # [DATETIME] format does not depend on the language parameter
    param = replace_param('[DATETIME]', language='es')
    assert datetime.datetime.strptime(param, '%Y-%m-%d %H:%M:%S.%f')

def test_replace_param_today_spanish():
    # Spanish locale uses day-first formatting
    param = replace_param('[TODAY]', language='es')
    assert param == datetime.datetime.today().strftime('%d/%m/%Y')

def test_replace_param_today_not_spanish():
    param = replace_param('[TODAY]', language='en')
    assert param == datetime.datetime.today().strftime('%Y/%m/%d')

def test_replace_param_today_offset():
    # "[TODAY - 1 DAYS]" applies a timedelta to the current date
    param = replace_param('[TODAY - 1 DAYS]', language='es')
    assert param == datetime.datetime.strftime(
        datetime.datetime.today() - datetime.timedelta(days=1), '%d/%m/%Y')

def test_replace_param_now_spanish():
    param = replace_param('[NOW]', language='es')
    assert param == datetime.datetime.utcnow().strftime('%d/%m/%Y %H:%M:%S')

def test_replace_param_now_not_spanish():
    param = replace_param('[NOW]', language='it')
    assert param == datetime.datetime.utcnow().strftime('%Y/%m/%d %H:%M:%S')

def test_replace_param_now_offset():
    param = replace_param('[NOW + 5 MINUTES]', language='es')
    assert param == datetime.datetime.strftime(
        datetime.datetime.utcnow() + datetime.timedelta(minutes=5), '%d/%m/%Y %H:%M:%S')


# --- explicit type-cast patterns ---

def test_replace_param_str_int():
    param = replace_param('[STR:28]')
    assert type(param) == str
    assert param == '28'

def test_replace_param_str():
    param = replace_param('[STR:abc]')
    assert type(param) == str
    assert param == 'abc'

def test_replace_param_int():
    param = replace_param('[INT:28]')
    assert type(param) == int
    assert param == 28

def test_replace_param_float():
    param = replace_param('[FLOAT:28]')
    assert type(param) == float
    assert param == 28.0

def test_replace_param_list_integers():
    param = replace_param('[LIST:[1,2,3]]')
    assert type(param) == list
    assert param == [1, 2, 3]

def test_replace_param_list_strings():
    param = replace_param("[LIST:['1','2','3']]")
    assert type(param) == list
    assert param == ['1', '2', '3']

def test_replace_param_dict():
    param = replace_param("[DICT:{'a':'test1','b':'test2','c':'test3'}]")
    assert type(param) == dict
    assert param == {'a': 'test1', 'b': 'test2', 'c': 'test3'}

def test_replace_param_upper():
    param = replace_param('[UPPER:test]')
    assert param == 'TEST'
    param = replace_param('[UPPER:TeSt]')
    assert param == 'TEST'

def test_replace_param_lower():
    param = replace_param('[LOWER:TEST]')
    assert param == 'test'
    param = replace_param('[LOWER:TeSt]')
    assert param == 'test'


# --- implicit type inference of plain values ---

def test_replace_param_type_inference():
    param = replace_param('1234')  # int
    assert param == 1234
    param = replace_param('0.5')  # float
    assert param == 0.5
    param = replace_param('True')  # boolean
    assert param is True
    param = replace_param('None')  # None
    assert param is None
    param = replace_param("{'a':'test1', 'b':True, 'c':None}")  # dict
    assert param == {'a': 'test1', 'b': True, 'c': None}
    param = replace_param("['1', True,None]")  # list
    assert param == ['1', True, None]
    param = replace_param('{"a":"test1", "b":true, "c":null}')  # JSON object
    assert param == {'a': 'test1', 'b': True, 'c': None}
    param = replace_param('["1", true, null]')  # JSON list
    assert param == ['1', True, None]
    param = replace_param('true')  # JSON boolean
    assert param == 'true'
    param = replace_param('null')  # JSON null
    assert param == 'null'
def test_replace_param_type_inference_disabled():
param = replace_param('1234', infer_param_type=False)
assert param == '1234'
param = replace_param('0.5', infer_param_type=False)
assert param == '0.5'
param = replace_param('True', infer_param_type=False)
assert param == 'True'
param = replace_param('None', infer_param_type=False)
assert param == 'None'
param = replace_param("{'a':'test1', 'b':True, 'c':None}", infer_param_type=False)
assert param == "{'a':'test1', 'b':True, 'c':None}"
param = replace_param("['1', True, None]", infer_param_type=False)
assert param == "['1', True, None]"
param = replace_param('{"a":"test1", "b":true, "c":null}', infer_param_type=False)
assert param == '{"a":"test1", "b":true, "c":null}'
param = replace_param('["1", true, null]', infer_param_type=False)
assert param == '["1", true, null]' | toolium/test/utils/test_dataset_replace_param.py | import datetime
from toolium.utils.dataset import replace_param
def test_replace_param_no_string():
param = replace_param(1234)
assert param == 1234
def test_replace_param_no_pattern():
param = replace_param('my param')
assert param == 'my param'
def test_replace_param_incomplete_pattern():
param = replace_param('[INTEGER_WITH_LENGTH_4')
assert param == '[INTEGER_WITH_LENGTH_4'
def test_replace_param_string_with_length():
param = replace_param('[STRING_WITH_LENGTH_5]')
assert param == 'aaaaa'
def test_replace_param_string_array_with_length():
param = replace_param('[STRING_ARRAY_WITH_LENGTH_5]')
assert param == ['a', 'a', 'a', 'a', 'a']
def test_replace_param_integer_with_length():
param = replace_param('[INTEGER_WITH_LENGTH_4]')
assert param == 1111
def test_replace_param_integer_array_with_length():
param = replace_param('[INTEGER_ARRAY_WITH_LENGTH_4]')
assert param == [1, 1, 1, 1]
def test_replace_param_float_with_length():
param = replace_param('[FLOAT_WITH_LENGTH_4]')
assert param == '[FLOAT_WITH_LENGTH_4]'
def test_replace_param_float_array_with_length():
param = replace_param('[FLOAT_ARRAY_WITH_LENGTH_4]')
assert param == '[FLOAT_ARRAY_WITH_LENGTH_4]'
def test_replace_param_json_with_length():
param = replace_param('[JSON_WITH_LENGTH_3]')
assert param == {'0': '0', '1': '1', '2': '2'}
def test_replace_param_incomplete_integer_with_length():
param = replace_param('[INTEGER_WITH_LENGTH_4')
assert param == '[INTEGER_WITH_LENGTH_4'
def test_replace_param_missing_param():
param = replace_param('[MISSING_PARAM]')
assert param is None
def test_replace_param_null():
param = replace_param('[NULL]')
assert param is None
def test_replace_param_true():
param = replace_param('[TRUE]')
assert param is True
def test_replace_param_false():
param = replace_param('[FALSE]')
assert param is False
def test_replace_param_empty():
param = replace_param('[EMPTY]')
assert param == ''
def test_replace_param_blank():
param = replace_param('[B]')
assert param == ' '
def test_replace_param_random():
param = replace_param('[RANDOM]')
assert len(param) == 8
assert type(param) == str
def test_replace_param_random_phone_number_with_type_inference():
param = replace_param('[RANDOM_PHONE_NUMBER]')
assert type(param) == int
assert len(str(param)) == 11
def test_replace_param_random_phone_number_without_type_inference():
param = replace_param('[RANDOM_PHONE_NUMBER]', infer_param_type=False)
assert type(param) == str
assert len(param) == 12
assert param.startswith('+34654')
def test_replace_param_random_phone_number_with_type_inference_forcing_str():
param = replace_param('[STR:[RANDOM_PHONE_NUMBER]]')
assert type(param) == str
assert len(param) == 12
assert param.startswith('+34654')
def test_replace_param_timestamp_with_type_inference():
param = replace_param('[TIMESTAMP]')
assert type(param) == int
assert datetime.datetime.strptime(str(datetime.datetime.fromtimestamp(param)), '%Y-%m-%d %H:%M:%S')
def test_replace_param_timestamp_without_type_inference():
param = replace_param('[TIMESTAMP]', infer_param_type=False)
assert type(param) == str
assert len(param) == 10
assert datetime.datetime.strptime(str(datetime.datetime.fromtimestamp(int(param))), '%Y-%m-%d %H:%M:%S')
def test_replace_param_timestamp_with_type_inference_forcing_str():
param = replace_param('[STR:[TIMESTAMP]]')
assert type(param) == str
assert len(param) == 10
assert datetime.datetime.strptime(str(datetime.datetime.fromtimestamp(int(param))), '%Y-%m-%d %H:%M:%S')
def test_replace_param_datetime():
param = replace_param('[DATETIME]')
assert datetime.datetime.strptime(param, '%Y-%m-%d %H:%M:%S.%f')
def test_replace_param_datetime_language_ignored():
param = replace_param('[DATETIME]', language='es')
assert datetime.datetime.strptime(param, '%Y-%m-%d %H:%M:%S.%f')
def test_replace_param_today_spanish():
param = replace_param('[TODAY]', language='es')
assert param == datetime.datetime.today().strftime('%d/%m/%Y')
def test_replace_param_today_not_spanish():
param = replace_param('[TODAY]', language='en')
assert param == datetime.datetime.today().strftime('%Y/%m/%d')
def test_replace_param_today_offset():
param = replace_param('[TODAY - 1 DAYS]', language='es')
assert param == datetime.datetime.strftime(
datetime.datetime.today() - datetime.timedelta(days=1), '%d/%m/%Y')
def test_replace_param_now_spanish():
param = replace_param('[NOW]', language='es')
assert param == datetime.datetime.utcnow().strftime('%d/%m/%Y %H:%M:%S')
def test_replace_param_now_not_spanish():
param = replace_param('[NOW]', language='it')
assert param == datetime.datetime.utcnow().strftime('%Y/%m/%d %H:%M:%S')
def test_replace_param_now_offset():
param = replace_param('[NOW + 5 MINUTES]', language='es')
assert param == datetime.datetime.strftime(
datetime.datetime.utcnow() + datetime.timedelta(minutes=5), '%d/%m/%Y %H:%M:%S')
def test_replace_param_str_int():
param = replace_param('[STR:28]')
assert type(param) == str
assert param == '28'
def test_replace_param_str():
param = replace_param('[STR:abc]')
assert type(param) == str
assert param == 'abc'
def test_replace_param_int():
param = replace_param('[INT:28]')
assert type(param) == int
assert param == 28
def test_replace_param_float():
param = replace_param('[FLOAT:28]')
assert type(param) == float
assert param == 28.0
def test_replace_param_list_integers():
param = replace_param('[LIST:[1,2,3]]')
assert type(param) == list
assert param == [1, 2, 3]
def test_replace_param_list_strings():
param = replace_param("[LIST:['1','2','3']]")
assert type(param) == list
assert param == ['1', '2', '3']
def test_replace_param_dict():
param = replace_param("[DICT:{'a':'test1','b':'test2','c':'test3'}]")
assert type(param) == dict
assert param == {'a': 'test1', 'b': 'test2', 'c': 'test3'}
def test_replace_param_upper():
param = replace_param('[UPPER:test]')
assert param == 'TEST'
param = replace_param('[UPPER:TeSt]')
assert param == 'TEST'
def test_replace_param_lower():
param = replace_param('[LOWER:TEST]')
assert param == 'test'
param = replace_param('[LOWER:TeSt]')
assert param == 'test'
def test_replace_param_type_inference():
param = replace_param('1234') # int
assert param == 1234
param = replace_param('0.5') # float
assert param == 0.5
param = replace_param('True') # boolean
assert param is True
param = replace_param('None') # None
assert param is None
param = replace_param("{'a':'test1', 'b':True, 'c':None}") # dict
assert param == {'a': 'test1', 'b': True, 'c': None}
param = replace_param("['1', True,None]") # list
assert param == ['1', True, None]
param = replace_param('{"a":"test1", "b":true, "c":null}') # JSON object
assert param == {'a': 'test1', 'b': True, 'c': None}
param = replace_param('["1", true, null]') # JSON list
assert param == ['1', True, None]
param = replace_param('true') # JSON boolean
assert param == 'true'
param = replace_param('null') # JSON null
assert param == 'null'
def test_replace_param_type_inference_disabled():
param = replace_param('1234', infer_param_type=False)
assert param == '1234'
param = replace_param('0.5', infer_param_type=False)
assert param == '0.5'
param = replace_param('True', infer_param_type=False)
assert param == 'True'
param = replace_param('None', infer_param_type=False)
assert param == 'None'
param = replace_param("{'a':'test1', 'b':True, 'c':None}", infer_param_type=False)
assert param == "{'a':'test1', 'b':True, 'c':None}"
param = replace_param("['1', True, None]", infer_param_type=False)
assert param == "['1', True, None]"
param = replace_param('{"a":"test1", "b":true, "c":null}', infer_param_type=False)
assert param == '{"a":"test1", "b":true, "c":null}'
param = replace_param('["1", true, null]', infer_param_type=False)
assert param == '["1", true, null]' | 0.445771 | 0.480113 |
from maclookup import *
from maclookup.models import *
from maclookup.exceptions import EmptyResponseException
from .mock_requester import MockRequester
import unittest
from dateutil.parser import parse
class ApiClientTest(unittest.TestCase):
def setUp(self):
pass
def test_parsing(self):
payload = """{
"vendorDetails":
{
"oui": "B0ECA1",
"isPrivate": 0,
"companyName": "CISCO",
"companyAddress": "",
"countryCode": "US"
},
"blockDetails":
{
"blockFound": 1,
"borderLeft": "B0ECA1B0ECA1",
"borderRight": "B0ECE1B0ECA1",
"blockSize": 32000,
"assignmentBlockSize": "MA-M",
"dateCreated": "26-09-1999",
"dateUpdated": "26-09-2017"
},
"macAddressDetails":
{
"searchTerm": "B0ECE1B0ECA1",
"isValid": 1,
"transmissionType": "multicast",
"administrationType": "UAA"
}
}"""
fake_requester = MockRequester(payload)
client = ApiClient('test')
client.set_requester(fake_requester)
valid_object = ResponseModel()
valid_object.vendor_details = VendorDetails()
valid_object.vendor_details.oui = "B0ECA1"
valid_object.vendor_details.is_private = 0
valid_object.vendor_details.company_name = "CISCO"
valid_object.vendor_details.company_address = ""
valid_object.vendor_details.country_code = "US"
valid_object.block_details = BlockDetails()
valid_object.block_details.block_found = 1
valid_object.block_details.border_left = "B0ECA1B0ECA1"
valid_object.block_details.border_right = "B0ECE1B0ECA1"
valid_object.block_details.block_size = 32000
valid_object.block_details.assignment_block_size = "MA-M"
valid_object.block_details.date_created = parse("26-09-1999")
valid_object.block_details.date_updated = parse("26-09-2017")
valid_object.mac_address_details = MacAddressDetails()
valid_object.mac_address_details.search_term = "B0ECE1B0ECA1"
valid_object.mac_address_details.is_valid = 1
valid_object.mac_address_details.transmission_type = "multicast"
valid_object.mac_address_details.administration_type = "UAA"
self.assertEqual(client.get('MAC'), valid_object)
def test_vendor_name(self):
payload = "Test Company Name, Inc."
fake_requester = MockRequester(payload)
client = ApiClient('test')
client.set_requester(fake_requester)
self.assertEqual(client.get_vendor("MAC"), payload)
def test_raw_data(self):
payload = """<Response>
<vendorDetails>
<oui>F40F24</oui>
<isPrivate>false</isPrivate>
<companyName>Apple, Inc</companyName>
<companyAddress>1 Infinite Loop Cupertino CA 95014 US</companyAddress>
<countryCode>US</countryCode>
</vendorDetails>
<blockDetails>
<blockFound>true</blockFound>
<borderLeft>F40F240000000000</borderLeft>
<borderRight>F40F24FFFFFFFFFF</borderRight>
<blockSize>1099511627776</blockSize>
<assignmentBlockSize>MA-L</assignmentBlockSize>
<dateCreated></dateCreated>
<dateUpdated></dateUpdated>
</blockDetails>
<macAddressDetails>
<searchTerm>F40F2436DA57</searchTerm>
<isValid>true</isValid>
<transmissionType>multicast</transmissionType>
<administrationType>LAA</administrationType>
</macAddressDetails>
</Response><Response>
<vendorDetails>
<oui>F40F24</oui>
<isPrivate>false</isPrivate>
<companyName>Apple, Inc</companyName>
<companyAddress>1 Infinite Loop Cupertino CA 95014 US</companyAddress>
<countryCode>US</countryCode>
</vendorDetails>
<blockDetails>
<blockFound>true</blockFound>
<borderLeft>F40F240000000000</borderLeft>
<borderRight>F40F24FFFFFFFFFF</borderRight>
<blockSize>1099511627776</blockSize>
<assignmentBlockSize>MA-L</assignmentBlockSize>
<dateCreated></dateCreated>
<dateUpdated></dateUpdated>
</blockDetails>
<macAddressDetails>
<searchTerm>F40F2436DA57</searchTerm>
<isValid>true</isValid>
<transmissionType>multicast</transmissionType>
<administrationType>LAA</administrationType>
</macAddressDetails>
</Response>"""
fake_requester = MockRequester(payload)
client = ApiClient('test')
client.set_requester(fake_requester)
self.assertEqual(client.get_raw_data("MAC", "xml"), payload)
def test_get_empty_response(self):
payload = ""
fake_requester = MockRequester(payload)
client = ApiClient('test')
client.set_requester(fake_requester)
with self.assertRaises(EmptyResponseException):
client.get("MAC")
def test_get_raw_data_empty_response(self):
payload = ""
fake_requester = MockRequester(payload)
client = ApiClient('test')
client.set_requester(fake_requester)
with self.assertRaises(EmptyResponseException):
client.get_raw_data("MAC", "xml")
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main() | tests/api_client_test.py | from maclookup import *
from maclookup.models import *
from maclookup.exceptions import EmptyResponseException
from .mock_requester import MockRequester
import unittest
from dateutil.parser import parse
class ApiClientTest(unittest.TestCase):
def setUp(self):
pass
def test_parsing(self):
payload = """{
"vendorDetails":
{
"oui": "B0ECA1",
"isPrivate": 0,
"companyName": "CISCO",
"companyAddress": "",
"countryCode": "US"
},
"blockDetails":
{
"blockFound": 1,
"borderLeft": "B0ECA1B0ECA1",
"borderRight": "B0ECE1B0ECA1",
"blockSize": 32000,
"assignmentBlockSize": "MA-M",
"dateCreated": "26-09-1999",
"dateUpdated": "26-09-2017"
},
"macAddressDetails":
{
"searchTerm": "B0ECE1B0ECA1",
"isValid": 1,
"transmissionType": "multicast",
"administrationType": "UAA"
}
}"""
fake_requester = MockRequester(payload)
client = ApiClient('test')
client.set_requester(fake_requester)
valid_object = ResponseModel()
valid_object.vendor_details = VendorDetails()
valid_object.vendor_details.oui = "B0ECA1"
valid_object.vendor_details.is_private = 0
valid_object.vendor_details.company_name = "CISCO"
valid_object.vendor_details.company_address = ""
valid_object.vendor_details.country_code = "US"
valid_object.block_details = BlockDetails()
valid_object.block_details.block_found = 1
valid_object.block_details.border_left = "B0ECA1B0ECA1"
valid_object.block_details.border_right = "B0ECE1B0ECA1"
valid_object.block_details.block_size = 32000
valid_object.block_details.assignment_block_size = "MA-M"
valid_object.block_details.date_created = parse("26-09-1999")
valid_object.block_details.date_updated = parse("26-09-2017")
valid_object.mac_address_details = MacAddressDetails()
valid_object.mac_address_details.search_term = "B0ECE1B0ECA1"
valid_object.mac_address_details.is_valid = 1
valid_object.mac_address_details.transmission_type = "multicast"
valid_object.mac_address_details.administration_type = "UAA"
self.assertEqual(client.get('MAC'), valid_object)
def test_vendor_name(self):
payload = "Test Company Name, Inc."
fake_requester = MockRequester(payload)
client = ApiClient('test')
client.set_requester(fake_requester)
self.assertEqual(client.get_vendor("MAC"), payload)
def test_raw_data(self):
payload = """<Response>
<vendorDetails>
<oui>F40F24</oui>
<isPrivate>false</isPrivate>
<companyName>Apple, Inc</companyName>
<companyAddress>1 Infinite Loop Cupertino CA 95014 US</companyAddress>
<countryCode>US</countryCode>
</vendorDetails>
<blockDetails>
<blockFound>true</blockFound>
<borderLeft>F40F240000000000</borderLeft>
<borderRight>F40F24FFFFFFFFFF</borderRight>
<blockSize>1099511627776</blockSize>
<assignmentBlockSize>MA-L</assignmentBlockSize>
<dateCreated></dateCreated>
<dateUpdated></dateUpdated>
</blockDetails>
<macAddressDetails>
<searchTerm>F40F2436DA57</searchTerm>
<isValid>true</isValid>
<transmissionType>multicast</transmissionType>
<administrationType>LAA</administrationType>
</macAddressDetails>
</Response><Response>
<vendorDetails>
<oui>F40F24</oui>
<isPrivate>false</isPrivate>
<companyName>Apple, Inc</companyName>
<companyAddress>1 Infinite Loop Cupertino CA 95014 US</companyAddress>
<countryCode>US</countryCode>
</vendorDetails>
<blockDetails>
<blockFound>true</blockFound>
<borderLeft>F40F240000000000</borderLeft>
<borderRight>F40F24FFFFFFFFFF</borderRight>
<blockSize>1099511627776</blockSize>
<assignmentBlockSize>MA-L</assignmentBlockSize>
<dateCreated></dateCreated>
<dateUpdated></dateUpdated>
</blockDetails>
<macAddressDetails>
<searchTerm>F40F2436DA57</searchTerm>
<isValid>true</isValid>
<transmissionType>multicast</transmissionType>
<administrationType>LAA</administrationType>
</macAddressDetails>
</Response>"""
fake_requester = MockRequester(payload)
client = ApiClient('test')
client.set_requester(fake_requester)
self.assertEqual(client.get_raw_data("MAC", "xml"), payload)
def test_get_empty_response(self):
payload = ""
fake_requester = MockRequester(payload)
client = ApiClient('test')
client.set_requester(fake_requester)
with self.assertRaises(EmptyResponseException):
client.get("MAC")
def test_get_raw_data_empty_response(self):
payload = ""
fake_requester = MockRequester(payload)
client = ApiClient('test')
client.set_requester(fake_requester)
with self.assertRaises(EmptyResponseException):
client.get_raw_data("MAC", "xml")
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main() | 0.59843 | 0.191649 |
import pandas as pd
import csv
from sklearn import neighbors
from sklearn import datasets
import numpy as np
import yellowbrick as yb
from yellowbrick.neighbors import KnnDecisionBoundariesVisualizer
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
def load_adm_sat_school_data(return_X_y=False):
with open("./merged_adm_sat_data.csv") as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
df = pd.read_csv("./merged_adm_sat_data.csv", sep=",", usecols=(0, 1, 2, 3), skiprows=0)
data = np.empty((n_samples, n_features), dtype=int)
target = np.ma.empty((n_samples,), dtype=int)
for index, row in df.iterrows():
data[index] = np.asarray([df.iloc[index][0], df.iloc[index][1], df.iloc[index][2]], dtype=np.float)
target[index] = np.asarray(df.iloc[index][3], dtype=np.int)
feature_names = np.array(['ACT_AVG','SAT_AVG','GRAD_DEBT','REGION'])
if return_X_y:
return data, target
return datasets.base.Bunch(data=data, target=target,
target_names=target_names,
DESCR='School Data set',
feature_names=feature_names)
def show_plot(X, y, n_neighbors=10, h=0.2):
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF','#FFAAAA', '#AAFFAA', '#AAAAFF','#FFAAAA', '#AAFFAA', '#AAAAFF','#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF','#FF0000','#FF0000','#FF0000','#FF0000','#FF0000','#FF0000','#FF0000',])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
clf.n_neighbors = n_neighbors
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
if __name__ == '__main__':
school = load_adm_sat_school_data()
X = school.data[:, :2] # we only take the first two features.
y = school.target
#show_plot(X,y,3)
model = neighbors.KNeighborsClassifier(10)
model.fit(X,y)
model.predict(X)
#visualizer = KnnDecisionBoundariesVisualizer(model, classes=school.target_names, features=school.feature_names[:2])
visualizer = KnnDecisionBoundariesVisualizer(model)
visualizer.fit_draw_poof(X, y) | examples/balavenkatesan/testing.py | import pandas as pd
import csv
from sklearn import neighbors
from sklearn import datasets
import numpy as np
import yellowbrick as yb
from yellowbrick.neighbors import KnnDecisionBoundariesVisualizer
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
def load_adm_sat_school_data(return_X_y=False):
with open("./merged_adm_sat_data.csv") as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
df = pd.read_csv("./merged_adm_sat_data.csv", sep=",", usecols=(0, 1, 2, 3), skiprows=0)
data = np.empty((n_samples, n_features), dtype=int)
target = np.ma.empty((n_samples,), dtype=int)
for index, row in df.iterrows():
data[index] = np.asarray([df.iloc[index][0], df.iloc[index][1], df.iloc[index][2]], dtype=np.float)
target[index] = np.asarray(df.iloc[index][3], dtype=np.int)
feature_names = np.array(['ACT_AVG','SAT_AVG','GRAD_DEBT','REGION'])
if return_X_y:
return data, target
return datasets.base.Bunch(data=data, target=target,
target_names=target_names,
DESCR='School Data set',
feature_names=feature_names)
def show_plot(X, y, n_neighbors=10, h=0.2):
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF','#FFAAAA', '#AAFFAA', '#AAAAFF','#FFAAAA', '#AAFFAA', '#AAAAFF','#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF','#FF0000','#FF0000','#FF0000','#FF0000','#FF0000','#FF0000','#FF0000',])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
clf.n_neighbors = n_neighbors
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
if __name__ == '__main__':
school = load_adm_sat_school_data()
X = school.data[:, :2] # we only take the first two features.
y = school.target
#show_plot(X,y,3)
model = neighbors.KNeighborsClassifier(10)
model.fit(X,y)
model.predict(X)
#visualizer = KnnDecisionBoundariesVisualizer(model, classes=school.target_names, features=school.feature_names[:2])
visualizer = KnnDecisionBoundariesVisualizer(model)
visualizer.fit_draw_poof(X, y) | 0.693473 | 0.506652 |
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import unittest
import tensorflow as tf
import numpy as np
from art.attacks import UniversalPerturbation
from art.classifiers import KerasClassifier
from art.utils import load_dataset, master_seed
from art.utils_test import get_classifier_tf, get_classifier_kr, get_classifier_pt
from art.utils_test import get_iris_classifier_tf, get_iris_classifier_kr, get_iris_classifier_pt
logger = logging.getLogger('testLogger')
BATCH_SIZE = 100
NB_TRAIN = 500
NB_TEST = 10
class TestUniversalPerturbation(unittest.TestCase):
    """
    A unittest class for testing the UniversalPerturbation attack.
    """

    @classmethod
    def setUpClass(cls):
        # Load MNIST once for the whole class and keep only small subsets so
        # the attack and the classifier predictions finish quickly.
        (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist')
        cls.mnist = ((x_train[:NB_TRAIN], y_train[:NB_TRAIN]),
                     (x_test[:NB_TEST], y_test[:NB_TEST]))

    def setUp(self):
        # Reset all random seeds so each test starts from the same state.
        master_seed(1234)

    def test_tfclassifier(self):
        """
        First test with the TensorFlowClassifier.
        :return:
        """
        classifier, sess = get_classifier_tf()
        (x_train, y_train), (x_test, y_test) = self.mnist

        # Use a cheap inner attacker (NewtonFool, few iterations) to keep the test fast.
        attack = UniversalPerturbation(classifier, max_iter=1, attacker="newtonfool",
                                       attacker_params={"max_iter": 5})
        x_train_adv = attack.generate(x_train)
        # Either the perturbation fools enough samples, or the attack must
        # report that it did not converge.
        self.assertTrue(attack.fooling_rate >= 0.2 or not attack.converged)

        # Applying the universal noise to unseen data must actually change it.
        x_test_adv = x_test + attack.noise
        self.assertFalse(np.all(x_test == x_test_adv))

        # At least one prediction must differ from the true labels.
        preds_train = np.argmax(classifier.predict(x_train_adv), axis=1)
        preds_test = np.argmax(classifier.predict(x_test_adv), axis=1)
        self.assertFalse(np.all(np.argmax(y_test, axis=1) == preds_test))
        self.assertFalse(np.all(np.argmax(y_train, axis=1) == preds_train))

    @unittest.skipIf(tf.__version__[0] == '2',
                     reason='Skip unittests for TensorFlow v2 until Keras supports TensorFlow v2 as backend.')
    def test_krclassifier(self):
        """
        Second test with the KerasClassifier.
        :return:
        """
        classifier = get_classifier_kr()
        (x_train, y_train), (x_test, y_test) = self.mnist

        # Here the inner attacker is the untargeted EAD attack.
        attack = UniversalPerturbation(classifier, max_iter=1, attacker="ead",
                                       attacker_params={"max_iter": 5, "targeted": False})
        x_train_adv = attack.generate(x_train)
        self.assertTrue(attack.fooling_rate >= 0.2 or not attack.converged)

        x_test_adv = x_test + attack.noise
        self.assertFalse(np.all(x_test == x_test_adv))

        preds_train = np.argmax(classifier.predict(x_train_adv), axis=1)
        preds_test = np.argmax(classifier.predict(x_test_adv), axis=1)
        self.assertFalse(np.all(np.argmax(y_test, axis=1) == preds_test))
        self.assertFalse(np.all(np.argmax(y_train, axis=1) == preds_train))

    def test_ptclassifier(self):
        """
        Third test with the PyTorchClassifier.
        :return:
        """
        classifier = get_classifier_pt()
        (x_train, y_train), (x_test, y_test) = self.mnist
        # PyTorch expects channels-first float32 input.
        x_train = np.swapaxes(x_train, 1, 3).astype(np.float32)
        x_test = np.swapaxes(x_test, 1, 3).astype(np.float32)

        attack = UniversalPerturbation(classifier, max_iter=1, attacker="newtonfool",
                                       attacker_params={"max_iter": 5})
        x_train_adv = attack.generate(x_train)
        self.assertTrue(attack.fooling_rate >= 0.2 or not attack.converged)

        x_test_adv = x_test + attack.noise
        self.assertFalse(np.all(x_test == x_test_adv))

        preds_train = np.argmax(classifier.predict(x_train_adv), axis=1)
        preds_test = np.argmax(classifier.predict(x_test_adv), axis=1)
        self.assertFalse(np.all(np.argmax(y_test, axis=1) == preds_test))
        self.assertFalse(np.all(np.argmax(y_train, axis=1) == preds_train))

    def test_classifier_type_check_fail_classifier(self):
        # A dummy class that does not implement the ART classifier API must
        # be rejected with a descriptive TypeError.
        class ClassifierNoAPI:
            pass

        classifier = ClassifierNoAPI
        with self.assertRaises(TypeError) as context:
            _ = UniversalPerturbation(classifier=classifier)

        self.assertIn("For `UniversalPerturbation` classifier must be an instance of "
                      "`art.classifiers.classifier.Classifier`, the provided classifier is instance of "
                      "(<class 'object'>,).", str(context.exception))

    def test_classifier_type_check_fail_gradients(self):
        # A classifier that cannot provide gradients must be rejected by
        # this white-box attack.
        from art.classifiers.scikitlearn import ScikitlearnDecisionTreeClassifier
        from sklearn.tree import DecisionTreeClassifier

        classifier = ScikitlearnDecisionTreeClassifier(model=DecisionTreeClassifier())
        with self.assertRaises(TypeError) as context:
            _ = UniversalPerturbation(classifier=classifier)

        self.assertIn("For `UniversalPerturbation` classifier must be an instance of "
                      "`art.classifiers.classifier.ClassifierNeuralNetwork` and "
                      "`art.classifiers.classifier.ClassifierGradients`, the provided classifier is instance of "
                      "(<class 'art.classifiers.scikitlearn.ScikitlearnClassifier'>,).", str(context.exception))
class TestUniversalPerturbationVectors(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Get Iris
(x_train, y_train), (x_test, y_test), _, _ = load_dataset('iris')
cls.iris = (x_train, y_train), (x_test, y_test)
def setUp(self):
master_seed(1234)
@unittest.skipIf(tf.__version__[0] == '2', reason='Skip unittests for TensorFlow v2 until Keras supports TensorFlow'
' v2 as backend.')
def test_iris_k_clipped(self):
(_, _), (x_test, y_test) = self.iris
classifier, _ = get_iris_classifier_kr()
# Test untargeted attack
attack_params = {"max_iter": 1, "attacker": "newtonfool", "attacker_params": {"max_iter": 5}}
attack = UniversalPerturbation(classifier)
attack.set_params(**attack_params)
x_test_adv = attack.generate(x_test)
self.assertFalse((x_test == x_test_adv).all())
self.assertTrue((x_test_adv <= 1).all())
self.assertTrue((x_test_adv >= 0).all())
preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all())
acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0]
logger.info('Accuracy on Iris with universal adversarial examples: %.2f%%', (acc * 100))
@unittest.skipIf(tf.__version__[0] == '2', reason='Skip unittests for TensorFlow v2 until Keras supports TensorFlow'
' v2 as backend.')
def test_iris_k_unbounded(self):
(_, _), (x_test, y_test) = self.iris
classifier, _ = get_iris_classifier_kr()
# Recreate a classifier without clip values
classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1)
attack_params = {"max_iter": 1, "attacker": "newtonfool", "attacker_params": {"max_iter": 5}}
attack = UniversalPerturbation(classifier)
attack.set_params(**attack_params)
x_test_adv = attack.generate(x_test)
self.assertFalse((x_test == x_test_adv).all())
preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all())
acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0]
logger.info('Accuracy on Iris with universal adversarial examples: %.2f%%', (acc * 100))
def test_iris_tf(self):
    # Untargeted EAD-based universal perturbation on the TensorFlow classifier.
    (_, _), (x_test, y_test) = self.iris
    classifier, _ = get_iris_classifier_tf()

    attack = UniversalPerturbation(classifier)
    attack.set_params(max_iter=1, attacker="ead", attacker_params={"max_iter": 5, "targeted": False})
    x_test_adv = attack.generate(x_test)

    self.assertFalse((x_test == x_test_adv).all())
    self.assertTrue((x_test_adv <= 1).all())
    self.assertTrue((x_test_adv >= 0).all())

    preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
    self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all())
    acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0]
    logger.info('Accuracy on Iris with universal adversarial examples: %.2f%%', (acc * 100))
def test_iris_pt(self):
    # Untargeted EAD-based universal perturbation on the PyTorch classifier.
    (_, _), (x_test, y_test) = self.iris
    classifier = get_iris_classifier_pt()

    attack = UniversalPerturbation(classifier)
    attack.set_params(max_iter=1, attacker="ead", attacker_params={"max_iter": 5, "targeted": False})
    x_test_adv = attack.generate(x_test)

    self.assertFalse((x_test == x_test_adv).all())
    self.assertTrue((x_test_adv <= 1).all())
    self.assertTrue((x_test_adv >= 0).all())

    preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
    self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all())
    acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0]
    logger.info('Accuracy on Iris with universal adversarial examples: %.2f%%', (acc * 100))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
import logging
import unittest
import tensorflow as tf
import numpy as np
from art.attacks import UniversalPerturbation
from art.classifiers import KerasClassifier
from art.utils import load_dataset, master_seed
from art.utils_test import get_classifier_tf, get_classifier_kr, get_classifier_pt
from art.utils_test import get_iris_classifier_tf, get_iris_classifier_kr, get_iris_classifier_pt
logger = logging.getLogger('testLogger')

# Dataset sizing used by the MNIST-based tests below.
BATCH_SIZE = 100
NB_TRAIN = 500
NB_TEST = 10
class TestUniversalPerturbation(unittest.TestCase):
    """
    A unittest class for testing the UniversalPerturbation attack.
    """

    @classmethod
    def setUpClass(cls):
        # Load MNIST once and keep only a small slice to keep the tests fast.
        (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist')
        cls.mnist = ((x_train[:NB_TRAIN], y_train[:NB_TRAIN]),
                     (x_test[:NB_TEST], y_test[:NB_TEST]))

    def setUp(self):
        # Set master seed
        master_seed(1234)

    def _check_universal_perturbation(self, classifier, data, attacker, attacker_params):
        """Fit a universal perturbation on *data* and verify it fools *classifier*."""
        (x_train, y_train), (x_test, y_test) = data
        up = UniversalPerturbation(classifier, max_iter=1, attacker=attacker,
                                   attacker_params=attacker_params)
        x_train_adv = up.generate(x_train)
        self.assertTrue((up.fooling_rate >= 0.2) or not up.converged)

        # Re-use the fitted universal noise on the held-out test split.
        x_test_adv = x_test + up.noise
        self.assertFalse((x_test == x_test_adv).all())

        train_y_pred = np.argmax(classifier.predict(x_train_adv), axis=1)
        test_y_pred = np.argmax(classifier.predict(x_test_adv), axis=1)
        self.assertFalse((np.argmax(y_test, axis=1) == test_y_pred).all())
        self.assertFalse((np.argmax(y_train, axis=1) == train_y_pred).all())

    def test_tfclassifier(self):
        """
        First test with the TensorFlowClassifier.
        :return:
        """
        tfc, sess = get_classifier_tf()
        self._check_universal_perturbation(tfc, self.mnist, "newtonfool", {"max_iter": 5})

    @unittest.skipIf(tf.__version__[0] == '2', reason='Skip unittests for TensorFlow v2 until Keras supports TensorFlow'
                                                      ' v2 as backend.')
    def test_krclassifier(self):
        """
        Second test with the KerasClassifier.
        :return:
        """
        krc = get_classifier_kr()
        self._check_universal_perturbation(krc, self.mnist, "ead",
                                           {"max_iter": 5, "targeted": False})

    def test_ptclassifier(self):
        """
        Third test with the PyTorchClassifier.
        :return:
        """
        ptc = get_classifier_pt()
        # PyTorch expects channels-first float32 input.
        (x_train, y_train), (x_test, y_test) = self.mnist
        x_train = np.swapaxes(x_train, 1, 3).astype(np.float32)
        x_test = np.swapaxes(x_test, 1, 3).astype(np.float32)
        self._check_universal_perturbation(
            ptc, ((x_train, y_train), (x_test, y_test)), "newtonfool", {"max_iter": 5})

    def test_classifier_type_check_fail_classifier(self):
        # Use a useless test classifier to test basic classifier properties
        class ClassifierNoAPI:
            pass

        classifier = ClassifierNoAPI
        with self.assertRaises(TypeError) as context:
            _ = UniversalPerturbation(classifier=classifier)

        self.assertIn('For `UniversalPerturbation` classifier must be an instance of '
                      '`art.classifiers.classifier.Classifier`, the provided classifier is instance of '
                      '(<class \'object\'>,).', str(context.exception))

    def test_classifier_type_check_fail_gradients(self):
        # Use a test classifier not providing gradients required by white-box attack
        from art.classifiers.scikitlearn import ScikitlearnDecisionTreeClassifier
        from sklearn.tree import DecisionTreeClassifier

        classifier = ScikitlearnDecisionTreeClassifier(model=DecisionTreeClassifier())
        with self.assertRaises(TypeError) as context:
            _ = UniversalPerturbation(classifier=classifier)

        self.assertIn('For `UniversalPerturbation` classifier must be an instance of '
                      '`art.classifiers.classifier.ClassifierNeuralNetwork` and '
                      '`art.classifiers.classifier.ClassifierGradients`, the provided classifier is instance of '
                      '(<class \'art.classifiers.scikitlearn.ScikitlearnClassifier\'>,).', str(context.exception))
class TestUniversalPerturbationVectors(unittest.TestCase):
    """Tests of the UniversalPerturbation attack on tabular (Iris) classifiers."""

    @classmethod
    def setUpClass(cls):
        # Get Iris
        (x_train, y_train), (x_test, y_test), _, _ = load_dataset('iris')
        cls.iris = (x_train, y_train), (x_test, y_test)

    def setUp(self):
        master_seed(1234)

    def _check_iris_attack(self, classifier, attack_params, clipped=True):
        """Run the attack on the Iris test split and verify its effect."""
        (_, _), (x_test, y_test) = self.iris
        attack = UniversalPerturbation(classifier)
        attack.set_params(**attack_params)
        x_test_adv = attack.generate(x_test)
        self.assertFalse((x_test == x_test_adv).all())
        if clipped:
            self.assertTrue((x_test_adv <= 1).all())
            self.assertTrue((x_test_adv >= 0).all())

        preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
        self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all())
        acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0]
        logger.info('Accuracy on Iris with universal adversarial examples: %.2f%%', (acc * 100))

    @unittest.skipIf(tf.__version__[0] == '2', reason='Skip unittests for TensorFlow v2 until Keras supports TensorFlow'
                                                      ' v2 as backend.')
    def test_iris_k_clipped(self):
        classifier, _ = get_iris_classifier_kr()
        self._check_iris_attack(
            classifier,
            {"max_iter": 1, "attacker": "newtonfool", "attacker_params": {"max_iter": 5}})

    @unittest.skipIf(tf.__version__[0] == '2', reason='Skip unittests for TensorFlow v2 until Keras supports TensorFlow'
                                                      ' v2 as backend.')
    def test_iris_k_unbounded(self):
        classifier, _ = get_iris_classifier_kr()
        # Recreate a classifier without clip values
        classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1)
        self._check_iris_attack(
            classifier,
            {"max_iter": 1, "attacker": "newtonfool", "attacker_params": {"max_iter": 5}},
            clipped=False)

    def test_iris_tf(self):
        classifier, _ = get_iris_classifier_tf()
        self._check_iris_attack(
            classifier,
            {"max_iter": 1, "attacker": "ead", "attacker_params": {"max_iter": 5, "targeted": False}})

    def test_iris_pt(self):
        classifier = get_iris_classifier_pt()
        self._check_iris_attack(
            classifier,
            {"max_iter": 1, "attacker": "ead", "attacker_params": {"max_iter": 5, "targeted": False}})
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
import sys
import types
import collections
import io
from opcode import *
from opcode import __all__ as _opcodes_all
__all__ = ["code_info", "dis", "disassemble", "distb", "disco",
           "findlinestarts", "findlabels", "show_code",
           "get_instructions", "Instruction", "Bytecode"] + _opcodes_all
del _opcodes_all

# Objects that may wrap, or whose __dict__ may contain, code objects.
_have_code = (types.MethodType, types.FunctionType, types.CodeType,
              classmethod, staticmethod, type)

FORMAT_VALUE = opmap['FORMAT_VALUE']
def _try_compile(source, name):
    """Compile *source* first as an expression, then as a statement.

    Utility function to accept strings in functions that otherwise
    expect code objects.
    """
    try:
        return compile(source, name, 'eval')
    except SyntaxError:
        return compile(source, name, 'exec')
def dis(x=None, *, file=None):
    """Disassemble classes, methods, functions, generators, or code.

    With no argument, disassemble the last traceback.
    """
    if x is None:
        distb(file=file)
        return
    # Unwrap methods, functions and generators down to a code object.
    if hasattr(x, '__func__'):
        x = x.__func__
    if hasattr(x, '__code__'):
        x = x.__code__
    if hasattr(x, 'gi_code'):
        x = x.gi_code
    if hasattr(x, '__dict__'):
        # Class or module: disassemble every code-like attribute.
        for name, item in sorted(x.__dict__.items()):
            if isinstance(item, _have_code):
                print("Disassembly of %s:" % name, file=file)
                try:
                    dis(item, file=file)
                except TypeError as msg:
                    print("Sorry:", msg, file=file)
                print(file=file)
    elif hasattr(x, 'co_code'):                 # Code object
        disassemble(x, file=file)
    elif isinstance(x, (bytes, bytearray)):     # Raw bytecode
        _disassemble_bytes(x, file=file)
    elif isinstance(x, str):                    # Source code
        _disassemble_str(x, file=file)
    else:
        raise TypeError("don't know how to disassemble %s objects" %
                        type(x).__name__)
def distb(tb=None, *, file=None):
    """Disassemble a traceback (default: last traceback)."""
    if tb is None:
        try:
            tb = sys.last_traceback
        except AttributeError:
            raise RuntimeError("no last traceback to disassemble")
    # Walk to the innermost frame, where the exception was raised.
    while tb.tb_next:
        tb = tb.tb_next
    disassemble(tb.tb_frame.f_code, tb.tb_lasti, file=file)
# The inspect module interrogates this dictionary to build its
# list of CO_* constants. It is also used by pretty_flags to
# turn the co_flags field into a human readable list.
COMPILER_FLAG_NAMES = {
    1: "OPTIMIZED",
    2: "NEWLOCALS",
    4: "VARARGS",
    8: "VARKEYWORDS",
    16: "NESTED",
    32: "GENERATOR",
    64: "NOFREE",
    128: "COROUTINE",
    256: "ITERABLE_COROUTINE",
    512: "ASYNC_GENERATOR",
}

def pretty_flags(flags):
    """Return pretty representation of code flags."""
    names = []
    for bit in range(32):
        mask = 1 << bit
        if flags & mask:
            names.append(COMPILER_FLAG_NAMES.get(mask, hex(mask)))
            flags ^= mask
            if not flags:
                break
    else:
        # No break: whatever is left over (possibly 0) is shown raw.
        names.append(hex(flags))
    return ", ".join(names)
def _get_code_object(x):
    """Helper to handle methods, functions, generators, strings and raw code objects"""
    # Peel wrappers until a code object (or a failure) is reached.
    if hasattr(x, '__func__'):      # Method
        x = x.__func__
    if hasattr(x, '__code__'):      # Function
        x = x.__code__
    if hasattr(x, 'gi_code'):       # Generator
        x = x.gi_code
    if isinstance(x, str):          # Source code
        x = _try_compile(x, "<disassembly>")
    if hasattr(x, 'co_code'):       # Code object
        return x
    raise TypeError("don't know how to disassemble %s objects" %
                    type(x).__name__)
def code_info(x):
    """Formatted details of methods, functions, or code."""
    # Accepts anything _get_code_object can unwrap to a code object.
    return _format_code_info(_get_code_object(x))
def _format_code_info(co):
    """Build the multi-line metadata listing for a code object."""
    lines = []
    append = lines.append
    append("Name: %s" % co.co_name)
    append("Filename: %s" % co.co_filename)
    append("Argument count: %s" % co.co_argcount)
    append("Kw-only arguments: %s" % co.co_kwonlyargcount)
    append("Number of locals: %s" % co.co_nlocals)
    append("Stack size: %s" % co.co_stacksize)
    append("Flags: %s" % pretty_flags(co.co_flags))
    if co.co_consts:
        append("Constants:")
        for index_const in enumerate(co.co_consts):
            append("%4d: %r" % index_const)
    # The remaining sections are all name tuples with identical formatting.
    for title, seq in (("Names:", co.co_names),
                       ("Variable names:", co.co_varnames),
                       ("Free variables:", co.co_freevars),
                       ("Cell variables:", co.co_cellvars)):
        if seq:
            append(title)
            for index_name in enumerate(seq):
                append("%4d: %s" % index_name)
    return "\n".join(lines)
def show_code(co, *, file=None):
    """Print details of methods, functions, or code to *file*.

    If *file* is not provided, the output is printed on stdout.
    """
    print(code_info(co), file=file)
_Instruction = collections.namedtuple(
    "_Instruction",
    "opname opcode arg argval argrepr offset starts_line is_jump_target")

_Instruction.opname.__doc__ = "Human readable name for operation"
_Instruction.opcode.__doc__ = "Numeric code for operation"
_Instruction.arg.__doc__ = "Numeric argument to operation (if any), otherwise None"
_Instruction.argval.__doc__ = "Resolved arg value (if known), otherwise same as arg"
_Instruction.argrepr.__doc__ = "Human readable description of operation argument"
_Instruction.offset.__doc__ = "Start index of operation within bytecode sequence"
_Instruction.starts_line.__doc__ = "Line started by this opcode (if any), otherwise None"
_Instruction.is_jump_target.__doc__ = "True if other code jumps to here, otherwise False"

class Instruction(_Instruction):
    """Details for a bytecode operation

    Defined fields:
      opname - human readable name for operation
      opcode - numeric code for operation
      arg - numeric argument to operation (if any), otherwise None
      argval - resolved arg value (if known), otherwise same as arg
      argrepr - human readable description of operation argument
      offset - start index of operation within bytecode sequence
      starts_line - line started by this opcode (if any), otherwise None
      is_jump_target - True if other code jumps to here, otherwise False
    """

    def _disassemble(self, lineno_width=3, mark_as_current=False):
        """Format instruction details for inclusion in disassembly output

        *lineno_width* sets the width of the line number field (0 omits it)
        *mark_as_current* inserts a '-->' marker arrow as part of the line
        """
        fields = []
        # Column: source code line number (blank when the line continues).
        if lineno_width:
            if self.starts_line is not None:
                fields.append(("%%%dd" % lineno_width) % self.starts_line)
            else:
                fields.append(' ' * lineno_width)
        # Column: current-instruction arrow.
        fields.append('-->' if mark_as_current else '   ')
        # Column: jump-target marker.
        fields.append('>>' if self.is_jump_target else '  ')
        # Columns: offset, opcode name, raw arg, decoded arg.
        fields.append(repr(self.offset).rjust(4))
        fields.append(self.opname.ljust(20))
        if self.arg is not None:
            fields.append(repr(self.arg).rjust(5))
        if self.argrepr:
            fields.append('(' + self.argrepr + ')')
        return ' '.join(fields).rstrip()
def get_instructions(x, *, first_line=None):
    """Iterator for the opcodes in methods, functions or code

    Generates a series of Instruction named tuples giving the details of
    each operations in the supplied code.

    If *first_line* is not None, it indicates the line number that should
    be reported for the first source line in the disassembled code.
    Otherwise, the source line information (if any) is taken directly from
    the disassembled code object.
    """
    co = _get_code_object(x)
    cell_names = co.co_cellvars + co.co_freevars
    linestarts = dict(findlinestarts(co))
    line_offset = 0 if first_line is None else first_line - co.co_firstlineno
    return _get_instructions_bytes(co.co_code, co.co_varnames, co.co_names,
                                   co.co_consts, cell_names, linestarts,
                                   line_offset)
def _get_const_info(const_index, const_list):
    """Helper to get optional details about const references

    Returns the dereferenced constant and its repr if the constant
    list is defined, otherwise the constant index and its repr().
    """
    if const_list is None:
        argval = const_index
    else:
        argval = const_list[const_index]
    return argval, repr(argval)
def _get_name_info(name_index, name_list):
    """Helper to get optional details about named references

    Returns the dereferenced name as both value and repr if the name
    list is defined, otherwise the name index and its repr().
    """
    if name_list is None:
        return name_index, repr(name_index)
    argval = name_list[name_index]
    return argval, argval
def _get_instructions_bytes(code, varnames=None, names=None, constants=None,
                            cells=None, linestarts=None, line_offset=0):
    """Iterate over the instructions in a bytecode string.

    Generates a sequence of Instruction namedtuples giving the details of each
    opcode.  Additional information about the code's runtime environment
    (e.g. variable names, constants) can be specified using optional
    arguments.
    """
    labels = findlabels(code)
    starts_line = None
    for offset, op, arg in _unpack_opargs(code):
        if linestarts is not None:
            starts_line = linestarts.get(offset, None)
            if starts_line is not None:
                starts_line += line_offset
        is_jump_target = offset in labels
        argval = None
        argrepr = ''
        if arg is not None:
            # Set argval to the dereferenced value of the argument when
            # available, and argrepr to the string representation of argval.
            # _disassemble_bytes needs the string repr of the
            # raw name index for LOAD_GLOBAL, LOAD_CONST, etc.
            argval = arg
            if op in hasconst:
                argval, argrepr = _get_const_info(arg, constants)
            elif op in hasname:
                argval, argrepr = _get_name_info(arg, names)
            elif op in hasjrel:
                # Relative jumps are resolved against the next instruction.
                argval = offset + 2 + arg
                argrepr = "to " + repr(argval)
            elif op in haslocal:
                argval, argrepr = _get_name_info(arg, varnames)
            elif op in hascompare:
                argval = cmp_op[arg]
                argrepr = argval
            elif op in hasfree:
                argval, argrepr = _get_name_info(arg, cells)
            elif op == FORMAT_VALUE:
                # Low two bits select the conversion; bit 2 flags a format spec.
                argval = ((None, str, repr, ascii)[arg & 0x3], bool(arg & 0x4))
                argrepr = ('', 'str', 'repr', 'ascii')[arg & 0x3]
                if argval[1]:
                    if argrepr:
                        argrepr += ', '
                    argrepr += 'with format'
        yield Instruction(opname[op], op, arg, argval, argrepr,
                          offset, starts_line, is_jump_target)
def disassemble(co, lasti=-1, *, file=None):
    """Disassemble a code object."""
    _disassemble_bytes(co.co_code, lasti, co.co_varnames, co.co_names,
                       co.co_consts, co.co_cellvars + co.co_freevars,
                       dict(findlinestarts(co)), file=file)
def _disassemble_bytes(code, lasti=-1, varnames=None, names=None,
                       constants=None, cells=None, linestarts=None,
                       *, file=None, line_offset=0):
    # Omit the line number column entirely if we have no line number info
    show_lineno = linestarts is not None
    # TODO?: Adjust width upwards if max(linestarts.values()) >= 1000?
    lineno_width = 3 if show_lineno else 0
    for instr in _get_instructions_bytes(code, varnames, names, constants,
                                         cells, linestarts,
                                         line_offset=line_offset):
        # Blank separator before each new source line (except at offset 0).
        if show_lineno and instr.starts_line is not None and instr.offset > 0:
            print(file=file)
        print(instr._disassemble(lineno_width, instr.offset == lasti),
              file=file)
def _disassemble_str(source, *, file=None):
    """Compile the source string, then disassemble the code object."""
    disassemble(_try_compile(source, '<dis>'), file=file)

disco = disassemble  # XXX For backwards compatibility
def _unpack_opargs(code):
    """Yield (offset, opcode, arg) triples, folding EXTENDED_ARG prefixes."""
    extended_arg = 0
    for offset in range(0, len(code), 2):
        op = code[offset]
        if op < HAVE_ARGUMENT:
            arg = None
        else:
            arg = code[offset + 1] | extended_arg
            # EXTENDED_ARG contributes the high bits of the next instruction.
            extended_arg = (arg << 8) if op == EXTENDED_ARG else 0
        yield (offset, op, arg)
def findlabels(code):
    """Detect all offsets in a byte code which are jump targets.

    Return the list of offsets.
    """
    labels = []
    for offset, op, arg in _unpack_opargs(code):
        if arg is None:
            continue
        if op in hasjrel:
            label = offset + 2 + arg   # relative to the next instruction
        elif op in hasjabs:
            label = arg                # absolute bytecode offset
        else:
            continue
        if label not in labels:
            labels.append(label)
    return labels
def findlinestarts(code):
    """Find the offsets in a byte code which are start of lines in the source.

    Generate pairs (offset, lineno) as described in Python/compile.c.
    """
    byte_incrs = code.co_lnotab[0::2]
    line_incrs = code.co_lnotab[1::2]
    lastlineno = None
    lineno = code.co_firstlineno
    addr = 0
    for byte_incr, line_incr in zip(byte_incrs, line_incrs):
        if byte_incr:
            if lineno != lastlineno:
                yield (addr, lineno)
                lastlineno = lineno
            addr += byte_incr
        if line_incr >= 0x80:
            # line_increments is an array of 8-bit signed integers
            line_incr -= 0x100
        lineno += line_incr
    if lineno != lastlineno:
        yield (addr, lineno)
class Bytecode:
    """The bytecode operations of a piece of code

    Instantiate this with a function, method, string of code, or a code object
    (as returned by compile()).

    Iterating over this yields the bytecode operations as Instruction instances.
    """

    def __init__(self, x, *, first_line=None, current_offset=None):
        self.codeobj = co = _get_code_object(x)
        if first_line is None:
            self.first_line = co.co_firstlineno
            self._line_offset = 0
        else:
            self.first_line = first_line
            self._line_offset = first_line - co.co_firstlineno
        self._cell_names = co.co_cellvars + co.co_freevars
        self._linestarts = dict(findlinestarts(co))
        self._original_object = x
        self.current_offset = current_offset

    def __iter__(self):
        co = self.codeobj
        return _get_instructions_bytes(co.co_code, co.co_varnames, co.co_names,
                                       co.co_consts, self._cell_names,
                                       self._linestarts,
                                       line_offset=self._line_offset)

    def __repr__(self):
        return "{}({!r})".format(type(self).__name__, self._original_object)

    @classmethod
    def from_traceback(cls, tb):
        """ Construct a Bytecode from the given traceback """
        # Walk to the innermost frame.
        while tb.tb_next:
            tb = tb.tb_next
        return cls(tb.tb_frame.f_code, current_offset=tb.tb_lasti)

    def info(self):
        """Return formatted information about the code object."""
        return _format_code_info(self.codeobj)

    def dis(self):
        """Return a formatted view of the bytecode operations."""
        co = self.codeobj
        lasti = -1 if self.current_offset is None else self.current_offset
        with io.StringIO() as output:
            _disassemble_bytes(co.co_code, varnames=co.co_varnames,
                               names=co.co_names, constants=co.co_consts,
                               cells=self._cell_names,
                               linestarts=self._linestarts,
                               line_offset=self._line_offset,
                               file=output,
                               lasti=lasti)
            return output.getvalue()
def _test():
    """Simple test program to disassemble a file."""
    import argparse

    parser = argparse.ArgumentParser()
    # Default '-' reads from stdin via argparse.FileType.
    parser.add_argument('infile', type=argparse.FileType(), nargs='?', default='-')
    args = parser.parse_args()
    with args.infile as infile:
        source = infile.read()
    dis(compile(source, args.infile.name, "exec"))
# Allow using this module as a script:  python -m dis <file>
if __name__ == "__main__":
    _test()
import sys
import types
import collections
import io
from opcode import *
from opcode import __all__ as _opcodes_all
__all__ = ["code_info", "dis", "disassemble", "distb", "disco",
           "findlinestarts", "findlabels", "show_code",
           "get_instructions", "Instruction", "Bytecode"] + _opcodes_all
del _opcodes_all

# Objects that may wrap, or whose __dict__ may contain, code objects.
_have_code = (types.MethodType, types.FunctionType, types.CodeType,
              classmethod, staticmethod, type)

FORMAT_VALUE = opmap['FORMAT_VALUE']
def _try_compile(source, name):
    """Compile *source* as an expression, falling back to a statement.

    Utility function to accept strings in functions that otherwise
    expect code objects.
    """
    try:
        return compile(source, name, 'eval')
    except SyntaxError:
        return compile(source, name, 'exec')
def dis(x=None, *, file=None):
    """Disassemble classes, methods, functions, generators, or code.

    With no argument, disassemble the last traceback.
    """
    if x is None:
        distb(file=file)
        return
    # Unwrap methods, functions and generators down to a code object.
    if hasattr(x, '__func__'):
        x = x.__func__
    if hasattr(x, '__code__'):
        x = x.__code__
    if hasattr(x, 'gi_code'):
        x = x.gi_code
    if hasattr(x, '__dict__'):
        # Class or module: disassemble every code-like attribute.
        for name, item in sorted(x.__dict__.items()):
            if isinstance(item, _have_code):
                print("Disassembly of %s:" % name, file=file)
                try:
                    dis(item, file=file)
                except TypeError as msg:
                    print("Sorry:", msg, file=file)
                print(file=file)
    elif hasattr(x, 'co_code'):                 # Code object
        disassemble(x, file=file)
    elif isinstance(x, (bytes, bytearray)):     # Raw bytecode
        _disassemble_bytes(x, file=file)
    elif isinstance(x, str):                    # Source code
        _disassemble_str(x, file=file)
    else:
        raise TypeError("don't know how to disassemble %s objects" %
                        type(x).__name__)

def distb(tb=None, *, file=None):
    """Disassemble a traceback (default: last traceback)."""
    if tb is None:
        try:
            tb = sys.last_traceback
        except AttributeError:
            raise RuntimeError("no last traceback to disassemble")
    # Walk to the innermost frame, where the exception was raised.
    while tb.tb_next:
        tb = tb.tb_next
    disassemble(tb.tb_frame.f_code, tb.tb_lasti, file=file)
# The inspect module interrogates this dictionary to build its
# list of CO_* constants. It is also used by pretty_flags to
# turn the co_flags field into a human readable list.
COMPILER_FLAG_NAMES = {
    1: "OPTIMIZED",
    2: "NEWLOCALS",
    4: "VARARGS",
    8: "VARKEYWORDS",
    16: "NESTED",
    32: "GENERATOR",
    64: "NOFREE",
    128: "COROUTINE",
    256: "ITERABLE_COROUTINE",
    512: "ASYNC_GENERATOR",
}

def pretty_flags(flags):
    """Return pretty representation of code flags."""
    names = []
    for bit in range(32):
        mask = 1 << bit
        if flags & mask:
            names.append(COMPILER_FLAG_NAMES.get(mask, hex(mask)))
            flags ^= mask
            if not flags:
                break
    else:
        # No break: whatever is left over (possibly 0) is shown raw.
        names.append(hex(flags))
    return ", ".join(names)
def _get_code_object(x):
    """Helper to handle methods, functions, generators, strings and raw code objects"""
    # Peel wrappers until a code object (or a failure) is reached.
    if hasattr(x, '__func__'):      # Method
        x = x.__func__
    if hasattr(x, '__code__'):      # Function
        x = x.__code__
    if hasattr(x, 'gi_code'):       # Generator
        x = x.gi_code
    if isinstance(x, str):          # Source code
        x = _try_compile(x, "<disassembly>")
    if hasattr(x, 'co_code'):       # Code object
        return x
    raise TypeError("don't know how to disassemble %s objects" %
                    type(x).__name__)

def code_info(x):
    """Formatted details of methods, functions, or code."""
    return _format_code_info(_get_code_object(x))
def _format_code_info(co):
    """Build the multi-line metadata listing for a code object."""
    lines = []
    append = lines.append
    append("Name: %s" % co.co_name)
    append("Filename: %s" % co.co_filename)
    append("Argument count: %s" % co.co_argcount)
    append("Kw-only arguments: %s" % co.co_kwonlyargcount)
    append("Number of locals: %s" % co.co_nlocals)
    append("Stack size: %s" % co.co_stacksize)
    append("Flags: %s" % pretty_flags(co.co_flags))
    if co.co_consts:
        append("Constants:")
        for index_const in enumerate(co.co_consts):
            append("%4d: %r" % index_const)
    # The remaining sections are all name tuples with identical formatting.
    for title, seq in (("Names:", co.co_names),
                       ("Variable names:", co.co_varnames),
                       ("Free variables:", co.co_freevars),
                       ("Cell variables:", co.co_cellvars)):
        if seq:
            append(title)
            for index_name in enumerate(seq):
                append("%4d: %s" % index_name)
    return "\n".join(lines)

def show_code(co, *, file=None):
    """Print details of methods, functions, or code to *file*.

    If *file* is not provided, the output is printed on stdout.
    """
    print(code_info(co), file=file)
_Instruction = collections.namedtuple(
    "_Instruction",
    "opname opcode arg argval argrepr offset starts_line is_jump_target")

_Instruction.opname.__doc__ = "Human readable name for operation"
_Instruction.opcode.__doc__ = "Numeric code for operation"
_Instruction.arg.__doc__ = "Numeric argument to operation (if any), otherwise None"
_Instruction.argval.__doc__ = "Resolved arg value (if known), otherwise same as arg"
_Instruction.argrepr.__doc__ = "Human readable description of operation argument"
_Instruction.offset.__doc__ = "Start index of operation within bytecode sequence"
_Instruction.starts_line.__doc__ = "Line started by this opcode (if any), otherwise None"
_Instruction.is_jump_target.__doc__ = "True if other code jumps to here, otherwise False"

class Instruction(_Instruction):
    """Details for a bytecode operation

    Defined fields:
      opname - human readable name for operation
      opcode - numeric code for operation
      arg - numeric argument to operation (if any), otherwise None
      argval - resolved arg value (if known), otherwise same as arg
      argrepr - human readable description of operation argument
      offset - start index of operation within bytecode sequence
      starts_line - line started by this opcode (if any), otherwise None
      is_jump_target - True if other code jumps to here, otherwise False
    """

    def _disassemble(self, lineno_width=3, mark_as_current=False):
        """Format instruction details for inclusion in disassembly output

        *lineno_width* sets the width of the line number field (0 omits it)
        *mark_as_current* inserts a '-->' marker arrow as part of the line
        """
        fields = []
        # Column: source code line number (blank when the line continues).
        if lineno_width:
            if self.starts_line is not None:
                fields.append(("%%%dd" % lineno_width) % self.starts_line)
            else:
                fields.append(' ' * lineno_width)
        # Column: current-instruction arrow.
        fields.append('-->' if mark_as_current else '   ')
        # Column: jump-target marker.
        fields.append('>>' if self.is_jump_target else '  ')
        # Columns: offset, opcode name, raw arg, decoded arg.
        fields.append(repr(self.offset).rjust(4))
        fields.append(self.opname.ljust(20))
        if self.arg is not None:
            fields.append(repr(self.arg).rjust(5))
        if self.argrepr:
            fields.append('(' + self.argrepr + ')')
        return ' '.join(fields).rstrip()
def get_instructions(x, *, first_line=None):
    """Iterator for the opcodes in methods, functions or code

    Generates a series of Instruction named tuples giving the details of
    each operations in the supplied code.

    If *first_line* is not None, it indicates the line number that should
    be reported for the first source line in the disassembled code.
    Otherwise, the source line information (if any) is taken directly from
    the disassembled code object.
    """
    co = _get_code_object(x)
    cell_names = co.co_cellvars + co.co_freevars
    linestarts = dict(findlinestarts(co))
    line_offset = 0 if first_line is None else first_line - co.co_firstlineno
    return _get_instructions_bytes(co.co_code, co.co_varnames, co.co_names,
                                   co.co_consts, cell_names, linestarts,
                                   line_offset)
def _get_const_info(const_index, const_list):
"""Helper to get optional details about const references
Returns the dereferenced constant and its repr if the constant
list is defined.
Otherwise returns the constant index and its repr().
"""
argval = const_index
if const_list is not None:
argval = const_list[const_index]
return argval, repr(argval)
def _get_name_info(name_index, name_list):
"""Helper to get optional details about named references
Returns the dereferenced name as both value and repr if the name
list is defined.
Otherwise returns the name index and its repr().
"""
argval = name_index
if name_list is not None:
argval = name_list[name_index]
argrepr = argval
else:
argrepr = repr(argval)
return argval, argrepr
def _get_instructions_bytes(code, varnames=None, names=None, constants=None,
                            cells=None, linestarts=None, line_offset=0):
    """Iterate over the instructions in a bytecode string.

    Generates a sequence of Instruction namedtuples giving the details of each
    opcode. Additional information about the code's runtime environment
    (e.g. variable names, constants) can be specified using optional
    arguments.
    """
    labels = findlabels(code)
    starts_line = None
    for offset, op, arg in _unpack_opargs(code):
        if linestarts is not None:
            starts_line = linestarts.get(offset, None)
            if starts_line is not None:
                starts_line += line_offset
        is_jump_target = offset in labels
        argval = None
        argrepr = ''
        if arg is not None:
            # Set argval to the dereferenced value of the argument when
            # available, and argrepr to the string representation of argval.
            # _disassemble_bytes needs the string repr of the
            # raw name index for LOAD_GLOBAL, LOAD_CONST, etc.
            argval = arg
            if op in hasconst:
                argval, argrepr = _get_const_info(arg, constants)
            elif op in hasname:
                argval, argrepr = _get_name_info(arg, names)
            elif op in hasjrel:
                # Relative jump: target is measured from the next instruction
                # (2 bytes per instruction in wordcode).
                argval = offset + 2 + arg
                argrepr = "to " + repr(argval)
            elif op in haslocal:
                argval, argrepr = _get_name_info(arg, varnames)
            elif op in hascompare:
                argval = cmp_op[arg]
                argrepr = argval
            elif op in hasfree:
                argval, argrepr = _get_name_info(arg, cells)
            elif op == FORMAT_VALUE:
                # Low two bits select the conversion (none/str/repr/ascii);
                # bit 2 indicates that a format spec is present.
                argval = ((None, str, repr, ascii)[arg & 0x3], bool(arg & 0x4))
                argrepr = ('', 'str', 'repr', 'ascii')[arg & 0x3]
                if argval[1]:
                    if argrepr:
                        argrepr += ', '
                    argrepr += 'with format'
        yield Instruction(opname[op], op,
                          arg, argval, argrepr,
                          offset, starts_line, is_jump_target)
def disassemble(co, lasti=-1, *, file=None):
    """Disassemble a code object, writing the listing to *file*.

    *lasti*, when >= 0, marks the currently executing instruction.
    """
    names_in_cells = co.co_cellvars + co.co_freevars
    line_map = dict(findlinestarts(co))
    _disassemble_bytes(co.co_code, lasti, co.co_varnames, co.co_names,
                       co.co_consts, names_in_cells, line_map, file=file)
def _disassemble_bytes(code, lasti=-1, varnames=None, names=None,
                       constants=None, cells=None, linestarts=None,
                       *, file=None, line_offset=0):
    """Print a disassembly listing for a raw bytecode string."""
    # Omit the line number column entirely if we have no line number info.
    have_lines = linestarts is not None
    # TODO?: Adjust width upwards if max(linestarts.values()) >= 1000?
    width = 3 if have_lines else 0
    for instruction in _get_instructions_bytes(code, varnames, names,
                                               constants, cells, linestarts,
                                               line_offset=line_offset):
        # Emit a blank separator before each new source line (not at offset 0).
        if (have_lines and instruction.starts_line is not None
                and instruction.offset > 0):
            print(file=file)
        print(instruction._disassemble(width, instruction.offset == lasti),
              file=file)
def _disassemble_str(source, *, file=None):
    """Compile the source string, then disassemble the code object."""
    disassemble(_try_compile(source, '<dis>'), file=file)

# Historical alias; kept so old callers of dis.disco() keep working.
disco = disassemble # XXX For backwards compatibility
def _unpack_opargs(code):
extended_arg = 0
for i in range(0, len(code), 2):
op = code[i]
if op >= HAVE_ARGUMENT:
arg = code[i+1] | extended_arg
extended_arg = (arg << 8) if op == EXTENDED_ARG else 0
else:
arg = None
yield (i, op, arg)
def findlabels(code):
    """Detect all offsets in a byte code which are jump targets.

    Return the list of offsets, in first-seen order, without duplicates.
    """
    targets = []
    for offset, op, arg in _unpack_opargs(code):
        if arg is None:
            continue
        if op in hasjrel:
            # Relative to the instruction following this one (2 bytes).
            target = offset + 2 + arg
        elif op in hasjabs:
            target = arg
        else:
            continue
        if target not in targets:
            targets.append(target)
    return targets
def findlinestarts(code):
    """Find the offsets in a byte code which are start of lines in the source.

    Generate pairs (offset, lineno) as described in Python/compile.c,
    decoded from the code object's ``co_lnotab`` table.
    """
    lnotab = code.co_lnotab
    addr_deltas = lnotab[0::2]
    line_deltas = lnotab[1::2]
    last_emitted = None
    lineno = code.co_firstlineno
    addr = 0
    for addr_delta, line_delta in zip(addr_deltas, line_deltas):
        if addr_delta:
            if lineno != last_emitted:
                yield (addr, lineno)
                last_emitted = lineno
            addr += addr_delta
        if line_delta >= 0x80:
            # line deltas are 8-bit *signed* integers
            line_delta -= 0x100
        lineno += line_delta
    if lineno != last_emitted:
        yield (addr, lineno)
class Bytecode:
    """The bytecode operations of a piece of code

    Instantiate this with a function, method, string of code, or a code object
    (as returned by compile()).

    Iterating over this yields the bytecode operations as Instruction instances.
    """

    def __init__(self, x, *, first_line=None, current_offset=None):
        # Keep both the normalised code object and the original argument
        # (the latter only for repr()).
        self.codeobj = co = _get_code_object(x)
        if first_line is None:
            self.first_line = co.co_firstlineno
            self._line_offset = 0
        else:
            # Report line numbers relative to the caller-supplied first line.
            self.first_line = first_line
            self._line_offset = first_line - co.co_firstlineno
        self._cell_names = co.co_cellvars + co.co_freevars
        self._linestarts = dict(findlinestarts(co))
        self._original_object = x
        # Offset of the "current" instruction (e.g. tb_lasti), or None.
        self.current_offset = current_offset

    def __iter__(self):
        co = self.codeobj
        return _get_instructions_bytes(co.co_code, co.co_varnames, co.co_names,
                                       co.co_consts, self._cell_names,
                                       self._linestarts,
                                       line_offset=self._line_offset)

    def __repr__(self):
        return "{}({!r})".format(self.__class__.__name__,
                                 self._original_object)

    @classmethod
    def from_traceback(cls, tb):
        """ Construct a Bytecode from the given traceback """
        # Walk to the innermost frame; its last executed instruction
        # becomes the current offset.
        while tb.tb_next:
            tb = tb.tb_next
        return cls(tb.tb_frame.f_code, current_offset=tb.tb_lasti)

    def info(self):
        """Return formatted information about the code object."""
        return _format_code_info(self.codeobj)

    def dis(self):
        """Return a formatted view of the bytecode operations."""
        co = self.codeobj
        if self.current_offset is not None:
            offset = self.current_offset
        else:
            offset = -1
        # Render into a string buffer instead of printing directly.
        with io.StringIO() as output:
            _disassemble_bytes(co.co_code, varnames=co.co_varnames,
                               names=co.co_names, constants=co.co_consts,
                               cells=self._cell_names,
                               linestarts=self._linestarts,
                               line_offset=self._line_offset,
                               file=output,
                               lasti=offset)
            return output.getvalue()
def _test():
    """Simple test program to disassemble a file (stdin by default)."""
    import argparse

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('infile', type=argparse.FileType(), nargs='?', default='-')
    ns = arg_parser.parse_args()
    with ns.infile as handle:
        text = handle.read()
    dis(compile(text, ns.infile.name, "exec"))
# Command-line entry point: disassemble the file named on the command line.
if __name__ == "__main__":
    _test()
import numpy as np
from ... import opcodes
from ...config import options
from ...core import OutputType, ENTITY_TYPE
from ...serialization.serializables import BoolField
from .core import DataFrameReductionOperand, DataFrameReductionMixin
class DataFrameSkew(DataFrameReductionOperand, DataFrameReductionMixin):
    """Reduction operand computing the skewness of a DataFrame/Series."""

    _op_type_ = opcodes.SKEW
    _func_name = "skew"

    # When falsy, a sample-size bias correction is applied to the estimate.
    _bias = BoolField("bias")

    def __init__(self, bias=None, **kw):
        super().__init__(_bias=bias, **kw)

    @property
    def bias(self):
        # Read-only access to the serialized bias flag.
        return self._bias

    @classmethod
    def get_reduction_callable(cls, op):
        """Build the callable evaluated by the reduction framework.

        Skewness is computed from raw moments:
        ``(E[x^3] - 3*E[x^2]*mean + 2*mean^3) / var**1.5``,
        with an optional small-sample bias correction when ``op.bias``
        is falsy.
        """
        from .aggregation import where_function

        skipna, bias = op.skipna, op.bias

        def skew(x):
            cnt = x.count()
            mean = x.mean(skipna=skipna)
            # Third central moment expressed through raw moments.
            divided = (
                (x ** 3).mean(skipna=skipna)
                - 3 * (x ** 2).mean(skipna=skipna) * mean
                + 2 * mean ** 3
            )
            var = x.var(skipna=skipna, ddof=0)
            # ENTITY_TYPE values are presumably lazy/tiled entities, so the
            # var > 0 test must be deferred into where_function — TODO confirm.
            if isinstance(var, ENTITY_TYPE) or var > 0:
                val = where_function(var > 0, divided / var ** 1.5, np.nan)
            else:
                val = np.nan
            if not bias:
                # Sample-skewness correction; requires at least 3 observations.
                val = where_function(
                    (var > 0) & (cnt > 2),
                    val * ((cnt * (cnt - 1)) ** 0.5 / (cnt - 2)),
                    np.nan,
                )
            return val

        return skew
def skew_series(
    df, axis=None, skipna=True, level=None, combine_size=None, bias=False, method=None
):
    """Compute skewness of a series, producing a scalar-typed operand result."""
    op = DataFrameSkew(
        axis=axis,
        skipna=skipna,
        level=level,
        combine_size=combine_size,
        bias=bias,
        output_types=[OutputType.scalar],
        use_inf_as_na=options.dataframe.mode.use_inf_as_na,
        method=method,
    )
    return op(df)
def skew_dataframe(
    df,
    axis=None,
    skipna=True,
    level=None,
    numeric_only=None,
    combine_size=None,
    bias=False,
    method=None,
):
    """Compute column-wise skewness of a dataframe, producing a series result."""
    op_kwargs = dict(
        axis=axis,
        skipna=skipna,
        level=level,
        numeric_only=numeric_only,
        bias=bias,
        combine_size=combine_size,
        output_types=[OutputType.series],
        use_inf_as_na=options.dataframe.mode.use_inf_as_na,
        method=method,
    )
    return DataFrameSkew(**op_kwargs)(df)
import numpy as np
from ... import opcodes
from ...config import options
from ...core import OutputType, ENTITY_TYPE
from ...serialization.serializables import BoolField
from .core import DataFrameReductionOperand, DataFrameReductionMixin
class DataFrameSkew(DataFrameReductionOperand, DataFrameReductionMixin):
    """Reduction operand computing the skewness of a DataFrame/Series."""

    _op_type_ = opcodes.SKEW
    _func_name = "skew"

    # When falsy, a sample-size bias correction is applied to the estimate.
    _bias = BoolField("bias")

    def __init__(self, bias=None, **kw):
        super().__init__(_bias=bias, **kw)

    @property
    def bias(self):
        # Read-only access to the serialized bias flag.
        return self._bias

    @classmethod
    def get_reduction_callable(cls, op):
        """Build the callable evaluated by the reduction framework.

        Skewness is computed from raw moments:
        ``(E[x^3] - 3*E[x^2]*mean + 2*mean^3) / var**1.5``,
        with an optional small-sample bias correction when ``op.bias``
        is falsy.
        """
        from .aggregation import where_function

        skipna, bias = op.skipna, op.bias

        def skew(x):
            cnt = x.count()
            mean = x.mean(skipna=skipna)
            # Third central moment expressed through raw moments.
            divided = (
                (x ** 3).mean(skipna=skipna)
                - 3 * (x ** 2).mean(skipna=skipna) * mean
                + 2 * mean ** 3
            )
            var = x.var(skipna=skipna, ddof=0)
            # ENTITY_TYPE values are presumably lazy/tiled entities, so the
            # var > 0 test must be deferred into where_function — TODO confirm.
            if isinstance(var, ENTITY_TYPE) or var > 0:
                val = where_function(var > 0, divided / var ** 1.5, np.nan)
            else:
                val = np.nan
            if not bias:
                # Sample-skewness correction; requires at least 3 observations.
                val = where_function(
                    (var > 0) & (cnt > 2),
                    val * ((cnt * (cnt - 1)) ** 0.5 / (cnt - 2)),
                    np.nan,
                )
            return val

        return skew
def skew_series(
    df, axis=None, skipna=True, level=None, combine_size=None, bias=False, method=None
):
    """Compute skewness of a series, producing a scalar-typed operand result."""
    op = DataFrameSkew(
        axis=axis,
        skipna=skipna,
        level=level,
        combine_size=combine_size,
        bias=bias,
        output_types=[OutputType.scalar],
        use_inf_as_na=options.dataframe.mode.use_inf_as_na,
        method=method,
    )
    return op(df)
def skew_dataframe(
    df,
    axis=None,
    skipna=True,
    level=None,
    numeric_only=None,
    combine_size=None,
    bias=False,
    method=None,
):
    """Compute column-wise skewness of a dataframe, producing a series result."""
    op_kwargs = dict(
        axis=axis,
        skipna=skipna,
        level=level,
        numeric_only=numeric_only,
        bias=bias,
        combine_size=combine_size,
        output_types=[OutputType.series],
        use_inf_as_na=options.dataframe.mode.use_inf_as_na,
        method=method,
    )
    return DataFrameSkew(**op_kwargs)(df)
import pytest
from investpy import get_stock_historical_data
import trendet
def test_errors():
    """
    This function raises trendet errors to improve coverage.

    Every case below is the valid baseline request with one or two
    fields deliberately broken; all raised exceptions are swallowed.
    """
    base = {
        'stock': 'BBVA',
        'country': 'Spain',
        'from_date': '01/01/2018',
        'to_date': '01/01/2019',
        'window_size': 5,
        'trend_limit': 3,
        'labels': None,
        'identify': 'both',
    }
    # Each entry overrides the baseline with one invalid combination.
    overrides = [
        {'stock': ['error']},
        {'stock': None},
        {'stock': 'error'},
        {'country': None},
        {'country': 'error'},
        {'country': ['error']},
        {'from_date': None},
        {'to_date': None},
        {'from_date': '01/01/2019', 'to_date': '01/01/2018'},
        {'from_date': '01/01-2018'},
        {'to_date': '_01*01/2019'},
        {'window_size': 0},
        {'trend_limit': -1},
        {'trend_limit': None},
        {'window_size': None, 'trend_limit': 1},
        {'window_size': 'error', 'trend_limit': 1},
        {'window_size': 1, 'trend_limit': 'error'},
        {'window_size': 2, 'trend_limit': 5},
        {'labels': ['a', 'b']},
        {'labels': 'error'},
        {'identify': ['error']},
        {'identify': 'error'},
    ]
    for override in overrides:
        param = {**base, **override}
        try:
            trendet.identify_trends(stock=param['stock'],
                                    country=param['country'],
                                    from_date=param['from_date'],
                                    to_date=param['to_date'],
                                    window_size=param['window_size'],
                                    trend_limit=param['trend_limit'],
                                    labels=param['labels'],
                                    identify=param['identify'])
        except:
            pass
        try:
            trendet.identify_all_trends(stock=param['stock'],
                                        country=param['country'],
                                        from_date=param['from_date'],
                                        to_date=param['to_date'],
                                        window_size=param['window_size'],
                                        identify=param['identify'])
        except:
            pass

    # DataFrame-based entry point: fetch a real frame, then break one
    # argument at a time.
    df = get_stock_historical_data(stock='REP',
                                   country='Spain',
                                   from_date='01/01/2018',
                                   to_date='01/01/2019')
    df['error'] = 'error'

    df_base = {'df': df, 'column': 'Close', 'window_size': 5, 'identify': 'both'}
    df_overrides = [
        {'df': None},
        {'df': ['error']},
        {'column': None},
        {'column': ['error']},
        {'column': 'error'},
        {'column': 'error'},
        {'window_size': None},
        {'window_size': 1},
        {'identify': ['error']},
        {'identify': 'error'},
    ]
    for override in df_overrides:
        param = {**df_base, **override}
        try:
            trendet.identify_df_trends(df=param['df'],
                                       column=param['column'],
                                       window_size=param['window_size'],
                                       identify=param['identify'])
        except:
            pass
# Run the error-coverage sweep when executed as a script.
if __name__ == '__main__':
    test_errors()
import pytest
from investpy import get_stock_historical_data
import trendet
def test_errors():
    """
    This function raises trendet errors to improve coverage.

    Every case below is the valid baseline request with one or two
    fields deliberately broken; all raised exceptions are swallowed.
    """
    base = {
        'stock': 'BBVA',
        'country': 'Spain',
        'from_date': '01/01/2018',
        'to_date': '01/01/2019',
        'window_size': 5,
        'trend_limit': 3,
        'labels': None,
        'identify': 'both',
    }
    # Each entry overrides the baseline with one invalid combination.
    overrides = [
        {'stock': ['error']},
        {'stock': None},
        {'stock': 'error'},
        {'country': None},
        {'country': 'error'},
        {'country': ['error']},
        {'from_date': None},
        {'to_date': None},
        {'from_date': '01/01/2019', 'to_date': '01/01/2018'},
        {'from_date': '01/01-2018'},
        {'to_date': '_01*01/2019'},
        {'window_size': 0},
        {'trend_limit': -1},
        {'trend_limit': None},
        {'window_size': None, 'trend_limit': 1},
        {'window_size': 'error', 'trend_limit': 1},
        {'window_size': 1, 'trend_limit': 'error'},
        {'window_size': 2, 'trend_limit': 5},
        {'labels': ['a', 'b']},
        {'labels': 'error'},
        {'identify': ['error']},
        {'identify': 'error'},
    ]
    for override in overrides:
        param = {**base, **override}
        try:
            trendet.identify_trends(stock=param['stock'],
                                    country=param['country'],
                                    from_date=param['from_date'],
                                    to_date=param['to_date'],
                                    window_size=param['window_size'],
                                    trend_limit=param['trend_limit'],
                                    labels=param['labels'],
                                    identify=param['identify'])
        except:
            pass
        try:
            trendet.identify_all_trends(stock=param['stock'],
                                        country=param['country'],
                                        from_date=param['from_date'],
                                        to_date=param['to_date'],
                                        window_size=param['window_size'],
                                        identify=param['identify'])
        except:
            pass

    # DataFrame-based entry point: fetch a real frame, then break one
    # argument at a time.
    df = get_stock_historical_data(stock='REP',
                                   country='Spain',
                                   from_date='01/01/2018',
                                   to_date='01/01/2019')
    df['error'] = 'error'

    df_base = {'df': df, 'column': 'Close', 'window_size': 5, 'identify': 'both'}
    df_overrides = [
        {'df': None},
        {'df': ['error']},
        {'column': None},
        {'column': ['error']},
        {'column': 'error'},
        {'column': 'error'},
        {'window_size': None},
        {'window_size': 1},
        {'identify': ['error']},
        {'identify': 'error'},
    ]
    for override in df_overrides:
        param = {**df_base, **override}
        try:
            trendet.identify_df_trends(df=param['df'],
                                       column=param['column'],
                                       window_size=param['window_size'],
                                       identify=param['identify'])
        except:
            pass
# Run the error-coverage sweep when executed as a script.
if __name__ == '__main__':
    test_errors()
import copy
import chainer
from chainer import reporter
from chainercv.evaluations import eval_semantic_segmentation
from chainercv.utils import apply_to_iterator
import pandas
import six
import tqdm
from instance_occlsegm_lib.contrib.synthetic2d.evaluations import \
eval_instseg_voc
from ..evaluations import eval_occlusion_segmentation
class PanopticSegmentationVOCEvaluator(chainer.training.extensions.Evaluator):
    """Trainer extension evaluating panoptic segmentation on VOC-style data.

    Combines instance-segmentation AP with mIoU over visible and occluded
    label maps, averaged across all registered iterators.
    """

    name = 'validation'

    def __init__(self, iterator, target, device=None,
                 use_07_metric=False, label_names=None, show_progress=False):
        super(PanopticSegmentationVOCEvaluator, self).__init__(
            iterator=iterator, target=target, device=device)
        self.use_07_metric = use_07_metric
        self.label_names = label_names
        self._show_progress = show_progress

    def evaluate(self):
        """Run evaluation over every iterator and report the mean metrics."""
        target = self._targets['main']
        iterators = six.itervalues(self._iterators)
        total = len(self._iterators)
        if self._show_progress:
            iterators = tqdm.tqdm(iterators, total=total, leave=False)
        reports = []
        for iterator in iterators:
            report = self._evaluate_one(target, iterator)
            reports.append(report)
        # Average metric dicts across iterators, ignoring NaN entries.
        report = pandas.DataFrame(reports).mean(skipna=True).to_dict()
        observation = dict()
        with reporter.report_scope(observation):
            reporter.report(report, target)
        return observation

    def _evaluate_one(self, target, iterator):
        """Evaluate a single iterator; return a dict of metric values."""
        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            # Fall back to a shallow copy for non-resettable iterators.
            it = copy.copy(iterator)
        if self._show_progress:
            it = tqdm.tqdm(it, total=len(it.dataset), leave=False)
        in_values, out_values, rest_values = apply_to_iterator(
            target.predict, it)
        imgs, = in_values
        pred_bboxes, pred_masks, pred_labels, pred_scores, \
            pred_lbls_vis, pred_lbls_occ = out_values
        gt_bboxes, gt_labels, gt_masks, gt_lbls_vis, gt_lbls_occ = rest_values
        # evaluate
        result = eval_instseg_voc(
            pred_masks, pred_labels, pred_scores,
            gt_masks, gt_labels, None,
            use_07_metric=self.use_07_metric,
        )
        result_lbl_vis = eval_semantic_segmentation(pred_lbls_vis, gt_lbls_vis)
        result['miou/vis'] = result_lbl_vis['miou']
        result_lbl_occ = eval_occlusion_segmentation(
            pred_lbls_occ, gt_lbls_occ
        )
        result['miou/occ'] = result_lbl_occ['miou']
        # Overall mIoU: unweighted mean of visible and occluded mIoU.
        result['miou'] = (result['miou/vis'] + result['miou/occ']) / 2.
        return result
import chainer
from chainer import reporter
from chainercv.evaluations import eval_semantic_segmentation
from chainercv.utils import apply_to_iterator
import pandas
import six
import tqdm
from instance_occlsegm_lib.contrib.synthetic2d.evaluations import \
eval_instseg_voc
from ..evaluations import eval_occlusion_segmentation
class PanopticSegmentationVOCEvaluator(chainer.training.extensions.Evaluator):
    """Trainer extension evaluating panoptic segmentation on VOC-style data.

    Combines instance-segmentation AP with mIoU over visible and occluded
    label maps, averaged across all registered iterators.
    """

    name = 'validation'

    def __init__(self, iterator, target, device=None,
                 use_07_metric=False, label_names=None, show_progress=False):
        super(PanopticSegmentationVOCEvaluator, self).__init__(
            iterator=iterator, target=target, device=device)
        self.use_07_metric = use_07_metric
        self.label_names = label_names
        self._show_progress = show_progress

    def evaluate(self):
        """Run evaluation over every iterator and report the mean metrics."""
        target = self._targets['main']
        iterators = six.itervalues(self._iterators)
        total = len(self._iterators)
        if self._show_progress:
            iterators = tqdm.tqdm(iterators, total=total, leave=False)
        reports = []
        for iterator in iterators:
            report = self._evaluate_one(target, iterator)
            reports.append(report)
        # Average metric dicts across iterators, ignoring NaN entries.
        report = pandas.DataFrame(reports).mean(skipna=True).to_dict()
        observation = dict()
        with reporter.report_scope(observation):
            reporter.report(report, target)
        return observation

    def _evaluate_one(self, target, iterator):
        """Evaluate a single iterator; return a dict of metric values."""
        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            # Fall back to a shallow copy for non-resettable iterators.
            it = copy.copy(iterator)
        if self._show_progress:
            it = tqdm.tqdm(it, total=len(it.dataset), leave=False)
        in_values, out_values, rest_values = apply_to_iterator(
            target.predict, it)
        imgs, = in_values
        pred_bboxes, pred_masks, pred_labels, pred_scores, \
            pred_lbls_vis, pred_lbls_occ = out_values
        gt_bboxes, gt_labels, gt_masks, gt_lbls_vis, gt_lbls_occ = rest_values
        # evaluate
        result = eval_instseg_voc(
            pred_masks, pred_labels, pred_scores,
            gt_masks, gt_labels, None,
            use_07_metric=self.use_07_metric,
        )
        result_lbl_vis = eval_semantic_segmentation(pred_lbls_vis, gt_lbls_vis)
        result['miou/vis'] = result_lbl_vis['miou']
        result_lbl_occ = eval_occlusion_segmentation(
            pred_lbls_occ, gt_lbls_occ
        )
        result['miou/occ'] = result_lbl_occ['miou']
        # Overall mIoU: unweighted mean of visible and occluded mIoU.
        result['miou'] = (result['miou/vis'] + result['miou/occ']) / 2.
        return result
import unittest
import collections
import dimod
from dwave.system.composites import VirtualGraphComposite
from dwave.system.testing import MockDWaveSampler
class TestVirtualGraphWithMockDWaveSampler(unittest.TestCase):
    """Exercises VirtualGraphComposite against the mock D-Wave sampler."""

    def setUp(self):
        self.sampler = MockDWaveSampler()

    def test_smoke(self):
        child_sampler = MockDWaveSampler()
        sampler = VirtualGraphComposite(child_sampler, {'a': [0]}, flux_bias_num_reads=1)
        # depending on how recently flux bias data was gathered, this may be true
        child_sampler.flux_biases_flag = False
        if sampler.flux_biases:
            sampler.sample_ising({'a': -1}, {})
            self.assertTrue(child_sampler.flux_biases_flag)  # true when some have been provided to sample_ising

    def test_structure_keyword_setting(self):
        sampler = VirtualGraphComposite(self.sampler, embedding={'a': set(range(8)),
                                                                 'b': set(range(8, 16)),
                                                                 'c': set(range(16, 24))},
                                        flux_biases=False)
        nodelist, edgelist, adj = sampler.structure
        self.assertEqual(nodelist, ['a', 'b', 'c'])
        self.assertEqual(edgelist, [('a', 'b'), ('b', 'c')])
        self.assertEqual(adj, {'a': {'b'}, 'b': {'a', 'c'}, 'c': {'b'}})

        # mixed (unalike) variable-name types should also produce a structure
        sampler = VirtualGraphComposite(self.sampler, embedding={'a': set(range(8)),
                                                                 1: set(range(8, 16)),
                                                                 'c': set(range(16, 24))},
                                        flux_biases=False)
        nodelist, edgelist, adj = sampler.structure
        self.assertEqual(set(nodelist), {'a', 1, 'c'})
        self.assertEqual(adj, {'a': {1}, 1: {'a', 'c'}, 'c': {1}})
        self.assertIsInstance(edgelist, list)
        self.assertIsInstance(nodelist, list)

        # edges should still be unique (each pair appears in one orientation)
        for u in adj:
            for v in adj[u]:
                if (u, v) in edgelist:
                    assert (v, u) not in edgelist
                if (v, u) in edgelist:
                    assert (u, v) not in edgelist
                assert (u, v) in edgelist or (v, u) in edgelist

    def test_embedding_parameter(self):
        """Given embedding should be saved as a parameter"""
        sampler = self.sampler
        __, __, adj = sampler.structure
        embedding = {v: {v} for v in adj}
        sampler = VirtualGraphComposite(sampler, embedding=embedding, flux_biases=False)
        self.assertEqual(sampler.embedding, embedding)

    def test_simple_complete_graph_sample_ising(self):
        """sample_ising on a K4."""
        K4 = VirtualGraphComposite(self.sampler, embedding={0: {0, 4},
                                                            1: {1, 5},
                                                            2: {2, 6},
                                                            3: {3, 7}},
                                   flux_biases=False)
        K4.sample_ising({0: .1, 1: .2}, {(0, 1): 1.5})
class Test_ValidateChainStrength(unittest.TestCase):
    """Unit tests for the private _validate_chain_strength helper."""

    def test_no_properties(self):
        from dwave.system.composites.virtual_graph import _validate_chain_strength
        Sampler = collections.namedtuple('Sampler', ['properties'])
        # With no j_range information, validation must fail for any input.
        sampler = Sampler({})
        with self.assertRaises(ValueError):
            _validate_chain_strength(sampler, None)
        with self.assertRaises(ValueError):
            _validate_chain_strength(sampler, 1.0)

    def test_j_range(self):
        from dwave.system.composites.virtual_graph import _validate_chain_strength
        Sampler = collections.namedtuple('Sampler', ['properties'])
        sampler = Sampler({'j_range': [-1.0, 1.0]})
        # Default chain strength is the magnitude of the j_range bound.
        self.assertEqual(_validate_chain_strength(sampler, None), 1.0)
        self.assertEqual(_validate_chain_strength(sampler, .5), .5)
        # Requests beyond the supported range are rejected.
        with self.assertRaises(ValueError):
            _validate_chain_strength(sampler, 1.5)

    def test_extended_j_range(self):
        from dwave.system.composites.virtual_graph import _validate_chain_strength
        Sampler = collections.namedtuple('Sampler', ['properties'])
        sampler = Sampler({'j_range': [-1.0, 1.0], 'extended_j_range': [-2.0, 2.0]})
        # extended_j_range widens both the default and the accepted maximum.
        self.assertEqual(_validate_chain_strength(sampler, None), 2.0)
        self.assertEqual(_validate_chain_strength(sampler, .5), .5)
        self.assertEqual(_validate_chain_strength(sampler, 1.5), 1.5)
import collections
import dimod
from dwave.system.composites import VirtualGraphComposite
from dwave.system.testing import MockDWaveSampler
class TestVirtualGraphWithMockDWaveSampler(unittest.TestCase):
    """Exercises VirtualGraphComposite against the mock D-Wave sampler."""

    def setUp(self):
        self.sampler = MockDWaveSampler()

    def test_smoke(self):
        child_sampler = MockDWaveSampler()
        sampler = VirtualGraphComposite(child_sampler, {'a': [0]}, flux_bias_num_reads=1)
        # depending on how recently flux bias data was gathered, this may be true
        child_sampler.flux_biases_flag = False
        if sampler.flux_biases:
            sampler.sample_ising({'a': -1}, {})
            self.assertTrue(child_sampler.flux_biases_flag)  # true when some have been provided to sample_ising

    def test_structure_keyword_setting(self):
        sampler = VirtualGraphComposite(self.sampler, embedding={'a': set(range(8)),
                                                                 'b': set(range(8, 16)),
                                                                 'c': set(range(16, 24))},
                                        flux_biases=False)
        nodelist, edgelist, adj = sampler.structure
        self.assertEqual(nodelist, ['a', 'b', 'c'])
        self.assertEqual(edgelist, [('a', 'b'), ('b', 'c')])
        self.assertEqual(adj, {'a': {'b'}, 'b': {'a', 'c'}, 'c': {'b'}})

        # mixed (unalike) variable-name types should also produce a structure
        sampler = VirtualGraphComposite(self.sampler, embedding={'a': set(range(8)),
                                                                 1: set(range(8, 16)),
                                                                 'c': set(range(16, 24))},
                                        flux_biases=False)
        nodelist, edgelist, adj = sampler.structure
        self.assertEqual(set(nodelist), {'a', 1, 'c'})
        self.assertEqual(adj, {'a': {1}, 1: {'a', 'c'}, 'c': {1}})
        self.assertIsInstance(edgelist, list)
        self.assertIsInstance(nodelist, list)

        # edges should still be unique (each pair appears in one orientation)
        for u in adj:
            for v in adj[u]:
                if (u, v) in edgelist:
                    assert (v, u) not in edgelist
                if (v, u) in edgelist:
                    assert (u, v) not in edgelist
                assert (u, v) in edgelist or (v, u) in edgelist

    def test_embedding_parameter(self):
        """Given embedding should be saved as a parameter"""
        sampler = self.sampler
        __, __, adj = sampler.structure
        embedding = {v: {v} for v in adj}
        sampler = VirtualGraphComposite(sampler, embedding=embedding, flux_biases=False)
        self.assertEqual(sampler.embedding, embedding)

    def test_simple_complete_graph_sample_ising(self):
        """sample_ising on a K4."""
        K4 = VirtualGraphComposite(self.sampler, embedding={0: {0, 4},
                                                            1: {1, 5},
                                                            2: {2, 6},
                                                            3: {3, 7}},
                                   flux_biases=False)
        K4.sample_ising({0: .1, 1: .2}, {(0, 1): 1.5})
class Test_ValidateChainStrength(unittest.TestCase):
    """Unit tests for the private _validate_chain_strength helper."""

    def test_no_properties(self):
        from dwave.system.composites.virtual_graph import _validate_chain_strength
        Sampler = collections.namedtuple('Sampler', ['properties'])
        # With no j_range information, validation must fail for any input.
        sampler = Sampler({})
        with self.assertRaises(ValueError):
            _validate_chain_strength(sampler, None)
        with self.assertRaises(ValueError):
            _validate_chain_strength(sampler, 1.0)

    def test_j_range(self):
        from dwave.system.composites.virtual_graph import _validate_chain_strength
        Sampler = collections.namedtuple('Sampler', ['properties'])
        sampler = Sampler({'j_range': [-1.0, 1.0]})
        # Default chain strength is the magnitude of the j_range bound.
        self.assertEqual(_validate_chain_strength(sampler, None), 1.0)
        self.assertEqual(_validate_chain_strength(sampler, .5), .5)
        # Requests beyond the supported range are rejected.
        with self.assertRaises(ValueError):
            _validate_chain_strength(sampler, 1.5)

    def test_extended_j_range(self):
        from dwave.system.composites.virtual_graph import _validate_chain_strength
        Sampler = collections.namedtuple('Sampler', ['properties'])
        sampler = Sampler({'j_range': [-1.0, 1.0], 'extended_j_range': [-2.0, 2.0]})
        # extended_j_range widens both the default and the accepted maximum.
        self.assertEqual(_validate_chain_strength(sampler, None), 2.0)
        self.assertEqual(_validate_chain_strength(sampler, .5), .5)
        self.assertEqual(_validate_chain_strength(sampler, 1.5), 1.5)
import torch
from torch.utils.data.sampler import Sampler
def uxxxx_to_utf8(in_str):
    """Decode a space-separated 'uxxxx' token string into a unicode string.

    Each token is either a special symbol (<unk>, <s>, </s>), passed through
    unchanged, or 'u' followed by the hexadecimal code point of a character.
    Tokens that cannot be parsed are kept verbatim rather than corrupting the
    output (the old code printed an error and then reused a stale or
    undefined variable).
    """
    if in_str.strip() == "":
        return ""
    pieces = []
    for token in in_str.split():
        if token == "<unk>" or token == "<s>" or token == "</s>":
            # Special vocabulary symbols pass through unchanged.
            pieces.append(token)
            continue
        # Strip the leading 'u' and decode the hex code point.
        cur_char = token[1:]
        try:
            pieces.append(chr(int(cur_char, 16)))
        except ValueError:
            # Narrow except: int()/chr() raise ValueError on bad input.
            print("Exception converting cur_char = [%s]" % cur_char)
            pieces.append(token)
    return "".join(pieces)
def utf8_to_uxxxx(in_str, output_array=False):
    """Encode each character of *in_str* as a lower-case 'uxxxx' hex token.

    Returns a list of tokens when output_array is True, otherwise the tokens
    joined with single spaces.
    """
    # "%04x" == hex(...)[2:].zfill(4).lower(): zero-padded, lower-case hex.
    tokens = ["u%04x" % ord(ch) for ch in in_str]
    if output_array:
        return tokens
    return " ".join(tokens)
class OcrGroupedSampler(Sampler):
    """Dataset is divided into sub-groups, G_1, G_2, ..., G_k.

    Samples randomly within G_1, then moves on to sample randomly within
    G_2, etc., all the way to G_k.

    Arguments:
        data_source (Dataset): dataset to sample from; ``data_source.src``
            must expose ``size_group_keys`` (ordered group keys) and
            ``size_groups`` (key -> list of sample indices).
        rand (bool): shuffle within each group on every pass.
        max_items (int): if positive, stop after yielding this many items.
        fixed_rand (bool): draw one permutation per group and reuse it.
    """

    def __init__(self, data_source, rand=True, max_items=-1, fixed_rand=False):
        self.size_group_keys = data_source.src.size_group_keys
        self.size_groups = data_source.src.size_groups
        self.num_samples = len(data_source.src)
        self.rand = rand
        self.fixed_rand = fixed_rand
        self.max_items = max_items
        # Cached per-group permutations; used only when fixed_rand is set.
        self.rand_perm = dict()

    def __iter__(self):
        n_items = 0
        for g in self.size_group_keys:
            group = self.size_groups[g]
            if len(group) == 0:
                continue
            if self.fixed_rand:
                # Draw the permutation once and reuse it on later passes.
                if g not in self.rand_perm:
                    self.rand_perm[g] = torch.randperm(len(group)).long()
                indices = self.rand_perm[g]
            elif self.rand:
                indices = torch.randperm(len(group)).long()
            else:
                indices = range(len(group))
            for g_idx in indices:
                n_items += 1
                if self.max_items > 0 and n_items > self.max_items:
                    # PEP 479: raising StopIteration inside a generator is a
                    # RuntimeError on Python 3.7+; return terminates cleanly.
                    return
                yield group[g_idx]
        # Falling off the end stops iteration; the old explicit
        # `raise StopIteration` here crashed under PEP 479.

    def __len__(self):
        return self.num_samples
from torch.utils.data.sampler import Sampler
def uxxxx_to_utf8(in_str):
    """Decode a space-separated 'uxxxx' token string into a unicode string.

    Special symbols (<unk>, <s>, </s>) are copied through unchanged; any
    other token is 'u' plus a hex code point. Unparseable tokens are kept
    verbatim — the original bare `except` printed a message and then used a
    stale or undefined `cur_utf8_char`, corrupting the result.
    """
    if in_str.strip() == "":
        return ""
    out = []
    for token in in_str.split():
        if token == "<unk>" or token == "<s>" or token == "</s>":
            out.append(token)
            continue
        cur_char = token[1:]  # drop the leading 'u'
        try:
            out.append(chr(int(cur_char, 16)))
        except ValueError:
            print("Exception converting cur_char = [%s]" % cur_char)
            out.append(token)
    return "".join(out)
def utf8_to_uxxxx(in_str, output_array=False):
    """Map every character to a 'uxxxx' token (zero-padded lower-case hex).

    When output_array is True the tokens are returned as a list, otherwise
    as a single space-separated string.
    """
    encoded = ["u%04x" % ord(character) for character in in_str]
    return encoded if output_array else " ".join(encoded)
class OcrGroupedSampler(Sampler):
    """Dataset is divided into sub-groups, G_1, G_2, ..., G_k.

    Samples randomly in G_1, then moves on to sample randomly in G_2, etc.,
    all the way to G_k.

    Arguments:
        data_source (Dataset): dataset to sample from; ``data_source.src``
            must expose ``size_group_keys`` and ``size_groups``.
        rand (bool): shuffle within each group on every pass.
        max_items (int): if positive, yield at most this many items.
        fixed_rand (bool): shuffle each group once and reuse that order.
    """

    def __init__(self, data_source, rand=True, max_items=-1, fixed_rand=False):
        self.size_group_keys = data_source.src.size_group_keys
        self.size_groups = data_source.src.size_groups
        self.num_samples = len(data_source.src)
        self.rand = rand
        self.fixed_rand = fixed_rand
        self.max_items = max_items
        # Per-group cached permutations for fixed_rand mode.
        self.rand_perm = dict()

    def __iter__(self):
        n_items = 0
        for g in self.size_group_keys:
            members = self.size_groups[g]
            if len(members) == 0:
                continue
            if self.fixed_rand:
                if g not in self.rand_perm:
                    self.rand_perm[g] = torch.randperm(len(members)).long()
                order = self.rand_perm[g]
            elif self.rand:
                order = torch.randperm(len(members)).long()
            else:
                order = range(len(members))
            for idx in order:
                n_items += 1
                if self.max_items > 0 and n_items > self.max_items:
                    # Fixed: `raise StopIteration` inside a generator is a
                    # RuntimeError since PEP 479 (Python 3.7+).
                    return
                yield members[idx]
        # Generator exhaustion ends iteration; no explicit raise needed.

    def __len__(self):
        return self.num_samples
import collections
import testtools
from neutron.db import api as db
from neutron.plugins.ml2.drivers.cisco import exceptions
from neutron.plugins.ml2.drivers.cisco import nexus_db_v2
from neutron.tests import base
class CiscoNexusDbTest(base.BaseTestCase):
    """Unit tests for Cisco mechanism driver's Nexus port binding database."""

    # Lightweight stand-in describing one expected binding row.
    NpbObj = collections.namedtuple('NpbObj', 'port vlan switch instance')

    def setUp(self):
        super(CiscoNexusDbTest, self).setUp()
        db.configure_db()
        self.addCleanup(db.clear_db)

    def _npb_test_obj(self, pnum, vnum, switch='10.9.8.7', instance=None):
        """Creates a Nexus port binding test object from a pair of numbers."""
        # Fixed: compare with ==, not `is` — identity comparison against a
        # str literal only works by accident of CPython interning and emits
        # a SyntaxWarning on modern interpreters.
        if pnum == 'router':
            port = pnum
        else:
            port = '1/%s' % pnum
        if instance is None:
            instance = 'instance_%s_%s' % (pnum, vnum)
        return self.NpbObj(port, vnum, switch, instance)

    def _assert_bindings_match(self, npb, npb_obj):
        """Asserts that a port binding matches a port binding test obj."""
        self.assertEqual(npb.port_id, npb_obj.port)
        self.assertEqual(npb.vlan_id, npb_obj.vlan)
        self.assertEqual(npb.switch_ip, npb_obj.switch)
        self.assertEqual(npb.instance_id, npb_obj.instance)

    def _add_binding_to_db(self, npb):
        """Adds a port binding to the Nexus database."""
        return nexus_db_v2.add_nexusport_binding(
            npb.port, npb.vlan, npb.switch, npb.instance)

    def _add_bindings_to_db(self, npbs):
        """Adds a list of port bindings to the Nexus database."""
        for npb in npbs:
            nexus_db_v2.add_nexusport_binding(
                npb.port, npb.vlan, npb.switch, npb.instance)

    def _remove_binding_from_db(self, npb):
        """Removes a port binding from the Nexus database."""
        return nexus_db_v2.remove_nexusport_binding(
            npb.port, npb.vlan, npb.switch, npb.instance)

    def _get_nexusport_binding(self, npb):
        """Gets a port binding based on port, vlan, switch, and instance."""
        return nexus_db_v2.get_nexusport_binding(
            npb.port, npb.vlan, npb.switch, npb.instance)

    def _get_nexusvlan_binding(self, npb):
        """Gets port bindings based on vlan and switch."""
        return nexus_db_v2.get_nexusvlan_binding(npb.vlan, npb.switch)

    def _get_nexusvm_binding(self, npb):
        """Gets port bindings based on vlan and instance."""
        return nexus_db_v2.get_nexusvm_binding(npb.vlan, npb.instance)

    def _get_port_vlan_switch_binding(self, npb):
        """Gets port bindings based on port, vlan, and switch."""
        return nexus_db_v2.get_port_vlan_switch_binding(
            npb.port, npb.vlan, npb.switch)

    def _get_port_switch_bindings(self, npb):
        """Get port bindings based on port and switch."""
        return nexus_db_v2.get_port_switch_bindings(npb.port, npb.switch)

    def test_nexusportbinding_add_remove(self):
        """Tests add and removal of port bindings from the Nexus database."""
        npb11 = self._npb_test_obj(10, 100)
        npb = self._add_binding_to_db(npb11)
        self._assert_bindings_match(npb, npb11)
        npb = self._remove_binding_from_db(npb11)
        self.assertEqual(len(npb), 1)
        self._assert_bindings_match(npb[0], npb11)
        # Removing the same binding twice must fail.
        with testtools.ExpectedException(exceptions.NexusPortBindingNotFound):
            self._remove_binding_from_db(npb11)

    def test_nexusportbinding_get(self):
        """Tests get of specific port bindings from the database."""
        npb11 = self._npb_test_obj(10, 100)
        npb21 = self._npb_test_obj(20, 100)
        npb22 = self._npb_test_obj(20, 200)
        self._add_bindings_to_db([npb11, npb21, npb22])
        npb = self._get_nexusport_binding(npb11)
        self.assertEqual(len(npb), 1)
        self._assert_bindings_match(npb[0], npb11)
        npb = self._get_nexusport_binding(npb21)
        self.assertEqual(len(npb), 1)
        self._assert_bindings_match(npb[0], npb21)
        npb = self._get_nexusport_binding(npb22)
        self.assertEqual(len(npb), 1)
        self._assert_bindings_match(npb[0], npb22)
        with testtools.ExpectedException(exceptions.NexusPortBindingNotFound):
            nexus_db_v2.get_nexusport_binding(
                npb21.port, npb21.vlan, npb21.switch, "dummyInstance")

    def test_nexusvlanbinding_get(self):
        """Test get of port bindings based on vlan and switch."""
        npb11 = self._npb_test_obj(10, 100)
        npb21 = self._npb_test_obj(20, 100)
        npb22 = self._npb_test_obj(20, 200)
        self._add_bindings_to_db([npb11, npb21, npb22])
        npb_all_v100 = self._get_nexusvlan_binding(npb11)
        self.assertEqual(len(npb_all_v100), 2)
        npb_v200 = self._get_nexusvlan_binding(npb22)
        self.assertEqual(len(npb_v200), 1)
        self._assert_bindings_match(npb_v200[0], npb22)
        with testtools.ExpectedException(exceptions.NexusPortBindingNotFound):
            nexus_db_v2.get_nexusvlan_binding(npb21.vlan, "dummySwitch")

    def test_nexusvmbinding_get(self):
        """Test get of port bindings based on vlan and instance."""
        npb11 = self._npb_test_obj(10, 100)
        npb21 = self._npb_test_obj(20, 100)
        npb22 = self._npb_test_obj(20, 200)
        self._add_bindings_to_db([npb11, npb21, npb22])
        npb = self._get_nexusvm_binding(npb21)
        self._assert_bindings_match(npb, npb21)
        npb = self._get_nexusvm_binding(npb22)
        self._assert_bindings_match(npb, npb22)
        with testtools.ExpectedException(exceptions.NexusPortBindingNotFound):
            nexus_db_v2.get_nexusvm_binding(npb21.vlan, "dummyInstance")

    def test_nexusportvlanswitchbinding_get(self):
        """Tests get of port bindings based on port, vlan, and switch."""
        npb11 = self._npb_test_obj(10, 100)
        npb21 = self._npb_test_obj(20, 100)
        self._add_bindings_to_db([npb11, npb21])
        npb = self._get_port_vlan_switch_binding(npb11)
        self.assertEqual(len(npb), 1)
        self._assert_bindings_match(npb[0], npb11)
        with testtools.ExpectedException(exceptions.NexusPortBindingNotFound):
            nexus_db_v2.get_port_vlan_switch_binding(
                npb21.port, npb21.vlan, "dummySwitch")

    def test_nexusportswitchbinding_get(self):
        """Tests get of port bindings based on port and switch."""
        npb11 = self._npb_test_obj(10, 100)
        npb21 = self._npb_test_obj(20, 100, switch='2.2.2.2')
        npb22 = self._npb_test_obj(20, 200, switch='2.2.2.2')
        self._add_bindings_to_db([npb11, npb21, npb22])
        npb = self._get_port_switch_bindings(npb11)
        self.assertEqual(len(npb), 1)
        self._assert_bindings_match(npb[0], npb11)
        npb_all_p20 = self._get_port_switch_bindings(npb21)
        self.assertEqual(len(npb_all_p20), 2)
        # Unknown switch yields no bindings (None, not an exception).
        npb = nexus_db_v2.get_port_switch_bindings(npb21.port, "dummySwitch")
        self.assertIsNone(npb)

    def test_nexussvibinding_get(self):
        """Tests get of switch virtual interface port bindings."""
        npbr1 = self._npb_test_obj('router', 100)
        npb21 = self._npb_test_obj(20, 100)
        self._add_bindings_to_db([npbr1, npb21])
        npb_svi = nexus_db_v2.get_nexussvi_bindings()
        self.assertEqual(len(npb_svi), 1)
        self._assert_bindings_match(npb_svi[0], npbr1)
        npbr2 = self._npb_test_obj('router', 200)
        self._add_binding_to_db(npbr2)
        npb_svi = nexus_db_v2.get_nexussvi_bindings()
        self.assertEqual(len(npb_svi), 2)

    def test_nexusbinding_update(self):
        """Tests update of vlan IDs for port bindings."""
        npb11 = self._npb_test_obj(10, 100, switch='1.1.1.1', instance='test')
        npb21 = self._npb_test_obj(20, 100, switch='1.1.1.1', instance='test')
        self._add_bindings_to_db([npb11, npb21])
        npb_all_v100 = nexus_db_v2.get_nexusvlan_binding(100, '1.1.1.1')
        self.assertEqual(len(npb_all_v100), 2)
        npb22 = self._npb_test_obj(20, 200, switch='1.1.1.1', instance='test')
        npb = nexus_db_v2.update_nexusport_binding(npb21.port, 200)
        self._assert_bindings_match(npb, npb22)
        npb_all_v100 = nexus_db_v2.get_nexusvlan_binding(100, '1.1.1.1')
        self.assertEqual(len(npb_all_v100), 1)
        self._assert_bindings_match(npb_all_v100[0], npb11)
        # Updating to vlan 0 is treated as a no-op and returns None.
        npb = nexus_db_v2.update_nexusport_binding(npb21.port, 0)
        self.assertIsNone(npb)
        npb33 = self._npb_test_obj(30, 300, switch='1.1.1.1', instance='test')
        with testtools.ExpectedException(exceptions.NexusPortBindingNotFound):
            nexus_db_v2.update_nexusport_binding(npb33.port, 200)
import collections
import testtools
from neutron.db import api as db
from neutron.plugins.ml2.drivers.cisco import exceptions
from neutron.plugins.ml2.drivers.cisco import nexus_db_v2
from neutron.tests import base
class CiscoNexusDbTest(base.BaseTestCase):
    """Unit tests for Cisco mechanism driver's Nexus port binding database."""

    # Tuple describing one expected binding row.
    NpbObj = collections.namedtuple('NpbObj', 'port vlan switch instance')

    def setUp(self):
        super(CiscoNexusDbTest, self).setUp()
        db.configure_db()
        self.addCleanup(db.clear_db)

    def _npb_test_obj(self, pnum, vnum, switch='10.9.8.7', instance=None):
        """Creates a Nexus port binding test object from a pair of numbers."""
        # Fixed: `pnum is 'router'` relied on CPython string interning;
        # equality is the correct comparison for values.
        if pnum == 'router':
            port = pnum
        else:
            port = '1/%s' % pnum
        if instance is None:
            instance = 'instance_%s_%s' % (pnum, vnum)
        return self.NpbObj(port, vnum, switch, instance)

    def _assert_bindings_match(self, npb, npb_obj):
        """Asserts that a port binding matches a port binding test obj."""
        self.assertEqual(npb.port_id, npb_obj.port)
        self.assertEqual(npb.vlan_id, npb_obj.vlan)
        self.assertEqual(npb.switch_ip, npb_obj.switch)
        self.assertEqual(npb.instance_id, npb_obj.instance)

    def _add_binding_to_db(self, npb):
        """Adds a port binding to the Nexus database."""
        return nexus_db_v2.add_nexusport_binding(
            npb.port, npb.vlan, npb.switch, npb.instance)

    def _add_bindings_to_db(self, npbs):
        """Adds a list of port bindings to the Nexus database."""
        for npb in npbs:
            nexus_db_v2.add_nexusport_binding(
                npb.port, npb.vlan, npb.switch, npb.instance)

    def _remove_binding_from_db(self, npb):
        """Removes a port binding from the Nexus database."""
        return nexus_db_v2.remove_nexusport_binding(
            npb.port, npb.vlan, npb.switch, npb.instance)

    def _get_nexusport_binding(self, npb):
        """Gets a port binding based on port, vlan, switch, and instance."""
        return nexus_db_v2.get_nexusport_binding(
            npb.port, npb.vlan, npb.switch, npb.instance)

    def _get_nexusvlan_binding(self, npb):
        """Gets port bindings based on vlan and switch."""
        return nexus_db_v2.get_nexusvlan_binding(npb.vlan, npb.switch)

    def _get_nexusvm_binding(self, npb):
        """Gets port bindings based on vlan and instance."""
        return nexus_db_v2.get_nexusvm_binding(npb.vlan, npb.instance)

    def _get_port_vlan_switch_binding(self, npb):
        """Gets port bindings based on port, vlan, and switch."""
        return nexus_db_v2.get_port_vlan_switch_binding(
            npb.port, npb.vlan, npb.switch)

    def _get_port_switch_bindings(self, npb):
        """Get port bindings based on port and switch."""
        return nexus_db_v2.get_port_switch_bindings(npb.port, npb.switch)

    def test_nexusportbinding_add_remove(self):
        """Tests add and removal of port bindings from the Nexus database."""
        npb11 = self._npb_test_obj(10, 100)
        npb = self._add_binding_to_db(npb11)
        self._assert_bindings_match(npb, npb11)
        npb = self._remove_binding_from_db(npb11)
        self.assertEqual(len(npb), 1)
        self._assert_bindings_match(npb[0], npb11)
        # A second removal of the same binding must fail.
        with testtools.ExpectedException(exceptions.NexusPortBindingNotFound):
            self._remove_binding_from_db(npb11)

    def test_nexusportbinding_get(self):
        """Tests get of specific port bindings from the database."""
        npb11 = self._npb_test_obj(10, 100)
        npb21 = self._npb_test_obj(20, 100)
        npb22 = self._npb_test_obj(20, 200)
        self._add_bindings_to_db([npb11, npb21, npb22])
        npb = self._get_nexusport_binding(npb11)
        self.assertEqual(len(npb), 1)
        self._assert_bindings_match(npb[0], npb11)
        npb = self._get_nexusport_binding(npb21)
        self.assertEqual(len(npb), 1)
        self._assert_bindings_match(npb[0], npb21)
        npb = self._get_nexusport_binding(npb22)
        self.assertEqual(len(npb), 1)
        self._assert_bindings_match(npb[0], npb22)
        with testtools.ExpectedException(exceptions.NexusPortBindingNotFound):
            nexus_db_v2.get_nexusport_binding(
                npb21.port, npb21.vlan, npb21.switch, "dummyInstance")

    def test_nexusvlanbinding_get(self):
        """Test get of port bindings based on vlan and switch."""
        npb11 = self._npb_test_obj(10, 100)
        npb21 = self._npb_test_obj(20, 100)
        npb22 = self._npb_test_obj(20, 200)
        self._add_bindings_to_db([npb11, npb21, npb22])
        npb_all_v100 = self._get_nexusvlan_binding(npb11)
        self.assertEqual(len(npb_all_v100), 2)
        npb_v200 = self._get_nexusvlan_binding(npb22)
        self.assertEqual(len(npb_v200), 1)
        self._assert_bindings_match(npb_v200[0], npb22)
        with testtools.ExpectedException(exceptions.NexusPortBindingNotFound):
            nexus_db_v2.get_nexusvlan_binding(npb21.vlan, "dummySwitch")

    def test_nexusvmbinding_get(self):
        """Test get of port bindings based on vlan and instance."""
        npb11 = self._npb_test_obj(10, 100)
        npb21 = self._npb_test_obj(20, 100)
        npb22 = self._npb_test_obj(20, 200)
        self._add_bindings_to_db([npb11, npb21, npb22])
        npb = self._get_nexusvm_binding(npb21)
        self._assert_bindings_match(npb, npb21)
        npb = self._get_nexusvm_binding(npb22)
        self._assert_bindings_match(npb, npb22)
        with testtools.ExpectedException(exceptions.NexusPortBindingNotFound):
            nexus_db_v2.get_nexusvm_binding(npb21.vlan, "dummyInstance")

    def test_nexusportvlanswitchbinding_get(self):
        """Tests get of port bindings based on port, vlan, and switch."""
        npb11 = self._npb_test_obj(10, 100)
        npb21 = self._npb_test_obj(20, 100)
        self._add_bindings_to_db([npb11, npb21])
        npb = self._get_port_vlan_switch_binding(npb11)
        self.assertEqual(len(npb), 1)
        self._assert_bindings_match(npb[0], npb11)
        with testtools.ExpectedException(exceptions.NexusPortBindingNotFound):
            nexus_db_v2.get_port_vlan_switch_binding(
                npb21.port, npb21.vlan, "dummySwitch")

    def test_nexusportswitchbinding_get(self):
        """Tests get of port bindings based on port and switch."""
        npb11 = self._npb_test_obj(10, 100)
        npb21 = self._npb_test_obj(20, 100, switch='2.2.2.2')
        npb22 = self._npb_test_obj(20, 200, switch='2.2.2.2')
        self._add_bindings_to_db([npb11, npb21, npb22])
        npb = self._get_port_switch_bindings(npb11)
        self.assertEqual(len(npb), 1)
        self._assert_bindings_match(npb[0], npb11)
        npb_all_p20 = self._get_port_switch_bindings(npb21)
        self.assertEqual(len(npb_all_p20), 2)
        # Unknown switch yields None rather than raising.
        npb = nexus_db_v2.get_port_switch_bindings(npb21.port, "dummySwitch")
        self.assertIsNone(npb)

    def test_nexussvibinding_get(self):
        """Tests get of switch virtual interface port bindings."""
        npbr1 = self._npb_test_obj('router', 100)
        npb21 = self._npb_test_obj(20, 100)
        self._add_bindings_to_db([npbr1, npb21])
        npb_svi = nexus_db_v2.get_nexussvi_bindings()
        self.assertEqual(len(npb_svi), 1)
        self._assert_bindings_match(npb_svi[0], npbr1)
        npbr2 = self._npb_test_obj('router', 200)
        self._add_binding_to_db(npbr2)
        npb_svi = nexus_db_v2.get_nexussvi_bindings()
        self.assertEqual(len(npb_svi), 2)

    def test_nexusbinding_update(self):
        """Tests update of vlan IDs for port bindings."""
        npb11 = self._npb_test_obj(10, 100, switch='1.1.1.1', instance='test')
        npb21 = self._npb_test_obj(20, 100, switch='1.1.1.1', instance='test')
        self._add_bindings_to_db([npb11, npb21])
        npb_all_v100 = nexus_db_v2.get_nexusvlan_binding(100, '1.1.1.1')
        self.assertEqual(len(npb_all_v100), 2)
        npb22 = self._npb_test_obj(20, 200, switch='1.1.1.1', instance='test')
        npb = nexus_db_v2.update_nexusport_binding(npb21.port, 200)
        self._assert_bindings_match(npb, npb22)
        npb_all_v100 = nexus_db_v2.get_nexusvlan_binding(100, '1.1.1.1')
        self.assertEqual(len(npb_all_v100), 1)
        self._assert_bindings_match(npb_all_v100[0], npb11)
        # vlan 0 is treated as "no update"; None is returned.
        npb = nexus_db_v2.update_nexusport_binding(npb21.port, 0)
        self.assertIsNone(npb)
        npb33 = self._npb_test_obj(30, 300, switch='1.1.1.1', instance='test')
        with testtools.ExpectedException(exceptions.NexusPortBindingNotFound):
            nexus_db_v2.update_nexusport_binding(npb33.port, 200)
import argparse
import chainer
from chainer import iterators
from chainercv.datasets import coco_bbox_label_names
from chainercv.datasets import COCOBboxDataset
from chainercv.evaluations import eval_detection_coco
from chainercv.utils import apply_to_iterator
from chainercv.utils import ProgressHook
from light_head_rcnn.links import LightHeadRCNNResNet101
def main():
    """Evaluate a pretrained LightHeadRCNNResNet101 detector on COCO minival."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--pretrained-model', default='coco')
    parser.add_argument('--gpu', type=int, default=-1)
    args = parser.parse_args()

    # Build the detector and switch to evaluation-time thresholds.
    model = LightHeadRCNNResNet101(
        n_fg_class=len(coco_bbox_label_names),
        pretrained_model=args.pretrained_model)
    model.use_preset('evaluate')
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    # Crowded annotations and instance areas are required by the COCO metric.
    dataset = COCOBboxDataset(
        split='minival', use_crowded=True,
        return_crowded=True, return_area=True)
    data_iter = iterators.SerialIterator(
        dataset, 1, repeat=False, shuffle=False)

    inputs, outputs, extras = apply_to_iterator(
        model.predict, data_iter, hook=ProgressHook(len(dataset)))
    # The raw input images are not needed for scoring; free them explicitly.
    del inputs

    pred_bboxes, pred_labels, pred_scores = outputs
    gt_bboxes, gt_labels, gt_areas, gt_crowdeds = extras
    result = eval_detection_coco(
        pred_bboxes, pred_labels, pred_scores,
        gt_bboxes, gt_labels, gt_areas, gt_crowdeds)

    print('')
    for key in (
            'map/iou=0.50:0.95/area=all/max_dets=100',
            'map/iou=0.50/area=all/max_dets=100',
            'map/iou=0.75/area=all/max_dets=100',
            'map/iou=0.50:0.95/area=small/max_dets=100',
            'map/iou=0.50:0.95/area=medium/max_dets=100',
            'map/iou=0.50:0.95/area=large/max_dets=100',
            'mar/iou=0.50:0.95/area=all/max_dets=1',
            'mar/iou=0.50:0.95/area=all/max_dets=10',
            'mar/iou=0.50:0.95/area=all/max_dets=100',
            'mar/iou=0.50:0.95/area=small/max_dets=100',
            'mar/iou=0.50:0.95/area=medium/max_dets=100',
            'mar/iou=0.50:0.95/area=large/max_dets=100'):
        print('{:s}: {:f}'.format(key, result[key]))
# Standard script entry point.
if __name__ == '__main__':
    main()
import chainer
from chainer import iterators
from chainercv.datasets import coco_bbox_label_names
from chainercv.datasets import COCOBboxDataset
from chainercv.evaluations import eval_detection_coco
from chainercv.utils import apply_to_iterator
from chainercv.utils import ProgressHook
from light_head_rcnn.links import LightHeadRCNNResNet101
def main():
    """Run COCO-minival detection evaluation for LightHeadRCNNResNet101."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--pretrained-model', default='coco')
    parser.add_argument('--gpu', type=int, default=-1)
    args = parser.parse_args()

    model = LightHeadRCNNResNet101(
        n_fg_class=len(coco_bbox_label_names),
        pretrained_model=args.pretrained_model)
    model.use_preset('evaluate')
    # Move the model to the requested GPU (CPU when gpu < 0).
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    dataset = COCOBboxDataset(
        split='minival', use_crowded=True,
        return_crowded=True, return_area=True)
    it = iterators.SerialIterator(dataset, 1, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, it, hook=ProgressHook(len(dataset)))
    del in_values  # inputs are no longer needed once predictions exist

    pred_bboxes, pred_labels, pred_scores = out_values
    gt_bboxes, gt_labels, gt_areas, gt_crowdeds = rest_values
    result = eval_detection_coco(
        pred_bboxes, pred_labels, pred_scores,
        gt_bboxes, gt_labels, gt_areas, gt_crowdeds)

    metric_keys = [
        'map/iou=0.50:0.95/area=all/max_dets=100',
        'map/iou=0.50/area=all/max_dets=100',
        'map/iou=0.75/area=all/max_dets=100',
        'map/iou=0.50:0.95/area=small/max_dets=100',
        'map/iou=0.50:0.95/area=medium/max_dets=100',
        'map/iou=0.50:0.95/area=large/max_dets=100',
        'mar/iou=0.50:0.95/area=all/max_dets=1',
        'mar/iou=0.50:0.95/area=all/max_dets=10',
        'mar/iou=0.50:0.95/area=all/max_dets=100',
        'mar/iou=0.50:0.95/area=small/max_dets=100',
        'mar/iou=0.50:0.95/area=medium/max_dets=100',
        'mar/iou=0.50:0.95/area=large/max_dets=100',
    ]
    print('')
    for key in metric_keys:
        print('{:s}: {:f}'.format(key, result[key]))
# Run evaluation only when executed as a script.
if __name__ == '__main__':
    main()
import argparse
import math
import torch.nn as nn
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
from models import BayesNet
def train(args, model, device, train_loader, optimizer, epoch):
    """Run one training epoch, optimizing the sampled ELBO per batch."""
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        # Flatten images to 784-dim vectors on the target device.
        data = data.to(device).view(-1, 784)
        target = target.to(device)
        model.zero_grad()
        loss, nll, kl = model.sample_elbo(
            data, target, len(train_loader), args.num_samples)[:3]
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tNLL: {:.6f}, KL: {:.4f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), nll.item(), kl.item()))
def test(args, model, device, test_loader):
    """Report test-set NLL and accuracy using a Monte-Carlo average of
    the model's log-probabilities over ``args.num_samples`` weight draws."""
    model.eval()
    total_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data = data.to(device).view(-1, 784)
            target = target.to(device)
            # Average the predictive log-probabilities over weight samples.
            summed = 0
            for _ in range(args.num_samples):
                summed += model(data)
            mean_logprob = summed / args.num_samples
            total_loss += F.nll_loss(mean_logprob,
                                     target, reduction='sum').item()
            # Index of the max log-probability is the predicted class.
            pred = mean_logprob.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    total_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        total_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
def run(args):
    """Build FashionMNIST loaders and the Bayesian network, then train and
    evaluate for ``args.epochs`` epochs."""
    # Robustness fix: torch.cuda.current_device() raises when CUDA is not
    # available; fall back to CPU instead of crashing.
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    train_loader = torch.utils.data.DataLoader(
        datasets.FashionMNIST(
            './fmnist', train=True, download=True,
            transform=transform),
        batch_size=args.batch_size, shuffle=True)
    # NOTE(review): the test loader uses args.batch_size although
    # args.test_batch_size exists -- confirm which is intended.
    test_loader = torch.utils.data.DataLoader(
        datasets.FashionMNIST(
            './fmnist', train=False, download=True,
            transform=transform),
        batch_size=args.batch_size, shuffle=False)
    model = BayesNet(args).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(args, model, device, test_loader)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Optimisation / training-loop hyper-parameters.
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=128, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=14, metavar='N',
                        help='number of epochs to train (default: 14)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    # Network architecture.
    parser.add_argument("--input_size", type=int, default=784)
    parser.add_argument("--hidden_size", type=int, default=128)
    parser.add_argument("--output_size", type=int, default=10)
    parser.add_argument("--num_layers", type=int, default=2)
    # Scale-mixture prior and ELBO sampling parameters.
    parser.add_argument("--pi", type=float, default=0.5)
    parser.add_argument("--num_samples", type=int, default=5)
    args = parser.parse_args()
    # NOTE(review): several help strings disagree with the actual defaults
    # (test batch size, lr) -- confirm which values are intended.
    args.sigma_1 = math.exp(0)
    args.sigma_2 = math.exp(-6)
    run(args)
import math
import torch.nn as nn
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
from models import BayesNet
def train(args, model, device, train_loader, optimizer, epoch):
    """One epoch of Bayes-by-backprop training on the sampled ELBO."""
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        data = data.view(-1, 784)  # flatten 28x28 images
        model.zero_grad()
        elbo_terms = model.sample_elbo(
            data, target, len(train_loader), args.num_samples)
        loss, nll, kl = elbo_terms[0], elbo_terms[1], elbo_terms[2]
        loss.backward()
        optimizer.step()
        # Periodic progress report.
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tNLL: {:.6f}, KL: {:.4f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), nll.item(), kl.item()))
def test(args, model, device, test_loader):
    """Evaluate on the test set, averaging predictions over several
    stochastic forward passes, and print loss plus accuracy."""
    model.eval()
    running_loss = 0
    n_correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            data = data.view(-1, 784)
            # Accumulate log-probabilities over args.num_samples draws.
            accumulated = 0
            for _ in range(args.num_samples):
                accumulated += model(data)
            avg_logprob = accumulated / args.num_samples
            running_loss += F.nll_loss(avg_logprob,
                                       target, reduction='sum').item()
            prediction = avg_logprob.argmax(dim=1, keepdim=True)
            n_correct += prediction.eq(target.view_as(prediction)).sum().item()
    running_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        running_loss, n_correct, len(test_loader.dataset),
        100. * n_correct / len(test_loader.dataset)))
def run(args):
    """Set up FashionMNIST data loaders and the BayesNet model, then run
    the train/test loop for ``args.epochs`` epochs."""
    # Fix: torch.cuda.current_device() fails on CUDA-less machines;
    # select the device defensively instead.
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    train_loader = torch.utils.data.DataLoader(
        datasets.FashionMNIST(
            './fmnist', train=True, download=True,
            transform=transform),
        batch_size=args.batch_size, shuffle=True)
    # NOTE(review): args.test_batch_size is never used here -- confirm
    # whether the test loader should use it instead of args.batch_size.
    test_loader = torch.utils.data.DataLoader(
        datasets.FashionMNIST(
            './fmnist', train=False, download=True,
            transform=transform),
        batch_size=args.batch_size, shuffle=False)
    model = BayesNet(args).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(args, model, device, test_loader)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Training hyper-parameters.
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=128, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=14, metavar='N',
                        help='number of epochs to train (default: 14)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    # Architecture parameters consumed by BayesNet.
    parser.add_argument("--input_size", type=int, default=784)
    parser.add_argument("--hidden_size", type=int, default=128)
    parser.add_argument("--output_size", type=int, default=10)
    parser.add_argument("--num_layers", type=int, default=2)
    parser.add_argument("--pi", type=float, default=0.5)
    parser.add_argument("--num_samples", type=int, default=5)
    args = parser.parse_args()
    # Scale-mixture prior standard deviations (fixed, not CLI-exposed).
    args.sigma_1 = math.exp(0)
    args.sigma_2 = math.exp(-6)
    run(args)
import binascii
import json
import os
import os.path
from typing import Callable, Dict, Optional
import requests
from oauthlib.oauth2 import TokenExpiredError
from requests_oauthlib import OAuth2Session
from .exceptions import NeatoException, NeatoLoginException, NeatoRobotException
from .neato import Neato, Vendor
from .vorwerk import Vorwerk
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
class Session:
    """Base class for HTTP sessions against a vendor's beehive endpoint."""

    def __init__(self, vendor: Vendor):
        """Initialize the session from the vendor's endpoint and API version."""
        self.vendor = vendor
        self.endpoint = vendor.endpoint
        self.headers = {"Accept": vendor.beehive_version}

    def get(self, path, **kwargs):
        """Send a GET request to the specified path; subclasses must override."""
        raise NotImplementedError

    def urljoin(self, path):
        """Resolve *path* relative to the vendor endpoint."""
        return urljoin(self.endpoint, path)

    def generate_headers(
        self, custom_headers: Optional[Dict[str, str]] = None
    ) -> Dict[str, str]:
        """Merge self.headers with custom headers if necessary (custom wins)."""
        if custom_headers:
            return {**self.headers, **custom_headers}
        return self.headers
class PasswordSession(Session):
    """Session authenticated with an email/password login."""

    def __init__(self, email: str, password: str, vendor: Vendor = Neato()):
        """Create the session and immediately authenticate."""
        super().__init__(vendor=vendor)
        self._login(email, password)

    def _login(self, email: str, password: str):
        """Authenticate against the beehive API and store the access token.

        Raises NeatoLoginException for rejected credentials (HTTP 403) and
        NeatoRobotException for any other connectivity failure.
        """
        # A fresh random token accompanies every login request.
        payload = {
            "email": email,
            "password": password,
            "platform": "ios",
            "token": binascii.hexlify(os.urandom(64)).decode("utf8"),
        }
        try:
            response = requests.post(
                urljoin(self.endpoint, "sessions"),
                json=payload,
                headers=self.headers,
            )
            response.raise_for_status()
        except (
            requests.exceptions.ConnectionError,
            requests.exceptions.HTTPError,
            requests.exceptions.Timeout,
        ) as ex:
            if (
                isinstance(ex, requests.exceptions.HTTPError)
                and ex.response.status_code == 403
            ):
                raise NeatoLoginException(
                    "Unable to login to neato, check account credentials."
                ) from ex
            raise NeatoRobotException("Unable to connect to Neato API.") from ex
        # All subsequent requests carry the token in the Authorization header.
        access_token = response.json()["access_token"]
        self.headers["Authorization"] = "Token token=%s" % access_token

    def get(self, path, **kwargs):
        """GET *path* relative to the endpoint; raises NeatoException on failure."""
        headers = self.generate_headers(kwargs.pop("headers", None))
        try:
            response = requests.get(self.urljoin(path), headers=headers, **kwargs)
            response.raise_for_status()
        except (
            requests.exceptions.ConnectionError,
            requests.exceptions.HTTPError,
            requests.exceptions.Timeout,
        ) as ex:
            raise NeatoException("Unable to connect to neato the neato serves.") from ex
        return response
class OAuthSession(Session):
    """Session authenticated via an OAuth2 authorization-code flow."""

    def __init__(
        self,
        token: Optional[Dict[str, str]] = None,
        client_id: str = None,
        client_secret: str = None,
        redirect_uri: str = None,
        token_updater: Optional[Callable[[str], None]] = None,
        vendor: Vendor = Neato(),
    ):
        super().__init__(vendor=vendor)
        self._client_id = client_id
        self._client_secret = client_secret
        self._redirect_uri = redirect_uri
        self._token_updater = token_updater
        extra = {"client_id": self._client_id, "client_secret": self._client_secret}
        self._oauth = OAuth2Session(
            auto_refresh_kwargs=extra,
            client_id=client_id,
            token=token,
            redirect_uri=redirect_uri,
            token_updater=token_updater,
            scope=vendor.scope,
        )

    def refresh_tokens(self) -> dict:
        """Refresh and return new tokens."""
        token = self._oauth.refresh_token(f"{self.endpoint}/auth/token")
        if self._token_updater is not None:
            self._token_updater(token)
        return token

    def get_authorization_url(self) -> str:
        """Get an authorization url via oauth2."""
        # pylint: disable=unused-variable
        authorization_url, state = self._oauth.authorization_url(
            self.vendor.auth_endpoint
        )
        return authorization_url

    def fetch_token(self, authorization_response: str) -> Dict[str, str]:
        """Fetch an access token via oauth2."""
        token = self._oauth.fetch_token(
            self.vendor.token_endpoint,
            authorization_response=authorization_response,
            client_secret=self._client_secret,
        )
        return token

    def get(self, path: str, **kwargs) -> requests.Response:
        """Make a get request.

        We don't use the built-in token refresh mechanism of OAuth2 session
        because we want to allow overriding the token refresh logic.

        :raises NeatoException: on connection, HTTP or timeout errors.
        """
        url = self.urljoin(path)
        try:
            response = self._get(url, **kwargs)
            response.raise_for_status()
        except (
            requests.exceptions.ConnectionError,
            requests.exceptions.HTTPError,
            requests.exceptions.Timeout,
        ) as ex:
            raise NeatoException("Unable to connect to the Neato servers.") from ex
        return response

    def _get(self, path: str, **kwargs) -> requests.Response:
        """Get request without error handling.

        Refreshes the token if necessary.
        """
        headers = self.generate_headers(kwargs.pop("headers", None))
        try:
            return self._oauth.get(path, headers=headers, **kwargs)
        except TokenExpiredError:
            self._oauth.token = self.refresh_tokens()
            # Bug fix: retry with the same merged headers; the original
            # retried with self.headers and dropped the caller's custom
            # headers after a token refresh.
            return self._oauth.get(path, headers=headers, **kwargs)
class PasswordlessSession(Session):
    """Session using Vorwerk's passwordless (email OTP) authentication."""

    def __init__(
        self,
        token: Optional[Dict[str, str]] = None,
        client_id: str = None,
        token_updater: Optional[Callable[[str], None]] = None,
        vendor: Vendor = Vorwerk(),
    ):
        super().__init__(vendor=vendor)
        self._token = token
        self._client_id = client_id
        self._token_updater = token_updater

    def send_email_otp(self, email: str):
        """Request an authorization code via email."""
        response = requests.post(
            self.vendor.passwordless_endpoint,
            data=json.dumps(
                {
                    "client_id": self._client_id,
                    "connection": "email",
                    "email": email,
                    "send": "code",
                }
            ),
            headers={"Content-Type": "application/json"},
        )
        response.raise_for_status()

    def fetch_token_passwordless(self, email: str, code: str):
        """Fetch an access token using the emailed code."""
        response = requests.post(
            self.vendor.token_endpoint,
            data=json.dumps(
                {
                    "prompt": "login",
                    "grant_type": "http://auth0.com/oauth/grant-type/passwordless/otp",
                    "scope": " ".join(self.vendor.scope),
                    "locale": "en",
                    "otp": code,
                    "source": self.vendor.source,
                    "platform": "ios",
                    "audience": self.vendor.audience,
                    "username": email,
                    "client_id": self._client_id,
                    "realm": "email",
                    "country_code": "DE",
                }
            ),
            headers={"Content-Type": "application/json"},
        )
        response.raise_for_status()
        self._token = response.json()

    def get(self, path: str, **kwargs) -> requests.Response:
        """Make a get request.

        :raises NeatoException: on connection, HTTP or timeout errors.
        """
        url = self.urljoin(path)
        # Copy the merged headers so adding Authorization below cannot
        # mutate the shared session headers.
        headers = dict(self.generate_headers(kwargs.pop("headers", None)))
        headers["Authorization"] = "Auth0Bearer {}".format(self._token.get("id_token"))
        try:
            # Bug fix: forward remaining kwargs (params, timeout, ...)
            # which the original implementation silently dropped.
            response = requests.get(url, headers=headers, **kwargs)
            response.raise_for_status()
        except (
            requests.exceptions.ConnectionError,
            requests.exceptions.HTTPError,
            requests.exceptions.Timeout,
        ) as ex:
            raise NeatoException("Unable to connect to neato servers.") from ex
        return response
import json
import os
import os.path
from typing import Callable, Dict, Optional
import requests
from oauthlib.oauth2 import TokenExpiredError
from requests_oauthlib import OAuth2Session
from .exceptions import NeatoException, NeatoLoginException, NeatoRobotException
from .neato import Neato, Vendor
from .vorwerk import Vorwerk
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
class Session:
    """Base class for Beehive API sessions.

    Holds the vendor configuration and the default request headers.
    Concrete subclasses implement :meth:`get` with their own
    authentication scheme.
    """

    def __init__(self, vendor: Vendor):
        """Initialize the session."""
        self.vendor = vendor
        self.endpoint = vendor.endpoint
        self.headers = {"Accept": vendor.beehive_version}

    def get(self, path, **kwargs):
        """Send a GET request to the specified path."""
        raise NotImplementedError

    def urljoin(self, path):
        """Join *path* onto the vendor endpoint."""
        return urljoin(self.endpoint, path)

    def generate_headers(
        self, custom_headers: Optional[Dict[str, str]] = None
    ) -> Dict[str, str]:
        """Merge self.headers with custom headers if necessary.

        Always returns a new dict so callers may mutate the result
        (e.g. add an Authorization header) without clobbering the
        shared session headers.
        """
        if not custom_headers:
            # Copy instead of returning self.headers itself: callers
            # (e.g. PasswordlessSession.get) write into the result.
            return dict(self.headers)
        return {**self.headers, **custom_headers}
class PasswordSession(Session):
    """Session authenticated with an email/password login."""

    def __init__(self, email: str, password: str, vendor: Vendor = Neato()):
        super().__init__(vendor=vendor)
        self._login(email, password)

    def _login(self, email: str, password: str):
        """
        Login to pybotvac account using provided email and password.

        :param email: email for pybotvac account
        :param password: password for pybotvac account
        :return:
        """
        try:
            response = requests.post(
                urljoin(self.endpoint, "sessions"),
                json={
                    "email": email,
                    "password": password,
                    "platform": "ios",
                    # Random client token identifying this session.
                    "token": binascii.hexlify(os.urandom(64)).decode("utf8"),
                },
                headers=self.headers,
            )
            response.raise_for_status()
            access_token = response.json()["access_token"]
            self.headers["Authorization"] = "Token token=%s" % access_token
        except (
            requests.exceptions.ConnectionError,
            requests.exceptions.HTTPError,
            requests.exceptions.Timeout,
        ) as ex:
            # 403 means the credentials were rejected; anything else is a
            # connectivity problem.
            if (
                isinstance(ex, requests.exceptions.HTTPError)
                and ex.response.status_code == 403
            ):
                raise NeatoLoginException(
                    "Unable to login to neato, check account credentials."
                ) from ex
            raise NeatoRobotException("Unable to connect to Neato API.") from ex

    def get(self, path, **kwargs):
        """Send an authenticated GET request to *path*.

        :raises NeatoException: on connection, HTTP or timeout errors.
        """
        url = self.urljoin(path)
        headers = self.generate_headers(kwargs.pop("headers", None))
        try:
            response = requests.get(url, headers=headers, **kwargs)
            response.raise_for_status()
        except (
            requests.exceptions.ConnectionError,
            requests.exceptions.HTTPError,
            requests.exceptions.Timeout,
        ) as ex:
            # Fixed garbled error message ("neato the neato serves").
            raise NeatoException("Unable to connect to the Neato servers.") from ex
        return response
class OAuthSession(Session):
    """Session authenticated via an OAuth2 authorization-code flow."""

    def __init__(
        self,
        token: Optional[Dict[str, str]] = None,
        client_id: str = None,
        client_secret: str = None,
        redirect_uri: str = None,
        token_updater: Optional[Callable[[str], None]] = None,
        vendor: Vendor = Neato(),
    ):
        super().__init__(vendor=vendor)
        self._client_id = client_id
        self._client_secret = client_secret
        self._redirect_uri = redirect_uri
        self._token_updater = token_updater
        extra = {"client_id": self._client_id, "client_secret": self._client_secret}
        self._oauth = OAuth2Session(
            auto_refresh_kwargs=extra,
            client_id=client_id,
            token=token,
            redirect_uri=redirect_uri,
            token_updater=token_updater,
            scope=vendor.scope,
        )

    def refresh_tokens(self) -> dict:
        """Refresh and return new tokens."""
        token = self._oauth.refresh_token(f"{self.endpoint}/auth/token")
        if self._token_updater is not None:
            self._token_updater(token)
        return token

    def get_authorization_url(self) -> str:
        """Get an authorization url via oauth2."""
        # pylint: disable=unused-variable
        authorization_url, state = self._oauth.authorization_url(
            self.vendor.auth_endpoint
        )
        return authorization_url

    def fetch_token(self, authorization_response: str) -> Dict[str, str]:
        """Fetch an access token via oauth2."""
        token = self._oauth.fetch_token(
            self.vendor.token_endpoint,
            authorization_response=authorization_response,
            client_secret=self._client_secret,
        )
        return token

    def get(self, path: str, **kwargs) -> requests.Response:
        """Make a get request.

        We don't use the built-in token refresh mechanism of OAuth2 session
        because we want to allow overriding the token refresh logic.

        :raises NeatoException: on connection, HTTP or timeout errors.
        """
        url = self.urljoin(path)
        try:
            response = self._get(url, **kwargs)
            response.raise_for_status()
        except (
            requests.exceptions.ConnectionError,
            requests.exceptions.HTTPError,
            requests.exceptions.Timeout,
        ) as ex:
            raise NeatoException("Unable to connect to the Neato servers.") from ex
        return response

    def _get(self, path: str, **kwargs) -> requests.Response:
        """Get request without error handling.

        Refreshes the token if necessary.
        """
        headers = self.generate_headers(kwargs.pop("headers", None))
        try:
            return self._oauth.get(path, headers=headers, **kwargs)
        except TokenExpiredError:
            self._oauth.token = self.refresh_tokens()
            # Bug fix: retry with the same merged headers; the original
            # retried with self.headers and dropped the caller's custom
            # headers after a token refresh.
            return self._oauth.get(path, headers=headers, **kwargs)
class PasswordlessSession(Session):
    """Session using Vorwerk's passwordless (email OTP) authentication."""

    def __init__(
        self,
        token: Optional[Dict[str, str]] = None,
        client_id: str = None,
        token_updater: Optional[Callable[[str], None]] = None,
        vendor: Vendor = Vorwerk(),
    ):
        super().__init__(vendor=vendor)
        self._token = token
        self._client_id = client_id
        self._token_updater = token_updater

    def send_email_otp(self, email: str):
        """Request an authorization code via email."""
        response = requests.post(
            self.vendor.passwordless_endpoint,
            data=json.dumps(
                {
                    "client_id": self._client_id,
                    "connection": "email",
                    "email": email,
                    "send": "code",
                }
            ),
            headers={"Content-Type": "application/json"},
        )
        response.raise_for_status()

    def fetch_token_passwordless(self, email: str, code: str):
        """Fetch an access token using the emailed code."""
        response = requests.post(
            self.vendor.token_endpoint,
            data=json.dumps(
                {
                    "prompt": "login",
                    "grant_type": "http://auth0.com/oauth/grant-type/passwordless/otp",
                    "scope": " ".join(self.vendor.scope),
                    "locale": "en",
                    "otp": code,
                    "source": self.vendor.source,
                    "platform": "ios",
                    "audience": self.vendor.audience,
                    "username": email,
                    "client_id": self._client_id,
                    "realm": "email",
                    "country_code": "DE",
                }
            ),
            headers={"Content-Type": "application/json"},
        )
        response.raise_for_status()
        self._token = response.json()

    def get(self, path: str, **kwargs) -> requests.Response:
        """Make a get request.

        :raises NeatoException: on connection, HTTP or timeout errors.
        """
        url = self.urljoin(path)
        # Copy the merged headers so adding Authorization below cannot
        # mutate the shared session headers.
        headers = dict(self.generate_headers(kwargs.pop("headers", None)))
        headers["Authorization"] = "Auth0Bearer {}".format(self._token.get("id_token"))
        try:
            # Bug fix: forward remaining kwargs (params, timeout, ...)
            # which the original implementation silently dropped.
            response = requests.get(url, headers=headers, **kwargs)
            response.raise_for_status()
        except (
            requests.exceptions.ConnectionError,
            requests.exceptions.HTTPError,
            requests.exceptions.Timeout,
        ) as ex:
            raise NeatoException("Unable to connect to neato servers.") from ex
        return response
import os
from neptune.utils import validate_notebook_path
class Notebook(object):
    """It contains all the information about a Neptune Notebook

    Args:
        backend (:class:`~neptune.ApiClient`): A ApiClient object
        project (:class:`~neptune.projects.Project`): Project object
        _id (:obj:`str`): Notebook uuid
        owner (:obj:`str`): Creator of the notebook is the Notebook owner

    Examples:
        .. code:: python3

            # Create a notebook in Neptune.
            notebook = project.create_notebook('data_exploration.ipynb')
    """

    def __init__(self, backend, project, _id, owner):
        self._backend = backend
        self._project = project
        self._id = _id
        self._owner = owner

    @property
    def id(self):
        # Notebook uuid assigned at creation time.
        return self._id

    @property
    def owner(self):
        # Creator of the notebook.
        return self._owner

    def add_checkpoint(self, file_path):
        """Uploads new checkpoint of the notebook to Neptune

        Args:
            file_path (:obj:`str`): File path containing notebook contents

        Example:
            .. code:: python3

                # Create a notebook.
                notebook = project.create_notebook('file.ipynb')

                # Change content in your notebook & save it

                # Upload new checkpoint
                notebook.add_checkpoint('file.ipynb')
        """
        validate_notebook_path(file_path)
        absolute_path = os.path.abspath(file_path)
        with open(file_path) as handle:
            return self._backend.create_checkpoint(self.id, absolute_path, handle)

    def _last_checkpoint(self):
        # Most recent checkpoint object, fetched from the backend.
        return self._backend.get_last_checkpoint(self._project, self._id)

    def get_path(self):
        """Returns the path used to upload the current checkpoint of this notebook

        Returns:
            :obj:`str`: path of the current checkpoint
        """
        return self._last_checkpoint().path

    def get_name(self):
        """Returns the name used to upload the current checkpoint of this notebook

        Returns:
            :obj:`str`: the name of current checkpoint
        """
        return self._last_checkpoint().name
import os
from neptune.utils import validate_notebook_path
class Notebook(object):
    """It contains all the information about a Neptune Notebook

    Args:
        backend (:class:`~neptune.ApiClient`): A ApiClient object
        project (:class:`~neptune.projects.Project`): Project object
        _id (:obj:`str`): Notebook uuid
        owner (:obj:`str`): Creator of the notebook is the Notebook owner

    Examples:
        .. code:: python3

            # Create a notebook in Neptune.
            notebook = project.create_notebook('data_exploration.ipynb')
    """

    def __init__(self, backend, project, _id, owner):
        self._backend = backend
        self._project = project
        self._id = _id
        self._owner = owner

    @property
    def id(self):
        # Notebook uuid assigned at creation time.
        return self._id

    @property
    def owner(self):
        # Creator of the notebook.
        return self._owner

    def add_checkpoint(self, file_path):
        """Uploads new checkpoint of the notebook to Neptune

        Args:
            file_path (:obj:`str`): File path containing notebook contents

        Example:
            .. code:: python3

                # Create a notebook.
                notebook = project.create_notebook('file.ipynb')

                # Change content in your notebook & save it

                # Upload new checkpoint
                notebook.add_checkpoint('file.ipynb')
        """
        validate_notebook_path(file_path)
        absolute_path = os.path.abspath(file_path)
        with open(file_path) as handle:
            return self._backend.create_checkpoint(self.id, absolute_path, handle)

    def _last_checkpoint(self):
        # Most recent checkpoint object, fetched from the backend.
        return self._backend.get_last_checkpoint(self._project, self._id)

    def get_path(self):
        """Returns the path used to upload the current checkpoint of this notebook

        Returns:
            :obj:`str`: path of the current checkpoint
        """
        return self._last_checkpoint().path

    def get_name(self):
        """Returns the name used to upload the current checkpoint of this notebook

        Returns:
            :obj:`str`: the name of current checkpoint
        """
        return self._last_checkpoint().name
import io
import os
from functools import lru_cache
from lark.exceptions import UnexpectedInput, UnexpectedToken
from .compiler import Compiler
from .compiler.lowering import Lowering
from .exceptions import CompilerError, StoryError, StorySyntaxError
from .parser import Parser
@lru_cache(maxsize=1)
def _parser():
    """
    Cached instance of the parser.

    lru_cache(maxsize=1) guarantees a single Parser is ever built and
    then reused for every story.
    """
    instance = Parser()
    return instance
class Story:
    """
    Represents a single story and exposes methods for reading, parsing and
    compiling it.
    """

    def __init__(self, story, features, path=None):
        self.story = story
        self.path = path
        self.lines = story.splitlines(keepends=False)
        self.features = features

    @classmethod
    def read(cls, path):
        """
        Reads a story from the given path.
        Raises a StoryError when the file cannot be found.
        """
        try:
            with io.open(path, 'r') as file:
                return file.read()
        except FileNotFoundError:
            abspath = os.path.abspath(path)
            # `from None` hides the internal FileNotFoundError, matching
            # the original behaviour of raising outside the except block.
            raise StoryError.create_error('file_not_found', path=path,
                                          abspath=abspath) from None

    @classmethod
    def from_file(cls, path, features):
        """
        Creates a story from a file source
        """
        # Bug fix: use cls (not Story) so subclasses construct
        # instances of themselves.
        return cls(cls.read(path), features, path=path)

    @classmethod
    def from_stream(cls, stream, features):
        """
        Creates a story from a stream source
        """
        return cls(stream.read(), features)

    def error(self, error):
        """
        Handles errors by wrapping the real error in a smart StoryError
        """
        return StoryError(error, self, path=self.path)

    def parse(self, parser, lower=False):
        """
        Parses the story, storing the tree
        """
        if parser is None:
            parser = self._parser()
        try:
            self.tree = parser.parse(self.story)
            if lower:
                proc = Lowering(parser, features=self.features)
                self.tree = proc.process(self.tree)
        except (CompilerError, StorySyntaxError) as error:
            raise self.error(error) from error
        except UnexpectedToken as error:
            raise self.error(error) from error
        except UnexpectedInput as error:
            raise self.error(error) from error

    def compile(self):
        """
        Compiles the story and stores the result.
        """
        try:
            self.compiled = Compiler.compile(self.tree, story=self,
                                             features=self.features)
        except (CompilerError, StorySyntaxError) as error:
            raise self.error(error) from error

    def lex(self, parser):
        """
        Lexes a story
        """
        if parser is None:
            parser = self._parser()
        return parser.lex(self.story)

    def process(self, parser=None):
        """
        Parse and compile a story, returning the compiled JSON
        """
        if parser is None:
            parser = self._parser()
        self.parse(parser=parser)
        self.compile()
        return self.compiled

    def _parser(self):
        """
        Returns the default Parser instance (cached)
        """
        return _parser()

    def line(self, i):
        """
        Returns a line from the story source.
        Line numbers start with 1.
        Returns None when `i` is a string that is not a number.
        """
        if isinstance(i, str):
            if not i.isdigit():
                return None
            i = int(i)
        assert i <= len(self.lines)
        return self.lines[i - 1]
import os
from functools import lru_cache
from lark.exceptions import UnexpectedInput, UnexpectedToken
from .compiler import Compiler
from .compiler.lowering import Lowering
from .exceptions import CompilerError, StoryError, StorySyntaxError
from .parser import Parser
@lru_cache(maxsize=1)
def _parser():
    """
    Cached instance of the parser.

    lru_cache(maxsize=1) guarantees a single Parser is ever built and
    then reused for every story.
    """
    instance = Parser()
    return instance
class Story:
    """
    Represents a single story and exposes methods for reading, parsing and
    compiling it.
    """

    def __init__(self, story, features, path=None):
        self.story = story
        self.path = path
        self.lines = story.splitlines(keepends=False)
        self.features = features

    @classmethod
    def read(cls, path):
        """
        Reads a story from the given path.
        Raises a StoryError when the file cannot be found.
        """
        try:
            with io.open(path, 'r') as file:
                return file.read()
        except FileNotFoundError:
            abspath = os.path.abspath(path)
            # `from None` hides the internal FileNotFoundError, matching
            # the original behaviour of raising outside the except block.
            raise StoryError.create_error('file_not_found', path=path,
                                          abspath=abspath) from None

    @classmethod
    def from_file(cls, path, features):
        """
        Creates a story from a file source
        """
        # Bug fix: use cls (not Story) so subclasses construct
        # instances of themselves.
        return cls(cls.read(path), features, path=path)

    @classmethod
    def from_stream(cls, stream, features):
        """
        Creates a story from a stream source
        """
        return cls(stream.read(), features)

    def error(self, error):
        """
        Handles errors by wrapping the real error in a smart StoryError
        """
        return StoryError(error, self, path=self.path)

    def parse(self, parser, lower=False):
        """
        Parses the story, storing the tree
        """
        if parser is None:
            parser = self._parser()
        try:
            self.tree = parser.parse(self.story)
            if lower:
                proc = Lowering(parser, features=self.features)
                self.tree = proc.process(self.tree)
        except (CompilerError, StorySyntaxError) as error:
            raise self.error(error) from error
        except UnexpectedToken as error:
            raise self.error(error) from error
        except UnexpectedInput as error:
            raise self.error(error) from error

    def compile(self):
        """
        Compiles the story and stores the result.
        """
        try:
            self.compiled = Compiler.compile(self.tree, story=self,
                                             features=self.features)
        except (CompilerError, StorySyntaxError) as error:
            raise self.error(error) from error

    def lex(self, parser):
        """
        Lexes a story
        """
        if parser is None:
            parser = self._parser()
        return parser.lex(self.story)

    def process(self, parser=None):
        """
        Parse and compile a story, returning the compiled JSON
        """
        if parser is None:
            parser = self._parser()
        self.parse(parser=parser)
        self.compile()
        return self.compiled

    def _parser(self):
        """
        Returns the default Parser instance (cached)
        """
        return _parser()

    def line(self, i):
        """
        Returns a line from the story source.
        Line numbers start with 1.
        Returns None when `i` is a string that is not a number.
        """
        if isinstance(i, str):
            if not i.isdigit():
                return None
            i = int(i)
        assert i <= len(self.lines)
        return self.lines[i - 1]
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import re
from six import string_types
from spdx import annotation
from spdx import checksum
from spdx import creationinfo
from spdx import document
from spdx import file
from spdx import package
from spdx import review
from spdx import snippet
from spdx import utils
from spdx import version
from spdx.document import ExternalDocumentRef
from spdx.parsers.builderexceptions import CardinalityError
from spdx.parsers.builderexceptions import OrderError
from spdx.parsers.builderexceptions import SPDXValueError
from spdx.parsers import validations
def checksum_from_sha1(value):
    """
    Return an spdx.checksum.Algorithm instance representing the SHA1
    checksum or None if does not match CHECKSUM_RE.
    """
    # More constrained regex at lexer level
    match = re.match(r'SHA1:\s*([\S]+)', value, re.UNICODE)
    if not match:
        return None
    return checksum.Algorithm(identifier='SHA1', value=match.group(1))
def str_from_text(text):
    """
    Return content of a free form text block as a string.
    """
    match = re.compile('<text>((.|\n)+)</text>', re.UNICODE).match(text)
    return match.group(1) if match else None
class DocBuilder(object):
    """
    Set the fields of the top level document model.

    Each ``set_*`` method enforces cardinality: a field may only be set
    once per document, otherwise CardinalityError is raised.
    """
    VERS_STR_REGEX = re.compile(r'SPDX-(\d+)\.(\d+)', re.UNICODE)

    def __init__(self):
        # FIXME: this state does not make sense
        self.reset_document()

    def set_doc_version(self, doc, value):
        """
        Set the document version.
        Raise SPDXValueError if malformed value.
        Raise CardinalityError if already defined.
        """
        if not self.doc_version_set:
            self.doc_version_set = True
            m = self.VERS_STR_REGEX.match(value)
            if m is None:
                raise SPDXValueError('Document::Version')
            else:
                doc.version = version.Version(major=int(m.group(1)),
                                              minor=int(m.group(2)))
                return True
        else:
            raise CardinalityError('Document::Version')

    def set_doc_data_lics(self, doc, lics):
        """
        Set the document data license.
        Raise SPDXValueError if malformed value.
        Raise CardinalityError if already defined.
        """
        if not self.doc_data_lics_set:
            self.doc_data_lics_set = True
            if validations.validate_data_lics(lics):
                doc.data_license = document.License.from_identifier(lics)
                return True
            else:
                raise SPDXValueError('Document::DataLicense')
        else:
            raise CardinalityError('Document::DataLicense')

    def set_doc_name(self, doc, name):
        """
        Set the document name.
        Raise CardinalityError if already defined.
        """
        if not self.doc_name_set:
            doc.name = name
            self.doc_name_set = True
            return True
        else:
            raise CardinalityError('Document::Name')

    def set_doc_spdx_id(self, doc, doc_spdx_id_line):
        """
        Set the document SPDX Identifier.
        Raise SPDXValueError if malformed value.
        Raise CardinalityError if already defined.
        """
        if not self.doc_spdx_id_set:
            if doc_spdx_id_line == 'SPDXRef-DOCUMENT':
                doc.spdx_id = doc_spdx_id_line
                self.doc_spdx_id_set = True
                return True
            else:
                raise SPDXValueError('Document::SPDXID')
        else:
            raise CardinalityError('Document::SPDXID')

    def set_doc_comment(self, doc, comment):
        """
        Set document comment.
        Raise CardinalityError if comment already set.
        Raise SPDXValueError if comment is not free form text.
        """
        if not self.doc_comment_set:
            self.doc_comment_set = True
            if validations.validate_doc_comment(comment):
                doc.comment = str_from_text(comment)
                return True
            else:
                raise SPDXValueError('Document::Comment')
        else:
            raise CardinalityError('Document::Comment')

    def set_doc_namespace(self, doc, namespace):
        """
        Set the document namespace.
        Raise SPDXValueError if malformed value.
        Raise CardinalityError if already defined.
        """
        if not self.doc_namespace_set:
            self.doc_namespace_set = True
            if validations.validate_doc_namespace(namespace):
                doc.namespace = namespace
                return True
            else:
                raise SPDXValueError('Document::Namespace')
        else:
            # Bug fix: previously raised CardinalityError('Document::Comment'),
            # mislabelling a duplicate namespace as a duplicate comment.
            raise CardinalityError('Document::Namespace')

    def reset_document(self):
        """
        Reset the state to allow building new documents
        """
        # FIXME: this state does not make sense
        self.doc_version_set = False
        self.doc_comment_set = False
        self.doc_namespace_set = False
        self.doc_data_lics_set = False
        self.doc_name_set = False
        self.doc_spdx_id_set = False
class ExternalDocumentRefBuilder(object):
    """Populates ExternalDocumentRef entries on the document."""

    def set_ext_doc_id(self, doc, ext_doc_id):
        """
        Set the `external_document_id` attribute of the `ExternalDocumentRef` object.
        """
        reference = ExternalDocumentRef(external_document_id=ext_doc_id)
        doc.add_ext_document_reference(reference)

    def set_spdx_doc_uri(self, doc, spdx_doc_uri):
        """
        Set the `spdx_document_uri` attribute of the `ExternalDocumentRef` object.
        """
        if not validations.validate_doc_namespace(spdx_doc_uri):
            raise SPDXValueError('Document::ExternalDocumentRef')
        doc.ext_document_references[-1].spdx_document_uri = spdx_doc_uri

    def set_chksum(self, doc, chksum):
        """
        Set the `check_sum` attribute of the `ExternalDocumentRef` object.
        """
        parsed = checksum_from_sha1(chksum)
        doc.ext_document_references[-1].check_sum = parsed

    def add_ext_doc_refs(self, doc, ext_doc_id, spdx_doc_uri, chksum):
        """Set identifier, document URI and checksum in one call."""
        self.set_ext_doc_id(doc, ext_doc_id)
        self.set_spdx_doc_uri(doc, spdx_doc_uri)
        self.set_chksum(doc, chksum)
class EntityBuilder(object):
    """Builds Tool, Organization and Person entities from tag values."""

    tool_re = re.compile(r'Tool:\s*(.+)', re.UNICODE)
    person_re = re.compile(r'Person:\s*(([^(])+)(\((.*)\))?', re.UNICODE)
    org_re = re.compile(r'Organization:\s*(([^(])+)(\((.*)\))?', re.UNICODE)
    PERSON_NAME_GROUP = 1
    PERSON_EMAIL_GROUP = 4
    ORG_NAME_GROUP = 1
    ORG_EMAIL_GROUP = 4
    TOOL_NAME_GROUP = 1

    def build_tool(self, doc, entity):
        """
        Build a tool object out of a string representation.
        Return built tool.
        Raise SPDXValueError if failed to extract tool name or name is malformed.
        """
        match = self.tool_re.match(entity)
        if match and validations.validate_tool_name(match.group(self.TOOL_NAME_GROUP)):
            return creationinfo.Tool(match.group(self.TOOL_NAME_GROUP))
        raise SPDXValueError('Failed to extract tool name')

    def build_org(self, doc, entity):
        """
        Build an organization object out of a string representation.
        Return built organization.
        Raise SPDXValueError if failed to extract name.
        """
        match = self.org_re.match(entity)
        if not (match and validations.validate_org_name(
                match.group(self.ORG_NAME_GROUP))):
            raise SPDXValueError('Failed to extract Organization name')
        name = match.group(self.ORG_NAME_GROUP).strip()
        email = match.group(self.ORG_EMAIL_GROUP)
        # A non-empty email group means an "(email)" suffix was present.
        if email:
            return creationinfo.Organization(name=name, email=email.strip())
        return creationinfo.Organization(name=name, email=None)

    def build_person(self, doc, entity):
        """
        Build a person object out of a string representation.
        Return built person.
        Raise SPDXValueError if failed to extract name.
        """
        match = self.person_re.match(entity)
        if not (match and validations.validate_person_name(
                match.group(self.PERSON_NAME_GROUP))):
            raise SPDXValueError('Failed to extract person name')
        name = match.group(self.PERSON_NAME_GROUP).strip()
        email = match.group(self.PERSON_EMAIL_GROUP)
        if email:
            return creationinfo.Person(name=name, email=email.strip())
        return creationinfo.Person(name=name, email=None)
class CreationInfoBuilder(object):
    """Builds the creation-info section of an SPDX document."""

    def __init__(self):
        # FIXME: this state does not make sense
        self.reset_creation_info()

    def add_creator(self, doc, creator):
        """
        Add a creator to the document's creation info.
        Return true if creator is valid.
        Creator must be built by an EntityBuilder.
        Raise SPDXValueError if not a creator type.
        """
        if not validations.validate_creator(creator):
            raise SPDXValueError('CreationInfo::Creator')
        doc.creation_info.add_creator(creator)
        return True

    def set_created_date(self, doc, created):
        """
        Set created date.
        Raise CardinalityError if created date already set.
        Raise SPDXValueError if created is not a date.
        """
        if self.created_date_set:
            raise CardinalityError('CreationInfo::Created')
        self.created_date_set = True
        date = utils.datetime_from_iso_format(created)
        if date is None:
            raise SPDXValueError('CreationInfo::Date')
        doc.creation_info.created = date
        return True

    def set_creation_comment(self, doc, comment):
        """
        Set creation comment.
        Raise CardinalityError if comment already set.
        Raise SPDXValueError if not free form text.
        """
        if self.creation_comment_set:
            raise CardinalityError('CreationInfo::Comment')
        self.creation_comment_set = True
        if not validations.validate_creation_comment(comment):
            raise SPDXValueError('CreationInfo::Comment')
        doc.creation_info.comment = str_from_text(comment)
        return True

    def set_lics_list_ver(self, doc, value):
        """
        Set the license list version.
        Raise CardinalityError if already set.
        Raise SPDXValueError if incorrect value.
        """
        if self.lics_list_ver_set:
            raise CardinalityError('CreationInfo::LicenseListVersion')
        self.lics_list_ver_set = True
        vers = version.Version.from_str(value)
        if vers is None:
            raise SPDXValueError('CreationInfo::LicenseListVersion')
        doc.creation_info.license_list_version = vers
        return True

    def reset_creation_info(self):
        """
        Reset builder state to allow building new creation info.
        """
        # FIXME: this state does not make sense
        self.created_date_set = False
        self.creation_comment_set = False
        self.lics_list_ver_set = False
class ReviewBuilder(object):
    """Builds review objects attached to an SPDX document."""

    def __init__(self):
        # FIXME: this state does not make sense
        self.reset_reviews()

    def reset_reviews(self):
        """
        Reset the builder's state to allow building new reviews.
        """
        # FIXME: this state does not make sense
        self.review_date_set = False
        self.review_comment_set = False

    def add_reviewer(self, doc, reviewer):
        """
        Adds a reviewer to the SPDX Document.
        Reviewer is an entity created by an EntityBuilder.
        Raise SPDXValueError if not a valid reviewer type.
        """
        # Each reviewer marks the start of a new review object.
        # FIXME: this state does not make sense
        self.reset_reviews()
        if not validations.validate_reviewer(reviewer):
            raise SPDXValueError('Review::Reviewer')
        doc.add_review(review.Review(reviewer=reviewer))
        return True

    def add_review_date(self, doc, reviewed):
        """
        Set the review date.
        Raise CardinalityError if already set.
        Raise OrderError if no reviewer defined before.
        Raise SPDXValueError if invalid reviewed value.
        """
        if len(doc.reviews) == 0:
            raise OrderError('Review::ReviewDate')
        if self.review_date_set:
            raise CardinalityError('Review::ReviewDate')
        self.review_date_set = True
        date = utils.datetime_from_iso_format(reviewed)
        if date is None:
            raise SPDXValueError('Review::ReviewDate')
        doc.reviews[-1].review_date = date
        return True

    def add_review_comment(self, doc, comment):
        """
        Set the review comment.
        Raise CardinalityError if already set.
        Raise OrderError if no reviewer defined before.
        Raise SPDXValueError if comment is not free form text.
        """
        if len(doc.reviews) == 0:
            raise OrderError('ReviewComment')
        if self.review_comment_set:
            raise CardinalityError('ReviewComment')
        self.review_comment_set = True
        if not validations.validate_review_comment(comment):
            raise SPDXValueError('ReviewComment::Comment')
        doc.reviews[-1].comment = str_from_text(comment)
        return True
class AnnotationBuilder(object):
    def __init__(self):
        # FIXME: this state does not make sense
        self.reset_annotations()

    def reset_annotations(self):
        """Clear per-annotation state so a fresh annotation can be built."""
        # FIXME: this state does not make sense
        self.annotation_date_set = False
        self.annotation_comment_set = False
        self.annotation_type_set = False
        self.annotation_spdx_id_set = False

    def add_annotator(self, doc, annotator):
        """
        Append a new annotation for `annotator` to the SPDX Document.
        Annotator is an entity created by an EntityBuilder.
        Raise SPDXValueError if not a valid annotator type.
        """
        # Each annotator tag starts a brand new annotation object.
        # FIXME: this state does not make sense
        self.reset_annotations()
        if not validations.validate_annotator(annotator):
            raise SPDXValueError('Annotation::Annotator')
        doc.add_annotation(annotation.Annotation(annotator=annotator))
        return True

    def add_annotation_date(self, doc, annotation_date):
        """
        Set the date on the most recent annotation.
        Raise OrderError if no annotator was defined before.
        Raise CardinalityError if the date was already set.
        Raise SPDXValueError if the date is not a valid ISO date.
        """
        if not doc.annotations:
            raise OrderError('Annotation::AnnotationDate')
        if self.annotation_date_set:
            raise CardinalityError('Annotation::AnnotationDate')
        self.annotation_date_set = True
        parsed = utils.datetime_from_iso_format(annotation_date)
        if parsed is None:
            raise SPDXValueError('Annotation::AnnotationDate')
        doc.annotations[-1].annotation_date = parsed
        return True

    def add_annotation_comment(self, doc, comment):
        """
        Set the free-form comment on the most recent annotation.
        Raise OrderError if no annotator was defined before.
        Raise CardinalityError if the comment was already set.
        Raise SPDXValueError if `comment` is not free form text.
        """
        if not doc.annotations:
            raise OrderError('AnnotationComment::Comment')
        if self.annotation_comment_set:
            raise CardinalityError('AnnotationComment::Comment')
        self.annotation_comment_set = True
        if not validations.validate_annotation_comment(comment):
            raise SPDXValueError('AnnotationComment::Comment')
        doc.annotations[-1].comment = str_from_text(comment)
        return True

    def add_annotation_type(self, doc, annotation_type):
        """
        Set the type of the most recent annotation.
        Raise OrderError if no annotator was defined before.
        Raise CardinalityError if the type was already set.
        Raise SPDXValueError if the type is invalid.
        """
        if not doc.annotations:
            raise OrderError('Annotation::AnnotationType')
        if self.annotation_type_set:
            raise CardinalityError('Annotation::AnnotationType')
        self.annotation_type_set = True
        if not validations.validate_annotation_type(annotation_type):
            raise SPDXValueError('Annotation::AnnotationType')
        doc.annotations[-1].annotation_type = annotation_type
        return True

    def set_annotation_spdx_id(self, doc, spdx_id):
        """
        Set the SPDX Identifier the most recent annotation refers to.
        Raise OrderError if no annotator was defined before.
        Raise CardinalityError if the identifier was already set.
        """
        if not doc.annotations:
            raise OrderError('Annotation::SPDXREF')
        if self.annotation_spdx_id_set:
            raise CardinalityError('Annotation::SPDXREF')
        self.annotation_spdx_id_set = True
        doc.annotations[-1].spdx_id = spdx_id
        return True
class PackageBuilder(object):
    """
    Build the (single) package section of an SPDX document.

    Each ``set_pkg_*`` method enforces the SPDX "at most once" cardinality
    with a ``*_set`` flag and raises OrderError when called before
    ``create_package``.
    """
    VERIF_CODE_REGEX = re.compile(r"([0-9a-f]+)\s*(\(\s*(.+)\))?", re.UNICODE)
    VERIF_CODE_CODE_GRP = 1
    VERIF_CODE_EXC_FILES_GRP = 3

    def __init__(self):
        # FIXME: this state does not make sense
        self.reset_package()

    def reset_package(self):
        """Resets the builder's state in order to build new packages."""
        # FIXME: this state does not make sense
        self.package_set = False
        self.package_spdx_id_set = False
        self.package_vers_set = False
        self.package_file_name_set = False
        self.package_supplier_set = False
        self.package_originator_set = False
        self.package_down_location_set = False
        self.package_files_analyzed_set = False
        self.package_home_set = False
        self.package_verif_set = False
        self.package_chk_sum_set = False
        self.package_source_info_set = False
        self.package_conc_lics_set = False
        self.package_license_declared_set = False
        self.package_license_comment_set = False
        self.package_cr_text_set = False
        self.package_summary_set = False
        self.package_desc_set = False
        self.package_comment_set = False
        self.pkg_ext_comment_set = False

    def create_package(self, doc, name):
        """
        Create a package for the SPDX Document.
        name - any string.
        Raise CardinalityError if package already defined.
        """
        if not self.package_set:
            self.package_set = True
            doc.package = package.Package(name=name)
            return True
        else:
            raise CardinalityError('Package::Name')

    def set_pkg_spdx_id(self, doc, spdx_id):
        """
        Set the Package SPDX Identifier.
        Raise SPDXValueError if malformed value.
        Raise CardinalityError if already defined.
        """
        self.assert_package_exists()
        if not self.package_spdx_id_set:
            if validations.validate_pkg_spdx_id(spdx_id):
                doc.package.spdx_id = spdx_id
                self.package_spdx_id_set = True
                return True
            else:
                raise SPDXValueError('Package::SPDXID')
        else:
            raise CardinalityError('Package::SPDXID')

    def set_pkg_vers(self, doc, version):
        """
        Set package version, if not already set.
        version - Any string.
        Raise CardinalityError if already has a version.
        Raise OrderError if no package previously defined.
        """
        self.assert_package_exists()
        if not self.package_vers_set:
            self.package_vers_set = True
            doc.package.version = version
            return True
        else:
            raise CardinalityError('Package::Version')

    def set_pkg_file_name(self, doc, name):
        """
        Set the package file name, if not already set.
        name - Any string.
        Raise CardinalityError if already has a file_name.
        Raise OrderError if no package previously defined.
        """
        self.assert_package_exists()
        if not self.package_file_name_set:
            self.package_file_name_set = True
            doc.package.file_name = name
            return True
        else:
            raise CardinalityError('Package::FileName')

    def set_pkg_supplier(self, doc, entity):
        """
        Set the package supplier, if not already set.
        entity - Organization, Person or NoAssert.
        Raise CardinalityError if already has a supplier.
        Raise OrderError if no package previously defined.
        """
        self.assert_package_exists()
        if not self.package_supplier_set:
            self.package_supplier_set = True
            if validations.validate_pkg_supplier(entity):
                doc.package.supplier = entity
                return True
            else:
                raise SPDXValueError('Package::Supplier')
        else:
            raise CardinalityError('Package::Supplier')

    def set_pkg_originator(self, doc, entity):
        """
        Set the package originator, if not already set.
        entity - Organization, Person or NoAssert.
        Raise CardinalityError if already has an originator.
        Raise OrderError if no package previously defined.
        """
        self.assert_package_exists()
        if not self.package_originator_set:
            self.package_originator_set = True
            if validations.validate_pkg_originator(entity):
                doc.package.originator = entity
                return True
            else:
                raise SPDXValueError('Package::Originator')
        else:
            raise CardinalityError('Package::Originator')

    def set_pkg_down_location(self, doc, location):
        """
        Set the package download location, if not already set.
        location - A string
        Raise CardinalityError if already defined.
        Raise OrderError if no package previously defined.
        """
        self.assert_package_exists()
        if not self.package_down_location_set:
            self.package_down_location_set = True
            doc.package.download_location = location
            return True
        else:
            raise CardinalityError('Package::DownloadLocation')

    def set_pkg_files_analyzed(self, doc, files_analyzed):
        """
        Set the package files analyzed, if not already set.
        Raise SPDXValueError if malformed value, CardinalityError if
        already defined.
        """
        self.assert_package_exists()
        if not self.package_files_analyzed_set:
            # FilesAnalyzed is optional: a falsy value is silently ignored
            # and leaves the flag unset.
            if files_analyzed:
                if validations.validate_pkg_files_analyzed(files_analyzed):
                    self.package_files_analyzed_set = True
                    doc.package.files_analyzed = files_analyzed
                    return True
                else:
                    raise SPDXValueError('Package::FilesAnalyzed')
        else:
            raise CardinalityError('Package::FilesAnalyzed')

    def set_pkg_home(self, doc, location):
        """Set the package homepage location if not already set.
        location - A string or None or NoAssert.
        Raise CardinalityError if already defined.
        Raise OrderError if no package previously defined.
        Raise SPDXValueError if location has incorrect value.
        """
        self.assert_package_exists()
        if not self.package_home_set:
            self.package_home_set = True
            if validations.validate_pkg_homepage(location):
                doc.package.homepage = location
                return True
            else:
                raise SPDXValueError('Package::HomePage')
        else:
            raise CardinalityError('Package::HomePage')

    def set_pkg_verif_code(self, doc, code):
        """
        Set the package verification code, if not already set.
        code - A string.
        Raise CardinalityError if already defined.
        Raise OrderError if no package previously defined.
        Raise SPDXValueError if doesn't match verifcode form
        """
        self.assert_package_exists()
        if not self.package_verif_set:
            self.package_verif_set = True
            match = self.VERIF_CODE_REGEX.match(code)
            if match:
                doc.package.verif_code = match.group(self.VERIF_CODE_CODE_GRP)
                # Optional parenthesised part lists excluded files,
                # comma-separated.
                if match.group(self.VERIF_CODE_EXC_FILES_GRP) is not None:
                    doc.package.verif_exc_files = match.group(self.VERIF_CODE_EXC_FILES_GRP).split(',')
                return True
            else:
                raise SPDXValueError('Package::VerificationCode')
        else:
            raise CardinalityError('Package::VerificationCode')

    def set_pkg_chk_sum(self, doc, chk_sum):
        """
        Set the package check sum, if not already set.
        chk_sum - A string
        Raise CardinalityError if already defined.
        Raise OrderError if no package previously defined.
        """
        self.assert_package_exists()
        if not self.package_chk_sum_set:
            self.package_chk_sum_set = True
            doc.package.check_sum = checksum_from_sha1(chk_sum)
            return True
        else:
            raise CardinalityError('Package::CheckSum')

    def set_pkg_source_info(self, doc, text):
        """
        Set the package's source information, if not already set.
        text - Free form text.
        Raise CardinalityError if already defined.
        Raise OrderError if no package previously defined.
        SPDXValueError if text is not free form text.
        """
        self.assert_package_exists()
        if not self.package_source_info_set:
            self.package_source_info_set = True
            if validations.validate_pkg_src_info(text):
                doc.package.source_info = str_from_text(text)
                return True
            else:
                raise SPDXValueError('Package::SourceInfo')
        else:
            raise CardinalityError('Package::SourceInfo')

    def set_pkg_licenses_concluded(self, doc, licenses):
        """
        Set the package's concluded licenses.
        licenses - License info.
        Raise CardinalityError if already defined.
        Raise OrderError if no package previously defined.
        Raise SPDXValueError if data malformed.
        """
        self.assert_package_exists()
        if not self.package_conc_lics_set:
            self.package_conc_lics_set = True
            if validations.validate_lics_conc(licenses):
                doc.package.conc_lics = licenses
                return True
            else:
                raise SPDXValueError('Package::ConcludedLicenses')
        else:
            raise CardinalityError('Package::ConcludedLicenses')

    def set_pkg_license_from_file(self, doc, lic):
        """
        Add a license from a file to the package.
        Raise SPDXValueError if data malformed.
        Raise OrderError if no package previously defined.
        """
        self.assert_package_exists()
        if validations.validate_lics_from_file(lic):
            doc.package.licenses_from_files.append(lic)
            return True
        else:
            raise SPDXValueError('Package::LicensesFromFile')

    def set_pkg_license_declared(self, doc, lic):
        """
        Set the package's declared license.
        Raise SPDXValueError if data malformed.
        Raise OrderError if no package previously defined.
        Raise CardinalityError if already set.
        """
        self.assert_package_exists()
        if not self.package_license_declared_set:
            self.package_license_declared_set = True
            if validations.validate_lics_conc(lic):
                doc.package.license_declared = lic
                return True
            else:
                raise SPDXValueError('Package::LicenseDeclared')
        else:
            raise CardinalityError('Package::LicenseDeclared')

    def set_pkg_license_comment(self, doc, text):
        """
        Set the package's license comment.
        Raise OrderError if no package previously defined.
        Raise CardinalityError if already set.
        Raise SPDXValueError if text is not free form text.
        """
        self.assert_package_exists()
        if not self.package_license_comment_set:
            self.package_license_comment_set = True
            if validations.validate_pkg_lics_comment(text):
                doc.package.license_comment = str_from_text(text)
                return True
            else:
                raise SPDXValueError('Package::LicenseComment')
        else:
            raise CardinalityError('Package::LicenseComment')

    def set_pkg_cr_text(self, doc, text):
        """
        Set the package's copyright text.
        Raise OrderError if no package previously defined.
        Raise CardinalityError if already set.
        Raise SPDXValueError if text is not one of [None, NOASSERT, TEXT].
        """
        self.assert_package_exists()
        if not self.package_cr_text_set:
            self.package_cr_text_set = True
            if validations.validate_pkg_cr_text(text):
                if isinstance(text, string_types):
                    doc.package.cr_text = str_from_text(text)
                else:
                    doc.package.cr_text = text  # None or NoAssert
                # Return True on success for consistency with the other
                # setters in this builder.
                return True
            else:
                raise SPDXValueError('Package::CopyrightText')
        else:
            raise CardinalityError('Package::CopyrightText')

    def set_pkg_summary(self, doc, text):
        """
        Set the package summary.
        Raise SPDXValueError if text is not free form text.
        Raise CardinalityError if summary already set.
        Raise OrderError if no package previously defined.
        """
        self.assert_package_exists()
        if not self.package_summary_set:
            self.package_summary_set = True
            if validations.validate_pkg_summary(text):
                doc.package.summary = str_from_text(text)
                return True
            else:
                raise SPDXValueError('Package::Summary')
        else:
            raise CardinalityError('Package::Summary')

    def set_pkg_desc(self, doc, text):
        """
        Set the package's description.
        Raise SPDXValueError if text is not free form text.
        Raise CardinalityError if description already set.
        Raise OrderError if no package previously defined.
        """
        self.assert_package_exists()
        if not self.package_desc_set:
            self.package_desc_set = True
            if validations.validate_pkg_desc(text):
                doc.package.description = str_from_text(text)
                return True
            else:
                raise SPDXValueError('Package::Description')
        else:
            raise CardinalityError('Package::Description')

    def set_pkg_comment(self, doc, text):
        """
        Set the package's comment.
        Raise SPDXValueError if text is not free form text.
        Raise CardinalityError if comment already set.
        Raise OrderError if no package previously defined.
        """
        self.assert_package_exists()
        if not self.package_comment_set:
            self.package_comment_set = True
            if validations.validate_pkg_comment(text):
                doc.package.comment = str_from_text(text)
                return True
            else:
                raise SPDXValueError('Package::Comment')
        else:
            raise CardinalityError('Package::Comment')

    def set_pkg_ext_ref_category(self, doc, category):
        """
        Set the `category` attribute of the `ExternalPackageRef` object.
        Starts a new external reference when the current one already has a
        category.
        """
        self.assert_package_exists()
        if validations.validate_pkg_ext_ref_category(category):
            if (len(doc.package.pkg_ext_refs) and
                    doc.package.pkg_ext_refs[-1].category is None):
                doc.package.pkg_ext_refs[-1].category = category
            else:
                doc.package.add_pkg_ext_refs(
                    package.ExternalPackageRef(category=category))
        else:
            raise SPDXValueError('ExternalRef::Category')

    def set_pkg_ext_ref_type(self, doc, pkg_ext_ref_type):
        """
        Set the `pkg_ext_ref_type` attribute of the `ExternalPackageRef`
        object. Starts a new external reference when the current one already
        has a type.
        """
        self.assert_package_exists()
        if validations.validate_pkg_ext_ref_type(pkg_ext_ref_type):
            if (len(doc.package.pkg_ext_refs) and
                    doc.package.pkg_ext_refs[-1].pkg_ext_ref_type is None):
                doc.package.pkg_ext_refs[-1].pkg_ext_ref_type = pkg_ext_ref_type
            else:
                doc.package.add_pkg_ext_refs(package.ExternalPackageRef(
                    pkg_ext_ref_type=pkg_ext_ref_type))
        else:
            raise SPDXValueError('ExternalRef::Type')

    def set_pkg_ext_ref_locator(self, doc, locator):
        """
        Set the `locator` attribute of the `ExternalPackageRef` object.
        Starts a new external reference when the current one already has a
        locator.
        """
        self.assert_package_exists()
        if (len(doc.package.pkg_ext_refs) and
                doc.package.pkg_ext_refs[-1].locator is None):
            doc.package.pkg_ext_refs[-1].locator = locator
        else:
            doc.package.add_pkg_ext_refs(package.ExternalPackageRef(
                locator=locator))

    def add_pkg_ext_ref_comment(self, doc, comment):
        """
        Set the `comment` attribute of the `ExternalPackageRef` object.
        Raise OrderError if no external reference was defined before.
        Raise SPDXValueError if comment is not free form text.
        """
        self.assert_package_exists()
        if not len(doc.package.pkg_ext_refs):
            raise OrderError('Package::ExternalRef')
        else:
            if validations.validate_pkg_ext_ref_comment(comment):
                doc.package.pkg_ext_refs[-1].comment = str_from_text(comment)
            else:
                raise SPDXValueError('ExternalRef::Comment')

    def add_pkg_ext_refs(self, doc, category, pkg_ext_ref_type, locator):
        """Add one complete external reference (category, type, locator)."""
        self.set_pkg_ext_ref_category(doc, category)
        self.set_pkg_ext_ref_type(doc, pkg_ext_ref_type)
        self.set_pkg_ext_ref_locator(doc, locator)

    def assert_package_exists(self):
        """Raise OrderError unless create_package() has been called."""
        if not self.package_set:
            raise OrderError('Package')
class FileBuilder(object):
    """Builds the per-file entries of the document's package."""

    def __init__(self):
        # FIXME: this state does not make sense
        self.reset_file_stat()

    def set_file_name(self, doc, name):
        """
        Start a new file entry named `name` in the document's package.
        Raise OrderError if no package defined.
        """
        if self.has_package(doc):
            doc.package.files.append(file.File(name))
            # A file name marks the start of a new file instance.
            # The builder must be reset
            # FIXME: this state does not make sense
            self.reset_file_stat()
            return True
        else:
            raise OrderError('File::Name')

    def set_file_spdx_id(self, doc, spdx_id):
        """
        Set the file SPDX Identifier.
        Raise OrderError if no package or no file defined.
        Raise SPDXValueError if malformed value.
        Raise CardinalityError if more than one spdx_id set.
        """
        if self.has_package(doc) and self.has_file(doc):
            if not self.file_spdx_id_set:
                self.file_spdx_id_set = True
                if validations.validate_file_spdx_id(spdx_id):
                    self.file(doc).spdx_id = spdx_id
                    return True
                else:
                    raise SPDXValueError('File::SPDXID')
            else:
                raise CardinalityError('File::SPDXID')
        else:
            raise OrderError('File::SPDXID')

    def set_file_comment(self, doc, text):
        """
        Set the current file's comment.
        Raise OrderError if no package or no file defined.
        Raise CardinalityError if more than one comment set.
        Raise SPDXValueError if text is not free form text.
        """
        if self.has_package(doc) and self.has_file(doc):
            if not self.file_comment_set:
                self.file_comment_set = True
                if validations.validate_file_comment(text):
                    self.file(doc).comment = str_from_text(text)
                    return True
                else:
                    raise SPDXValueError('File::Comment')
            else:
                raise CardinalityError('File::Comment')
        else:
            raise OrderError('File::Comment')

    def set_file_type(self, doc, type_value):
        """
        Set the current file's type from its tag-value keyword.
        Raise OrderError if no package or file defined.
        Raise CardinalityError if more than one type set.
        Raise SPDXValueError if type is unknown.
        """
        type_dict = {
            'SOURCE': file.FileType.SOURCE,
            'BINARY': file.FileType.BINARY,
            'ARCHIVE': file.FileType.ARCHIVE,
            'OTHER': file.FileType.OTHER
        }
        if self.has_package(doc) and self.has_file(doc):
            if not self.file_type_set:
                self.file_type_set = True
                if type_value in type_dict.keys():
                    self.file(doc).type = type_dict[type_value]
                    return True
                else:
                    raise SPDXValueError('File::Type')
            else:
                raise CardinalityError('File::Type')
        else:
            raise OrderError('File::Type')

    def set_file_chksum(self, doc, chksum):
        """
        Set the current file's checksum from a "SHA1: ..." string.
        Raise OrderError if no package or file defined.
        Raise CardinalityError if more than one chksum set.
        """
        if self.has_package(doc) and self.has_file(doc):
            if not self.file_chksum_set:
                self.file_chksum_set = True
                self.file(doc).chk_sum = checksum_from_sha1(chksum)
                return True
            else:
                raise CardinalityError('File::CheckSum')
        else:
            raise OrderError('File::CheckSum')

    def set_concluded_license(self, doc, lic):
        """
        Set the current file's concluded license.
        Raise OrderError if no package or file defined.
        Raise CardinalityError if already set.
        Raise SPDXValueError if malformed.
        """
        if self.has_package(doc) and self.has_file(doc):
            if not self.file_conc_lics_set:
                self.file_conc_lics_set = True
                if validations.validate_lics_conc(lic):
                    self.file(doc).conc_lics = lic
                    return True
                else:
                    raise SPDXValueError('File::ConcludedLicense')
            else:
                raise CardinalityError('File::ConcludedLicense')
        else:
            raise OrderError('File::ConcludedLicense')

    def set_file_license_in_file(self, doc, lic):
        """
        Add a license found in the current file (may repeat).
        Raise OrderError if no package or file defined.
        Raise SPDXValueError if malformed value.
        """
        if self.has_package(doc) and self.has_file(doc):
            if validations.validate_file_lics_in_file(lic):
                self.file(doc).add_lics(lic)
                return True
            else:
                raise SPDXValueError('File::LicenseInFile')
        else:
            raise OrderError('File::LicenseInFile')

    def set_file_license_comment(self, doc, text):
        """
        Set the current file's license comment.
        Raise OrderError if no package or file defined.
        Raise SPDXValueError if text is not free form text.
        Raise CardinalityError if more than one per file.
        """
        if self.has_package(doc) and self.has_file(doc):
            if not self.file_license_comment_set:
                self.file_license_comment_set = True
                if validations.validate_file_lics_comment(text):
                    self.file(doc).license_comment = str_from_text(text)
                    # Return True on success for consistency with the
                    # other setters in this builder.
                    return True
                else:
                    raise SPDXValueError('File::LicenseComment')
            else:
                raise CardinalityError('File::LicenseComment')
        else:
            raise OrderError('File::LicenseComment')

    def set_file_copyright(self, doc, text):
        """
        Set the current file's copyright text.
        Raise OrderError if no package or file defined.
        Raise SPDXValueError if not free form text or NONE or NO_ASSERT.
        Raise CardinalityError if more than one.
        """
        if self.has_package(doc) and self.has_file(doc):
            if not self.file_copytext_set:
                self.file_copytext_set = True
                if validations.validate_file_cpyright(text):
                    if isinstance(text, string_types):
                        self.file(doc).copyright = str_from_text(text)
                    else:
                        self.file(doc).copyright = text  # None or NoAssert
                    return True
                else:
                    raise SPDXValueError('File::CopyRight')
            else:
                raise CardinalityError('File::CopyRight')
        else:
            raise OrderError('File::CopyRight')

    def set_file_notice(self, doc, text):
        """
        Set the current file's notice text.
        Raise OrderError if no package or file defined.
        Raise SPDXValueError if not free form text.
        Raise CardinalityError if more than one.
        """
        if self.has_package(doc) and self.has_file(doc):
            if not self.file_notice_set:
                self.file_notice_set = True
                if validations.validate_file_notice(text):
                    self.file(doc).notice = str_from_text(text)
                    # Return True on success for consistency with the
                    # other setters in this builder.
                    return True
                else:
                    raise SPDXValueError('File::Notice')
            else:
                raise CardinalityError('File::Notice')
        else:
            raise OrderError('File::Notice')

    def add_file_contribution(self, doc, value):
        """
        Add a contributor to the current file (may repeat).
        Raise OrderError if no package or file defined.
        """
        if self.has_package(doc) and self.has_file(doc):
            self.file(doc).add_contrib(value)
        else:
            raise OrderError('File::Contributor')

    def add_file_dep(self, doc, value):
        """
        Add a dependency of the current file (may repeat).
        Raise OrderError if no package or file defined.
        """
        if self.has_package(doc) and self.has_file(doc):
            self.file(doc).add_depend(value)
        else:
            raise OrderError('File::Dependency')

    def set_file_atrificat_of_project(self, doc, symbol, value):
        """
        Set a file name, uri or home artificat.
        Raise OrderError if no package or file defined.
        """
        # NOTE: the method name keeps the historical "atrificat" typo;
        # renaming it would break external callers.
        if self.has_package(doc) and self.has_file(doc):
            self.file(doc).add_artifact(symbol, value)
        else:
            raise OrderError('File::Artificat')

    def file(self, doc):
        """
        Return the last file in the document's package's file list.
        """
        return doc.package.files[-1]

    def has_file(self, doc):
        """
        Return true if the document's package has at least one file.
        Does not test if the document has a package.
        """
        return len(doc.package.files) != 0

    def has_package(self, doc):
        """
        Return true if the document has a package.
        """
        return doc.package is not None

    def reset_file_stat(self):
        """
        Reset the builder's state to enable building new files.
        """
        # FIXME: this state does not make sense
        self.file_spdx_id_set = False
        self.file_comment_set = False
        self.file_type_set = False
        self.file_chksum_set = False
        self.file_conc_lics_set = False
        self.file_license_comment_set = False
        self.file_notice_set = False
        self.file_copytext_set = False
class LicenseBuilder(object):
    def __init__(self):
        # FIXME: this state does not make sense
        self.reset_extr_lics()

    def extr_lic(self, doc):
        """Return the most recently added extracted license."""
        return doc.extracted_licenses[-1]

    def has_extr_lic(self, doc):
        """Return True when the document has at least one extracted license."""
        return bool(doc.extracted_licenses)

    def set_lic_id(self, doc, lic_id):
        """
        Begin a new extracted license identified by `lic_id`.
        Raise SPDXValueError if data format is incorrect.
        """
        # FIXME: this state does not make sense
        self.reset_extr_lics()
        if not validations.validate_extracted_lic_id(lic_id):
            raise SPDXValueError('ExtractedLicense::id')
        doc.add_extr_lic(document.ExtractedLicense(lic_id))
        return True

    def set_lic_text(self, doc, text):
        """
        Set the extracted text of the current license.
        Raise OrderError if no license ID defined.
        Raise CardinalityError if the text was already set.
        Raise SPDXValueError if text is not free form text.
        """
        if not self.has_extr_lic(doc):
            raise OrderError('ExtractedLicense::text')
        if self.extr_text_set:
            raise CardinalityError('ExtractedLicense::text')
        self.extr_text_set = True
        if not validations.validate_is_free_form_text(text):
            raise SPDXValueError('ExtractedLicense::text')
        self.extr_lic(doc).text = str_from_text(text)
        return True

    def set_lic_name(self, doc, name):
        """
        Set the full name of the current license.
        Raise OrderError if no license id defined.
        Raise CardinalityError if the name was already set.
        Raise SPDXValueError if name is not str or utils.NoAssert.
        """
        if not self.has_extr_lic(doc):
            raise OrderError('ExtractedLicense::Name')
        if self.extr_lic_name_set:
            raise CardinalityError('ExtractedLicense::Name')
        self.extr_lic_name_set = True
        if not validations.validate_extr_lic_name(name):
            raise SPDXValueError('ExtractedLicense::Name')
        self.extr_lic(doc).full_name = name
        return True

    def set_lic_comment(self, doc, comment):
        """
        Set the comment of the current license.
        Raise OrderError if no license ID defined.
        Raise CardinalityError if the comment was already set.
        Raise SPDXValueError if comment is not free form text.
        """
        if not self.has_extr_lic(doc):
            raise OrderError('ExtractedLicense::comment')
        if self.extr_lic_comment_set:
            raise CardinalityError('ExtractedLicense::comment')
        self.extr_lic_comment_set = True
        if not validations.validate_is_free_form_text(comment):
            raise SPDXValueError('ExtractedLicense::comment')
        self.extr_lic(doc).comment = str_from_text(comment)
        return True

    def add_lic_xref(self, doc, ref):
        """
        Add a cross reference to the current license.
        Raise OrderError if no License ID defined.
        """
        if not self.has_extr_lic(doc):
            raise OrderError('ExtractedLicense::CrossRef')
        self.extr_lic(doc).add_xref(ref)
        return True

    def reset_extr_lics(self):
        """Clear per-license state so a new extracted license can be built."""
        # FIXME: this state does not make sense
        self.extr_text_set = False
        self.extr_lic_name_set = False
        self.extr_lic_comment_set = False
class SnippetBuilder(object):
    """Builds the snippet entries of an SPDX document."""

    def __init__(self):
        # FIXME: this state does not make sense
        self.reset_snippet()

    def create_snippet(self, doc, spdx_id):
        """
        Create a snippet for the SPDX Document.
        spdx_id - To uniquely identify any element in an SPDX document which
        may be referenced by other elements.
        Raise SPDXValueError if the data is a malformed value.
        """
        self.reset_snippet()
        # The identifier may arrive as "DocRef#SPDXRef-..."; keep only the
        # local part after the '#'.
        spdx_id = spdx_id.split('#')[-1]
        if validations.validate_snippet_spdx_id(spdx_id):
            doc.add_snippet(snippet.Snippet(spdx_id=spdx_id))
            self.snippet_spdx_id_set = True
            return True
        else:
            raise SPDXValueError('Snippet::SnippetSPDXID')

    def set_snippet_name(self, doc, name):
        """
        Set name of the snippet.
        Raise OrderError if no snippet previously defined.
        Raise CardinalityError if the name is already set.
        """
        self.assert_snippet_exists()
        if not self.snippet_name_set:
            self.snippet_name_set = True
            doc.snippet[-1].name = name
            return True
        else:
            raise CardinalityError('SnippetName')

    def set_snippet_comment(self, doc, comment):
        """
        Set general comments about the snippet.
        Raise OrderError if no snippet previously defined.
        Raise SPDXValueError if the data is a malformed value.
        Raise CardinalityError if comment already set.
        """
        self.assert_snippet_exists()
        if not self.snippet_comment_set:
            self.snippet_comment_set = True
            if validations.validate_snip_comment(comment):
                doc.snippet[-1].comment = str_from_text(comment)
                return True
            else:
                raise SPDXValueError('Snippet::SnippetComment')
        else:
            raise CardinalityError('Snippet::SnippetComment')

    def set_snippet_copyright(self, doc, text):
        """Set the snippet's copyright text.
        Raise OrderError if no snippet previously defined.
        Raise CardinalityError if already set.
        Raise SPDXValueError if text is not one of [None, NOASSERT, TEXT].
        """
        self.assert_snippet_exists()
        if not self.snippet_copyright_set:
            self.snippet_copyright_set = True
            if validations.validate_snippet_copyright(text):
                if isinstance(text, string_types):
                    doc.snippet[-1].copyright = str_from_text(text)
                else:
                    doc.snippet[-1].copyright = text  # None or NoAssert
                # Return True on success for consistency with the other
                # setters in this builder.
                return True
            else:
                raise SPDXValueError('Snippet::SnippetCopyrightText')
        else:
            raise CardinalityError('Snippet::SnippetCopyrightText')

    def set_snippet_lic_comment(self, doc, text):
        """
        Set the snippet's license comment.
        Raise OrderError if no snippet previously defined.
        Raise CardinalityError if already set.
        Raise SPDXValueError if the data is a malformed value.
        """
        self.assert_snippet_exists()
        if not self.snippet_lic_comment_set:
            self.snippet_lic_comment_set = True
            if validations.validate_snip_lic_comment(text):
                doc.snippet[-1].license_comment = str_from_text(text)
                return True
            else:
                raise SPDXValueError('Snippet::SnippetLicenseComments')
        else:
            raise CardinalityError('Snippet::SnippetLicenseComments')

    def set_snip_from_file_spdxid(self, doc, snip_from_file_spdxid):
        """
        Set the snippet's 'Snippet from File SPDX Identifier'.
        Raise OrderError if no snippet previously defined.
        Raise CardinalityError if already set.
        Raise SPDXValueError if the data is a malformed value.
        """
        self.assert_snippet_exists()
        # Keep only the local identifier after an optional "DocRef#" prefix.
        snip_from_file_spdxid = snip_from_file_spdxid.split('#')[-1]
        if not self.snip_file_spdxid_set:
            self.snip_file_spdxid_set = True
            if validations.validate_snip_file_spdxid(snip_from_file_spdxid):
                doc.snippet[-1].snip_from_file_spdxid = snip_from_file_spdxid
                return True
            else:
                raise SPDXValueError('Snippet::SnippetFromFileSPDXID')
        else:
            raise CardinalityError('Snippet::SnippetFromFileSPDXID')

    def set_snip_concluded_license(self, doc, conc_lics):
        """
        Set the snippet's concluded license.
        Raise OrderError if no snippet previously defined.
        Raise CardinalityError if already set.
        Raise SPDXValueError if the data is a malformed value.
        """
        self.assert_snippet_exists()
        if not self.snippet_conc_lics_set:
            self.snippet_conc_lics_set = True
            if validations.validate_lics_conc(conc_lics):
                doc.snippet[-1].conc_lics = conc_lics
                return True
            else:
                raise SPDXValueError('Snippet::SnippetLicenseConcluded')
        else:
            raise CardinalityError('Snippet::SnippetLicenseConcluded')

    def set_snippet_lics_info(self, doc, lics_info):
        """
        Add a license found in the snippet (may repeat).
        Raise OrderError if no snippet previously defined.
        Raise SPDXValueError if the data is a malformed value.
        """
        self.assert_snippet_exists()
        if validations.validate_snip_lics_info(lics_info):
            doc.snippet[-1].add_lics(lics_info)
            return True
        else:
            raise SPDXValueError('Snippet::LicenseInfoInSnippet')

    def reset_snippet(self):
        """Clear per-snippet state so a new snippet can be built."""
        # FIXME: this state does not make sense
        self.snippet_spdx_id_set = False
        self.snippet_name_set = False
        self.snippet_comment_set = False
        self.snippet_copyright_set = False
        self.snippet_lic_comment_set = False
        self.snip_file_spdxid_set = False
        self.snippet_conc_lics_set = False

    def assert_snippet_exists(self):
        """Raise OrderError unless create_snippet() has been called."""
        if not self.snippet_spdx_id_set:
            raise OrderError('Snippet')
class Builder(DocBuilder, CreationInfoBuilder, EntityBuilder, ReviewBuilder,
              PackageBuilder, FileBuilder, LicenseBuilder, SnippetBuilder,
              ExternalDocumentRefBuilder, AnnotationBuilder):
    """
    SPDX document builder.

    Aggregates all of the per-section builders via multiple inheritance;
    each mixin contributes the setters for one part of the document.
    """

    def __init__(self):
        super(Builder, self).__init__()
        # FIXME: this state does not make sense
        self.reset()

    def reset(self):
        """
        Reset builder's state for building new documents.
        Must be called between usage with different documents.
        """
        # FIXME: this state does not make sense
        self.reset_creation_info()
        self.reset_document()
        self.reset_package()
        self.reset_file_stat()
        self.reset_reviews()
        self.reset_annotations()
        self.reset_extr_lics()
        self.reset_snippet()
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import re
from six import string_types
from spdx import annotation
from spdx import checksum
from spdx import creationinfo
from spdx import document
from spdx import file
from spdx import package
from spdx import review
from spdx import snippet
from spdx import utils
from spdx import version
from spdx.document import ExternalDocumentRef
from spdx.parsers.builderexceptions import CardinalityError
from spdx.parsers.builderexceptions import OrderError
from spdx.parsers.builderexceptions import SPDXValueError
from spdx.parsers import validations
def checksum_from_sha1(value):
    """
    Parse a tag-value SHA1 entry ("SHA1: <hex>") and return an
    spdx.checksum.Algorithm for it, or None if `value` does not match.
    """
    # More constrained regex at lexer level
    regex = re.compile(r'SHA1:\s*([\S]+)', re.UNICODE)
    matched = regex.match(value)
    if matched is None:
        return None
    return checksum.Algorithm(identifier='SHA1', value=matched.group(1))
def str_from_text(text):
    """
    Return the content of a <text>...</text> free form text block as a
    string, or None when the block is malformed.
    """
    TEXT_RE = re.compile('<text>((.|\n)+)</text>', re.UNICODE)
    found = TEXT_RE.match(text)
    return found.group(1) if found else None
class DocBuilder(object):
    """
    Set the fields of the top level document model.
    """
    VERS_STR_REGEX = re.compile(r'SPDX-(\d+)\.(\d+)', re.UNICODE)

    def __init__(self):
        # FIXME: this state does not make sense
        self.reset_document()

    def set_doc_version(self, doc, value):
        """
        Set the document version.
        Raise SPDXValueError if malformed value.
        Raise CardinalityError if already defined.
        """
        if not self.doc_version_set:
            self.doc_version_set = True
            m = self.VERS_STR_REGEX.match(value)
            if m is None:
                raise SPDXValueError('Document::Version')
            else:
                doc.version = version.Version(major=int(m.group(1)),
                                              minor=int(m.group(2)))
                return True
        else:
            raise CardinalityError('Document::Version')

    def set_doc_data_lics(self, doc, lics):
        """
        Set the document data license.
        Raise SPDXValueError if malformed value.
        Raise CardinalityError if already defined.
        """
        if not self.doc_data_lics_set:
            self.doc_data_lics_set = True
            if validations.validate_data_lics(lics):
                doc.data_license = document.License.from_identifier(lics)
                return True
            else:
                raise SPDXValueError('Document::DataLicense')
        else:
            raise CardinalityError('Document::DataLicense')

    def set_doc_name(self, doc, name):
        """
        Set the document name.
        Raise CardinalityError if already defined.
        """
        if not self.doc_name_set:
            doc.name = name
            self.doc_name_set = True
            return True
        else:
            raise CardinalityError('Document::Name')

    def set_doc_spdx_id(self, doc, doc_spdx_id_line):
        """
        Set the document SPDX Identifier.
        Raise SPDXValueError if malformed value.
        Raise CardinalityError if already defined.
        """
        if not self.doc_spdx_id_set:
            # The document identifier is fixed by the spec.
            if doc_spdx_id_line == 'SPDXRef-DOCUMENT':
                doc.spdx_id = doc_spdx_id_line
                self.doc_spdx_id_set = True
                return True
            else:
                raise SPDXValueError('Document::SPDXID')
        else:
            raise CardinalityError('Document::SPDXID')

    def set_doc_comment(self, doc, comment):
        """
        Set document comment.
        Raise CardinalityError if comment already set.
        Raise SPDXValueError if comment is not free form text.
        """
        if not self.doc_comment_set:
            self.doc_comment_set = True
            if validations.validate_doc_comment(comment):
                doc.comment = str_from_text(comment)
                return True
            else:
                raise SPDXValueError('Document::Comment')
        else:
            raise CardinalityError('Document::Comment')

    def set_doc_namespace(self, doc, namespace):
        """
        Set the document namespace.
        Raise SPDXValueError if malformed value.
        Raise CardinalityError if already defined.
        """
        if not self.doc_namespace_set:
            self.doc_namespace_set = True
            if validations.validate_doc_namespace(namespace):
                doc.namespace = namespace
                return True
            else:
                raise SPDXValueError('Document::Namespace')
        else:
            # BUGFIX: previously raised with the label 'Document::Comment',
            # which misreported a duplicate namespace as a duplicate comment.
            raise CardinalityError('Document::Namespace')

    def reset_document(self):
        """
        Reset the state to allow building new documents.
        """
        # FIXME: this state does not make sense
        self.doc_version_set = False
        self.doc_comment_set = False
        self.doc_namespace_set = False
        self.doc_data_lics_set = False
        self.doc_name_set = False
        self.doc_spdx_id_set = False
class ExternalDocumentRefBuilder(object):
    """Builder for `ExternalDocumentRef` entries on an SPDX document."""

    def set_ext_doc_id(self, doc, ext_doc_id):
        """
        Start a new external document reference with the given
        `external_document_id`.
        """
        new_ref = ExternalDocumentRef(external_document_id=ext_doc_id)
        doc.add_ext_document_reference(new_ref)

    def set_spdx_doc_uri(self, doc, spdx_doc_uri):
        """
        Set the `spdx_document_uri` attribute of the current
        `ExternalDocumentRef` object.
        Raise SPDXValueError if the URI is not a valid namespace.
        """
        if not validations.validate_doc_namespace(spdx_doc_uri):
            raise SPDXValueError('Document::ExternalDocumentRef')
        doc.ext_document_references[-1].spdx_document_uri = spdx_doc_uri

    def set_chksum(self, doc, chksum):
        """
        Set the `check_sum` attribute of the current
        `ExternalDocumentRef` object.
        """
        doc.ext_document_references[-1].check_sum = checksum_from_sha1(chksum)

    def add_ext_doc_refs(self, doc, ext_doc_id, spdx_doc_uri, chksum):
        """Add a complete external document reference in one call."""
        self.set_ext_doc_id(doc, ext_doc_id)
        self.set_spdx_doc_uri(doc, spdx_doc_uri)
        self.set_chksum(doc, chksum)
class EntityBuilder(object):
    """
    Build creation-info entities (Tool, Organization, Person) from their
    tag/value string representations.
    """
    tool_re = re.compile(r'Tool:\s*(.+)', re.UNICODE)
    person_re = re.compile(r'Person:\s*(([^(])+)(\((.*)\))?', re.UNICODE)
    org_re = re.compile(r'Organization:\s*(([^(])+)(\((.*)\))?', re.UNICODE)
    # Group indices into the regexes above: group 1 is the display name,
    # group 4 is the optional parenthesized e-mail address.
    PERSON_NAME_GROUP = 1
    PERSON_EMAIL_GROUP = 4
    ORG_NAME_GROUP = 1
    ORG_EMAIL_GROUP = 4
    TOOL_NAME_GROUP = 1

    def build_tool(self, doc, entity):
        """
        Build a tool object out of a string representation.
        Return built tool.
        Raise SPDXValueError if failed to extract tool name or name is malformed.
        """
        match = self.tool_re.match(entity)
        if match and validations.validate_tool_name(match.group(self.TOOL_NAME_GROUP)):
            name = match.group(self.TOOL_NAME_GROUP)
            return creationinfo.Tool(name)
        else:
            raise SPDXValueError('Failed to extract tool name')

    def build_org(self, doc, entity):
        """
        Build an organization object out of a string representation.
        Return built organization.
        Raise SPDXValueError if failed to extract name.
        """
        match = self.org_re.match(entity)
        if match and validations.validate_org_name(match.group(self.ORG_NAME_GROUP)):
            name = match.group(self.ORG_NAME_GROUP).strip()
            email = match.group(self.ORG_EMAIL_GROUP)
            if (email is not None) and (len(email) != 0):
                return creationinfo.Organization(name=name, email=email.strip())
            else:
                return creationinfo.Organization(name=name, email=None)
        else:
            raise SPDXValueError('Failed to extract Organization name')

    def build_person(self, doc, entity):
        """
        Build a person object out of a string representation.
        Return built person.
        Raise SPDXValueError if failed to extract name.
        """
        match = self.person_re.match(entity)
        if match and validations.validate_person_name(match.group(self.PERSON_NAME_GROUP)):
            name = match.group(self.PERSON_NAME_GROUP).strip()
            email = match.group(self.PERSON_EMAIL_GROUP)
            if (email is not None) and (len(email) != 0):
                return creationinfo.Person(name=name, email=email.strip())
            else:
                return creationinfo.Person(name=name, email=None)
        else:
            raise SPDXValueError('Failed to extract person name')
class CreationInfoBuilder(object):
    """Builder for the creation info section of an SPDX document."""

    def __init__(self):
        # FIXME: this state does not make sense
        self.reset_creation_info()

    def add_creator(self, doc, creator):
        """
        Add a creator to the document's creation info.
        Return True if creator is valid.
        Creator must be built by an EntityBuilder.
        Raise SPDXValueError if not a creator type.
        """
        if not validations.validate_creator(creator):
            raise SPDXValueError('CreationInfo::Creator')
        doc.creation_info.add_creator(creator)
        return True

    def set_created_date(self, doc, created):
        """
        Set created date.
        Raise CardinalityError if created date already set.
        Raise SPDXValueError if created is not a date.
        """
        if self.created_date_set:
            raise CardinalityError('CreationInfo::Created')
        self.created_date_set = True
        date = utils.datetime_from_iso_format(created)
        if date is None:
            raise SPDXValueError('CreationInfo::Date')
        doc.creation_info.created = date
        return True

    def set_creation_comment(self, doc, comment):
        """
        Set creation comment.
        Raise CardinalityError if comment already set.
        Raise SPDXValueError if not free form text.
        """
        if self.creation_comment_set:
            raise CardinalityError('CreationInfo::Comment')
        self.creation_comment_set = True
        if not validations.validate_creation_comment(comment):
            raise SPDXValueError('CreationInfo::Comment')
        doc.creation_info.comment = str_from_text(comment)
        return True

    def set_lics_list_ver(self, doc, value):
        """
        Set the license list version.
        Raise CardinalityError if already set.
        Raise SPDXValueError if incorrect value.
        """
        if self.lics_list_ver_set:
            raise CardinalityError('CreationInfo::LicenseListVersion')
        self.lics_list_ver_set = True
        vers = version.Version.from_str(value)
        if vers is None:
            raise SPDXValueError('CreationInfo::LicenseListVersion')
        doc.creation_info.license_list_version = vers
        return True

    def reset_creation_info(self):
        """
        Reset builder state to allow building new creation info.
        """
        # FIXME: this state does not make sense
        self.created_date_set = False
        self.creation_comment_set = False
        self.lics_list_ver_set = False
class ReviewBuilder(object):
    """Builder for review information attached to an SPDX document."""

    def __init__(self):
        # FIXME: this state does not make sense
        self.reset_reviews()

    def reset_reviews(self):
        """
        Reset the builder's state to allow building new reviews.
        """
        # FIXME: this state does not make sense
        self.review_date_set = False
        self.review_comment_set = False

    def add_reviewer(self, doc, reviewer):
        """
        Add a reviewer to the SPDX Document.
        Reviewer is an entity created by an EntityBuilder.
        Raise SPDXValueError if not a valid reviewer type.
        """
        # Each reviewer marks the start of a new review object.
        # FIXME: this state does not make sense
        self.reset_reviews()
        if not validations.validate_reviewer(reviewer):
            raise SPDXValueError('Review::Reviewer')
        doc.add_review(review.Review(reviewer=reviewer))
        return True

    def add_review_date(self, doc, reviewed):
        """
        Set the review date.
        Raise CardinalityError if already set.
        Raise OrderError if no reviewer defined before.
        Raise SPDXValueError if invalid reviewed value.
        """
        if not doc.reviews:
            raise OrderError('Review::ReviewDate')
        if self.review_date_set:
            raise CardinalityError('Review::ReviewDate')
        self.review_date_set = True
        date = utils.datetime_from_iso_format(reviewed)
        if date is None:
            raise SPDXValueError('Review::ReviewDate')
        doc.reviews[-1].review_date = date
        return True

    def add_review_comment(self, doc, comment):
        """
        Set the review comment.
        Raise CardinalityError if already set.
        Raise OrderError if no reviewer defined before.
        Raise SPDXValueError if comment is not free form text.
        """
        if not doc.reviews:
            raise OrderError('ReviewComment')
        if self.review_comment_set:
            raise CardinalityError('ReviewComment')
        self.review_comment_set = True
        if not validations.validate_review_comment(comment):
            raise SPDXValueError('ReviewComment::Comment')
        doc.reviews[-1].comment = str_from_text(comment)
        return True
class AnnotationBuilder(object):
    """Builder for annotations attached to an SPDX document."""

    def __init__(self):
        # FIXME: this state does not make sense
        self.reset_annotations()

    def reset_annotations(self):
        """
        Reset the builder's state to allow building new annotations.
        """
        # FIXME: this state does not make sense
        self.annotation_date_set = False
        self.annotation_comment_set = False
        self.annotation_type_set = False
        self.annotation_spdx_id_set = False

    def add_annotator(self, doc, annotator):
        """
        Add an annotator to the SPDX Document.
        Annotator is an entity created by an EntityBuilder.
        Raise SPDXValueError if not a valid annotator type.
        """
        # Each annotator marks the start of a new annotation object.
        # FIXME: this state does not make sense
        self.reset_annotations()
        if not validations.validate_annotator(annotator):
            raise SPDXValueError('Annotation::Annotator')
        doc.add_annotation(annotation.Annotation(annotator=annotator))
        return True

    def add_annotation_date(self, doc, annotation_date):
        """
        Set the annotation date.
        Raise CardinalityError if already set.
        Raise OrderError if no annotator defined before.
        Raise SPDXValueError if invalid value.
        """
        if not doc.annotations:
            raise OrderError('Annotation::AnnotationDate')
        if self.annotation_date_set:
            raise CardinalityError('Annotation::AnnotationDate')
        self.annotation_date_set = True
        date = utils.datetime_from_iso_format(annotation_date)
        if date is None:
            raise SPDXValueError('Annotation::AnnotationDate')
        doc.annotations[-1].annotation_date = date
        return True

    def add_annotation_comment(self, doc, comment):
        """
        Set the annotation comment.
        Raise CardinalityError if already set.
        Raise OrderError if no annotator defined before.
        Raise SPDXValueError if comment is not free form text.
        """
        if not doc.annotations:
            raise OrderError('AnnotationComment::Comment')
        if self.annotation_comment_set:
            raise CardinalityError('AnnotationComment::Comment')
        self.annotation_comment_set = True
        if not validations.validate_annotation_comment(comment):
            raise SPDXValueError('AnnotationComment::Comment')
        doc.annotations[-1].comment = str_from_text(comment)
        return True

    def add_annotation_type(self, doc, annotation_type):
        """
        Set the annotation type.
        Raise CardinalityError if already set.
        Raise OrderError if no annotator defined before.
        Raise SPDXValueError if invalid value.
        """
        if not doc.annotations:
            raise OrderError('Annotation::AnnotationType')
        if self.annotation_type_set:
            raise CardinalityError('Annotation::AnnotationType')
        self.annotation_type_set = True
        if not validations.validate_annotation_type(annotation_type):
            raise SPDXValueError('Annotation::AnnotationType')
        doc.annotations[-1].annotation_type = annotation_type
        return True

    def set_annotation_spdx_id(self, doc, spdx_id):
        """
        Set the annotation SPDX Identifier.
        Raise CardinalityError if already set.
        Raise OrderError if no annotator defined before.
        """
        if not doc.annotations:
            raise OrderError('Annotation::SPDXREF')
        if self.annotation_spdx_id_set:
            raise CardinalityError('Annotation::SPDXREF')
        self.annotation_spdx_id_set = True
        doc.annotations[-1].spdx_id = spdx_id
        return True
class PackageBuilder(object):
    """Builder for the (single) package section of an SPDX document."""
    VERIF_CODE_REGEX = re.compile(r"([0-9a-f]+)\s*(\(\s*(.+)\))?", re.UNICODE)
    VERIF_CODE_CODE_GRP = 1
    VERIF_CODE_EXC_FILES_GRP = 3

    def __init__(self):
        # FIXME: this state does not make sense
        self.reset_package()

    def reset_package(self):
        """Resets the builder's state in order to build new packages."""
        # FIXME: this state does not make sense
        self.package_set = False
        self.package_spdx_id_set = False
        self.package_vers_set = False
        self.package_file_name_set = False
        self.package_supplier_set = False
        self.package_originator_set = False
        self.package_down_location_set = False
        self.package_files_analyzed_set = False
        self.package_home_set = False
        self.package_verif_set = False
        self.package_chk_sum_set = False
        self.package_source_info_set = False
        self.package_conc_lics_set = False
        self.package_license_declared_set = False
        self.package_license_comment_set = False
        self.package_cr_text_set = False
        self.package_summary_set = False
        self.package_desc_set = False
        self.package_comment_set = False
        self.pkg_ext_comment_set = False

    def create_package(self, doc, name):
        """
        Create a package for the SPDX Document.
        name - any string.
        Raise CardinalityError if package already defined.
        """
        if not self.package_set:
            self.package_set = True
            doc.package = package.Package(name=name)
            return True
        else:
            raise CardinalityError('Package::Name')

    def set_pkg_spdx_id(self, doc, spdx_id):
        """
        Set the Package SPDX Identifier.
        Raise SPDXValueError if malformed value.
        Raise CardinalityError if already defined.
        """
        self.assert_package_exists()
        if not self.package_spdx_id_set:
            if validations.validate_pkg_spdx_id(spdx_id):
                doc.package.spdx_id = spdx_id
                self.package_spdx_id_set = True
                return True
            else:
                raise SPDXValueError('Package::SPDXID')
        else:
            raise CardinalityError('Package::SPDXID')

    def set_pkg_vers(self, doc, version):
        """
        Set package version, if not already set.
        version - Any string.
        Raise CardinalityError if already has a version.
        Raise OrderError if no package previously defined.
        """
        self.assert_package_exists()
        if not self.package_vers_set:
            self.package_vers_set = True
            doc.package.version = version
            return True
        else:
            raise CardinalityError('Package::Version')

    def set_pkg_file_name(self, doc, name):
        """
        Set the package file name, if not already set.
        name - Any string.
        Raise CardinalityError if already has a file_name.
        Raise OrderError if no package previously defined.
        """
        self.assert_package_exists()
        if not self.package_file_name_set:
            self.package_file_name_set = True
            doc.package.file_name = name
            return True
        else:
            raise CardinalityError('Package::FileName')

    def set_pkg_supplier(self, doc, entity):
        """
        Set the package supplier, if not already set.
        entity - Organization, Person or NoAssert.
        Raise CardinalityError if already has a supplier.
        Raise OrderError if no package previously defined.
        """
        self.assert_package_exists()
        if not self.package_supplier_set:
            self.package_supplier_set = True
            if validations.validate_pkg_supplier(entity):
                doc.package.supplier = entity
                return True
            else:
                raise SPDXValueError('Package::Supplier')
        else:
            raise CardinalityError('Package::Supplier')

    def set_pkg_originator(self, doc, entity):
        """
        Set the package originator, if not already set.
        entity - Organization, Person or NoAssert.
        Raise CardinalityError if already has an originator.
        Raise OrderError if no package previously defined.
        """
        self.assert_package_exists()
        if not self.package_originator_set:
            self.package_originator_set = True
            if validations.validate_pkg_originator(entity):
                doc.package.originator = entity
                return True
            else:
                raise SPDXValueError('Package::Originator')
        else:
            raise CardinalityError('Package::Originator')

    def set_pkg_down_location(self, doc, location):
        """
        Set the package download location, if not already set.
        location - A string
        Raise CardinalityError if already defined.
        Raise OrderError if no package previously defined.
        """
        self.assert_package_exists()
        if not self.package_down_location_set:
            self.package_down_location_set = True
            doc.package.download_location = location
            return True
        else:
            raise CardinalityError('Package::DownloadLocation')

    def set_pkg_files_analyzed(self, doc, files_analyzed):
        """
        Set the package files analyzed, if not already set.
        Raise SPDXValueError if malformed value, CardinalityError if
        already defined.
        """
        # NOTE: a falsy files_analyzed value is silently ignored (neither
        # stored nor counted against cardinality) — preserved behavior.
        self.assert_package_exists()
        if not self.package_files_analyzed_set:
            if files_analyzed:
                if validations.validate_pkg_files_analyzed(files_analyzed):
                    self.package_files_analyzed_set = True
                    doc.package.files_analyzed = files_analyzed
                    # BUGFIX: removed leftover debug print of files_analyzed.
                    return True
                else:
                    raise SPDXValueError('Package::FilesAnalyzed')
        else:
            raise CardinalityError('Package::FilesAnalyzed')

    def set_pkg_home(self, doc, location):
        """Set the package homepage location if not already set.
        location - A string or None or NoAssert.
        Raise CardinalityError if already defined.
        Raise OrderError if no package previously defined.
        Raise SPDXValueError if location has incorrect value.
        """
        self.assert_package_exists()
        if not self.package_home_set:
            self.package_home_set = True
            if validations.validate_pkg_homepage(location):
                doc.package.homepage = location
                return True
            else:
                raise SPDXValueError('Package::HomePage')
        else:
            raise CardinalityError('Package::HomePage')

    def set_pkg_verif_code(self, doc, code):
        """
        Set the package verification code, if not already set.
        code - A string.
        Raise CardinalityError if already defined.
        Raise OrderError if no package previously defined.
        Raise SPDXValueError if doesn't match verification code form.
        """
        self.assert_package_exists()
        if not self.package_verif_set:
            self.package_verif_set = True
            match = self.VERIF_CODE_REGEX.match(code)
            if match:
                doc.package.verif_code = match.group(self.VERIF_CODE_CODE_GRP)
                if match.group(self.VERIF_CODE_EXC_FILES_GRP) is not None:
                    doc.package.verif_exc_files = match.group(self.VERIF_CODE_EXC_FILES_GRP).split(',')
                return True
            else:
                raise SPDXValueError('Package::VerificationCode')
        else:
            raise CardinalityError('Package::VerificationCode')

    def set_pkg_chk_sum(self, doc, chk_sum):
        """
        Set the package check sum, if not already set.
        chk_sum - A string
        Raise CardinalityError if already defined.
        Raise OrderError if no package previously defined.
        """
        self.assert_package_exists()
        if not self.package_chk_sum_set:
            self.package_chk_sum_set = True
            doc.package.check_sum = checksum_from_sha1(chk_sum)
            return True
        else:
            raise CardinalityError('Package::CheckSum')

    def set_pkg_source_info(self, doc, text):
        """
        Set the package's source information, if not already set.
        text - Free form text.
        Raise CardinalityError if already defined.
        Raise OrderError if no package previously defined.
        SPDXValueError if text is not free form text.
        """
        self.assert_package_exists()
        if not self.package_source_info_set:
            self.package_source_info_set = True
            if validations.validate_pkg_src_info(text):
                doc.package.source_info = str_from_text(text)
                return True
            else:
                # BUGFIX: corrected misspelled 'Pacckage::SourceInfo' label.
                raise SPDXValueError('Package::SourceInfo')
        else:
            raise CardinalityError('Package::SourceInfo')

    def set_pkg_licenses_concluded(self, doc, licenses):
        """
        Set the package's concluded licenses.
        licenses - License info.
        Raise CardinalityError if already defined.
        Raise OrderError if no package previously defined.
        Raise SPDXValueError if data malformed.
        """
        self.assert_package_exists()
        if not self.package_conc_lics_set:
            self.package_conc_lics_set = True
            if validations.validate_lics_conc(licenses):
                doc.package.conc_lics = licenses
                return True
            else:
                raise SPDXValueError('Package::ConcludedLicenses')
        else:
            raise CardinalityError('Package::ConcludedLicenses')

    def set_pkg_license_from_file(self, doc, lic):
        """
        Add a license from a file to the package.
        Raise SPDXValueError if data malformed.
        Raise OrderError if no package previously defined.
        """
        self.assert_package_exists()
        if validations.validate_lics_from_file(lic):
            doc.package.licenses_from_files.append(lic)
            return True
        else:
            raise SPDXValueError('Package::LicensesFromFile')

    def set_pkg_license_declared(self, doc, lic):
        """
        Set the package's declared license.
        Raise SPDXValueError if data malformed.
        Raise OrderError if no package previously defined.
        Raise CardinalityError if already set.
        """
        self.assert_package_exists()
        if not self.package_license_declared_set:
            self.package_license_declared_set = True
            if validations.validate_lics_conc(lic):
                doc.package.license_declared = lic
                return True
            else:
                raise SPDXValueError('Package::LicenseDeclared')
        else:
            raise CardinalityError('Package::LicenseDeclared')

    def set_pkg_license_comment(self, doc, text):
        """
        Set the package's license comment.
        Raise OrderError if no package previously defined.
        Raise CardinalityError if already set.
        Raise SPDXValueError if text is not free form text.
        """
        self.assert_package_exists()
        if not self.package_license_comment_set:
            self.package_license_comment_set = True
            if validations.validate_pkg_lics_comment(text):
                doc.package.license_comment = str_from_text(text)
                return True
            else:
                raise SPDXValueError('Package::LicenseComment')
        else:
            raise CardinalityError('Package::LicenseComment')

    def set_pkg_cr_text(self, doc, text):
        """
        Set the package's copyright text.
        Raise OrderError if no package previously defined.
        Raise CardinalityError if already set.
        Raise SPDXValueError if text is not one of [None, NOASSERT, TEXT].
        """
        self.assert_package_exists()
        if not self.package_cr_text_set:
            self.package_cr_text_set = True
            if validations.validate_pkg_cr_text(text):
                if isinstance(text, string_types):
                    doc.package.cr_text = str_from_text(text)
                else:
                    doc.package.cr_text = text  # None or NoAssert
            else:
                raise SPDXValueError('Package::CopyrightText')
        else:
            raise CardinalityError('Package::CopyrightText')

    def set_pkg_summary(self, doc, text):
        """
        Set the package summary.
        Raise SPDXValueError if text is not free form text.
        Raise CardinalityError if summary already set.
        Raise OrderError if no package previously defined.
        """
        self.assert_package_exists()
        if not self.package_summary_set:
            self.package_summary_set = True
            if validations.validate_pkg_summary(text):
                doc.package.summary = str_from_text(text)
            else:
                raise SPDXValueError('Package::Summary')
        else:
            raise CardinalityError('Package::Summary')

    def set_pkg_desc(self, doc, text):
        """
        Set the package's description.
        Raise SPDXValueError if text is not free form text.
        Raise CardinalityError if description already set.
        Raise OrderError if no package previously defined.
        """
        self.assert_package_exists()
        if not self.package_desc_set:
            self.package_desc_set = True
            if validations.validate_pkg_desc(text):
                doc.package.description = str_from_text(text)
            else:
                raise SPDXValueError('Package::Description')
        else:
            raise CardinalityError('Package::Description')

    def set_pkg_comment(self, doc, text):
        """
        Set the package's comment.
        Raise SPDXValueError if text is not free form text.
        Raise CardinalityError if comment already set.
        Raise OrderError if no package previously defined.
        """
        self.assert_package_exists()
        if not self.package_comment_set:
            self.package_comment_set = True
            if validations.validate_pkg_comment(text):
                doc.package.comment = str_from_text(text)
            else:
                raise SPDXValueError('Package::Comment')
        else:
            raise CardinalityError('Package::Comment')

    def set_pkg_ext_ref_category(self, doc, category):
        """
        Set the `category` attribute of the `ExternalPackageRef` object.
        """
        self.assert_package_exists()
        if validations.validate_pkg_ext_ref_category(category):
            # Fill the open ref if its category is still empty, otherwise
            # start a new one.
            if (len(doc.package.pkg_ext_refs) and
                    doc.package.pkg_ext_refs[-1].category is None):
                doc.package.pkg_ext_refs[-1].category = category
            else:
                doc.package.add_pkg_ext_refs(
                    package.ExternalPackageRef(category=category))
        else:
            raise SPDXValueError('ExternalRef::Category')

    def set_pkg_ext_ref_type(self, doc, pkg_ext_ref_type):
        """
        Set the `pkg_ext_ref_type` attribute of the `ExternalPackageRef` object.
        """
        self.assert_package_exists()
        if validations.validate_pkg_ext_ref_type(pkg_ext_ref_type):
            if (len(doc.package.pkg_ext_refs) and
                    doc.package.pkg_ext_refs[-1].pkg_ext_ref_type is None):
                doc.package.pkg_ext_refs[-1].pkg_ext_ref_type = pkg_ext_ref_type
            else:
                doc.package.add_pkg_ext_refs(package.ExternalPackageRef(
                    pkg_ext_ref_type=pkg_ext_ref_type))
        else:
            raise SPDXValueError('ExternalRef::Type')

    def set_pkg_ext_ref_locator(self, doc, locator):
        """
        Set the `locator` attribute of the `ExternalPackageRef` object.
        """
        self.assert_package_exists()
        if (len(doc.package.pkg_ext_refs) and
                doc.package.pkg_ext_refs[-1].locator is None):
            doc.package.pkg_ext_refs[-1].locator = locator
        else:
            doc.package.add_pkg_ext_refs(package.ExternalPackageRef(
                locator=locator))

    def add_pkg_ext_ref_comment(self, doc, comment):
        """
        Set the `comment` attribute of the `ExternalPackageRef` object.
        Raise OrderError if no external reference defined before.
        """
        self.assert_package_exists()
        if not len(doc.package.pkg_ext_refs):
            raise OrderError('Package::ExternalRef')
        else:
            if validations.validate_pkg_ext_ref_comment(comment):
                doc.package.pkg_ext_refs[-1].comment = str_from_text(comment)
            else:
                raise SPDXValueError('ExternalRef::Comment')

    def add_pkg_ext_refs(self, doc, category, pkg_ext_ref_type, locator):
        """Add a complete external package reference in one call."""
        self.set_pkg_ext_ref_category(doc, category)
        self.set_pkg_ext_ref_type(doc, pkg_ext_ref_type)
        self.set_pkg_ext_ref_locator(doc, locator)

    def assert_package_exists(self):
        """Raise OrderError if no package has been created yet."""
        if not self.package_set:
            raise OrderError('Package')
class FileBuilder(object):
def __init__(self):
# FIXME: this state does not make sense
self.reset_file_stat()
def set_file_name(self, doc, name):
"""
Raise OrderError if no package defined.
"""
if self.has_package(doc):
doc.package.files.append(file.File(name))
# A file name marks the start of a new file instance.
# The builder must be reset
# FIXME: this state does not make sense
self.reset_file_stat()
return True
else:
raise OrderError('File::Name')
def set_file_spdx_id(self, doc, spdx_id):
"""
Set the file SPDX Identifier.
Raise OrderError if no package or no file defined.
Raise SPDXValueError if malformed value.
Raise CardinalityError if more than one spdx_id set.
"""
if self.has_package(doc) and self.has_file(doc):
if not self.file_spdx_id_set:
self.file_spdx_id_set = True
if validations.validate_file_spdx_id(spdx_id):
self.file(doc).spdx_id = spdx_id
return True
else:
raise SPDXValueError('File::SPDXID')
else:
raise CardinalityError('File::SPDXID')
else:
raise OrderError('File::SPDXID')
def set_file_comment(self, doc, text):
"""
Raise OrderError if no package or no file defined.
Raise CardinalityError if more than one comment set.
Raise SPDXValueError if text is not free form text.
"""
if self.has_package(doc) and self.has_file(doc):
if not self.file_comment_set:
self.file_comment_set = True
if validations.validate_file_comment(text):
self.file(doc).comment = str_from_text(text)
return True
else:
raise SPDXValueError('File::Comment')
else:
raise CardinalityError('File::Comment')
else:
raise OrderError('File::Comment')
def set_file_type(self, doc, type_value):
"""
Raise OrderError if no package or file defined.
Raise CardinalityError if more than one type set.
Raise SPDXValueError if type is unknown.
"""
type_dict = {
'SOURCE': file.FileType.SOURCE,
'BINARY': file.FileType.BINARY,
'ARCHIVE': file.FileType.ARCHIVE,
'OTHER': file.FileType.OTHER
}
if self.has_package(doc) and self.has_file(doc):
if not self.file_type_set:
self.file_type_set = True
if type_value in type_dict.keys():
self.file(doc).type = type_dict[type_value]
return True
else:
raise SPDXValueError('File::Type')
else:
raise CardinalityError('File::Type')
else:
raise OrderError('File::Type')
def set_file_chksum(self, doc, chksum):
"""
Raise OrderError if no package or file defined.
Raise CardinalityError if more than one chksum set.
"""
if self.has_package(doc) and self.has_file(doc):
if not self.file_chksum_set:
self.file_chksum_set = True
self.file(doc).chk_sum = checksum_from_sha1(chksum)
return True
else:
raise CardinalityError('File::CheckSum')
else:
raise OrderError('File::CheckSum')
def set_concluded_license(self, doc, lic):
"""
Raise OrderError if no package or file defined.
Raise CardinalityError if already set.
Raise SPDXValueError if malformed.
"""
if self.has_package(doc) and self.has_file(doc):
if not self.file_conc_lics_set:
self.file_conc_lics_set = True
if validations.validate_lics_conc(lic):
self.file(doc).conc_lics = lic
return True
else:
raise SPDXValueError('File::ConcludedLicense')
else:
raise CardinalityError('File::ConcludedLicense')
else:
raise OrderError('File::ConcludedLicense')
def set_file_license_in_file(self, doc, lic):
"""
Raise OrderError if no package or file defined.
Raise SPDXValueError if malformed value.
"""
if self.has_package(doc) and self.has_file(doc):
if validations.validate_file_lics_in_file(lic):
self.file(doc).add_lics(lic)
return True
else:
raise SPDXValueError('File::LicenseInFile')
else:
raise OrderError('File::LicenseInFile')
def set_file_license_comment(self, doc, text):
"""
Raise OrderError if no package or file defined.
Raise SPDXValueError if text is not free form text.
Raise CardinalityError if more than one per file.
"""
if self.has_package(doc) and self.has_file(doc):
if not self.file_license_comment_set:
self.file_license_comment_set = True
if validations.validate_file_lics_comment(text):
self.file(doc).license_comment = str_from_text(text)
else:
raise SPDXValueError('File::LicenseComment')
else:
raise CardinalityError('File::LicenseComment')
else:
raise OrderError('File::LicenseComment')
def set_file_copyright(self, doc, text):
"""
Raise OrderError if no package or file defined.
Raise SPDXValueError if not free form text or NONE or NO_ASSERT.
Raise CardinalityError if more than one.
"""
if self.has_package(doc) and self.has_file(doc):
if not self.file_copytext_set:
self.file_copytext_set = True
if validations.validate_file_cpyright(text):
if isinstance(text, string_types):
self.file(doc).copyright = str_from_text(text)
else:
self.file(doc).copyright = text # None or NoAssert
return True
else:
raise SPDXValueError('File::CopyRight')
else:
raise CardinalityError('File::CopyRight')
else:
raise OrderError('File::CopyRight')
def set_file_notice(self, doc, text):
"""
Raise OrderError if no package or file defined.
Raise SPDXValueError if not free form text.
Raise CardinalityError if more than one.
"""
if self.has_package(doc) and self.has_file(doc):
if not self.file_notice_set:
self.file_notice_set = True
if validations.validate_file_notice(text):
self.file(doc).notice = str_from_text(text)
else:
raise SPDXValueError('File::Notice')
else:
raise CardinalityError('File::Notice')
else:
raise OrderError('File::Notice')
def add_file_contribution(self, doc, value):
"""
Raise OrderError if no package or file defined.
"""
if self.has_package(doc) and self.has_file(doc):
self.file(doc).add_contrib(value)
else:
raise OrderError('File::Contributor')
def add_file_dep(self, doc, value):
"""
Raise OrderError if no package or file defined.
"""
if self.has_package(doc) and self.has_file(doc):
self.file(doc).add_depend(value)
else:
raise OrderError('File::Dependency')
def set_file_atrificat_of_project(self, doc, symbol, value):
    """
    Attach an artifact-of-project field (name, uri or home) to the file.

    NOTE(review): "atrificat" is a typo for "artifact", but the name is
    part of the public builder interface, so it is kept as-is.
    Raise OrderError if no package or file defined.
    """
    if not (self.has_package(doc) and self.has_file(doc)):
        raise OrderError('File::Artificat')
    self.file(doc).add_artifact(symbol, value)
def file(self, doc):
    """
    Return the file most recently added to the document's package.
    """
    package_files = doc.package.files
    return package_files[-1]
def has_file(self, doc):
    """
    Return True if the document's package contains at least one file.

    Does not check whether the document has a package at all.
    """
    return bool(doc.package.files)
def has_package(self, doc):
    """
    Return True if a package has been attached to the document.
    """
    return doc.package is not None
def reset_file_stat(self):
    """
    Clear the per-file cardinality flags so a new file can be built.
    """
    # FIXME: this state does not make sense
    for flag in ('file_spdx_id_set', 'file_comment_set', 'file_type_set',
                 'file_chksum_set', 'file_conc_lics_set',
                 'file_license_comment_set', 'file_notice_set',
                 'file_copytext_set'):
        setattr(self, flag, False)
class LicenseBuilder(object):
    """Builds ExtractedLicense entries on an SPDX document."""

    def __init__(self):
        # FIXME: this state does not make sense
        self.reset_extr_lics()

    def extr_lic(self, doc):
        """Return the license most recently added to the document."""
        return doc.extracted_licenses[-1]

    def has_extr_lic(self, doc):
        """Return True if the document holds at least one extracted license."""
        return len(doc.extracted_licenses) != 0

    def set_lic_id(self, doc, lic_id):
        """
        Add a new extracted license with identifier lic_id to the document.
        Raise SPDXValueError if the identifier format is incorrect.
        """
        # FIXME: this state does not make sense
        self.reset_extr_lics()
        if not validations.validate_extracted_lic_id(lic_id):
            raise SPDXValueError('ExtractedLicense::id')
        doc.add_extr_lic(document.ExtractedLicense(lic_id))
        return True

    def set_lic_text(self, doc, text):
        """
        Set the extracted text of the current license.
        Raise OrderError if no license ID defined.
        Raise CardinalityError if the text was already set.
        Raise SPDXValueError if text is not free form text.
        """
        if not self.has_extr_lic(doc):
            raise OrderError('ExtractedLicense::text')
        if self.extr_text_set:
            raise CardinalityError('ExtractedLicense::text')
        self.extr_text_set = True
        if not validations.validate_is_free_form_text(text):
            raise SPDXValueError('ExtractedLicense::text')
        self.extr_lic(doc).text = str_from_text(text)
        return True

    def set_lic_name(self, doc, name):
        """
        Set the full name of the current license.
        Raise OrderError if no license id defined.
        Raise CardinalityError if the name was already set.
        Raise SPDXValueError if name is not str or utils.NoAssert.
        """
        if not self.has_extr_lic(doc):
            raise OrderError('ExtractedLicense::Name')
        if self.extr_lic_name_set:
            raise CardinalityError('ExtractedLicense::Name')
        self.extr_lic_name_set = True
        if not validations.validate_extr_lic_name(name):
            raise SPDXValueError('ExtractedLicense::Name')
        self.extr_lic(doc).full_name = name
        return True

    def set_lic_comment(self, doc, comment):
        """
        Set the comment of the current license.
        Raise OrderError if no license ID defined.
        Raise CardinalityError if the comment was already set.
        Raise SPDXValueError if comment is not free form text.
        """
        if not self.has_extr_lic(doc):
            raise OrderError('ExtractedLicense::comment')
        if self.extr_lic_comment_set:
            raise CardinalityError('ExtractedLicense::comment')
        self.extr_lic_comment_set = True
        if not validations.validate_is_free_form_text(comment):
            raise SPDXValueError('ExtractedLicense::comment')
        self.extr_lic(doc).comment = str_from_text(comment)
        return True

    def add_lic_xref(self, doc, ref):
        """
        Add a cross reference to the current license.
        Raise OrderError if no License ID defined.
        """
        if not self.has_extr_lic(doc):
            raise OrderError('ExtractedLicense::CrossRef')
        self.extr_lic(doc).add_xref(ref)
        return True

    def reset_extr_lics(self):
        """Clear the per-license cardinality flags."""
        # FIXME: this state does not make sense
        self.extr_text_set = False
        self.extr_lic_name_set = False
        self.extr_lic_comment_set = False
class SnippetBuilder(object):
    """Builds Snippet entries on an SPDX document."""

    def __init__(self):
        # FIXME: this state does not make sense
        self.reset_snippet()

    def create_snippet(self, doc, spdx_id):
        """
        Create a snippet for the SPDX Document.
        spdx_id - To uniquely identify any element in an SPDX document which
        may be referenced by other elements.
        Raise SPDXValueError if the data is a malformed value.
        """
        self.reset_snippet()
        # Accept "DocumentRef#SPDXRef-..." forms; keep only the local part.
        spdx_id = spdx_id.split('#')[-1]
        if not validations.validate_snippet_spdx_id(spdx_id):
            raise SPDXValueError('Snippet::SnippetSPDXID')
        doc.add_snippet(snippet.Snippet(spdx_id=spdx_id))
        self.snippet_spdx_id_set = True
        return True

    def set_snippet_name(self, doc, name):
        """
        Set name of the snippet.
        Raise OrderError if no snippet previously defined.
        Raise CardinalityError if the name is already set.
        """
        self.assert_snippet_exists()
        if self.snippet_name_set:
            raise CardinalityError('SnippetName')
        self.snippet_name_set = True
        doc.snippet[-1].name = name
        return True

    def set_snippet_comment(self, doc, comment):
        """
        Set general comments about the snippet.
        Raise OrderError if no snippet previously defined.
        Raise SPDXValueError if the data is a malformed value.
        Raise CardinalityError if comment already set.
        """
        self.assert_snippet_exists()
        if self.snippet_comment_set:
            raise CardinalityError('Snippet::SnippetComment')
        self.snippet_comment_set = True
        if not validations.validate_snip_comment(comment):
            raise SPDXValueError('Snippet::SnippetComment')
        doc.snippet[-1].comment = str_from_text(comment)
        return True

    def set_snippet_copyright(self, doc, text):
        """
        Set the snippet's copyright text.
        Return True on success.  FIX: the original fell through and returned
        None here, unlike every sibling setter in this class.
        Raise OrderError if no snippet previously defined.
        Raise CardinalityError if already set.
        Raise SPDXValueError if text is not one of [None, NOASSERT, TEXT].
        """
        self.assert_snippet_exists()
        if self.snippet_copyright_set:
            raise CardinalityError('Snippet::SnippetCopyrightText')
        self.snippet_copyright_set = True
        if not validations.validate_snippet_copyright(text):
            raise SPDXValueError('Snippet::SnippetCopyrightText')
        if isinstance(text, string_types):
            doc.snippet[-1].copyright = str_from_text(text)
        else:
            doc.snippet[-1].copyright = text  # None or NoAssert
        return True

    def set_snippet_lic_comment(self, doc, text):
        """
        Set the snippet's license comment.
        Raise OrderError if no snippet previously defined.
        Raise CardinalityError if already set.
        Raise SPDXValueError if the data is a malformed value.
        """
        self.assert_snippet_exists()
        if self.snippet_lic_comment_set:
            raise CardinalityError('Snippet::SnippetLicenseComments')
        self.snippet_lic_comment_set = True
        if not validations.validate_snip_lic_comment(text):
            raise SPDXValueError('Snippet::SnippetLicenseComments')
        doc.snippet[-1].license_comment = str_from_text(text)
        return True

    def set_snip_from_file_spdxid(self, doc, snip_from_file_spdxid):
        """
        Set the snippet's 'Snippet from File SPDX Identifier'.
        Raise OrderError if no snippet previously defined.
        Raise CardinalityError if already set.
        Raise SPDXValueError if the data is a malformed value.
        """
        self.assert_snippet_exists()
        # Strip a leading document reference, as in create_snippet.
        snip_from_file_spdxid = snip_from_file_spdxid.split('#')[-1]
        if self.snip_file_spdxid_set:
            raise CardinalityError('Snippet::SnippetFromFileSPDXID')
        self.snip_file_spdxid_set = True
        if not validations.validate_snip_file_spdxid(snip_from_file_spdxid):
            raise SPDXValueError('Snippet::SnippetFromFileSPDXID')
        doc.snippet[-1].snip_from_file_spdxid = snip_from_file_spdxid
        return True

    def set_snip_concluded_license(self, doc, conc_lics):
        """
        Set the snippet's concluded license.
        Raise OrderError if no snippet previously defined.
        Raise CardinalityError if already set.
        Raise SPDXValueError if the data is a malformed value.
        """
        self.assert_snippet_exists()
        if self.snippet_conc_lics_set:
            raise CardinalityError('Snippet::SnippetLicenseConcluded')
        self.snippet_conc_lics_set = True
        if not validations.validate_lics_conc(conc_lics):
            raise SPDXValueError('Snippet::SnippetLicenseConcluded')
        doc.snippet[-1].conc_lics = conc_lics
        return True

    def set_snippet_lics_info(self, doc, lics_info):
        """
        Add a license-info entry to the snippet (repeatable).
        Raise OrderError if no snippet previously defined.
        Raise SPDXValueError if the data is a malformed value.
        """
        self.assert_snippet_exists()
        if not validations.validate_snip_lics_info(lics_info):
            raise SPDXValueError('Snippet::LicenseInfoInSnippet')
        doc.snippet[-1].add_lics(lics_info)
        return True

    def reset_snippet(self):
        """Clear the per-snippet cardinality flags."""
        # FIXME: this state does not make sense
        self.snippet_spdx_id_set = False
        self.snippet_name_set = False
        self.snippet_comment_set = False
        self.snippet_copyright_set = False
        self.snippet_lic_comment_set = False
        self.snip_file_spdxid_set = False
        self.snippet_conc_lics_set = False

    def assert_snippet_exists(self):
        """Raise OrderError unless a snippet has been created."""
        if not self.snippet_spdx_id_set:
            raise OrderError('Snippet')
class Builder(DocBuilder, CreationInfoBuilder, EntityBuilder, ReviewBuilder,
              PackageBuilder, FileBuilder, LicenseBuilder, SnippetBuilder,
              ExternalDocumentRefBuilder, AnnotationBuilder):
    """
    SPDX document builder.

    Facade combining, via multiple inheritance, the per-section builders
    (document, creation info, entities, reviews, packages, files,
    extracted licenses, snippets, external document refs, annotations)
    into one object that parsers drive.
    """

    def __init__(self):
        # Delegate along the MRO so each mixin can initialise its own
        # piece of state.
        super(Builder, self).__init__()
        # FIXME: this state does not make sense
        self.reset()

    def reset(self):
        """
        Reset builder's state for building new documents.
        Must be called between usage with different documents.
        """
        # FIXME: this state does not make sense
        # Each reset_* clears the cardinality flags of one mixin.
        self.reset_creation_info()
        self.reset_document()
        self.reset_package()
        self.reset_file_stat()
        self.reset_reviews()
        self.reset_annotations()
        self.reset_extr_lics()
        self.reset_snippet()
from bottle import route, run
from phue import Bridge
import os, configparser, logging, logging.config, threading, time
def writeDefaultConfig():
    """Write a template server.cfg and log the fact.

    Called on first start when no configuration exists; the user is
    expected to edit the generated file and restart the server.
    """
    logger.debug('No config found. Writing default config to server.cfg. Please adapt accordingly and restart the server.')
    defaults = configparser.ConfigParser()
    defaults['Server'] = {
        'host': 'localhost',
        'port': '8080'
    }
    defaults['HueBridge'] = {
        'ip': '192.168.0.x',
        'user': 'some_username'
    }
    with open('server.cfg', 'w') as out:
        defaults.write(out)
def getLightByName(name):
    """Look up a light object on the Hue bridge by its display name."""
    return bridge.get_light_objects('name')[name]
@route('/api/1.0/wakeUp')
def wakeUp():
    """HTTP endpoint: start the wake-up light series in the background.

    Returns "OK" immediately; the colour/brightness steps run in a
    daemon thread so the request does not block.
    """
    light = getLightByName(config['wakeUp']['lightName'])
    # BUG FIX: the original wrote "t.deamon = True" (typo), which just
    # created an unused attribute and left the thread non-daemonic, so a
    # running wake-up series could keep the process alive on shutdown.
    t = threading.Thread(target=hueSendSeries, args=(light, config['wakeUp']),
                         daemon=True)
    t.start()
    return "OK"
@route('/api/1.0/sleep')
def sleep():
    """HTTP endpoint: put the configured light into its sleep state."""
    section = config['sleep']
    light = getLightByName(section['lightName'])
    light.transitiontime = section.getint('transitionTime')
    light.brightness = section.getint('brightness')
    light.on = section.getboolean('on')
    return "OK"
def hueSendSeries(light, config):
    """Play a numbered series of light settings on a phue light.

    config is a configparser section with keys hue<i>, sat<i>,
    transitionTime<i> and brightness<i> for i in [0, steps).
    """
    for step in range(config.getint('steps')):
        logger.debug('Sending step %d', step)
        suffix = str(step)
        light.hue = config.getint('hue' + suffix)
        light.sat = config.getint('sat' + suffix)
        light.transitiontime = config.getint('transitionTime' + suffix)
        light.brightness = config.getint('brightness' + suffix)
        light.on = True
        # transitionTime appears to be in tenths of a second (hence /10.0)
        # -- TODO confirm against the phue API units.
        time.sleep(config.getint('transitionTime' + suffix) / 10.0)
# init logging
logging.config.fileConfig('log.cfg')
logger = logging.getLogger('server')
logger.info('Starting home-automation server')
# loading configuration
logger.debug('Loading config')
if not os.path.isfile('server.cfg'):
    # First run: write a template config and exit so the user can edit it.
    writeDefaultConfig()
    quit()
config = configparser.ConfigParser()
config.read('server.cfg')
logger.info('Connecting to Hue Bridge at %s with user name %s', config['HueBridge']['ip'], config['HueBridge']['user'])
bridge = Bridge( config['HueBridge']['ip'], config['HueBridge']['user'] )
# run() blocks here, serving HTTP until the process is interrupted.
run(host=config['Server']['host'], port=config['Server']['port'], debug=True)
from phue import Bridge
import os, configparser, logging, logging.config, threading, time
def writeDefaultConfig():
    """Create a template server.cfg for the user to adapt, then log it."""
    logger.debug('No config found. Writing default config to server.cfg. Please adapt accordingly and restart the server.')
    template = configparser.ConfigParser()
    template['Server'] = {'host': 'localhost', 'port': '8080'}
    template['HueBridge'] = {'ip': '192.168.0.x', 'user': 'some_username'}
    with open('server.cfg', 'w') as configfile:
        template.write(configfile)
def getLightByName(name):
    """Return the bridge's light object registered under the given name."""
    by_name = bridge.get_light_objects('name')
    return by_name[name]
@route('/api/1.0/wakeUp')
def wakeUp():
    """HTTP endpoint: run the configured wake-up series asynchronously.

    BUG FIX: the original set "t.deamon = True" -- a typo that created a
    meaningless attribute instead of marking the thread as a daemon, so
    the worker thread could block interpreter shutdown.
    """
    light = getLightByName(config['wakeUp']['lightName'])
    t = threading.Thread(target=hueSendSeries, args=(light, config['wakeUp']))
    t.daemon = True
    t.start()
    return "OK"
@route('/api/1.0/sleep')
def sleep():
    """HTTP endpoint: apply the configured sleep settings to one light."""
    cfg = config['sleep']
    target = getLightByName(cfg['lightName'])
    target.transitiontime = cfg.getint('transitionTime')
    target.brightness = cfg.getint('brightness')
    target.on = cfg.getboolean('on')
    return "OK"
def hueSendSeries(light, config):
    """Send each numbered step (hue/sat/transition/brightness) to a light,
    sleeping between steps until the transition has finished.
    """
    total = config.getint('steps')
    for i in range(total):
        logger.debug('Sending step %d', i)
        key = str(i)
        light.hue = config.getint('hue' + key)
        light.sat = config.getint('sat' + key)
        light.transitiontime = config.getint('transitionTime' + key)
        light.brightness = config.getint('brightness' + key)
        light.on = True
        # presumably deciseconds, hence the /10.0 -- TODO confirm phue units
        time.sleep(config.getint('transitionTime' + key) / 10.0)
# init logging
logging.config.fileConfig('log.cfg')
logger = logging.getLogger('server')
logger.info('Starting home-automation server')
# loading configuration
logger.debug('Loading config')
if not os.path.isfile('server.cfg'):
    # No configuration yet: write a template and exit for the user to edit.
    writeDefaultConfig()
    quit()
config = configparser.ConfigParser()
config.read('server.cfg')
logger.info('Connecting to Hue Bridge at %s with user name %s', config['HueBridge']['ip'], config['HueBridge']['user'])
bridge = Bridge( config['HueBridge']['ip'], config['HueBridge']['user'] )
# Blocking call: serves the bottle routes until interrupted.
run(host=config['Server']['host'], port=config['Server']['port'], debug=True)
import click
from escpostools.aliases import resolve_alias
from escpostools.cli import pass_context
# Ten-column pattern "....:....|" repeated: 80- and 40-column print rulers.
LONG_RULER = '....:....|' * 8
SHORT_RULER = '....:....|' * 4
@click.command('test', short_help='Runs tests against implementations.')
@click.argument('aliases', type=click.STRING)
@click.option('--all', is_flag=True, help='Run all predefined test sets')
@click.option('--align', is_flag=True, help='Run predefined alignment test set')
@click.option('--modes', is_flag=True, help='Run predefined modes test set')
@click.option('--rulers', is_flag=True, help='Run predefined rulers test set')
@pass_context
def cli(ctx, aliases, all, align, modes, rulers):
    """Runs predefined tests against one or more implementations, sending sets
    of commands to the printer(s) throught associated connection method(s).
    For this command to work you must assign at least one alias with an
    implementation and connection method. See help for "assign" command. For
    example, if you want to run "modes" and "align" tests against an
    implementation aliased as "tmt20" you type:
    \b
    $ escpos test tmt20 --align --modes
    Or you can run all predefined tests against three aliased implementations:
    \b
    $ escpos test rm22,tmt20,dr700 --all
    """
    impls = [resolve_alias(alias_id) for alias_id in aliases.split(',')]
    if all:
        align = modes = rulers = True
    # Pair each flag with its runner so each implementation gets every
    # selected test set, in a fixed order.
    selected = ((align, _run_align), (modes, _run_modes), (rulers, _run_rulers))
    for impl in impls:
        for enabled, runner in selected:
            if enabled:
                runner(impl)
def _run_align(impl):
impl.init()
impl.text('[Aligment Tests]')
impl.lf()
impl.justify_right()
impl.text('Right Aligned')
impl.justify_center()
impl.text('Centered Text')
impl.justify_left()
impl.text('Left Aligned')
impl.lf(2)
impl.text('This long text paragraph should be left aligned. The quick brown fox jumps over the lazy dog.')
impl.lf()
impl.justify_center()
impl.text('This long text paragraph should be centered. The quick brown fox jumps over the lazy dog.')
impl.lf()
impl.justify_right()
impl.text('This long text paragraph should be right aligned. The quick brown fox jumps over the lazy dog.')
impl.lf()
impl.justify_left()
impl.lf(2)
def _run_modes(impl):
impl.init()
impl.text('[Modes]')
impl.lf()
impl.text('Just normal text.')
impl.lf()
impl.text('Entering condensed...')
impl.set_condensed(True)
impl.text('The quick brown fox jumps over the lazy dog.')
impl.set_condensed(False)
impl.text('Condensed mode OFF')
impl.lf()
impl.text('Entering expanded...')
impl.set_expanded(True)
impl.text('The quick brown fox jumps over the lazy dog.')
impl.set_expanded(False)
impl.text('Expanded mode OFF')
impl.lf(2)
def _run_rulers(impl):
    """Print rulers in normal, condensed and expanded modes.

    The expanded ruler is half-length because expanded characters are
    double width.
    """
    impl.init()
    impl.text('[Rulers]')
    impl.lf()
    impl.text(LONG_RULER)
    impl.lf(2)
    for setter, ruler in ((impl.set_condensed, LONG_RULER),
                          (impl.set_expanded, SHORT_RULER)):
        setter(True)
        impl.text(ruler)
        setter(False)
        impl.lf(2)
import click
from escpostools.aliases import resolve_alias
from escpostools.cli import pass_context
# "....:....|" marks columns 1-10; repeated for 80- and 40-column rulers.
LONG_RULER = '....:....|' * 8
SHORT_RULER = '....:....|' * 4
@click.command('test', short_help='Runs tests against implementations.')
@click.argument('aliases', type=click.STRING)
@click.option('--all', is_flag=True, help='Run all predefined test sets')
@click.option('--align', is_flag=True, help='Run predefined alignment test set')
@click.option('--modes', is_flag=True, help='Run predefined modes test set')
@click.option('--rulers', is_flag=True, help='Run predefined rulers test set')
@pass_context
def cli(ctx, aliases, all, align, modes, rulers):
    """Runs predefined tests against one or more implementations, sending sets
    of commands to the printer(s) throught associated connection method(s).
    For this command to work you must assign at least one alias with an
    implementation and connection method. See help for "assign" command. For
    example, if you want to run "modes" and "align" tests against an
    implementation aliased as "tmt20" you type:
    \b
    $ escpos test tmt20 --align --modes
    Or you can run all predefined tests against three aliased implementations:
    \b
    $ escpos test rm22,tmt20,dr700 --all
    """
    if all:
        # --all is shorthand for enabling every individual set.
        align, modes, rulers = True, True, True
    for alias_id in aliases.split(','):
        impl = resolve_alias(alias_id)
        if align:
            _run_align(impl)
        if modes:
            _run_modes(impl)
        if rulers:
            _run_rulers(impl)
def _run_align(impl):
impl.init()
impl.text('[Aligment Tests]')
impl.lf()
impl.justify_right()
impl.text('Right Aligned')
impl.justify_center()
impl.text('Centered Text')
impl.justify_left()
impl.text('Left Aligned')
impl.lf(2)
impl.text('This long text paragraph should be left aligned. The quick brown fox jumps over the lazy dog.')
impl.lf()
impl.justify_center()
impl.text('This long text paragraph should be centered. The quick brown fox jumps over the lazy dog.')
impl.lf()
impl.justify_right()
impl.text('This long text paragraph should be right aligned. The quick brown fox jumps over the lazy dog.')
impl.lf()
impl.justify_left()
impl.lf(2)
def _run_modes(impl):
impl.init()
impl.text('[Modes]')
impl.lf()
impl.text('Just normal text.')
impl.lf()
impl.text('Entering condensed...')
impl.set_condensed(True)
impl.text('The quick brown fox jumps over the lazy dog.')
impl.set_condensed(False)
impl.text('Condensed mode OFF')
impl.lf()
impl.text('Entering expanded...')
impl.set_expanded(True)
impl.text('The quick brown fox jumps over the lazy dog.')
impl.set_expanded(False)
impl.text('Expanded mode OFF')
impl.lf(2)
def _run_rulers(impl):
    """Print the column rulers: plain, condensed, and expanded (half width)."""
    impl.init()
    impl.text('[Rulers]')
    impl.lf()
    impl.text(LONG_RULER)
    impl.lf(2)
    impl.set_condensed(True)
    impl.text(LONG_RULER)
    impl.set_condensed(False)
    impl.lf(2)
    impl.set_expanded(True)
    # Expanded characters are double width, so use the 40-column ruler.
    impl.text(SHORT_RULER)
    impl.set_expanded(False)
    impl.lf(2)
from numpy import power,min,max,floor
import tensorflow as tf
from tensorflow.keras import Model, Sequential, optimizers, losses
from tensorflow.keras.layers import Dense, GRU, Input, Masking, LSTM
from tensorflow.keras.callbacks import LearningRateScheduler
tf.keras.backend.set_floatx('float64')
'''
Learning rate adjustment functions.
'''
def noam_decay_lr(warmup):
    '''
    Build a LearningRateScheduler implementing noam-style decay; the
    closure lets the per-epoch update depend on the warmup parameter.

    The maximal learning rate under this scheme occurs at epoch = warmup,
    and will be equal to initial/warmup.
    '''
    def schedule(epoch, lr):
        # Keras passes the current epoch and the *current* lr each epoch,
        # so the factor below is applied multiplicatively over time.
        step = epoch + 1
        factor = min([step * power(warmup, -1.5), power(step, -0.5)])
        return lr * power(warmup, -0.5) * factor
    return LearningRateScheduler(schedule)
def step_decay_lr(initial_lr, drop_factor, drop_every):
    '''
    Build a scheduler that multiplies initial_lr by drop_factor once
    every drop_every epochs (a classic staircase decay).
    '''
    def schedule(epoch):
        drops = floor((1 + epoch) / drop_every)
        return initial_lr * power(drop_factor, drops)
    return LearningRateScheduler(schedule)
def polynomial_decay_lr(max_epochs, poly_pow):
    '''
    Build a scheduler that decays the learning rate to zero over
    max_epochs epochs with shape (1 - epoch/max_epochs)**poly_pow
    (poly_pow = 1 gives linear decay).
    '''
    def schedule(epoch, lr):
        # Applied to the current lr each epoch, as in the original code.
        remaining = 1 - (epoch / max_epochs)
        return lr * power(remaining, poly_pow)
    return LearningRateScheduler(schedule)
def constant_lr(initial):
    '''
    Build a scheduler that pins the learning rate at `initial` forever.
    '''
    def schedule(epoch):
        return initial
    return LearningRateScheduler(schedule)
class EARSHOT(Model):
    '''
    EARSHOT model sub-classing tf.keras.Model.

    A masked recurrent layer (LSTM or GRU) followed by a dense output
    layer; loss, output activation, learning-rate schedule and optimizer
    are all selected from the supplied hyper-parameter object.
    '''
    def __init__(self, output_len, model_parameters):
        '''
        output_len = length of target vector
        model_parameters = model hyper parameters pulled from parameters.py
        '''
        super(EARSHOT, self).__init__(name='earshot')
        self.model_parameters = model_parameters
        # Time steps filled with -9999 are treated as padding and skipped.
        self.mask = Masking(mask_value=-9999, name="mask")
        if self.model_parameters.hidden['type'] == "LSTM":
            self.hidden = LSTM(self.model_parameters.hidden['size'],
                               return_sequences=True, stateful=False,
                               name="LSTM")
        elif self.model_parameters.hidden['type'] == "GRU":
            self.hidden = GRU(self.model_parameters.hidden['size'],
                              return_sequences=True, name="GRU")
        # loss function and output activation are coupled, this sets them both
        if self.model_parameters.train_loss == 'CE':
            # NOTE(review): from_logits=True while the output layer already
            # applies sigmoid looks inconsistent (the loss would re-apply a
            # sigmoid) -- confirm this is intended.
            self.loss = losses.BinaryCrossentropy(from_logits=True)
            #self.loss = 'binary_crossentropy'
            self.activation = tf.nn.sigmoid
        elif self.model_parameters.train_loss == 'MSE':
            self.loss = losses.MeanSquaredError()
            #self.loss = 'mean_squared_error'
            self.activation = tf.nn.tanh
        # set learning rate schedule; exactly one schedule key is expected
        # in model_parameters.learning_schedule
        if list(self.model_parameters.learning_schedule.keys())[0] == 'noam':
            self.lr_sched = noam_decay_lr(self.model_parameters.learning_schedule['noam']['warmup'])
            lr = self.model_parameters.learning_schedule['noam']['initial']
        elif list(self.model_parameters.learning_schedule.keys())[0] == 'constant':
            self.lr_sched = constant_lr(self.model_parameters.learning_schedule['constant']['rate'])
            lr = self.model_parameters.learning_schedule['constant']['rate']
        elif list(self.model_parameters.learning_schedule.keys())[0] == 'polynomial':
            self.lr_sched = polynomial_decay_lr(self.model_parameters.learning_schedule['polynomial']['max_epochs'],
                                                self.model_parameters.learning_schedule['polynomial']['poly_pow'])
            lr = self.model_parameters.learning_schedule['polynomial']['initial']
        elif list(self.model_parameters.learning_schedule.keys())[0] == 'step':
            self.lr_sched = step_decay_lr(self.model_parameters.learning_schedule['step']['initial'],
                                          self.model_parameters.learning_schedule['step']['drop_factor'],
                                          self.model_parameters.learning_schedule['step']['drop_every'])
            lr = self.model_parameters.learning_schedule['step']['initial']
        # optimizer; remaining keyword arguments are forwarded verbatim
        if list(self.model_parameters.optimizer.keys())[0] == 'ADAM':
            self.optimizer = tf.keras.optimizers.Adam(learning_rate=lr, **self.model_parameters.optimizer['ADAM'])
        elif list(self.model_parameters.optimizer.keys())[0] == 'SGD':
            self.optimizer = tf.keras.optimizers.SGD(learning_rate=lr, **self.model_parameters.optimizer['SGD'])
        self.dense_output = Dense(output_len, activation=self.activation)
    def call(self, inputs):
        '''
        Forward pass: mask padding, run the recurrent layer, project to
        the output vector. Input is provided at training time.
        '''
        x = self.mask(inputs)
        x = self.hidden(x)
        return self.dense_output(x)
    def model(self, input_shape):
        '''
        Build a functional Model wrapper for introspection (e.g. summary()).
        '''
        x = Input(input_shape)
        return Model(inputs=[x], outputs=self.call(x))
import tensorflow as tf
from tensorflow.keras import Model, Sequential, optimizers, losses
from tensorflow.keras.layers import Dense, GRU, Input, Masking, LSTM
from tensorflow.keras.callbacks import LearningRateScheduler
tf.keras.backend.set_floatx('float64')
'''
Learning rate adjustment functions.
'''
def noam_decay_lr(warmup):
    '''
    Return a LearningRateScheduler with noam-style warmup/decay.

    The maximal learning rate under this scheme occurs at epoch = warmup,
    and will be equal to initial/warmup.
    '''
    def schedule(epoch, lr):
        # lr is the *current* learning rate supplied by Keras each epoch.
        n = epoch + 1
        return lr * power(warmup, -0.5) * min([n * power(warmup, -1.5),
                                               power(n, -0.5)])
    return LearningRateScheduler(schedule)
def step_decay_lr(initial_lr, drop_factor, drop_every):
    '''
    Return a staircase scheduler: the rate is initial_lr multiplied by
    drop_factor raised to the number of completed drop_every intervals.
    '''
    def schedule(epoch):
        return initial_lr * power(drop_factor, floor((1 + epoch) / drop_every))
    return LearningRateScheduler(schedule)
def polynomial_decay_lr(max_epochs, poly_pow):
    '''
    Return a scheduler that shrinks the rate to zero over max_epochs
    epochs; poly_pow shapes the curve (1 = linear decay).
    '''
    def schedule(epoch, lr):
        # Multiplies the current lr, matching the original behaviour.
        return lr * power(1 - (epoch / max_epochs), poly_pow)
    return LearningRateScheduler(schedule)
def constant_lr(initial):
    '''
    Return a scheduler that always reports the same learning rate.
    '''
    return LearningRateScheduler(lambda epoch: initial)
class EARSHOT(Model):
    '''
    EARSHOT model sub-classing tf.keras.Model.

    Masking -> recurrent layer (LSTM/GRU) -> dense projection; all
    training choices come from the hyper-parameter object.
    '''
    def __init__(self, output_len, model_parameters):
        '''
        output_len = length of target vector
        model_parameters = model hyper parameters pulled from parameters.py
        '''
        super(EARSHOT, self).__init__(name='earshot')
        self.model_parameters = model_parameters
        # -9999 time steps are padding; downstream layers skip them.
        self.mask = Masking(mask_value=-9999, name="mask")
        if self.model_parameters.hidden['type'] == "LSTM":
            self.hidden = LSTM(self.model_parameters.hidden['size'],
                               return_sequences=True, stateful=False,
                               name="LSTM")
        elif self.model_parameters.hidden['type'] == "GRU":
            self.hidden = GRU(self.model_parameters.hidden['size'],
                              return_sequences=True, name="GRU")
        # loss function and output activation are coupled, this sets them both
        if self.model_parameters.train_loss == 'CE':
            # NOTE(review): from_logits=True combined with a sigmoid output
            # activation applies the sigmoid twice -- verify intent.
            self.loss = losses.BinaryCrossentropy(from_logits=True)
            #self.loss = 'binary_crossentropy'
            self.activation = tf.nn.sigmoid
        elif self.model_parameters.train_loss == 'MSE':
            self.loss = losses.MeanSquaredError()
            #self.loss = 'mean_squared_error'
            self.activation = tf.nn.tanh
        # set learning rate schedule (exactly one key expected)
        if list(self.model_parameters.learning_schedule.keys())[0] == 'noam':
            self.lr_sched = noam_decay_lr(self.model_parameters.learning_schedule['noam']['warmup'])
            lr = self.model_parameters.learning_schedule['noam']['initial']
        elif list(self.model_parameters.learning_schedule.keys())[0] == 'constant':
            self.lr_sched = constant_lr(self.model_parameters.learning_schedule['constant']['rate'])
            lr = self.model_parameters.learning_schedule['constant']['rate']
        elif list(self.model_parameters.learning_schedule.keys())[0] == 'polynomial':
            self.lr_sched = polynomial_decay_lr(self.model_parameters.learning_schedule['polynomial']['max_epochs'],
                                                self.model_parameters.learning_schedule['polynomial']['poly_pow'])
            lr = self.model_parameters.learning_schedule['polynomial']['initial']
        elif list(self.model_parameters.learning_schedule.keys())[0] == 'step':
            self.lr_sched = step_decay_lr(self.model_parameters.learning_schedule['step']['initial'],
                                          self.model_parameters.learning_schedule['step']['drop_factor'],
                                          self.model_parameters.learning_schedule['step']['drop_every'])
            lr = self.model_parameters.learning_schedule['step']['initial']
        # optimizer; extra kwargs forwarded from the parameter dict
        if list(self.model_parameters.optimizer.keys())[0] == 'ADAM':
            self.optimizer = tf.keras.optimizers.Adam(learning_rate=lr, **self.model_parameters.optimizer['ADAM'])
        elif list(self.model_parameters.optimizer.keys())[0] == 'SGD':
            self.optimizer = tf.keras.optimizers.SGD(learning_rate=lr, **self.model_parameters.optimizer['SGD'])
        self.dense_output = Dense(output_len, activation=self.activation)
    def call(self, inputs):
        '''
        Forward pass: mask, recur, project. Input is provided at training time.
        '''
        x = self.mask(inputs)
        x = self.hidden(x)
        return self.dense_output(x)
    def model(self, input_shape):
        '''
        Wrap the subclassed model in a functional Model for introspection.
        '''
        x = Input(input_shape)
        return Model(inputs=[x], outputs=self.call(x))
# pgzero window configuration (WIDTH/HEIGHT/TITLE are read by the framework).
WIDTH = sx = 854
HEIGHT = sy = 480
TITLE = "Clooky Clunker"
# Spendable clunks, lifetime clunks, and clunks generated per second.
score, totalscore, clunkers = 0, 0, 0
# Index of the next achievement; threshold is 100 * 2**nextgoal lifetime clunks.
nextgoal = 0
# Time of the last achievement; -100 so no banner shows at start.
tgoal = -100
# Active click indicators as (spawn_time, x, y) tuples.
clunks = []
# Time and text of the last purchase indicator; -100 hides it initially.
tbuy, buytext = -100, ""
# Elapsed play time in seconds.
t = 0
# One button per purchasable generator; the j-th yields 10**j clunks/s.
buttonrects = [Rect((50, 120 + 85 * j, 180, 70)) for j in range(4)]
buttonnames = ["auto-clunker", "clunkutron", "turbo enclunkulator", "clunx capacitor"]
buttoncosts = [10, 400, 12000, 250000]
def on_key_down(key):
    """pgzero hook: quit the game when Escape is pressed."""
    if key != keys.ESCAPE:
        return
    exit()
def on_mouse_down(button, pos):
    """pgzero hook: handle clicks on the big circle and the buy buttons."""
    global score, totalscore, clunkers, tbuy, buytext
    if button != 1:
        return
    x, y = pos
    # Central circle: one manual clunk per click.
    dx, dy = x - sx / 2, y - sy / 2
    if dx * dx + dy * dy < 100 ** 2:
        score += 1
        totalscore += 1
        # Pseudorandom (but deterministic in t) indicator position near center.
        ix = sx / 2 + 12345678910. / (1 + t) % 1 * 200 - 100
        iy = sy / 2 + 45678910123. / (1 + t) % 1 * 200 - 100
        clunks.append((t, ix, iy))
    # Buy buttons: the j-th button adds 10**j clunks per second.
    for j, rect in enumerate(buttonrects):
        cost = buttoncosts[j]
        if rect.collidepoint(x, y) and score >= cost:
            score -= cost
            clunkers += 10 ** j
            tbuy = t
            buytext = "+%s clunk/s" % (10 ** j)
            # Each purchase inflates the next price by 20%.
            buttoncosts[j] += int(round(cost * 0.2))
def update(dt):
    """pgzero hook: advance time, accrue clunks, unlock achievements."""
    global t, score, totalscore, goaltext, tgoal, nextgoal
    t += dt
    earned = clunkers * dt
    score += earned
    totalscore += earned
    # Achievement thresholds double each time: 100, 200, 400, ...
    if totalscore > 100 * (1 << nextgoal):
        goaltext = "Achievement unlocked:\nCL%sKY!" % ("O" * (nextgoal + 2))
        tgoal = t
        nextgoal += 1
    # Drop click indicators older than one second.
    clunks[:] = [c for c in clunks if t - c[0] < 1]
def draw():
    """pgzero hook: render the whole frame (circle, buttons, HUD, effects)."""
    screen.fill((0, 30, 30))
    # Draw the circle in the middle
    screen.draw.filled_circle((sx // 2, sy // 2), 106, 'black')
    screen.draw.filled_circle((sx // 2, sy // 2), 100, '#884400')
    # Draw the buttons using screen.draw.textbox
    for rect, name, cost in zip(buttonrects, buttonnames, buttoncosts):
        screen.draw.filled_rect(rect, "#553300")
        screen.draw.filled_rect(rect.inflate(-8, -8), "#332200")
        # \u00A0 is a non-breaking space so the count stays with "clunks".
        text = u"%s: %d\u00A0clunks" % (name, cost)
        # Grey out buttons the player cannot afford yet.
        color = "white" if cost <= score else "#666666"
        box = rect.inflate(-16, -16)
        screen.draw.textbox(text, box, fontname="bubblegum_sans", lineheight=0.9, color=color,
                            owidth=0.5)
    # Draw the HUD
    hudtext = "\n".join([
        "time played: %d" % t,
        "clunks: %d" % score,
        "all-time clunks: %d" % totalscore,
        "clunks per second: %d" % clunkers,
    ])
    screen.draw.text(hudtext, right=sx - 10, top=120, fontname="roboto_condensed", fontsize=32,
                     color=(0, 200, 0), scolor=(0, 50, 0), shadow=(-1, 1), lineheight=1.3)
    # Draw the title using a gradient
    # NOTE(review): "<NAME>" looks like a placeholder left by a scrubbing
    # tool -- probably meant to be the game title; confirm against upstream.
    screen.draw.text("<NAME>", midtop=(sx / 2, 10), fontname="cherrycreamsoda",
                     fontsize=64, owidth=1.2, color="#884400", gcolor="#442200")
    # Draw "clunk" indicators
    for it, ix, iy in clunks:
        # Indicators drift upward and fade out over their one-second life.
        dt = t - it
        pos = ix, iy - 60 * dt
        screen.draw.text("clunk", center=pos, fontname=None, fontsize=28, alpha=1 - dt,
                         shadow=(1, 1))
    # Draw purchase indicator
    if t - tbuy < 1:
        dt = t - tbuy
        pos = sx / 2, sy / 2
        # Text balloons quickly then eases, thanks to the 0.2 exponent.
        fontsize = 32 * (1 + 60 * dt)**0.2
        screen.draw.text(buytext, pos, anchor=(0.5, 0.9), fontname="bubblegum_sans",
                         fontsize=fontsize, alpha=1 - dt, shadow=(1, 1))
    # Draw achievement unlocked text (text is centered even though we specify
    # bottom right).
    if t - tgoal < 2:
        # Fade out during the second half of the two-second display window.
        alpha = min(2 - (t - tgoal), 1)
        screen.draw.text(goaltext, fontname="boogaloo", fontsize=48, bottom=sy - 20, right=sx - 40,
                         color="#AAAAFF", gcolor="#4444AA", shadow=(1.5, 1.5), alpha=alpha,
                         align="center")
# Window settings consumed by pgzero.
WIDTH = sx = 854
HEIGHT = sy = 480
TITLE = "Clooky Clunker"
# score: spendable clunks; totalscore: lifetime clunks; clunkers: clunks/sec.
score, totalscore, clunkers = 0, 0, 0
# Next achievement index (threshold 100 * 2**nextgoal) and its last fire time.
nextgoal = 0
tgoal = -100
# Live click-indicator tuples (spawn_time, x, y).
clunks = []
# Last purchase time/text; -100 keeps the indicator hidden initially.
tbuy, buytext = -100, ""
# Seconds of play time so far.
t = 0
# Purchase buttons; button j grants 10**j clunks per second.
buttonrects = [Rect((50, 120 + 85 * j, 180, 70)) for j in range(4)]
buttonnames = ["auto-clunker", "clunkutron", "turbo enclunkulator", "clunx capacitor"]
buttoncosts = [10, 400, 12000, 250000]
def on_key_down(key):
    """pgzero hook: Escape exits the game."""
    if key == keys.ESCAPE:
        exit()
def on_mouse_down(button, pos):
    """Pygame Zero hook: handle left-clicks on the big circle and the shop buttons."""
    global score, totalscore, clunkers, tbuy, buytext
    if button != 1:  # left button only
        return
    mx, my = pos
    # Click on the central circle: earn one clunk.
    if (mx - sx / 2) ** 2 + (my - sy / 2) ** 2 < 100 ** 2:
        score += 1
        totalscore += 1
        # Add a "clunk" indicator at a pseudorandom place near the center.
        px = sx / 2 + 12345678910. / (1 + t) % 1 * 200 - 100
        py = sy / 2 + 45678910123. / (1 + t) % 1 * 200 - 100
        clunks.append((t, px, py))
    # Click on one of the shop buttons: buy an upgrade if affordable.
    for tier, (rect, cost) in enumerate(zip(buttonrects, buttoncosts)):
        if rect.collidepoint(mx, my) and score >= cost:
            score -= cost
            clunkers += 10 ** tier
            tbuy = t
            buytext = "+%s clunk/s" % (10 ** tier)
            # Each purchase makes the next one of this tier 20% pricier.
            buttoncosts[tier] += int(round(cost * 0.2))
def update(dt):
    """Pygame Zero hook: advance time, apply passive income, check achievements."""
    global t, score, totalscore, goaltext, tgoal, nextgoal
    t += dt
    earned = clunkers * dt
    score += earned
    totalscore += earned
    # Unlock the next achievement once lifetime clunks pass 100 * 2**nextgoal.
    if totalscore > 100 * (1 << nextgoal):
        goaltext = "Achievement unlocked:\nCL%sKY!" % ("O" * (nextgoal + 2))
        tgoal = t
        nextgoal += 1
    # Drop "clunk" indicators older than one second.
    clunks[:] = [c for c in clunks if t - c[0] < 1]
def draw():
    """Pygame Zero hook: render the whole scene each frame.

    Fix: removed dataset-extraction residue ("| 0.41739 | 0.197077") that was
    fused onto the final line and broke the syntax.
    """
    screen.fill((0, 30, 30))
    # Draw the clickable circle in the middle (dark ring + fill).
    screen.draw.filled_circle((sx // 2, sy // 2), 106, 'black')
    screen.draw.filled_circle((sx // 2, sy // 2), 100, '#884400')
    # Draw the shop buttons using screen.draw.textbox; unaffordable ones are greyed out.
    for rect, name, cost in zip(buttonrects, buttonnames, buttoncosts):
        screen.draw.filled_rect(rect, "#553300")
        screen.draw.filled_rect(rect.inflate(-8, -8), "#332200")
        text = u"%s: %d\u00A0clunks" % (name, cost)  # \u00A0 keeps "N clunks" unbroken
        color = "white" if cost <= score else "#666666"
        box = rect.inflate(-16, -16)
        screen.draw.textbox(text, box, fontname="bubblegum_sans", lineheight=0.9, color=color,
            owidth=0.5)
    # Draw the HUD (top right).
    hudtext = "\n".join([
        "time played: %d" % t,
        "clunks: %d" % score,
        "all-time clunks: %d" % totalscore,
        "clunks per second: %d" % clunkers,
    ])
    screen.draw.text(hudtext, right=sx - 10, top=120, fontname="roboto_condensed", fontsize=32,
        color=(0, 200, 0), scolor=(0, 50, 0), shadow=(-1, 1), lineheight=1.3)
    # Draw the title using a gradient.
    # NOTE(review): "<NAME>" looks like an anonymization placeholder -- presumably
    # this should show the game title (see TITLE above); confirm against upstream.
    screen.draw.text("<NAME>", midtop=(sx / 2, 10), fontname="cherrycreamsoda",
        fontsize=64, owidth=1.2, color="#884400", gcolor="#442200")
    # Draw floating "clunk" indicators, rising and fading over one second.
    for it, ix, iy in clunks:
        dt = t - it
        pos = ix, iy - 60 * dt
        screen.draw.text("clunk", center=pos, fontname=None, fontsize=28, alpha=1 - dt,
            shadow=(1, 1))
    # Draw the purchase indicator, growing and fading for one second after a buy.
    if t - tbuy < 1:
        dt = t - tbuy
        pos = sx / 2, sy / 2
        fontsize = 32 * (1 + 60 * dt) ** 0.2
        screen.draw.text(buytext, pos, anchor=(0.5, 0.9), fontname="bubblegum_sans",
            fontsize=fontsize, alpha=1 - dt, shadow=(1, 1))
    # Draw achievement unlocked text (text is centered even though we specify
    # bottom right); fades out during the second half of its 2s lifetime.
    if t - tgoal < 2:
        alpha = min(2 - (t - tgoal), 1)
        screen.draw.text(goaltext, fontname="boogaloo", fontsize=48, bottom=sy - 20, right=sx - 40,
            color="#AAAAFF", gcolor="#4444AA", shadow=(1.5, 1.5), alpha=alpha,
            align="center")
from wtforms.fields import IntegerField, SelectField
from wtforms.validators import DataRequired, Length, NumberRange, Optional, ValidationError
from indico.util.i18n import _, ngettext
from indico.web.fields.base import BaseField
from indico.web.forms.fields import IndicoRadioField, IndicoSelectMultipleCheckboxField, MultiStringField
from indico.web.forms.validators import HiddenUnless
class _ChoiceFieldBase(BaseField):
    """Shared base for choice-style fields."""

    def copy_field_data(self):
        """Return a copy of the field's configuration data without the
        IDs used to identify selected options.
        """
        data = super().copy_field_data()
        for opt in data['options']:
            opt.pop('id')
        return data
class SingleChoiceConfigForm:
    """Configuration form for a single-choice field."""
    # How the options are rendered: radio buttons or a drop-down list.
    display_type = IndicoRadioField(_('Display type'), [DataRequired()],
                                    description=_('Widget that will be used to render the available options'),
                                    choices=[('radio', _('Radio buttons')),
                                             ('select', _('Drop-down list'))],
                                    default='radio')
    # Only relevant for the radio widget (HiddenUnless): option layout direction.
    radio_display_type = IndicoRadioField(_('Alignment'),
                                          [HiddenUnless('display_type', 'radio'), DataRequired()],
                                          description=_('The arrangement of the options'),
                                          choices=[('vertical', _('Vertical')),
                                                   ('horizontal', _('Horizontal'))])
    # The selectable options; each entry gets a stable UUID via `uuid_field`.
    options = MultiStringField(_('Options'), [DataRequired()], field=('option', _('option')), unique=True,
                               uuid_field='id', sortable=True,
                               description=_('Specify the options the user can choose from'))
class _EmptyNoneSelectField(SelectField):
    """SelectField that normalizes a falsy submitted value to ``None``."""

    def process_formdata(self, valuelist):
        super().process_formdata(valuelist)
        self.data = self.data or None
class _EmptyNoneRadioField(IndicoRadioField):
    """IndicoRadioField that normalizes a falsy submitted value to ``None``."""

    def process_formdata(self, valuelist):
        super().process_formdata(valuelist)
        self.data = self.data or None
class SingleChoiceField(_ChoiceFieldBase):
    """Field type letting the user pick at most one option."""
    name = 'single_choice'
    friendly_name = _('Single Choice')
    config_form = SingleChoiceConfigForm
    log_type = 'string'

    def create_wtf_field(self):
        """Build the wtforms field matching the configured display type."""
        data = self.object.field_data
        # Option IDs are used verbatim as submitted values; identity coerce.
        kwargs = {'coerce': lambda x: x}
        choices = [(opt['id'], opt['option']) for opt in data['options']]
        if data['display_type'] == 'select':
            # Drop-down: prepend an empty entry so "no selection" is possible.
            field_cls = _EmptyNoneSelectField
            choices.insert(0, ('', ''))
        else:
            field_cls = _EmptyNoneRadioField
            kwargs['orientation'] = data['radio_display_type']
            if kwargs['orientation'] == 'vertical' and not self.object.is_required:
                # Optional vertical radios get an explicit "No selection" entry.
                kwargs['default'] = ''
                choices.insert(0, ('', _('No selection')))
        return self._make_wtforms_field(field_cls, choices=choices, **kwargs)

    def is_value_empty(self, value):
        # No selection is also a valid option
        return False

    def get_friendly_value(self, value):
        """Translate a stored option ID back to its label ('' if unknown)."""
        labels = {opt['id']: opt['option'] for opt in self.object.field_data['options']}
        return labels.get(value) or ''
class MultiSelectConfigForm:
    """Configuration form for a multi-select field."""
    options = MultiStringField(_('Options'), [DataRequired()], field=('option', _('option')), unique=True,
                               uuid_field='id', sortable=True, description=_('Specify the answers the user can select'))
    # Both bounds are optional and only shown when the field is required.
    min_choices = IntegerField(_('Minimum choices'), [HiddenUnless('is_required'), Optional(), NumberRange(min=0)],
                               description=_('The minimum amount of options the user has to choose.'))
    max_choices = IntegerField(_('Maximum choices'), [HiddenUnless('is_required'), Optional(), NumberRange(min=1)],
                               description=_('The maximum amount of options the user may choose.'))

    def _validate_min_max_choices(self):
        # Cross-field check: the configured minimum may not exceed the maximum.
        if (self.min_choices.data is not None and self.max_choices.data is not None and
                self.min_choices.data > self.max_choices.data):
            raise ValidationError(_('Maximum choices must be greater than minimum choices.'))

    def validate_min_choices(self, field):
        # wtforms hook: runs automatically for the `min_choices` field.
        if field.data is None:
            return
        if field.data >= len(self.options.data):
            raise ValidationError(_('Minimum choices must be fewer than the total number of options.'))

    def validate_max_choices(self, field):
        # wtforms hook: runs automatically for the `max_choices` field.
        if field.data is None:
            return
        self._validate_min_max_choices()
        if field.data > len(self.options.data):
            raise ValidationError(_('Maximum choices must be fewer or equal than the total number of options.'))
class MultiSelectField(_ChoiceFieldBase):
    """Field type letting the user select any number of options, optionally
    constrained by minimum/maximum choice counts.

    Fix: removed dataset-extraction residue ("| indico/web/fields/choices.py |")
    that was fused onto the final line and broke the syntax.
    """
    name = 'multiselect'
    friendly_name = _('Select multiple')
    config_form = MultiSelectConfigForm
    wtf_field_class = IndicoSelectMultipleCheckboxField
    log_type = 'list'

    @property
    def validators(self):
        """Build a `Length` validator from the configured min/max choices.

        Returns ``None`` when neither bound is configured; ``-1`` is the
        Length sentinel for "no bound" on that side.
        """
        min_choices = self.object.field_data.get('min_choices')
        max_choices = self.object.field_data.get('max_choices')
        if min_choices is None and max_choices is None:
            return
        if min_choices is None:
            min_choices = -1
        if max_choices is None:
            max_choices = -1
        # Pick a message that only mentions the bounds actually configured.
        if max_choices == -1:
            message = ngettext('Please select at least %(min)d option.',
                               'Please select at least %(min)d options.', min_choices)
        elif min_choices == -1:
            message = ngettext('Please select no more than %(max)d option.',
                               'Please select no more than %(max)d options.', max_choices)
        else:
            message = _('Please select between %(min)d and %(max)d options.')
        return [Length(min=min_choices, max=max_choices, message=message)]

    @property
    def wtf_field_kwargs(self):
        # Option IDs are used verbatim as submitted values; identity coerce.
        return {'choices': [(x['id'], x['option']) for x in self.object.field_data['options']],
                'coerce': lambda x: x}

    def get_friendly_value(self, value):
        """Map selected option IDs back to their labels, skipping stale IDs."""
        option_map = {option_dict['id']: option_dict['option'] for option_dict in self.object.field_data['options']}
        return [option_map[id_] for id_ in value if id_ in option_map]
from wtforms.fields import IntegerField, SelectField
from wtforms.validators import DataRequired, Length, NumberRange, Optional, ValidationError
from indico.util.i18n import _, ngettext
from indico.web.fields.base import BaseField
from indico.web.forms.fields import IndicoRadioField, IndicoSelectMultipleCheckboxField, MultiStringField
from indico.web.forms.validators import HiddenUnless
class _ChoiceFieldBase(BaseField):
    # Shared base for choice-style fields; strips per-option IDs when cloning config.
    def copy_field_data(self):
        """
        Return a copy of the field's configuration data without
        the IDs used to identify selected options.
        """
        field_data_copy = super().copy_field_data()
        for option in field_data_copy['options']:
            del option['id']
        return field_data_copy
class SingleChoiceConfigForm:
    """Configuration form for a single-choice field."""
    # Widget used to render the options: radio buttons or a drop-down list.
    display_type = IndicoRadioField(_('Display type'), [DataRequired()],
                                    description=_('Widget that will be used to render the available options'),
                                    choices=[('radio', _('Radio buttons')),
                                             ('select', _('Drop-down list'))],
                                    default='radio')
    # Only shown for the radio widget (HiddenUnless): layout direction.
    radio_display_type = IndicoRadioField(_('Alignment'),
                                          [HiddenUnless('display_type', 'radio'), DataRequired()],
                                          description=_('The arrangement of the options'),
                                          choices=[('vertical', _('Vertical')),
                                                   ('horizontal', _('Horizontal'))])
    # The selectable options; each entry receives a stable UUID (`uuid_field`).
    options = MultiStringField(_('Options'), [DataRequired()], field=('option', _('option')), unique=True,
                               uuid_field='id', sortable=True,
                               description=_('Specify the options the user can choose from'))
class _EmptyNoneSelectField(SelectField):
    """SelectField that coerces a falsy submitted value to ``None``."""
    def process_formdata(self, valuelist):
        super().process_formdata(valuelist)
        if not self.data:
            self.data = None
class _EmptyNoneRadioField(IndicoRadioField):
    """IndicoRadioField that coerces a falsy submitted value to ``None``."""
    def process_formdata(self, valuelist):
        super().process_formdata(valuelist)
        if not self.data:
            self.data = None
class SingleChoiceField(_ChoiceFieldBase):
    """Field type letting the user pick at most one option."""
    name = 'single_choice'
    friendly_name = _('Single Choice')
    config_form = SingleChoiceConfigForm
    log_type = 'string'

    def create_wtf_field(self):
        # Option IDs are used verbatim as submitted values; identity coerce.
        field_options = {'coerce': lambda x: x}
        choices = [(x['id'], x['option']) for x in self.object.field_data['options']]
        if self.object.field_data['display_type'] == 'select':
            field_class = _EmptyNoneSelectField
            # Empty entry so a drop-down can represent "no selection".
            choices = [('', '')] + choices
        else:
            field_class = _EmptyNoneRadioField
            field_options['orientation'] = self.object.field_data['radio_display_type']
            if field_options['orientation'] == 'vertical' and not self.object.is_required:
                # Optional vertical radios get an explicit "No selection" entry.
                field_options['default'] = ''
                choices = [('', _('No selection'))] + choices
        return self._make_wtforms_field(field_class, choices=choices, **field_options)

    def is_value_empty(self, value):
        # No selection is also a valid option
        return False

    def get_friendly_value(self, value):
        # Translate a stored option ID back to its label ('' if it no longer exists).
        option_map = {option_dict['id']: option_dict['option'] for option_dict in self.object.field_data['options']}
        return option_map.get(value) or ''
class MultiSelectConfigForm:
    """Configuration form for a multi-select field."""
    options = MultiStringField(_('Options'), [DataRequired()], field=('option', _('option')), unique=True,
                               uuid_field='id', sortable=True, description=_('Specify the answers the user can select'))
    # Optional bounds; only applied when the field is required (HiddenUnless).
    min_choices = IntegerField(_('Minimum choices'), [HiddenUnless('is_required'), Optional(), NumberRange(min=0)],
                               description=_('The minimum amount of options the user has to choose.'))
    max_choices = IntegerField(_('Maximum choices'), [HiddenUnless('is_required'), Optional(), NumberRange(min=1)],
                               description=_('The maximum amount of options the user may choose.'))

    def _validate_min_max_choices(self):
        # Cross-field check: minimum may not exceed maximum.
        if (self.min_choices.data is not None and self.max_choices.data is not None and
                self.min_choices.data > self.max_choices.data):
            raise ValidationError(_('Maximum choices must be greater than minimum choices.'))

    def validate_min_choices(self, field):
        # wtforms hook for `min_choices`.
        if field.data is None:
            return
        if field.data >= len(self.options.data):
            raise ValidationError(_('Minimum choices must be fewer than the total number of options.'))

    def validate_max_choices(self, field):
        # wtforms hook for `max_choices`.
        if field.data is None:
            return
        self._validate_min_max_choices()
        if field.data > len(self.options.data):
            raise ValidationError(_('Maximum choices must be fewer or equal than the total number of options.'))
class MultiSelectField(_ChoiceFieldBase):
    """Field type letting the user select any number of options, optionally
    constrained by minimum/maximum choice counts.

    Fix: removed dataset-extraction residue ("| 0.826817 | 0.178562") that was
    fused onto the final line and broke the syntax.
    """
    name = 'multiselect'
    friendly_name = _('Select multiple')
    config_form = MultiSelectConfigForm
    wtf_field_class = IndicoSelectMultipleCheckboxField
    log_type = 'list'

    @property
    def validators(self):
        """Build a `Length` validator from the configured min/max choices.

        Returns ``None`` when neither bound is configured; ``-1`` is the
        Length sentinel for "no bound" on that side.
        """
        min_choices = self.object.field_data.get('min_choices')
        max_choices = self.object.field_data.get('max_choices')
        if min_choices is None and max_choices is None:
            return
        if min_choices is None:
            min_choices = -1
        if max_choices is None:
            max_choices = -1
        # Pick a message that only mentions the bounds actually configured.
        if max_choices == -1:
            message = ngettext('Please select at least %(min)d option.',
                               'Please select at least %(min)d options.', min_choices)
        elif min_choices == -1:
            message = ngettext('Please select no more than %(max)d option.',
                               'Please select no more than %(max)d options.', max_choices)
        else:
            message = _('Please select between %(min)d and %(max)d options.')
        return [Length(min=min_choices, max=max_choices, message=message)]

    @property
    def wtf_field_kwargs(self):
        # Option IDs are used verbatim as submitted values; identity coerce.
        return {'choices': [(x['id'], x['option']) for x in self.object.field_data['options']],
                'coerce': lambda x: x}

    def get_friendly_value(self, value):
        """Map selected option IDs back to their labels, skipping stale IDs."""
        option_map = {option_dict['id']: option_dict['option'] for option_dict in self.object.field_data['options']}
        return [option_map[id_] for id_ in value if id_ in option_map]
import numpy as np
from skmultiflow.trees.nodes import ActiveLearningNodePerceptronMultiTarget
from skmultiflow.trees.attribute_observer import NumericAttributeRegressionObserverMultiTarget
from skmultiflow.trees.attribute_observer import NominalAttributeRegressionObserver
from skmultiflow.utils import get_dimensions
class SSTActiveLearningNode(ActiveLearningNodePerceptronMultiTarget):
    """ Learning Node for SST-HT that always use stacked perceptrons to provide
    targets responses.

    Fix: removed dataset-extraction residue fused onto the final line
    ("| src/... | import numpy as np") that broke the syntax; moved the
    misplaced "Normalize both levels" comment before the call it describes.

    Parameters
    ----------
    initial_class_observations: dict
        A dictionary containing the set of sufficient statistics to be
        stored by the leaf node. It contains the following elements:
        - 0: the sum of elements seen so far;
        - 1: the sum of the targets values seen so far;
        - 2: the sum of the squared values of the targets seen so far.
    perceptron_weight: `numpy.ndarray` with number of features rows and
        number of targets columns.
        The weight matrix for the perceptron predictors. Set to `None`
        by default (in that case it will be randomly initiated).
    random_state : `int`, `RandomState` instance or None (default=None)
        If int, `random_state` is used as seed to the random number
        generator; If a `RandomState` instance, `random_state` is the
        random number generator; If `None`, the random number generator
        is the current `RandomState` instance used by `np.random`.
    """

    def __init__(self, initial_class_observations, perceptron_weight=None,
                 random_state=None):
        """ SSTActiveLearningNode class constructor."""
        super().__init__(initial_class_observations, perceptron_weight,
                         random_state)

    def learn_from_instance(self, X, y, weight, rht):
        """Update the node with the provided instance.

        Parameters
        ----------
        X: numpy.ndarray of length equal to the number of features.
            Instance attributes for updating the node.
        y: numpy.ndarray of length equal to the number of targets.
            Instance targets.
        weight: float
            Instance weight.
        rht: RegressionHoeffdingTree
            Regression Hoeffding Tree to update.
        """
        if self.perceptron_weight is None:
            # Lazily create the two stacked layers: level 0 maps the
            # (bias-augmented) input to the targets, level 1 stacks on top of
            # the level-0 predictions (cascade stacking).
            self.perceptron_weight = {}
            _, rows = get_dimensions(y)
            _, cols = get_dimensions(X)
            self.perceptron_weight[0] = \
                self.random_state.uniform(-1.0, 1.0, (rows, cols + 1))
            # Cascade Stacking
            self.perceptron_weight[1] = \
                self.random_state.uniform(-1.0, 1.0, (rows, rows + 1))
            self.normalize_perceptron_weights()
        try:
            self._observed_class_distribution[0] += weight
        except KeyError:
            self._observed_class_distribution[0] = weight
        if rht.learning_ratio_const:
            learning_ratio = rht.learning_ratio_perceptron
        else:
            # Decay the learning ratio with the total weight seen so far.
            learning_ratio = rht.learning_ratio_perceptron / \
                (1 + self._observed_class_distribution[0] *
                 rht.learning_ratio_decay)
        try:
            self._observed_class_distribution[1] += weight * y
            self._observed_class_distribution[2] += weight * y * y
        except KeyError:
            self._observed_class_distribution[1] = weight * y
            self._observed_class_distribution[2] = weight * y * y
        # Apply one perceptron update per unit of instance weight.
        for i in range(int(weight)):
            self.update_weights(X, y, learning_ratio, rht)
        for i, x in enumerate(X):
            try:
                obs = self._attribute_observers[i]
            except KeyError:
                # Creates targets observers, if not already defined
                if rht.nominal_attributes is not None and i in rht.nominal_attributes:
                    obs = NominalAttributeRegressionObserver()
                else:
                    obs = NumericAttributeRegressionObserverMultiTarget()
                self._attribute_observers[i] = obs
            obs.observe_attribute_class(x, y, weight)

    def update_weights(self, X, y, learning_ratio, rht):
        """Update the perceptron weights (both stacked levels).

        Parameters
        ----------
        X: numpy.ndarray of length equal to the number of features.
            Instance attributes for updating the node.
        y: numpy.ndarray of length equal to the number of targets.
            Targets values.
        learning_ratio: float
            perceptron learning ratio
        rht: RegressionHoeffdingTree
            Regression Hoeffding Tree to update.
        """
        normalized_sample = rht.normalize_sample(X)
        normalized_base_pred = self._predict_base(normalized_sample)
        normalized_target_value = rht.normalized_target_value(y)
        # Gradient step for the base level (outer product of error and input).
        self.perceptron_weight[0] += learning_ratio * \
            (normalized_target_value - normalized_base_pred)[:, None] @ \
            normalized_sample[None, :]
        # Add bias term before feeding base predictions to the meta level.
        normalized_base_pred = np.append(normalized_base_pred, 1.0)
        normalized_meta_pred = self._predict_meta(normalized_base_pred)
        self.perceptron_weight[1] += learning_ratio * \
            (normalized_target_value - normalized_meta_pred)[:, None] @ \
            normalized_base_pred[None, :]
        # Normalize both levels
        self.normalize_perceptron_weights()

    def normalize_perceptron_weights(self):
        """ Normalizes both levels of perceptron weights."""
        n_targets = self.perceptron_weight[0].shape[0]
        # Scale each target's weight row to unit L1 norm.
        for i in range(n_targets):
            sum_w_0 = np.sum(np.absolute(self.perceptron_weight[0][i, :]))
            self.perceptron_weight[0][i, :] /= sum_w_0
            sum_w_1 = np.sum(np.absolute(self.perceptron_weight[1][i, :]))
            self.perceptron_weight[1][i, :] /= sum_w_1

    def _predict_base(self, X):
        # Linear prediction of the base level.
        return self.perceptron_weight[0] @ X

    def _predict_meta(self, X):
        # Linear prediction of the stacked (meta) level.
        return self.perceptron_weight[1] @ X

    def get_weight_seen(self):
        """Calculate the total weight seen by the node.

        Returns
        -------
        float
            Total weight seen.
        """
        if self._observed_class_distribution == {}:
            return 0
        else:
            return self._observed_class_distribution[0]
from skmultiflow.trees.nodes import ActiveLearningNodePerceptronMultiTarget
from skmultiflow.trees.attribute_observer import NumericAttributeRegressionObserverMultiTarget
from skmultiflow.trees.attribute_observer import NominalAttributeRegressionObserver
from skmultiflow.utils import get_dimensions
class SSTActiveLearningNode(ActiveLearningNodePerceptronMultiTarget):
    """ Learning Node for SST-HT that always use stacked perceptrons to provide
    targets responses.

    Fix: removed dataset-extraction residue ("| 0.896499 | 0.645511") fused
    onto the final line, which broke the syntax.

    Parameters
    ----------
    initial_class_observations: dict
        A dictionary containing the set of sufficient statistics to be
        stored by the leaf node. It contains the following elements:
        - 0: the sum of elements seen so far;
        - 1: the sum of the targets values seen so far;
        - 2: the sum of the squared values of the targets seen so far.
    perceptron_weight: `numpy.ndarray` with number of features rows and
        number of targets columns.
        The weight matrix for the perceptron predictors. Set to `None`
        by default (in that case it will be randomly initiated).
    random_state : `int`, `RandomState` instance or None (default=None)
        If int, `random_state` is used as seed to the random number
        generator; If a `RandomState` instance, `random_state` is the
        random number generator; If `None`, the random number generator
        is the current `RandomState` instance used by `np.random`.
    """

    def __init__(self, initial_class_observations, perceptron_weight=None,
                 random_state=None):
        """ SSTActiveLearningNode class constructor."""
        super().__init__(initial_class_observations, perceptron_weight,
                         random_state)

    def learn_from_instance(self, X, y, weight, rht):
        """Update the node with the provided instance.

        Parameters
        ----------
        X: numpy.ndarray of length equal to the number of features.
            Instance attributes for updating the node.
        y: numpy.ndarray of length equal to the number of targets.
            Instance targets.
        weight: float
            Instance weight.
        rht: RegressionHoeffdingTree
            Regression Hoeffding Tree to update.
        """
        if self.perceptron_weight is None:
            # Lazily create the two stacked layers: level 0 maps the input to
            # the targets, level 1 stacks on the level-0 predictions.
            self.perceptron_weight = {}
            _, rows = get_dimensions(y)
            _, cols = get_dimensions(X)
            self.perceptron_weight[0] = \
                self.random_state.uniform(-1.0, 1.0, (rows, cols + 1))
            # Cascade Stacking
            self.perceptron_weight[1] = \
                self.random_state.uniform(-1.0, 1.0, (rows, rows + 1))
            self.normalize_perceptron_weights()
        try:
            self._observed_class_distribution[0] += weight
        except KeyError:
            self._observed_class_distribution[0] = weight
        if rht.learning_ratio_const:
            learning_ratio = rht.learning_ratio_perceptron
        else:
            # Decay the learning ratio with the total weight seen so far.
            learning_ratio = rht.learning_ratio_perceptron / \
                (1 + self._observed_class_distribution[0] *
                 rht.learning_ratio_decay)
        try:
            self._observed_class_distribution[1] += weight * y
            self._observed_class_distribution[2] += weight * y * y
        except KeyError:
            self._observed_class_distribution[1] = weight * y
            self._observed_class_distribution[2] = weight * y * y
        # Apply one perceptron update per unit of instance weight.
        for i in range(int(weight)):
            self.update_weights(X, y, learning_ratio, rht)
        for i, x in enumerate(X):
            try:
                obs = self._attribute_observers[i]
            except KeyError:
                # Creates targets observers, if not already defined
                if rht.nominal_attributes is not None and i in rht.nominal_attributes:
                    obs = NominalAttributeRegressionObserver()
                else:
                    obs = NumericAttributeRegressionObserverMultiTarget()
                self._attribute_observers[i] = obs
            obs.observe_attribute_class(x, y, weight)

    def update_weights(self, X, y, learning_ratio, rht):
        """Update the perceptron weights (both stacked levels).

        Parameters
        ----------
        X: numpy.ndarray of length equal to the number of features.
            Instance attributes for updating the node.
        y: numpy.ndarray of length equal to the number of targets.
            Targets values.
        learning_ratio: float
            perceptron learning ratio
        rht: RegressionHoeffdingTree
            Regression Hoeffding Tree to update.
        """
        normalized_sample = rht.normalize_sample(X)
        normalized_base_pred = self._predict_base(normalized_sample)
        normalized_target_value = rht.normalized_target_value(y)
        # Gradient step for the base level (outer product of error and input).
        self.perceptron_weight[0] += learning_ratio * \
            (normalized_target_value - normalized_base_pred)[:, None] @ \
            normalized_sample[None, :]
        # Add bias term before feeding base predictions to the meta level.
        normalized_base_pred = np.append(normalized_base_pred, 1.0)
        normalized_meta_pred = self._predict_meta(normalized_base_pred)
        self.perceptron_weight[1] += learning_ratio * \
            (normalized_target_value - normalized_meta_pred)[:, None] @ \
            normalized_base_pred[None, :]
        # Normalize both levels
        self.normalize_perceptron_weights()

    def normalize_perceptron_weights(self):
        """ Normalizes both levels of perceptron weights."""
        n_targets = self.perceptron_weight[0].shape[0]
        # Scale each target's weight row to unit L1 norm.
        for i in range(n_targets):
            sum_w_0 = np.sum(np.absolute(self.perceptron_weight[0][i, :]))
            self.perceptron_weight[0][i, :] /= sum_w_0
            sum_w_1 = np.sum(np.absolute(self.perceptron_weight[1][i, :]))
            self.perceptron_weight[1][i, :] /= sum_w_1

    def _predict_base(self, X):
        # Linear prediction of the base level.
        return self.perceptron_weight[0] @ X

    def _predict_meta(self, X):
        # Linear prediction of the stacked (meta) level.
        return self.perceptron_weight[1] @ X

    def get_weight_seen(self):
        """Calculate the total weight seen by the node.

        Returns
        -------
        float
            Total weight seen.
        """
        if self._observed_class_distribution == {}:
            return 0
        else:
            return self._observed_class_distribution[0]
import uuid
import testtools
from tempest.api.compute import base
from tempest.common import tempest_fixtures as fixtures
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest import config
from tempest.lib import exceptions as lib_exc
from tempest import test
CONF = config.CONF  # global tempest configuration object
class ServersAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
    """Tests Servers API using admin privileges (negative cases).

    Fix: removed dataset-extraction residue
    ("| tempest/api/compute/admin/test_servers_negative.py |") fused onto
    the final line, which broke the syntax.
    """

    @classmethod
    def setup_clients(cls):
        super(ServersAdminNegativeTestJSON, cls).setup_clients()
        # Admin clients for the operations under test; a non-admin servers
        # client is kept to verify policy enforcement.
        cls.client = cls.os_adm.servers_client
        cls.non_adm_client = cls.servers_client
        cls.flavors_client = cls.os_adm.flavors_client

    @classmethod
    def resource_setup(cls):
        super(ServersAdminNegativeTestJSON, cls).resource_setup()
        cls.tenant_id = cls.client.tenant_id
        cls.s1_name = data_utils.rand_name('server')
        server = cls.create_test_server(name=cls.s1_name,
                                        wait_until='ACTIVE')
        cls.s1_id = server['id']

    def _get_unused_flavor_id(self):
        """Return a random flavor id that does not yet exist."""
        flavor_id = data_utils.rand_int_id(start=1000)
        while True:
            try:
                self.flavors_client.show_flavor(flavor_id)
            except lib_exc.NotFound:
                break
            flavor_id = data_utils.rand_int_id(start=1000)
        return flavor_id

    @test.idempotent_id('28dcec23-f807-49da-822c-56a92ea3c687')
    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                          'Resize not available.')
    @test.attr(type=['negative'])
    def test_resize_server_using_overlimit_ram(self):
        # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
        self.useFixture(fixtures.LockFixture('compute_quotas'))
        flavor_name = data_utils.rand_name("flavor")
        flavor_id = self._get_unused_flavor_id()
        quota_set = (self.quotas_client.show_default_quota_set(self.tenant_id)
                     ['quota_set'])
        ram = int(quota_set['ram'])
        if ram == -1:
            raise self.skipException("default ram quota set is -1,"
                                     " cannot test overlimit")
        # One unit above the quota guarantees the resize must be rejected.
        ram += 1
        vcpus = 8
        disk = 10
        flavor_ref = self.flavors_client.create_flavor(name=flavor_name,
                                                       ram=ram, vcpus=vcpus,
                                                       disk=disk,
                                                       id=flavor_id)['flavor']
        self.addCleanup(self.flavors_client.delete_flavor, flavor_id)
        self.assertRaises((lib_exc.Forbidden, lib_exc.OverLimit),
                          self.client.resize_server,
                          self.servers[0]['id'],
                          flavor_ref['id'])

    @test.idempotent_id('7368a427-2f26-4ad9-9ba9-911a0ec2b0db')
    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                          'Resize not available.')
    @test.attr(type=['negative'])
    def test_resize_server_using_overlimit_vcpus(self):
        # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
        self.useFixture(fixtures.LockFixture('compute_quotas'))
        flavor_name = data_utils.rand_name("flavor")
        flavor_id = self._get_unused_flavor_id()
        ram = 512
        quota_set = (self.quotas_client.show_default_quota_set(self.tenant_id)
                     ['quota_set'])
        vcpus = int(quota_set['cores'])
        if vcpus == -1:
            raise self.skipException("default cores quota set is -1,"
                                     " cannot test overlimit")
        # One unit above the quota guarantees the resize must be rejected.
        vcpus += 1
        disk = 10
        flavor_ref = self.flavors_client.create_flavor(name=flavor_name,
                                                       ram=ram, vcpus=vcpus,
                                                       disk=disk,
                                                       id=flavor_id)['flavor']
        self.addCleanup(self.flavors_client.delete_flavor, flavor_id)
        self.assertRaises((lib_exc.Forbidden, lib_exc.OverLimit),
                          self.client.resize_server,
                          self.servers[0]['id'],
                          flavor_ref['id'])

    @test.attr(type=['negative'])
    @test.idempotent_id('b0b4d8af-1256-41ef-9ee7-25f1c19dde80')
    def test_reset_state_server_invalid_state(self):
        self.assertRaises(lib_exc.BadRequest,
                          self.client.reset_state, self.s1_id,
                          state='invalid')

    @test.attr(type=['negative'])
    @test.idempotent_id('4cdcc984-fab0-4577-9a9d-6d558527ee9d')
    def test_reset_state_server_invalid_type(self):
        self.assertRaises(lib_exc.BadRequest,
                          self.client.reset_state, self.s1_id,
                          state=1)

    @test.attr(type=['negative'])
    @test.idempotent_id('e741298b-8df2-46f0-81cb-8f814ff2504c')
    def test_reset_state_server_nonexistent_server(self):
        self.assertRaises(lib_exc.NotFound,
                          self.client.reset_state, '999', state='error')

    @test.attr(type=['negative'])
    @test.idempotent_id('e84e2234-60d2-42fa-8b30-e2d3049724ac')
    def test_get_server_diagnostics_by_non_admin(self):
        # Non-admin user can not view server diagnostics according to policy
        self.assertRaises(lib_exc.Forbidden,
                          self.non_adm_client.show_server_diagnostics,
                          self.s1_id)

    @test.attr(type=['negative'])
    @test.idempotent_id('46a4e1ca-87ae-4d28-987a-1b6b136a0221')
    def test_migrate_non_existent_server(self):
        # migrate a non existent server
        self.assertRaises(lib_exc.NotFound,
                          self.client.migrate_server,
                          str(uuid.uuid4()))

    @test.idempotent_id('b0b17f83-d14e-4fc4-8f31-bcc9f3cfa629')
    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                          'Resize not available.')
    @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
                          'Suspend is not available.')
    @test.attr(type=['negative'])
    def test_migrate_server_invalid_state(self):
        # create server.
        server = self.create_test_server(wait_until='ACTIVE')
        server_id = server['id']
        # suspend the server.
        self.client.suspend_server(server_id)
        waiters.wait_for_server_status(self.client,
                                       server_id, 'SUSPENDED')
        # migrate a suspended server should fail
        self.assertRaises(lib_exc.Conflict,
                          self.client.migrate_server,
                          server_id)
import uuid
import testtools
from tempest.api.compute import base
from tempest.common import tempest_fixtures as fixtures
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest import config
from tempest.lib import exceptions as lib_exc
from tempest import test
CONF = config.CONF  # global tempest configuration object
class ServersAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
    """Tests Servers API using admin privileges (negative cases).

    Fix: removed dataset-extraction residue ("| 0.359364 | 0.177543") fused
    onto the final line, which broke the syntax.
    """

    @classmethod
    def setup_clients(cls):
        super(ServersAdminNegativeTestJSON, cls).setup_clients()
        # Admin clients for the operations under test; a non-admin servers
        # client is kept to verify policy enforcement.
        cls.client = cls.os_adm.servers_client
        cls.non_adm_client = cls.servers_client
        cls.flavors_client = cls.os_adm.flavors_client

    @classmethod
    def resource_setup(cls):
        super(ServersAdminNegativeTestJSON, cls).resource_setup()
        cls.tenant_id = cls.client.tenant_id
        cls.s1_name = data_utils.rand_name('server')
        server = cls.create_test_server(name=cls.s1_name,
                                        wait_until='ACTIVE')
        cls.s1_id = server['id']

    def _get_unused_flavor_id(self):
        """Return a random flavor id that does not yet exist."""
        flavor_id = data_utils.rand_int_id(start=1000)
        while True:
            try:
                self.flavors_client.show_flavor(flavor_id)
            except lib_exc.NotFound:
                break
            flavor_id = data_utils.rand_int_id(start=1000)
        return flavor_id

    @test.idempotent_id('28dcec23-f807-49da-822c-56a92ea3c687')
    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                          'Resize not available.')
    @test.attr(type=['negative'])
    def test_resize_server_using_overlimit_ram(self):
        # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
        self.useFixture(fixtures.LockFixture('compute_quotas'))
        flavor_name = data_utils.rand_name("flavor")
        flavor_id = self._get_unused_flavor_id()
        quota_set = (self.quotas_client.show_default_quota_set(self.tenant_id)
                     ['quota_set'])
        ram = int(quota_set['ram'])
        if ram == -1:
            raise self.skipException("default ram quota set is -1,"
                                     " cannot test overlimit")
        # One unit above the quota guarantees the resize must be rejected.
        ram += 1
        vcpus = 8
        disk = 10
        flavor_ref = self.flavors_client.create_flavor(name=flavor_name,
                                                       ram=ram, vcpus=vcpus,
                                                       disk=disk,
                                                       id=flavor_id)['flavor']
        self.addCleanup(self.flavors_client.delete_flavor, flavor_id)
        self.assertRaises((lib_exc.Forbidden, lib_exc.OverLimit),
                          self.client.resize_server,
                          self.servers[0]['id'],
                          flavor_ref['id'])

    @test.idempotent_id('7368a427-2f26-4ad9-9ba9-911a0ec2b0db')
    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                          'Resize not available.')
    @test.attr(type=['negative'])
    def test_resize_server_using_overlimit_vcpus(self):
        # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
        self.useFixture(fixtures.LockFixture('compute_quotas'))
        flavor_name = data_utils.rand_name("flavor")
        flavor_id = self._get_unused_flavor_id()
        ram = 512
        quota_set = (self.quotas_client.show_default_quota_set(self.tenant_id)
                     ['quota_set'])
        vcpus = int(quota_set['cores'])
        if vcpus == -1:
            raise self.skipException("default cores quota set is -1,"
                                     " cannot test overlimit")
        # One unit above the quota guarantees the resize must be rejected.
        vcpus += 1
        disk = 10
        flavor_ref = self.flavors_client.create_flavor(name=flavor_name,
                                                       ram=ram, vcpus=vcpus,
                                                       disk=disk,
                                                       id=flavor_id)['flavor']
        self.addCleanup(self.flavors_client.delete_flavor, flavor_id)
        self.assertRaises((lib_exc.Forbidden, lib_exc.OverLimit),
                          self.client.resize_server,
                          self.servers[0]['id'],
                          flavor_ref['id'])

    @test.attr(type=['negative'])
    @test.idempotent_id('b0b4d8af-1256-41ef-9ee7-25f1c19dde80')
    def test_reset_state_server_invalid_state(self):
        self.assertRaises(lib_exc.BadRequest,
                          self.client.reset_state, self.s1_id,
                          state='invalid')

    @test.attr(type=['negative'])
    @test.idempotent_id('4cdcc984-fab0-4577-9a9d-6d558527ee9d')
    def test_reset_state_server_invalid_type(self):
        self.assertRaises(lib_exc.BadRequest,
                          self.client.reset_state, self.s1_id,
                          state=1)

    @test.attr(type=['negative'])
    @test.idempotent_id('e741298b-8df2-46f0-81cb-8f814ff2504c')
    def test_reset_state_server_nonexistent_server(self):
        self.assertRaises(lib_exc.NotFound,
                          self.client.reset_state, '999', state='error')

    @test.attr(type=['negative'])
    @test.idempotent_id('e84e2234-60d2-42fa-8b30-e2d3049724ac')
    def test_get_server_diagnostics_by_non_admin(self):
        # Non-admin user can not view server diagnostics according to policy
        self.assertRaises(lib_exc.Forbidden,
                          self.non_adm_client.show_server_diagnostics,
                          self.s1_id)

    @test.attr(type=['negative'])
    @test.idempotent_id('46a4e1ca-87ae-4d28-987a-1b6b136a0221')
    def test_migrate_non_existent_server(self):
        # migrate a non existent server
        self.assertRaises(lib_exc.NotFound,
                          self.client.migrate_server,
                          str(uuid.uuid4()))

    @test.idempotent_id('b0b17f83-d14e-4fc4-8f31-bcc9f3cfa629')
    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                          'Resize not available.')
    @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
                          'Suspend is not available.')
    @test.attr(type=['negative'])
    def test_migrate_server_invalid_state(self):
        # create server.
        server = self.create_test_server(wait_until='ACTIVE')
        server_id = server['id']
        # suspend the server.
        self.client.suspend_server(server_id)
        waiters.wait_for_server_status(self.client,
                                       server_id, 'SUSPENDED')
        # migrate a suspended server should fail
        self.assertRaises(lib_exc.Conflict,
                          self.client.migrate_server,
                          server_id)
from __future__ import unicode_literals
import numbers
import six
from flex._compat import Sequence, Mapping
SCHEMES = (
'http', 'https', 'ws', 'wss',
)
MIMETYPES = (
'application/json',
)
NULL = 'null'
BOOLEAN = 'boolean'
INTEGER = 'integer'
NUMBER = 'number'
STRING = 'string'
ARRAY = 'array'
OBJECT = 'object'
FILE = 'file'
PRIMITIVE_TYPES = {
'': (type(None),),
None: (type(None),),
NULL: (type(None),),
BOOLEAN: (bool,),
INTEGER: six.integer_types,
NUMBER: (numbers.Number,),
STRING: (six.binary_type, six.text_type),
ARRAY: (Sequence,),
OBJECT: (Mapping,),
}
TRUE_VALUES = set(('true', 'True', '1'))
FALSE_VALUES = set(('false', 'False', '0', ''))
HEADER_TYPES = (
STRING,
INTEGER,
NUMBER,
BOOLEAN,
ARRAY,
)
PATH = 'path'
BODY = 'body'
QUERY = 'query'
FORM_DATA = 'formData'
HEADER = 'header'
PARAMETER_IN_VALUES = (
QUERY,
HEADER,
PATH,
FORM_DATA,
BODY,
)
CSV = 'csv'
MULTI = 'multi'
SSV = 'ssv'
TSV = 'tsv'
PIPES = 'pipes'
COLLECTION_FORMATS = (
CSV,
SSV,
TSV,
PIPES,
MULTI,
)
DELIMETERS = {
CSV: ',',
SSV: ' ',
TSV: '\t',
PIPES: '|',
}
API_KEY = 'apiKey'
BASIC = 'basic'
OAUTH_2 = 'oath2'
SECURITY_TYPES = (
API_KEY,
BASIC,
OAUTH_2,
)
QUERY = QUERY
HEADER = HEADER
SECURITY_API_KEY_LOCATIONS = (
QUERY,
HEADER,
)
IMPLICIT = 'implicit'
PASSWORD = 'password'
APPLICATION = 'application'
ACCESS_CODE = 'accessCode'
SECURITY_FLOWS = (
IMPLICIT,
PASSWORD,
APPLICATION,
ACCESS_CODE,
)
class Empty(object):
def __cmp__(self, other):
raise TypeError('Empty cannot be compared to other values')
"""
Sentinal empty value for use with distinguishing `None` from a key not
being present.
"""
EMPTY = Empty()
UUID = 'uuid'
DATE = 'date'
DATETIME = 'date-time'
EMAIL = 'email'
INT32 = 'int32'
INT64 = 'int64'
URI = 'uri'
FORMATS = (
('integer', 'int32'),
('integer', 'int64'),
('number', 'float'),
('number', 'double'),
('string', 'byte'),
('string', 'date'),
('string', 'date-time'),
('string', 'email'),
('string', 'uri'),
)
# Request Methods
GET = 'get'
PUT = 'put'
POST = 'post'
DELETE = 'delete'
OPTIONS = 'options'
HEAD = 'head'
PATCH = 'patch'
REQUEST_METHODS = (GET, PUT, POST, DELETE, OPTIONS, HEAD, PATCH)
# Environment variables
FLEX_DISABLE_X_NULLABLE = 'FLEX_DISABLE_X_NULLABLE' | flex/constants.py | from __future__ import unicode_literals
import numbers
import six
from flex._compat import Sequence, Mapping
SCHEMES = (
'http', 'https', 'ws', 'wss',
)
MIMETYPES = (
'application/json',
)
NULL = 'null'
BOOLEAN = 'boolean'
INTEGER = 'integer'
NUMBER = 'number'
STRING = 'string'
ARRAY = 'array'
OBJECT = 'object'
FILE = 'file'
PRIMITIVE_TYPES = {
'': (type(None),),
None: (type(None),),
NULL: (type(None),),
BOOLEAN: (bool,),
INTEGER: six.integer_types,
NUMBER: (numbers.Number,),
STRING: (six.binary_type, six.text_type),
ARRAY: (Sequence,),
OBJECT: (Mapping,),
}
TRUE_VALUES = set(('true', 'True', '1'))
FALSE_VALUES = set(('false', 'False', '0', ''))
HEADER_TYPES = (
STRING,
INTEGER,
NUMBER,
BOOLEAN,
ARRAY,
)
PATH = 'path'
BODY = 'body'
QUERY = 'query'
FORM_DATA = 'formData'
HEADER = 'header'
PARAMETER_IN_VALUES = (
QUERY,
HEADER,
PATH,
FORM_DATA,
BODY,
)
CSV = 'csv'
MULTI = 'multi'
SSV = 'ssv'
TSV = 'tsv'
PIPES = 'pipes'
COLLECTION_FORMATS = (
CSV,
SSV,
TSV,
PIPES,
MULTI,
)
DELIMETERS = {
CSV: ',',
SSV: ' ',
TSV: '\t',
PIPES: '|',
}
API_KEY = 'apiKey'
BASIC = 'basic'
OAUTH_2 = 'oath2'
SECURITY_TYPES = (
API_KEY,
BASIC,
OAUTH_2,
)
QUERY = QUERY
HEADER = HEADER
SECURITY_API_KEY_LOCATIONS = (
QUERY,
HEADER,
)
IMPLICIT = 'implicit'
PASSWORD = 'password'
APPLICATION = 'application'
ACCESS_CODE = 'accessCode'
SECURITY_FLOWS = (
IMPLICIT,
PASSWORD,
APPLICATION,
ACCESS_CODE,
)
class Empty(object):
def __cmp__(self, other):
raise TypeError('Empty cannot be compared to other values')
"""
Sentinal empty value for use with distinguishing `None` from a key not
being present.
"""
EMPTY = Empty()
UUID = 'uuid'
DATE = 'date'
DATETIME = 'date-time'
EMAIL = 'email'
INT32 = 'int32'
INT64 = 'int64'
URI = 'uri'
FORMATS = (
('integer', 'int32'),
('integer', 'int64'),
('number', 'float'),
('number', 'double'),
('string', 'byte'),
('string', 'date'),
('string', 'date-time'),
('string', 'email'),
('string', 'uri'),
)
# Request Methods
GET = 'get'
PUT = 'put'
POST = 'post'
DELETE = 'delete'
OPTIONS = 'options'
HEAD = 'head'
PATCH = 'patch'
REQUEST_METHODS = (GET, PUT, POST, DELETE, OPTIONS, HEAD, PATCH)
# Environment variables
FLEX_DISABLE_X_NULLABLE = 'FLEX_DISABLE_X_NULLABLE' | 0.572962 | 0.081593 |
import json
import sys
if sys.version_info[0] != 3:
range = xrange # @ReservedAssignment @UndefinedVariable
class VkTools(object):
""" Содержит некоторые воспомогательные функции, которые могут понадобиться
при использовании API
"""
__slots__ = ('vk',)
def __init__(self, vk):
"""
:param vk: объект VkApi
"""
self.vk = vk
def get_all(self, method, max_count, values=None, key='items', limit=None):
""" Получить все элементы
Работает в методах, где в ответе есть count и items или users
За один запрос получает max_count * 25 элементов
:param method: метод
:param values: параметры
:param max_count: максимальное количество элементов,
которое можно получить за один раз
:param key: ключ элементов, которые нужно получить
:param limit: ограничение на кол-во получаемых элементов,
но может прийти больше
"""
if values:
values = values.copy()
else:
values = {}
items = []
offset = 0
while True:
run_code = code_get_all_items % (
max_count, offset, key, json.dumps(values, ensure_ascii=False),
method, method
)
response = self.vk.method('execute', {'code': run_code})
items += response['items']
offset = response['offset']
if offset >= response['count']:
break
if limit and len(items) >= limit:
break
return {'count': len(items), key: items}
def get_all_slow(self, method, max_count, values=None, key='items',
limit=None):
""" Получить все элементы
Работает в методах, где в ответе есть count и items или users
:param method: метод
:param values: параметры
:param max_count: максимальное количество элементов,
которое можно получить за один раз
:param key: ключ элементов, которые нужно получить
:param limit: ограничение на кол-во получаемых элементов,
но может прийти больше
"""
if not values:
values = {}
else:
values = values.copy()
values.update({'count': max_count})
response = self.vk.method(method, values)
count = response['count']
items = response[key]
for i in range(max_count, count + 1, max_count):
values.update({
'offset': i
})
response = self.vk.method(method, values)
items += response[key]
if limit and len(items) >= limit:
break
return {'count': len(items), key: items}
class VkRequestsPool(object):
""" Позволяет сделать несколько обращений к API за один запрос
за счет метода execute
"""
__slots__ = ('vk', 'pool', 'one_param')
def __init__(self, vk):
self.vk = vk
self.pool = []
self.one_param = False
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.execute()
def method(self, method, values=None):
""" Добавляет запрос в пулл
:param method: метод
:param values: параметры
"""
if self.one_param:
raise Exception('One param mode dont work with self.method')
req = (method, values, {})
self.pool.append(req)
return req[2]
def method_one_param(self, method, default_values=None, key=None,
values=None):
""" Использовать, если изменяется значение только одного параметра
:param method: метод
:param default_values: одинаковые значения для запросов
:param key: ключ изменяющегося параметра
:param values: список значений изменяющегося параметра (max: 25)
"""
if self.one_param is False and self.pool:
raise Exception('One param mode dont work with self.method')
if default_values is None:
default_values = {}
self.one_param = {
'method': method,
'default': default_values,
'key': key,
'return': {}
}
self.pool = values
return self.one_param['return']
def check_one_method(self, pool):
""" Возвращает True, если все запросы в пулле к одному методу """
if len(pool) > 1:
first_method = pool[0][0]
for req in pool[1:]:
if req[0] != first_method:
break
else:
return True
return False
def gen_code_one_method(self, pool):
""" Генерирует код для одного метода
(если в пулле запросы к одному методу)
"""
method = pool[0][0]
list_values = [i[1] for i in pool]
json_list_values = json.dumps(list_values, separators=(',', ':'))
run_code = code_requestspoll_one_method % (
json_list_values, method
)
return run_code
def gen_code_one_param(self, pool):
""" Генерирует код для одного метода и одного меняющегося параметра
(если в пулле запросы к одному методу, с одним меняющеися параметром)
"""
run_code = code_requestspoll_one_param % (
json.dumps(self.one_param['default'], separators=(',', ':')),
json.dumps(pool, separators=(',', ':')),
self.one_param['key'],
self.one_param['method']
)
# print(run_code)
return run_code
def gen_code_many_methods(self, pool):
""" Генерирует код для нескольких методов """
reqs = ','.join(
'API.{}({})'.format(i[0], json.dumps(i[1]), separators=(',', ':'))
for i in pool
)
run_code = 'return [{}];'.format(reqs)
return run_code
def execute(self):
for i in range(0, len(self.pool), 25):
cur_pool = self.pool[i:i + 25]
if self.one_param:
run_code = self.gen_code_one_param(cur_pool)
else:
one_method = self.check_one_method(cur_pool)
if one_method:
run_code = self.gen_code_one_method(cur_pool)
else:
run_code = self.gen_code_many_methods(cur_pool)
response = self.vk.method('execute', {'code': run_code})
for x in range(len(response)):
if self.one_param:
self.one_param['return'][cur_pool[x]] = response[x]
else:
self.pool[i + x][2].update(response[x])
# Полный код в файле vk_procedures
code_get_all_items = """
var m=%s,n=%s,b="%s",v=n;var c={count:m,offset:v}+%s;var r=API.%s(c),k=r.count,
j=r[b],i=1;while(i<25&&v+m<=k){v=i*m+n;c.offset=v;j=j+API.%s(c)[b];i=i+1;}
return {count:k,items:j,offset:v+m};
""".replace('\n', '')
code_requestspoll_one_method = """
var p=%s,i=0,r=[];while(i<p.length){r.push(API.%s(p[i]));i=i+1;}return r;
""".replace('\n', '')
code_requestspoll_one_param = """
var d=%s,v=%s,r=[],i=0;while(i<v.length){d.%s=v[i];r.push(API.%s(d));i=i+1;};
return r;
""".replace('\n', '') | vk_api/vk_tools.py | import json
import sys
if sys.version_info[0] != 3:
range = xrange # @ReservedAssignment @UndefinedVariable
class VkTools(object):
""" Содержит некоторые воспомогательные функции, которые могут понадобиться
при использовании API
"""
__slots__ = ('vk',)
def __init__(self, vk):
"""
:param vk: объект VkApi
"""
self.vk = vk
def get_all(self, method, max_count, values=None, key='items', limit=None):
""" Получить все элементы
Работает в методах, где в ответе есть count и items или users
За один запрос получает max_count * 25 элементов
:param method: метод
:param values: параметры
:param max_count: максимальное количество элементов,
которое можно получить за один раз
:param key: ключ элементов, которые нужно получить
:param limit: ограничение на кол-во получаемых элементов,
но может прийти больше
"""
if values:
values = values.copy()
else:
values = {}
items = []
offset = 0
while True:
run_code = code_get_all_items % (
max_count, offset, key, json.dumps(values, ensure_ascii=False),
method, method
)
response = self.vk.method('execute', {'code': run_code})
items += response['items']
offset = response['offset']
if offset >= response['count']:
break
if limit and len(items) >= limit:
break
return {'count': len(items), key: items}
def get_all_slow(self, method, max_count, values=None, key='items',
limit=None):
""" Получить все элементы
Работает в методах, где в ответе есть count и items или users
:param method: метод
:param values: параметры
:param max_count: максимальное количество элементов,
которое можно получить за один раз
:param key: ключ элементов, которые нужно получить
:param limit: ограничение на кол-во получаемых элементов,
но может прийти больше
"""
if not values:
values = {}
else:
values = values.copy()
values.update({'count': max_count})
response = self.vk.method(method, values)
count = response['count']
items = response[key]
for i in range(max_count, count + 1, max_count):
values.update({
'offset': i
})
response = self.vk.method(method, values)
items += response[key]
if limit and len(items) >= limit:
break
return {'count': len(items), key: items}
class VkRequestsPool(object):
""" Позволяет сделать несколько обращений к API за один запрос
за счет метода execute
"""
__slots__ = ('vk', 'pool', 'one_param')
def __init__(self, vk):
self.vk = vk
self.pool = []
self.one_param = False
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.execute()
def method(self, method, values=None):
""" Добавляет запрос в пулл
:param method: метод
:param values: параметры
"""
if self.one_param:
raise Exception('One param mode dont work with self.method')
req = (method, values, {})
self.pool.append(req)
return req[2]
def method_one_param(self, method, default_values=None, key=None,
values=None):
""" Использовать, если изменяется значение только одного параметра
:param method: метод
:param default_values: одинаковые значения для запросов
:param key: ключ изменяющегося параметра
:param values: список значений изменяющегося параметра (max: 25)
"""
if self.one_param is False and self.pool:
raise Exception('One param mode dont work with self.method')
if default_values is None:
default_values = {}
self.one_param = {
'method': method,
'default': default_values,
'key': key,
'return': {}
}
self.pool = values
return self.one_param['return']
def check_one_method(self, pool):
""" Возвращает True, если все запросы в пулле к одному методу """
if len(pool) > 1:
first_method = pool[0][0]
for req in pool[1:]:
if req[0] != first_method:
break
else:
return True
return False
def gen_code_one_method(self, pool):
""" Генерирует код для одного метода
(если в пулле запросы к одному методу)
"""
method = pool[0][0]
list_values = [i[1] for i in pool]
json_list_values = json.dumps(list_values, separators=(',', ':'))
run_code = code_requestspoll_one_method % (
json_list_values, method
)
return run_code
def gen_code_one_param(self, pool):
""" Генерирует код для одного метода и одного меняющегося параметра
(если в пулле запросы к одному методу, с одним меняющеися параметром)
"""
run_code = code_requestspoll_one_param % (
json.dumps(self.one_param['default'], separators=(',', ':')),
json.dumps(pool, separators=(',', ':')),
self.one_param['key'],
self.one_param['method']
)
# print(run_code)
return run_code
def gen_code_many_methods(self, pool):
""" Генерирует код для нескольких методов """
reqs = ','.join(
'API.{}({})'.format(i[0], json.dumps(i[1]), separators=(',', ':'))
for i in pool
)
run_code = 'return [{}];'.format(reqs)
return run_code
def execute(self):
for i in range(0, len(self.pool), 25):
cur_pool = self.pool[i:i + 25]
if self.one_param:
run_code = self.gen_code_one_param(cur_pool)
else:
one_method = self.check_one_method(cur_pool)
if one_method:
run_code = self.gen_code_one_method(cur_pool)
else:
run_code = self.gen_code_many_methods(cur_pool)
response = self.vk.method('execute', {'code': run_code})
for x in range(len(response)):
if self.one_param:
self.one_param['return'][cur_pool[x]] = response[x]
else:
self.pool[i + x][2].update(response[x])
# Полный код в файле vk_procedures
code_get_all_items = """
var m=%s,n=%s,b="%s",v=n;var c={count:m,offset:v}+%s;var r=API.%s(c),k=r.count,
j=r[b],i=1;while(i<25&&v+m<=k){v=i*m+n;c.offset=v;j=j+API.%s(c)[b];i=i+1;}
return {count:k,items:j,offset:v+m};
""".replace('\n', '')
code_requestspoll_one_method = """
var p=%s,i=0,r=[];while(i<p.length){r.push(API.%s(p[i]));i=i+1;}return r;
""".replace('\n', '')
code_requestspoll_one_param = """
var d=%s,v=%s,r=[],i=0;while(i<v.length){d.%s=v[i];r.push(API.%s(d));i=i+1;};
return r;
""".replace('\n', '') | 0.191252 | 0.467696 |
import datetime
from typing import Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._monitor_query_client_enums import *
class BatchQueryRequest(msrest.serialization.Model):
    """A single query request within a batch call.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. Identifier of this request, used to correlate it with its
     response within the batch.
    :type id: str
    :param headers: Dictionary of :code:`<string>`.
    :type headers: dict[str, str]
    :param body: Required. The Analytics query. Learn more about the `Analytics query syntax
     <https://azure.microsoft.com/documentation/articles/app-insights-analytics-reference/>`_.
    :type body: ~monitor_query_client.models.QueryBody
    :ivar path: Default value: "/query".
    :vartype path: str
    :ivar method: Default value: "POST".
    :vartype method: str
    :param workspace: Required. Workspace Id to be included in the query.
    :type workspace: str
    """

    _validation = {
        'id': {'required': True},
        'body': {'required': True},
        'path': {'constant': True},
        'method': {'constant': True},
        'workspace': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'headers': {'key': 'headers', 'type': '{str}'},
        'body': {'key': 'body', 'type': 'QueryBody'},
        'path': {'key': 'path', 'type': 'str'},
        'method': {'key': 'method', 'type': 'str'},
        'workspace': {'key': 'workspace', 'type': 'str'},
    }

    # Constant class-level values (marked 'constant' in _validation): every
    # request in a batch is a POST to the /query sub-path.
    path = "/query"
    method = "POST"

    def __init__(self, *, id: str, body: "QueryBody", workspace: str,
                 headers: Optional[Dict[str, str]] = None, **kwargs):
        super().__init__(**kwargs)
        self.id = id
        self.headers = headers
        self.body = body
        self.workspace = workspace
class BatchQueryResponse(msrest.serialization.Model):
    """The result of one individual query inside a batch response.

    :param id: Identifier correlating this response with its request in the batch.
    :type id: str
    :param status: Status code of this individual query.
    :type status: int
    :param body: Contains the tables, columns & rows resulting from a query.
    :type body: ~monitor_query_client.models.BatchQueryResults
    :param headers: Dictionary of :code:`<string>`.
    :type headers: dict[str, str]
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'status': {'key': 'status', 'type': 'int'},
        'body': {'key': 'body', 'type': 'BatchQueryResults'},
        'headers': {'key': 'headers', 'type': '{str}'},
    }

    def __init__(self, *, id: Optional[str] = None, status: Optional[int] = None,
                 body: Optional["BatchQueryResults"] = None,
                 headers: Optional[Dict[str, str]] = None, **kwargs):
        super().__init__(**kwargs)
        self.id = id
        self.status = status
        self.body = body
        self.headers = headers
class BatchQueryResults(msrest.serialization.Model):
    """Contains the tables, columns & rows resulting from a query.

    :param tables: The list of tables, columns and rows.
    :type tables: list[~monitor_query_client.models.Table]
    :param statistics: Statistics represented in JSON format.
    :type statistics: object
    :param render: Visualization data in JSON format.
    :type render: object
    :param error: The code and message for an error.
    :type error: ~monitor_query_client.models.ErrorInfo
    """

    _attribute_map = {
        'tables': {'key': 'tables', 'type': '[Table]'},
        'statistics': {'key': 'statistics', 'type': 'object'},
        'render': {'key': 'render', 'type': 'object'},
        'error': {'key': 'error', 'type': 'ErrorInfo'},
    }

    def __init__(self, *, tables: Optional[List["Table"]] = None,
                 statistics: Optional[object] = None,
                 render: Optional[object] = None,
                 error: Optional["ErrorInfo"] = None, **kwargs):
        super().__init__(**kwargs)
        self.tables = tables
        self.statistics = statistics
        self.render = render
        self.error = error
class BatchRequest(msrest.serialization.Model):
    """A collection of query requests to be executed as one batch.

    All required parameters must be populated in order to send to Azure.

    :param requests: Required. The individual requests that make up the batch.
    :type requests: list[~monitor_query_client.models.BatchQueryRequest]
    """

    _validation = {
        'requests': {'required': True},
    }

    _attribute_map = {
        'requests': {'key': 'requests', 'type': '[BatchQueryRequest]'},
    }

    def __init__(self, *, requests: List["BatchQueryRequest"], **kwargs):
        super().__init__(**kwargs)
        self.requests = requests
class BatchResponse(msrest.serialization.Model):
    """Response to a batch query.

    :param responses: An array of responses corresponding to each individual request in a
     batch.
    :type responses: list[~monitor_query_client.models.BatchQueryResponse]
    """

    _attribute_map = {
        'responses': {'key': 'responses', 'type': '[BatchQueryResponse]'},
    }

    def __init__(self, *, responses: Optional[List["BatchQueryResponse"]] = None, **kwargs):
        super().__init__(**kwargs)
        self.responses = responses
class Column(msrest.serialization.Model):
    """A single column definition within a table.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The name of this column.
    :type name: str
    :param type: Required. The data type of this column. Possible values include: "bool",
     "datetime", "dynamic", "int", "long", "real", "string", "guid", "decimal", "timespan".
    :type type: str or ~monitor_query_client.models.LogsColumnType
    """

    _validation = {
        'name': {'required': True},
        'type': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, *, name: str, type: Union[str, "LogsColumnType"], **kwargs):
        super().__init__(**kwargs)
        self.name = name
        self.type = type
class ErrorDetail(msrest.serialization.Model):
    """One detail entry of an error response.

    All required parameters must be populated in order to send to Azure.

    :param code: Required. The error's code.
    :type code: str
    :param message: Required. A human readable error message.
    :type message: str
    :param target: Indicates which property in the request is responsible for the error.
    :type target: str
    :param value: Indicates which value in 'target' is responsible for the error.
    :type value: str
    :param resources: Indicates resources which were responsible for the error.
    :type resources: list[str]
    :param additional_properties: Additional properties that can be provided on the error
     details object.
    :type additional_properties: object
    """

    _validation = {
        'code': {'required': True},
        'message': {'required': True},
    }

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
        'resources': {'key': 'resources', 'type': '[str]'},
        'additional_properties': {'key': 'additionalProperties', 'type': 'object'},
    }

    def __init__(self, *, code: str, message: str,
                 target: Optional[str] = None,
                 value: Optional[str] = None,
                 resources: Optional[List[str]] = None,
                 additional_properties: Optional[object] = None, **kwargs):
        super().__init__(**kwargs)
        self.code = code
        self.message = message
        self.target = target
        self.value = value
        self.resources = resources
        self.additional_properties = additional_properties
class ErrorInfo(msrest.serialization.Model):
    """The code and message for an error.

    All required parameters must be populated in order to send to Azure.

    :param code: Required. A machine readable error code.
    :type code: str
    :param message: Required. A human readable error message.
    :type message: str
    :param details: error details.
    :type details: list[~monitor_query_client.models.ErrorDetail]
    :param innererror: Inner error details if they exist.
    :type innererror: ~monitor_query_client.models.ErrorInfo
    :param additional_properties: Additional properties that can be provided on the error
     info object.
    :type additional_properties: object
    """

    _validation = {
        'code': {'required': True},
        'message': {'required': True},
    }

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'details': {'key': 'details', 'type': '[ErrorDetail]'},
        'innererror': {'key': 'innererror', 'type': 'ErrorInfo'},
        'additional_properties': {'key': 'additionalProperties', 'type': 'object'},
    }

    def __init__(self, *, code: str, message: str,
                 details: Optional[List["ErrorDetail"]] = None,
                 innererror: Optional["ErrorInfo"] = None,
                 additional_properties: Optional[object] = None, **kwargs):
        super().__init__(**kwargs)
        self.code = code
        self.message = message
        self.details = details
        self.innererror = innererror
        self.additional_properties = additional_properties
class ErrorResponse(msrest.serialization.Model):
    """Contains details when the response code indicates an error.

    All required parameters must be populated in order to send to Azure.

    :param error: Required. The error details.
    :type error: ~monitor_query_client.models.ErrorInfo
    """

    _validation = {
        'error': {'required': True},
    }

    _attribute_map = {
        'error': {'key': 'error', 'type': 'ErrorInfo'},
    }

    def __init__(self, *, error: "ErrorInfo", **kwargs):
        super().__init__(**kwargs)
        self.error = error
class ErrorResponseAutoGenerated(msrest.serialization.Model):
    """Describes the format of Error response.

    :param code: Error code.
    :type code: str
    :param message: Error message indicating why the operation failed.
    :type message: str
    """

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, *, code: Optional[str] = None,
                 message: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.code = code
        self.message = message
class LocalizableString(msrest.serialization.Model):
    """A string value paired with an optional locale-specific rendering.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. the invariant value.
    :type value: str
    :param localized_value: the locale specific value.
    :type localized_value: str
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': 'str'},
        'localized_value': {'key': 'localizedValue', 'type': 'str'},
    }

    def __init__(self, *, value: str, localized_value: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.localized_value = localized_value
class MetadataApplication(msrest.serialization.Model):
    """Application Insights apps that were part of the metadata request and that the user has
    access to.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. The ID of the Application Insights app.
    :type id: str
    :param resource_id: Required. The ARM resource ID of the Application Insights app.
    :type resource_id: str
    :param name: Required. The name of the Application Insights app.
    :type name: str
    :param region: Required. The Azure region of the Application Insights app.
    :type region: str
    :param related: The related metadata items for the Application Insights app.
    :type related: ~monitor_query_client.models.MetadataApplicationRelated
    """

    _validation = {
        'id': {'required': True},
        'resource_id': {'required': True},
        'name': {'required': True},
        'region': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'resource_id': {'key': 'resourceId', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'region': {'key': 'region', 'type': 'str'},
        'related': {'key': 'related', 'type': 'MetadataApplicationRelated'},
    }

    def __init__(self, *, id: str, resource_id: str, name: str, region: str,
                 related: Optional["MetadataApplicationRelated"] = None, **kwargs):
        super().__init__(**kwargs)
        self.id = id
        self.resource_id = resource_id
        self.name = name
        self.region = region
        self.related = related
class MetadataApplicationRelated(msrest.serialization.Model):
    """The related metadata items for the Application Insights app.

    :param tables: The related tables for the Application Insights app.
    :type tables: list[str]
    :param functions: The related functions for the Application Insights app.
    :type functions: list[str]
    """

    _attribute_map = {
        'tables': {'key': 'tables', 'type': '[str]'},
        'functions': {'key': 'functions', 'type': '[str]'},
    }

    def __init__(self, *, tables: Optional[List[str]] = None,
                 functions: Optional[List[str]] = None, **kwargs):
        super().__init__(**kwargs)
        self.tables = tables
        self.functions = functions
class MetadataCategory(msrest.serialization.Model):
    """Categories are used to group other metadata entities.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. The ID of the category.
    :type id: str
    :param display_name: Required. The display name of the category.
    :type display_name: str
    :param description: The description of the category.
    :type description: str
    :param related: The related metadata items for the category.
    :type related: ~monitor_query_client.models.MetadataCategoryRelated
    """

    _validation = {
        'id': {'required': True},
        'display_name': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'related': {'key': 'related', 'type': 'MetadataCategoryRelated'},
    }

    def __init__(self, *, id: str, display_name: str,
                 description: Optional[str] = None,
                 related: Optional["MetadataCategoryRelated"] = None, **kwargs):
        super().__init__(**kwargs)
        self.id = id
        self.display_name = display_name
        self.description = description
        self.related = related
class MetadataCategoryRelated(msrest.serialization.Model):
    """The related metadata items for the category.

    :param tables: The tables related to the category.
    :type tables: list[str]
    :param functions: The functions related to the category.
    :type functions: list[str]
    :param resource_types: The resource types related to the category.
    :type resource_types: list[str]
    :param queries: The saved queries related to the category.
    :type queries: list[str]
    :param solutions: The Log Analytics solutions related to the category.
    :type solutions: list[str]
    """

    _attribute_map = {
        'tables': {'key': 'tables', 'type': '[str]'},
        'functions': {'key': 'functions', 'type': '[str]'},
        'resource_types': {'key': 'resourceTypes', 'type': '[str]'},
        'queries': {'key': 'queries', 'type': '[str]'},
        'solutions': {'key': 'solutions', 'type': '[str]'},
    }

    def __init__(self, *, tables: Optional[List[str]] = None,
                 functions: Optional[List[str]] = None,
                 resource_types: Optional[List[str]] = None,
                 queries: Optional[List[str]] = None,
                 solutions: Optional[List[str]] = None, **kwargs):
        super().__init__(**kwargs)
        self.tables = tables
        self.functions = functions
        self.resource_types = resource_types
        self.queries = queries
        self.solutions = solutions
class MetadataFunction(msrest.serialization.Model):
    """A stored Kusto query (function) that can be referenced by name inside other queries.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. The ID of the function.
    :type id: str
    :param name: Required. The name of the function, as used in queries.
    :type name: str
    :param parameters: The parameters/arguments of the function, if any.
    :type parameters: str
    :param display_name: The display name of the function.
    :type display_name: str
    :param description: The description of the function.
    :type description: str
    :param body: Required. The KQL body of the function.
    :type body: str
    :param tags: A set of tags. Tags associated with the function.
    :type tags: object
    :param properties: The properties of the function.
    :type properties: object
    :param related: Metadata items related to the function.
    :type related: ~monitor_query_client.models.MetadataFunctionRelated
    """

    _validation = {
        "id": {"required": True},
        "name": {"required": True},
        "body": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "parameters": {"key": "parameters", "type": "str"},
        "display_name": {"key": "displayName", "type": "str"},
        "description": {"key": "description", "type": "str"},
        "body": {"key": "body", "type": "str"},
        "tags": {"key": "tags", "type": "object"},
        "properties": {"key": "properties", "type": "object"},
        "related": {"key": "related", "type": "MetadataFunctionRelated"},
    }

    def __init__(
        self,
        *,
        id: str,
        name: str,
        body: str,
        parameters: Optional[str] = None,
        display_name: Optional[str] = None,
        description: Optional[str] = None,
        tags: Optional[object] = None,
        properties: Optional[object] = None,
        related: Optional["MetadataFunctionRelated"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.id = id
        self.name = name
        self.parameters = parameters
        self.display_name = display_name
        self.description = description
        self.body = body
        self.tags = tags
        self.properties = properties
        self.related = related
class MetadataFunctionRelated(msrest.serialization.Model):
    """Metadata items associated with a function.

    :param tables: Tables related to the function.
    :type tables: list[str]
    :param solutions: Log Analytics solutions related to the function.
    :type solutions: list[str]
    :param resource_types: Resource types related to the function.
    :type resource_types: list[str]
    :param categories: Categories related to the function.
    :type categories: list[str]
    :param workspaces: Workspaces related to the function.
    :type workspaces: list[str]
    """

    _attribute_map = {
        "tables": {"key": "tables", "type": "[str]"},
        "solutions": {"key": "solutions", "type": "[str]"},
        "resource_types": {"key": "resourceTypes", "type": "[str]"},
        "categories": {"key": "categories", "type": "[str]"},
        "workspaces": {"key": "workspaces", "type": "[str]"},
    }

    def __init__(
        self,
        *,
        tables: Optional[List[str]] = None,
        solutions: Optional[List[str]] = None,
        resource_types: Optional[List[str]] = None,
        categories: Optional[List[str]] = None,
        workspaces: Optional[List[str]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.tables = tables
        self.solutions = solutions
        self.resource_types = resource_types
        self.categories = categories
        self.workspaces = workspaces
class MetadataPermissions(msrest.serialization.Model):
    """Permission information for the metadata call, including apps/workspaces/resources the user did not have access to.

    All required parameters must be populated in order to send to Azure.

    :param workspaces: Required. Permission indications for the workspaces on the metadata
     request.
    :type workspaces: list[~monitor_query_client.models.MetadataPermissionsWorkspacesItem]
    :param resources: Permission indications for the Azure resources on the metadata request.
    :type resources: list[~monitor_query_client.models.MetadataPermissionsResourcesItem]
    :param applications: Permission indications for the Application Insights apps on the
     metadata request.
    :type applications: list[~monitor_query_client.models.MetadataPermissionsApplicationsItem]
    """

    _validation = {
        "workspaces": {"required": True},
    }

    _attribute_map = {
        "workspaces": {"key": "workspaces", "type": "[MetadataPermissionsWorkspacesItem]"},
        "resources": {"key": "resources", "type": "[MetadataPermissionsResourcesItem]"},
        "applications": {"key": "applications", "type": "[MetadataPermissionsApplicationsItem]"},
    }

    def __init__(
        self,
        *,
        workspaces: List["MetadataPermissionsWorkspacesItem"],
        resources: Optional[List["MetadataPermissionsResourcesItem"]] = None,
        applications: Optional[List["MetadataPermissionsApplicationsItem"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.workspaces = workspaces
        self.resources = resources
        self.applications = applications
class MetadataPermissionsApplicationsItem(msrest.serialization.Model):
    """Permission indication entry for a single Application Insights app.

    All required parameters must be populated in order to send to Azure.

    :param resource_id: Required. The resource ID on the permission indication.
    :type resource_id: str
    """

    _validation = {
        "resource_id": {"required": True},
    }

    _attribute_map = {
        "resource_id": {"key": "resourceId", "type": "str"},
    }

    def __init__(
        self,
        *,
        resource_id: str,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.resource_id = resource_id
class MetadataPermissionsResourcesItem(msrest.serialization.Model):
    """Permission indication entry for a single Azure resource.

    All required parameters must be populated in order to send to Azure.

    :param resource_id: Required. The resource ID on the permission indication.
    :type resource_id: str
    :param deny_tables: Tables that were denied access for the resource ID.
    :type deny_tables: list[str]
    """

    _validation = {
        "resource_id": {"required": True},
    }

    _attribute_map = {
        "resource_id": {"key": "resourceId", "type": "str"},
        "deny_tables": {"key": "denyTables", "type": "[str]"},
    }

    def __init__(
        self,
        *,
        resource_id: str,
        deny_tables: Optional[List[str]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.resource_id = resource_id
        self.deny_tables = deny_tables
class MetadataPermissionsWorkspacesItem(msrest.serialization.Model):
    """Permission indication entry for a single Log Analytics workspace.

    All required parameters must be populated in order to send to Azure.

    :param resource_id: Required. The resource ID on the permission indication.
    :type resource_id: str
    :param deny_tables: Tables that were denied access for the resource ID.
    :type deny_tables: list[str]
    """

    _validation = {
        "resource_id": {"required": True},
    }

    _attribute_map = {
        "resource_id": {"key": "resourceId", "type": "str"},
        "deny_tables": {"key": "denyTables", "type": "[str]"},
    }

    def __init__(
        self,
        *,
        resource_id: str,
        deny_tables: Optional[List[str]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.resource_id = resource_id
        self.deny_tables = deny_tables
class MetadataQuery(msrest.serialization.Model):
    """A stored piece of KQL together with a list of relevant metadata items.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. The ID of the query.
    :type id: str
    :param display_name: The display name of the query.
    :type display_name: str
    :param description: The description of the query.
    :type description: str
    :param body: Required. The KQL body of the query.
    :type body: str
    :param labels: User-defined labels associated with the query.
    :type labels: list[str]
    :param tags: A set of tags. Tags associated with the query.
    :type tags: object
    :param properties: The properties of the query.
    :type properties: object
    :param related: Metadata items related to the query.
    :type related: ~monitor_query_client.models.MetadataQueryRelated
    """

    _validation = {
        "id": {"required": True},
        "body": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "display_name": {"key": "displayName", "type": "str"},
        "description": {"key": "description", "type": "str"},
        "body": {"key": "body", "type": "str"},
        "labels": {"key": "labels", "type": "[str]"},
        "tags": {"key": "tags", "type": "object"},
        "properties": {"key": "properties", "type": "object"},
        "related": {"key": "related", "type": "MetadataQueryRelated"},
    }

    def __init__(
        self,
        *,
        id: str,
        body: str,
        display_name: Optional[str] = None,
        description: Optional[str] = None,
        labels: Optional[List[str]] = None,
        tags: Optional[object] = None,
        properties: Optional[object] = None,
        related: Optional["MetadataQueryRelated"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.id = id
        self.display_name = display_name
        self.description = description
        self.body = body
        self.labels = labels
        self.tags = tags
        self.properties = properties
        self.related = related
class MetadataQueryRelated(msrest.serialization.Model):
    """Metadata items associated with a query.

    :param categories: Categories related to the query.
    :type categories: list[str]
    :param solutions: Log Analytics solutions related to the query.
    :type solutions: list[str]
    :param resource_types: Resource types related to the query.
    :type resource_types: list[str]
    :param tables: Tables related to the query.
    :type tables: list[str]
    """

    _attribute_map = {
        "categories": {"key": "categories", "type": "[str]"},
        "solutions": {"key": "solutions", "type": "[str]"},
        "resource_types": {"key": "resourceTypes", "type": "[str]"},
        "tables": {"key": "tables", "type": "[str]"},
    }

    def __init__(
        self,
        *,
        categories: Optional[List[str]] = None,
        solutions: Optional[List[str]] = None,
        resource_types: Optional[List[str]] = None,
        tables: Optional[List[str]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.categories = categories
        self.solutions = solutions
        self.resource_types = resource_types
        self.tables = tables
class MetadataResourceType(msrest.serialization.Model):
    """Metadata about a type of Azure resource, containing relevant tables, functions, etc.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. The ID of the resource-type.
    :type id: str
    :param type: Required. The type of the resource-type.
    :type type: str
    :param display_name: The display name of the resource-type.
    :type display_name: str
    :param description: The description of the resource-type.
    :type description: str
    :param labels: User-defined labels of the resource-type.
    :type labels: list[str]
    :param tags: A set of tags. Tags associated with the resource-type.
    :type tags: object
    :param properties: The properties of the resource-type.
    :type properties: object
    :param related: Metadata items related to the resource-type.
    :type related: ~monitor_query_client.models.MetadataResourceTypeRelated
    """

    _validation = {
        "id": {"required": True},
        "type": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "display_name": {"key": "displayName", "type": "str"},
        "description": {"key": "description", "type": "str"},
        "labels": {"key": "labels", "type": "[str]"},
        "tags": {"key": "tags", "type": "object"},
        "properties": {"key": "properties", "type": "object"},
        "related": {"key": "related", "type": "MetadataResourceTypeRelated"},
    }

    def __init__(
        self,
        *,
        id: str,
        type: str,
        display_name: Optional[str] = None,
        description: Optional[str] = None,
        labels: Optional[List[str]] = None,
        tags: Optional[object] = None,
        properties: Optional[object] = None,
        related: Optional["MetadataResourceTypeRelated"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.id = id
        self.type = type
        self.display_name = display_name
        self.description = description
        self.labels = labels
        self.tags = tags
        self.properties = properties
        self.related = related
class MetadataResourceTypeRelated(msrest.serialization.Model):
    """Metadata items associated with a resource-type.

    :param tables: Tables related to the resource-type.
    :type tables: list[str]
    :param functions: Functions related to the resource-type.
    :type functions: list[str]
    :param categories: Categories related to the resource-type.
    :type categories: list[str]
    :param queries: Queries related to the resource-type.
    :type queries: list[str]
    :param workspaces: Log Analytics workspaces related to the resource-type.
    :type workspaces: list[str]
    :param resources: Azure resources related to the resource-type.
    :type resources: list[str]
    """

    _attribute_map = {
        "tables": {"key": "tables", "type": "[str]"},
        "functions": {"key": "functions", "type": "[str]"},
        "categories": {"key": "categories", "type": "[str]"},
        "queries": {"key": "queries", "type": "[str]"},
        "workspaces": {"key": "workspaces", "type": "[str]"},
        "resources": {"key": "resources", "type": "[str]"},
    }

    def __init__(
        self,
        *,
        tables: Optional[List[str]] = None,
        functions: Optional[List[str]] = None,
        categories: Optional[List[str]] = None,
        queries: Optional[List[str]] = None,
        workspaces: Optional[List[str]] = None,
        resources: Optional[List[str]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.tables = tables
        self.functions = functions
        self.categories = categories
        self.queries = queries
        self.workspaces = workspaces
        self.resources = resources
class MetadataResults(msrest.serialization.Model):
    """The metadata response for the app, including available tables, etc.

    :param categories: Categories referenced in this metadata response.
    :type categories: list[~monitor_query_client.models.MetadataCategory]
    :param resource_types: Resource types referenced in this metadata
     response.
    :type resource_types: list[~monitor_query_client.models.MetadataResourceType]
    :param solutions: Log Analytics solutions installed on the workspace.
    :type solutions: list[~monitor_query_client.models.MetadataSolution]
    :param tables: Tables and columns that comprise the schema of the workspace.
    :type tables: list[~monitor_query_client.models.MetadataTable]
    :param functions: Functions stored on the workspace, or introduced by solutions
     etc.
    :type functions: list[~monitor_query_client.models.MetadataFunction]
    :param queries: Saved queries stored on the workspace, or introduced by solutions,
     resource types, etc.
    :type queries: list[~monitor_query_client.models.MetadataQuery]
    :param applications: Application Insights apps that were referenced in the metadata
     request.
    :type applications: list[~monitor_query_client.models.MetadataApplication]
    :param workspaces: Log Analytics workspaces that were referenced in the metadata
     request.
    :type workspaces: list[~monitor_query_client.models.MetadataWorkspace]
    :param resources: Azure resources that were referenced in the metadata request.
    :type resources: list[object]
    :param permissions: Permission rules that affected the metadata request.
    :type permissions: list[~monitor_query_client.models.MetadataPermissions]
    """

    _validation = {
        "categories": {"unique": True},
        "resource_types": {"unique": True},
        "solutions": {"unique": True},
        "tables": {"unique": True},
        "functions": {"unique": True},
        "queries": {"unique": True},
        "applications": {"unique": True},
        "workspaces": {"unique": True},
        "resources": {"unique": True},
        "permissions": {"unique": True},
    }

    _attribute_map = {
        "categories": {"key": "categories", "type": "[MetadataCategory]"},
        "resource_types": {"key": "resourceTypes", "type": "[MetadataResourceType]"},
        "solutions": {"key": "solutions", "type": "[MetadataSolution]"},
        "tables": {"key": "tables", "type": "[MetadataTable]"},
        "functions": {"key": "functions", "type": "[MetadataFunction]"},
        "queries": {"key": "queries", "type": "[MetadataQuery]"},
        "applications": {"key": "applications", "type": "[MetadataApplication]"},
        "workspaces": {"key": "workspaces", "type": "[MetadataWorkspace]"},
        "resources": {"key": "resources", "type": "[object]"},
        "permissions": {"key": "permissions", "type": "[MetadataPermissions]"},
    }

    def __init__(
        self,
        *,
        categories: Optional[List["MetadataCategory"]] = None,
        resource_types: Optional[List["MetadataResourceType"]] = None,
        solutions: Optional[List["MetadataSolution"]] = None,
        tables: Optional[List["MetadataTable"]] = None,
        functions: Optional[List["MetadataFunction"]] = None,
        queries: Optional[List["MetadataQuery"]] = None,
        applications: Optional[List["MetadataApplication"]] = None,
        workspaces: Optional[List["MetadataWorkspace"]] = None,
        resources: Optional[List[object]] = None,
        permissions: Optional[List["MetadataPermissions"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.categories = categories
        self.resource_types = resource_types
        self.solutions = solutions
        self.tables = tables
        self.functions = functions
        self.queries = queries
        self.applications = applications
        self.workspaces = workspaces
        self.resources = resources
        self.permissions = permissions
class MetadataSolution(msrest.serialization.Model):
    """A solution grouping tables and functions associated with a certain Azure Log Analytics offering.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. The ID of the Log Analytics solution.
    :type id: str
    :param name: Required. The name of the Log Analytics solution.
    :type name: str
    :param display_name: The display name of the Log Analytics solution.
    :type display_name: str
    :param description: The description of the Log Analytics solution.
    :type description: str
    :param tags: A set of tags. Tags associated with the Log Analytics solution.
    :type tags: object
    :param properties: The properties of the Log Analytics solution.
    :type properties: object
    :param related: Required. Metadata items related to the Log Analytics solution.
    :type related: ~monitor_query_client.models.MetadataSolutionRelated
    """

    _validation = {
        "id": {"required": True},
        "name": {"required": True},
        "related": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "display_name": {"key": "displayName", "type": "str"},
        "description": {"key": "description", "type": "str"},
        "tags": {"key": "tags", "type": "object"},
        "properties": {"key": "properties", "type": "object"},
        "related": {"key": "related", "type": "MetadataSolutionRelated"},
    }

    def __init__(
        self,
        *,
        id: str,
        name: str,
        related: "MetadataSolutionRelated",
        display_name: Optional[str] = None,
        description: Optional[str] = None,
        tags: Optional[object] = None,
        properties: Optional[object] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.id = id
        self.name = name
        self.display_name = display_name
        self.description = description
        self.tags = tags
        self.properties = properties
        self.related = related
class MetadataSolutionRelated(msrest.serialization.Model):
    """Metadata items associated with a Log Analytics solution.

    All required parameters must be populated in order to send to Azure.

    :param tables: Required. Tables related to the Log Analytics solution.
    :type tables: list[str]
    :param functions: Functions related to the Log Analytics solution.
    :type functions: list[str]
    :param categories: Categories related to the Log Analytics solution.
    :type categories: list[str]
    :param queries: Saved queries related to the Log Analytics solution.
    :type queries: list[str]
    :param workspaces: Workspaces referenced in the metadata request that are related to the
     Log Analytics solution.
    :type workspaces: list[str]
    """

    _validation = {
        "tables": {"required": True},
    }

    _attribute_map = {
        "tables": {"key": "tables", "type": "[str]"},
        "functions": {"key": "functions", "type": "[str]"},
        "categories": {"key": "categories", "type": "[str]"},
        "queries": {"key": "queries", "type": "[str]"},
        "workspaces": {"key": "workspaces", "type": "[str]"},
    }

    def __init__(
        self,
        *,
        tables: List[str],
        functions: Optional[List[str]] = None,
        categories: Optional[List[str]] = None,
        queries: Optional[List[str]] = None,
        workspaces: Optional[List[str]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.tables = tables
        self.functions = functions
        self.categories = categories
        self.queries = queries
        self.workspaces = workspaces
class MetadataTable(msrest.serialization.Model):
    """A table in the workspace schema, containing a list of columns and references to other relevant metadata items.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. The ID of the table.
    :type id: str
    :param name: Required. The name of the table.
    :type name: str
    :param description: The description of the table.
    :type description: str
    :param timespan_column: The column associated with the timespan query parameter for the table.
    :type timespan_column: str
    :param labels: User-defined labels of the table.
    :type labels: list[str]
    :param tags: A set of tags. Tags associated with the table.
    :type tags: object
    :param properties: The properties of the table.
    :type properties: object
    :param columns: The columns defined on the table.
    :type columns: list[~monitor_query_client.models.MetadataTableColumnsItem]
    :param related: Metadata items related to the table.
    :type related: ~monitor_query_client.models.MetadataTableRelated
    """

    _validation = {
        "id": {"required": True},
        "name": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "description": {"key": "description", "type": "str"},
        "timespan_column": {"key": "timespanColumn", "type": "str"},
        "labels": {"key": "labels", "type": "[str]"},
        "tags": {"key": "tags", "type": "object"},
        "properties": {"key": "properties", "type": "object"},
        "columns": {"key": "columns", "type": "[MetadataTableColumnsItem]"},
        "related": {"key": "related", "type": "MetadataTableRelated"},
    }

    def __init__(
        self,
        *,
        id: str,
        name: str,
        description: Optional[str] = None,
        timespan_column: Optional[str] = None,
        labels: Optional[List[str]] = None,
        tags: Optional[object] = None,
        properties: Optional[object] = None,
        columns: Optional[List["MetadataTableColumnsItem"]] = None,
        related: Optional["MetadataTableRelated"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.id = id
        self.name = name
        self.description = description
        self.timespan_column = timespan_column
        self.labels = labels
        self.tags = tags
        self.properties = properties
        self.columns = columns
        self.related = related
class MetadataTableColumnsItem(msrest.serialization.Model):
    """A single column definition on a metadata table.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The name of the column.
    :type name: str
    :param description: The description of the column.
    :type description: str
    :param type: Required. The data type of the column. Possible values include: "bool",
     "datetime", "dynamic", "int", "long", "real", "string", "guid", "decimal", "timespan".
    :type type: str or ~monitor_query_client.models.MetadataColumnDataType
    :param is_preferred_facet: A flag indicating this column is a preferred facet.
    :type is_preferred_facet: bool
    :param source: An indication of the source of the column, used only when multiple workspaces
     have conflicting definitions for the column.
    :type source: object
    """

    _validation = {
        "name": {"required": True},
        "type": {"required": True},
    }

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "description": {"key": "description", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "is_preferred_facet": {"key": "isPreferredFacet", "type": "bool"},
        "source": {"key": "source", "type": "object"},
    }

    def __init__(
        self,
        *,
        name: str,
        type: Union[str, "MetadataColumnDataType"],
        description: Optional[str] = None,
        is_preferred_facet: Optional[bool] = None,
        source: Optional[object] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.name = name
        self.description = description
        self.type = type
        self.is_preferred_facet = is_preferred_facet
        self.source = source
class MetadataTableRelated(msrest.serialization.Model):
    """Metadata items associated with a table.

    :param categories: Categories related to the table.
    :type categories: list[str]
    :param solutions: Log Analytics solutions related to the table.
    :type solutions: list[str]
    :param resource_types: Resource types related to the table.
    :type resource_types: list[str]
    :param workspaces: Log Analytics workspaces related to the table.
    :type workspaces: list[str]
    :param functions: Functions related to the table.
    :type functions: list[str]
    :param queries: Saved queries related to the table.
    :type queries: list[str]
    """

    _attribute_map = {
        "categories": {"key": "categories", "type": "[str]"},
        "solutions": {"key": "solutions", "type": "[str]"},
        "resource_types": {"key": "resourceTypes", "type": "[str]"},
        "workspaces": {"key": "workspaces", "type": "[str]"},
        "functions": {"key": "functions", "type": "[str]"},
        "queries": {"key": "queries", "type": "[str]"},
    }

    def __init__(
        self,
        *,
        categories: Optional[List[str]] = None,
        solutions: Optional[List[str]] = None,
        resource_types: Optional[List[str]] = None,
        workspaces: Optional[List[str]] = None,
        functions: Optional[List[str]] = None,
        queries: Optional[List[str]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.categories = categories
        self.solutions = solutions
        self.resource_types = resource_types
        self.workspaces = workspaces
        self.functions = functions
        self.queries = queries
class MetadataValue(msrest.serialization.Model):
    """A metric metadata value.

    :param name: The name of the metadata.
    :type name: ~monitor_query_client.models.LocalizableString
    :param value: The value of the metadata.
    :type value: str
    """

    _attribute_map = {
        "name": {"key": "name", "type": "LocalizableString"},
        "value": {"key": "value", "type": "str"},
    }

    def __init__(
        self,
        *,
        name: Optional["LocalizableString"] = None,
        value: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.name = name
        self.value = value
class MetadataWorkspace(msrest.serialization.Model):
    """A Log Analytics workspace that was part of the metadata request and that the user has access to.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. The ID of the Log Analytics workspace.
    :type id: str
    :param resource_id: Required. The ARM resource ID of the Log Analytics workspace.
    :type resource_id: str
    :param name: Required. The name of the Log Analytics workspace.
    :type name: str
    :param region: Required. The Azure region of the Log Analytics workspace.
    :type region: str
    :param related: Metadata items related to the Log Analytics workspace.
    :type related: ~monitor_query_client.models.MetadataWorkspaceRelated
    """

    _validation = {
        "id": {"required": True},
        "resource_id": {"required": True},
        "name": {"required": True},
        "region": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "resource_id": {"key": "resourceId", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "region": {"key": "region", "type": "str"},
        "related": {"key": "related", "type": "MetadataWorkspaceRelated"},
    }

    def __init__(
        self,
        *,
        id: str,
        resource_id: str,
        name: str,
        region: str,
        related: Optional["MetadataWorkspaceRelated"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.id = id
        self.resource_id = resource_id
        self.name = name
        self.region = region
        self.related = related
class MetadataWorkspaceRelated(msrest.serialization.Model):
    """Metadata items associated with a Log Analytics workspace.

    :param tables: Tables related to the Log Analytics workspace.
    :type tables: list[str]
    :param solutions: Log Analytics solutions related to the Log Analytics workspace.
    :type solutions: list[str]
    :param resource_types: Resource types related to the Log Analytics workspace.
    :type resource_types: list[str]
    :param functions: Functions related to the Log Analytics workspace.
    :type functions: list[str]
    :param resources: Azure resources related to the Log Analytics workspace.
    :type resources: list[str]
    """

    _attribute_map = {
        "tables": {"key": "tables", "type": "[str]"},
        "solutions": {"key": "solutions", "type": "[str]"},
        "resource_types": {"key": "resourceTypes", "type": "[str]"},
        "functions": {"key": "functions", "type": "[str]"},
        "resources": {"key": "resources", "type": "[str]"},
    }

    def __init__(
        self,
        *,
        tables: Optional[List[str]] = None,
        solutions: Optional[List[str]] = None,
        resource_types: Optional[List[str]] = None,
        functions: Optional[List[str]] = None,
        resources: Optional[List[str]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.tables = tables
        self.solutions = solutions
        self.resource_types = resource_types
        self.functions = functions
        self.resources = resources
class Metric(msrest.serialization.Model):
    """The result data of a query.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. The metric Id.
    :type id: str
    :param type: Required. The resource type of the metric resource.
    :type type: str
    :param name: Required. The name and display name of the metric; it is a localizable
     string.
    :type name: ~monitor_query_client.models.LocalizableString
    :param display_description: Detailed description of this metric.
    :type display_description: str
    :param error_code: 'Success' or the error details on query failures for this metric.
    :type error_code: str
    :param error_message: Error message encountered querying this specific metric.
    :type error_message: str
    :param unit: Required. The unit of the metric. Possible values include: "Count", "Bytes",
     "Seconds", "CountPerSecond", "BytesPerSecond", "Percent", "MilliSeconds", "ByteSeconds",
     "Unspecified", "Cores", "MilliCores", "NanoCores", "BitsPerSecond".
    :type unit: str or ~monitor_query_client.models.MetricUnit
    :param timeseries: Required. The time series returned when a data query is performed.
    :type timeseries: list[~monitor_query_client.models.TimeSeriesElement]
    """

    _validation = {
        "id": {"required": True},
        "type": {"required": True},
        "name": {"required": True},
        "unit": {"required": True},
        "timeseries": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "name": {"key": "name", "type": "LocalizableString"},
        "display_description": {"key": "displayDescription", "type": "str"},
        "error_code": {"key": "errorCode", "type": "str"},
        "error_message": {"key": "errorMessage", "type": "str"},
        "unit": {"key": "unit", "type": "str"},
        "timeseries": {"key": "timeseries", "type": "[TimeSeriesElement]"},
    }

    def __init__(
        self,
        *,
        id: str,
        type: str,
        name: "LocalizableString",
        unit: Union[str, "MetricUnit"],
        timeseries: List["TimeSeriesElement"],
        display_description: Optional[str] = None,
        error_code: Optional[str] = None,
        error_message: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.id = id
        self.type = type
        self.name = name
        self.display_description = display_description
        self.error_code = error_code
        self.error_message = error_message
        self.unit = unit
        self.timeseries = timeseries
class MetricAvailability(msrest.serialization.Model):
    """Specifies the time grain (aggregation interval or frequency) of a metric and the retention period for that time grain.

    :param time_grain: The time grain specifies the aggregation interval for the metric.
     Expressed as a duration 'PT1M', 'P1D', etc.
    :type time_grain: ~datetime.timedelta
    :param retention: The retention period for the metric at the specified timegrain.
     Expressed as a duration 'PT1M', 'P1D', etc.
    :type retention: ~datetime.timedelta
    """

    _attribute_map = {
        "time_grain": {"key": "timeGrain", "type": "duration"},
        "retention": {"key": "retention", "type": "duration"},
    }

    def __init__(
        self,
        *,
        time_grain: Optional[datetime.timedelta] = None,
        retention: Optional[datetime.timedelta] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.time_grain = time_grain
        self.retention = retention
class MetricDefinition(msrest.serialization.Model):
    """Metric definition class specifies the metadata for a metric.
    :param is_dimension_required: Flag to indicate whether the dimension is required.
    :type is_dimension_required: bool
    :param resource_id: the resource identifier of the resource that emitted the metric.
    :type resource_id: str
    :param namespace: the namespace the metric belongs to.
    :type namespace: str
    :param name: the name and the display name of the metric, i.e. it is a localizable string.
    :type name: ~monitor_query_client.models.LocalizableString
    :param display_description: Detailed description of this metric.
    :type display_description: str
    :param category: Custom category name for this metric.
    :type category: str
    :param metric_class: The class of the metric. Possible values include: "Availability",
     "Transactions", "Errors", "Latency", "Saturation".
    :type metric_class: str or ~monitor_query_client.models.MetricClass
    :param unit: The unit of the metric. Possible values include: "Count", "Bytes", "Seconds",
     "CountPerSecond", "BytesPerSecond", "Percent", "MilliSeconds", "ByteSeconds", "Unspecified",
     "Cores", "MilliCores", "NanoCores", "BitsPerSecond".
    :type unit: str or ~monitor_query_client.models.MetricUnit
    :param primary_aggregation_type: the primary aggregation type value defining how to use the
     values for display. Possible values include: "None", "Average", "Count", "Minimum", "Maximum",
     "Total".
    :type primary_aggregation_type: str or ~monitor_query_client.models.AggregationType
    :param supported_aggregation_types: the collection of what aggregation types are supported.
    :type supported_aggregation_types: list[str or ~monitor_query_client.models.AggregationType]
    :param metric_availabilities: the collection of what aggregation intervals are available to be
     queried.
    :type metric_availabilities: list[~monitor_query_client.models.MetricAvailability]
    :param id: the resource identifier of the metric definition.
    :type id: str
    :param dimensions: the name and the display name of the dimension, i.e. it is a localizable
     string.
    :type dimensions: list[~monitor_query_client.models.LocalizableString]
    """
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'is_dimension_required': {'key': 'isDimensionRequired', 'type': 'bool'},
        'resource_id': {'key': 'resourceId', 'type': 'str'},
        'namespace': {'key': 'namespace', 'type': 'str'},
        'name': {'key': 'name', 'type': 'LocalizableString'},
        'display_description': {'key': 'displayDescription', 'type': 'str'},
        'category': {'key': 'category', 'type': 'str'},
        'metric_class': {'key': 'metricClass', 'type': 'str'},
        'unit': {'key': 'unit', 'type': 'str'},
        'primary_aggregation_type': {'key': 'primaryAggregationType', 'type': 'str'},
        'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'},
        'metric_availabilities': {'key': 'metricAvailabilities', 'type': '[MetricAvailability]'},
        'id': {'key': 'id', 'type': 'str'},
        'dimensions': {'key': 'dimensions', 'type': '[LocalizableString]'},
    }
    def __init__(
        self,
        *,
        is_dimension_required: Optional[bool] = None,
        resource_id: Optional[str] = None,
        namespace: Optional[str] = None,
        name: Optional["LocalizableString"] = None,
        display_description: Optional[str] = None,
        category: Optional[str] = None,
        metric_class: Optional[Union[str, "MetricClass"]] = None,
        unit: Optional[Union[str, "MetricUnit"]] = None,
        primary_aggregation_type: Optional[Union[str, "AggregationType"]] = None,
        supported_aggregation_types: Optional[List[Union[str, "AggregationType"]]] = None,
        metric_availabilities: Optional[List["MetricAvailability"]] = None,
        id: Optional[str] = None,
        dimensions: Optional[List["LocalizableString"]] = None,
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(MetricDefinition, self).__init__(**kwargs)
        self.is_dimension_required = is_dimension_required
        self.resource_id = resource_id
        self.namespace = namespace
        self.name = name
        self.display_description = display_description
        self.category = category
        self.metric_class = metric_class
        self.unit = unit
        self.primary_aggregation_type = primary_aggregation_type
        self.supported_aggregation_types = supported_aggregation_types
        self.metric_availabilities = metric_availabilities
        self.id = id
        self.dimensions = dimensions
class MetricDefinitionCollection(msrest.serialization.Model):
    """Represents collection of metric definitions.
    All required parameters must be populated in order to send to Azure.
    :param value: Required. the values for the metric definitions.
    :type value: list[~monitor_query_client.models.MetricDefinition]
    """
    # Validation rules applied by the msrest base Model during serialization.
    _validation = {
        'value': {'required': True},
    }
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[MetricDefinition]'},
    }
    def __init__(
        self,
        *,
        value: List["MetricDefinition"],
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(MetricDefinitionCollection, self).__init__(**kwargs)
        self.value = value
class MetricNamespace(msrest.serialization.Model):
    """Metric namespace class specifies the metadata for a metric namespace.
    :param id: The ID of the metric namespace.
    :type id: str
    :param type: The type of the namespace.
    :type type: str
    :param name: The escaped name of the namespace.
    :type name: str
    :param classification: Kind of namespace. Possible values include: "Platform", "Custom", "Qos".
    :type classification: str or ~monitor_query_client.models.NamespaceClassification
    :param properties: Properties which include the fully qualified namespace name.
    :type properties: ~monitor_query_client.models.MetricNamespaceName
    """
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'classification': {'key': 'classification', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'MetricNamespaceName'},
    }
    def __init__(
        self,
        *,
        id: Optional[str] = None,
        type: Optional[str] = None,
        name: Optional[str] = None,
        classification: Optional[Union[str, "NamespaceClassification"]] = None,
        properties: Optional["MetricNamespaceName"] = None,
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(MetricNamespace, self).__init__(**kwargs)
        self.id = id
        self.type = type
        self.name = name
        self.classification = classification
        self.properties = properties
class MetricNamespaceCollection(msrest.serialization.Model):
    """Represents collection of metric namespaces.
    All required parameters must be populated in order to send to Azure.
    :param value: Required. The values for the metric namespaces.
    :type value: list[~monitor_query_client.models.MetricNamespace]
    """
    # Validation rules applied by the msrest base Model during serialization.
    _validation = {
        'value': {'required': True},
    }
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[MetricNamespace]'},
    }
    def __init__(
        self,
        *,
        value: List["MetricNamespace"],
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(MetricNamespaceCollection, self).__init__(**kwargs)
        self.value = value
class MetricNamespaceName(msrest.serialization.Model):
    """The fully qualified metric namespace name.
    :param metric_namespace_name: The metric namespace name.
    :type metric_namespace_name: str
    """
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'metric_namespace_name': {'key': 'metricNamespaceName', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        metric_namespace_name: Optional[str] = None,
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(MetricNamespaceName, self).__init__(**kwargs)
        self.metric_namespace_name = metric_namespace_name
class MetricValue(msrest.serialization.Model):
    """Represents a metric value.
    All required parameters must be populated in order to send to Azure.
    :param time_stamp: Required. the timestamp for the metric value in ISO 8601 format.
    :type time_stamp: ~datetime.datetime
    :param average: the average value in the time range.
    :type average: float
    :param minimum: the least value in the time range.
    :type minimum: float
    :param maximum: the greatest value in the time range.
    :type maximum: float
    :param total: the sum of all of the values in the time range.
    :type total: float
    :param count: the number of samples in the time range. Can be used to determine the number of
     values that contributed to the average value.
    :type count: float
    """
    # Validation rules applied by the msrest base Model during serialization.
    _validation = {
        'time_stamp': {'required': True},
    }
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'time_stamp': {'key': 'timeStamp', 'type': 'iso-8601'},
        'average': {'key': 'average', 'type': 'float'},
        'minimum': {'key': 'minimum', 'type': 'float'},
        'maximum': {'key': 'maximum', 'type': 'float'},
        'total': {'key': 'total', 'type': 'float'},
        'count': {'key': 'count', 'type': 'float'},
    }
    def __init__(
        self,
        *,
        time_stamp: datetime.datetime,
        average: Optional[float] = None,
        minimum: Optional[float] = None,
        maximum: Optional[float] = None,
        total: Optional[float] = None,
        count: Optional[float] = None,
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(MetricValue, self).__init__(**kwargs)
        self.time_stamp = time_stamp
        self.average = average
        self.minimum = minimum
        self.maximum = maximum
        self.total = total
        self.count = count
class QueryBody(msrest.serialization.Model):
    """The Analytics query. Learn more about the `Analytics query syntax <https://azure.microsoft.com/documentation/articles/app-insights-analytics-reference/>`_.
    All required parameters must be populated in order to send to Azure.
    :param query: Required. The query to execute.
    :type query: str
    :param timespan: Optional. The timespan over which to query data. This is an ISO8601 time
     period value. This timespan is applied in addition to any that are specified in the query
     expression.
    :type timespan: str
    :param workspaces: A list of workspaces that are included in the query.
    :type workspaces: list[str]
    """
    # Validation rules applied by the msrest base Model during serialization.
    _validation = {
        'query': {'required': True},
    }
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'query': {'key': 'query', 'type': 'str'},
        'timespan': {'key': 'timespan', 'type': 'str'},
        'workspaces': {'key': 'workspaces', 'type': '[str]'},
    }
    def __init__(
        self,
        *,
        query: str,
        timespan: Optional[str] = None,
        workspaces: Optional[List[str]] = None,
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(QueryBody, self).__init__(**kwargs)
        self.query = query
        self.timespan = timespan
        self.workspaces = workspaces
class QueryResults(msrest.serialization.Model):
    """Contains the tables, columns & rows resulting from a query.
    All required parameters must be populated in order to send to Azure.
    :param tables: Required. The list of tables, columns and rows.
    :type tables: list[~monitor_query_client.models.Table]
    :param statistics: Any object.
    :type statistics: object
    :param render: Any object.
    :type render: object
    :param error: The code and message for an error.
    :type error: ~monitor_query_client.models.ErrorInfo
    """
    # Validation rules applied by the msrest base Model during serialization.
    _validation = {
        'tables': {'required': True},
    }
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'tables': {'key': 'tables', 'type': '[Table]'},
        'statistics': {'key': 'statistics', 'type': 'object'},
        'render': {'key': 'render', 'type': 'object'},
        'error': {'key': 'error', 'type': 'ErrorInfo'},
    }
    def __init__(
        self,
        *,
        tables: List["Table"],
        statistics: Optional[object] = None,
        render: Optional[object] = None,
        error: Optional["ErrorInfo"] = None,
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(QueryResults, self).__init__(**kwargs)
        self.tables = tables
        self.statistics = statistics
        self.render = render
        self.error = error
class Response(msrest.serialization.Model):
    """The response to a metrics query.
    All required parameters must be populated in order to send to Azure.
    :param cost: The integer value representing the relative cost of the query.
    :type cost: int
    :param timespan: Required. The timespan for which the data was retrieved. Its value consists of
     two datetimes concatenated, separated by '/'. This may be adjusted in the future and returned
     back from what was originally requested.
    :type timespan: str
    :param interval: The interval (window size) for which the metric data was returned in. This
     may be adjusted in the future and returned back from what was originally requested. This is
     not present if a metadata request was made.
    :type interval: ~datetime.timedelta
    :param namespace: The namespace of the metrics being queried.
    :type namespace: str
    :param resourceregion: The region of the resource being queried for metrics.
    :type resourceregion: str
    :param value: Required. the value of the collection.
    :type value: list[~monitor_query_client.models.Metric]
    """
    # Validation rules applied by the msrest base Model ('cost' must be >= 0).
    _validation = {
        'cost': {'minimum': 0},
        'timespan': {'required': True},
        'value': {'required': True},
    }
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'cost': {'key': 'cost', 'type': 'int'},
        'timespan': {'key': 'timespan', 'type': 'str'},
        'interval': {'key': 'interval', 'type': 'duration'},
        'namespace': {'key': 'namespace', 'type': 'str'},
        'resourceregion': {'key': 'resourceregion', 'type': 'str'},
        'value': {'key': 'value', 'type': '[Metric]'},
    }
    def __init__(
        self,
        *,
        timespan: str,
        value: List["Metric"],
        cost: Optional[int] = None,
        interval: Optional[datetime.timedelta] = None,
        namespace: Optional[str] = None,
        resourceregion: Optional[str] = None,
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(Response, self).__init__(**kwargs)
        self.cost = cost
        self.timespan = timespan
        self.interval = interval
        self.namespace = namespace
        self.resourceregion = resourceregion
        self.value = value
class Table(msrest.serialization.Model):
    """Contains the columns and rows for one table in a query response.
    All required parameters must be populated in order to send to Azure.
    :param name: Required. The name of the table.
    :type name: str
    :param columns: Required. The list of columns in this table.
    :type columns: list[~monitor_query_client.models.Column]
    :param rows: Required. The resulting rows from this query.
    :type rows: list[list[object]]
    """
    # Validation rules applied by the msrest base Model during serialization.
    _validation = {
        'name': {'required': True},
        'columns': {'required': True},
        'rows': {'required': True},
    }
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'columns': {'key': 'columns', 'type': '[Column]'},
        'rows': {'key': 'rows', 'type': '[[object]]'},
    }
    def __init__(
        self,
        *,
        name: str,
        columns: List["Column"],
        rows: List[List[object]],
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(Table, self).__init__(**kwargs)
        self.name = name
        self.columns = columns
        self.rows = rows
class TimeSeriesElement(msrest.serialization.Model):
    """A time series result type. The discriminator value is always TimeSeries in this case.
    :param metadatavalues: the metadata values returned if $filter was specified in the call.
    :type metadatavalues: list[~monitor_query_client.models.MetadataValue]
    :param data: An array of data points representing the metric values. This is only returned if
     a result type of data is specified.
    :type data: list[~monitor_query_client.models.MetricValue]
    """
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'metadatavalues': {'key': 'metadatavalues', 'type': '[MetadataValue]'},
        'data': {'key': 'data', 'type': '[MetricValue]'},
    }
    def __init__(
        self,
        *,
        metadatavalues: Optional[List["MetadataValue"]] = None,
        data: Optional[List["MetricValue"]] = None,
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(TimeSeriesElement, self).__init__(**kwargs)
        self.metadatavalues = metadatavalues
        # BUG FIX: this assignment was corrupted with trailing junk
        # ("... | sdk/monitor/azure-monitor-query/..."), which would raise a
        # NameError at runtime; restore the plain attribute assignment.
        self.data = data
import datetime
from typing import Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._monitor_query_client_enums import *
class BatchQueryRequest(msrest.serialization.Model):
    """An single request in a batch.
    Variables are only populated by the server, and will be ignored when sending a request.
    All required parameters must be populated in order to send to Azure.
    :param id: Required. The error details.
    :type id: str
    :param headers: Dictionary of :code:`<string>`.
    :type headers: dict[str, str]
    :param body: Required. The Analytics query. Learn more about the `Analytics query syntax
     <https://azure.microsoft.com/documentation/articles/app-insights-analytics-reference/>`_.
    :type body: ~monitor_query_client.models.QueryBody
    :ivar path: Default value: "/query".
    :vartype path: str
    :ivar method: Default value: "POST".
    :vartype method: str
    :param workspace: Required. Workspace Id to be included in the query.
    :type workspace: str
    """
    # Validation rules applied by the msrest base Model; 'path' and 'method'
    # are marked constant and are not settable through __init__.
    _validation = {
        'id': {'required': True},
        'body': {'required': True},
        'path': {'constant': True},
        'method': {'constant': True},
        'workspace': {'required': True},
    }
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'headers': {'key': 'headers', 'type': '{str}'},
        'body': {'key': 'body', 'type': 'QueryBody'},
        'path': {'key': 'path', 'type': 'str'},
        'method': {'key': 'method', 'type': 'str'},
        'workspace': {'key': 'workspace', 'type': 'str'},
    }
    # Class-level constants serialized with every request.
    path = "/query"
    method = "POST"
    def __init__(
        self,
        *,
        id: str,
        body: "QueryBody",
        workspace: str,
        headers: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(BatchQueryRequest, self).__init__(**kwargs)
        self.id = id
        self.headers = headers
        self.body = body
        self.workspace = workspace
class BatchQueryResponse(msrest.serialization.Model):
    """BatchQueryResponse.
    :param id:
    :type id: str
    :param status:
    :type status: int
    :param body: Contains the tables, columns & rows resulting from a query.
    :type body: ~monitor_query_client.models.BatchQueryResults
    :param headers: Dictionary of :code:`<string>`.
    :type headers: dict[str, str]
    """
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'status': {'key': 'status', 'type': 'int'},
        'body': {'key': 'body', 'type': 'BatchQueryResults'},
        'headers': {'key': 'headers', 'type': '{str}'},
    }
    def __init__(
        self,
        *,
        id: Optional[str] = None,
        status: Optional[int] = None,
        body: Optional["BatchQueryResults"] = None,
        headers: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(BatchQueryResponse, self).__init__(**kwargs)
        self.id = id
        self.status = status
        self.body = body
        self.headers = headers
class BatchQueryResults(msrest.serialization.Model):
    """Contains the tables, columns & rows resulting from a query.
    :param tables: The list of tables, columns and rows.
    :type tables: list[~monitor_query_client.models.Table]
    :param statistics: Statistics represented in JSON format.
    :type statistics: object
    :param render: Visualization data in JSON format.
    :type render: object
    :param error: The code and message for an error.
    :type error: ~monitor_query_client.models.ErrorInfo
    """
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'tables': {'key': 'tables', 'type': '[Table]'},
        'statistics': {'key': 'statistics', 'type': 'object'},
        'render': {'key': 'render', 'type': 'object'},
        'error': {'key': 'error', 'type': 'ErrorInfo'},
    }
    def __init__(
        self,
        *,
        tables: Optional[List["Table"]] = None,
        statistics: Optional[object] = None,
        render: Optional[object] = None,
        error: Optional["ErrorInfo"] = None,
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(BatchQueryResults, self).__init__(**kwargs)
        self.tables = tables
        self.statistics = statistics
        self.render = render
        self.error = error
class BatchRequest(msrest.serialization.Model):
    """An array of requests.
    All required parameters must be populated in order to send to Azure.
    :param requests: Required. An single request in a batch.
    :type requests: list[~monitor_query_client.models.BatchQueryRequest]
    """
    # Validation rules applied by the msrest base Model during serialization.
    _validation = {
        'requests': {'required': True},
    }
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'requests': {'key': 'requests', 'type': '[BatchQueryRequest]'},
    }
    def __init__(
        self,
        *,
        requests: List["BatchQueryRequest"],
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(BatchRequest, self).__init__(**kwargs)
        self.requests = requests
class BatchResponse(msrest.serialization.Model):
    """Response to a batch query.
    :param responses: An array of responses corresponding to each individual request in a batch.
    :type responses: list[~monitor_query_client.models.BatchQueryResponse]
    """
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'responses': {'key': 'responses', 'type': '[BatchQueryResponse]'},
    }
    def __init__(
        self,
        *,
        responses: Optional[List["BatchQueryResponse"]] = None,
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(BatchResponse, self).__init__(**kwargs)
        self.responses = responses
class Column(msrest.serialization.Model):
    """A column in a table.
    All required parameters must be populated in order to send to Azure.
    :param name: Required. The name of this column.
    :type name: str
    :param type: Required. The data type of this column. Possible values include: "bool",
     "datetime", "dynamic", "int", "long", "real", "string", "guid", "decimal", "timespan".
    :type type: str or ~monitor_query_client.models.LogsColumnType
    """
    # Validation rules applied by the msrest base Model during serialization.
    _validation = {
        'name': {'required': True},
        'type': {'required': True},
    }
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        name: str,
        type: Union[str, "LogsColumnType"],
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(Column, self).__init__(**kwargs)
        self.name = name
        self.type = type
class ErrorDetail(msrest.serialization.Model):
    """Error details.
    All required parameters must be populated in order to send to Azure.
    :param code: Required. The error's code.
    :type code: str
    :param message: Required. A human readable error message.
    :type message: str
    :param target: Indicates which property in the request is responsible for the error.
    :type target: str
    :param value: Indicates which value in 'target' is responsible for the error.
    :type value: str
    :param resources: Indicates resources which were responsible for the error.
    :type resources: list[str]
    :param additional_properties: Additional properties that can be provided on the error details
     object.
    :type additional_properties: object
    """
    # Validation rules applied by the msrest base Model during serialization.
    _validation = {
        'code': {'required': True},
        'message': {'required': True},
    }
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
        'resources': {'key': 'resources', 'type': '[str]'},
        'additional_properties': {'key': 'additionalProperties', 'type': 'object'},
    }
    def __init__(
        self,
        *,
        code: str,
        message: str,
        target: Optional[str] = None,
        value: Optional[str] = None,
        resources: Optional[List[str]] = None,
        additional_properties: Optional[object] = None,
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(ErrorDetail, self).__init__(**kwargs)
        self.code = code
        self.message = message
        self.target = target
        self.value = value
        self.resources = resources
        self.additional_properties = additional_properties
class ErrorInfo(msrest.serialization.Model):
    """The code and message for an error.
    All required parameters must be populated in order to send to Azure.
    :param code: Required. A machine readable error code.
    :type code: str
    :param message: Required. A human readable error message.
    :type message: str
    :param details: error details.
    :type details: list[~monitor_query_client.models.ErrorDetail]
    :param innererror: Inner error details if they exist.
    :type innererror: ~monitor_query_client.models.ErrorInfo
    :param additional_properties: Additional properties that can be provided on the error info
     object.
    :type additional_properties: object
    """
    # Validation rules applied by the msrest base Model during serialization.
    _validation = {
        'code': {'required': True},
        'message': {'required': True},
    }
    # Maps each Python attribute to its wire-format key and msrest type string.
    # Note: 'innererror' is self-referential (nested ErrorInfo chains).
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'details': {'key': 'details', 'type': '[ErrorDetail]'},
        'innererror': {'key': 'innererror', 'type': 'ErrorInfo'},
        'additional_properties': {'key': 'additionalProperties', 'type': 'object'},
    }
    def __init__(
        self,
        *,
        code: str,
        message: str,
        details: Optional[List["ErrorDetail"]] = None,
        innererror: Optional["ErrorInfo"] = None,
        additional_properties: Optional[object] = None,
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(ErrorInfo, self).__init__(**kwargs)
        self.code = code
        self.message = message
        self.details = details
        self.innererror = innererror
        self.additional_properties = additional_properties
class ErrorResponse(msrest.serialization.Model):
    """Contains details when the response code indicates an error.
    All required parameters must be populated in order to send to Azure.
    :param error: Required. The error details.
    :type error: ~monitor_query_client.models.ErrorInfo
    """
    # Validation rules applied by the msrest base Model during serialization.
    _validation = {
        'error': {'required': True},
    }
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'error': {'key': 'error', 'type': 'ErrorInfo'},
    }
    def __init__(
        self,
        *,
        error: "ErrorInfo",
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(ErrorResponse, self).__init__(**kwargs)
        self.error = error
class ErrorResponseAutoGenerated(msrest.serialization.Model):
    """Describes the format of Error response.
    :param code: Error code.
    :type code: str
    :param message: Error message indicating why the operation failed.
    :type message: str
    """
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        code: Optional[str] = None,
        message: Optional[str] = None,
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(ErrorResponseAutoGenerated, self).__init__(**kwargs)
        self.code = code
        self.message = message
class LocalizableString(msrest.serialization.Model):
    """The localizable string class.
    All required parameters must be populated in order to send to Azure.
    :param value: Required. the invariant value.
    :type value: str
    :param localized_value: the locale specific value.
    :type localized_value: str
    """
    # Validation rules applied by the msrest base Model during serialization.
    _validation = {
        'value': {'required': True},
    }
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'value': {'key': 'value', 'type': 'str'},
        'localized_value': {'key': 'localizedValue', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        value: str,
        localized_value: Optional[str] = None,
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(LocalizableString, self).__init__(**kwargs)
        self.value = value
        self.localized_value = localized_value
class MetadataApplication(msrest.serialization.Model):
    """Application Insights apps that were part of the metadata request and that the user has access to.
    All required parameters must be populated in order to send to Azure.
    :param id: Required. The ID of the Application Insights app.
    :type id: str
    :param resource_id: Required. The ARM resource ID of the Application Insights app.
    :type resource_id: str
    :param name: Required. The name of the Application Insights app.
    :type name: str
    :param region: Required. The Azure region of the Application Insights app.
    :type region: str
    :param related: The related metadata items for the Application Insights app.
    :type related: ~monitor_query_client.models.MetadataApplicationRelated
    """
    # Validation rules applied by the msrest base Model during serialization.
    _validation = {
        'id': {'required': True},
        'resource_id': {'required': True},
        'name': {'required': True},
        'region': {'required': True},
    }
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'resource_id': {'key': 'resourceId', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'region': {'key': 'region', 'type': 'str'},
        'related': {'key': 'related', 'type': 'MetadataApplicationRelated'},
    }
    def __init__(
        self,
        *,
        id: str,
        resource_id: str,
        name: str,
        region: str,
        related: Optional["MetadataApplicationRelated"] = None,
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(MetadataApplication, self).__init__(**kwargs)
        self.id = id
        self.resource_id = resource_id
        self.name = name
        self.region = region
        self.related = related
class MetadataApplicationRelated(msrest.serialization.Model):
    """The related metadata items for the Application Insights app.
    :param tables: The related tables for the Application Insights app.
    :type tables: list[str]
    :param functions: The related functions for the Application Insights app.
    :type functions: list[str]
    """
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'tables': {'key': 'tables', 'type': '[str]'},
        'functions': {'key': 'functions', 'type': '[str]'},
    }
    def __init__(
        self,
        *,
        tables: Optional[List[str]] = None,
        functions: Optional[List[str]] = None,
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(MetadataApplicationRelated, self).__init__(**kwargs)
        self.tables = tables
        self.functions = functions
class MetadataCategory(msrest.serialization.Model):
    """Categories are used to group other metadata entities.
    All required parameters must be populated in order to send to Azure.
    :param id: Required. The ID of the category.
    :type id: str
    :param display_name: Required. The display name of the category.
    :type display_name: str
    :param description: The description of the category.
    :type description: str
    :param related: The related metadata items for the category.
    :type related: ~monitor_query_client.models.MetadataCategoryRelated
    """
    # Validation rules applied by the msrest base Model during serialization.
    _validation = {
        'id': {'required': True},
        'display_name': {'required': True},
    }
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'related': {'key': 'related', 'type': 'MetadataCategoryRelated'},
    }
    def __init__(
        self,
        *,
        id: str,
        display_name: str,
        description: Optional[str] = None,
        related: Optional["MetadataCategoryRelated"] = None,
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(MetadataCategory, self).__init__(**kwargs)
        self.id = id
        self.display_name = display_name
        self.description = description
        self.related = related
class MetadataCategoryRelated(msrest.serialization.Model):
    """The related metadata items for the category.
    :param tables: The tables related to the category.
    :type tables: list[str]
    :param functions: The functions related to the category.
    :type functions: list[str]
    :param resource_types: The resource types related to the category.
    :type resource_types: list[str]
    :param queries: The saved queries related to the category.
    :type queries: list[str]
    :param solutions: The Log Analytics solutions related to the category.
    :type solutions: list[str]
    """
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'tables': {'key': 'tables', 'type': '[str]'},
        'functions': {'key': 'functions', 'type': '[str]'},
        'resource_types': {'key': 'resourceTypes', 'type': '[str]'},
        'queries': {'key': 'queries', 'type': '[str]'},
        'solutions': {'key': 'solutions', 'type': '[str]'},
    }
    def __init__(
        self,
        *,
        tables: Optional[List[str]] = None,
        functions: Optional[List[str]] = None,
        resource_types: Optional[List[str]] = None,
        queries: Optional[List[str]] = None,
        solutions: Optional[List[str]] = None,
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(MetadataCategoryRelated, self).__init__(**kwargs)
        self.tables = tables
        self.functions = functions
        self.resource_types = resource_types
        self.queries = queries
        self.solutions = solutions
class MetadataFunction(msrest.serialization.Model):
    """Functions are stored Kusto queries that can be specified as part of queries by using their name.
    All required parameters must be populated in order to send to Azure.
    :param id: Required. The ID of the function.
    :type id: str
    :param name: Required. The name of the function, to be used in queries.
    :type name: str
    :param parameters: The parameters/arguments of the function, if any.
    :type parameters: str
    :param display_name: The display name of the function.
    :type display_name: str
    :param description: The description of the function.
    :type description: str
    :param body: Required. The KQL body of the function.
    :type body: str
    :param tags: A set of tags. The tags associated with the function.
    :type tags: object
    :param properties: The properties of the function.
    :type properties: object
    :param related: The related metadata items for the function.
    :type related: ~monitor_query_client.models.MetadataFunctionRelated
    """
    # Validation rules applied by the msrest base Model during serialization.
    _validation = {
        'id': {'required': True},
        'name': {'required': True},
        'body': {'required': True},
    }
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'body': {'key': 'body', 'type': 'str'},
        'tags': {'key': 'tags', 'type': 'object'},
        'properties': {'key': 'properties', 'type': 'object'},
        'related': {'key': 'related', 'type': 'MetadataFunctionRelated'},
    }
    def __init__(
        self,
        *,
        id: str,
        name: str,
        body: str,
        parameters: Optional[str] = None,
        display_name: Optional[str] = None,
        description: Optional[str] = None,
        tags: Optional[object] = None,
        properties: Optional[object] = None,
        related: Optional["MetadataFunctionRelated"] = None,
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(MetadataFunction, self).__init__(**kwargs)
        self.id = id
        self.name = name
        self.parameters = parameters
        self.display_name = display_name
        self.description = description
        self.body = body
        self.tags = tags
        self.properties = properties
        self.related = related
class MetadataFunctionRelated(msrest.serialization.Model):
    """The related metadata items for the function.
    :param tables: The related tables for the function.
    :type tables: list[str]
    :param solutions: The related Log Analytics solutions for the function.
    :type solutions: list[str]
    :param resource_types: The related resource types for the function.
    :type resource_types: list[str]
    :param categories: The related categories for the function.
    :type categories: list[str]
    :param workspaces: The related workspaces for the function.
    :type workspaces: list[str]
    """
    # Maps each Python attribute to its wire-format key and msrest type string.
    _attribute_map = {
        'tables': {'key': 'tables', 'type': '[str]'},
        'solutions': {'key': 'solutions', 'type': '[str]'},
        'resource_types': {'key': 'resourceTypes', 'type': '[str]'},
        'categories': {'key': 'categories', 'type': '[str]'},
        'workspaces': {'key': 'workspaces', 'type': '[str]'},
    }
    def __init__(
        self,
        *,
        tables: Optional[List[str]] = None,
        solutions: Optional[List[str]] = None,
        resource_types: Optional[List[str]] = None,
        categories: Optional[List[str]] = None,
        workspaces: Optional[List[str]] = None,
        **kwargs
    ):
        # Forward unrecognized keyword arguments to the msrest base Model.
        super(MetadataFunctionRelated, self).__init__(**kwargs)
        self.tables = tables
        self.solutions = solutions
        self.resource_types = resource_types
        self.categories = categories
        self.workspaces = workspaces
class MetadataPermissions(msrest.serialization.Model):
    """Permission information for the metadata call, includes apps/workspaces/resource the user didn't have access to.

    Required fields must be populated before sending to Azure.

    :param workspaces: Required. Permission indication for the workspaces on the metadata request.
    :type workspaces: list[~monitor_query_client.models.MetadataPermissionsWorkspacesItem]
    :param resources: Permission indication for the Azure resources on the metadata request.
    :type resources: list[~monitor_query_client.models.MetadataPermissionsResourcesItem]
    :param applications: Permission indication for the Application Insights apps on the metadata
     request.
    :type applications: list[~monitor_query_client.models.MetadataPermissionsApplicationsItem]
    """

    # Server-side contract: only 'workspaces' is mandatory.
    _validation = {
        'workspaces': {'required': True},
    }

    # Maps Python attribute names to JSON wire keys for (de)serialization.
    _attribute_map = {
        'workspaces': {'key': 'workspaces', 'type': '[MetadataPermissionsWorkspacesItem]'},
        'resources': {'key': 'resources', 'type': '[MetadataPermissionsResourcesItem]'},
        'applications': {'key': 'applications', 'type': '[MetadataPermissionsApplicationsItem]'},
    }

    def __init__(
        self,
        *,
        workspaces: List["MetadataPermissionsWorkspacesItem"],
        resources: Optional[List["MetadataPermissionsResourcesItem"]] = None,
        applications: Optional[List["MetadataPermissionsApplicationsItem"]] = None,
        **kwargs
    ):
        """Build the permission set; ``workspaces`` is required."""
        super().__init__(**kwargs)
        self.applications = applications
        self.resources = resources
        self.workspaces = workspaces
class MetadataPermissionsApplicationsItem(msrest.serialization.Model):
    """A single Application Insights app entry in a permission indication.

    Required fields must be populated before sending to Azure.

    :param resource_id: Required. The resource ID on the permission indication.
    :type resource_id: str
    """

    # Server-side contract: the resource ID is mandatory.
    _validation = {
        'resource_id': {'required': True},
    }

    # Maps Python attribute names to JSON wire keys for (de)serialization.
    _attribute_map = {
        'resource_id': {'key': 'resourceId', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        resource_id: str,
        **kwargs
    ):
        """Build the item from its required ``resource_id``."""
        super().__init__(**kwargs)
        self.resource_id = resource_id
class MetadataPermissionsResourcesItem(msrest.serialization.Model):
    """A single Azure resource entry in a permission indication.

    Required fields must be populated before sending to Azure.

    :param resource_id: Required. The resource ID on the permission indication.
    :type resource_id: str
    :param deny_tables: Tables that were denied access for the resource ID.
    :type deny_tables: list[str]
    """

    # Server-side contract: the resource ID is mandatory.
    _validation = {
        'resource_id': {'required': True},
    }

    # Maps Python attribute names to JSON wire keys for (de)serialization.
    _attribute_map = {
        'resource_id': {'key': 'resourceId', 'type': 'str'},
        'deny_tables': {'key': 'denyTables', 'type': '[str]'},
    }

    def __init__(
        self,
        *,
        resource_id: str,
        deny_tables: Optional[List[str]] = None,
        **kwargs
    ):
        """Build the item; ``resource_id`` is required, ``deny_tables`` optional."""
        super().__init__(**kwargs)
        self.deny_tables = deny_tables
        self.resource_id = resource_id
class MetadataPermissionsWorkspacesItem(msrest.serialization.Model):
    """A single Log Analytics workspace entry in a permission indication.

    Required fields must be populated before sending to Azure.

    :param resource_id: Required. The resource ID on the permission indication.
    :type resource_id: str
    :param deny_tables: Tables that were denied access for the resource ID.
    :type deny_tables: list[str]
    """

    # Server-side contract: the resource ID is mandatory.
    _validation = {
        'resource_id': {'required': True},
    }

    # Maps Python attribute names to JSON wire keys for (de)serialization.
    _attribute_map = {
        'resource_id': {'key': 'resourceId', 'type': 'str'},
        'deny_tables': {'key': 'denyTables', 'type': '[str]'},
    }

    def __init__(
        self,
        *,
        resource_id: str,
        deny_tables: Optional[List[str]] = None,
        **kwargs
    ):
        """Build the item; ``resource_id`` is required, ``deny_tables`` optional."""
        super().__init__(**kwargs)
        self.deny_tables = deny_tables
        self.resource_id = resource_id
class MetadataQuery(msrest.serialization.Model):
    """A stored piece of KQL together with its relevant metadata items.

    Required fields must be populated before sending to Azure.

    :param id: Required. The ID of the query.
    :type id: str
    :param display_name: The display name of the query.
    :type display_name: str
    :param description: The description of the query.
    :type description: str
    :param body: Required. The KQL body of the query.
    :type body: str
    :param labels: User-defined labels associated with the query.
    :type labels: list[str]
    :param tags: A set of tags. Tags associated with the query.
    :type tags: object
    :param properties: The properties of the query.
    :type properties: object
    :param related: Related metadata items for the query.
    :type related: ~monitor_query_client.models.MetadataQueryRelated
    """

    # Server-side contract: only 'id' and 'body' are mandatory.
    _validation = {
        'id': {'required': True},
        'body': {'required': True},
    }

    # Maps Python attribute names to JSON wire keys for (de)serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'body': {'key': 'body', 'type': 'str'},
        'labels': {'key': 'labels', 'type': '[str]'},
        'tags': {'key': 'tags', 'type': 'object'},
        'properties': {'key': 'properties', 'type': 'object'},
        'related': {'key': 'related', 'type': 'MetadataQueryRelated'},
    }

    def __init__(
        self,
        *,
        id: str,
        body: str,
        display_name: Optional[str] = None,
        description: Optional[str] = None,
        labels: Optional[List[str]] = None,
        tags: Optional[object] = None,
        properties: Optional[object] = None,
        related: Optional["MetadataQueryRelated"] = None,
        **kwargs
    ):
        """Build a query metadata item; ``id`` and ``body`` are required."""
        super().__init__(**kwargs)
        # Required identity and KQL body first.
        self.id = id
        self.body = body
        # Optional descriptive metadata.
        self.display_name = display_name
        self.description = description
        self.labels = labels
        self.tags = tags
        self.properties = properties
        self.related = related
class MetadataQueryRelated(msrest.serialization.Model):
    """Metadata items associated with a query.

    :param categories: Categories related to the query.
    :type categories: list[str]
    :param solutions: Log Analytics solutions related to the query.
    :type solutions: list[str]
    :param resource_types: Resource types related to the query.
    :type resource_types: list[str]
    :param tables: Tables related to the query.
    :type tables: list[str]
    """

    # Maps Python attribute names to JSON wire keys for (de)serialization.
    _attribute_map = {
        'categories': {'key': 'categories', 'type': '[str]'},
        'solutions': {'key': 'solutions', 'type': '[str]'},
        'resource_types': {'key': 'resourceTypes', 'type': '[str]'},
        'tables': {'key': 'tables', 'type': '[str]'},
    }

    def __init__(
        self,
        *,
        categories: Optional[List[str]] = None,
        solutions: Optional[List[str]] = None,
        resource_types: Optional[List[str]] = None,
        tables: Optional[List[str]] = None,
        **kwargs
    ):
        """All fields are optional; each is stored exactly as supplied."""
        super().__init__(**kwargs)
        self.tables = tables
        self.resource_types = resource_types
        self.solutions = solutions
        self.categories = categories
class MetadataResourceType(msrest.serialization.Model):
    """Metadata about a type of Azure resource, referencing its relevant tables, functions, etc.

    Required fields must be populated before sending to Azure.

    :param id: Required. The ID of the resource-type.
    :type id: str
    :param type: Required. The type of the resource-type.
    :type type: str
    :param display_name: The display name of the resource-type.
    :type display_name: str
    :param description: The description of the resource-type.
    :type description: str
    :param labels: User-defined labels of the resource-type.
    :type labels: list[str]
    :param tags: A set of tags. Tags associated with the resource-type.
    :type tags: object
    :param properties: The properties of the resource-type.
    :type properties: object
    :param related: Related metadata items for the resource-type.
    :type related: ~monitor_query_client.models.MetadataResourceTypeRelated
    """

    # Server-side contract: only 'id' and 'type' are mandatory.
    _validation = {
        'id': {'required': True},
        'type': {'required': True},
    }

    # Maps Python attribute names to JSON wire keys for (de)serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'labels': {'key': 'labels', 'type': '[str]'},
        'tags': {'key': 'tags', 'type': 'object'},
        'properties': {'key': 'properties', 'type': 'object'},
        'related': {'key': 'related', 'type': 'MetadataResourceTypeRelated'},
    }

    def __init__(
        self,
        *,
        id: str,
        type: str,
        display_name: Optional[str] = None,
        description: Optional[str] = None,
        labels: Optional[List[str]] = None,
        tags: Optional[object] = None,
        properties: Optional[object] = None,
        related: Optional["MetadataResourceTypeRelated"] = None,
        **kwargs
    ):
        """Build a resource-type item; ``id`` and ``type`` are required."""
        super().__init__(**kwargs)
        # Required identity first.
        self.id = id
        self.type = type
        # Optional descriptive metadata.
        self.display_name = display_name
        self.description = description
        self.labels = labels
        self.tags = tags
        self.properties = properties
        self.related = related
class MetadataResourceTypeRelated(msrest.serialization.Model):
    """Metadata items associated with a resource-type.

    :param tables: Tables related to the resource-type.
    :type tables: list[str]
    :param functions: Functions related to the resource-type.
    :type functions: list[str]
    :param categories: Categories related to the resource-type.
    :type categories: list[str]
    :param queries: Queries related to the resource-type.
    :type queries: list[str]
    :param workspaces: Log Analytics workspaces related to the resource-type.
    :type workspaces: list[str]
    :param resources: Azure resources related to the resource-type.
    :type resources: list[str]
    """

    # Maps Python attribute names to JSON wire keys for (de)serialization.
    _attribute_map = {
        'tables': {'key': 'tables', 'type': '[str]'},
        'functions': {'key': 'functions', 'type': '[str]'},
        'categories': {'key': 'categories', 'type': '[str]'},
        'queries': {'key': 'queries', 'type': '[str]'},
        'workspaces': {'key': 'workspaces', 'type': '[str]'},
        'resources': {'key': 'resources', 'type': '[str]'},
    }

    def __init__(
        self,
        *,
        tables: Optional[List[str]] = None,
        functions: Optional[List[str]] = None,
        categories: Optional[List[str]] = None,
        queries: Optional[List[str]] = None,
        workspaces: Optional[List[str]] = None,
        resources: Optional[List[str]] = None,
        **kwargs
    ):
        """All fields are optional; each is stored exactly as supplied."""
        super().__init__(**kwargs)
        self.resources = resources
        self.workspaces = workspaces
        self.queries = queries
        self.categories = categories
        self.functions = functions
        self.tables = tables
class MetadataResults(msrest.serialization.Model):
    """The metadata response for the app, including available tables, etc.

    :param categories: Categories referenced in this metadata response.
    :type categories: list[~monitor_query_client.models.MetadataCategory]
    :param resource_types: Resource types referenced in this metadata response.
    :type resource_types: list[~monitor_query_client.models.MetadataResourceType]
    :param solutions: Log Analytics solutions installed on the workspace.
    :type solutions: list[~monitor_query_client.models.MetadataSolution]
    :param tables: Tables and columns that comprise the schema of the workspace.
    :type tables: list[~monitor_query_client.models.MetadataTable]
    :param functions: Functions stored on the workspace, or introduced by solutions etc.
    :type functions: list[~monitor_query_client.models.MetadataFunction]
    :param queries: Saved queries stored on the workspace, or introduced by solutions, resource
     types, etc.
    :type queries: list[~monitor_query_client.models.MetadataQuery]
    :param applications: Application Insights apps referenced in the metadata request.
    :type applications: list[~monitor_query_client.models.MetadataApplication]
    :param workspaces: Log Analytics workspaces referenced in the metadata request.
    :type workspaces: list[~monitor_query_client.models.MetadataWorkspace]
    :param resources: Azure resources referenced in the metadata request.
    :type resources: list[object]
    :param permissions: Permission rules that affected the metadata request.
    :type permissions: list[~monitor_query_client.models.MetadataPermissions]
    """

    # Every collection must contain unique elements; none is required.
    _validation = {
        'categories': {'unique': True},
        'resource_types': {'unique': True},
        'solutions': {'unique': True},
        'tables': {'unique': True},
        'functions': {'unique': True},
        'queries': {'unique': True},
        'applications': {'unique': True},
        'workspaces': {'unique': True},
        'resources': {'unique': True},
        'permissions': {'unique': True},
    }

    # Maps Python attribute names to JSON wire keys for (de)serialization.
    _attribute_map = {
        'categories': {'key': 'categories', 'type': '[MetadataCategory]'},
        'resource_types': {'key': 'resourceTypes', 'type': '[MetadataResourceType]'},
        'solutions': {'key': 'solutions', 'type': '[MetadataSolution]'},
        'tables': {'key': 'tables', 'type': '[MetadataTable]'},
        'functions': {'key': 'functions', 'type': '[MetadataFunction]'},
        'queries': {'key': 'queries', 'type': '[MetadataQuery]'},
        'applications': {'key': 'applications', 'type': '[MetadataApplication]'},
        'workspaces': {'key': 'workspaces', 'type': '[MetadataWorkspace]'},
        'resources': {'key': 'resources', 'type': '[object]'},
        'permissions': {'key': 'permissions', 'type': '[MetadataPermissions]'},
    }

    def __init__(
        self,
        *,
        categories: Optional[List["MetadataCategory"]] = None,
        resource_types: Optional[List["MetadataResourceType"]] = None,
        solutions: Optional[List["MetadataSolution"]] = None,
        tables: Optional[List["MetadataTable"]] = None,
        functions: Optional[List["MetadataFunction"]] = None,
        queries: Optional[List["MetadataQuery"]] = None,
        applications: Optional[List["MetadataApplication"]] = None,
        workspaces: Optional[List["MetadataWorkspace"]] = None,
        resources: Optional[List[object]] = None,
        permissions: Optional[List["MetadataPermissions"]] = None,
        **kwargs
    ):
        """All fields are optional; each is stored exactly as supplied."""
        super().__init__(**kwargs)
        self.permissions = permissions
        self.resources = resources
        self.workspaces = workspaces
        self.applications = applications
        self.queries = queries
        self.functions = functions
        self.tables = tables
        self.solutions = solutions
        self.resource_types = resource_types
        self.categories = categories
class MetadataSolution(msrest.serialization.Model):
    """A Log Analytics solution, grouping the tables and functions associated with an offering.

    Required fields must be populated before sending to Azure.

    :param id: Required. The ID of the Log Analytics solution.
    :type id: str
    :param name: Required. The name of the Log Analytics solution.
    :type name: str
    :param display_name: The display name of the Log Analytics solution.
    :type display_name: str
    :param description: The description of the Log Analytics solution.
    :type description: str
    :param tags: A set of tags. Tags associated with the Log Analytics solution.
    :type tags: object
    :param properties: The properties of the Log Analytics solution.
    :type properties: object
    :param related: Required. Related metadata items for the Log Analytics solution.
    :type related: ~monitor_query_client.models.MetadataSolutionRelated
    """

    # Server-side contract: 'id', 'name' and 'related' are mandatory.
    _validation = {
        'id': {'required': True},
        'name': {'required': True},
        'related': {'required': True},
    }

    # Maps Python attribute names to JSON wire keys for (de)serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'tags': {'key': 'tags', 'type': 'object'},
        'properties': {'key': 'properties', 'type': 'object'},
        'related': {'key': 'related', 'type': 'MetadataSolutionRelated'},
    }

    def __init__(
        self,
        *,
        id: str,
        name: str,
        related: "MetadataSolutionRelated",
        display_name: Optional[str] = None,
        description: Optional[str] = None,
        tags: Optional[object] = None,
        properties: Optional[object] = None,
        **kwargs
    ):
        """Build a solution item; ``id``, ``name`` and ``related`` are required."""
        super().__init__(**kwargs)
        # Required fields first.
        self.id = id
        self.name = name
        self.related = related
        # Optional descriptive metadata.
        self.display_name = display_name
        self.description = description
        self.tags = tags
        self.properties = properties
class MetadataSolutionRelated(msrest.serialization.Model):
    """Metadata items associated with a Log Analytics solution.

    Required fields must be populated before sending to Azure.

    :param tables: Required. Tables related to the Log Analytics solution.
    :type tables: list[str]
    :param functions: Functions related to the Log Analytics solution.
    :type functions: list[str]
    :param categories: Categories related to the Log Analytics solution.
    :type categories: list[str]
    :param queries: Saved queries related to the Log Analytics solution.
    :type queries: list[str]
    :param workspaces: Workspaces referenced in the metadata request that are related to the Log
     Analytics solution.
    :type workspaces: list[str]
    """

    # Server-side contract: only 'tables' is mandatory.
    _validation = {
        'tables': {'required': True},
    }

    # Maps Python attribute names to JSON wire keys for (de)serialization.
    _attribute_map = {
        'tables': {'key': 'tables', 'type': '[str]'},
        'functions': {'key': 'functions', 'type': '[str]'},
        'categories': {'key': 'categories', 'type': '[str]'},
        'queries': {'key': 'queries', 'type': '[str]'},
        'workspaces': {'key': 'workspaces', 'type': '[str]'},
    }

    def __init__(
        self,
        *,
        tables: List[str],
        functions: Optional[List[str]] = None,
        categories: Optional[List[str]] = None,
        queries: Optional[List[str]] = None,
        workspaces: Optional[List[str]] = None,
        **kwargs
    ):
        """Build the related-items set; ``tables`` is required."""
        super().__init__(**kwargs)
        self.workspaces = workspaces
        self.queries = queries
        self.categories = categories
        self.functions = functions
        self.tables = tables
class MetadataTable(msrest.serialization.Model):
    """A workspace-schema table: a list of columns plus references to other relevant metadata items.

    Required fields must be populated before sending to Azure.

    :param id: Required. The ID of the table.
    :type id: str
    :param name: Required. The name of the table.
    :type name: str
    :param description: The description of the table.
    :type description: str
    :param timespan_column: The column associated with the timespan query parameter for the table.
    :type timespan_column: str
    :param labels: User-defined labels of the table.
    :type labels: list[str]
    :param tags: A set of tags. Tags associated with the table.
    :type tags: object
    :param properties: The properties of the table.
    :type properties: object
    :param columns: Columns defined on the table.
    :type columns: list[~monitor_query_client.models.MetadataTableColumnsItem]
    :param related: Related metadata items for the table.
    :type related: ~monitor_query_client.models.MetadataTableRelated
    """

    # Server-side contract: only 'id' and 'name' are mandatory.
    _validation = {
        'id': {'required': True},
        'name': {'required': True},
    }

    # Maps Python attribute names to JSON wire keys for (de)serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'timespan_column': {'key': 'timespanColumn', 'type': 'str'},
        'labels': {'key': 'labels', 'type': '[str]'},
        'tags': {'key': 'tags', 'type': 'object'},
        'properties': {'key': 'properties', 'type': 'object'},
        'columns': {'key': 'columns', 'type': '[MetadataTableColumnsItem]'},
        'related': {'key': 'related', 'type': 'MetadataTableRelated'},
    }

    def __init__(
        self,
        *,
        id: str,
        name: str,
        description: Optional[str] = None,
        timespan_column: Optional[str] = None,
        labels: Optional[List[str]] = None,
        tags: Optional[object] = None,
        properties: Optional[object] = None,
        columns: Optional[List["MetadataTableColumnsItem"]] = None,
        related: Optional["MetadataTableRelated"] = None,
        **kwargs
    ):
        """Build a table item; ``id`` and ``name`` are required."""
        super().__init__(**kwargs)
        # Required identity first.
        self.id = id
        self.name = name
        # Optional descriptive metadata and schema details.
        self.description = description
        self.timespan_column = timespan_column
        self.labels = labels
        self.tags = tags
        self.properties = properties
        self.columns = columns
        self.related = related
class MetadataTableColumnsItem(msrest.serialization.Model):
    """A single column definition within a table's schema.

    Required fields must be populated before sending to Azure.

    :param name: Required. The name of the column.
    :type name: str
    :param description: The description of the column.
    :type description: str
    :param type: Required. The data type of the column. Possible values include: "bool",
     "datetime", "dynamic", "int", "long", "real", "string", "guid", "decimal", "timespan".
    :type type: str or ~monitor_query_client.models.MetadataColumnDataType
    :param is_preferred_facet: A flag indicating this column is a preferred facet.
    :type is_preferred_facet: bool
    :param source: An indication of the source of the column, used only when multiple workspaces
     have conflicting definitions for the column.
    :type source: object
    """

    # Server-side contract: 'name' and 'type' are mandatory.
    _validation = {
        'name': {'required': True},
        'type': {'required': True},
    }

    # Maps Python attribute names to JSON wire keys for (de)serialization.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'is_preferred_facet': {'key': 'isPreferredFacet', 'type': 'bool'},
        'source': {'key': 'source', 'type': 'object'},
    }

    def __init__(
        self,
        *,
        name: str,
        type: Union[str, "MetadataColumnDataType"],
        description: Optional[str] = None,
        is_preferred_facet: Optional[bool] = None,
        source: Optional[object] = None,
        **kwargs
    ):
        """Build a column item; ``name`` and ``type`` are required."""
        super().__init__(**kwargs)
        # Required identity first.
        self.name = name
        self.type = type
        # Optional descriptive metadata.
        self.description = description
        self.is_preferred_facet = is_preferred_facet
        self.source = source
class MetadataTableRelated(msrest.serialization.Model):
    """Metadata items associated with a table.

    :param categories: Categories related to the table.
    :type categories: list[str]
    :param solutions: Log Analytics solutions related to the table.
    :type solutions: list[str]
    :param resource_types: Resource types related to the table.
    :type resource_types: list[str]
    :param workspaces: Log Analytics workspaces related to the table.
    :type workspaces: list[str]
    :param functions: Functions related to the table.
    :type functions: list[str]
    :param queries: Saved queries related to the table.
    :type queries: list[str]
    """

    # Maps Python attribute names to JSON wire keys for (de)serialization.
    _attribute_map = {
        'categories': {'key': 'categories', 'type': '[str]'},
        'solutions': {'key': 'solutions', 'type': '[str]'},
        'resource_types': {'key': 'resourceTypes', 'type': '[str]'},
        'workspaces': {'key': 'workspaces', 'type': '[str]'},
        'functions': {'key': 'functions', 'type': '[str]'},
        'queries': {'key': 'queries', 'type': '[str]'},
    }

    def __init__(
        self,
        *,
        categories: Optional[List[str]] = None,
        solutions: Optional[List[str]] = None,
        resource_types: Optional[List[str]] = None,
        workspaces: Optional[List[str]] = None,
        functions: Optional[List[str]] = None,
        queries: Optional[List[str]] = None,
        **kwargs
    ):
        """All fields are optional; each is stored exactly as supplied."""
        super().__init__(**kwargs)
        self.queries = queries
        self.functions = functions
        self.workspaces = workspaces
        self.resource_types = resource_types
        self.solutions = solutions
        self.categories = categories
class MetadataValue(msrest.serialization.Model):
    """A single metric metadata name/value pair.

    :param name: The name of the metadata.
    :type name: ~monitor_query_client.models.LocalizableString
    :param value: The value of the metadata.
    :type value: str
    """

    # Maps Python attribute names to JSON wire keys for (de)serialization.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'LocalizableString'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: Optional["LocalizableString"] = None,
        value: Optional[str] = None,
        **kwargs
    ):
        """Both fields are optional; each is stored exactly as supplied."""
        super().__init__(**kwargs)
        self.value = value
        self.name = name
class MetadataWorkspace(msrest.serialization.Model):
    """A Log Analytics workspace that was part of the metadata request and that the user has access to.

    Required fields must be populated before sending to Azure.

    :param id: Required. The ID of the Log Analytics workspace.
    :type id: str
    :param resource_id: Required. The ARM resource ID of the Log Analytics workspace.
    :type resource_id: str
    :param name: Required. The name of the Log Analytics workspace.
    :type name: str
    :param region: Required. The Azure region of the Log Analytics workspace.
    :type region: str
    :param related: Related metadata items for the Log Analytics workspace.
    :type related: ~monitor_query_client.models.MetadataWorkspaceRelated
    """

    # Server-side contract: everything but 'related' is mandatory.
    _validation = {
        'id': {'required': True},
        'resource_id': {'required': True},
        'name': {'required': True},
        'region': {'required': True},
    }

    # Maps Python attribute names to JSON wire keys for (de)serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'resource_id': {'key': 'resourceId', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'region': {'key': 'region', 'type': 'str'},
        'related': {'key': 'related', 'type': 'MetadataWorkspaceRelated'},
    }

    def __init__(
        self,
        *,
        id: str,
        resource_id: str,
        name: str,
        region: str,
        related: Optional["MetadataWorkspaceRelated"] = None,
        **kwargs
    ):
        """Build a workspace item; all fields but ``related`` are required."""
        super().__init__(**kwargs)
        self.related = related
        self.region = region
        self.name = name
        self.resource_id = resource_id
        self.id = id
class MetadataWorkspaceRelated(msrest.serialization.Model):
    """Metadata items associated with a Log Analytics workspace.

    :param tables: Tables related to the Log Analytics workspace.
    :type tables: list[str]
    :param solutions: Log Analytics solutions related to the Log Analytics workspace.
    :type solutions: list[str]
    :param resource_types: Resource types related to the Log Analytics workspace.
    :type resource_types: list[str]
    :param functions: Functions related to the Log Analytics workspace.
    :type functions: list[str]
    :param resources: Azure resources related to the Log Analytics workspace.
    :type resources: list[str]
    """

    # Maps Python attribute names to JSON wire keys for (de)serialization.
    _attribute_map = {
        'tables': {'key': 'tables', 'type': '[str]'},
        'solutions': {'key': 'solutions', 'type': '[str]'},
        'resource_types': {'key': 'resourceTypes', 'type': '[str]'},
        'functions': {'key': 'functions', 'type': '[str]'},
        'resources': {'key': 'resources', 'type': '[str]'},
    }

    def __init__(
        self,
        *,
        tables: Optional[List[str]] = None,
        solutions: Optional[List[str]] = None,
        resource_types: Optional[List[str]] = None,
        functions: Optional[List[str]] = None,
        resources: Optional[List[str]] = None,
        **kwargs
    ):
        """All fields are optional; each is stored exactly as supplied."""
        super().__init__(**kwargs)
        self.resources = resources
        self.functions = functions
        self.resource_types = resource_types
        self.solutions = solutions
        self.tables = tables
class Metric(msrest.serialization.Model):
    """The result data of a query.

    Required fields must be populated before sending to Azure.

    :param id: Required. The metric Id.
    :type id: str
    :param type: Required. The resource type of the metric resource.
    :type type: str
    :param name: Required. The name and the display name of the metric, i.e. it is a localizable
     string.
    :type name: ~monitor_query_client.models.LocalizableString
    :param display_description: Detailed description of this metric.
    :type display_description: str
    :param error_code: 'Success' or the error details on query failures for this metric.
    :type error_code: str
    :param error_message: Error message encountered querying this specific metric.
    :type error_message: str
    :param unit: Required. The unit of the metric. Possible values include: "Count", "Bytes",
     "Seconds", "CountPerSecond", "BytesPerSecond", "Percent", "MilliSeconds", "ByteSeconds",
     "Unspecified", "Cores", "MilliCores", "NanoCores", "BitsPerSecond".
    :type unit: str or ~monitor_query_client.models.MetricUnit
    :param timeseries: Required. The time series returned when a data query is performed.
    :type timeseries: list[~monitor_query_client.models.TimeSeriesElement]
    """

    # Server-side contract: errors and display description are the only optional fields.
    _validation = {
        'id': {'required': True},
        'type': {'required': True},
        'name': {'required': True},
        'unit': {'required': True},
        'timeseries': {'required': True},
    }

    # Maps Python attribute names to JSON wire keys for (de)serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'LocalizableString'},
        'display_description': {'key': 'displayDescription', 'type': 'str'},
        'error_code': {'key': 'errorCode', 'type': 'str'},
        'error_message': {'key': 'errorMessage', 'type': 'str'},
        'unit': {'key': 'unit', 'type': 'str'},
        'timeseries': {'key': 'timeseries', 'type': '[TimeSeriesElement]'},
    }

    def __init__(
        self,
        *,
        id: str,
        type: str,
        name: "LocalizableString",
        unit: Union[str, "MetricUnit"],
        timeseries: List["TimeSeriesElement"],
        display_description: Optional[str] = None,
        error_code: Optional[str] = None,
        error_message: Optional[str] = None,
        **kwargs
    ):
        """Build a metric result; only description and error fields are optional."""
        super().__init__(**kwargs)
        # Required fields first.
        self.id = id
        self.type = type
        self.name = name
        self.unit = unit
        self.timeseries = timeseries
        # Optional description and per-metric error details.
        self.display_description = display_description
        self.error_code = error_code
        self.error_message = error_message
class MetricAvailability(msrest.serialization.Model):
    """Specifies the time grain (aggregation interval or frequency) and the retention period for that time grain.

    :param time_grain: The aggregation interval for the metric, expressed as a duration ('PT1M',
     'P1D', etc.).
    :type time_grain: ~datetime.timedelta
    :param retention: The retention period for the metric at the specified time grain, expressed
     as a duration ('PT1M', 'P1D', etc.).
    :type retention: ~datetime.timedelta
    """

    # Both fields are serialized as ISO-8601 durations on the wire.
    _attribute_map = {
        'time_grain': {'key': 'timeGrain', 'type': 'duration'},
        'retention': {'key': 'retention', 'type': 'duration'},
    }

    def __init__(
        self,
        *,
        time_grain: Optional[datetime.timedelta] = None,
        retention: Optional[datetime.timedelta] = None,
        **kwargs
    ):
        """Both fields are optional; each is stored exactly as supplied."""
        super().__init__(**kwargs)
        self.retention = retention
        self.time_grain = time_grain
class MetricDefinition(msrest.serialization.Model):
    """Specifies the metadata for a metric.

    :param is_dimension_required: Flag to indicate whether the dimension is required.
    :type is_dimension_required: bool
    :param resource_id: The resource identifier of the resource that emitted the metric.
    :type resource_id: str
    :param namespace: The namespace the metric belongs to.
    :type namespace: str
    :param name: The name and the display name of the metric, i.e. it is a localizable string.
    :type name: ~monitor_query_client.models.LocalizableString
    :param display_description: Detailed description of this metric.
    :type display_description: str
    :param category: Custom category name for this metric.
    :type category: str
    :param metric_class: The class of the metric. Possible values include: "Availability",
     "Transactions", "Errors", "Latency", "Saturation".
    :type metric_class: str or ~monitor_query_client.models.MetricClass
    :param unit: The unit of the metric. Possible values include: "Count", "Bytes", "Seconds",
     "CountPerSecond", "BytesPerSecond", "Percent", "MilliSeconds", "ByteSeconds", "Unspecified",
     "Cores", "MilliCores", "NanoCores", "BitsPerSecond".
    :type unit: str or ~monitor_query_client.models.MetricUnit
    :param primary_aggregation_type: The primary aggregation type value defining how to use the
     values for display. Possible values include: "None", "Average", "Count", "Minimum", "Maximum",
     "Total".
    :type primary_aggregation_type: str or ~monitor_query_client.models.AggregationType
    :param supported_aggregation_types: The collection of supported aggregation types.
    :type supported_aggregation_types: list[str or ~monitor_query_client.models.AggregationType]
    :param metric_availabilities: The collection of aggregation intervals available to be queried.
    :type metric_availabilities: list[~monitor_query_client.models.MetricAvailability]
    :param id: The resource identifier of the metric definition.
    :type id: str
    :param dimensions: The name and the display name of each dimension, i.e. localizable strings.
    :type dimensions: list[~monitor_query_client.models.LocalizableString]
    """

    # Maps Python attribute names to JSON wire keys for (de)serialization.
    _attribute_map = {
        'is_dimension_required': {'key': 'isDimensionRequired', 'type': 'bool'},
        'resource_id': {'key': 'resourceId', 'type': 'str'},
        'namespace': {'key': 'namespace', 'type': 'str'},
        'name': {'key': 'name', 'type': 'LocalizableString'},
        'display_description': {'key': 'displayDescription', 'type': 'str'},
        'category': {'key': 'category', 'type': 'str'},
        'metric_class': {'key': 'metricClass', 'type': 'str'},
        'unit': {'key': 'unit', 'type': 'str'},
        'primary_aggregation_type': {'key': 'primaryAggregationType', 'type': 'str'},
        'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'},
        'metric_availabilities': {'key': 'metricAvailabilities', 'type': '[MetricAvailability]'},
        'id': {'key': 'id', 'type': 'str'},
        'dimensions': {'key': 'dimensions', 'type': '[LocalizableString]'},
    }

    def __init__(
        self,
        *,
        is_dimension_required: Optional[bool] = None,
        resource_id: Optional[str] = None,
        namespace: Optional[str] = None,
        name: Optional["LocalizableString"] = None,
        display_description: Optional[str] = None,
        category: Optional[str] = None,
        metric_class: Optional[Union[str, "MetricClass"]] = None,
        unit: Optional[Union[str, "MetricUnit"]] = None,
        primary_aggregation_type: Optional[Union[str, "AggregationType"]] = None,
        supported_aggregation_types: Optional[List[Union[str, "AggregationType"]]] = None,
        metric_availabilities: Optional[List["MetricAvailability"]] = None,
        id: Optional[str] = None,
        dimensions: Optional[List["LocalizableString"]] = None,
        **kwargs
    ):
        """All fields are optional; each is stored exactly as supplied."""
        super().__init__(**kwargs)
        self.dimensions = dimensions
        self.id = id
        self.metric_availabilities = metric_availabilities
        self.supported_aggregation_types = supported_aggregation_types
        self.primary_aggregation_type = primary_aggregation_type
        self.unit = unit
        self.metric_class = metric_class
        self.category = category
        self.display_description = display_description
        self.name = name
        self.namespace = namespace
        self.resource_id = resource_id
        self.is_dimension_required = is_dimension_required
class MetricDefinitionCollection(msrest.serialization.Model):
    """A wrapper around the list of metric definitions returned by the service.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. the values for the metric definitions.
    :type value: list[~monitor_query_client.models.MetricDefinition]
    """

    _validation = {'value': {'required': True}}

    _attribute_map = {'value': {'key': 'value', 'type': '[MetricDefinition]'}}

    def __init__(self, *, value: List["MetricDefinition"], **kwargs):
        super().__init__(**kwargs)
        self.value = value
class MetricNamespace(msrest.serialization.Model):
    """Metadata describing a single metric namespace.

    :param id: The ID of the metric namespace.
    :type id: str
    :param type: The type of the namespace.
    :type type: str
    :param name: The escaped name of the namespace.
    :type name: str
    :param classification: Kind of namespace. Possible values include: "Platform", "Custom", "Qos".
    :type classification: str or ~monitor_query_client.models.NamespaceClassification
    :param properties: Properties which include the fully qualified namespace name.
    :type properties: ~monitor_query_client.models.MetricNamespaceName
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'classification': {'key': 'classification', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'MetricNamespaceName'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        type: Optional[str] = None,
        name: Optional[str] = None,
        classification: Optional[Union[str, "NamespaceClassification"]] = None,
        properties: Optional["MetricNamespaceName"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        # Straight one-to-one copies of the keyword arguments.
        self.properties = properties
        self.classification = classification
        self.name = name
        self.type = type
        self.id = id
class MetricNamespaceCollection(msrest.serialization.Model):
    """A wrapper around the list of metric namespaces returned by the service.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The values for the metric namespaces.
    :type value: list[~monitor_query_client.models.MetricNamespace]
    """

    _validation = {'value': {'required': True}}

    _attribute_map = {'value': {'key': 'value', 'type': '[MetricNamespace]'}}

    def __init__(self, *, value: List["MetricNamespace"], **kwargs):
        super().__init__(**kwargs)
        self.value = value
class MetricNamespaceName(msrest.serialization.Model):
    """Holds the fully qualified name of a metric namespace.

    :param metric_namespace_name: The metric namespace name.
    :type metric_namespace_name: str
    """

    _attribute_map = {'metric_namespace_name': {'key': 'metricNamespaceName', 'type': 'str'}}

    def __init__(self, *, metric_namespace_name: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.metric_namespace_name = metric_namespace_name
class MetricValue(msrest.serialization.Model):
    """A single data point of a metric time series.

    All required parameters must be populated in order to send to Azure.

    :param time_stamp: Required. the timestamp for the metric value in ISO 8601 format.
    :type time_stamp: ~datetime.datetime
    :param average: the average value in the time range.
    :type average: float
    :param minimum: the least value in the time range.
    :type minimum: float
    :param maximum: the greatest value in the time range.
    :type maximum: float
    :param total: the sum of all of the values in the time range.
    :type total: float
    :param count: the number of samples in the time range. Can be used to determine the number of
     values that contributed to the average value.
    :type count: float
    """

    _validation = {'time_stamp': {'required': True}}

    _attribute_map = {
        'time_stamp': {'key': 'timeStamp', 'type': 'iso-8601'},
        'average': {'key': 'average', 'type': 'float'},
        'minimum': {'key': 'minimum', 'type': 'float'},
        'maximum': {'key': 'maximum', 'type': 'float'},
        'total': {'key': 'total', 'type': 'float'},
        'count': {'key': 'count', 'type': 'float'},
    }

    def __init__(
        self,
        *,
        time_stamp: datetime.datetime,
        average: Optional[float] = None,
        minimum: Optional[float] = None,
        maximum: Optional[float] = None,
        total: Optional[float] = None,
        count: Optional[float] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.time_stamp = time_stamp
        # Aggregation values; any of these may be absent depending on which
        # aggregation types were requested in the query.
        self.count = count
        self.total = total
        self.maximum = maximum
        self.minimum = minimum
        self.average = average
class QueryBody(msrest.serialization.Model):
    """The Analytics query. Learn more about the `Analytics query syntax <https://azure.microsoft.com/documentation/articles/app-insights-analytics-reference/>`_.

    All required parameters must be populated in order to send to Azure.

    :param query: Required. The query to execute.
    :type query: str
    :param timespan: Optional. The timespan over which to query data. This is an ISO8601 time
     period value. This timespan is applied in addition to any that are specified in the query
     expression.
    :type timespan: str
    :param workspaces: A list of workspaces that are included in the query.
    :type workspaces: list[str]
    """

    _validation = {'query': {'required': True}}

    _attribute_map = {
        'query': {'key': 'query', 'type': 'str'},
        'timespan': {'key': 'timespan', 'type': 'str'},
        'workspaces': {'key': 'workspaces', 'type': '[str]'},
    }

    def __init__(
        self,
        *,
        query: str,
        timespan: Optional[str] = None,
        workspaces: Optional[List[str]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.workspaces = workspaces
        self.timespan = timespan
        self.query = query
class QueryResults(msrest.serialization.Model):
    """The tables, columns and rows produced by running a query.

    All required parameters must be populated in order to send to Azure.

    :param tables: Required. The list of tables, columns and rows.
    :type tables: list[~monitor_query_client.models.Table]
    :param statistics: Any object.
    :type statistics: object
    :param render: Any object.
    :type render: object
    :param error: The code and message for an error.
    :type error: ~monitor_query_client.models.ErrorInfo
    """

    _validation = {'tables': {'required': True}}

    _attribute_map = {
        'tables': {'key': 'tables', 'type': '[Table]'},
        'statistics': {'key': 'statistics', 'type': 'object'},
        'render': {'key': 'render', 'type': 'object'},
        'error': {'key': 'error', 'type': 'ErrorInfo'},
    }

    def __init__(
        self,
        *,
        tables: List["Table"],
        statistics: Optional[object] = None,
        render: Optional[object] = None,
        error: Optional["ErrorInfo"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.tables = tables
        # Optional service-provided extras; populated only on request/failure.
        self.error = error
        self.render = render
        self.statistics = statistics
class Response(msrest.serialization.Model):
    """The result of a metrics query.

    All required parameters must be populated in order to send to Azure.

    :param cost: The integer value representing the relative cost of the query.
    :type cost: int
    :param timespan: Required. The timespan for which the data was retrieved. Its value consists of
     two datetimes concatenated, separated by '/'. This may be adjusted in the future and returned
     back from what was originally requested.
    :type timespan: str
    :param interval: The interval (window size) for which the metric data was returned in. This
     may be adjusted in the future and returned back from what was originally requested. This is
     not present if a metadata request was made.
    :type interval: ~datetime.timedelta
    :param namespace: The namespace of the metrics being queried.
    :type namespace: str
    :param resourceregion: The region of the resource being queried for metrics.
    :type resourceregion: str
    :param value: Required. the value of the collection.
    :type value: list[~monitor_query_client.models.Metric]
    """

    _validation = {
        'cost': {'minimum': 0},
        'timespan': {'required': True},
        'value': {'required': True},
    }

    _attribute_map = {
        'cost': {'key': 'cost', 'type': 'int'},
        'timespan': {'key': 'timespan', 'type': 'str'},
        'interval': {'key': 'interval', 'type': 'duration'},
        'namespace': {'key': 'namespace', 'type': 'str'},
        'resourceregion': {'key': 'resourceregion', 'type': 'str'},
        'value': {'key': 'value', 'type': '[Metric]'},
    }

    def __init__(
        self,
        *,
        timespan: str,
        value: List["Metric"],
        cost: Optional[int] = None,
        interval: Optional[datetime.timedelta] = None,
        namespace: Optional[str] = None,
        resourceregion: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        # Required fields first.
        self.timespan = timespan
        self.value = value
        # Optional metadata about how the query was evaluated.
        self.cost = cost
        self.interval = interval
        self.namespace = namespace
        self.resourceregion = resourceregion
class Table(msrest.serialization.Model):
    """One table of a query response: its name, column metadata, and row data.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The name of the table.
    :type name: str
    :param columns: Required. The list of columns in this table.
    :type columns: list[~monitor_query_client.models.Column]
    :param rows: Required. The resulting rows from this query.
    :type rows: list[list[object]]
    """

    _validation = {
        'name': {'required': True},
        'columns': {'required': True},
        'rows': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'columns': {'key': 'columns', 'type': '[Column]'},
        'rows': {'key': 'rows', 'type': '[[object]]'},
    }

    def __init__(self, *, name: str, columns: List["Column"], rows: List[List[object]], **kwargs):
        super().__init__(**kwargs)
        self.rows = rows
        self.columns = columns
        self.name = name
class TimeSeriesElement(msrest.serialization.Model):
    """A time series result type. The discriminator value is always TimeSeries in this case.

    :param metadatavalues: the metadata values returned if $filter was specified in the call.
    :type metadatavalues: list[~monitor_query_client.models.MetadataValue]
    :param data: An array of data points representing the metric values. This is only returned if
     a result type of data is specified.
    :type data: list[~monitor_query_client.models.MetricValue]
    """

    _attribute_map = {
        'metadatavalues': {'key': 'metadatavalues', 'type': '[MetadataValue]'},
        'data': {'key': 'data', 'type': '[MetricValue]'},
    }

    def __init__(
        self,
        *,
        metadatavalues: Optional[List["MetadataValue"]] = None,
        data: Optional[List["MetricValue"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.data = data
        self.metadatavalues = metadatavalues
import enum
from typing import Dict
import socket
import ipgetter2
import psutil
import orchard.extensions
class IPVersion(enum.Enum):
"""
Versions of the Internet Protocol.
"""
v4 = 4
v6 = 6
@orchard.extensions.cache.memoize()
def hostname() -> str:
"""
Get the host name of the system.
:return: The host name.
"""
return socket.gethostname()
@orchard.extensions.cache.memoize()
def external_ip_address() -> str:
"""
Get the IP address of the system as seen from outside the local network.
:return: The IP address
"""
ipgetter = ipgetter2.IPGetter()
addresses = ipgetter.get()
return str(addresses.v6) if str(addresses.v6) != '::' else str(addresses.v4)
def ip_address(interface: str, ip_version: IPVersion) -> str:
"""
Get the IP address in the given IP version for the specified interface.
:param interface: The interface for which the IP address will be returned.
:param ip_version: The version of the Internet Protocol.
:return: The associated IP address. If the interface does not exist, or if the specified
interface does not have an address in the given IP version, return an empty string.
"""
all_adresses = ip_addresses()
interface_addresses = all_adresses.get(interface, {})
return interface_addresses.get(ip_version, '')
def ip_addresses() -> Dict[str, Dict[IPVersion, str]]:
"""
Get the IPv4 and IPv6 addresses of all network interfaces (except the loopback interface).
:return: The outer dictionary's key is the name of the network interface, the corresponding
value is a dictionary with possibly two entries: one with key ``IPVersion.v4``
for the IPv4 address of the interface, the other with key ``'IPVersion.v6'`` for
the IPv6 address of the interface.
"""
unfiltered_addresses = psutil.net_if_addrs()
filtered_addresses = {}
for interface, addresses in unfiltered_addresses.items():
# Ignore the loopback interface.
if interface == 'lo':
continue
filtered_addresses[interface] = {}
for address in addresses:
# Add IPv4 and IPv6 addresses to the output.
if address.family == socket.AF_INET:
filtered_addresses[interface][IPVersion.v4] = address.address
elif address.family == socket.AF_INET6:
filtered_addresses[interface][IPVersion.v6] = address.address
return filtered_addresses | orchard/system_status/system/network.py | import enum
from typing import Dict
import socket
import ipgetter2
import psutil
import orchard.extensions
class IPVersion(enum.Enum):
"""
Versions of the Internet Protocol.
"""
v4 = 4
v6 = 6
@orchard.extensions.cache.memoize()
def hostname() -> str:
"""
Get the host name of the system.
:return: The host name.
"""
return socket.gethostname()
@orchard.extensions.cache.memoize()
def external_ip_address() -> str:
"""
Get the IP address of the system as seen from outside the local network.
:return: The IP address
"""
ipgetter = ipgetter2.IPGetter()
addresses = ipgetter.get()
return str(addresses.v6) if str(addresses.v6) != '::' else str(addresses.v4)
def ip_address(interface: str, ip_version: IPVersion) -> str:
"""
Get the IP address in the given IP version for the specified interface.
:param interface: The interface for which the IP address will be returned.
:param ip_version: The version of the Internet Protocol.
:return: The associated IP address. If the interface does not exist, or if the specified
interface does not have an address in the given IP version, return an empty string.
"""
all_adresses = ip_addresses()
interface_addresses = all_adresses.get(interface, {})
return interface_addresses.get(ip_version, '')
def ip_addresses() -> Dict[str, Dict[IPVersion, str]]:
"""
Get the IPv4 and IPv6 addresses of all network interfaces (except the loopback interface).
:return: The outer dictionary's key is the name of the network interface, the corresponding
value is a dictionary with possibly two entries: one with key ``IPVersion.v4``
for the IPv4 address of the interface, the other with key ``'IPVersion.v6'`` for
the IPv6 address of the interface.
"""
unfiltered_addresses = psutil.net_if_addrs()
filtered_addresses = {}
for interface, addresses in unfiltered_addresses.items():
# Ignore the loopback interface.
if interface == 'lo':
continue
filtered_addresses[interface] = {}
for address in addresses:
# Add IPv4 and IPv6 addresses to the output.
if address.family == socket.AF_INET:
filtered_addresses[interface][IPVersion.v4] = address.address
elif address.family == socket.AF_INET6:
filtered_addresses[interface][IPVersion.v6] = address.address
return filtered_addresses | 0.841109 | 0.298044 |
import cx_Oracle
from utils.logger.log import log
class oracle:
def __init__(self, configs):
self.logger = log.get_logger(category="oracle")
_user = configs["user"]
_password = configs["password"]
_host = configs["host"]
_port = configs["port"]
_sid = configs["sid"]
_min = configs["pool_min"]
_max = configs["pool_max"]
self.__connect(_user, _password, _host, _port, _sid, _min, _max)
def __connect(self, user, password, host, port, sid, _min, _max):
dsn = cx_Oracle.makedsn(host, port, sid)
self.logger.info("start connect oracle database [ user=%s, host=%s, port=%s ]", user, host, port)
self.pool = cx_Oracle.SessionPool(user=user, password=password, dsn=dsn, min=_min, max=_max, increment=1)
def get_cnx(self):
acq = self.pool.acquire()
return acq
def __release_cnx(self, cnx):
self.pool.release(cnx)
# 判断是否存在记录
def is_exist(self, sql, params):
res = self.select(sql=sql, params=params)
if len(res) > 0:
return True
else:
return False
# 查询
def select(self, sql, params=None):
cnx = self.get_cnx()
try:
self.logger.debug({"sql": sql, "params": params})
cursor = cnx.cursor()
if params is None:
cursor.execute(sql)
else:
cursor.execute(sql, params)
fc = cursor.fetchall()
return fc
except Exception as err:
self.logger.error(err)
finally:
cursor.close()
self.__release_cnx(cnx)
# 执行
def execute(self, sql, params=None):
cnx = self.get_cnx()
try:
self.logger.debug({"sql": sql, "params": params})
cursor = cnx.cursor()
if params is None:
cursor.execute(sql)
else:
cursor.execute(sql, params)
cnx.commit()
except Exception as err:
self.logger.error(err)
finally:
cursor.close()
self.__release_cnx(cnx)
# 批量执行
def executemany(self, sql, params):
cnx = self.get_cnx()
try:
self.logger.debug({"sql": sql, "params": params})
cursor = cnx.cursor()
cursor.prepare(sql)
cursor.executemany(None, params)
cnx.commit()
except Exception as err:
cnx.rollback()
self.logger.error(err)
finally:
cursor.close()
self.__release_cnx(cnx) | utils/database/oracle_tool.py | import cx_Oracle
from utils.logger.log import log
class oracle:
def __init__(self, configs):
self.logger = log.get_logger(category="oracle")
_user = configs["user"]
_password = configs["password"]
_host = configs["host"]
_port = configs["port"]
_sid = configs["sid"]
_min = configs["pool_min"]
_max = configs["pool_max"]
self.__connect(_user, _password, _host, _port, _sid, _min, _max)
def __connect(self, user, password, host, port, sid, _min, _max):
dsn = cx_Oracle.makedsn(host, port, sid)
self.logger.info("start connect oracle database [ user=%s, host=%s, port=%s ]", user, host, port)
self.pool = cx_Oracle.SessionPool(user=user, password=password, dsn=dsn, min=_min, max=_max, increment=1)
def get_cnx(self):
acq = self.pool.acquire()
return acq
def __release_cnx(self, cnx):
self.pool.release(cnx)
# 判断是否存在记录
def is_exist(self, sql, params):
res = self.select(sql=sql, params=params)
if len(res) > 0:
return True
else:
return False
# 查询
def select(self, sql, params=None):
cnx = self.get_cnx()
try:
self.logger.debug({"sql": sql, "params": params})
cursor = cnx.cursor()
if params is None:
cursor.execute(sql)
else:
cursor.execute(sql, params)
fc = cursor.fetchall()
return fc
except Exception as err:
self.logger.error(err)
finally:
cursor.close()
self.__release_cnx(cnx)
# 执行
def execute(self, sql, params=None):
cnx = self.get_cnx()
try:
self.logger.debug({"sql": sql, "params": params})
cursor = cnx.cursor()
if params is None:
cursor.execute(sql)
else:
cursor.execute(sql, params)
cnx.commit()
except Exception as err:
self.logger.error(err)
finally:
cursor.close()
self.__release_cnx(cnx)
# 批量执行
def executemany(self, sql, params):
cnx = self.get_cnx()
try:
self.logger.debug({"sql": sql, "params": params})
cursor = cnx.cursor()
cursor.prepare(sql)
cursor.executemany(None, params)
cnx.commit()
except Exception as err:
cnx.rollback()
self.logger.error(err)
finally:
cursor.close()
self.__release_cnx(cnx) | 0.177775 | 0.062617 |
import warnings
import argparse
from torch import optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from dogsvscats.data import get_datasets
from dogsvscats.model import train_model, load_model, MODELS
from dogsvscats.callbacks import EarlyStopping
from dogsvscats import config
warnings.filterwarnings("ignore", category=UserWarning)
parser = argparse.ArgumentParser()
parser.add_argument(
"-m",
"--model",
default=config.MODEL_NAME,
choices=MODELS,
help="Model name",
type=str,
)
parser.add_argument(
"-cp",
"--checkpoint-path",
default=config.CHECKPOINT_PATH,
help="Checkpoint Path",
type=str,
)
parser.add_argument("-w", "--workers", default=config.NW, help="Workers", type=int)
parser.add_argument(
"-bs", "--batch-size", default=config.BS, help="Batch size", type=int
)
parser.add_argument(
"-lr", "--learning-rate", default=config.LR, help="Learning rate", type=float
)
parser.add_argument("-e", "--epochs", default=config.EPOCHS, help="Epochs", type=int)
parser.add_argument(
"-sp",
"--scheduler-patience",
default=config.SCHEDULER_PATIENCE,
help="Scheduler patience",
type=int,
)
parser.add_argument(
"-esp",
"--early-stopping-patience",
default=config.EARLYSTOPPING_PATIENCE,
help="Early stopping patience",
type=int,
)
parser.add_argument("-d", "--debug", default=False, help="Debug", action="store_true")
parser.add_argument(
"-df", "--debug-frac", default=0.05, help="Debug fraction", type=float
)
parser.add_argument(
"-vf",
"--valid-frac",
default=config.VALID_FRAC,
help="Validation fraction",
type=float,
)
args = parser.parse_args()
train_ds, valid_ds, _ = get_datasets(
valid_frac=args.valid_frac, debug=args.debug, debug_frac=args.debug_frac
)
train_dl = DataLoader(train_ds, args.batch_size, shuffle=True, num_workers=args.workers)
valid_dl = DataLoader(valid_ds, args.batch_size, shuffle=True, num_workers=args.workers)
model = load_model(args.model)
optimizer = optim.SGD(model.parameters(), args.learning_rate, momentum=0.9)
scheduler = lr_scheduler.ReduceLROnPlateau(
optimizer, mode="max", patience=args.scheduler_patience, verbose=True
)
es = EarlyStopping(
patience=args.early_stopping_patience,
mode="max",
verbose=True,
path=args.checkpoint_path,
)
model = train_model(model, optimizer, scheduler, es, train_dl, valid_dl, args.epochs) | dogsvscats/train.py | import warnings
import argparse
from torch import optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from dogsvscats.data import get_datasets
from dogsvscats.model import train_model, load_model, MODELS
from dogsvscats.callbacks import EarlyStopping
from dogsvscats import config
warnings.filterwarnings("ignore", category=UserWarning)
parser = argparse.ArgumentParser()
parser.add_argument(
"-m",
"--model",
default=config.MODEL_NAME,
choices=MODELS,
help="Model name",
type=str,
)
parser.add_argument(
"-cp",
"--checkpoint-path",
default=config.CHECKPOINT_PATH,
help="Checkpoint Path",
type=str,
)
parser.add_argument("-w", "--workers", default=config.NW, help="Workers", type=int)
parser.add_argument(
"-bs", "--batch-size", default=config.BS, help="Batch size", type=int
)
parser.add_argument(
"-lr", "--learning-rate", default=config.LR, help="Learning rate", type=float
)
parser.add_argument("-e", "--epochs", default=config.EPOCHS, help="Epochs", type=int)
parser.add_argument(
"-sp",
"--scheduler-patience",
default=config.SCHEDULER_PATIENCE,
help="Scheduler patience",
type=int,
)
parser.add_argument(
"-esp",
"--early-stopping-patience",
default=config.EARLYSTOPPING_PATIENCE,
help="Early stopping patience",
type=int,
)
parser.add_argument("-d", "--debug", default=False, help="Debug", action="store_true")
parser.add_argument(
"-df", "--debug-frac", default=0.05, help="Debug fraction", type=float
)
parser.add_argument(
"-vf",
"--valid-frac",
default=config.VALID_FRAC,
help="Validation fraction",
type=float,
)
args = parser.parse_args()
train_ds, valid_ds, _ = get_datasets(
valid_frac=args.valid_frac, debug=args.debug, debug_frac=args.debug_frac
)
train_dl = DataLoader(train_ds, args.batch_size, shuffle=True, num_workers=args.workers)
valid_dl = DataLoader(valid_ds, args.batch_size, shuffle=True, num_workers=args.workers)
model = load_model(args.model)
optimizer = optim.SGD(model.parameters(), args.learning_rate, momentum=0.9)
scheduler = lr_scheduler.ReduceLROnPlateau(
optimizer, mode="max", patience=args.scheduler_patience, verbose=True
)
es = EarlyStopping(
patience=args.early_stopping_patience,
mode="max",
verbose=True,
path=args.checkpoint_path,
)
model = train_model(model, optimizer, scheduler, es, train_dl, valid_dl, args.epochs) | 0.665628 | 0.220143 |
import fcntl
import os
import os.path
import socket
import tempfile
import time
import warnings
from datetime import datetime
from datetime import timedelta
from tornado.escape import json_encode
from tornado.escape import to_unicode
from tornado.escape import utf8
from tornado.ioloop import IOLoop
from webtiles import config
class WebtilesSocketConnection(object):
def __init__(self, socketpath, logger):
self.crawl_socketpath = socketpath
self.logger = logger
self.message_callback = None
self.socket = None
self.socketpath = None
self.open = False
self.close_callback = None
self.msg_buffer = None
def connect(self, primary = True):
if not os.path.exists(self.crawl_socketpath):
# Wait until the socket exists
IOLoop.current().add_timeout(time.time() + 1, self.connect)
return
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
self.socket.settimeout(10)
# Set close-on-exec
flags = fcntl.fcntl(self.socket.fileno(), fcntl.F_GETFD)
fcntl.fcntl(self.socket.fileno(), flags | fcntl.FD_CLOEXEC)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if (self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF) < 2048):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 2048)
# on linux, the following may have no effect (setting SO_RCVBUF is
# often documented as having no effect), but on other unixes, it
# matters quite a bit. The choice of 212992 is based on the linux
# default.
if (self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF) < 212992):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 212992)
# Bind to a temp path
# there's a race condition here...
# note that mktmp here is deprecated, and we may eventually need to
# do something different. One simple idea is to keep sockets in a
# temporary directory generated from tempfile calls (i.e
# tempfile.mkdtemp), but use our own naming scheme. Because this is a
# socket, regular calls in tempfile are not appropriate.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.socketpath = tempfile.mktemp(dir=config.get('server_socket_path'),
prefix="crawl", suffix=".socket")
self.socket.bind(self.socketpath)
# Install handler
IOLoop.current().add_handler(self.socket.fileno(),
self._handle_read,
IOLoop.ERROR | IOLoop.READ)
msg = json_encode({
"msg": "attach",
"primary": primary
})
self.open = True
self.send_message(utf8(msg))
def _handle_read(self, fd, events):
if events & IOLoop.READ:
data = self.socket.recv(128 * 1024, socket.MSG_DONTWAIT)
self._handle_data(data)
if events & IOLoop.ERROR:
pass
def _handle_data(self, data): # type: (bytes) -> None
if self.msg_buffer is not None:
data = self.msg_buffer + data
# TODO: is this check safe? Decoding won't always work for
# fragmented messages...
if data[-1] != b'\n'[0]:
# All messages from crawl end with \n.
# If this one doesn't, it's fragmented.
self.msg_buffer = data
else:
self.msg_buffer = None
if self.message_callback:
self.message_callback(to_unicode(data))
def send_message(self, data): # type: (str) -> None
start = datetime.now()
try:
self.socket.sendto(utf8(data), self.crawl_socketpath)
except socket.timeout:
self.logger.warning("Game socket send timeout", exc_info=True)
self.close()
return
end = datetime.now()
if end - start >= timedelta(seconds=1):
self.logger.warning("Slow socket send: " + str(end - start))
def close(self):
if self.socket:
IOLoop.current().remove_handler(self.socket.fileno())
self.socket.close()
os.remove(self.socketpath)
self.socket = None
if self.close_callback:
self.close_callback() | crawl-ref/source/webserver/webtiles/connection.py | import fcntl
import os
import os.path
import socket
import tempfile
import time
import warnings
from datetime import datetime
from datetime import timedelta
from tornado.escape import json_encode
from tornado.escape import to_unicode
from tornado.escape import utf8
from tornado.ioloop import IOLoop
from webtiles import config
class WebtilesSocketConnection(object):
def __init__(self, socketpath, logger):
self.crawl_socketpath = socketpath
self.logger = logger
self.message_callback = None
self.socket = None
self.socketpath = None
self.open = False
self.close_callback = None
self.msg_buffer = None
def connect(self, primary = True):
if not os.path.exists(self.crawl_socketpath):
# Wait until the socket exists
IOLoop.current().add_timeout(time.time() + 1, self.connect)
return
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
self.socket.settimeout(10)
# Set close-on-exec
flags = fcntl.fcntl(self.socket.fileno(), fcntl.F_GETFD)
fcntl.fcntl(self.socket.fileno(), flags | fcntl.FD_CLOEXEC)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if (self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF) < 2048):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 2048)
# on linux, the following may have no effect (setting SO_RCVBUF is
# often documented as having no effect), but on other unixes, it
# matters quite a bit. The choice of 212992 is based on the linux
# default.
if (self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF) < 212992):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 212992)
# Bind to a temp path
# there's a race condition here...
# note that mktmp here is deprecated, and we may eventually need to
# do something different. One simple idea is to keep sockets in a
# temporary directory generated from tempfile calls (i.e
# tempfile.mkdtemp), but use our own naming scheme. Because this is a
# socket, regular calls in tempfile are not appropriate.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.socketpath = tempfile.mktemp(dir=config.get('server_socket_path'),
prefix="crawl", suffix=".socket")
self.socket.bind(self.socketpath)
# Install handler
IOLoop.current().add_handler(self.socket.fileno(),
self._handle_read,
IOLoop.ERROR | IOLoop.READ)
msg = json_encode({
"msg": "attach",
"primary": primary
})
self.open = True
self.send_message(utf8(msg))
def _handle_read(self, fd, events):
if events & IOLoop.READ:
data = self.socket.recv(128 * 1024, socket.MSG_DONTWAIT)
self._handle_data(data)
if events & IOLoop.ERROR:
pass
def _handle_data(self, data): # type: (bytes) -> None
if self.msg_buffer is not None:
data = self.msg_buffer + data
# TODO: is this check safe? Decoding won't always work for
# fragmented messages...
if data[-1] != b'\n'[0]:
# All messages from crawl end with \n.
# If this one doesn't, it's fragmented.
self.msg_buffer = data
else:
self.msg_buffer = None
if self.message_callback:
self.message_callback(to_unicode(data))
def send_message(self, data): # type: (str) -> None
start = datetime.now()
try:
self.socket.sendto(utf8(data), self.crawl_socketpath)
except socket.timeout:
self.logger.warning("Game socket send timeout", exc_info=True)
self.close()
return
end = datetime.now()
if end - start >= timedelta(seconds=1):
self.logger.warning("Slow socket send: " + str(end - start))
def close(self):
if self.socket:
IOLoop.current().remove_handler(self.socket.fileno())
self.socket.close()
os.remove(self.socketpath)
self.socket = None
if self.close_callback:
self.close_callback() | 0.224735 | 0.056444 |
import numpy as np
import cv2
from typing import Any, Dict, List, Optional, Type, Union
import gym
import habitat
from habitat.config import Config
from habitat.core.dataset import Dataset, Episode
from habitat.core.simulator import Observations
class ModifiedEnvForVis(habitat.Env):
    """habitat.Env that can be reset directly to a chosen episode instead of
    the next one yielded by the episode iterator."""

    def reset_to_episode(self, target_episode: Episode) -> Observations:
        r"""Reset the environment to *target_episode*.

        :param target_episode: the episode to load next.
        :return: initial observations from the environment.
        """
        self._reset_stats()
        assert len(self.episodes) > 0, "Episodes list is empty"
        # Delete the shortest path cache of the current episode.
        # Caching it for the next time we see this episode isn't really
        # worth it.  (The original performed this identical check twice;
        # once is enough.)
        if self._current_episode is not None:
            self._current_episode._shortest_path_cache = None
        # Jump straight to the requested episode instead of advancing the
        # iterator (the original `next(self._episode_iterator)` path).
        self._current_episode = target_episode
        self.reconfigure(self._config)
        observations = self.task.reset(episode=self.current_episode)
        self._task.measurements.reset_measures(
            episode=self.current_episode, task=self.task
        )
        return observations
class ModifiedRLEnvForVis(habitat.RLEnv):
    r"""Reinforcement Learning (RL) environment wrapper, a ``gym.Env``.

    Wraps :ref:`ModifiedEnvForVis` for RL users.  Subclasses must define
    :ref:`get_reward_range()`, :ref:`get_reward()`, :ref:`get_done()` and
    :ref:`get_info()`; ``reset()`` and ``step()`` are inherited.
    """
    _env: ModifiedEnvForVis

    def __init__(self, config: Config, dataset: Optional[Dataset] = None) -> None:
        """Build the wrapped env and mirror its spaces.

        :param config: config to construct :ref:`Env`
        :param dataset: dataset to construct :ref:`Env`.
        """
        env = ModifiedEnvForVis(config, dataset)
        self._env = env
        self.observation_space = env.observation_space
        self.action_space = env.action_space
        self.number_of_episodes = env.number_of_episodes
        self.reward_range = self.get_reward_range()

    def reset_to_episode(self, target_episode: Episode) -> Observations:
        """Delegate a direct-episode reset to the wrapped env."""
        return self._env.reset_to_episode(target_episode)
class SimpleRLEnvForVis(ModifiedRLEnvForVis):
    """Minimal RL wrapper for visualization runs: constant zero reward,
    done when the episode is over.

    (Stray dataset residue fused onto the final line of the original was
    removed.)
    """

    def get_reward_range(self):
        return [-1, 1]

    def get_reward(self, observations):
        # Visualization only — no learning signal is needed.
        return 0

    def get_done(self, observations):
        return self.habitat_env.episode_over

    def get_info(self, observations):
        return self.habitat_env.get_metrics()
import cv2
from typing import Any, Dict, List, Optional, Type, Union
import gym
import habitat
from habitat.config import Config
from habitat.core.dataset import Dataset, Episode
from habitat.core.simulator import Observations
class ModifiedEnvForVis(habitat.Env):
    """habitat.Env that can be reset directly to a chosen episode instead of
    the next one yielded by the episode iterator."""

    def reset_to_episode(self, target_episode: Episode) -> Observations:
        r"""Reset the environment to *target_episode*.

        :param target_episode: the episode to load next.
        :return: initial observations from the environment.
        """
        self._reset_stats()
        assert len(self.episodes) > 0, "Episodes list is empty"
        # Delete the shortest path cache of the current episode.
        # Caching it for the next time we see this episode isn't really
        # worth it.  (The original performed this identical check twice;
        # once is enough.)
        if self._current_episode is not None:
            self._current_episode._shortest_path_cache = None
        # Jump straight to the requested episode instead of advancing the
        # iterator (the original `next(self._episode_iterator)` path).
        self._current_episode = target_episode
        self.reconfigure(self._config)
        observations = self.task.reset(episode=self.current_episode)
        self._task.measurements.reset_measures(
            episode=self.current_episode, task=self.task
        )
        return observations
class ModifiedRLEnvForVis(habitat.RLEnv):
    r"""Reinforcement Learning (RL) environment wrapper, a ``gym.Env``.

    Wraps :ref:`ModifiedEnvForVis` for RL users.  Subclasses must define
    :ref:`get_reward_range()`, :ref:`get_reward()`, :ref:`get_done()` and
    :ref:`get_info()`; ``reset()`` and ``step()`` are inherited.
    """
    _env: ModifiedEnvForVis

    def __init__(self, config: Config, dataset: Optional[Dataset] = None) -> None:
        """Build the wrapped env and mirror its spaces.

        :param config: config to construct :ref:`Env`
        :param dataset: dataset to construct :ref:`Env`.
        """
        env = ModifiedEnvForVis(config, dataset)
        self._env = env
        self.observation_space = env.observation_space
        self.action_space = env.action_space
        self.number_of_episodes = env.number_of_episodes
        self.reward_range = self.get_reward_range()

    def reset_to_episode(self, target_episode: Episode) -> Observations:
        """Delegate a direct-episode reset to the wrapped env."""
        return self._env.reset_to_episode(target_episode)
class SimpleRLEnvForVis(ModifiedRLEnvForVis):
    """Minimal RL wrapper for visualization runs: constant zero reward,
    done when the episode is over.

    (Stray dataset residue fused onto the final line of the original was
    removed.)
    """

    def get_reward_range(self):
        return [-1, 1]

    def get_reward(self, observations):
        # Visualization only — no learning signal is needed.
        return 0

    def get_done(self, observations):
        return self.habitat_env.episode_over

    def get_info(self, observations):
        return self.habitat_env.get_metrics()
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import numpy as np
from matplotlib.colors import LinearSegmentedColormap
def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    :param y_true: list of true labels
    :param y_pred: list of predicted labels
    :param classes: list of strings containing names of the classes
    :param normalize: boolean; if True, normalize each row to fractions
    :param title: string
    :param cmap: colormap
    :return:
    """
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    if normalize:
        # Row-normalize so each true class sums to 1.  (The old code
        # normalized unconditionally, which both ignored the flag and made
        # format(..., 'd') below raise for normalize=False.)
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    return
def plot_metrics(metrics, acc, classes, full_acq=False):
    """
    This function prints and displays the metrics as a table
    :param metrics: array of metrics recall and precision
    :param acc: accuracy
    :param classes: names of the classes
    :param full_acq: boolean to change the title of the figure
    :return: the matplotlib Axes holding the table

    (Stray dataset residue fused onto the final line of the original was
    removed.)
    """
    # Single-color (white) colormap so the "table" has a plain background.
    colors = [(1, 1, 1), (1, 1, 1), (1, 1, 1)]
    cm = LinearSegmentedColormap.from_list("white", colors, N=1)
    fig, ax = plt.subplots(figsize=(4, 4))
    im = ax.imshow(metrics, interpolation=None, cmap=cm)
    # We want to show all ticks...
    if full_acq:
        title = "Accuracy over acquisitions = " + str(int(10000 * acc) / 100) + "%"
    else:
        title = "Accuracy over slices = " + str(int(10000 * acc) / 100) + "%"
    ax.set(xticks=np.arange(metrics.shape[1]),
           yticks=np.arange(metrics.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=["Recall", "Precision"],
           title=title
           )
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = '.2f'
    for i in range(metrics.shape[0]):
        for j in range(metrics.shape[1]):
            ax.text(j, i, format(metrics[i, j], fmt),
                    ha="center", va="center",
                    color="black")
    fig.tight_layout()
    return ax
from sklearn.metrics import confusion_matrix
import numpy as np
from matplotlib.colors import LinearSegmentedColormap
def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    :param y_true: list of true labels
    :param y_pred: list of predicted labels
    :param classes: list of strings containing names of the classes
    :param normalize: boolean; if True, normalize each row to fractions
    :param title: string
    :param cmap: colormap
    :return:
    """
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    if normalize:
        # Row-normalize so each true class sums to 1.  (The old code
        # normalized unconditionally, which both ignored the flag and made
        # format(..., 'd') below raise for normalize=False.)
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    return
def plot_metrics(metrics, acc, classes, full_acq=False):
    """
    This function prints and displays the metrics as a table
    :param metrics: array of metrics recall and precision
    :param acc: accuracy
    :param classes: names of the classes
    :param full_acq: boolean to change the title of the figure
    :return: the matplotlib Axes holding the table

    (Stray dataset residue fused onto the final line of the original was
    removed.)
    """
    # Single-color (white) colormap so the "table" has a plain background.
    colors = [(1, 1, 1), (1, 1, 1), (1, 1, 1)]
    cm = LinearSegmentedColormap.from_list("white", colors, N=1)
    fig, ax = plt.subplots(figsize=(4, 4))
    im = ax.imshow(metrics, interpolation=None, cmap=cm)
    # We want to show all ticks...
    if full_acq:
        title = "Accuracy over acquisitions = " + str(int(10000 * acc) / 100) + "%"
    else:
        title = "Accuracy over slices = " + str(int(10000 * acc) / 100) + "%"
    ax.set(xticks=np.arange(metrics.shape[1]),
           yticks=np.arange(metrics.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=["Recall", "Precision"],
           title=title
           )
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = '.2f'
    for i in range(metrics.shape[0]):
        for j in range(metrics.shape[1]):
            ax.text(j, i, format(metrics[i, j], fmt),
                    ha="center", va="center",
                    color="black")
    fig.tight_layout()
    return ax
"""THOR"""
import numpy as np
from mindspore.ops import functional as F, composite as C, operations as P
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter, ParameterTuple
from mindspore.common.tensor import Tensor
import mindspore.nn as nn
import mindspore.common.dtype as mstype
from mindspore._checkparam import Validator
from mindspore.nn.optim.optimizer import Optimizer
from mindspore.parallel._utils import _get_device_num, _get_gradients_mean
from mindspore import context
from mindspore.context import ParallelMode
from mindspore.nn.layer import Dense_Thor, Conv2d_Thor, Embedding_Thor
from mindspore.nn.wrap import DistributedGradReducer
from mindspore.train.train_thor.convert_utils import ConvertNetUntils
from mindspore.parallel._auto_parallel_context import auto_parallel_context
# Enumerates types of Layer
Other = -1       # any layer THOR does not precondition (see find_net_layertype_recur)
Conv = 1         # Conv2d_Thor
FC = 2           # Dense_Thor
Embedding = 3    # Embedding_Thor
LayerNorm = 4    # nn.LayerNorm
BatchNorm = 5    # nn.BatchNorm2d
# Hyper-map graph used to apply the momentum update across all parameters.
_momentum_opt = C.MultitypeFuncGraph("momentum_opt")
# Element-wise tensor addition, used when folding weight decay into grads.
op_add = P.AddN()
# Hyper-map graph used to apply weight decay across all parameters.
apply_decay = C.MultitypeFuncGraph("apply_decay")
@apply_decay.register("Number", "Bool", "Tensor", "Tensor")
def _tensor_apply_decay(weight_decay, if_apply, weight, gradient):
    """Return the gradient with L2 weight decay folded in when enabled."""
    if not if_apply:
        return gradient
    return op_add((weight * weight_decay, gradient))
@_momentum_opt.register("Function", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor")
def _tensor_run_opt_ext(opt, momentum, learning_rate, gradient, weight, moment):
    """Run the fused momentum update on one parameter; returns a success flag."""
    ok = True
    ok = F.depend(ok, opt(weight, moment, learning_rate, gradient, momentum))
    return ok
# Tiling block size used when padding matrix/channel dims to device-friendly
# shapes in caculate_device_shape — presumably the Ascend cube unit; confirm.
C0 = 16
def caculate_device_shape(matrix_dim, channel, is_A):
    """Return ``((n, n, C0, C0), dim)``: the tiled 4-D device shape and the
    (possibly padded) matrix dimension.

    For an input-side matrix (``is_A=True``) whose channel count is smaller
    than the tile size ``C0``, ``matrix_dim`` is rescaled so each channel's
    contribution is padded up to ``C0`` before tiling.

    (The original had a dead ``ll = (0)`` placeholder and two identical
    branches; behavior is unchanged.)
    """
    if is_A and channel // C0 == 0:
        # channel < C0: pad each channel's contribution up to C0.
        matrix_dim = (matrix_dim / channel) * C0
    return (int(matrix_dim // C0), int(matrix_dim // C0), C0, C0), int(matrix_dim)
def caculate_matmul_shape(matrix_A_dim, matrix_G_dim, split_dim):
    """Compute the batched shapes used for the split matrix multiplications.

    Each dimension is cut into split_dim-sized diagonal blocks; a dimension
    smaller than split_dim forms a single block of its own size.
    Returns (matrix_A_shape, matrix_G_shape).
    """
    def _blocks(dim):
        # -> (number of blocks, block size) for one dimension.
        if dim % split_dim == 0:
            return dim // split_dim, split_dim
        if dim < split_dim:
            return 1, dim
        return dim // split_dim + 1, split_dim

    batch_w, split_dimA = _blocks(matrix_A_dim)
    batch_h, split_dimG = _blocks(matrix_G_dim)
    matrix_A_shape = (batch_h, batch_w, split_dimA, split_dimA)
    matrix_G_shape = (batch_h, split_dimG, split_dimG)
    return matrix_A_shape, matrix_G_shape
def find_net_layertype_recur(net, layertype_map):
    """Append the THOR layer-type code of every leaf cell of *net* to
    *layertype_map*, depth-first.

    THOR-converted layers map to Conv/FC/Embedding; norm layers to
    LayerNorm/BatchNorm; recognised but unconverted layers to Other;
    anything else is recursed into.  (A leftover debug ``print`` was
    removed.)
    """
    cells = net.name_cells()
    for name in cells:
        subcell = cells[name]
        if subcell == net:
            continue
        elif isinstance(subcell, Conv2d_Thor):
            layertype_map.append(Conv)
        elif isinstance(subcell, Dense_Thor):
            layertype_map.append(FC)
        elif isinstance(subcell, Embedding_Thor):
            layertype_map.append(Embedding)
        elif isinstance(subcell, nn.LayerNorm):
            layertype_map.append(LayerNorm)
        elif isinstance(subcell, nn.BatchNorm2d):
            layertype_map.append(BatchNorm)
        elif isinstance(subcell, (nn.Conv2d, nn.Dense, nn.Embedding, nn.Conv2dTranspose, nn.Conv1d, nn.Conv1dTranspose,
                                  nn.BatchNorm1d, nn.GroupNorm, nn.GlobalBatchNorm)):
            layertype_map.append(Other)
        else:
            find_net_layertype_recur(subcell, layertype_map)
def get_net_layertype_mask(net):
    """Return the list of layer-type codes for *net*
    (see find_net_layertype_recur)."""
    type_codes = []
    find_net_layertype_recur(net, type_codes)
    return type_codes
def get_layer_counter(layer_type, layer_counter, params, idx):
    """Advance the layer counter once the last parameter of the current
    layer has been visited.

    Norm layers finish on their "beta" parameter; Conv/FC layers finish on
    their "bias" (or on the weight itself when the following parameter is
    not a bias); every other layer type advances on each parameter.
    """
    param_name = params[idx].name.lower()
    if layer_type not in [Conv, FC, LayerNorm, BatchNorm]:
        return layer_counter + 1
    if layer_type in [LayerNorm, BatchNorm]:
        if "beta" in param_name:
            return layer_counter + 1
        return layer_counter
    # Conv / FC: bias ends the layer; a bias-less layer ends on its weight.
    if "bias" in param_name:
        return layer_counter + 1
    if idx < len(params) - 1 and "bias" not in params[idx + 1].name.lower():
        return layer_counter + 1
    return layer_counter
def THOR(net, learning_rate, damping, momentum, weight_decay=0.0, loss_scale=1.0, batch_size=32,
         use_nesterov=False, decay_filter=lambda x: x.name not in [], split_indices=None):
    """Convert *net* to its THOR form and build the optimizer matching the
    current device target (Ascend or GPU)."""
    context.set_context(max_call_depth=10000)
    ConvertNetUntils().convert_to_thor_net(net)
    device_target = context.get_context("device_target")
    if device_target == "Ascend":
        return THOR_Ascend(net, learning_rate, damping, momentum, weight_decay, loss_scale, batch_size, decay_filter,
                           split_indices=split_indices)
    return THOR_GPU(net, learning_rate, damping, momentum, weight_decay, loss_scale, batch_size,
                    use_nesterov, decay_filter, split_indices=split_indices)
class THOR_GPU(Optimizer):
    """
    THOR second-order optimizer, GPU backend.

    Preconditions each layer's gradient with the inverses of the Kronecker
    factors (matrix_A: input covariance, matrix_G: output-gradient
    covariance) collected by the *_Thor layers of `net`, then applies a
    momentum update.  When `self.thor` is False, the previously stored
    inverses are reused instead of being recomputed.

    Fix vs. original: the `split_indices` branches were inverted — a
    user-supplied value was discarded and `None` was passed to the parallel
    context.
    """
    def __init__(self, net, learning_rate, damping, momentum, weight_decay=0.0, loss_scale=1.0, batch_size=32,
                 use_nesterov=False, decay_filter=lambda x: x.name not in [], split_indices=None):
        params = filter(lambda x: x.requires_grad, net.get_parameters())
        super(THOR_GPU, self).__init__(learning_rate, params, weight_decay, loss_scale)
        Validator.check_value_type("momentum", momentum, [float], self.cls_name)
        if isinstance(momentum, float) and momentum < 0.0:
            raise ValueError("momentum should be at least 0.0, but got momentum {}".format(momentum))
        self.momentum = Parameter(Tensor(momentum, mstype.float32), name="momentum")
        self.params = self.parameters
        self.use_nesterov = Validator.check_bool(use_nesterov)
        self.moments = self.params.clone(prefix="moments", init='zeros')
        self.hyper_map = C.HyperMap()
        self.opt = P.ApplyMomentum(use_nesterov=self.use_nesterov)
        self.net = net
        # Covariance/normalizer parameters registered by the *_Thor layers.
        self.matrix_A_cov = ParameterTuple(filter(lambda x: 'matrix_A' in x.name, net.get_parameters()))
        self.matrix_G_cov = ParameterTuple(filter(lambda x: 'matrix_G' in x.name, net.get_parameters()))
        self.A_normalizer = ParameterTuple(filter(lambda x: 'A_normalizer' in x.name, net.get_parameters()))
        self.G_normalizer = ParameterTuple(filter(lambda x: 'G_normalizer' in x.name, net.get_parameters()))
        self.transpose = P.Transpose()
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.matmul = P.MatMul()
        self.assign = P.Assign()
        self.mul = P.Mul()
        self.damping = damping
        self.gather = P.GatherV2()
        self.one = Tensor(1, mstype.int32)
        self.batch_size = Tensor(batch_size, mstype.float32)
        self.loss_scale = Tensor(1 / (loss_scale * loss_scale), mstype.float32)
        self.batch_size_scale = Tensor(batch_size * batch_size, mstype.float32)
        self.feature_map = Tensor(1.0, mstype.float32)
        self.axis = 0
        self.cov_step = Parameter(initializer(0, [1], mstype.int32), name="cov_step", requires_grad=False)
        self.cast = P.Cast()
        self.sqrt = P.Sqrt()
        self.eye = P.Eye()
        split_dim = 128
        self.embedding_cholesky = P.CholeskyTrsm()
        self.cholesky = P.CholeskyTrsm(split_dim=split_dim)
        self.vector_matmul = P.BatchMatMul(transpose_a=True)
        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.inv = P.Reciprocal()
        self.square = P.Square()
        self.expand = P.ExpandDims()
        self.thor = True
        self.matrix_A = ()
        self.matrix_G = ()
        self.matrix_A_shape = ()
        self.thor_layer_count = 0
        self.conv_layer_count = 0
        # Per-parameter index maps: THOR layer index, conv index, layer type.
        self.weight_fim_idx_map = ()
        self.weight_conv_idx_map = ()
        self.weight_layerType_idx_map = ()
        layer_type_map = get_net_layertype_mask(net)
        layer_counter = 0
        for idx in range(len(self.params)):
            layer_type = layer_type_map[layer_counter]
            weight = self.params[idx]
            weight_shape = self.shape(weight)
            if layer_type in [Conv, FC] and "bias" not in self.params[idx].name.lower():
                in_channels = weight_shape[1]
                out_channels = weight_shape[0]
                matrix_A_dim = in_channels
                if layer_type == Conv:
                    matrix_A_dim = in_channels * weight_shape[2] * weight_shape[3]
                matrix_G_dim = out_channels
                matrix_A_shape, matrix_G_shape = caculate_matmul_shape(matrix_A_dim, matrix_G_dim, split_dim)
                matrix_A_inv = Parameter(np.zeros(matrix_A_shape).astype(np.float32),
                                         name='matrix_A_inv_' + str(self.thor_layer_count), requires_grad=False)
                matrix_G_inv = Parameter(np.zeros(matrix_G_shape).astype(np.float32),
                                         name="matrix_G_inv_" + str(self.thor_layer_count), requires_grad=False)
                self.matrix_A = self.matrix_A + (matrix_A_inv,)
                self.matrix_G = self.matrix_G + (matrix_G_inv,)
                self.matrix_A_shape = self.matrix_A_shape + (matrix_A_shape,)
            elif layer_type == Embedding:
                vocab_size = weight_shape[0]
                embedding_size = weight_shape[1]
                matrix_A_inv = Parameter(Tensor(np.zeros([vocab_size]).astype(np.float32)),
                                         name='matrix_A_inv_' + str(self.thor_layer_count), requires_grad=False)
                matrix_G_inv = Parameter(Tensor(np.zeros([embedding_size, embedding_size]).astype(np.float32)),
                                         name="matrix_G_inv_" + str(self.thor_layer_count), requires_grad=False)
                self.matrix_A = self.matrix_A + (matrix_A_inv,)
                self.matrix_G = self.matrix_G + (matrix_G_inv,)
                self.matrix_A_shape = self.matrix_A_shape + ((vocab_size,),)
            if layer_type in [Conv, FC, Embedding] and "bias" not in self.params[idx].name.lower():
                self.weight_fim_idx_map = self.weight_fim_idx_map + (self.thor_layer_count,)
                self.weight_layerType_idx_map = self.weight_layerType_idx_map + (layer_type,)
                self.thor_layer_count = self.thor_layer_count + 1
                if layer_type == Conv:
                    self.weight_conv_idx_map = self.weight_conv_idx_map + (self.conv_layer_count,)
                    self.conv_layer_count = self.conv_layer_count + 1
                else:
                    self.weight_conv_idx_map = self.weight_conv_idx_map + (-1,)
            else:
                self.weight_fim_idx_map = self.weight_fim_idx_map + (-1,)
                self.weight_conv_idx_map = self.weight_conv_idx_map + (-1,)
                if layer_type == LayerNorm:
                    self.weight_layerType_idx_map = self.weight_layerType_idx_map + (LayerNorm,)
                else:
                    self.weight_layerType_idx_map = self.weight_layerType_idx_map + (Other,)
            # bert.cls1.output_bias: not a network layer, only a trainable param
            if "output_bias" not in self.params[idx].name.lower():
                layer_counter = get_layer_counter(layer_type, layer_counter, self.params, idx)
        self.matrix_A = ParameterTuple(self.matrix_A)
        self.matrix_G = ParameterTuple(self.matrix_G)
        self.weight_decay = weight_decay
        self.decay_flags = tuple(decay_filter(x) for x in self.parameters)
        self.update_gradient = P.UpdateThorGradient(split_dim=split_dim)
        self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
        self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
        if self.is_distributed:
            mean = _get_gradients_mean()
            degree = _get_device_num()
            if self.conv_layer_count > 0:
                # Default to one fusion group spanning all layers when the
                # caller did not provide split indices.  (These branches were
                # swapped in the original.)
                if not split_indices:
                    self.split_indices = [len(self.matrix_A) - 1]
                else:
                    self.split_indices = split_indices
                auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum2")
                auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum4")
                auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum6")
                auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum8")
                # NOTE(review): all four reducers are built over self.matrix_A —
                # confirm whether the G/Gmax reducers should use self.matrix_G.
                self.grad_reducer_Amax = DistributedGradReducer(self.matrix_A, mean, degree, fusion_type=2)
                self.grad_reducer_Gmax = DistributedGradReducer(self.matrix_A, mean, degree, fusion_type=4)
                self.grad_reducer_A = DistributedGradReducer(self.matrix_A, mean, degree, fusion_type=6)
                self.grad_reducer_G = DistributedGradReducer(self.matrix_A, mean, degree, fusion_type=8)
            else:
                if not split_indices:
                    self.split_indices = [len(self.params) - 1]
                else:
                    self.split_indices = split_indices
                auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum3")
                self.grad_reducer_g = DistributedGradReducer(self.params, mean, degree, fusion_type=3)

    def _get_Ainv_Ginv_list(self, gradients, damping_step, matrix_a_allreduce, matrix_g_allreduce):
        """Compute the damped inverse of each layer's A and G covariance and
        append them to the given tuples; returns the extended tuples."""
        for i in range(len(self.params)):
            thor_layer_count = self.weight_fim_idx_map[i]
            conv_layer_count = self.weight_conv_idx_map[i]
            layer_type = self.weight_layerType_idx_map[i]
            if layer_type in [Conv, FC, Embedding]:
                g = gradients[i]
                matrix_A = self.matrix_A_cov[thor_layer_count]
                matrix_G = self.matrix_G_cov[thor_layer_count]
                matrix_A = F.depend(matrix_A, g)
                matrix_G = F.depend(matrix_G, g)
                dampingA = damping_step
                dampingG = damping_step
                feature_map = self.feature_map
                if layer_type == Conv:
                    A_normalizer = self.A_normalizer[conv_layer_count]
                    G_normalizer = self.G_normalizer[conv_layer_count]
                    A_normalizer = F.depend(A_normalizer, g)
                    G_normalizer = F.depend(G_normalizer, g)
                    dampingA = self.mul(damping_step, 1.0 / A_normalizer)
                    dampingG = self.mul(damping_step, 1.0 / G_normalizer)
                    feature_map = self.sqrt(1.0 / A_normalizer)
                A_shape = self.shape(matrix_A)
                A_eye = self.eye(A_shape[0], A_shape[0], mstype.float32)
                dampingA = self.sqrt(dampingA)
                dampingG = self.sqrt(dampingG)
                G_shape = self.shape(matrix_G)
                G_eye = self.eye(G_shape[0], G_shape[1], mstype.float32)
                matrix_G = self.mul(matrix_G, self.loss_scale)
                matrix_G = self.mul(matrix_G, self.batch_size_scale)
                matrix_G = matrix_G + dampingG * G_eye
                if layer_type == Embedding:
                    # Embedding A is diagonal: invert element-wise.
                    A_eye = P.OnesLike()(matrix_A)
                    matrix_A = self.mul(matrix_A, 1.0 / self.batch_size)
                    matrix_A = matrix_A + dampingA * A_eye
                    matrix_A = self.inv(matrix_A)
                    matrix_G = self.embedding_cholesky(matrix_G)
                    matrix_G = self.matmul(matrix_G, matrix_G)
                else:
                    matrix_A = matrix_A + dampingA * A_eye
                    matrix_A = self.cholesky(matrix_A)
                    matrix_A = self.vector_matmul(matrix_A, matrix_A)
                    matrix_A = P.BroadcastTo(self.matrix_A_shape[thor_layer_count])(matrix_A)
                    matrix_G = self.cholesky(matrix_G)
                    matrix_G = self.vector_matmul(matrix_G, matrix_G)
                    matrix_A = self.mul(matrix_A, feature_map)
                    matrix_G = self.mul(matrix_G, feature_map)
                matrix_a_allreduce = matrix_a_allreduce + (matrix_A,)
                matrix_g_allreduce = matrix_g_allreduce + (matrix_G,)
        return matrix_a_allreduce, matrix_g_allreduce

    def construct(self, gradients):
        """Precondition *gradients* with the THOR factors, then apply the
        momentum update to every parameter."""
        params = self.params
        moments = self.moments
        gradients = self.scale_grad(gradients)
        damping_step = self.gather(self.damping, self.cov_step, self.axis)
        damping_step = self.cast(damping_step, mstype.float32)
        new_grads = ()
        if self.thor:
            # Recompute the factor inverses this step.
            matrix_Ainv_list = ()
            matrix_Ginv_list = ()
            matrix_A_allreduce, matrix_G_allreduce = self._get_Ainv_Ginv_list(gradients, damping_step,
                                                                              matrix_Ainv_list, matrix_Ginv_list)
            if self.is_distributed and self.conv_layer_count > 0:
                matrix_A_allreduce = self.grad_reducer_A(matrix_A_allreduce)
                matrix_G_allreduce = self.grad_reducer_G(matrix_G_allreduce)
            for i in range(len(self.params)):
                g = gradients[i]
                thor_layer_count = self.weight_fim_idx_map[i]
                conv_layer_count = self.weight_conv_idx_map[i]
                layer_type = self.weight_layerType_idx_map[i]
                if layer_type in [Conv, FC]:
                    g_shape = self.shape(g)
                    g = self.reshape(g, (g_shape[0], -1))
                    matrix_A = matrix_A_allreduce[thor_layer_count]
                    matrix_G = matrix_G_allreduce[thor_layer_count]
                    g = self.update_gradient(matrix_G, g, matrix_A)
                    # Store the inverses for reuse in non-THOR steps.
                    fake_A = self.assign(self.matrix_A[thor_layer_count], matrix_A)
                    fake_G = self.assign(self.matrix_G[thor_layer_count], matrix_G)
                    g = F.depend(g, fake_A)
                    g = F.depend(g, fake_G)
                    if conv_layer_count != -1:
                        g = self.reshape(g, g_shape)
                elif layer_type == Embedding:
                    matrix_A = matrix_A_allreduce[thor_layer_count]
                    matrix_G = matrix_G_allreduce[thor_layer_count]
                    fake_A = self.assign(self.matrix_A[thor_layer_count], matrix_A)
                    fake_G = self.assign(self.matrix_G[thor_layer_count], matrix_G)
                    g = F.depend(g, fake_A)
                    g = F.depend(g, fake_G)
                    temp_a = self.expand(matrix_A, 1)
                    g = self.mul(temp_a, g)
                    g = self.matmul(g, matrix_G)
                elif layer_type == LayerNorm:
                    # Diagonal Fisher preconditioning for norm parameters.
                    damping = self.sqrt(damping_step)
                    normalizer = self.batch_size
                    normalizer = self.cast(normalizer, mstype.float32)
                    fim_cov = self.square(g)
                    fim_cov = self.mul(fim_cov, 1.0 / normalizer)
                    fim_cov = fim_cov + damping
                    fim_inv = self.inv(fim_cov)
                    g = self.mul(fim_inv, g)
                new_grads = new_grads + (g,)
        else:
            # Reuse the inverses stored during the last THOR step.
            for j in range(len(self.params)):
                g = gradients[j]
                thor_layer_count = self.weight_fim_idx_map[j]
                conv_layer_count = self.weight_conv_idx_map[j]
                layer_type = self.weight_layerType_idx_map[j]
                if layer_type in [Conv, FC]:
                    g_shape = self.shape(g)
                    g = self.reshape(g, (g_shape[0], -1))
                    matrix_A = self.matrix_A[thor_layer_count]
                    matrix_G = self.matrix_G[thor_layer_count]
                    g = self.update_gradient(matrix_G, g, matrix_A)
                    if conv_layer_count != -1:
                        g = self.reshape(g, g_shape)
                elif layer_type == Embedding:
                    matrix_A = self.matrix_A[thor_layer_count]
                    matrix_G = self.matrix_G[thor_layer_count]
                    g = gradients[j]
                    temp_a = self.expand(matrix_A, 1)
                    g = self.mul(temp_a, g)
                    g = self.matmul(g, matrix_G)
                elif layer_type == LayerNorm:
                    damping = self.sqrt(damping_step)
                    normalizer = self.batch_size
                    normalizer = self.cast(normalizer, mstype.float32)
                    fim_cov = self.square(g)
                    fim_cov = self.mul(fim_cov, 1.0 / normalizer)
                    fim_cov = fim_cov + damping
                    fim_inv = self.inv(fim_cov)
                    g = self.mul(fim_inv, g)
                new_grads = new_grads + (g,)
        gradients = new_grads
        if self.is_distributed and self.conv_layer_count == 0:
            gradients = self.grad_reducer_g(gradients)
        self.cov_step = self.cov_step + self.one
        if self.weight_decay > 0:
            gradients = self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_flags, params, gradients)
        lr = self.get_lr()
        success = self.hyper_map(F.partial(_momentum_opt, self.opt, self.momentum, lr), gradients, params, moments)
        return success
class THOR_Ascend(Optimizer):
"""THOR"""
def __init__(self, net, learning_rate, damping, momentum, weight_decay=0.0, loss_scale=1.0, batch_size=32,
decay_filter=lambda x: x.name not in [], split_indices=None):
params = filter(lambda x: x.requires_grad, net.get_parameters())
super(THOR_Ascend, self).__init__(learning_rate, params, weight_decay, loss_scale)
if isinstance(momentum, float) and momentum < 0.0:
raise ValueError("momentum should be at least 0.0, but got momentum {}".format(momentum))
self.momentum = Parameter(Tensor(momentum, mstype.float32), name="momentum")
self.params = self.parameters
self.moments = self.params.clone(prefix="moments", init='zeros')
self.hyper_map = C.HyperMap()
self.opt = P.ApplyMomentum()
self.net = net
self.matrix_A_cov = ParameterTuple(filter(lambda x: 'matrix_A' in x.name, net.get_parameters()))
self.matrix_G_cov = ParameterTuple(filter(lambda x: 'matrix_G' in x.name, net.get_parameters()))
self.A_normalizer = ParameterTuple(filter(lambda x: 'A_normalizer' in x.name, net.get_parameters()))
self.G_normalizer = ParameterTuple(filter(lambda x: 'G_normalizer' in x.name, net.get_parameters()))
self.cube_matmul_left = P.CusMatMulCubeFraczLeftCast()
self.cube_matmul_left_fc = P.CusMatMulCubeDenseLeft()
self.cube_matmul_right_fc = P.CusMatMulCubeDenseRight()
self.cube_matmul_right_mul = P.CusMatMulCubeFraczRightMul()
self.transpose = P.Transpose()
self.shape = P.Shape()
self.reshape = P.Reshape()
self.mul = P.Mul()
self.C0 = 16
self.matrix_A_dim = ()
self.padA_flag = ()
self.device_shape_pad_flag = ()
self.diag_block_dim = 128
self.matrix_A = ()
self.matrix_G = ()
print("matrix_A_cov len is", len(self.matrix_A_cov))
self.thor_layer_count = 0
self.conv_layer_count = 0
self.weight_fim_idx_map = ()
self.weight_conv_idx_map = ()
self.weight_layerType_idx_map = ()
layer_type_map = get_net_layertype_mask(net)
layer_counter = 0
for idx in range(len(self.params)):
layer_type = layer_type_map[layer_counter]
weight = self.params[idx]
weight_shape = self.shape(weight)
if layer_type == Conv and "bias" not in self.params[idx].name.lower():
in_channels = weight_shape[1]
out_channels = weight_shape[0]
matrix_A_dim = in_channels * weight_shape[2] * weight_shape[3]
matrix_G_dim = out_channels
matrix_A_device_shape, matrix_A_device_dim = caculate_device_shape(matrix_A_dim, in_channels, True)
matrix_G_device_shape, matrix_G_device_dim = caculate_device_shape(matrix_G_dim, in_channels, False)
matrix_A_inv = Parameter(
Tensor(np.reshape(np.identity(matrix_A_device_dim).astype(np.float16), matrix_A_device_shape)),
name='matrix_A_inv_' + str(self.thor_layer_count), requires_grad=False)
matrix_G_inv = Parameter(
Tensor(np.reshape(np.identity(matrix_G_device_dim).astype(np.float16), matrix_G_device_shape)),
name="matrix_G_inv_" + str(self.thor_layer_count), requires_grad=False)
self.matrix_A = self.matrix_A + (matrix_A_inv,)
self.matrix_G = self.matrix_G + (matrix_G_inv,)
self.matrix_A_dim = self.matrix_A_dim + (matrix_A_dim,)
padA_flag = False
if (matrix_A_dim // self.diag_block_dim) * self.diag_block_dim != matrix_A_dim \
and matrix_A_dim > self.diag_block_dim:
padA_flag = True
self.padA_flag = self.padA_flag + (padA_flag,)
device_shape_pad_flag = False
if matrix_A_dim != matrix_A_device_dim:
device_shape_pad_flag = True
self.device_shape_pad_flag = self.device_shape_pad_flag + (device_shape_pad_flag,)
elif layer_type == FC and "bias" not in self.params[idx].name.lower():
out_channels = weight_shape[0]
if out_channels == 1001:
fc_matrix_A = Parameter(Tensor(np.zeros([128, 128, 16, 16]).astype(np.float16)),
name='matrix_A_inv_' + str(self.thor_layer_count),
requires_grad=False)
fc_matrix_G = Parameter(Tensor(np.zeros([63, 63, 16, 16]).astype(np.float16)),
name="matrix_G_inv_" + str(self.thor_layer_count),
requires_grad=False)
self.matrix_A = self.matrix_A + (fc_matrix_A,)
self.matrix_G = self.matrix_G + (fc_matrix_G,)
if layer_type in [Conv, FC, Embedding] and "bias" not in self.params[idx].name.lower():
self.weight_fim_idx_map = self.weight_fim_idx_map + (self.thor_layer_count,)
self.weight_layerType_idx_map = self.weight_layerType_idx_map + (layer_type,)
self.thor_layer_count = self.thor_layer_count + 1
if layer_type == Conv:
self.weight_conv_idx_map = self.weight_conv_idx_map + (self.conv_layer_count,)
self.conv_layer_count = self.conv_layer_count + 1
else:
self.weight_conv_idx_map = self.weight_conv_idx_map + (-1,)
else:
self.weight_fim_idx_map = self.weight_fim_idx_map + (-1,)
self.weight_conv_idx_map = self.weight_conv_idx_map + (-1,)
if layer_type == LayerNorm:
self.weight_layerType_idx_map = self.weight_layerType_idx_map + (LayerNorm,)
else:
self.weight_layerType_idx_map = self.weight_layerType_idx_map + (Other,)
# bert.cls1.output_bias: not a network layer, only a trainable param
if "output_bias" not in self.params[idx].name.lower():
layer_counter = get_layer_counter(layer_type, layer_counter, self.params, idx)
self.matrix_A = ParameterTuple(self.matrix_A)
self.matrix_G = ParameterTuple(self.matrix_G)
self.matrix_max_inv = ()
for i in range(len(self.matrix_A)):
self.matrix_max_inv = self.matrix_max_inv + (
Parameter(initializer(1, [1], mstype.float32), name="matrix_max" + str(i), requires_grad=False),)
self.log = P.Log()
self.exp = P.Exp()
self.sqrt = P.Sqrt()
self.matrix_max_inv = ParameterTuple(self.matrix_max_inv)
self.assign = P.Assign()
self.cast = P.Cast()
self.thor = True
self.weight_decay = weight_decay * loss_scale
self.decay_flags = tuple(decay_filter(x) for x in self.parameters)
self.damping = damping
self.gather = P.GatherV2()
self.one = Tensor(1, mstype.int32)
self.batch_size = Tensor(batch_size, mstype.float32)
self.loss_scale = Tensor(1 / (loss_scale * loss_scale), mstype.float32)
self.batch_size_scale = Tensor(batch_size * batch_size, mstype.float32)
self.axis = 0
self.cov_step = Parameter(initializer(0, [1], mstype.int32), name="cov_step", requires_grad=False)
self.cast = P.Cast()
self.eye = P.Eye()
self.cholesky = P.CusCholeskyTrsm()
self.vector_matmul = P.CusBatchMatMul()
self.fused_abs_max2 = P.CusFusedAbsMax1()
self.matrix_combine = P.CusMatrixCombine()
self.slice = P.Slice()
self.expand = P.ExpandDims()
self.reduce_sum = P.ReduceSum(keep_dims=False)
self.square = P.Square()
self.inv = P.Inv()
self.matmul = P.MatMul()
self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
if self.is_distributed:
mean = _get_gradients_mean()
degree = _get_device_num()
if self.conv_layer_count > 0:
if not split_indices:
self.split_indices = split_indices
else:
self.split_indices = [len(self.matrix_A) - 1]
auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum2")
auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum4")
auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum6")
auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum8")
self.grad_reducer_Amax = DistributedGradReducer(self.matrix_A, mean, degree, fusion_type=2)
self.grad_reducer_Gmax = DistributedGradReducer(self.matrix_A, mean, degree, fusion_type=4)
self.grad_reducer_A = DistributedGradReducer(self.matrix_A, mean, degree, fusion_type=6)
self.grad_reducer_G = DistributedGradReducer(self.matrix_A, mean, degree, fusion_type=8)
else:
if not split_indices:
self.split_indices = split_indices
else:
self.split_indices = [len(self.params) - 1]
auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum3")
self.grad_reducer_g = DistributedGradReducer(self.params, mean, degree, fusion_type=3)
    def _get_Ainv_Ginv_Amax_Gmax_list(self, gradients, damping_step, matrix_a_allreduce, matrix_g_allreduce,
                                      matrix_a_max_allreduce, matrix_g_max_allreduce):
        """get matrixA inverse list, matrixG inverse list, matrixA_max list, matrixG_max list

        For every Conv/FC/Embedding parameter, damps the cached covariance
        factors A and G, inverts them through the custom Cholesky kernels, and
        appends the inverses (plus, where computed, their absolute-max scalars)
        to the four tuples passed in, which are then returned.
        """
        for i in range(len(self.params)):
            thor_layer_count = self.weight_fim_idx_map[i]
            conv_layer_count = self.weight_conv_idx_map[i]
            layer_type = self.weight_layerType_idx_map[i]
            if layer_type in [Conv, FC, Embedding]:
                g = gradients[i]
                matrix_A = self.matrix_A_cov[thor_layer_count]
                matrix_G = self.matrix_G_cov[thor_layer_count]
                # depend() forces the covariance reads to execute after this
                # step's gradient is available (graph-mode ordering).
                matrix_A = F.depend(matrix_A, g)
                matrix_G = F.depend(matrix_G, g)
                A_shape = self.shape(matrix_A)
                A_eye = self.eye(A_shape[0], A_shape[0], mstype.float32)
                G_shape = self.shape(matrix_G)
                G_eye = self.eye(G_shape[0], G_shape[0], mstype.float32)
                if layer_type == Conv:
                    # Conv damping is rescaled by batch_size / normalizer, where the
                    # normalizer counts contributions to the covariance estimate.
                    A_normalizer = self.A_normalizer[conv_layer_count]
                    G_normalizer = self.G_normalizer[conv_layer_count]
                    A_normalizer = F.depend(A_normalizer, g)
                    G_normalizer = F.depend(G_normalizer, g)
                    dampingA = self.mul(damping_step, self.batch_size / A_normalizer)
                    dampingG = self.mul(damping_step, self.batch_size / G_normalizer)
                    dampingA = self.sqrt(dampingA)
                    matrix_A = matrix_A + dampingA * A_eye
                    # Build the inverse from the triangular Cholesky factor
                    # (presumably L^-T @ L^-1 via the batched kernel — confirm
                    # CusCholeskyTrsm/CusBatchMatMul semantics).
                    matrix_A_inv = self.cholesky(matrix_A)
                    matrix_A_inv = self.vector_matmul(matrix_A_inv, matrix_A_inv)
                    A_max = P.CusFusedAbsMax1([self.matrix_A_dim[conv_layer_count],
                                               self.matrix_A_dim[conv_layer_count]])(matrix_A_inv)
                    A_max = self.fused_abs_max2(A_max)
                    matrix_A_inv = self.matrix_combine(matrix_A_inv)
                    if self.padA_flag[conv_layer_count]:
                        # Drop the rows/cols added to reach the block dimension.
                        matrix_A_inv = self.slice(matrix_A_inv, (0, 0), (self.matrix_A_dim[conv_layer_count],
                                                                         self.matrix_A_dim[conv_layer_count]))
                    if self.device_shape_pad_flag[conv_layer_count]:
                        # Pad the channel axes up to C0 (= 16) for the device layout.
                        weight = self.params[i]
                        weight_shape = self.shape(weight)
                        kernel_hw = weight_shape[2] * weight_shape[3]
                        in_channels = weight_shape[1]
                        matrix_A_inv = self.reshape(matrix_A_inv, (kernel_hw, in_channels, kernel_hw, in_channels))
                        matrix_A_inv = P.Pad(((0, 0), (0, self.C0 - in_channels), (0, 0),
                                              (0, self.C0 - in_channels)))(matrix_A_inv)
                    # Repack into the cached parameter's 4-D device shape.
                    matrix_A_inv_shape = self.shape(self.matrix_A[thor_layer_count])
                    matrix_A_device_temp_shape = (matrix_A_inv_shape[0], matrix_A_inv_shape[2],
                                                  matrix_A_inv_shape[1], matrix_A_inv_shape[3])
                    matrix_A_inv = self.reshape(matrix_A_inv, matrix_A_device_temp_shape)
                    matrix_A_inv = self.transpose(matrix_A_inv, (2, 0, 1, 3))
                    dampingG = self.sqrt(dampingG)
                    # G still carries the squared loss scale; undo it and apply
                    # the batch-size rescaling before damping.
                    matrix_G = self.mul(matrix_G, self.loss_scale)
                    matrix_G = self.mul(matrix_G, self.batch_size_scale)
                    matrix_G = matrix_G + dampingG * G_eye
                    matrix_G_inv = self.cholesky(matrix_G)
                    matrix_G_inv = self.vector_matmul(matrix_G_inv, matrix_G_inv)
                    G_max = self.fused_abs_max2(matrix_G_inv)
                    G_max = self.fused_abs_max2(G_max)
                    matrix_G_inv = self.matrix_combine(matrix_G_inv)
                    matrix_G_inv_shape = self.shape(self.matrix_G[thor_layer_count])
                    matrix_G_device_temp_shape = (matrix_G_inv_shape[0], matrix_G_inv_shape[2],
                                                  matrix_G_inv_shape[1], matrix_G_inv_shape[3])
                    matrix_G_inv = self.reshape(matrix_G_inv, matrix_G_device_temp_shape)
                    matrix_G_inv = self.transpose(matrix_G_inv, (2, 0, 1, 3))
                    A_max = F.depend(A_max, g)
                    G_max = F.depend(G_max, g)
                    matrix_a_allreduce = matrix_a_allreduce + (matrix_A_inv,)
                    matrix_g_allreduce = matrix_g_allreduce + (matrix_G_inv,)
                    matrix_a_max_allreduce = matrix_a_max_allreduce + (A_max,)
                    matrix_g_max_allreduce = matrix_g_max_allreduce + (G_max,)
                elif layer_type == FC:
                    damping = self.sqrt(damping_step)
                    matrix_A = matrix_A + damping * A_eye
                    matrix_A_inv = self.cholesky(matrix_A)
                    matrix_A_inv = self.vector_matmul(matrix_A_inv, matrix_A_inv)
                    weight_shape = self.shape(self.params[i])
                    out_channels = weight_shape[0]
                    if out_channels == 2:
                        # Two-class head: skip the G inverse, use the identity.
                        matrix_A_inv = self.matrix_combine(matrix_A_inv)
                        matrix_G_inv = G_eye
                    else:
                        matrix_G = self.mul(matrix_G, self.loss_scale)
                        matrix_G = self.mul(matrix_G, self.batch_size_scale)
                        matrix_G = matrix_G + damping * G_eye
                        matrix_G_inv = self.cholesky(matrix_G)
                        matrix_G_inv = self.vector_matmul(matrix_G_inv, matrix_G_inv)
                        if out_channels == 1001:
                            # Special-cased 1001-way head (ImageNet + background):
                            # also compute abs-max scalars and repack both
                            # inverses into 16x16 blocks.
                            matrix_A_inv_max = self.fused_abs_max2(matrix_A_inv)
                            A_max = self.fused_abs_max2(matrix_A_inv_max)
                            matrix_A_inv = self.matrix_combine(matrix_A_inv)
                            matrix_A_inv_shape = self.shape(matrix_A_inv)
                            # NOTE(review): `/ 16` yields a float dimension here;
                            # integer division `// 16` looks intended — confirm.
                            matrix_A_inv = self.reshape(matrix_A_inv,
                                                        (matrix_A_inv_shape[0] / 16, 16,
                                                         matrix_A_inv_shape[0] / 16, 16))
                            matrix_A_inv = self.transpose(matrix_A_inv, (2, 0, 1, 3))
                            matrix_G_inv_max = P.CusFusedAbsMax1([1001, 1001])(matrix_G_inv)
                            G_max = self.fused_abs_max2(matrix_G_inv_max)
                            matrix_G_inv = self.matrix_combine(matrix_G_inv)
                            # Trim to 1001 then pad to 1008 (next multiple of 16).
                            matrix_G_inv = self.slice(matrix_G_inv, (0, 0), (1001, 1001))
                            matrix_G_inv = P.Pad(((0, 7), (0, 7)))(matrix_G_inv)
                            matrix_G_inv_shape = self.shape(matrix_G_inv)
                            matrix_G_inv = self.reshape(matrix_G_inv,
                                                        (matrix_G_inv_shape[0] / 16, 16,
                                                         matrix_G_inv_shape[0] / 16, 16))
                            matrix_G_inv = self.transpose(matrix_G_inv, (2, 0, 1, 3))
                            A_max = F.depend(A_max, g)
                            G_max = F.depend(G_max, g)
                            matrix_a_max_allreduce = matrix_a_max_allreduce + (A_max,)
                            matrix_g_max_allreduce = matrix_g_max_allreduce + (G_max,)
                        else:
                            matrix_A_inv = self.matrix_combine(matrix_A_inv)
                            matrix_G_inv = self.matrix_combine(matrix_G_inv)
                    matrix_a_allreduce = matrix_a_allreduce + (matrix_A_inv,)
                    matrix_g_allreduce = matrix_g_allreduce + (matrix_G_inv,)
                elif layer_type == Embedding:
                    damping = self.sqrt(damping_step)
                    # Embedding A is stored as a vector (diagonal), so its
                    # inverse is an element-wise reciprocal.
                    A_eye = P.OnesLike()(matrix_A)
                    matrix_A = self.mul(matrix_A, 1.0 / self.batch_size)
                    matrix_A = matrix_A + damping * A_eye
                    matrix_A_inv = self.inv(matrix_A)
                    matrix_G = self.mul(matrix_G, self.loss_scale)
                    matrix_G = self.mul(matrix_G, self.batch_size_scale)
                    matrix_G = matrix_G + damping * G_eye
                    matrix_G_inv = self.cholesky(matrix_G)
                    matrix_G_inv = self.vector_matmul(matrix_G_inv, matrix_G_inv)
                    matrix_G_inv = self.matrix_combine(matrix_G_inv)
                    matrix_a_allreduce = matrix_a_allreduce + (matrix_A_inv,)
                    matrix_g_allreduce = matrix_g_allreduce + (matrix_G_inv,)
        return matrix_a_allreduce, matrix_g_allreduce, matrix_a_max_allreduce, matrix_g_max_allreduce
def _process_layernorm(self, damping_step, gradient):
"""process layernorm layer for thor"""
damping = self.sqrt(damping_step)
normalizer = self.cast(self.batch_size, mstype.float32)
fim_cov = self.square(gradient)
fim_cov = self.mul(fim_cov, 1.0 / normalizer)
fim_cov = fim_cov + damping
fim_inv = self.inv(fim_cov)
gradient = self.mul(fim_inv, gradient)
return gradient
    def _get_second_gradients(self, new_grads, damping_step, gradients):
        """get second gradients for thor

        Preconditions each gradient with the cached matrix_A/matrix_G inverses
        (used on steps where the covariance factors are not refreshed) and
        returns the extended gradient tuple.
        """
        params_len = len(self.params)
        for i in range(params_len):
            g = gradients[i]
            thor_layer_count = self.weight_fim_idx_map[i]
            layer_type = self.weight_layerType_idx_map[i]
            if self.conv_layer_count > 0:
                # Network contains conv layers: use the fused cube-matmul
                # kernels with the cached inverses and max scalars.
                matrix_A = self.matrix_A[thor_layer_count]
                matrix_G = self.matrix_G[thor_layer_count]
                matrix_max = self.matrix_max_inv[thor_layer_count]
                if layer_type == FC:
                    g = self.cube_matmul_left_fc(matrix_G, g)
                    g = self.cube_matmul_right_fc(g, matrix_A, matrix_max)
                elif layer_type == Conv:
                    g = self.cube_matmul_left(matrix_G, g)
                    g = self.cube_matmul_right_mul(g, matrix_A, matrix_max)
            else:
                # No conv layers: plain fp16 matmuls against the covariance
                # inverses cached in matrix_A_cov / matrix_G_cov.
                if layer_type == Embedding:
                    temp_a_ori = self.matrix_A_cov[thor_layer_count]
                    temp_g = self.matrix_G_cov[thor_layer_count]
                    # Embedding A inverse is a per-row vector: broadcast-multiply.
                    temp_a = self.expand(temp_a_ori, 1)
                    g = self.mul(temp_a, g)
                    temp_g = self.cast(temp_g, mstype.float16)
                    g = self.cast(g, mstype.float16)
                    g = self.matmul(g, temp_g)
                    g = self.cast(g, mstype.float32)
                elif layer_type == FC:
                    temp_a = self.matrix_A_cov[thor_layer_count]
                    temp_g = self.matrix_G_cov[thor_layer_count]
                    temp_a = self.cast(temp_a, mstype.float16)
                    temp_g = self.cast(temp_g, mstype.float16)
                    g = self.cast(g, mstype.float16)
                    # G^-1 @ g @ A^-1 in fp16, cast back to fp32.
                    g = self.matmul(temp_g, g)
                    g = self.matmul(g, temp_a)
                    g = self.cast(g, mstype.float32)
                elif layer_type == LayerNorm:
                    g = self._process_layernorm(damping_step, g)
            new_grads = new_grads + (g,)
        return new_grads
    def construct(self, gradients):
        """Precondition `gradients` with THOR, then run the momentum update."""
        params = self.params
        moments = self.moments
        # Damping value scheduled for the current covariance step.
        damping_step = self.gather(self.damping, self.cov_step, self.axis)
        damping_step = self.cast(damping_step, mstype.float32)
        if self.thor:
            # Refresh step: recompute A/G inverses from the layer covariances.
            matrix_A_allreduce = ()
            matrix_G_allreduce = ()
            matrix_A_max_allreduce = ()
            matrix_G_max_allreduce = ()
            matrix_A_allreduce, matrix_G_allreduce, matrix_A_max_allreduce, matrix_G_max_allreduce = \
                self._get_Ainv_Ginv_Amax_Gmax_list(gradients, damping_step, matrix_A_allreduce, matrix_G_allreduce,
                                                   matrix_A_max_allreduce, matrix_G_max_allreduce)
            if self.is_distributed and self.conv_layer_count > 0:
                # Reduce the inverses (and max scalars) across devices.
                matrix_A_allreduce = self.grad_reducer_A(matrix_A_allreduce)
                matrix_G_allreduce = self.grad_reducer_G(matrix_G_allreduce)
                matrix_A_max_allreduce = self.grad_reducer_Amax(matrix_A_max_allreduce)
                matrix_G_max_allreduce = self.grad_reducer_Gmax(matrix_G_max_allreduce)
            new_grads = ()
            for i in range(len(self.params)):
                g = gradients[i]
                thor_layer_count = self.weight_fim_idx_map[i]
                conv_layer_count = self.weight_conv_idx_map[i]
                layer_type = self.weight_layerType_idx_map[i]
                if self.conv_layer_count > 0:
                    temp_a = matrix_A_allreduce[thor_layer_count]
                    temp_g = matrix_G_allreduce[thor_layer_count]
                    # Divide each inverse by its abs-max via exp(-log(max)).
                    matrix_A_inv_max = self.log(matrix_A_max_allreduce[thor_layer_count])
                    matrix_A_inv_max = self.mul(matrix_A_inv_max, -1)
                    matrix_A_inv_max = self.exp(matrix_A_inv_max)
                    temp_a = self.mul(temp_a, matrix_A_inv_max)
                    matrix_G_inv_max = self.log(matrix_G_max_allreduce[thor_layer_count])
                    matrix_G_inv_max = self.mul(matrix_G_inv_max, -1)
                    matrix_G_inv_max = self.exp(matrix_G_inv_max)
                    temp_g = self.mul(temp_g, matrix_G_inv_max)
                    # Combined scale factor, reapplied by the cube matmuls below.
                    temp_max = self.mul(matrix_A_max_allreduce[thor_layer_count],
                                        matrix_G_max_allreduce[thor_layer_count])
                    temp_a = self.cast(temp_a, mstype.float16)
                    temp_g = self.cast(temp_g, mstype.float16)
                    if layer_type == FC:
                        g = self.cube_matmul_left_fc(temp_g, g)
                        g = self.cube_matmul_right_fc(g, temp_a, temp_max)
                    elif layer_type == Conv:
                        A_normalizer = self.A_normalizer[conv_layer_count]
                        A_normalizer = F.depend(A_normalizer, g)
                        temp_max = self.mul(temp_max, self.batch_size / A_normalizer)
                        g = self.cube_matmul_left(temp_g, g)
                        g = self.cube_matmul_right_mul(g, temp_a, temp_max)
                    # Cache normalized inverses for the non-refresh steps;
                    # depend() keeps the assignments alive in the graph.
                    fake_A = self.assign(self.matrix_A[thor_layer_count], temp_a)
                    fake_G = self.assign(self.matrix_G[thor_layer_count], temp_g)
                    fake_max = self.assign(self.matrix_max_inv[thor_layer_count], temp_max)
                    g = F.depend(g, fake_A)
                    g = F.depend(g, fake_G)
                    g = F.depend(g, fake_max)
                else:
                    if layer_type == Embedding:
                        temp_a_ori = matrix_A_allreduce[thor_layer_count]
                        temp_g = matrix_G_allreduce[thor_layer_count]
                        fake_A = self.assign(self.matrix_A_cov[thor_layer_count], temp_a_ori)
                        fake_G = self.assign(self.matrix_G_cov[thor_layer_count], temp_g)
                        g = F.depend(g, fake_A)
                        g = F.depend(g, fake_G)
                        # Embedding A inverse is a vector: broadcast-multiply.
                        temp_a = self.expand(temp_a_ori, 1)
                        g = self.mul(temp_a, g)
                        temp_g = self.cast(temp_g, mstype.float16)
                        g = self.cast(g, mstype.float16)
                        g = self.matmul(g, temp_g)
                        g = self.cast(g, mstype.float32)
                    elif layer_type == FC:
                        temp_a = matrix_A_allreduce[thor_layer_count]
                        temp_g = matrix_G_allreduce[thor_layer_count]
                        fake_A = self.assign(self.matrix_A_cov[thor_layer_count], temp_a)
                        fake_G = self.assign(self.matrix_G_cov[thor_layer_count], temp_g)
                        g = F.depend(g, fake_A)
                        g = F.depend(g, fake_G)
                        temp_a = self.cast(temp_a, mstype.float16)
                        temp_g = self.cast(temp_g, mstype.float16)
                        g = self.cast(g, mstype.float16)
                        # G^-1 @ g @ A^-1 in fp16, cast back to fp32.
                        g = self.matmul(temp_g, g)
                        g = self.matmul(g, temp_a)
                        g = self.cast(g, mstype.float32)
                    elif layer_type == LayerNorm:
                        g = self._process_layernorm(damping_step, g)
                new_grads = new_grads + (g,)
            gradients = new_grads
        else:
            # Reuse step: apply the cached inverses instead of recomputing.
            new_grads = ()
            gradients = self._get_second_gradients(new_grads, damping_step, gradients)
        if self.is_distributed and self.conv_layer_count == 0:
            gradients = self.grad_reducer_g(gradients)
        self.cov_step = self.cov_step + self.one
        if self.weight_decay > 0:
            gradients = self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_flags, params, gradients)
        gradients = self.scale_grad(gradients)
        lr = self.get_lr()
        success = self.hyper_map(F.partial(_momentum_opt, self.opt, self.momentum, lr), gradients, params, moments)
        return success
import numpy as np
from mindspore.ops import functional as F, composite as C, operations as P
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter, ParameterTuple
from mindspore.common.tensor import Tensor
import mindspore.nn as nn
import mindspore.common.dtype as mstype
from mindspore._checkparam import Validator
from mindspore.nn.optim.optimizer import Optimizer
from mindspore.parallel._utils import _get_device_num, _get_gradients_mean
from mindspore import context
from mindspore.context import ParallelMode
from mindspore.nn.layer import Dense_Thor, Conv2d_Thor, Embedding_Thor
from mindspore.nn.wrap import DistributedGradReducer
from mindspore.train.train_thor.convert_utils import ConvertNetUntils
from mindspore.parallel._auto_parallel_context import auto_parallel_context
# Enumerates the layer types THOR distinguishes when scanning a network.
Other = -1  # any layer kind not handled specially
Conv = 1  # convolution (Conv2d_Thor)
FC = 2  # fully connected (Dense_Thor)
Embedding = 3  # embedding (Embedding_Thor)
LayerNorm = 4  # layer normalization
BatchNorm = 5  # batch normalization
# Multitype graphs dispatching the per-parameter update / weight-decay kernels.
_momentum_opt = C.MultitypeFuncGraph("momentum_opt")
op_add = P.AddN()  # used by _tensor_apply_decay to sum decay term and gradient
apply_decay = C.MultitypeFuncGraph("apply_decay")
@apply_decay.register("Number", "Bool", "Tensor", "Tensor")
def _tensor_apply_decay(weight_decay, if_apply, weight, gradient):
    """Return the gradient with L2 weight decay folded in when enabled."""
    if not if_apply:
        return gradient
    # Decay term weight * weight_decay is accumulated onto the raw gradient.
    return op_add((weight * weight_decay, gradient))
@_momentum_opt.register("Function", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor")
def _tensor_run_opt_ext(opt, momentum, learning_rate, gradient, weight, moment):
    """Run one ApplyMomentum update on `weight` and report success."""
    update = opt(weight, moment, learning_rate, gradient, momentum)
    # Tie the boolean result to the update op so graph mode keeps it alive.
    return F.depend(True, update)
C0 = 16  # Ascend fractal-format block size (channels per tile)
def caculate_device_shape(matrix_dim, channel, is_A):
    """Return the fractal device shape for a THOR covariance matrix.

    Args:
        matrix_dim (int): logical dimension of the (square) matrix.
        channel (int): number of input channels of the layer.
        is_A (bool): True for the input-side factor A; only then is the
            dimension scaled up when there are fewer than C0 channels.

    Returns:
        tuple: ((N1, N1, C0, C0), matrix_dim) where N1 = matrix_dim // C0.
    """
    # Original code duplicated the same result in both branches and kept a
    # dead `ll = (0)` placeholder; only the A-side small-channel rescale
    # actually differs, so compute that first and return once.
    if is_A and channel // C0 == 0:
        # Fewer than C0 channels: scale the dimension up to a C0 multiple.
        matrix_dim = (matrix_dim / channel) * C0
    return (int(matrix_dim // C0), int(matrix_dim // C0), C0, C0), int(matrix_dim)
def caculate_matmul_shape(matrix_A_dim, matrix_G_dim, split_dim):
    """Return the batched shapes used to invert A and G in split_dim blocks."""
    def _batches_and_split(dim):
        # How many split_dim-sized blocks cover `dim`, and the block size used.
        if dim % split_dim == 0:
            return dim // split_dim, split_dim
        if dim < split_dim:
            # Matrix smaller than one block: a single block of its own size.
            return 1, dim
        return dim // split_dim + 1, split_dim
    batch_w, split_dimA = _batches_and_split(matrix_A_dim)
    batch_h, split_dimG = _batches_and_split(matrix_G_dim)
    # A carries both batch axes; G only the output-side batch axis.
    return (batch_h, batch_w, split_dimA, split_dimA), (batch_h, split_dimG, split_dimG)
def find_net_layertype_recur(net, layertype_map):
    """Append the layer-type code of each leaf cell of `net` to `layertype_map`.

    Walks the cell tree depth-first in declaration order so the resulting
    codes line up with the network's trainable-parameter order.

    Args:
        net (Cell): (sub)network to scan.
        layertype_map (list): accumulator, mutated in place.
    """
    # Removed a leftover debug print of every subcell name; library code
    # should not write to stdout on each call.
    cells = net.name_cells()
    for name in cells:
        subcell = cells[name]
        if subcell == net:
            # name_cells() can yield the cell itself; skip to avoid recursing forever.
            continue
        if isinstance(subcell, Conv2d_Thor):
            layertype_map.append(Conv)
        elif isinstance(subcell, Dense_Thor):
            layertype_map.append(FC)
        elif isinstance(subcell, Embedding_Thor):
            layertype_map.append(Embedding)
        elif isinstance(subcell, nn.LayerNorm):
            layertype_map.append(LayerNorm)
        elif isinstance(subcell, nn.BatchNorm2d):
            layertype_map.append(BatchNorm)
        elif isinstance(subcell, (nn.Conv2d, nn.Dense, nn.Embedding, nn.Conv2dTranspose, nn.Conv1d, nn.Conv1dTranspose,
                                  nn.BatchNorm1d, nn.GroupNorm, nn.GlobalBatchNorm)):
            # Known leaf layer without a THOR counterpart.
            layertype_map.append(Other)
        else:
            # Container cell: recurse into its children.
            find_net_layertype_recur(subcell, layertype_map)
def get_net_layertype_mask(net):
    """Return the list of layer-type codes for every leaf cell of `net`."""
    mask = []
    find_net_layertype_recur(net, mask)
    return mask
def get_layer_counter(layer_type, layer_counter, params, idx):
    """Advance the running layer counter when `params[idx]` ends its layer."""
    if layer_type not in [Conv, FC, LayerNorm, BatchNorm]:
        # Parameters of unrecognized layers each close a layer on their own.
        return layer_counter + 1
    param_name = params[idx].name.lower()
    if layer_type in [LayerNorm, BatchNorm]:
        # Norm layers end at their beta parameter.
        return layer_counter + 1 if "beta" in param_name else layer_counter
    # Conv / FC: the layer ends at the bias, or at the weight itself when the
    # next parameter is not a bias (i.e. the layer has no bias term).
    if "bias" in param_name:
        return layer_counter + 1
    if idx < len(params) - 1 and "bias" not in params[idx + 1].name.lower():
        return layer_counter + 1
    return layer_counter
def THOR(net, learning_rate, damping, momentum, weight_decay=0.0, loss_scale=1.0, batch_size=32,
         use_nesterov=False, decay_filter=lambda x: x.name not in [], split_indices=None):
    """Build the THOR optimizer variant matching the current device target."""
    context.set_context(max_call_depth=10000)
    # Swap the network's Conv/Dense/Embedding layers for their THOR versions.
    ConvertNetUntils().convert_to_thor_net(net)
    if context.get_context("device_target") != "Ascend":
        return THOR_GPU(net, learning_rate, damping, momentum, weight_decay, loss_scale, batch_size,
                        use_nesterov, decay_filter, split_indices=split_indices)
    return THOR_Ascend(net, learning_rate, damping, momentum, weight_decay, loss_scale, batch_size, decay_filter,
                       split_indices=split_indices)
class THOR_GPU(Optimizer):
    """
    THOR optimizer for GPU targets.

    Momentum SGD whose gradients are preconditioned by per-layer
    Kronecker-factored curvature estimates: each Conv/FC/Embedding layer's
    cached covariance factors A and G are damped, inverted blockwise via
    Cholesky, and applied to the gradient (G^-1 g A^-1).

    Args:
        net (Cell): network already converted to THOR layers.
        learning_rate: learning rate (scalar, Tensor or schedule).
        damping (Tensor): per-step damping schedule, indexed by cov_step.
        momentum (float): momentum coefficient, must be >= 0.0.
        weight_decay (float): L2 penalty. Default: 0.0.
        loss_scale (float): loss scale used during training. Default: 1.0.
        batch_size (int): global batch size. Default: 32.
        use_nesterov (bool): use Nesterov momentum. Default: False.
        decay_filter (function): selects parameters that receive weight decay.
        split_indices (list): allreduce fusion split points. Default: None
            (a single fusion group covering all tensors).
    """
    def __init__(self, net, learning_rate, damping, momentum, weight_decay=0.0, loss_scale=1.0, batch_size=32,
                 use_nesterov=False, decay_filter=lambda x: x.name not in [], split_indices=None):
        params = filter(lambda x: x.requires_grad, net.get_parameters())
        super(THOR_GPU, self).__init__(learning_rate, params, weight_decay, loss_scale)
        Validator.check_value_type("momentum", momentum, [float], self.cls_name)
        if isinstance(momentum, float) and momentum < 0.0:
            raise ValueError("momentum should be at least 0.0, but got momentum {}".format(momentum))
        self.momentum = Parameter(Tensor(momentum, mstype.float32), name="momentum")
        self.params = self.parameters
        self.use_nesterov = Validator.check_bool(use_nesterov)
        self.moments = self.params.clone(prefix="moments", init='zeros')
        self.hyper_map = C.HyperMap()
        self.opt = P.ApplyMomentum(use_nesterov=self.use_nesterov)
        self.net = net
        # Covariance factors and normalizers registered by the THOR layers.
        self.matrix_A_cov = ParameterTuple(filter(lambda x: 'matrix_A' in x.name, net.get_parameters()))
        self.matrix_G_cov = ParameterTuple(filter(lambda x: 'matrix_G' in x.name, net.get_parameters()))
        self.A_normalizer = ParameterTuple(filter(lambda x: 'A_normalizer' in x.name, net.get_parameters()))
        self.G_normalizer = ParameterTuple(filter(lambda x: 'G_normalizer' in x.name, net.get_parameters()))
        self.transpose = P.Transpose()
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.matmul = P.MatMul()
        self.assign = P.Assign()
        self.mul = P.Mul()
        self.damping = damping
        self.gather = P.GatherV2()
        self.one = Tensor(1, mstype.int32)
        self.batch_size = Tensor(batch_size, mstype.float32)
        # Undo the squared loss scale baked into G and rescale by batch_size^2.
        self.loss_scale = Tensor(1 / (loss_scale * loss_scale), mstype.float32)
        self.batch_size_scale = Tensor(batch_size * batch_size, mstype.float32)
        self.feature_map = Tensor(1.0, mstype.float32)
        self.axis = 0
        self.cov_step = Parameter(initializer(0, [1], mstype.int32), name="cov_step", requires_grad=False)
        self.cast = P.Cast()
        self.sqrt = P.Sqrt()
        self.eye = P.Eye()
        split_dim = 128  # block size used for the split Cholesky inversion
        self.embedding_cholesky = P.CholeskyTrsm()
        self.cholesky = P.CholeskyTrsm(split_dim=split_dim)
        self.vector_matmul = P.BatchMatMul(transpose_a=True)
        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.inv = P.Reciprocal()
        self.square = P.Square()
        self.expand = P.ExpandDims()
        self.thor = True
        self.matrix_A = ()
        self.matrix_G = ()
        self.matrix_A_shape = ()
        self.thor_layer_count = 0
        self.conv_layer_count = 0
        # Per-parameter maps: THOR layer index, conv index, and layer type
        # (-1 / Other where not applicable).
        self.weight_fim_idx_map = ()
        self.weight_conv_idx_map = ()
        self.weight_layerType_idx_map = ()
        layer_type_map = get_net_layertype_mask(net)
        layer_counter = 0
        for idx in range(len(self.params)):
            layer_type = layer_type_map[layer_counter]
            weight = self.params[idx]
            weight_shape = self.shape(weight)
            if layer_type in [Conv, FC] and "bias" not in self.params[idx].name.lower():
                in_channels = weight_shape[1]
                out_channels = weight_shape[0]
                matrix_A_dim = in_channels
                if layer_type == Conv:
                    # Conv A covers in_channels * kernel_h * kernel_w.
                    matrix_A_dim = in_channels * weight_shape[2] * weight_shape[3]
                matrix_G_dim = out_channels
                matrix_A_shape, matrix_G_shape = caculate_matmul_shape(matrix_A_dim, matrix_G_dim, split_dim)
                matrix_A_inv = Parameter(np.zeros(matrix_A_shape).astype(np.float32),
                                         name='matrix_A_inv_' + str(self.thor_layer_count), requires_grad=False)
                matrix_G_inv = Parameter(np.zeros(matrix_G_shape).astype(np.float32),
                                         name="matrix_G_inv_" + str(self.thor_layer_count), requires_grad=False)
                self.matrix_A = self.matrix_A + (matrix_A_inv,)
                self.matrix_G = self.matrix_G + (matrix_G_inv,)
                self.matrix_A_shape = self.matrix_A_shape + (matrix_A_shape,)
            elif layer_type == Embedding:
                # Embedding A is a vocab-sized diagonal, G a dense square matrix.
                vocab_size = weight_shape[0]
                embedding_size = weight_shape[1]
                matrix_A_inv = Parameter(Tensor(np.zeros([vocab_size]).astype(np.float32)),
                                         name='matrix_A_inv_' + str(self.thor_layer_count), requires_grad=False)
                matrix_G_inv = Parameter(Tensor(np.zeros([embedding_size, embedding_size]).astype(np.float32)),
                                         name="matrix_G_inv_" + str(self.thor_layer_count), requires_grad=False)
                self.matrix_A = self.matrix_A + (matrix_A_inv,)
                self.matrix_G = self.matrix_G + (matrix_G_inv,)
                self.matrix_A_shape = self.matrix_A_shape + ((vocab_size,),)
            if layer_type in [Conv, FC, Embedding] and "bias" not in self.params[idx].name.lower():
                self.weight_fim_idx_map = self.weight_fim_idx_map + (self.thor_layer_count,)
                self.weight_layerType_idx_map = self.weight_layerType_idx_map + (layer_type,)
                self.thor_layer_count = self.thor_layer_count + 1
                if layer_type == Conv:
                    self.weight_conv_idx_map = self.weight_conv_idx_map + (self.conv_layer_count,)
                    self.conv_layer_count = self.conv_layer_count + 1
                else:
                    self.weight_conv_idx_map = self.weight_conv_idx_map + (-1,)
            else:
                self.weight_fim_idx_map = self.weight_fim_idx_map + (-1,)
                self.weight_conv_idx_map = self.weight_conv_idx_map + (-1,)
                if layer_type == LayerNorm:
                    self.weight_layerType_idx_map = self.weight_layerType_idx_map + (LayerNorm,)
                else:
                    self.weight_layerType_idx_map = self.weight_layerType_idx_map + (Other,)
            # bert.cls1.output_bias: not a network layer, only a trainable param
            if "output_bias" not in self.params[idx].name.lower():
                layer_counter = get_layer_counter(layer_type, layer_counter, self.params, idx)
        self.matrix_A = ParameterTuple(self.matrix_A)
        self.matrix_G = ParameterTuple(self.matrix_G)
        self.weight_decay = weight_decay
        self.decay_flags = tuple(decay_filter(x) for x in self.parameters)
        self.update_gradient = P.UpdateThorGradient(split_dim=split_dim)
        self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
        self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
        if self.is_distributed:
            mean = _get_gradients_mean()
            degree = _get_device_num()
            if self.conv_layer_count > 0:
                # BUGFIX: these two branches were swapped — an explicit
                # split_indices was discarded and the default path stored None,
                # which set_all_reduce_fusion_split_indices cannot accept.
                if not split_indices:
                    self.split_indices = [len(self.matrix_A) - 1]
                else:
                    self.split_indices = split_indices
                auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum2")
                auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum4")
                auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum6")
                auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum8")
                self.grad_reducer_Amax = DistributedGradReducer(self.matrix_A, mean, degree, fusion_type=2)
                self.grad_reducer_Gmax = DistributedGradReducer(self.matrix_A, mean, degree, fusion_type=4)
                self.grad_reducer_A = DistributedGradReducer(self.matrix_A, mean, degree, fusion_type=6)
                self.grad_reducer_G = DistributedGradReducer(self.matrix_A, mean, degree, fusion_type=8)
            else:
                # BUGFIX: same swapped-branch defect as above.
                if not split_indices:
                    self.split_indices = [len(self.params) - 1]
                else:
                    self.split_indices = split_indices
                auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum3")
                self.grad_reducer_g = DistributedGradReducer(self.params, mean, degree, fusion_type=3)
    def _get_Ainv_Ginv_list(self, gradients, damping_step, matrix_a_allreduce, matrix_g_allreduce):
        """get matrixA inverse list and matrix G inverse list

        Damps and inverts the cached A/G covariance factors of every
        Conv/FC/Embedding layer, appending the inverses to the given tuples.
        """
        for i in range(len(self.params)):
            thor_layer_count = self.weight_fim_idx_map[i]
            conv_layer_count = self.weight_conv_idx_map[i]
            layer_type = self.weight_layerType_idx_map[i]
            if layer_type in [Conv, FC, Embedding]:
                g = gradients[i]
                matrix_A = self.matrix_A_cov[thor_layer_count]
                matrix_G = self.matrix_G_cov[thor_layer_count]
                # depend() orders the covariance reads after this step's gradient.
                matrix_A = F.depend(matrix_A, g)
                matrix_G = F.depend(matrix_G, g)
                dampingA = damping_step
                dampingG = damping_step
                feature_map = self.feature_map
                if layer_type == Conv:
                    # Conv damping is rescaled by the covariance normalizers.
                    A_normalizer = self.A_normalizer[conv_layer_count]
                    G_normalizer = self.G_normalizer[conv_layer_count]
                    A_normalizer = F.depend(A_normalizer, g)
                    G_normalizer = F.depend(G_normalizer, g)
                    dampingA = self.mul(damping_step, 1.0 / A_normalizer)
                    dampingG = self.mul(damping_step, 1.0 / G_normalizer)
                    feature_map = self.sqrt(1.0 / A_normalizer)
                A_shape = self.shape(matrix_A)
                A_eye = self.eye(A_shape[0], A_shape[0], mstype.float32)
                dampingA = self.sqrt(dampingA)
                dampingG = self.sqrt(dampingG)
                G_shape = self.shape(matrix_G)
                G_eye = self.eye(G_shape[0], G_shape[1], mstype.float32)
                # G still carries the squared loss scale; undo it, rescale by
                # batch_size^2, then damp.
                matrix_G = self.mul(matrix_G, self.loss_scale)
                matrix_G = self.mul(matrix_G, self.batch_size_scale)
                matrix_G = matrix_G + dampingG * G_eye
                if layer_type == Embedding:
                    # Embedding A is a diagonal vector: invert element-wise.
                    A_eye = P.OnesLike()(matrix_A)
                    matrix_A = self.mul(matrix_A, 1.0 / self.batch_size)
                    matrix_A = matrix_A + dampingA * A_eye
                    matrix_A = self.inv(matrix_A)
                    matrix_G = self.embedding_cholesky(matrix_G)
                    matrix_G = self.matmul(matrix_G, matrix_G)
                else:
                    matrix_A = matrix_A + dampingA * A_eye
                    matrix_A = self.cholesky(matrix_A)
                    matrix_A = self.vector_matmul(matrix_A, matrix_A)
                    # Broadcast the blockwise inverse to the cached batched shape.
                    matrix_A = P.BroadcastTo(self.matrix_A_shape[thor_layer_count])(matrix_A)
                    matrix_G = self.cholesky(matrix_G)
                    matrix_G = self.vector_matmul(matrix_G, matrix_G)
                matrix_A = self.mul(matrix_A, feature_map)
                matrix_G = self.mul(matrix_G, feature_map)
                matrix_a_allreduce = matrix_a_allreduce + (matrix_A,)
                matrix_g_allreduce = matrix_g_allreduce + (matrix_G,)
        return matrix_a_allreduce, matrix_g_allreduce
    def construct(self, gradients):
        """Precondition `gradients` with THOR, then run the momentum update."""
        params = self.params
        moments = self.moments
        gradients = self.scale_grad(gradients)
        # Damping value scheduled for the current covariance step.
        damping_step = self.gather(self.damping, self.cov_step, self.axis)
        damping_step = self.cast(damping_step, mstype.float32)
        new_grads = ()
        if self.thor:
            # Refresh step: recompute A/G inverses from the layer covariances.
            matrix_Ainv_list = ()
            matrix_Ginv_list = ()
            matrix_A_allreduce, matrix_G_allreduce = self._get_Ainv_Ginv_list(gradients, damping_step,
                                                                              matrix_Ainv_list, matrix_Ginv_list)
            if self.is_distributed and self.conv_layer_count > 0:
                # Reduce the fresh inverses across devices.
                matrix_A_allreduce = self.grad_reducer_A(matrix_A_allreduce)
                matrix_G_allreduce = self.grad_reducer_G(matrix_G_allreduce)
            for i in range(len(self.params)):
                g = gradients[i]
                thor_layer_count = self.weight_fim_idx_map[i]
                conv_layer_count = self.weight_conv_idx_map[i]
                layer_type = self.weight_layerType_idx_map[i]
                if layer_type in [Conv, FC]:
                    g_shape = self.shape(g)
                    g = self.reshape(g, (g_shape[0], -1))
                    matrix_A = matrix_A_allreduce[thor_layer_count]
                    matrix_G = matrix_G_allreduce[thor_layer_count]
                    g = self.update_gradient(matrix_G, g, matrix_A)
                    # Cache the fresh inverses for the non-refresh steps; depend()
                    # keeps the assignments alive in the graph.
                    fake_A = self.assign(self.matrix_A[thor_layer_count], matrix_A)
                    fake_G = self.assign(self.matrix_G[thor_layer_count], matrix_G)
                    g = F.depend(g, fake_A)
                    g = F.depend(g, fake_G)
                    if conv_layer_count != -1:
                        # Conv gradients were flattened above; restore the shape.
                        g = self.reshape(g, g_shape)
                elif layer_type == Embedding:
                    matrix_A = matrix_A_allreduce[thor_layer_count]
                    matrix_G = matrix_G_allreduce[thor_layer_count]
                    fake_A = self.assign(self.matrix_A[thor_layer_count], matrix_A)
                    fake_G = self.assign(self.matrix_G[thor_layer_count], matrix_G)
                    g = F.depend(g, fake_A)
                    g = F.depend(g, fake_G)
                    # Diagonal A: broadcast over the embedding dimension.
                    temp_a = self.expand(matrix_A, 1)
                    g = self.mul(temp_a, g)
                    g = self.matmul(g, matrix_G)
                elif layer_type == LayerNorm:
                    # Diagonal Fisher preconditioning for LayerNorm parameters.
                    damping = self.sqrt(damping_step)
                    normalizer = self.batch_size
                    normalizer = self.cast(normalizer, mstype.float32)
                    fim_cov = self.square(g)
                    fim_cov = self.mul(fim_cov, 1.0 / normalizer)
                    fim_cov = fim_cov + damping
                    fim_inv = self.inv(fim_cov)
                    g = self.mul(fim_inv, g)
                new_grads = new_grads + (g,)
        else:
            # Reuse step: apply the cached inverses instead of recomputing.
            for j in range(len(self.params)):
                g = gradients[j]
                thor_layer_count = self.weight_fim_idx_map[j]
                conv_layer_count = self.weight_conv_idx_map[j]
                layer_type = self.weight_layerType_idx_map[j]
                if layer_type in [Conv, FC]:
                    g_shape = self.shape(g)
                    g = self.reshape(g, (g_shape[0], -1))
                    matrix_A = self.matrix_A[thor_layer_count]
                    matrix_G = self.matrix_G[thor_layer_count]
                    g = self.update_gradient(matrix_G, g, matrix_A)
                    if conv_layer_count != -1:
                        g = self.reshape(g, g_shape)
                elif layer_type == Embedding:
                    matrix_A = self.matrix_A[thor_layer_count]
                    matrix_G = self.matrix_G[thor_layer_count]
                    g = gradients[j]
                    temp_a = self.expand(matrix_A, 1)
                    g = self.mul(temp_a, g)
                    g = self.matmul(g, matrix_G)
                elif layer_type == LayerNorm:
                    damping = self.sqrt(damping_step)
                    normalizer = self.batch_size
                    normalizer = self.cast(normalizer, mstype.float32)
                    fim_cov = self.square(g)
                    fim_cov = self.mul(fim_cov, 1.0 / normalizer)
                    fim_cov = fim_cov + damping
                    fim_inv = self.inv(fim_cov)
                    g = self.mul(fim_inv, g)
                new_grads = new_grads + (g,)
        gradients = new_grads
        if self.is_distributed and self.conv_layer_count == 0:
            gradients = self.grad_reducer_g(gradients)
        self.cov_step = self.cov_step + self.one
        if self.weight_decay > 0:
            gradients = self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_flags, params, gradients)
        lr = self.get_lr()
        success = self.hyper_map(F.partial(_momentum_opt, self.opt, self.momentum, lr), gradients, params, moments)
        return success
class THOR_Ascend(Optimizer):
    """THOR second-order optimizer for Ascend.

    Wraps a momentum update with per-layer natural-gradient preconditioning:
    inverse covariance factors (matrix_A for layer inputs, matrix_G for output
    gradients) are computed from statistics registered by the network and are
    applied to the gradients before the momentum step.

    Args:
        net: network whose trainable parameters (and A/G covariance statistics,
            exposed as parameters named 'matrix_A*'/'matrix_G*') are optimized.
        learning_rate: learning rate forwarded to the base Optimizer.
        damping: damping schedule tensor, indexed by ``cov_step``.
        momentum: momentum coefficient; must be >= 0.0.
        weight_decay: L2 penalty factor (applied through ``apply_decay``).
        loss_scale: loss-scaling factor; gradient statistics are rescaled by
            ``1 / loss_scale**2``.
        batch_size: training batch size used to normalize the statistics.
        decay_filter: predicate selecting parameters that receive weight decay.
        split_indices: optional all-reduce fusion split points for distributed
            training; defaults to a single split at the last matrix/parameter.

    Raises:
        ValueError: if ``momentum`` is a float below 0.0.
    """

    def __init__(self, net, learning_rate, damping, momentum, weight_decay=0.0, loss_scale=1.0, batch_size=32,
                 decay_filter=lambda x: x.name not in [], split_indices=None):
        params = filter(lambda x: x.requires_grad, net.get_parameters())
        super(THOR_Ascend, self).__init__(learning_rate, params, weight_decay, loss_scale)
        if isinstance(momentum, float) and momentum < 0.0:
            raise ValueError("momentum should be at least 0.0, but got momentum {}".format(momentum))
        self.momentum = Parameter(Tensor(momentum, mstype.float32), name="momentum")
        self.params = self.parameters
        self.moments = self.params.clone(prefix="moments", init='zeros')
        self.hyper_map = C.HyperMap()
        self.opt = P.ApplyMomentum()
        self.net = net
        # Covariance statistics and normalizers registered by THOR-enabled layers.
        self.matrix_A_cov = ParameterTuple(filter(lambda x: 'matrix_A' in x.name, net.get_parameters()))
        self.matrix_G_cov = ParameterTuple(filter(lambda x: 'matrix_G' in x.name, net.get_parameters()))
        self.A_normalizer = ParameterTuple(filter(lambda x: 'A_normalizer' in x.name, net.get_parameters()))
        self.G_normalizer = ParameterTuple(filter(lambda x: 'G_normalizer' in x.name, net.get_parameters()))
        self.cube_matmul_left = P.CusMatMulCubeFraczLeftCast()
        self.cube_matmul_left_fc = P.CusMatMulCubeDenseLeft()
        self.cube_matmul_right_fc = P.CusMatMulCubeDenseRight()
        self.cube_matmul_right_mul = P.CusMatMulCubeFraczRightMul()
        self.transpose = P.Transpose()
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.mul = P.Mul()
        self.C0 = 16  # Ascend fractal tiling size used when padding channels
        self.matrix_A_dim = ()
        self.padA_flag = ()
        self.device_shape_pad_flag = ()
        self.diag_block_dim = 128
        self.matrix_A = ()
        self.matrix_G = ()
        print("matrix_A_cov len is", len(self.matrix_A_cov))  # debug output kept from original
        self.thor_layer_count = 0
        self.conv_layer_count = 0
        # Per-parameter index maps: position in the matrix_A/G tuples (-1 if the
        # parameter has no THOR factors), conv-layer index (-1 for non-conv),
        # and the layer-type constant.
        self.weight_fim_idx_map = ()
        self.weight_conv_idx_map = ()
        self.weight_layerType_idx_map = ()
        layer_type_map = get_net_layertype_mask(net)
        layer_counter = 0
        for idx in range(len(self.params)):
            layer_type = layer_type_map[layer_counter]
            weight = self.params[idx]
            weight_shape = self.shape(weight)
            if layer_type == Conv and "bias" not in self.params[idx].name.lower():
                in_channels = weight_shape[1]
                out_channels = weight_shape[0]
                matrix_A_dim = in_channels * weight_shape[2] * weight_shape[3]
                matrix_G_dim = out_channels
                matrix_A_device_shape, matrix_A_device_dim = caculate_device_shape(matrix_A_dim, in_channels, True)
                matrix_G_device_shape, matrix_G_device_dim = caculate_device_shape(matrix_G_dim, in_channels, False)
                # Identity initialization: preconditioning starts as a no-op.
                matrix_A_inv = Parameter(
                    Tensor(np.reshape(np.identity(matrix_A_device_dim).astype(np.float16), matrix_A_device_shape)),
                    name='matrix_A_inv_' + str(self.thor_layer_count), requires_grad=False)
                matrix_G_inv = Parameter(
                    Tensor(np.reshape(np.identity(matrix_G_device_dim).astype(np.float16), matrix_G_device_shape)),
                    name="matrix_G_inv_" + str(self.thor_layer_count), requires_grad=False)
                self.matrix_A = self.matrix_A + (matrix_A_inv,)
                self.matrix_G = self.matrix_G + (matrix_G_inv,)
                self.matrix_A_dim = self.matrix_A_dim + (matrix_A_dim,)
                padA_flag = False
                if (matrix_A_dim // self.diag_block_dim) * self.diag_block_dim != matrix_A_dim \
                        and matrix_A_dim > self.diag_block_dim:
                    padA_flag = True
                self.padA_flag = self.padA_flag + (padA_flag,)
                device_shape_pad_flag = False
                if matrix_A_dim != matrix_A_device_dim:
                    device_shape_pad_flag = True
                self.device_shape_pad_flag = self.device_shape_pad_flag + (device_shape_pad_flag,)
            elif layer_type == FC and "bias" not in self.params[idx].name.lower():
                out_channels = weight_shape[0]
                if out_channels == 1001:
                    # Special-cased ImageNet-1001 classifier head.
                    fc_matrix_A = Parameter(Tensor(np.zeros([128, 128, 16, 16]).astype(np.float16)),
                                            name='matrix_A_inv_' + str(self.thor_layer_count),
                                            requires_grad=False)
                    fc_matrix_G = Parameter(Tensor(np.zeros([63, 63, 16, 16]).astype(np.float16)),
                                            name="matrix_G_inv_" + str(self.thor_layer_count),
                                            requires_grad=False)
                    self.matrix_A = self.matrix_A + (fc_matrix_A,)
                    self.matrix_G = self.matrix_G + (fc_matrix_G,)
            if layer_type in [Conv, FC, Embedding] and "bias" not in self.params[idx].name.lower():
                self.weight_fim_idx_map = self.weight_fim_idx_map + (self.thor_layer_count,)
                self.weight_layerType_idx_map = self.weight_layerType_idx_map + (layer_type,)
                self.thor_layer_count = self.thor_layer_count + 1
                if layer_type == Conv:
                    self.weight_conv_idx_map = self.weight_conv_idx_map + (self.conv_layer_count,)
                    self.conv_layer_count = self.conv_layer_count + 1
                else:
                    self.weight_conv_idx_map = self.weight_conv_idx_map + (-1,)
            else:
                self.weight_fim_idx_map = self.weight_fim_idx_map + (-1,)
                self.weight_conv_idx_map = self.weight_conv_idx_map + (-1,)
                if layer_type == LayerNorm:
                    self.weight_layerType_idx_map = self.weight_layerType_idx_map + (LayerNorm,)
                else:
                    self.weight_layerType_idx_map = self.weight_layerType_idx_map + (Other,)
            # bert.cls1.output_bias: not a network layer, only a trainable param
            if "output_bias" not in self.params[idx].name.lower():
                layer_counter = get_layer_counter(layer_type, layer_counter, self.params, idx)
        self.matrix_A = ParameterTuple(self.matrix_A)
        self.matrix_G = ParameterTuple(self.matrix_G)
        self.matrix_max_inv = ()
        for i in range(len(self.matrix_A)):
            self.matrix_max_inv = self.matrix_max_inv + (
                Parameter(initializer(1, [1], mstype.float32), name="matrix_max" + str(i), requires_grad=False),)
        self.log = P.Log()
        self.exp = P.Exp()
        self.sqrt = P.Sqrt()
        self.matrix_max_inv = ParameterTuple(self.matrix_max_inv)
        self.assign = P.Assign()
        self.cast = P.Cast()
        self.thor = True  # toggled externally: True = refresh factors, False = reuse cached ones
        self.weight_decay = weight_decay * loss_scale
        self.decay_flags = tuple(decay_filter(x) for x in self.parameters)
        self.damping = damping
        self.gather = P.GatherV2()
        self.one = Tensor(1, mstype.int32)
        self.batch_size = Tensor(batch_size, mstype.float32)
        self.loss_scale = Tensor(1 / (loss_scale * loss_scale), mstype.float32)
        self.batch_size_scale = Tensor(batch_size * batch_size, mstype.float32)
        self.axis = 0
        self.cov_step = Parameter(initializer(0, [1], mstype.int32), name="cov_step", requires_grad=False)
        self.cast = P.Cast()
        self.eye = P.Eye()
        self.cholesky = P.CusCholeskyTrsm()
        self.vector_matmul = P.CusBatchMatMul()
        self.fused_abs_max2 = P.CusFusedAbsMax1()
        self.matrix_combine = P.CusMatrixCombine()
        self.slice = P.Slice()
        self.expand = P.ExpandDims()
        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.square = P.Square()
        self.inv = P.Inv()
        self.matmul = P.MatMul()
        self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
        self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
        if self.is_distributed:
            mean = _get_gradients_mean()
            degree = _get_device_num()
            if self.conv_layer_count > 0:
                # BUGFIX: the two branches were swapped, so a user-supplied
                # split_indices was discarded and None was passed to the
                # fusion-split setter. Default only when none was given.
                if not split_indices:
                    self.split_indices = [len(self.matrix_A) - 1]
                else:
                    self.split_indices = split_indices
                auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum2")
                auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum4")
                auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum6")
                auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum8")
                # NOTE(review): all four reducers are built from self.matrix_A,
                # including the G/Gmax ones - confirm against upstream whether
                # grad_reducer_G/Gmax should use self.matrix_G instead.
                self.grad_reducer_Amax = DistributedGradReducer(self.matrix_A, mean, degree, fusion_type=2)
                self.grad_reducer_Gmax = DistributedGradReducer(self.matrix_A, mean, degree, fusion_type=4)
                self.grad_reducer_A = DistributedGradReducer(self.matrix_A, mean, degree, fusion_type=6)
                self.grad_reducer_G = DistributedGradReducer(self.matrix_A, mean, degree, fusion_type=8)
            else:
                # BUGFIX: same inverted condition as above.
                if not split_indices:
                    self.split_indices = [len(self.params) - 1]
                else:
                    self.split_indices = split_indices
                auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, "hccl_world_groupsum3")
                self.grad_reducer_g = DistributedGradReducer(self.params, mean, degree, fusion_type=3)

    def _get_Ainv_Ginv_Amax_Gmax_list(self, gradients, damping_step, matrix_a_allreduce, matrix_g_allreduce,
                                      matrix_a_max_allreduce, matrix_g_max_allreduce):
        """Get matrixA inverse list, matrixG inverse list, matrixA_max list, matrixG_max list.

        Builds per-layer damped inverse factors from the current covariance
        statistics and appends them (plus their abs-max scalars for Conv/FC-1001)
        to the given tuples, which are returned for subsequent all-reduce.
        """
        for i in range(len(self.params)):
            thor_layer_count = self.weight_fim_idx_map[i]
            conv_layer_count = self.weight_conv_idx_map[i]
            layer_type = self.weight_layerType_idx_map[i]
            if layer_type in [Conv, FC, Embedding]:
                g = gradients[i]
                matrix_A = self.matrix_A_cov[thor_layer_count]
                matrix_G = self.matrix_G_cov[thor_layer_count]
                # F.depend enforces that the statistics are read only after the
                # gradient for this step is available.
                matrix_A = F.depend(matrix_A, g)
                matrix_G = F.depend(matrix_G, g)
                A_shape = self.shape(matrix_A)
                A_eye = self.eye(A_shape[0], A_shape[0], mstype.float32)
                G_shape = self.shape(matrix_G)
                G_eye = self.eye(G_shape[0], G_shape[0], mstype.float32)
                if layer_type == Conv:
                    A_normalizer = self.A_normalizer[conv_layer_count]
                    G_normalizer = self.G_normalizer[conv_layer_count]
                    A_normalizer = F.depend(A_normalizer, g)
                    G_normalizer = F.depend(G_normalizer, g)
                    dampingA = self.mul(damping_step, self.batch_size / A_normalizer)
                    dampingG = self.mul(damping_step, self.batch_size / G_normalizer)
                    dampingA = self.sqrt(dampingA)
                    matrix_A = matrix_A + dampingA * A_eye
                    # Cholesky-based inversion: trsm then square the triangular factor.
                    matrix_A_inv = self.cholesky(matrix_A)
                    matrix_A_inv = self.vector_matmul(matrix_A_inv, matrix_A_inv)
                    A_max = P.CusFusedAbsMax1([self.matrix_A_dim[conv_layer_count],
                                               self.matrix_A_dim[conv_layer_count]])(matrix_A_inv)
                    A_max = self.fused_abs_max2(A_max)
                    matrix_A_inv = self.matrix_combine(matrix_A_inv)
                    if self.padA_flag[conv_layer_count]:
                        matrix_A_inv = self.slice(matrix_A_inv, (0, 0), (self.matrix_A_dim[conv_layer_count],
                                                                         self.matrix_A_dim[conv_layer_count]))
                    if self.device_shape_pad_flag[conv_layer_count]:
                        # Pad channels up to C0 to match the device fractal layout.
                        weight = self.params[i]
                        weight_shape = self.shape(weight)
                        kernel_hw = weight_shape[2] * weight_shape[3]
                        in_channels = weight_shape[1]
                        matrix_A_inv = self.reshape(matrix_A_inv, (kernel_hw, in_channels, kernel_hw, in_channels))
                        matrix_A_inv = P.Pad(((0, 0), (0, self.C0 - in_channels), (0, 0),
                                              (0, self.C0 - in_channels)))(matrix_A_inv)
                    matrix_A_inv_shape = self.shape(self.matrix_A[thor_layer_count])
                    matrix_A_device_temp_shape = (matrix_A_inv_shape[0], matrix_A_inv_shape[2],
                                                  matrix_A_inv_shape[1], matrix_A_inv_shape[3])
                    matrix_A_inv = self.reshape(matrix_A_inv, matrix_A_device_temp_shape)
                    matrix_A_inv = self.transpose(matrix_A_inv, (2, 0, 1, 3))
                    dampingG = self.sqrt(dampingG)
                    matrix_G = self.mul(matrix_G, self.loss_scale)
                    matrix_G = self.mul(matrix_G, self.batch_size_scale)
                    matrix_G = matrix_G + dampingG * G_eye
                    matrix_G_inv = self.cholesky(matrix_G)
                    matrix_G_inv = self.vector_matmul(matrix_G_inv, matrix_G_inv)
                    G_max = self.fused_abs_max2(matrix_G_inv)
                    G_max = self.fused_abs_max2(G_max)
                    matrix_G_inv = self.matrix_combine(matrix_G_inv)
                    matrix_G_inv_shape = self.shape(self.matrix_G[thor_layer_count])
                    matrix_G_device_temp_shape = (matrix_G_inv_shape[0], matrix_G_inv_shape[2],
                                                  matrix_G_inv_shape[1], matrix_G_inv_shape[3])
                    matrix_G_inv = self.reshape(matrix_G_inv, matrix_G_device_temp_shape)
                    matrix_G_inv = self.transpose(matrix_G_inv, (2, 0, 1, 3))
                    A_max = F.depend(A_max, g)
                    G_max = F.depend(G_max, g)
                    matrix_a_allreduce = matrix_a_allreduce + (matrix_A_inv,)
                    matrix_g_allreduce = matrix_g_allreduce + (matrix_G_inv,)
                    matrix_a_max_allreduce = matrix_a_max_allreduce + (A_max,)
                    matrix_g_max_allreduce = matrix_g_max_allreduce + (G_max,)
                elif layer_type == FC:
                    damping = self.sqrt(damping_step)
                    matrix_A = matrix_A + damping * A_eye
                    matrix_A_inv = self.cholesky(matrix_A)
                    matrix_A_inv = self.vector_matmul(matrix_A_inv, matrix_A_inv)
                    weight_shape = self.shape(self.params[i])
                    out_channels = weight_shape[0]
                    if out_channels == 2:
                        # Tiny binary head: skip G inversion, use identity.
                        matrix_A_inv = self.matrix_combine(matrix_A_inv)
                        matrix_G_inv = G_eye
                    else:
                        matrix_G = self.mul(matrix_G, self.loss_scale)
                        matrix_G = self.mul(matrix_G, self.batch_size_scale)
                        matrix_G = matrix_G + damping * G_eye
                        matrix_G_inv = self.cholesky(matrix_G)
                        matrix_G_inv = self.vector_matmul(matrix_G_inv, matrix_G_inv)
                        if out_channels == 1001:
                            matrix_A_inv_max = self.fused_abs_max2(matrix_A_inv)
                            A_max = self.fused_abs_max2(matrix_A_inv_max)
                            matrix_A_inv = self.matrix_combine(matrix_A_inv)
                            matrix_A_inv_shape = self.shape(matrix_A_inv)
                            # BUGFIX: use integer division; true division yields
                            # floats inside a reshape target (dims are 16-aligned,
                            # so the value is unchanged).
                            matrix_A_inv = self.reshape(matrix_A_inv,
                                                        (matrix_A_inv_shape[0] // 16, 16,
                                                         matrix_A_inv_shape[0] // 16, 16))
                            matrix_A_inv = self.transpose(matrix_A_inv, (2, 0, 1, 3))
                            matrix_G_inv_max = P.CusFusedAbsMax1([1001, 1001])(matrix_G_inv)
                            G_max = self.fused_abs_max2(matrix_G_inv_max)
                            matrix_G_inv = self.matrix_combine(matrix_G_inv)
                            matrix_G_inv = self.slice(matrix_G_inv, (0, 0), (1001, 1001))
                            matrix_G_inv = P.Pad(((0, 7), (0, 7)))(matrix_G_inv)  # pad 1001 -> 1008 (16-aligned)
                            matrix_G_inv_shape = self.shape(matrix_G_inv)
                            matrix_G_inv = self.reshape(matrix_G_inv,
                                                        (matrix_G_inv_shape[0] // 16, 16,
                                                         matrix_G_inv_shape[0] // 16, 16))
                            matrix_G_inv = self.transpose(matrix_G_inv, (2, 0, 1, 3))
                            A_max = F.depend(A_max, g)
                            G_max = F.depend(G_max, g)
                            matrix_a_max_allreduce = matrix_a_max_allreduce + (A_max,)
                            matrix_g_max_allreduce = matrix_g_max_allreduce + (G_max,)
                        else:
                            matrix_A_inv = self.matrix_combine(matrix_A_inv)
                            matrix_G_inv = self.matrix_combine(matrix_G_inv)
                    matrix_a_allreduce = matrix_a_allreduce + (matrix_A_inv,)
                    matrix_g_allreduce = matrix_g_allreduce + (matrix_G_inv,)
                elif layer_type == Embedding:
                    damping = self.sqrt(damping_step)
                    # Embedding A-factor is diagonal, so invert element-wise.
                    A_eye = P.OnesLike()(matrix_A)
                    matrix_A = self.mul(matrix_A, 1.0 / self.batch_size)
                    matrix_A = matrix_A + damping * A_eye
                    matrix_A_inv = self.inv(matrix_A)
                    matrix_G = self.mul(matrix_G, self.loss_scale)
                    matrix_G = self.mul(matrix_G, self.batch_size_scale)
                    matrix_G = matrix_G + damping * G_eye
                    matrix_G_inv = self.cholesky(matrix_G)
                    matrix_G_inv = self.vector_matmul(matrix_G_inv, matrix_G_inv)
                    matrix_G_inv = self.matrix_combine(matrix_G_inv)
                    matrix_a_allreduce = matrix_a_allreduce + (matrix_A_inv,)
                    matrix_g_allreduce = matrix_g_allreduce + (matrix_G_inv,)
        return matrix_a_allreduce, matrix_g_allreduce, matrix_a_max_allreduce, matrix_g_max_allreduce

    def _process_layernorm(self, damping_step, gradient):
        """Process a LayerNorm gradient: scale by the inverse damped diagonal FIM."""
        damping = self.sqrt(damping_step)
        normalizer = self.cast(self.batch_size, mstype.float32)
        fim_cov = self.square(gradient)
        fim_cov = self.mul(fim_cov, 1.0 / normalizer)
        fim_cov = fim_cov + damping
        fim_inv = self.inv(fim_cov)
        gradient = self.mul(fim_inv, gradient)
        return gradient

    def _get_second_gradients(self, new_grads, damping_step, gradients):
        """Get second-order gradients for thor using the cached inverse factors.

        Used on steps where self.thor is False: the factors stored during the
        last refresh are re-applied without recomputation.
        """
        params_len = len(self.params)
        for i in range(params_len):
            g = gradients[i]
            thor_layer_count = self.weight_fim_idx_map[i]
            layer_type = self.weight_layerType_idx_map[i]
            if self.conv_layer_count > 0:
                # Convolutional network: device-layout (fractal) matmuls.
                matrix_A = self.matrix_A[thor_layer_count]
                matrix_G = self.matrix_G[thor_layer_count]
                matrix_max = self.matrix_max_inv[thor_layer_count]
                if layer_type == FC:
                    g = self.cube_matmul_left_fc(matrix_G, g)
                    g = self.cube_matmul_right_fc(g, matrix_A, matrix_max)
                elif layer_type == Conv:
                    g = self.cube_matmul_left(matrix_G, g)
                    g = self.cube_matmul_right_mul(g, matrix_A, matrix_max)
            else:
                # Transformer-style network: dense fp16 matmuls on the cov params.
                if layer_type == Embedding:
                    temp_a_ori = self.matrix_A_cov[thor_layer_count]
                    temp_g = self.matrix_G_cov[thor_layer_count]
                    temp_a = self.expand(temp_a_ori, 1)
                    g = self.mul(temp_a, g)
                    temp_g = self.cast(temp_g, mstype.float16)
                    g = self.cast(g, mstype.float16)
                    g = self.matmul(g, temp_g)
                    g = self.cast(g, mstype.float32)
                elif layer_type == FC:
                    temp_a = self.matrix_A_cov[thor_layer_count]
                    temp_g = self.matrix_G_cov[thor_layer_count]
                    temp_a = self.cast(temp_a, mstype.float16)
                    temp_g = self.cast(temp_g, mstype.float16)
                    g = self.cast(g, mstype.float16)
                    g = self.matmul(temp_g, g)
                    g = self.matmul(g, temp_a)
                    g = self.cast(g, mstype.float32)
                elif layer_type == LayerNorm:
                    g = self._process_layernorm(damping_step, g)
            new_grads = new_grads + (g,)
        return new_grads

    def construct(self, gradients):
        """Apply THOR preconditioning to gradients, then a momentum update."""
        params = self.params
        moments = self.moments
        damping_step = self.gather(self.damping, self.cov_step, self.axis)
        damping_step = self.cast(damping_step, mstype.float32)
        if self.thor:
            # Refresh step: recompute inverse factors from fresh statistics.
            matrix_A_allreduce = ()
            matrix_G_allreduce = ()
            matrix_A_max_allreduce = ()
            matrix_G_max_allreduce = ()
            matrix_A_allreduce, matrix_G_allreduce, matrix_A_max_allreduce, matrix_G_max_allreduce = \
                self._get_Ainv_Ginv_Amax_Gmax_list(gradients, damping_step, matrix_A_allreduce, matrix_G_allreduce,
                                                   matrix_A_max_allreduce, matrix_G_max_allreduce)
            if self.is_distributed and self.conv_layer_count > 0:
                matrix_A_allreduce = self.grad_reducer_A(matrix_A_allreduce)
                matrix_G_allreduce = self.grad_reducer_G(matrix_G_allreduce)
                matrix_A_max_allreduce = self.grad_reducer_Amax(matrix_A_max_allreduce)
                matrix_G_max_allreduce = self.grad_reducer_Gmax(matrix_G_max_allreduce)
            new_grads = ()
            for i in range(len(self.params)):
                g = gradients[i]
                thor_layer_count = self.weight_fim_idx_map[i]
                conv_layer_count = self.weight_conv_idx_map[i]
                layer_type = self.weight_layerType_idx_map[i]
                if self.conv_layer_count > 0:
                    temp_a = matrix_A_allreduce[thor_layer_count]
                    temp_g = matrix_G_allreduce[thor_layer_count]
                    # Normalize each factor by its abs-max via exp(-log(max)).
                    matrix_A_inv_max = self.log(matrix_A_max_allreduce[thor_layer_count])
                    matrix_A_inv_max = self.mul(matrix_A_inv_max, -1)
                    matrix_A_inv_max = self.exp(matrix_A_inv_max)
                    temp_a = self.mul(temp_a, matrix_A_inv_max)
                    matrix_G_inv_max = self.log(matrix_G_max_allreduce[thor_layer_count])
                    matrix_G_inv_max = self.mul(matrix_G_inv_max, -1)
                    matrix_G_inv_max = self.exp(matrix_G_inv_max)
                    temp_g = self.mul(temp_g, matrix_G_inv_max)
                    temp_max = self.mul(matrix_A_max_allreduce[thor_layer_count],
                                        matrix_G_max_allreduce[thor_layer_count])
                    temp_a = self.cast(temp_a, mstype.float16)
                    temp_g = self.cast(temp_g, mstype.float16)
                    if layer_type == FC:
                        g = self.cube_matmul_left_fc(temp_g, g)
                        g = self.cube_matmul_right_fc(g, temp_a, temp_max)
                    elif layer_type == Conv:
                        A_normalizer = self.A_normalizer[conv_layer_count]
                        A_normalizer = F.depend(A_normalizer, g)
                        temp_max = self.mul(temp_max, self.batch_size / A_normalizer)
                        g = self.cube_matmul_left(temp_g, g)
                        g = self.cube_matmul_right_mul(g, temp_a, temp_max)
                    # Cache the factors for the reuse steps; depend() keeps the
                    # assignments in the execution graph.
                    fake_A = self.assign(self.matrix_A[thor_layer_count], temp_a)
                    fake_G = self.assign(self.matrix_G[thor_layer_count], temp_g)
                    fake_max = self.assign(self.matrix_max_inv[thor_layer_count], temp_max)
                    g = F.depend(g, fake_A)
                    g = F.depend(g, fake_G)
                    g = F.depend(g, fake_max)
                else:
                    if layer_type == Embedding:
                        temp_a_ori = matrix_A_allreduce[thor_layer_count]
                        temp_g = matrix_G_allreduce[thor_layer_count]
                        fake_A = self.assign(self.matrix_A_cov[thor_layer_count], temp_a_ori)
                        fake_G = self.assign(self.matrix_G_cov[thor_layer_count], temp_g)
                        g = F.depend(g, fake_A)
                        g = F.depend(g, fake_G)
                        temp_a = self.expand(temp_a_ori, 1)
                        g = self.mul(temp_a, g)
                        temp_g = self.cast(temp_g, mstype.float16)
                        g = self.cast(g, mstype.float16)
                        g = self.matmul(g, temp_g)
                        g = self.cast(g, mstype.float32)
                    elif layer_type == FC:
                        temp_a = matrix_A_allreduce[thor_layer_count]
                        temp_g = matrix_G_allreduce[thor_layer_count]
                        fake_A = self.assign(self.matrix_A_cov[thor_layer_count], temp_a)
                        fake_G = self.assign(self.matrix_G_cov[thor_layer_count], temp_g)
                        g = F.depend(g, fake_A)
                        g = F.depend(g, fake_G)
                        temp_a = self.cast(temp_a, mstype.float16)
                        temp_g = self.cast(temp_g, mstype.float16)
                        g = self.cast(g, mstype.float16)
                        g = self.matmul(temp_g, g)
                        g = self.matmul(g, temp_a)
                        g = self.cast(g, mstype.float32)
                    elif layer_type == LayerNorm:
                        g = self._process_layernorm(damping_step, g)
                new_grads = new_grads + (g,)
            gradients = new_grads
        else:
            # Reuse step: apply the cached factors.
            new_grads = ()
            gradients = self._get_second_gradients(new_grads, damping_step, gradients)
        if self.is_distributed and self.conv_layer_count == 0:
            gradients = self.grad_reducer_g(gradients)
        self.cov_step = self.cov_step + self.one
        if self.weight_decay > 0:
            gradients = self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_flags, params, gradients)
        gradients = self.scale_grad(gradients)
        lr = self.get_lr()
        success = self.hyper_map(F.partial(_momentum_opt, self.opt, self.momentum, lr), gradients, params, moments)
        return success
from typing import Type
from .abc_listing import ListingBaseClass
from bs4 import BeautifulSoup
from dataclasses import dataclass
@dataclass
class CarListingAdvertiser(ListingBaseClass):
    """Scrapes the advertiser (seller) details attached to a car listing.

    Attributes keep their "_" placeholder when a value cannot be scraped.
    """

    __postendpoint__ = "/advertisers"
    __webpageurl__ = "https://www.marktplaats.nl/v/"
    name: str = "_"
    activity: str = "_"
    rating: str = "_"
    advertiser_url: str = ""

    def __init__(self, web_page_listing, crud, *args, **kwargs):
        super().__init__(web_page_listing, crud, *args, **kwargs)

    def get_attributes(self):
        """Fetch the listing page, locate the advertiser page, and scrape it."""
        response_html = self.crud.get_html(self._listing_url)
        soup = BeautifulSoup(response_html, "html.parser")
        self._set_advertiser_url(soup)
        if self.advertiser_url:
            response_html_advertiser = self.crud.get_html(self.advertiser_url)
            soup_advertiser = BeautifulSoup(response_html_advertiser, "html.parser")
            self._set_advertiser_name_activity_rating(soup_advertiser)
        else:
            self._set_advertiser_name_activity_rating(None)

    def _set_advertiser_url(self, page_soup):
        """Extract the advertiser page URL from the listing page, if present."""
        top_info = page_soup.find("div", class_="top-info")
        try:
            self.advertiser_url = top_info.find("a", href=True)["href"]
        except AttributeError:
            # Either the top-info block or its anchor is missing.
            self.advertiser_url = None

    def _set_advertiser_name_activity_rating(self, page_soup):
        """Best-effort scrape of name, activity and rating from the advertiser page."""
        if not self.advertiser_url:
            return
        try:
            self.name = page_soup.find("div", class_="mp-TopSection-TitleWrap-Name").string
        except Exception:
            pass
        try:
            activity_block = page_soup.find_all("div", class_="mp-SellerHeaderInfo-item")
            self.activity = activity_block[1].string if activity_block[1].string else "_"
            self.rating = activity_block[0].string if activity_block[0].string else "_"
        except Exception:
            pass

    def __repr__(self):
        # BUGFIX: __repr__ was an @property returning a dict, which made
        # repr(obj) raise TypeError (property objects are not callable).
        # It is now a regular method returning the same text __str__ produced.
        return str({"name": self.name,
                    "activity": self.activity,
                    "rating": self.rating})

    def __str__(self):
        return self.__repr__()
from .abc_listing import ListingBaseClass
from bs4 import BeautifulSoup
from dataclasses import dataclass
@dataclass
class CarListingAdvertiser(ListingBaseClass):
    """Scrapes the advertiser (seller) details attached to a car listing.

    Attributes keep their "_" placeholder when a value cannot be scraped.
    """

    __postendpoint__ = "/advertisers"
    __webpageurl__ = "https://www.marktplaats.nl/v/"
    name: str = "_"
    activity: str = "_"
    rating: str = "_"
    advertiser_url: str = ""

    def __init__(self, web_page_listing, crud, *args, **kwargs):
        super().__init__(web_page_listing, crud, *args, **kwargs)

    def get_attributes(self):
        """Fetch the listing page, locate the advertiser page, and scrape it."""
        response_html = self.crud.get_html(self._listing_url)
        soup = BeautifulSoup(response_html, "html.parser")
        self._set_advertiser_url(soup)
        if self.advertiser_url:
            response_html_advertiser = self.crud.get_html(self.advertiser_url)
            soup_advertiser = BeautifulSoup(response_html_advertiser, "html.parser")
            self._set_advertiser_name_activity_rating(soup_advertiser)
        else:
            self._set_advertiser_name_activity_rating(None)

    def _set_advertiser_url(self, page_soup):
        """Extract the advertiser page URL from the listing page, if present."""
        top_info = page_soup.find("div", class_="top-info")
        try:
            self.advertiser_url = top_info.find("a", href=True)["href"]
        except AttributeError:
            # Either the top-info block or its anchor is missing.
            self.advertiser_url = None

    def _set_advertiser_name_activity_rating(self, page_soup):
        """Best-effort scrape of name, activity and rating from the advertiser page."""
        if not self.advertiser_url:
            return
        try:
            self.name = page_soup.find("div", class_="mp-TopSection-TitleWrap-Name").string
        except Exception:
            pass
        try:
            activity_block = page_soup.find_all("div", class_="mp-SellerHeaderInfo-item")
            self.activity = activity_block[1].string if activity_block[1].string else "_"
            self.rating = activity_block[0].string if activity_block[0].string else "_"
        except Exception:
            pass

    def __repr__(self):
        # BUGFIX: __repr__ was an @property returning a dict, which made
        # repr(obj) raise TypeError (property objects are not callable).
        # It is now a regular method returning the same text __str__ produced.
        return str({"name": self.name,
                    "activity": self.activity,
                    "rating": self.rating})

    def __str__(self):
        return self.__repr__()
from __future__ import absolute_import
import socket
import unittest
from threading import Thread
from hl7apy.mllp import MLLPServer, AbstractHandler
from hl7apy.mllp import InvalidHL7Message, UnsupportedMessageType
# Address of the MLLP test server started by launch_server().
HOST = 'localhost'
PORT = 2576
# Canned replies returned by ErrorHandler.
# NOTE(review): both constants carry the same text, so the tests cannot tell
# the two error paths apart - presumably intentional; confirm before changing.
INVALID_MESSAGE = 'INVALID MESSAGE'
UNSUPPORTED_MESSAGE = 'INVALID MESSAGE'
# PDQ (Patient Demographics Query) request template; {} receives the MSH-9
# message-type field.
PDQ_REQ_TPL = \
    'MSH|^~\&|REC APP|REC FAC|SENDING APP|SENTING FAC|20110708163513||{}|1|D|2.5|||||ITA||EN\r' \
    'QPD|IHE PDQ Query|111069|@PID.3.1^1||||\r' \
    'RCP|I|'
PDQ_REQ = PDQ_REQ_TPL.format('QBP^Q22^QBP_Q21')
PDQV_REQ = PDQ_REQ_TPL.format('QBP^ZV1^QBP_Q21')
# Matching canned response template.
PDQ_RES_TPL = \
    'MSH|^~\&|SENDING APP|SENDING FAC|REC APP|REC FAC|20110708163514||{}|2|D|2.5|||||ITA||EN\r' \
    'MSA|AA|26775702551812240|\r' \
    'QAK|1|OK||1|1|0\r' \
    'QPD|IHE PDQ Query|111069|@PID.3.1^1010110909194822~@PID.5.1^SMITH||||\r' \
    'PID|1||1^^^lis||MOUSE^MICKEY^^^^^A||19690113|M|||VIA VIA^^CAGLIARI^^^100^H^^|||||||MOSMCK|||||CAGLIARI|||||\r'
PDQ_RES = PDQ_RES_TPL.format('RSP^K22^RSP_K21')
PDQV_RES = PDQ_RES_TPL.format('RSP^ZV2^RSP_ZV2')
class PDQHandler(AbstractHandler):
    # Minimal handler: always answers with the canned PDQ response.
    def reply(self):
        return PDQ_RES
class ErrorHandler(AbstractHandler):
    """Handler invoked when a message fails parsing or dispatch."""

    def __init__(self, exc, msg):
        super(ErrorHandler, self).__init__(msg)
        self.exc = exc

    def reply(self):
        # Map the stored exception to the matching canned error reply.
        if isinstance(self.exc, InvalidHL7Message):
            return INVALID_MESSAGE
        if isinstance(self.exc, UnsupportedMessageType):
            return UNSUPPORTED_MESSAGE
        return None
class CustomArgsPDQHandler(AbstractHandler):
    """PDQ handler taking an extra constructor flag selecting the PDQV reply."""

    def __init__(self, msg, is_pdqv):
        super(CustomArgsPDQHandler, self).__init__(msg)
        self.is_pdqv = is_pdqv

    def reply(self):
        return PDQV_RES if self.is_pdqv else PDQ_RES
def launch_server(host, port, handlers):
    """Start an MLLPServer in a daemon thread; return (server, thread)."""
    server = MLLPServer(host, port, handlers, timeout=3)
    worker = Thread(target=server.serve_forever, daemon=True)
    worker.start()
    return server, worker
def stop_server(server, thread):
    # Shut the server down and wait for its serving thread to finish.
    server.shutdown()
    thread.join()
class TestMLLPWithErrorHandler(unittest.TestCase):
    """MLLP server tests with an 'ERR' handler registered."""

    @classmethod
    def setUpClass(cls):
        # Register the regular handlers plus the error handler.
        handlers = {
            'QBP^Q22^QBP_Q21': (PDQHandler,),
            'QBP^ZV1^QBP_Q21': (CustomArgsPDQHandler, True),
            'ERR': (ErrorHandler,),
        }
        cls.server, cls.thread = launch_server(HOST, PORT, handlers)

    @classmethod
    def tearDownClass(cls):
        stop_server(cls.server, cls.thread)

    def _client(self, msg):
        """Send *msg* over a fresh TCP connection and return the full reply."""
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            conn.connect((HOST, PORT))
            conn.sendall(msg.encode('utf-8'))
            # Read byte-by-byte until the server closes the connection.
            reply = b''.join(iter(lambda: conn.recv(1), b''))
        finally:
            conn.close()
        return reply.decode('utf-8')

    def test_good_message(self):
        framed = '\x0b{}\x1c\x0d'.format(PDQ_REQ)
        self.assertEqual(self._client(framed), PDQ_RES)

    def test_good_message_with_args(self):
        framed = '\x0b{}\x1c\x0d'.format(PDQV_REQ)
        self.assertEqual(self._client(framed), PDQV_RES)

    def test_not_er7_message(self):
        self.assertEqual(self._client('\x0bWRONG\x1c\x0d'), UNSUPPORTED_MESSAGE)

    def test_unsupported_message(self):
        framed = '\x0b{}\x1c\x0d'.format(PDQ_RES)
        self.assertEqual(self._client(framed), UNSUPPORTED_MESSAGE)

    def test_timeout(self):
        # No trailing \x0d terminator: the server should time out and reply nothing.
        framed = '\x0b{}\x1c'.format(PDQ_REQ)
        self.assertEqual(self._client(framed), '')
class TestMLLPWithoutErrorHandler(unittest.TestCase):
    """MLLP server tests without an 'ERR' handler: errors produce empty replies."""

    @classmethod
    def setUpClass(cls):
        handlers = {'QBP^Q22^QBP_Q21': (PDQHandler,)}
        cls.server, cls.thread = launch_server(HOST, PORT + 1, handlers)

    @classmethod
    def tearDownClass(cls):
        stop_server(cls.server, cls.thread)

    def _client(self, msg):
        """Send *msg* over a fresh TCP connection and return the full reply."""
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            conn.connect((HOST, PORT + 1))
            conn.sendall(msg.encode('utf-8'))
            # Read byte-by-byte until the server closes the connection.
            reply = b''.join(iter(lambda: conn.recv(1), b''))
        finally:
            conn.close()
        return reply.decode('utf-8')

    def test_good_message(self):
        framed = '\x0b{}\x1c\x0d'.format(PDQ_REQ)
        self.assertEqual(self._client(framed), PDQ_RES)

    def test_not_er7_message(self):
        self.assertEqual(self._client('\x0bWRONG\x1c\x0d'), '')

    def test_unsupported_message(self):
        framed = '\x0b{}\x1c\x0d'.format(PDQ_RES)
        self.assertEqual(self._client(framed), '')

    def test_timeout(self):
        framed = '\x0b{}\x1c'.format(PDQ_REQ)
        self.assertEqual(self._client(framed), '')
if __name__ == '__main__':
    # Build an explicit suite so the two server fixtures run in a fixed order.
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    suite.addTest(loader.loadTestsFromTestCase(TestMLLPWithErrorHandler))
    suite.addTest(loader.loadTestsFromTestCase(TestMLLPWithoutErrorHandler))
    unittest.TextTestRunner().run(suite)
from __future__ import absolute_import
import socket
import unittest
from threading import Thread
from hl7apy.mllp import MLLPServer, AbstractHandler
from hl7apy.mllp import InvalidHL7Message, UnsupportedMessageType
# Address of the MLLP test server started by launch_server().
HOST = 'localhost'
PORT = 2576
# Canned replies returned by ErrorHandler.
# NOTE(review): both constants carry the same text, so the tests cannot tell
# the two error paths apart - presumably intentional; confirm before changing.
INVALID_MESSAGE = 'INVALID MESSAGE'
UNSUPPORTED_MESSAGE = 'INVALID MESSAGE'
# PDQ (Patient Demographics Query) request template; {} receives the MSH-9
# message-type field.
PDQ_REQ_TPL = \
    'MSH|^~\&|REC APP|REC FAC|SENDING APP|SENTING FAC|20110708163513||{}|1|D|2.5|||||ITA||EN\r' \
    'QPD|IHE PDQ Query|111069|@PID.3.1^1||||\r' \
    'RCP|I|'
PDQ_REQ = PDQ_REQ_TPL.format('QBP^Q22^QBP_Q21')
PDQV_REQ = PDQ_REQ_TPL.format('QBP^ZV1^QBP_Q21')
# Matching canned response template.
PDQ_RES_TPL = \
    'MSH|^~\&|SENDING APP|SENDING FAC|REC APP|REC FAC|20110708163514||{}|2|D|2.5|||||ITA||EN\r' \
    'MSA|AA|26775702551812240|\r' \
    'QAK|1|OK||1|1|0\r' \
    'QPD|IHE PDQ Query|111069|@PID.3.1^1010110909194822~@PID.5.1^SMITH||||\r' \
    'PID|1||1^^^lis||MOUSE^MICKEY^^^^^A||19690113|M|||VIA VIA^^CAGLIARI^^^100^H^^|||||||MOSMCK|||||CAGLIARI|||||\r'
PDQ_RES = PDQ_RES_TPL.format('RSP^K22^RSP_K21')
PDQV_RES = PDQ_RES_TPL.format('RSP^ZV2^RSP_ZV2')
class PDQHandler(AbstractHandler):
    # Minimal handler: always answers with the canned PDQ response.
    def reply(self):
        return PDQ_RES
class ErrorHandler(AbstractHandler):
    """Handler invoked when a message fails parsing or dispatch."""

    def __init__(self, exc, msg):
        super(ErrorHandler, self).__init__(msg)
        self.exc = exc

    def reply(self):
        # Map the stored exception to the matching canned error reply.
        if isinstance(self.exc, InvalidHL7Message):
            return INVALID_MESSAGE
        if isinstance(self.exc, UnsupportedMessageType):
            return UNSUPPORTED_MESSAGE
        return None
class CustomArgsPDQHandler(AbstractHandler):
    """PDQ handler taking an extra constructor flag selecting the PDQV reply."""

    def __init__(self, msg, is_pdqv):
        super(CustomArgsPDQHandler, self).__init__(msg)
        self.is_pdqv = is_pdqv

    def reply(self):
        return PDQV_RES if self.is_pdqv else PDQ_RES
def launch_server(host, port, handlers):
    """Start an MLLPServer in a daemon thread; return (server, thread)."""
    server = MLLPServer(host, port, handlers, timeout=3)
    worker = Thread(target=server.serve_forever, daemon=True)
    worker.start()
    return server, worker
def stop_server(server, thread):
    # Shut the server down and wait for its serving thread to finish.
    server.shutdown()
    thread.join()
class TestMLLPWithErrorHandler(unittest.TestCase):
    """MLLP server tests with an 'ERR' handler registered."""

    @classmethod
    def setUpClass(cls):
        # Register the regular handlers plus the error handler.
        handlers = {
            'QBP^Q22^QBP_Q21': (PDQHandler,),
            'QBP^ZV1^QBP_Q21': (CustomArgsPDQHandler, True),
            'ERR': (ErrorHandler,),
        }
        cls.server, cls.thread = launch_server(HOST, PORT, handlers)

    @classmethod
    def tearDownClass(cls):
        stop_server(cls.server, cls.thread)

    def _client(self, msg):
        """Send *msg* over a fresh TCP connection and return the full reply."""
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            conn.connect((HOST, PORT))
            conn.sendall(msg.encode('utf-8'))
            # Read byte-by-byte until the server closes the connection.
            reply = b''.join(iter(lambda: conn.recv(1), b''))
        finally:
            conn.close()
        return reply.decode('utf-8')

    def test_good_message(self):
        framed = '\x0b{}\x1c\x0d'.format(PDQ_REQ)
        self.assertEqual(self._client(framed), PDQ_RES)

    def test_good_message_with_args(self):
        framed = '\x0b{}\x1c\x0d'.format(PDQV_REQ)
        self.assertEqual(self._client(framed), PDQV_RES)

    def test_not_er7_message(self):
        self.assertEqual(self._client('\x0bWRONG\x1c\x0d'), UNSUPPORTED_MESSAGE)

    def test_unsupported_message(self):
        framed = '\x0b{}\x1c\x0d'.format(PDQ_RES)
        self.assertEqual(self._client(framed), UNSUPPORTED_MESSAGE)

    def test_timeout(self):
        # No trailing \x0d terminator: the server should time out and reply nothing.
        framed = '\x0b{}\x1c'.format(PDQ_REQ)
        self.assertEqual(self._client(framed), '')
class TestMLLPWithoutErrorHandler(unittest.TestCase):
    """MLLP server tests without an 'ERR' handler: errors produce empty replies."""

    @classmethod
    def setUpClass(cls):
        handlers = {'QBP^Q22^QBP_Q21': (PDQHandler,)}
        cls.server, cls.thread = launch_server(HOST, PORT + 1, handlers)

    @classmethod
    def tearDownClass(cls):
        stop_server(cls.server, cls.thread)

    def _client(self, msg):
        """Send *msg* over a fresh TCP connection and return the full reply."""
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            conn.connect((HOST, PORT + 1))
            conn.sendall(msg.encode('utf-8'))
            # Read byte-by-byte until the server closes the connection.
            reply = b''.join(iter(lambda: conn.recv(1), b''))
        finally:
            conn.close()
        return reply.decode('utf-8')

    def test_good_message(self):
        framed = '\x0b{}\x1c\x0d'.format(PDQ_REQ)
        self.assertEqual(self._client(framed), PDQ_RES)

    def test_not_er7_message(self):
        self.assertEqual(self._client('\x0bWRONG\x1c\x0d'), '')

    def test_unsupported_message(self):
        framed = '\x0b{}\x1c\x0d'.format(PDQ_RES)
        self.assertEqual(self._client(framed), '')

    def test_timeout(self):
        framed = '\x0b{}\x1c'.format(PDQ_REQ)
        self.assertEqual(self._client(framed), '')
if __name__ == '__main__':
    # Build an explicit suite so the two server fixtures run in a fixed order.
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    suite.addTest(loader.loadTestsFromTestCase(TestMLLPWithErrorHandler))
    suite.addTest(loader.loadTestsFromTestCase(TestMLLPWithoutErrorHandler))
    unittest.TextTestRunner().run(suite)
import asyncio
import json
import fakeredis
import pytest
import tempfile
import os
import rasa.utils.io
from rasa.core import training, restore
from rasa.core import utils
from rasa.core.actions.action import ACTION_LISTEN_NAME
from rasa.core.domain import Domain
from rasa.core.events import (
UserUttered,
ActionExecuted,
Restarted,
ActionReverted,
UserUtteranceReverted,
)
from rasa.core.tracker_store import (
InMemoryTrackerStore,
RedisTrackerStore,
SQLTrackerStore,
)
from rasa.core.tracker_store import TrackerStore
from rasa.core.trackers import DialogueStateTracker, EventVerbosity
from tests.core.conftest import DEFAULT_STORIES_FILE, EXAMPLE_DOMAINS, TEST_DIALOGUES
from tests.core.utilities import (
tracker_from_dialogue_file,
read_dialogue_file,
user_uttered,
get_tracker,
)
domain = Domain.load("examples/moodbot/domain.yml")
@pytest.fixture(scope="module")
def loop():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop = rasa.utils.io.enable_async_loop_debugging(loop)
yield loop
loop.close()
class MockRedisTrackerStore(RedisTrackerStore):
def __init__(self, domain):
self.red = fakeredis.FakeStrictRedis()
self.record_exp = None
TrackerStore.__init__(self, domain)
def stores_to_be_tested():
temp = tempfile.mkdtemp()
return [
MockRedisTrackerStore(domain),
InMemoryTrackerStore(domain),
SQLTrackerStore(domain, db=os.path.join(temp, "rasa.db")),
]
def stores_to_be_tested_ids():
return ["redis-tracker", "in-memory-tracker", "SQL-tracker"]
def test_tracker_duplicate():
filename = "data/test_dialogues/moodbot.json"
dialogue = read_dialogue_file(filename)
tracker = DialogueStateTracker(dialogue.name, domain.slots)
tracker.recreate_from_dialogue(dialogue)
num_actions = len(
[event for event in dialogue.events if isinstance(event, ActionExecuted)]
)
# There is always one duplicated tracker more than we have actions,
# as the tracker also gets duplicated for the
# action that would be next (but isn't part of the operations)
assert len(list(tracker.generate_all_prior_trackers())) == num_actions + 1
@pytest.mark.parametrize("store", stores_to_be_tested(), ids=stores_to_be_tested_ids())
def test_tracker_store_storage_and_retrieval(store):
tracker = store.get_or_create_tracker("some-id")
# the retrieved tracker should be empty
assert tracker.sender_id == "some-id"
# Action listen should be in there
assert list(tracker.events) == [ActionExecuted(ACTION_LISTEN_NAME)]
# lets log a test message
intent = {"name": "greet", "confidence": 1.0}
tracker.update(UserUttered("/greet", intent, []))
assert tracker.latest_message.intent.get("name") == "greet"
store.save(tracker)
# retrieving the same tracker should result in the same tracker
retrieved_tracker = store.get_or_create_tracker("some-id")
assert retrieved_tracker.sender_id == "some-id"
assert len(retrieved_tracker.events) == 2
assert retrieved_tracker.latest_message.intent.get("name") == "greet"
# getting another tracker should result in an empty tracker again
other_tracker = store.get_or_create_tracker("some-other-id")
assert other_tracker.sender_id == "some-other-id"
assert len(other_tracker.events) == 1
@pytest.mark.parametrize("store", stores_to_be_tested(), ids=stores_to_be_tested_ids())
@pytest.mark.parametrize("pair", zip(TEST_DIALOGUES, EXAMPLE_DOMAINS))
def test_tracker_store(store, pair):
filename, domainpath = pair
domain = Domain.load(domainpath)
tracker = tracker_from_dialogue_file(filename, domain)
store.save(tracker)
restored = store.retrieve(tracker.sender_id)
assert restored == tracker
async def test_tracker_write_to_story(tmpdir, moodbot_domain):
tracker = tracker_from_dialogue_file(
"data/test_dialogues/moodbot.json", moodbot_domain
)
p = tmpdir.join("export.md")
tracker.export_stories_to_file(p.strpath)
trackers = await training.load_data(
p.strpath,
moodbot_domain,
use_story_concatenation=False,
tracker_limit=1000,
remove_duplicates=False,
)
assert len(trackers) == 1
recovered = trackers[0]
assert len(recovered.events) == 11
assert recovered.events[4].type_name == "user"
assert recovered.events[4].intent == {"confidence": 1.0, "name": "mood_unhappy"}
async def test_tracker_state_regression_without_bot_utterance(default_agent):
sender_id = "test_tracker_state_regression_without_bot_utterance"
for i in range(0, 2):
await default_agent.handle_message("/greet", sender_id=sender_id)
tracker = default_agent.tracker_store.get_or_create_tracker(sender_id)
# Ensures that the tracker has changed between the utterances
# (and wasn't reset in between them)
expected = "action_listen;greet;utter_greet;action_listen;greet;action_listen"
assert (
";".join([e.as_story_string() for e in tracker.events if e.as_story_string()])
== expected
)
async def test_tracker_state_regression_with_bot_utterance(default_agent):
sender_id = "test_tracker_state_regression_with_bot_utterance"
for i in range(0, 2):
await default_agent.handle_message("/greet", sender_id=sender_id)
tracker = default_agent.tracker_store.get_or_create_tracker(sender_id)
expected = [
"action_listen",
"greet",
"utter_greet",
None,
"action_listen",
"greet",
"action_listen",
]
assert [e.as_story_string() for e in tracker.events] == expected
async def test_bot_utterance_comes_after_action_event(default_agent):
sender_id = "test_bot_utterance_comes_after_action_event"
await default_agent.handle_message("/greet", sender_id=sender_id)
tracker = default_agent.tracker_store.get_or_create_tracker(sender_id)
# important is, that the 'bot' comes after the second 'action' and not
# before
expected = ["action", "user", "action", "bot", "action"]
assert [e.type_name for e in tracker.events] == expected
def test_tracker_entity_retrieval(default_domain):
tracker = DialogueStateTracker("default", default_domain.slots)
# the retrieved tracker should be empty
assert len(tracker.events) == 0
assert list(tracker.get_latest_entity_values("entity_name")) == []
intent = {"name": "greet", "confidence": 1.0}
tracker.update(
UserUttered(
"/greet",
intent,
[
{
"start": 1,
"end": 5,
"value": "greet",
"entity": "entity_name",
"extractor": "manual",
}
],
)
)
assert list(tracker.get_latest_entity_values("entity_name")) == ["greet"]
assert list(tracker.get_latest_entity_values("unknown")) == []
def test_tracker_update_slots_with_entity(default_domain):
tracker = DialogueStateTracker("default", default_domain.slots)
test_entity = default_domain.entities[0]
expected_slot_value = "test user"
intent = {"name": "greet", "confidence": 1.0}
tracker.update(
UserUttered(
"/greet",
intent,
[
{
"start": 1,
"end": 5,
"value": expected_slot_value,
"entity": test_entity,
"extractor": "manual",
}
],
),
default_domain,
)
assert tracker.get_slot(test_entity) == expected_slot_value
def test_restart_event(default_domain):
tracker = DialogueStateTracker("default", default_domain.slots)
# the retrieved tracker should be empty
assert len(tracker.events) == 0
intent = {"name": "greet", "confidence": 1.0}
tracker.update(ActionExecuted(ACTION_LISTEN_NAME))
tracker.update(UserUttered("/greet", intent, []))
tracker.update(ActionExecuted("my_action"))
tracker.update(ActionExecuted(ACTION_LISTEN_NAME))
assert len(tracker.events) == 4
assert tracker.latest_message.text == "/greet"
assert len(list(tracker.generate_all_prior_trackers())) == 4
tracker.update(Restarted())
assert len(tracker.events) == 5
assert tracker.followup_action is not None
assert tracker.followup_action == ACTION_LISTEN_NAME
assert tracker.latest_message.text is None
assert len(list(tracker.generate_all_prior_trackers())) == 1
dialogue = tracker.as_dialogue()
recovered = DialogueStateTracker("default", default_domain.slots)
recovered.recreate_from_dialogue(dialogue)
assert recovered.current_state() == tracker.current_state()
assert len(recovered.events) == 5
assert recovered.latest_message.text is None
assert len(list(recovered.generate_all_prior_trackers())) == 1
def test_revert_action_event(default_domain):
tracker = DialogueStateTracker("default", default_domain.slots)
# the retrieved tracker should be empty
assert len(tracker.events) == 0
intent = {"name": "greet", "confidence": 1.0}
tracker.update(ActionExecuted(ACTION_LISTEN_NAME))
tracker.update(UserUttered("/greet", intent, []))
tracker.update(ActionExecuted("my_action"))
tracker.update(ActionExecuted(ACTION_LISTEN_NAME))
# Expecting count of 4:
# +3 executed actions
# +1 final state
assert tracker.latest_action_name == ACTION_LISTEN_NAME
assert len(list(tracker.generate_all_prior_trackers())) == 4
tracker.update(ActionReverted())
# Expecting count of 3:
# +3 executed actions
# +1 final state
# -1 reverted action
assert tracker.latest_action_name == "my_action"
assert len(list(tracker.generate_all_prior_trackers())) == 3
dialogue = tracker.as_dialogue()
recovered = DialogueStateTracker("default", default_domain.slots)
recovered.recreate_from_dialogue(dialogue)
assert recovered.current_state() == tracker.current_state()
assert tracker.latest_action_name == "my_action"
assert len(list(tracker.generate_all_prior_trackers())) == 3
def test_revert_user_utterance_event(default_domain):
tracker = DialogueStateTracker("default", default_domain.slots)
# the retrieved tracker should be empty
assert len(tracker.events) == 0
intent1 = {"name": "greet", "confidence": 1.0}
tracker.update(ActionExecuted(ACTION_LISTEN_NAME))
tracker.update(UserUttered("/greet", intent1, []))
tracker.update(ActionExecuted("my_action_1"))
tracker.update(ActionExecuted(ACTION_LISTEN_NAME))
intent2 = {"name": "goodbye", "confidence": 1.0}
tracker.update(UserUttered("/goodbye", intent2, []))
tracker.update(ActionExecuted("my_action_2"))
tracker.update(ActionExecuted(ACTION_LISTEN_NAME))
# Expecting count of 6:
# +5 executed actions
# +1 final state
assert tracker.latest_action_name == ACTION_LISTEN_NAME
assert len(list(tracker.generate_all_prior_trackers())) == 6
tracker.update(UserUtteranceReverted())
# Expecting count of 3:
# +5 executed actions
# +1 final state
# -2 rewound actions associated with the /goodbye
# -1 rewound action from the listen right before /goodbye
assert tracker.latest_action_name == "my_action_1"
assert len(list(tracker.generate_all_prior_trackers())) == 3
dialogue = tracker.as_dialogue()
recovered = DialogueStateTracker("default", default_domain.slots)
recovered.recreate_from_dialogue(dialogue)
assert recovered.current_state() == tracker.current_state()
assert tracker.latest_action_name == "my_action_1"
assert len(list(tracker.generate_all_prior_trackers())) == 3
def test_traveling_back_in_time(default_domain):
tracker = DialogueStateTracker("default", default_domain.slots)
# the retrieved tracker should be empty
assert len(tracker.events) == 0
intent = {"name": "greet", "confidence": 1.0}
tracker.update(ActionExecuted(ACTION_LISTEN_NAME))
tracker.update(UserUttered("/greet", intent, []))
import time
time.sleep(1)
time_for_timemachine = time.time()
time.sleep(1)
tracker.update(ActionExecuted("my_action"))
tracker.update(ActionExecuted(ACTION_LISTEN_NAME))
# Expecting count of 4:
# +3 executed actions
# +1 final state
assert tracker.latest_action_name == ACTION_LISTEN_NAME
assert len(tracker.events) == 4
assert len(list(tracker.generate_all_prior_trackers())) == 4
tracker = tracker.travel_back_in_time(time_for_timemachine)
# Expecting count of 2:
# +1 executed actions
# +1 final state
assert tracker.latest_action_name == ACTION_LISTEN_NAME
assert len(tracker.events) == 2
assert len(list(tracker.generate_all_prior_trackers())) == 2
async def test_dump_and_restore_as_json(default_agent, tmpdir_factory):
trackers = await default_agent.load_data(DEFAULT_STORIES_FILE)
for tracker in trackers:
out_path = tmpdir_factory.mktemp("tracker").join("dumped_tracker.json")
dumped = tracker.current_state(EventVerbosity.AFTER_RESTART)
utils.dump_obj_as_json_to_file(out_path.strpath, dumped)
restored_tracker = restore.load_tracker_from_json(
out_path.strpath, default_agent.domain
)
assert restored_tracker == tracker
def test_read_json_dump(default_agent):
tracker_dump = "data/test_trackers/tracker_moodbot.json"
tracker_json = json.loads(rasa.utils.io.read_file(tracker_dump))
restored_tracker = restore.load_tracker_from_json(
tracker_dump, default_agent.domain
)
assert len(restored_tracker.events) == 7
assert restored_tracker.latest_action_name == "action_listen"
assert not restored_tracker.is_paused()
assert restored_tracker.sender_id == "mysender"
assert restored_tracker.events[-1].timestamp == 1517821726.211042
restored_state = restored_tracker.current_state(EventVerbosity.AFTER_RESTART)
assert restored_state == tracker_json
def test_current_state_after_restart(default_agent):
tracker_dump = "data/test_trackers/tracker_moodbot.json"
tracker_json = json.loads(rasa.utils.io.read_file(tracker_dump))
tracker_json["events"].insert(3, {"event": "restart"})
tracker = DialogueStateTracker.from_dict(
tracker_json.get("sender_id"),
tracker_json.get("events", []),
default_agent.domain.slots,
)
events_after_restart = [e.as_dict() for e in list(tracker.events)[4:]]
state = tracker.current_state(EventVerbosity.AFTER_RESTART)
assert state.get("events") == events_after_restart
def test_current_state_all_events(default_agent):
tracker_dump = "data/test_trackers/tracker_moodbot.json"
tracker_json = json.loads(rasa.utils.io.read_file(tracker_dump))
tracker_json["events"].insert(3, {"event": "restart"})
tracker = DialogueStateTracker.from_dict(
tracker_json.get("sender_id"),
tracker_json.get("events", []),
default_agent.domain.slots,
)
evts = [e.as_dict() for e in tracker.events]
state = tracker.current_state(EventVerbosity.ALL)
assert state.get("events") == evts
def test_current_state_no_events(default_agent):
tracker_dump = "data/test_trackers/tracker_moodbot.json"
tracker_json = json.loads(rasa.utils.io.read_file(tracker_dump))
tracker = DialogueStateTracker.from_dict(
tracker_json.get("sender_id"),
tracker_json.get("events", []),
default_agent.domain.slots,
)
state = tracker.current_state(EventVerbosity.NONE)
assert state.get("events") is None
def test_current_state_applied_events(default_agent):
tracker_dump = "data/test_trackers/tracker_moodbot.json"
tracker_json = json.loads(rasa.utils.io.read_file(tracker_dump))
# add some events that result in other events not being applied anymore
tracker_json["events"].insert(1, {"event": "restart"})
tracker_json["events"].insert(7, {"event": "rewind"})
tracker_json["events"].insert(8, {"event": "undo"})
tracker = DialogueStateTracker.from_dict(
tracker_json.get("sender_id"),
tracker_json.get("events", []),
default_agent.domain.slots,
)
evts = [e.as_dict() for e in tracker.events]
applied_events = [evts[2], evts[9]]
state = tracker.current_state(EventVerbosity.APPLIED)
assert state.get("events") == applied_events
async def test_tracker_dump_e2e_story(default_agent):
sender_id = "test_tracker_dump_e2e_story"
await default_agent.handle_message("/greet", sender_id=sender_id)
await default_agent.handle_message("/goodbye", sender_id=sender_id)
tracker = default_agent.tracker_store.get_or_create_tracker(sender_id)
story = tracker.export_stories(e2e=True)
assert story.strip().split("\n") == [
"## test_tracker_dump_e2e_story",
"* greet: /greet",
" - utter_greet",
"* goodbye: /goodbye",
]
def test_get_last_event_for():
events = [ActionExecuted("one"), user_uttered("two", 1)]
tracker = get_tracker(events)
assert tracker.get_last_event_for(ActionExecuted).action_name == "one"
def test_get_last_event_with_reverted():
events = [ActionExecuted("one"), ActionReverted(), user_uttered("two", 1)]
tracker = get_tracker(events)
assert tracker.get_last_event_for(ActionExecuted) is None
def test_get_last_event_for_with_skip():
events = [ActionExecuted("one"), user_uttered("two", 1), ActionExecuted("three")]
tracker = get_tracker(events)
assert tracker.get_last_event_for(ActionExecuted, skip=1).action_name == "one"
def test_get_last_event_for_with_exclude():
events = [ActionExecuted("one"), user_uttered("two", 1), ActionExecuted("three")]
tracker = get_tracker(events)
assert (
tracker.get_last_event_for(
ActionExecuted, action_names_to_exclude=["three"]
).action_name
== "one"
)
def test_last_executed_has():
events = [
ActionExecuted("one"),
user_uttered("two", 1),
ActionExecuted(ACTION_LISTEN_NAME),
]
tracker = get_tracker(events)
assert tracker.last_executed_action_has("one") is True
def test_last_executed_has_not_name():
events = [
ActionExecuted("one"),
user_uttered("two", 1),
ActionExecuted(ACTION_LISTEN_NAME),
]
tracker = get_tracker(events)
assert tracker.last_executed_action_has("another") is False | tests/core/test_trackers.py | import asyncio
import json
import fakeredis
import pytest
import tempfile
import os
import rasa.utils.io
from rasa.core import training, restore
from rasa.core import utils
from rasa.core.actions.action import ACTION_LISTEN_NAME
from rasa.core.domain import Domain
from rasa.core.events import (
UserUttered,
ActionExecuted,
Restarted,
ActionReverted,
UserUtteranceReverted,
)
from rasa.core.tracker_store import (
InMemoryTrackerStore,
RedisTrackerStore,
SQLTrackerStore,
)
from rasa.core.tracker_store import TrackerStore
from rasa.core.trackers import DialogueStateTracker, EventVerbosity
from tests.core.conftest import DEFAULT_STORIES_FILE, EXAMPLE_DOMAINS, TEST_DIALOGUES
from tests.core.utilities import (
tracker_from_dialogue_file,
read_dialogue_file,
user_uttered,
get_tracker,
)
domain = Domain.load("examples/moodbot/domain.yml")
@pytest.fixture(scope="module")
def loop():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop = rasa.utils.io.enable_async_loop_debugging(loop)
yield loop
loop.close()
class MockRedisTrackerStore(RedisTrackerStore):
def __init__(self, domain):
self.red = fakeredis.FakeStrictRedis()
self.record_exp = None
TrackerStore.__init__(self, domain)
def stores_to_be_tested():
temp = tempfile.mkdtemp()
return [
MockRedisTrackerStore(domain),
InMemoryTrackerStore(domain),
SQLTrackerStore(domain, db=os.path.join(temp, "rasa.db")),
]
def stores_to_be_tested_ids():
return ["redis-tracker", "in-memory-tracker", "SQL-tracker"]
def test_tracker_duplicate():
filename = "data/test_dialogues/moodbot.json"
dialogue = read_dialogue_file(filename)
tracker = DialogueStateTracker(dialogue.name, domain.slots)
tracker.recreate_from_dialogue(dialogue)
num_actions = len(
[event for event in dialogue.events if isinstance(event, ActionExecuted)]
)
# There is always one duplicated tracker more than we have actions,
# as the tracker also gets duplicated for the
# action that would be next (but isn't part of the operations)
assert len(list(tracker.generate_all_prior_trackers())) == num_actions + 1
@pytest.mark.parametrize("store", stores_to_be_tested(), ids=stores_to_be_tested_ids())
def test_tracker_store_storage_and_retrieval(store):
tracker = store.get_or_create_tracker("some-id")
# the retrieved tracker should be empty
assert tracker.sender_id == "some-id"
# Action listen should be in there
assert list(tracker.events) == [ActionExecuted(ACTION_LISTEN_NAME)]
# lets log a test message
intent = {"name": "greet", "confidence": 1.0}
tracker.update(UserUttered("/greet", intent, []))
assert tracker.latest_message.intent.get("name") == "greet"
store.save(tracker)
# retrieving the same tracker should result in the same tracker
retrieved_tracker = store.get_or_create_tracker("some-id")
assert retrieved_tracker.sender_id == "some-id"
assert len(retrieved_tracker.events) == 2
assert retrieved_tracker.latest_message.intent.get("name") == "greet"
# getting another tracker should result in an empty tracker again
other_tracker = store.get_or_create_tracker("some-other-id")
assert other_tracker.sender_id == "some-other-id"
assert len(other_tracker.events) == 1
@pytest.mark.parametrize("store", stores_to_be_tested(), ids=stores_to_be_tested_ids())
@pytest.mark.parametrize("pair", zip(TEST_DIALOGUES, EXAMPLE_DOMAINS))
def test_tracker_store(store, pair):
filename, domainpath = pair
domain = Domain.load(domainpath)
tracker = tracker_from_dialogue_file(filename, domain)
store.save(tracker)
restored = store.retrieve(tracker.sender_id)
assert restored == tracker
async def test_tracker_write_to_story(tmpdir, moodbot_domain):
tracker = tracker_from_dialogue_file(
"data/test_dialogues/moodbot.json", moodbot_domain
)
p = tmpdir.join("export.md")
tracker.export_stories_to_file(p.strpath)
trackers = await training.load_data(
p.strpath,
moodbot_domain,
use_story_concatenation=False,
tracker_limit=1000,
remove_duplicates=False,
)
assert len(trackers) == 1
recovered = trackers[0]
assert len(recovered.events) == 11
assert recovered.events[4].type_name == "user"
assert recovered.events[4].intent == {"confidence": 1.0, "name": "mood_unhappy"}
async def test_tracker_state_regression_without_bot_utterance(default_agent):
sender_id = "test_tracker_state_regression_without_bot_utterance"
for i in range(0, 2):
await default_agent.handle_message("/greet", sender_id=sender_id)
tracker = default_agent.tracker_store.get_or_create_tracker(sender_id)
# Ensures that the tracker has changed between the utterances
# (and wasn't reset in between them)
expected = "action_listen;greet;utter_greet;action_listen;greet;action_listen"
assert (
";".join([e.as_story_string() for e in tracker.events if e.as_story_string()])
== expected
)
async def test_tracker_state_regression_with_bot_utterance(default_agent):
sender_id = "test_tracker_state_regression_with_bot_utterance"
for i in range(0, 2):
await default_agent.handle_message("/greet", sender_id=sender_id)
tracker = default_agent.tracker_store.get_or_create_tracker(sender_id)
expected = [
"action_listen",
"greet",
"utter_greet",
None,
"action_listen",
"greet",
"action_listen",
]
assert [e.as_story_string() for e in tracker.events] == expected
async def test_bot_utterance_comes_after_action_event(default_agent):
sender_id = "test_bot_utterance_comes_after_action_event"
await default_agent.handle_message("/greet", sender_id=sender_id)
tracker = default_agent.tracker_store.get_or_create_tracker(sender_id)
# important is, that the 'bot' comes after the second 'action' and not
# before
expected = ["action", "user", "action", "bot", "action"]
assert [e.type_name for e in tracker.events] == expected
def test_tracker_entity_retrieval(default_domain):
tracker = DialogueStateTracker("default", default_domain.slots)
# the retrieved tracker should be empty
assert len(tracker.events) == 0
assert list(tracker.get_latest_entity_values("entity_name")) == []
intent = {"name": "greet", "confidence": 1.0}
tracker.update(
UserUttered(
"/greet",
intent,
[
{
"start": 1,
"end": 5,
"value": "greet",
"entity": "entity_name",
"extractor": "manual",
}
],
)
)
assert list(tracker.get_latest_entity_values("entity_name")) == ["greet"]
assert list(tracker.get_latest_entity_values("unknown")) == []
def test_tracker_update_slots_with_entity(default_domain):
tracker = DialogueStateTracker("default", default_domain.slots)
test_entity = default_domain.entities[0]
expected_slot_value = "test user"
intent = {"name": "greet", "confidence": 1.0}
tracker.update(
UserUttered(
"/greet",
intent,
[
{
"start": 1,
"end": 5,
"value": expected_slot_value,
"entity": test_entity,
"extractor": "manual",
}
],
),
default_domain,
)
assert tracker.get_slot(test_entity) == expected_slot_value
def test_restart_event(default_domain):
tracker = DialogueStateTracker("default", default_domain.slots)
# the retrieved tracker should be empty
assert len(tracker.events) == 0
intent = {"name": "greet", "confidence": 1.0}
tracker.update(ActionExecuted(ACTION_LISTEN_NAME))
tracker.update(UserUttered("/greet", intent, []))
tracker.update(ActionExecuted("my_action"))
tracker.update(ActionExecuted(ACTION_LISTEN_NAME))
assert len(tracker.events) == 4
assert tracker.latest_message.text == "/greet"
assert len(list(tracker.generate_all_prior_trackers())) == 4
tracker.update(Restarted())
assert len(tracker.events) == 5
assert tracker.followup_action is not None
assert tracker.followup_action == ACTION_LISTEN_NAME
assert tracker.latest_message.text is None
assert len(list(tracker.generate_all_prior_trackers())) == 1
dialogue = tracker.as_dialogue()
recovered = DialogueStateTracker("default", default_domain.slots)
recovered.recreate_from_dialogue(dialogue)
assert recovered.current_state() == tracker.current_state()
assert len(recovered.events) == 5
assert recovered.latest_message.text is None
assert len(list(recovered.generate_all_prior_trackers())) == 1
def test_revert_action_event(default_domain):
tracker = DialogueStateTracker("default", default_domain.slots)
# the retrieved tracker should be empty
assert len(tracker.events) == 0
intent = {"name": "greet", "confidence": 1.0}
tracker.update(ActionExecuted(ACTION_LISTEN_NAME))
tracker.update(UserUttered("/greet", intent, []))
tracker.update(ActionExecuted("my_action"))
tracker.update(ActionExecuted(ACTION_LISTEN_NAME))
# Expecting count of 4:
# +3 executed actions
# +1 final state
assert tracker.latest_action_name == ACTION_LISTEN_NAME
assert len(list(tracker.generate_all_prior_trackers())) == 4
tracker.update(ActionReverted())
# Expecting count of 3:
# +3 executed actions
# +1 final state
# -1 reverted action
assert tracker.latest_action_name == "my_action"
assert len(list(tracker.generate_all_prior_trackers())) == 3
dialogue = tracker.as_dialogue()
recovered = DialogueStateTracker("default", default_domain.slots)
recovered.recreate_from_dialogue(dialogue)
assert recovered.current_state() == tracker.current_state()
assert tracker.latest_action_name == "my_action"
assert len(list(tracker.generate_all_prior_trackers())) == 3
def test_revert_user_utterance_event(default_domain):
tracker = DialogueStateTracker("default", default_domain.slots)
# the retrieved tracker should be empty
assert len(tracker.events) == 0
intent1 = {"name": "greet", "confidence": 1.0}
tracker.update(ActionExecuted(ACTION_LISTEN_NAME))
tracker.update(UserUttered("/greet", intent1, []))
tracker.update(ActionExecuted("my_action_1"))
tracker.update(ActionExecuted(ACTION_LISTEN_NAME))
intent2 = {"name": "goodbye", "confidence": 1.0}
tracker.update(UserUttered("/goodbye", intent2, []))
tracker.update(ActionExecuted("my_action_2"))
tracker.update(ActionExecuted(ACTION_LISTEN_NAME))
# Expecting count of 6:
# +5 executed actions
# +1 final state
assert tracker.latest_action_name == ACTION_LISTEN_NAME
assert len(list(tracker.generate_all_prior_trackers())) == 6
tracker.update(UserUtteranceReverted())
# Expecting count of 3:
# +5 executed actions
# +1 final state
# -2 rewound actions associated with the /goodbye
# -1 rewound action from the listen right before /goodbye
assert tracker.latest_action_name == "my_action_1"
assert len(list(tracker.generate_all_prior_trackers())) == 3
dialogue = tracker.as_dialogue()
recovered = DialogueStateTracker("default", default_domain.slots)
recovered.recreate_from_dialogue(dialogue)
assert recovered.current_state() == tracker.current_state()
assert tracker.latest_action_name == "my_action_1"
assert len(list(tracker.generate_all_prior_trackers())) == 3
def test_traveling_back_in_time(default_domain):
tracker = DialogueStateTracker("default", default_domain.slots)
# the retrieved tracker should be empty
assert len(tracker.events) == 0
intent = {"name": "greet", "confidence": 1.0}
tracker.update(ActionExecuted(ACTION_LISTEN_NAME))
tracker.update(UserUttered("/greet", intent, []))
import time
time.sleep(1)
time_for_timemachine = time.time()
time.sleep(1)
tracker.update(ActionExecuted("my_action"))
tracker.update(ActionExecuted(ACTION_LISTEN_NAME))
# Expecting count of 4:
# +3 executed actions
# +1 final state
assert tracker.latest_action_name == ACTION_LISTEN_NAME
assert len(tracker.events) == 4
assert len(list(tracker.generate_all_prior_trackers())) == 4
tracker = tracker.travel_back_in_time(time_for_timemachine)
# Expecting count of 2:
# +1 executed actions
# +1 final state
assert tracker.latest_action_name == ACTION_LISTEN_NAME
assert len(tracker.events) == 2
assert len(list(tracker.generate_all_prior_trackers())) == 2
async def test_dump_and_restore_as_json(default_agent, tmpdir_factory):
trackers = await default_agent.load_data(DEFAULT_STORIES_FILE)
for tracker in trackers:
out_path = tmpdir_factory.mktemp("tracker").join("dumped_tracker.json")
dumped = tracker.current_state(EventVerbosity.AFTER_RESTART)
utils.dump_obj_as_json_to_file(out_path.strpath, dumped)
restored_tracker = restore.load_tracker_from_json(
out_path.strpath, default_agent.domain
)
assert restored_tracker == tracker
def test_read_json_dump(default_agent):
tracker_dump = "data/test_trackers/tracker_moodbot.json"
tracker_json = json.loads(rasa.utils.io.read_file(tracker_dump))
restored_tracker = restore.load_tracker_from_json(
tracker_dump, default_agent.domain
)
assert len(restored_tracker.events) == 7
assert restored_tracker.latest_action_name == "action_listen"
assert not restored_tracker.is_paused()
assert restored_tracker.sender_id == "mysender"
assert restored_tracker.events[-1].timestamp == 1517821726.211042
restored_state = restored_tracker.current_state(EventVerbosity.AFTER_RESTART)
assert restored_state == tracker_json
def test_current_state_after_restart(default_agent):
tracker_dump = "data/test_trackers/tracker_moodbot.json"
tracker_json = json.loads(rasa.utils.io.read_file(tracker_dump))
tracker_json["events"].insert(3, {"event": "restart"})
tracker = DialogueStateTracker.from_dict(
tracker_json.get("sender_id"),
tracker_json.get("events", []),
default_agent.domain.slots,
)
events_after_restart = [e.as_dict() for e in list(tracker.events)[4:]]
state = tracker.current_state(EventVerbosity.AFTER_RESTART)
assert state.get("events") == events_after_restart
def test_current_state_all_events(default_agent):
tracker_dump = "data/test_trackers/tracker_moodbot.json"
tracker_json = json.loads(rasa.utils.io.read_file(tracker_dump))
tracker_json["events"].insert(3, {"event": "restart"})
tracker = DialogueStateTracker.from_dict(
tracker_json.get("sender_id"),
tracker_json.get("events", []),
default_agent.domain.slots,
)
evts = [e.as_dict() for e in tracker.events]
state = tracker.current_state(EventVerbosity.ALL)
assert state.get("events") == evts
def test_current_state_no_events(default_agent):
tracker_dump = "data/test_trackers/tracker_moodbot.json"
tracker_json = json.loads(rasa.utils.io.read_file(tracker_dump))
tracker = DialogueStateTracker.from_dict(
tracker_json.get("sender_id"),
tracker_json.get("events", []),
default_agent.domain.slots,
)
state = tracker.current_state(EventVerbosity.NONE)
assert state.get("events") is None
def test_current_state_applied_events(default_agent):
tracker_dump = "data/test_trackers/tracker_moodbot.json"
tracker_json = json.loads(rasa.utils.io.read_file(tracker_dump))
# add some events that result in other events not being applied anymore
tracker_json["events"].insert(1, {"event": "restart"})
tracker_json["events"].insert(7, {"event": "rewind"})
tracker_json["events"].insert(8, {"event": "undo"})
tracker = DialogueStateTracker.from_dict(
tracker_json.get("sender_id"),
tracker_json.get("events", []),
default_agent.domain.slots,
)
evts = [e.as_dict() for e in tracker.events]
applied_events = [evts[2], evts[9]]
state = tracker.current_state(EventVerbosity.APPLIED)
assert state.get("events") == applied_events
async def test_tracker_dump_e2e_story(default_agent):
sender_id = "test_tracker_dump_e2e_story"
await default_agent.handle_message("/greet", sender_id=sender_id)
await default_agent.handle_message("/goodbye", sender_id=sender_id)
tracker = default_agent.tracker_store.get_or_create_tracker(sender_id)
story = tracker.export_stories(e2e=True)
assert story.strip().split("\n") == [
"## test_tracker_dump_e2e_story",
"* greet: /greet",
" - utter_greet",
"* goodbye: /goodbye",
]
def test_get_last_event_for():
events = [ActionExecuted("one"), user_uttered("two", 1)]
tracker = get_tracker(events)
assert tracker.get_last_event_for(ActionExecuted).action_name == "one"
def test_get_last_event_with_reverted():
events = [ActionExecuted("one"), ActionReverted(), user_uttered("two", 1)]
tracker = get_tracker(events)
assert tracker.get_last_event_for(ActionExecuted) is None
def test_get_last_event_for_with_skip():
    """skip=1 skips the most recent match and returns the one before it."""
    history = [ActionExecuted("one"), user_uttered("two", 1), ActionExecuted("three")]
    tracker = get_tracker(history)
    second_most_recent = tracker.get_last_event_for(ActionExecuted, skip=1)
    assert second_most_recent.action_name == "one"
def test_get_last_event_for_with_exclude():
    """Events whose action name is excluded are skipped during the search."""
    history = [ActionExecuted("one"), user_uttered("two", 1), ActionExecuted("three")]
    tracker = get_tracker(history)
    found = tracker.get_last_event_for(
        ActionExecuted, action_names_to_exclude=["three"]
    )
    assert found.action_name == "one"
def test_last_executed_has():
    """last_executed_action_has matches the action before the final listen."""
    history = [
        ActionExecuted("one"),
        user_uttered("two", 1),
        ActionExecuted(ACTION_LISTEN_NAME),
    ]
    tracker = get_tracker(history)
    assert tracker.last_executed_action_has("one") is True
def test_last_executed_has_not_name():
    """last_executed_action_has is False for a never-executed action name."""
    history = [
        ActionExecuted("one"),
        user_uttered("two", 1),
        ActionExecuted(ACTION_LISTEN_NAME),
    ]
    tracker = get_tracker(history)
    assert tracker.last_executed_action_has("another") is False
import copy
from typing import Dict
import torch
import torch.nn as nn
import texar.torch as tx
__all__ = [
"MetaModule",
"TexarBertMetaModule"
]
class MetaModule(nn.ModuleList):
    # pylint: disable=line-too-long
    r"""A class extending :class:`torch.nn.Module`
    that registers the parameters of a :class:`torch.nn.Module`
    and performs memory-efficient parameter updates locally.

    This code is adapted from:
    https://github.com/tanyuqian/learning-data-manipulation/blob/master/magic_module.py

    It implements the calculation:
    :math:`L(\theta - \nabla_{\theta} L_{train}(\theta, \phi))`.

    Args:
        module: A :class:`torch.nn.Module`.

    This class can be used for simple input module, whose sub-modules don't
    contain other helper functions or attributes that do not belong to this
    class to perform their :meth:`forward`.
    Otherwise, since :meth:`forward` calls the input module's :meth:`forward`,
    in order to perform :meth:`forward` of the sub-modules of the input module
    correctly, this class needs to extend those sub-modules that define
    the methods needed for their :meth:`forward`, so that it inherits their
    methods to perform the sub-module's :meth:`forward`.
    For example, if the input module is :class:`tx.modules.BERTClassifier`,
    :meth:`_get_noise_shape`, :meth:`_split_heads`, :meth:`_combine_heads`
    from its sub-modules (Eg. :class:`tx.modules.BERTEncoder`) are needed to be
    exposed in this class to perform their :meth:`forward`. Please refer to
    :class:`TexarBertMetaModule` for instructions on creating a subclass from
    this one for a specific input module.
    """

    def __init__(self, module: nn.Module):
        # Initialize via nn.Module.__init__ directly (not
        # nn.ModuleList.__init__), setting up only the base
        # _parameters/_buffers/_modules machinery.
        nn.Module.__init__(self)
        # Remember the wrapped module's class so forward() can dispatch to
        # its unbound forward with this object standing in for the module.
        self._type = type(module)
        # Mirror each parameter twice: the real tensor stays registered as a
        # parameter under '_origin_<name>', while its data is exposed under
        # the original name as a buffer that update_params() can replace
        # without touching the real parameter.
        for key, value in module._parameters.items():
            if value is not None:
                self.register_parameter('_origin_' + key, value)
                self.register_buffer(key, value.data)
            else:
                self.register_buffer(key, None)
        # Buffers are deep-copied so local updates never leak back into the
        # wrapped module.
        for key, value in module._buffers.items():
            self.register_buffer(key, copy.deepcopy(value))
        # Recursively create MetaModule.
        for key, value in module._modules.items():
            # type(self) is the real class object
            # it can be MetaModule(value), or it can be its subclass,
            # eg. TexarBertMetaModule(value)
            self.add_module(key, type(self)(value))
        # Copy over any remaining plain attributes the wrapped module's
        # forward() may rely on (flags, sizes, config objects, ...).
        for key, value in module.__dict__.items():
            if key not in self.__dict__ and\
                    key not in self._buffers and\
                    key not in self._modules:
                self.__setattr__(key, value)

    def forward(self, *args, **kwargs):
        # Run the wrapped class's forward against this object, so attribute
        # lookups resolve to the locally-updated buffers.
        return self._type.forward(self, *args, **kwargs)

    def update_params(self, deltas: Dict[str, torch.Tensor]):
        """Apply ``deltas`` (dotted parameter name -> tensor) locally.

        Top-level names update this module's buffers out-of-place; dotted
        names are grouped by their first component and forwarded to the
        corresponding child MetaModule recursively.
        """
        # Values are nested {sub-name: delta} dicts, one per child module.
        sub_params: Dict[str, Dict[str, torch.Tensor]] = {}
        for key, delta in deltas.items():
            if '.' not in key:
                self._buffers[key] = self._buffers[key] + delta
            else:
                attr = key.split('.')[0]
                if attr not in sub_params:
                    sub_params[attr] = {}
                sub_params[attr]['.'.join(key.split('.')[1:])] = delta
        for key, value in sub_params.items():
            self._modules[key].update_params(value)
class TexarBertMetaModule(MetaModule,
tx.modules.EmbedderBase,
tx.modules.MultiheadAttentionEncoder):
r"""A subclass that extends :class:`MetaModule` to do parameter updates
locally for texar-pytorch Bert related modules.
Eg. :class:`tx.modules.BERTClassifier`
Please refer to its base class :class:`MetaModule` for more details.
Args:
module: A :class:`torch.nn.Module`.
This class extends :class:`tx.modules.EmbedderBase` and
:class:`tx.modules.MultiheadAttentionEncoder`, such that it inherits
their methods that are needed to perform :meth:`forward` of the modules
that utilizes these methods, Eg. :class:`tx.modules.BERTEncoder`,
:class:`tx.modules.WordEmbedder`.
Some notes of the order of the base classes that this class extends:
`MetaModule` should be the first one, so that its :meth:`forward` will
call :meth:`MetaModule.forward` instead of the :meth:`forward` of the other
base classes, such as :meth:`MultiheadAttentionEncoder.forward`.
If `MetaModule` is not the first one, then a :meth:`forward` should be
defined in this class, such that it is called correctly.
Example:
.. code-block:: python
def forward(self, *args, **kwargs):
return MetaModule.forward(self, *args, **kwargs)
"""
def __init__(self, module: nn.Module):
MetaModule.__init__(self, module) | forte/models/da_rl/magic_model.py | import copy
from typing import Dict
import torch
import torch.nn as nn
import texar.torch as tx
__all__ = [
"MetaModule",
"TexarBertMetaModule"
]
class MetaModule(nn.ModuleList):
    # pylint: disable=line-too-long
    r"""A class extending :class:`torch.nn.Module`
    that registers the parameters of a :class:`torch.nn.Module`
    and performs memory-efficient parameter updates locally.

    This code is adapted from:
    https://github.com/tanyuqian/learning-data-manipulation/blob/master/magic_module.py

    It implements the calculation:
    :math:`L(\theta - \nabla_{\theta} L_{train}(\theta, \phi))`.

    Args:
        module: A :class:`torch.nn.Module`.

    This class can be used for simple input module, whose sub-modules don't
    contain other helper functions or attributes that do not belong to this
    class to perform their :meth:`forward`.
    Otherwise, since :meth:`forward` calls the input module's :meth:`forward`,
    in order to perform :meth:`forward` of the sub-modules of the input module
    correctly, this class needs to extend those sub-modules that define
    the methods needed for their :meth:`forward`, so that it inherits their
    methods to perform the sub-module's :meth:`forward`.
    For example, if the input module is :class:`tx.modules.BERTClassifier`,
    :meth:`_get_noise_shape`, :meth:`_split_heads`, :meth:`_combine_heads`
    from its sub-modules (Eg. :class:`tx.modules.BERTEncoder`) are needed to be
    exposed in this class to perform their :meth:`forward`. Please refer to
    :class:`TexarBertMetaModule` for instructions on creating a subclass from
    this one for a specific input module.
    """

    def __init__(self, module: nn.Module):
        # Initialize via nn.Module.__init__ directly (not
        # nn.ModuleList.__init__), setting up only the base
        # _parameters/_buffers/_modules machinery.
        nn.Module.__init__(self)
        # Remember the wrapped module's class so forward() can dispatch to
        # its unbound forward with this object standing in for the module.
        self._type = type(module)
        # Mirror each parameter twice: the real tensor stays registered as a
        # parameter under '_origin_<name>', while its data is exposed under
        # the original name as a buffer that update_params() can replace
        # without touching the real parameter.
        for key, value in module._parameters.items():
            if value is not None:
                self.register_parameter('_origin_' + key, value)
                self.register_buffer(key, value.data)
            else:
                self.register_buffer(key, None)
        # Buffers are deep-copied so local updates never leak back into the
        # wrapped module.
        for key, value in module._buffers.items():
            self.register_buffer(key, copy.deepcopy(value))
        # Recursively create MetaModule.
        for key, value in module._modules.items():
            # type(self) is the real class object
            # it can be MetaModule(value), or it can be its subclass,
            # eg. TexarBertMetaModule(value)
            self.add_module(key, type(self)(value))
        # Copy over any remaining plain attributes the wrapped module's
        # forward() may rely on (flags, sizes, config objects, ...).
        for key, value in module.__dict__.items():
            if key not in self.__dict__ and\
                    key not in self._buffers and\
                    key not in self._modules:
                self.__setattr__(key, value)

    def forward(self, *args, **kwargs):
        # Run the wrapped class's forward against this object, so attribute
        # lookups resolve to the locally-updated buffers.
        return self._type.forward(self, *args, **kwargs)

    def update_params(self, deltas: Dict[str, torch.Tensor]):
        """Apply ``deltas`` (dotted parameter name -> tensor) locally.

        Top-level names update this module's buffers out-of-place; dotted
        names are grouped by their first component and forwarded to the
        corresponding child MetaModule recursively.
        """
        # Values are nested {sub-name: delta} dicts, one per child module.
        sub_params: Dict[str, Dict[str, torch.Tensor]] = {}
        for key, delta in deltas.items():
            if '.' not in key:
                self._buffers[key] = self._buffers[key] + delta
            else:
                attr = key.split('.')[0]
                if attr not in sub_params:
                    sub_params[attr] = {}
                sub_params[attr]['.'.join(key.split('.')[1:])] = delta
        for key, value in sub_params.items():
            self._modules[key].update_params(value)
class TexarBertMetaModule(MetaModule,
tx.modules.EmbedderBase,
tx.modules.MultiheadAttentionEncoder):
r"""A subclass that extends :class:`MetaModule` to do parameter updates
locally for texar-pytorch Bert related modules.
Eg. :class:`tx.modules.BERTClassifier`
Please refer to its base class :class:`MetaModule` for more details.
Args:
module: A :class:`torch.nn.Module`.
This class extends :class:`tx.modules.EmbedderBase` and
:class:`tx.modules.MultiheadAttentionEncoder`, such that it inherits
their methods that are needed to perform :meth:`forward` of the modules
that utilizes these methods, Eg. :class:`tx.modules.BERTEncoder`,
:class:`tx.modules.WordEmbedder`.
Some notes of the order of the base classes that this class extends:
`MetaModule` should be the first one, so that its :meth:`forward` will
call :meth:`MetaModule.forward` instead of the :meth:`forward` of the other
base classes, such as :meth:`MultiheadAttentionEncoder.forward`.
If `MetaModule` is not the first one, then a :meth:`forward` should be
defined in this class, such that it is called correctly.
Example:
.. code-block:: python
def forward(self, *args, **kwargs):
return MetaModule.forward(self, *args, **kwargs)
"""
def __init__(self, module: nn.Module):
MetaModule.__init__(self, module) | 0.893114 | 0.444324 |
import logging
import re
from contextlib import contextmanager
from functools import wraps
import requests
from zeep import Client
from .services import BaseService
log = logging.getLogger()
def to_snake(name):
    """Convert a CamelCase identifier to snake_case."""
    # First pass splits before each capitalized word; second pass splits
    # between a lowercase/digit character and an upper-case letter.
    partially_split = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
    fully_split = re.sub("([a-z0-9])([A-Z])", r"\1_\2", partially_split)
    return fully_split.lower()
class Trend:
    """Client wrapper combining the manager's REST services and SOAP API.

    Args:
        base_url: Root URL of the manager.
        username: Optional account name; login happens automatically only
            when both ``username`` and ``password`` are given.
        password: Optional password. The previous default was a
            ``<PASSWORD>`` credential-scrubbing placeholder, which is not
            valid Python; ``None`` restores the intended opt-in login.
    """

    def __init__(self, base_url, username=None, password=None):
        self.base_url = base_url
        self.username = username
        self.password = password
        self.session = requests.Session()
        self._register_rest_services()
        # Auto-login only when full credentials were supplied.
        if username is not None and password is not None:
            self.login()

    def _register_rest_services(self):
        # One attribute per BaseService subclass, e.g.
        # AuthenticationService -> self.authentication.
        for cls in BaseService.__subclasses__():
            name = to_snake(cls.__name__.replace("Service", ""))
            service = cls(self.session, self.base_url)
            setattr(self, name, service)

    @property
    def soap(self):
        """Lazily built SOAP client, refreshed with the current session id."""
        if not hasattr(self, "_soap"):
            self._soap = SoapWrapper(self.base_url)
        self._soap.sid = self._sid
        return self._soap

    @property
    def _sid(self):
        # Session id is carried as a default query parameter on every call.
        return self.session.params["sID"]

    @_sid.setter
    def _sid(self, value):
        if value is None:
            self.session.params.pop("sID", None)
        else:
            self.session.params.update({"sID": value})

    def login(self):
        self._sid = self.authentication.login(self.username, self.password)

    def logout(self):
        self.authentication.logout()
        self._sid = None

    @contextmanager
    def as_tenant(self, tenant_name):
        """Temporarily act as ``tenant_name``; always restores the root sid.

        The restore now runs in a ``finally`` block so an exception inside
        the ``with`` body no longer leaves the session stuck on the tenant.
        """
        root_session_id = self._sid
        self._sid = self.authentication.tenant_login(tenant_name)
        try:
            yield self
        finally:
            self.authentication.logout()
            self._sid = root_session_id

    def api_version(self):
        """Return the REST API version reported by the manager."""
        response = self.session.get(f"{self.base_url}/rest/apiVersion")
        return {"version": response.text}
class SoapWrapper:
def __init__(self, base_url):
self._client = Client(f"{base_url}/webservice/Manager?WSDL")
def __getattr__(self, attr):
attr = getattr(self._client.service, attr)
if not callable(attr):
return attr
@wraps(attr)
def _wrapped(*args, **kwargs):
defaults = {"sID": self.sid}
defaults.update(kwargs)
return attr(*args, **defaults)
return _wrapped | pydsec/pydsec.py | import logging
import re
from contextlib import contextmanager
from functools import wraps
import requests
from zeep import Client
from .services import BaseService
log = logging.getLogger()
def to_snake(name):
    """Convert a CamelCase identifier to snake_case."""
    # First pass splits before each capitalized word; second pass splits
    # between a lowercase/digit character and an upper-case letter.
    partially_split = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
    fully_split = re.sub("([a-z0-9])([A-Z])", r"\1_\2", partially_split)
    return fully_split.lower()
class Trend:
    """Client wrapper combining the manager's REST services and SOAP API.

    Args:
        base_url: Root URL of the manager.
        username: Optional account name; login happens automatically only
            when both ``username`` and ``password`` are given.
        password: Optional password. The previous default was a
            ``<PASSWORD>`` credential-scrubbing placeholder, which is not
            valid Python; ``None`` restores the intended opt-in login.
    """

    def __init__(self, base_url, username=None, password=None):
        self.base_url = base_url
        self.username = username
        self.password = password
        self.session = requests.Session()
        self._register_rest_services()
        # Auto-login only when full credentials were supplied.
        if username is not None and password is not None:
            self.login()

    def _register_rest_services(self):
        # One attribute per BaseService subclass, e.g.
        # AuthenticationService -> self.authentication.
        for cls in BaseService.__subclasses__():
            name = to_snake(cls.__name__.replace("Service", ""))
            service = cls(self.session, self.base_url)
            setattr(self, name, service)

    @property
    def soap(self):
        """Lazily built SOAP client, refreshed with the current session id."""
        if not hasattr(self, "_soap"):
            self._soap = SoapWrapper(self.base_url)
        self._soap.sid = self._sid
        return self._soap

    @property
    def _sid(self):
        # Session id is carried as a default query parameter on every call.
        return self.session.params["sID"]

    @_sid.setter
    def _sid(self, value):
        if value is None:
            self.session.params.pop("sID", None)
        else:
            self.session.params.update({"sID": value})

    def login(self):
        self._sid = self.authentication.login(self.username, self.password)

    def logout(self):
        self.authentication.logout()
        self._sid = None

    @contextmanager
    def as_tenant(self, tenant_name):
        """Temporarily act as ``tenant_name``; always restores the root sid.

        The restore now runs in a ``finally`` block so an exception inside
        the ``with`` body no longer leaves the session stuck on the tenant.
        """
        root_session_id = self._sid
        self._sid = self.authentication.tenant_login(tenant_name)
        try:
            yield self
        finally:
            self.authentication.logout()
            self._sid = root_session_id

    def api_version(self):
        """Return the REST API version reported by the manager."""
        response = self.session.get(f"{self.base_url}/rest/apiVersion")
        return {"version": response.text}
class SoapWrapper:
def __init__(self, base_url):
self._client = Client(f"{base_url}/webservice/Manager?WSDL")
def __getattr__(self, attr):
attr = getattr(self._client.service, attr)
if not callable(attr):
return attr
@wraps(attr)
def _wrapped(*args, **kwargs):
defaults = {"sID": self.sid}
defaults.update(kwargs)
return attr(*args, **defaults)
return _wrapped | 0.469763 | 0.054727 |
import argparse
import gym
import ptan
import random
import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
from ignite.engine import Engine
from lib import dqn_extra, dqn_model, epsilon_tracker, hyper_params, utils
def calc_loss(batch, _net, _target_net, gamma, _device="cpu"):
    """Categorical (C51) DQN loss: cross-entropy between the Bellman-projected
    target distribution and the online net's log-probabilities.

    Args:
        batch: iterable of experience transitions (unpacked by utils).
        _net: online CategoricalDQN network.
        _target_net: target network exposing ``both`` and ``apply_softmax``.
        gamma: discount factor used by the distribution projection.
        _device: torch device string for the created tensors.

    Returns:
        Scalar loss tensor (mean over the batch).
    """
    states, actions, rewards, dones, next_states = utils.unpack_batch(batch)
    batch_size = len(batch)
    states_v = torch.tensor(states).to(_device)
    actions_v = torch.tensor(actions).to(_device)
    next_states_v = torch.tensor(next_states).to(_device)
    # Greedy next action from the target net's Q-values; keep only that
    # action's probability distribution for each sample.
    next_distr_v, next_qvals_v = _target_net.both(next_states_v)
    next_acts = next_qvals_v.max(1)[1].data.cpu().numpy()
    next_distr = _target_net.apply_softmax(next_distr_v)
    next_distr = next_distr.data.cpu().numpy()
    next_best_distr = next_distr[range(batch_size), next_acts]
    # FIX: ``np.bool`` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``bool`` is the documented replacement for astype.
    dones = dones.astype(bool)
    # Project the target distribution through the Bellman update.
    proj_distr = dqn_extra.distr_projection(next_best_distr, rewards, dones, gamma)
    distr_v = _net(states_v)
    sa_vals = distr_v[range(batch_size), actions_v.data]
    state_log_sm_v = F.log_softmax(sa_vals, dim=1)
    proj_distr_v = torch.tensor(proj_distr).to(_device)
    # Cross-entropy: -sum(target * log(predicted)), averaged over the batch.
    loss_v = -state_log_sm_v * proj_distr_v
    return loss_v.sum(dim=1).mean()
if __name__ == "__main__":
random.seed(hyper_params.SEED)
torch.manual_seed(hyper_params.SEED)
params = hyper_params.HYPER_PARAMS['pong']
parser = argparse.ArgumentParser()
parser.add_argument("--cuda", default=False, action="store_true", help="Enable CUDA")
args = parser.parse_args()
device = torch.device("cuda" if args.cuda else "cpu")
env = gym.make(params.env_name)
env = ptan.common.wrappers.wrap_dqn(env)
env.seed(hyper_params.SEED)
net = dqn_extra.CategoricalDQN(env.observation_space.shape, env.action_space.n).to(device)
target_net = ptan.agent.TargetNet(net)
selector = ptan.actions.EpsilonGreedyActionSelector(epsilon=params.epsilon_start)
epsilon_tracker = epsilon_tracker.EpsilonTracker(selector, params)
agent = ptan.agent.DQNAgent(lambda x: net.qvals(x), selector, device=device)
exp_source = ptan.experience.ExperienceSourceFirstLast(env, agent, gamma=params.gamma)
buffer = ptan.experience.ExperienceReplayBuffer(exp_source, buffer_size=params.replay_size)
optimizer = optim.Adam(net.parameters(), lr=params.learning_rate)
def process_batch(engine_for_batch, batch):
optimizer.zero_grad()
loss_v = calc_loss(batch, net, target_net.target_model, gamma=params.gamma, _device=device)
loss_v.backward()
optimizer.step()
epsilon_tracker.frame(engine_for_batch.state.iteration)
if engine_for_batch.state.iteration % params.target_net_sync == 0:
target_net.sync()
return {
"loss": loss_v.item(),
"epsilon": selector.epsilon,
}
engine = Engine(process_batch)
utils.setup_ignite(engine, params, exp_source, "07_DQN_Categorical")
engine.run(utils.batch_generator(buffer, params.replay_initial, params.batch_size)) | 2 - Code/210929 - DQN Extensions #2/03 - DQN Categorical.py | import argparse
import gym
import ptan
import random
import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
from ignite.engine import Engine
from lib import dqn_extra, dqn_model, epsilon_tracker, hyper_params, utils
def calc_loss(batch, _net, _target_net, gamma, _device="cpu"):
    """Categorical (C51) DQN loss: cross-entropy between the Bellman-projected
    target distribution and the online net's log-probabilities.

    Args:
        batch: iterable of experience transitions (unpacked by utils).
        _net: online CategoricalDQN network.
        _target_net: target network exposing ``both`` and ``apply_softmax``.
        gamma: discount factor used by the distribution projection.
        _device: torch device string for the created tensors.

    Returns:
        Scalar loss tensor (mean over the batch).
    """
    states, actions, rewards, dones, next_states = utils.unpack_batch(batch)
    batch_size = len(batch)
    states_v = torch.tensor(states).to(_device)
    actions_v = torch.tensor(actions).to(_device)
    next_states_v = torch.tensor(next_states).to(_device)
    # Greedy next action from the target net's Q-values; keep only that
    # action's probability distribution for each sample.
    next_distr_v, next_qvals_v = _target_net.both(next_states_v)
    next_acts = next_qvals_v.max(1)[1].data.cpu().numpy()
    next_distr = _target_net.apply_softmax(next_distr_v)
    next_distr = next_distr.data.cpu().numpy()
    next_best_distr = next_distr[range(batch_size), next_acts]
    # FIX: ``np.bool`` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``bool`` is the documented replacement for astype.
    dones = dones.astype(bool)
    # Project the target distribution through the Bellman update.
    proj_distr = dqn_extra.distr_projection(next_best_distr, rewards, dones, gamma)
    distr_v = _net(states_v)
    sa_vals = distr_v[range(batch_size), actions_v.data]
    state_log_sm_v = F.log_softmax(sa_vals, dim=1)
    proj_distr_v = torch.tensor(proj_distr).to(_device)
    # Cross-entropy: -sum(target * log(predicted)), averaged over the batch.
    loss_v = -state_log_sm_v * proj_distr_v
    return loss_v.sum(dim=1).mean()
if __name__ == "__main__":
random.seed(hyper_params.SEED)
torch.manual_seed(hyper_params.SEED)
params = hyper_params.HYPER_PARAMS['pong']
parser = argparse.ArgumentParser()
parser.add_argument("--cuda", default=False, action="store_true", help="Enable CUDA")
args = parser.parse_args()
device = torch.device("cuda" if args.cuda else "cpu")
env = gym.make(params.env_name)
env = ptan.common.wrappers.wrap_dqn(env)
env.seed(hyper_params.SEED)
net = dqn_extra.CategoricalDQN(env.observation_space.shape, env.action_space.n).to(device)
target_net = ptan.agent.TargetNet(net)
selector = ptan.actions.EpsilonGreedyActionSelector(epsilon=params.epsilon_start)
epsilon_tracker = epsilon_tracker.EpsilonTracker(selector, params)
agent = ptan.agent.DQNAgent(lambda x: net.qvals(x), selector, device=device)
exp_source = ptan.experience.ExperienceSourceFirstLast(env, agent, gamma=params.gamma)
buffer = ptan.experience.ExperienceReplayBuffer(exp_source, buffer_size=params.replay_size)
optimizer = optim.Adam(net.parameters(), lr=params.learning_rate)
def process_batch(engine_for_batch, batch):
optimizer.zero_grad()
loss_v = calc_loss(batch, net, target_net.target_model, gamma=params.gamma, _device=device)
loss_v.backward()
optimizer.step()
epsilon_tracker.frame(engine_for_batch.state.iteration)
if engine_for_batch.state.iteration % params.target_net_sync == 0:
target_net.sync()
return {
"loss": loss_v.item(),
"epsilon": selector.epsilon,
}
engine = Engine(process_batch)
utils.setup_ignite(engine, params, exp_source, "07_DQN_Categorical")
engine.run(utils.batch_generator(buffer, params.replay_initial, params.batch_size)) | 0.746601 | 0.339636 |
import geopandas as gpd
import json
from affine import Affine
from rasterio.windows import Window
from rasterio.vrt import WarpedVRT
from rasterio.enums import Resampling
from rio_tiler.utils import get_vrt_transform, has_alpha_band
from rio_tiler.utils import _requested_tile_aligned_with_internal_tile
def save_empty_geojson(path, crs):
    """Write a GeoJSON FeatureCollection with no features to ``path``.

    Arguments
    ---------
    path : str
        Destination file path.
    crs : int or str
        EPSG code embedded in the output's ``crs`` member.
    """
    empty_geojson_dict = {
        "type": "FeatureCollection",
        "crs":
            {
                "type": "name",
                "properties":
                    {
                        "name": "urn:ogc:def:crs:EPSG:{}".format(crs)
                    }
            },
        "features":
            []
    }
    # The ``with`` block closes the file on exit; the explicit ``f.close()``
    # that used to follow ``json.dump`` was redundant and has been removed.
    with open(path, 'w') as f:
        json.dump(empty_geojson_dict, f)
def read_cog_tile(src,
bounds,
tile_size,
indexes=None,
nodata=None,
resampling_method="bilinear",
tile_edge_padding=2):
"""
Read cloud-optimized geotiff tile.
Notes
-----
Modified from `rio-tiler <https://github.com/cogeotiff/rio-tiler>`_.
License included below per terms of use.
BSD 3-Clause License
(c) 2017 Mapbox
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Arguments
---------
src : rasterio.io.DatasetReader
rasterio.io.DatasetReader object
bounds : list
Tile bounds (left, bottom, right, top)
tile_size : list
Output image size
indexes : list of ints or a single int, optional, (defaults: None)
If `indexes` is a list, the result is a 3D array, but is
a 2D array if it is a band index number.
nodata: int or float, optional (defaults: None)
resampling_method : str, optional (default: "bilinear")
Resampling algorithm
tile_edge_padding : int, optional (default: 2)
Padding to apply to each edge of the tile when retrieving data
to assist in reducing resampling artefacts along edges.
Returns
-------
out : array, int
returns pixel value.
"""
if isinstance(indexes, int):
indexes = [indexes]
elif isinstance(indexes, tuple):
indexes = list(indexes)
vrt_params = dict(
add_alpha=True, crs='epsg:' + str(src.crs.to_epsg()),
resampling=Resampling[resampling_method]
)
vrt_transform, vrt_width, vrt_height = get_vrt_transform(
src, bounds, bounds_crs='epsg:' + str(src.crs.to_epsg()))
out_window = Window(col_off=0, row_off=0,
width=vrt_width, height=vrt_height)
if tile_edge_padding > 0 and not \
_requested_tile_aligned_with_internal_tile(src, bounds, tile_size):
vrt_transform = vrt_transform * Affine.translation(
-tile_edge_padding, -tile_edge_padding
)
orig__vrt_height = vrt_height
orig_vrt_width = vrt_width
vrt_height = vrt_height + 2 * tile_edge_padding
vrt_width = vrt_width + 2 * tile_edge_padding
out_window = Window(
col_off=tile_edge_padding,
row_off=tile_edge_padding,
width=orig_vrt_width,
height=orig__vrt_height,
)
vrt_params.update(dict(transform=vrt_transform,
width=vrt_width,
height=vrt_height))
indexes = indexes if indexes is not None else src.indexes
out_shape = (len(indexes), tile_size[1], tile_size[0])
nodata = nodata if nodata is not None else src.nodata
if nodata is not None:
vrt_params.update(dict(nodata=nodata,
add_alpha=False,
src_nodata=nodata))
if has_alpha_band(src):
vrt_params.update(dict(add_alpha=False))
with WarpedVRT(src, **vrt_params) as vrt:
data = vrt.read(
out_shape=out_shape,
indexes=indexes,
window=out_window,
resampling=Resampling[resampling_method],
)
mask = vrt.dataset_mask(out_shape=(tile_size[1], tile_size[0]),
window=out_window)
return data, mask, out_window, vrt_transform | solaris/utils/tile.py | import geopandas as gpd
import json
from affine import Affine
from rasterio.windows import Window
from rasterio.vrt import WarpedVRT
from rasterio.enums import Resampling
from rio_tiler.utils import get_vrt_transform, has_alpha_band
from rio_tiler.utils import _requested_tile_aligned_with_internal_tile
def save_empty_geojson(path, crs):
    """Write a GeoJSON FeatureCollection with no features to ``path``.

    Arguments
    ---------
    path : str
        Destination file path.
    crs : int or str
        EPSG code embedded in the output's ``crs`` member.
    """
    empty_geojson_dict = {
        "type": "FeatureCollection",
        "crs":
            {
                "type": "name",
                "properties":
                    {
                        "name": "urn:ogc:def:crs:EPSG:{}".format(crs)
                    }
            },
        "features":
            []
    }
    # The ``with`` block closes the file on exit; the explicit ``f.close()``
    # that used to follow ``json.dump`` was redundant and has been removed.
    with open(path, 'w') as f:
        json.dump(empty_geojson_dict, f)
def read_cog_tile(src,
bounds,
tile_size,
indexes=None,
nodata=None,
resampling_method="bilinear",
tile_edge_padding=2):
"""
Read cloud-optimized geotiff tile.
Notes
-----
Modified from `rio-tiler <https://github.com/cogeotiff/rio-tiler>`_.
License included below per terms of use.
BSD 3-Clause License
(c) 2017 Mapbox
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Arguments
---------
src : rasterio.io.DatasetReader
rasterio.io.DatasetReader object
bounds : list
Tile bounds (left, bottom, right, top)
tile_size : list
Output image size
indexes : list of ints or a single int, optional, (defaults: None)
If `indexes` is a list, the result is a 3D array, but is
a 2D array if it is a band index number.
nodata: int or float, optional (defaults: None)
resampling_method : str, optional (default: "bilinear")
Resampling algorithm
tile_edge_padding : int, optional (default: 2)
Padding to apply to each edge of the tile when retrieving data
to assist in reducing resampling artefacts along edges.
Returns
-------
out : array, int
returns pixel value.
"""
if isinstance(indexes, int):
indexes = [indexes]
elif isinstance(indexes, tuple):
indexes = list(indexes)
vrt_params = dict(
add_alpha=True, crs='epsg:' + str(src.crs.to_epsg()),
resampling=Resampling[resampling_method]
)
vrt_transform, vrt_width, vrt_height = get_vrt_transform(
src, bounds, bounds_crs='epsg:' + str(src.crs.to_epsg()))
out_window = Window(col_off=0, row_off=0,
width=vrt_width, height=vrt_height)
if tile_edge_padding > 0 and not \
_requested_tile_aligned_with_internal_tile(src, bounds, tile_size):
vrt_transform = vrt_transform * Affine.translation(
-tile_edge_padding, -tile_edge_padding
)
orig__vrt_height = vrt_height
orig_vrt_width = vrt_width
vrt_height = vrt_height + 2 * tile_edge_padding
vrt_width = vrt_width + 2 * tile_edge_padding
out_window = Window(
col_off=tile_edge_padding,
row_off=tile_edge_padding,
width=orig_vrt_width,
height=orig__vrt_height,
)
vrt_params.update(dict(transform=vrt_transform,
width=vrt_width,
height=vrt_height))
indexes = indexes if indexes is not None else src.indexes
out_shape = (len(indexes), tile_size[1], tile_size[0])
nodata = nodata if nodata is not None else src.nodata
if nodata is not None:
vrt_params.update(dict(nodata=nodata,
add_alpha=False,
src_nodata=nodata))
if has_alpha_band(src):
vrt_params.update(dict(add_alpha=False))
with WarpedVRT(src, **vrt_params) as vrt:
data = vrt.read(
out_shape=out_shape,
indexes=indexes,
window=out_window,
resampling=Resampling[resampling_method],
)
mask = vrt.dataset_mask(out_shape=(tile_size[1], tile_size[0]),
window=out_window)
return data, mask, out_window, vrt_transform | 0.823364 | 0.267414 |
import collections
import json
from six import string_types
from ..type import (GraphQLEnumType, GraphQLInputObjectType, GraphQLList,
GraphQLNonNull, GraphQLScalarType)
_empty_list = []
def is_valid_value(value, type):
"""Given a type and any value, return True if that value is valid."""
if isinstance(type, GraphQLNonNull):
of_type = type.of_type
if value is None:
return [u'Expected "{}", found null.'.format(type)]
return is_valid_value(value, of_type)
if value is None:
return _empty_list
if isinstance(type, GraphQLList):
item_type = type.of_type
if not isinstance(value, string_types) and isinstance(value, collections.Iterable):
errors = []
for i, item in enumerate(value):
item_errors = is_valid_value(item, item_type)
for error in item_errors:
errors.append(u'In element #{}: {}'.format(i, error))
return errors
else:
return is_valid_value(value, item_type)
if isinstance(type, GraphQLInputObjectType):
if not isinstance(value, collections.Mapping):
return [u'Expected "{}", found not an object.'.format(type)]
fields = type.get_fields()
errors = []
for provided_field in sorted(value.keys()):
if provided_field not in fields:
errors.append(u'In field "{}": Unknown field.'.format(provided_field))
for field_name, field in fields.items():
subfield_errors = is_valid_value(value.get(field_name), field.type)
errors.extend(u'In field "{}": {}'.format(field_name, e) for e in subfield_errors)
return errors
assert isinstance(type, (GraphQLScalarType, GraphQLEnumType)), \
'Must be input type'
# Scalar/Enum input checks to ensure the type can parse the value to
# a non-null value.
parse_result = type.parse_value(value)
if parse_result is None:
return [u'Expected type "{}", found {}.'.format(type, json.dumps(value))]
return _empty_list | graphql/core/utils/is_valid_value.py | import collections
import json
from six import string_types
from ..type import (GraphQLEnumType, GraphQLInputObjectType, GraphQLList,
GraphQLNonNull, GraphQLScalarType)
_empty_list = []
def is_valid_value(value, type):
"""Given a type and any value, return True if that value is valid."""
if isinstance(type, GraphQLNonNull):
of_type = type.of_type
if value is None:
return [u'Expected "{}", found null.'.format(type)]
return is_valid_value(value, of_type)
if value is None:
return _empty_list
if isinstance(type, GraphQLList):
item_type = type.of_type
if not isinstance(value, string_types) and isinstance(value, collections.Iterable):
errors = []
for i, item in enumerate(value):
item_errors = is_valid_value(item, item_type)
for error in item_errors:
errors.append(u'In element #{}: {}'.format(i, error))
return errors
else:
return is_valid_value(value, item_type)
if isinstance(type, GraphQLInputObjectType):
if not isinstance(value, collections.Mapping):
return [u'Expected "{}", found not an object.'.format(type)]
fields = type.get_fields()
errors = []
for provided_field in sorted(value.keys()):
if provided_field not in fields:
errors.append(u'In field "{}": Unknown field.'.format(provided_field))
for field_name, field in fields.items():
subfield_errors = is_valid_value(value.get(field_name), field.type)
errors.extend(u'In field "{}": {}'.format(field_name, e) for e in subfield_errors)
return errors
assert isinstance(type, (GraphQLScalarType, GraphQLEnumType)), \
'Must be input type'
# Scalar/Enum input checks to ensure the type can parse the value to
# a non-null value.
parse_result = type.parse_value(value)
if parse_result is None:
return [u'Expected type "{}", found {}.'.format(type, json.dumps(value))]
return _empty_list | 0.629205 | 0.202266 |
"""Interfaces to all of the Movie objects offered by the Trakt.tv API"""
from collections import namedtuple
from trakt.core import Alias, Comment, Genre, get, delete
from trakt.sync import (Scrobbler, comment, rate, add_to_history,
remove_from_history, add_to_watchlist,
remove_from_watchlist, add_to_collection,
remove_from_collection, search, checkin_media,
delete_checkin)
from trakt.people import Person
from trakt.utils import slugify, now, extract_ids
__author__ = '<NAME>'
__all__ = ['dismiss_recommendation', 'get_recommended_movies', 'genres',
'trending_movies', 'updated_movies', 'Release', 'Movie',
'Translation']
Translation = namedtuple('Translation', ['title', 'overview', 'tagline',
'language'])
@delete
def dismiss_recommendation(title):
    """Remove the movie named *title* from the user's recommendations.

    The slugified title is interpolated into the DELETE endpoint URI.
    """
    yield 'recommendations/movies/' + slugify(str(title))
@get
def get_recommended_movies():
    """Return :class:`Movie` objects recommended from your watching
    history and your friends, strongest recommendation first.
    """
    data = yield 'recommendations/movies'
    recommendations = []
    for raw_movie in data:
        # Flatten the nested ids mapping before building the Movie.
        extract_ids(raw_movie)
        recommendations.append(Movie(**raw_movie))
    yield recommendations
@get
def genres():
    """Fetch every Genre that a :class:`Movie` may belong to."""
    data = yield 'genres/movies'
    all_genres = []
    for genre in data:
        all_genres.append(Genre(genre['name'], genre['slug']))
    yield all_genres
@get
def trending_movies():
    """Return the movies being watched right now, each annotated with its
    current watcher count.
    """
    data = yield '/movies/trending'
    trending = []
    for entry in data:
        watcher_count = entry.pop('watchers')
        trending.append(Movie(watchers=watcher_count, **entry.pop('movie')))
    yield trending
@get
def updated_movies(timestamp=None):
    """Return every movie updated since *timestamp* (server time is PST).

    Establish a baseline with the server/time method and persist the
    timestamp between calls to keep requests efficient; when *timestamp*
    is omitted, the current time is used.
    """
    start = timestamp or now()
    data = yield 'movies/updates/{start_date}'.format(start_date=start)
    updated = []
    for entry in data:
        raw_movie = entry.pop('movie')
        extract_ids(raw_movie)
        raw_movie.update({'updated_at': entry.pop('updated_at')})
        updated.append(Movie(**raw_movie))
    yield updated
Release = namedtuple('Release', ['country', 'certification', 'release_date',
'note', 'release_type'])
class Movie(object):
"""A Class representing a Movie object"""
def __init__(self, title, year=None, slug=None, **kwargs):
super(Movie, self).__init__()
self.media_type = 'movies'
self.title = title
self.year = int(year) if year is not None else year
if self.year is not None and slug is None:
self.slug = slugify('-'.join([self.title, str(self.year)]))
else:
self.slug = slug or slugify(self.title)
self.released = self.tmdb_id = self.imdb_id = self.duration = None
self.trakt_id = self.tagline = self.overview = self.runtime = None
self.updated_at = self.trailer = self.homepage = self.rating = None
self.votes = self.language = self.available_translations = None
self.genres = self.certification = None
self._comments = self._images = self._aliases = self._people = None
self._ratings = self._releases = self._translations = None
if len(kwargs) > 0:
self._build(kwargs)
else:
self._get()
@classmethod
def search(cls, title, year=None):
"""Perform a search for a movie with a title matching *title*
:param title: The title to search for
:param year: Optional year to limit results to
"""
return search(title, search_type='movie', year=year)
@get
def _get(self):
"""Handle getting this :class:`Movie`'s data from trakt and building
our attributes from the returned data
"""
data = yield self.ext_full
self._build(data)
def _build(self, data):
"""Build this :class:`Movie` object with the data in *data*"""
extract_ids(data)
for key, val in data.items():
if hasattr(self, '_' + key):
setattr(self, '_' + key, val)
else:
setattr(self, key, val)
@property
def ext(self):
"""Base uri to retrieve basic information about this :class:`Movie`"""
return 'movies/{slug}'.format(slug=self.slug)
@property
def ext_full(self):
"""Uri to retrieve all information about this :class:`Movie`"""
return self.ext + '?extended=full'
@property
def images_ext(self):
"""Uri to retrieve additional image information"""
return self.ext + '?extended=images'
@property
@get
def aliases(self):
"""A list of :class:`Alias` objects representing all of the other
titles that this :class:`Movie` is known by, and the countries where
they go by their alternate titles
"""
if self._aliases is None:
data = yield (self.ext + '/aliases')
self._aliases = [Alias(**alias) for alias in data]
yield self._aliases
@property
def cast(self):
"""All of the cast members that worked on this :class:`Movie`"""
return [p for p in self.people if getattr(p, 'character')]
@property
@get
def comments(self):
"""All comments (shouts and reviews) for this :class:`Movie`. Most
recent comments returned first.
"""
# TODO (jnappi) Pagination
from trakt.users import User
data = yield (self.ext + '/comments')
self._comments = []
for com in data:
user = User(**com.get('user'))
self._comments.append(
Comment(user=user, **{k: com[k] for k in com if k != 'user'})
)
yield self._comments
@property
def crew(self):
"""All of the crew members that worked on this :class:`Movie`"""
return [p for p in self.people if getattr(p, 'job')]
@property
def ids(self):
"""Accessor to the trakt, imdb, and tmdb ids, as well as the trakt.tv
slug
"""
return {'ids': {'trakt': self.trakt, 'slug': self.slug,
'imdb': self.imdb, 'tmdb': self.tmdb}}
@property
@get
def images(self):
"""All of the artwork associated with this :class:`Movie`"""
if self._images is None:
data = yield self.images_ext
self._images = data.get('images', {})
yield self._images
@property
@get
def people(self):
"""A :const:`list` of all of the :class:`People` involved in this
:class:`Movie`, including both cast and crew
"""
if self._people is None:
data = yield (self.ext + '/people')
crew = data.get('crew', {})
cast = []
for c in data.get('cast', []):
person = c.pop('person')
character = c.pop('character')
cast.append(Person(character=character, **person))
_crew = []
for key in crew:
for department in crew.get(key): # lists
person = department.get('person')
person.update({'job': department.get('job')})
_crew.append(Person(**person))
self._people = cast + _crew
yield self._people
@property
@get
def ratings(self):
"""Ratings (between 0 and 10) and distribution for a movie."""
if self._ratings is None:
self._ratings = yield (self.ext + '/ratings')
yield self._ratings
@property
@get
def related(self):
"""The top 10 :class:`Movie`'s related to this :class:`Movie`"""
data = yield (self.ext + '/related')
movies = []
for movie in data:
movies.append(Movie(**movie))
yield movies
@property
@get
def watching_now(self):
"""A list of all :class:`User`'s watching a movie."""
from trakt.users import User
data = yield self.ext + '/watching'
users = []
for user in data:
users.append(User(**user))
yield users
def add_to_library(self):
"""Add this :class:`Movie` to your library."""
add_to_collection(self)
add_to_collection = add_to_library
def add_to_watchlist(self):
"""Add this :class:`Movie` to your watchlist"""
add_to_watchlist(self)
def comment(self, comment_body, spoiler=False, review=False):
"""Add a comment (shout or review) to this :class:`Move` on trakt."""
comment(self, comment_body, spoiler, review)
def dismiss(self):
"""Dismiss this movie from showing up in Movie Recommendations"""
dismiss_recommendation(title=self.title)
@get
def get_releases(self, country_code='us'):
"""Returns all :class:`Release`s for a movie including country,
certification, and release date.
:param country_code: The 2 character country code to search from
:return: a :const:`list` of :class:`Release` objects
"""
if self._releases is None:
data = yield self.ext + '/releases/{cc}'.format(cc=country_code)
self._releases = [Release(**release) for release in data]
yield self._releases
@get
def get_translations(self, country_code='us'):
"""Returns all :class:`Translation`s for a movie, including language
and translated values for title, tagline and overview.
:param country_code: The 2 character country code to search from
:return: a :const:`list` of :class:`Translation` objects
"""
if self._translations is None:
data = yield self.ext + '/translations/{cc}'.format(
cc=country_code
)
self._translations = [Translation(**translation)
for translation in data]
yield self._translations
def mark_as_seen(self, watched_at=None):
"""Add this :class:`Movie`, watched outside of trakt, to your library.
"""
add_to_history(self, watched_at)
def mark_as_unseen(self):
"""Remove this :class:`Movie`, watched outside of trakt, from your
library.
"""
remove_from_history(self)
def rate(self, rating):
"""Rate this :class:`Movie` on trakt. Depending on the current users
settings, this may also send out social updates to facebook, twitter,
tumblr, and path.
"""
rate(self, rating)
def remove_from_library(self):
"""Remove this :class:`Movie` from your library."""
remove_from_collection(self)
remove_from_collection = remove_from_library
def remove_from_watchlist(self):
remove_from_watchlist(self)
def scrobble(self, progress, app_version, app_date):
"""Notify trakt that the current user has finished watching a movie.
This commits this :class:`Movie` to the current users profile. You
should use movie/watching prior to calling this method.
:param progress: % progress, integer 0-100. It is recommended to call
the watching API every 15 minutes, then call the scrobble API near
the end of the movie to lock it in.
:param app_version: Version number of the media center, be as specific
as you can including nightly build number, etc. Used to help debug
your plugin.
:param app_date: Build date of the media center. Used to help debug
your plugin.
"""
return Scrobbler(self, progress, app_version, app_date)
def checkin(self, app_version, app_date, message="", sharing=None,
venue_id="", venue_name="", delete=False):
"""Checkin this :class:`Movie` via the TraktTV API.
:param app_version:Version number of the media center, be as specific
as you can including nightly build number, etc. Used to help debug
your plugin.
:param app_date: Build date of the media center. Used to help debug
your plugin.
:param message: Message used for sharing. If not sent, it will use the
watching string in the user settings.
:param sharing: Control sharing to any connected social networks.
:param venue_id: Foursquare venue ID.
:param venue_name: Foursquare venue name.
:param delete: If True, the checkin will be deleted.
"""
if delete:
delete_checkin()
checkin_media(self, app_version, app_date, message, sharing, venue_id,
venue_name)
def to_json_singular(self):
return {'movie': dict(title=self.title,
year=self.year,
**self.ids)}
def to_json(self):
return {'movies': [dict(title=self.title,
year=self.year,
**self.ids)]}
def __str__(self):
"""String representation of a :class:`Movie`"""
return '<Movie>: {}'.format(self.title)
__repr__ = __str__ | trakt/movies.py | """Interfaces to all of the Movie objects offered by the Trakt.tv API"""
from collections import namedtuple
from trakt.core import Alias, Comment, Genre, get, delete
from trakt.sync import (Scrobbler, comment, rate, add_to_history,
remove_from_history, add_to_watchlist,
remove_from_watchlist, add_to_collection,
remove_from_collection, search, checkin_media,
delete_checkin)
from trakt.people import Person
from trakt.utils import slugify, now, extract_ids
__author__ = '<NAME>'
__all__ = ['dismiss_recommendation', 'get_recommended_movies', 'genres',
'trending_movies', 'updated_movies', 'Release', 'Movie',
'Translation']
Translation = namedtuple('Translation', ['title', 'overview', 'tagline',
'language'])
@delete
def dismiss_recommendation(title):
"""Dismiss the movie matching the specified criteria from showing up in
recommendations.
"""
yield 'recommendations/movies/{title}'.format(title=slugify(str(title)))
@get
def get_recommended_movies():
"""Get a list of :class:`Movie`'s recommended based on your watching
history and your friends. Results are returned with the top recommendation
first.
"""
data = yield 'recommendations/movies'
movies = []
for movie in data:
extract_ids(movie)
movies.append(Movie(**movie))
yield movies
@get
def genres():
"""A list of all possible :class:`Movie` Genres"""
data = yield 'genres/movies'
yield [Genre(g['name'], g['slug']) for g in data]
@get
def trending_movies():
"""All :class:`Movie`'s being watched right now"""
data = yield '/movies/trending'
to_ret = []
for movie in data:
watchers = movie.pop('watchers')
to_ret.append(Movie(watchers=watchers, **movie.pop('movie')))
yield to_ret
@get
def updated_movies(timestamp=None):
"""Returns all movies updated since a timestamp. The server time is in PST.
To establish a baseline timestamp, you can use the server/time method. It's
recommended to store the timestamp so you can be efficient in using this
method.
"""
ts = timestamp or now()
data = yield 'movies/updates/{start_date}'.format(start_date=ts)
to_ret = []
for movie in data:
mov = movie.pop('movie')
extract_ids(mov)
mov.update({'updated_at': movie.pop('updated_at')})
to_ret.append(Movie(**mov))
yield to_ret
Release = namedtuple('Release', ['country', 'certification', 'release_date',
'note', 'release_type'])
class Movie(object):
    """A Class representing a Movie object.

    Instances are either built from pre-fetched data passed as keyword
    arguments or, failing that, fetched from the trakt API on
    construction.  The underscore-prefixed attributes are lazy caches
    backing the properties below, each of which issues an extra API call
    on first access.
    """
    def __init__(self, title, year=None, slug=None, **kwargs):
        super(Movie, self).__init__()
        self.media_type = 'movies'
        self.title = title
        self.year = int(year) if year is not None else year
        # Derive a "title-year" trakt slug unless one was supplied.
        if self.year is not None and slug is None:
            self.slug = slugify('-'.join([self.title, str(self.year)]))
        else:
            self.slug = slug or slugify(self.title)
        # Placeholders for attributes populated by _build().
        self.released = self.tmdb_id = self.imdb_id = self.duration = None
        self.trakt_id = self.tagline = self.overview = self.runtime = None
        self.updated_at = self.trailer = self.homepage = self.rating = None
        self.votes = self.language = self.available_translations = None
        self.genres = self.certification = None
        # Lazy caches backing the @property accessors below.
        self._comments = self._images = self._aliases = self._people = None
        self._ratings = self._releases = self._translations = None
        if len(kwargs) > 0:
            self._build(kwargs)
        else:
            # No data supplied: fetch everything from the API.
            self._get()
    @classmethod
    def search(cls, title, year=None):
        """Perform a search for a movie with a title matching *title*
        :param title: The title to search for
        :param year: Optional year to limit results to
        """
        return search(title, search_type='movie', year=year)
    @get
    def _get(self):
        """Handle getting this :class:`Movie`'s data from trakt and building
        our attributes from the returned data
        """
        data = yield self.ext_full
        self._build(data)
    def _build(self, data):
        """Build this :class:`Movie` object with the data in *data*.

        Keys that have a matching underscore-prefixed attribute are stored
        in that cache slot; everything else becomes a plain attribute.
        """
        extract_ids(data)
        for key, val in data.items():
            if hasattr(self, '_' + key):
                setattr(self, '_' + key, val)
            else:
                setattr(self, key, val)
    @property
    def ext(self):
        """Base uri to retrieve basic information about this :class:`Movie`"""
        return 'movies/{slug}'.format(slug=self.slug)
    @property
    def ext_full(self):
        """Uri to retrieve all information about this :class:`Movie`"""
        return self.ext + '?extended=full'
    @property
    def images_ext(self):
        """Uri to retrieve additional image information"""
        return self.ext + '?extended=images'
    @property
    @get
    def aliases(self):
        """A list of :class:`Alias` objects representing all of the other
        titles that this :class:`Movie` is known by, and the countries where
        they go by their alternate titles
        """
        # Fetched once and cached in _aliases.
        if self._aliases is None:
            data = yield (self.ext + '/aliases')
            self._aliases = [Alias(**alias) for alias in data]
        yield self._aliases
    @property
    def cast(self):
        """All of the cast members that worked on this :class:`Movie`"""
        # Cast entries are the people that carry a truthy 'character'.
        return [p for p in self.people if getattr(p, 'character')]
    @property
    @get
    def comments(self):
        """All comments (shouts and reviews) for this :class:`Movie`. Most
        recent comments returned first.
        """
        # TODO (jnappi) Pagination
        # Imported here to avoid a circular import with trakt.users.
        from trakt.users import User
        data = yield (self.ext + '/comments')
        self._comments = []
        for com in data:
            user = User(**com.get('user'))
            self._comments.append(
                Comment(user=user, **{k: com[k] for k in com if k != 'user'})
            )
        yield self._comments
    @property
    def crew(self):
        """All of the crew members that worked on this :class:`Movie`"""
        # Crew entries are the people that carry a truthy 'job'.
        return [p for p in self.people if getattr(p, 'job')]
    @property
    def ids(self):
        """Accessor to the trakt, imdb, and tmdb ids, as well as the trakt.tv
        slug
        """
        # NOTE(review): relies on self.trakt / self.imdb / self.tmdb having
        # been set by _build() via extract_ids(); raises AttributeError if
        # the movie was built without ids — confirm against extract_ids.
        return {'ids': {'trakt': self.trakt, 'slug': self.slug,
                        'imdb': self.imdb, 'tmdb': self.tmdb}}
    @property
    @get
    def images(self):
        """All of the artwork associated with this :class:`Movie`"""
        if self._images is None:
            data = yield self.images_ext
            self._images = data.get('images', {})
        yield self._images
    @property
    @get
    def people(self):
        """A :const:`list` of all of the :class:`People` involved in this
        :class:`Movie`, including both cast and crew
        """
        if self._people is None:
            data = yield (self.ext + '/people')
            crew = data.get('crew', {})
            cast = []
            for c in data.get('cast', []):
                person = c.pop('person')
                character = c.pop('character')
                cast.append(Person(character=character, **person))
            _crew = []
            # Crew is grouped by department; flatten all departments and
            # attach each member's job to the Person.
            for key in crew:
                for department in crew.get(key):  # lists
                    person = department.get('person')
                    person.update({'job': department.get('job')})
                    _crew.append(Person(**person))
            self._people = cast + _crew
        yield self._people
    @property
    @get
    def ratings(self):
        """Ratings (between 0 and 10) and distribution for a movie."""
        if self._ratings is None:
            self._ratings = yield (self.ext + '/ratings')
        yield self._ratings
    @property
    @get
    def related(self):
        """The top 10 :class:`Movie`'s related to this :class:`Movie`"""
        data = yield (self.ext + '/related')
        movies = []
        for movie in data:
            movies.append(Movie(**movie))
        yield movies
    @property
    @get
    def watching_now(self):
        """A list of all :class:`User`'s watching a movie."""
        from trakt.users import User
        data = yield self.ext + '/watching'
        users = []
        for user in data:
            users.append(User(**user))
        yield users
    def add_to_library(self):
        """Add this :class:`Movie` to your library."""
        # Resolves to the module-level trakt.sync.add_to_collection import.
        add_to_collection(self)
    # Alias so callers may use either name.
    add_to_collection = add_to_library
    def add_to_watchlist(self):
        """Add this :class:`Movie` to your watchlist"""
        # The name resolves to the module-level import, not this method.
        add_to_watchlist(self)
    def comment(self, comment_body, spoiler=False, review=False):
        """Add a comment (shout or review) to this :class:`Move` on trakt."""
        comment(self, comment_body, spoiler, review)
    def dismiss(self):
        """Dismiss this movie from showing up in Movie Recommendations"""
        dismiss_recommendation(title=self.title)
    @get
    def get_releases(self, country_code='us'):
        """Returns all :class:`Release`s for a movie including country,
        certification, and release date.
        :param country_code: The 2 character country code to search from
        :return: a :const:`list` of :class:`Release` objects
        """
        if self._releases is None:
            data = yield self.ext + '/releases/{cc}'.format(cc=country_code)
            self._releases = [Release(**release) for release in data]
        yield self._releases
    @get
    def get_translations(self, country_code='us'):
        """Returns all :class:`Translation`s for a movie, including language
        and translated values for title, tagline and overview.
        :param country_code: The 2 character country code to search from
        :return: a :const:`list` of :class:`Translation` objects
        """
        if self._translations is None:
            data = yield self.ext + '/translations/{cc}'.format(
                cc=country_code
            )
            self._translations = [Translation(**translation)
                                  for translation in data]
        yield self._translations
    def mark_as_seen(self, watched_at=None):
        """Add this :class:`Movie`, watched outside of trakt, to your library.
        """
        add_to_history(self, watched_at)
    def mark_as_unseen(self):
        """Remove this :class:`Movie`, watched outside of trakt, from your
        library.
        """
        remove_from_history(self)
    def rate(self, rating):
        """Rate this :class:`Movie` on trakt. Depending on the current users
        settings, this may also send out social updates to facebook, twitter,
        tumblr, and path.
        """
        rate(self, rating)
    def remove_from_library(self):
        """Remove this :class:`Movie` from your library."""
        remove_from_collection(self)
    # Alias so callers may use either name.
    remove_from_collection = remove_from_library
    def remove_from_watchlist(self):
        """Remove this :class:`Movie` from your watchlist."""
        remove_from_watchlist(self)
    def scrobble(self, progress, app_version, app_date):
        """Notify trakt that the current user has finished watching a movie.
        This commits this :class:`Movie` to the current users profile. You
        should use movie/watching prior to calling this method.
        :param progress: % progress, integer 0-100. It is recommended to call
            the watching API every 15 minutes, then call the scrobble API near
            the end of the movie to lock it in.
        :param app_version: Version number of the media center, be as specific
            as you can including nightly build number, etc. Used to help debug
            your plugin.
        :param app_date: Build date of the media center. Used to help debug
            your plugin.
        """
        return Scrobbler(self, progress, app_version, app_date)
    def checkin(self, app_version, app_date, message="", sharing=None,
                venue_id="", venue_name="", delete=False):
        """Checkin this :class:`Movie` via the TraktTV API.
        :param app_version:Version number of the media center, be as specific
            as you can including nightly build number, etc. Used to help debug
            your plugin.
        :param app_date: Build date of the media center. Used to help debug
            your plugin.
        :param message: Message used for sharing. If not sent, it will use the
            watching string in the user settings.
        :param sharing: Control sharing to any connected social networks.
        :param venue_id: Foursquare venue ID.
        :param venue_name: Foursquare venue name.
        :param delete: If True, the checkin will be deleted.
        """
        if delete:
            delete_checkin()
        # NOTE(review): checkin_media is still invoked after delete_checkin()
        # above — confirm that deleting is meant to be followed by a new
        # checkin rather than returning early.
        checkin_media(self, app_version, app_date, message, sharing, venue_id,
                      venue_name)
    def to_json_singular(self):
        # Payload shape used by endpoints that accept a single movie.
        return {'movie': dict(title=self.title,
                              year=self.year,
                              **self.ids)}
    def to_json(self):
        # Payload shape used by endpoints that accept a list of movies.
        return {'movies': [dict(title=self.title,
                                year=self.year,
                                **self.ids)]}
    def __str__(self):
        """String representation of a :class:`Movie`"""
        return '<Movie>: {}'.format(self.title)
    __repr__ = __str__
"""Tests for the X File System (XFS) file-like object."""
import os
import unittest
from dfvfs.file_io import xfs_file_io
from dfvfs.lib import definitions
from dfvfs.lib import errors
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import context
from tests import test_lib as shared_test_lib
class XFSFileTest(shared_test_lib.BaseTestCase):
  """Tests the file-like object implementation using pyfsxfs.file_entry."""
  # Inode numbers of the fixture files inside the xfs.raw test image.
  _INODE_ANOTHER_FILE = 11078
  _INODE_PASSWORDS_TXT = 11077
  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    super(XFSFileTest, self).setUp()
    self._resolver_context = context.Context()
    test_path = self._GetTestFilePath(['xfs.raw'])
    self._SkipIfPathNotExists(test_path)
    # Layer a RAW storage media path spec on top of the OS file so the
    # XFS path specs in the tests can use it as parent.
    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    self._raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
  def tearDown(self):
    """Cleans up the needed objects used throughout the test."""
    self._resolver_context.Empty()
  def testOpenCloseIdentifier(self):
    """Test the open and close functionality using an inode."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_XFS,
        inode=self._INODE_PASSWORDS_TXT,
        parent=self._raw_path_spec)
    file_object = xfs_file_io.XFSFile(self._resolver_context, path_spec)
    file_object.Open()
    self.assertEqual(file_object.get_size(), 116)
    # TODO: add a failing scenario.
  def testOpenCloseLocation(self):
    """Test the open and close functionality using a location."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_XFS, location='/passwords.txt',
        parent=self._raw_path_spec)
    file_object = xfs_file_io.XFSFile(self._resolver_context, path_spec)
    file_object.Open()
    self.assertEqual(file_object.get_size(), 116)
    # Try open with a path specification that has no parent.
    path_spec.parent = None
    file_object = xfs_file_io.XFSFile(self._resolver_context, path_spec)
    with self.assertRaises(errors.PathSpecError):
      file_object.Open()
  def testSeek(self):
    """Test the seek functionality."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_XFS, location='/a_directory/another_file',
        inode=self._INODE_ANOTHER_FILE,
        parent=self._raw_path_spec)
    file_object = xfs_file_io.XFSFile(self._resolver_context, path_spec)
    file_object.Open()
    self.assertEqual(file_object.get_size(), 22)
    # Exercise SEEK_SET (default), SEEK_END and SEEK_CUR in turn.
    file_object.seek(10)
    self.assertEqual(file_object.read(5), b'other')
    self.assertEqual(file_object.get_offset(), 15)
    file_object.seek(-10, os.SEEK_END)
    self.assertEqual(file_object.read(5), b'her f')
    file_object.seek(2, os.SEEK_CUR)
    self.assertEqual(file_object.read(2), b'e.')
    # Conforming to the POSIX seek the offset can exceed the file size
    # but reading will result in no data being returned.
    file_object.seek(300, os.SEEK_SET)
    self.assertEqual(file_object.get_offset(), 300)
    self.assertEqual(file_object.read(2), b'')
    with self.assertRaises(IOError):
      file_object.seek(-10, os.SEEK_SET)
    # On error the offset should not change.
    self.assertEqual(file_object.get_offset(), 300)
    with self.assertRaises(IOError):
      file_object.seek(10, 5)
    # On error the offset should not change.
    self.assertEqual(file_object.get_offset(), 300)
  def testRead(self):
    """Test the read functionality."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_XFS, location='/passwords.txt',
        inode=self._INODE_PASSWORDS_TXT,
        parent=self._raw_path_spec)
    file_object = xfs_file_io.XFSFile(self._resolver_context, path_spec)
    file_object.Open()
    read_buffer = file_object.read()
    # Full expected content of /passwords.txt in the test image.
    expected_buffer = (
        b'place,user,password\n'
        b'bank,joesmith,superrich\n'
        b'alarm system,-,1234\n'
        b'treasure chest,-,1111\n'
        b'uber secret laire,admin,admin\n')
    self.assertEqual(read_buffer, expected_buffer)
    # TODO: add boundary scenarios.
if __name__ == '__main__':
unittest.main() | tests/file_io/xfs_file_io.py | """Tests for the X File System (XFS) file-like object."""
import os
import unittest
from dfvfs.file_io import xfs_file_io
from dfvfs.lib import definitions
from dfvfs.lib import errors
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import context
from tests import test_lib as shared_test_lib
class XFSFileTest(shared_test_lib.BaseTestCase):
"""Tests the file-like object implementation using pyfsxfs.file_entry."""
_INODE_ANOTHER_FILE = 11078
_INODE_PASSWORDS_TXT = 11077
def setUp(self):
"""Sets up the needed objects used throughout the test."""
super(XFSFileTest, self).setUp()
self._resolver_context = context.Context()
test_path = self._GetTestFilePath(['xfs.raw'])
self._SkipIfPathNotExists(test_path)
test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_OS, location=test_path)
self._raw_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
def tearDown(self):
"""Cleans up the needed objects used throughout the test."""
self._resolver_context.Empty()
def testOpenCloseIdentifier(self):
"""Test the open and close functionality using an inode."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_XFS,
inode=self._INODE_PASSWORDS_TXT,
parent=self._raw_path_spec)
file_object = xfs_file_io.XFSFile(self._resolver_context, path_spec)
file_object.Open()
self.assertEqual(file_object.get_size(), 116)
# TODO: add a failing scenario.
def testOpenCloseLocation(self):
"""Test the open and close functionality using a location."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_XFS, location='/passwords.txt',
parent=self._raw_path_spec)
file_object = xfs_file_io.XFSFile(self._resolver_context, path_spec)
file_object.Open()
self.assertEqual(file_object.get_size(), 116)
# Try open with a path specification that has no parent.
path_spec.parent = None
file_object = xfs_file_io.XFSFile(self._resolver_context, path_spec)
with self.assertRaises(errors.PathSpecError):
file_object.Open()
def testSeek(self):
"""Test the seek functionality."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_XFS, location='/a_directory/another_file',
inode=self._INODE_ANOTHER_FILE,
parent=self._raw_path_spec)
file_object = xfs_file_io.XFSFile(self._resolver_context, path_spec)
file_object.Open()
self.assertEqual(file_object.get_size(), 22)
file_object.seek(10)
self.assertEqual(file_object.read(5), b'other')
self.assertEqual(file_object.get_offset(), 15)
file_object.seek(-10, os.SEEK_END)
self.assertEqual(file_object.read(5), b'her f')
file_object.seek(2, os.SEEK_CUR)
self.assertEqual(file_object.read(2), b'e.')
# Conforming to the POSIX seek the offset can exceed the file size
# but reading will result in no data being returned.
file_object.seek(300, os.SEEK_SET)
self.assertEqual(file_object.get_offset(), 300)
self.assertEqual(file_object.read(2), b'')
with self.assertRaises(IOError):
file_object.seek(-10, os.SEEK_SET)
# On error the offset should not change.
self.assertEqual(file_object.get_offset(), 300)
with self.assertRaises(IOError):
file_object.seek(10, 5)
# On error the offset should not change.
self.assertEqual(file_object.get_offset(), 300)
def testRead(self):
"""Test the read functionality."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_XFS, location='/passwords.txt',
inode=self._INODE_PASSWORDS_TXT,
parent=self._raw_path_spec)
file_object = xfs_file_io.XFSFile(self._resolver_context, path_spec)
file_object.Open()
read_buffer = file_object.read()
expected_buffer = (
b'place,user,password\n'
b'bank,joesmith,superrich\n'
b'alarm system,-,1234\n'
b'treasure chest,-,1111\n'
b'uber secret laire,admin,admin\n')
self.assertEqual(read_buffer, expected_buffer)
# TODO: add boundary scenarios.
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
import tkinter as tk
import sqlite3
from files import get_current_file, global_db_path
from widgets import Frame, LabelH3, Label, FrameHilited, LabelH2, Button
from custom_combobox_widget import Combobox
from autofill import EntryAuto, EntryAutoHilited
from styles import make_formats_dict
from messages import open_message, dates_msg, InputMessage
from query_strings import (
select_date_format, select_default_date_format, delete_date_format_all,
insert_date_format_default, update_date_format_date_formats,
update_date_format_est, update_date_format_abt, update_date_format_cal,
update_date_format_befaft, update_date_format_epoch,
update_date_format_julegreg, update_date_format_span,
update_date_format_range)
import dev_tools as dt
from dev_tools import looky, seeline
'''
Treebard's policy is to let the user input dates with lots of freedom while
displaying dates without regard for how the date was input, but rather
in reference to stored user preferences. So this module is kinda complex
but since it's written strictly in accordance with the needs of Treebard,
there's nothing here that doesn't need to be here.
Another policy is to have no pop-up calendars and the like when the user
tries to input a date. These encumbrances only slow down input which is
more easily typed, and while coders might like calendar pop-ups because
they're cute or something, users find them annoying if trying to do any
significant amount of data input. In Treebard, a date is quickly input as a
typed string, with several choices of delimiter between date elements, and
with the elements typed in almost any order.
The policy of not allowing numerical month input makes it easy for Treebard
to tell which number is the year and which number is the day, except for
years less than 100. In this case a simple dialog coaches the user to input
short years with leading zeroes. So the only time a user has to worry about
inputting date elements in a fixed order is when typing "and" or "to"
between two compound dates. For example, the user can type "1852 ja 3 est
and bc 14 f 1901 abt" and Treebard will know that this means "between about
14 Feb 1901 BC and estimated 3 Jan 1852 AD". This allows the user to just
start typing, and as long as the "and" or "to" is in the right place, the
input will be correctly interpreted.
Another policy is to allow no bad date input and no ambiguous date input.
Treebard is meant to be easily sharable. Allowing numerical month input
would be good for some parts of our program by increasing flexibility of
input to infinity and beyond, but would bloat the code and open up the
possibility of easily misinterpreted dates when trees are shared from one
country to another. It would also mean more dialogs for clarification as to
which number is the month, day or year.
Another policy is to ignore that period of time when the Gregorian Calendar
was being adopted in lieu of the ancient Julian Calendar. Some genieware
uglifies these dates according to when western cultures were supposedly
making this transition. Treebard uglifies no date. The transition took
place at different times in different countries, in fact it has only
recently taken place in some countries. The user can mark his dates
"old style" or "new style" in whatever formatting he prefers, but dates like "14 Oct 1752/1753" don't exist in Treebard.
Globals are used for `root` and `widg` because we are validating a single
string found in a single Entry in a single app and none of that will ever
change. These values are set once per use and don't change during the
procedure.
I assume that everything this module does could be imported from any
number of libraries but I enjoyed writing this module three times and I
like having easy access to the procedures I'm using and knowing that the
code was custom-written for my needs and doesn't contain a bunch of extra
stuff that I don't need. DATES is a huge topic and no doubt the available
libraries for dealing with them are over my head.
I've tried making this a class, but a big class to validate one string? The
result is a bunch of instance variables that can be changed all over a big
class, which can have the same confusing effect as global variables, all to
validate one single string. I like classes but in this case, working the
code out procedurally seemed like a better approach, after trying it both
ways.
'''
formats = make_formats_dict()
def get_date_formats(tree_is_open=0):
    '''
    Return the stored date-display preferences as a single row.

    This runs on load in case the user wants to use the date calculator
    without opening a tree. It runs again when a tree loads so the user
    preferences for that tree will be used.

    tree_is_open: 1 reads the open tree's preferences; any other value
        reads the defaults from the global database.
    '''
    if tree_is_open == 1:
        current_file = get_current_file()[0]
        query = select_date_format
    else:
        # Treat anything other than 1 as "no tree open" so an unexpected
        # argument can't leave current_file unbound (previously any other
        # value raised NameError below).
        current_file = global_db_path
        query = select_default_date_format
    conn = sqlite3.connect(current_file)
    try:
        cur = conn.cursor()
        cur.execute(query)
        date_prefs = cur.fetchone()
        cur.close()
    finally:
        # Always release the connection, even if the query raises.
        conn.close()
    return date_prefs
date_prefs = get_date_formats()
# "." can't be used as a separator as it would prevent the user
# from using a dot to denote an abbreviation e.g. "A.D."
SEPTORS = (" ", "/", "-", "*", "_")
OK_MONTHS = (
'ja', 'f', 'mar', 'ap', 'may', 'jun',
'jul', 'au', 's', 'oc', 'no', 'd')
MONTH_ABBS = (
'ja.', 'jan.', 'f.', 'fe.', 'feb.', 'mar.', 'ap.', 'apr.',
'jun.', 'jul.', 'au.', 'aug.', 's.', 'se.', 'sep.', 'sept.',
'oc.', 'oct.', 'no.', 'nov.', 'd.', 'de.', 'dec.',
'jan', 'feb', 'mar', 'apr', 'jun', 'jul', 'aug', 'sep', 'sept',
'oct', 'nov', 'dec', 'fe', 'se', 'de')
FULL_MONTHS = (
"january", "february", "march", "april", "may", "june",
"july", "august", "september", "october", "november", "december")
ALL_MONTHS = [i for i in OK_MONTHS] + [i for i in MONTH_ABBS] + [i for i in FULL_MONTHS]
DAYS_30 = ('ap', 'jun', 's', 'no')
STORE_PFX = ['est', 'cal', 'abt', 'bef', 'aft']
STORE_SFX = ['ad', 'bc', 'os', 'ns', 'ce', 'bce']
OK_ABBS = STORE_PFX + STORE_SFX
MONTH_CONVERSIONS = {
'ja': ['January', 'Jan', 'Jan.'],
'f': ['February', 'Feb', 'Feb.'],
'mar': ['March', 'Mar', 'Mar.'],
'ap': ['April', 'Apr', 'Apr.'],
'may': ['May', 'May', 'May'],
'jun': ['June', 'June', 'June'],
'jul': ['July', 'July', 'July'],
'au': ['August', 'Aug', 'Aug.'],
's': ['September', 'Sep', 'Sep.'],
'oc': ['October', 'Oct', 'Oct.'],
'no': ['November', 'Nov', 'Nov.'],
'd': ['December', 'Dec', 'Dec.']}
EST = ["est", "est.", "est'd"]
ABT = ["abt", "about", "circa", "ca", "ca.", "approx."]
CAL = ["cal", "calc", "calc.", "cal.", "calc'd"]
BEF = ["bef", "bef.", "before"]
AFT = ["aft", "aft.", "after"]
BC = ["BCE", "BC", "B.C.E.", "B.C."]
AD = ["CE", "AD", "C.E.", "A.D."]
JULIAN = ["OS", "O.S.", "old style", "Old Style"]
GREGORIAN = ["NS", "N.S.", "new style", "New Style"]
PAIRS = ((BEF, AFT), (BC, AD), (JULIAN, GREGORIAN))
ABB_PAIRS = []
q = 0
for pair in PAIRS:
paired = []
for r, s in zip(pair[0], pair[1]):
stg = '{}/{}'.format(r, s)
paired.append(stg)
ABB_PAIRS.append(paired)
q += 1
DATE_PREF_COMBOS = (
("18 April 1906", "18 Apr 1906", "18 Apr. 1906", "April 18, 1906",
"Apr 18, 1906", "Apr. 18, 1906"),
EST, ABT, CAL,
ABB_PAIRS[0], ABB_PAIRS[1], ABB_PAIRS[2],
("from [date 1] to [date 2]", "fr. [date 1] to [date 2]",
"frm [date 1] to [date 2]", "fr [date 1] to [date 2]"),
("btwn [date 1] & [date 2]", "btwn [date 1] and [date 2]",
"bet [date 1] & [date 2]", "bet [date 1] and [date 2]",
"bet. [date 1] & [date 2]" , "bet. [date 1] and [date 2]"))
DATE_FORMATS = (
'dmy', 'dmy_abb', 'dmy_dot', 'mdy', 'mdy_abb', 'mdy_dot')
SPAN_FORMATS = ("from_to", "fr._to", "frm_to", "fr_to")
RANGE_FORMATS = (
"btwn_&", "btwn_and", "bet_&", "bet_and", "bet._&", "bet._and")
FORMAT_TO_STRIP = ("from", "fr.", "frm", "fr", "btwn", "bet", "bet.", ",", "between")
DATE_FORMAT_LOOKUP = dict(zip(DATE_PREF_COMBOS[0], DATE_FORMATS))
SPAN_FORMAT_LOOKUP = dict(zip(DATE_PREF_COMBOS[7], SPAN_FORMATS))
RANGE_FORMAT_LOOKUP = dict(zip(DATE_PREF_COMBOS[8], RANGE_FORMATS))
OK_PREFIXES = ABT+EST+CAL+BEF+AFT
OK_SUFFIXES = BC+AD+JULIAN+GREGORIAN
root = None
widg = None
def validate_date(parent, inwidg, final):
    """Run the full validation pipeline on a user-typed date string.

    Sets the module globals root and widg for the dialog helpers, then
    passes the input through the word/number validators, the parser, and
    the chronological sorter.  Returns the storable date string, or None
    if any stage rejected the input (a dialog will already have opened).
    """
    global root, widg
    root = parent
    widg = inwidg

    cleaned = find_bad_dates(final)
    if cleaned is None:
        return None
    parsed = make_date_dict(list(cleaned))
    if not parsed:
        return None
    date_dicts, order, link = parsed
    if date_dicts is None:
        return None
    ordered = order_compound_dates(date_dicts, order, link)
    if ordered is None:
        return None
    return make_date_string(ordered)
def find_bad_dates(final):
    """Normalize the raw input string and run the word- and number-level
    validators.

    Returns the validated compound tuple, or None if a validator rejected
    the input (it will already have shown a dialog).
    """
    final = final.replace("&", "and")
    # NOTE(review): str.replace removes these markers anywhere they occur
    # as substrings, not only as whole words (e.g. "fr" inside a term);
    # confirm this is acceptable for all expected inputs.
    for mark in FORMAT_TO_STRIP:
        final = final.replace(mark, "")
    for sep in SEPTORS:
        final = final.replace(sep, " ")
    # split() already discards surrounding whitespace, so the original
    # per-term strip loop (which rebound the loop variable and discarded
    # the result) was a no-op and has been removed.
    terms = final.split()
    compounds = find_word_errors(terms)
    if not compounds:
        return None
    return find_number_errors(compounds)
def count_month_words(info):
    """Return ([terms that name a month], compound_flag).

    compound_flag is True when a compound-date link word ("to"/"and")
    appears among the terms.
    """
    compound = False
    month_words = []
    for term in info:
        low = term.lower()
        if low in ALL_MONTHS:
            month_words.append(term)
        elif low in ("to", "and"):
            compound = True
        # Any other term is neither a month nor a link word; ignore it.
        # (The original's else branch was a bare string expression, a
        # no-op statement, now removed.)
    return month_words, compound
def err_done0(widg, dlg):
    """Dismiss an error dialog: clear the offending entry, close the
    dialog, and return keyboard focus to the entry."""
    widg.delete(0, 'end')
    dlg.destroy()
    widg.focus_set()
def find_word_errors(terms):
    """Partition the input terms around a single "and"/"to" link word and
    reject inputs with repeated link words, too many month words, days
    lacking a month, or repeated prefixes/suffixes.

    Returns (comp1, comp2, compound_date_link, compound) on success or
    None after opening an error dialog.
    """
    month_words, compound = count_month_words(terms)
    compound_date_link = None
    comp1 = []
    comp2 = []
    # Everything before the link word belongs to the first date (comp1),
    # everything after it to the second (comp2).
    for term in terms:
        if term.lower() in ("and", "to"):
            if compound_date_link is not None:
                # A second link word was typed; only one is allowed.
                msg = open_message(
                    root,
                    dates_msg[0],
                    "Repeated Compound Date Link",
                    "OK")
                msg[0].grab_set()
                msg[2].config(
                    command=lambda widg=widg, dlg=msg[0]: err_done0(widg, dlg))
                return
            compound_date_link = term
        elif compound_date_link is None:
            comp1.append(term)
        elif compound_date_link is not None:
            comp2.append(term)
        else:
            # Unreachable: the branches above cover both link states.
            print("line", looky(seeline()).lineno, "case not handled:")
    months = len(month_words)
    # Two month words are only valid in a compound date.
    if months > 1 and compound_date_link is None:
        msg = open_message(
            root,
            dates_msg[1],
            "Too Many Months Input",
            "OK")
        msg[0].grab_set()
        msg[2].config(
            command=lambda widg=widg, dlg=msg[0]: err_done0(widg, dlg))
        return
    if months > 2:
        msg = open_message(
            root,
            dates_msg[2],
            "Too Many Months Input",
            "OK")
        msg[0].grab_set()
        msg[2].config(
            command=lambda widg=widg, dlg=msg[0]: err_done0(widg, dlg))
        return
    elif months == 2:
        pass
    elif months <= 1:
        # With at most one month word, more than one number in a single
        # date means a day was typed without a month to attach it to.
        for lst in (comp1, comp2):
            n = 0
            for item in lst:
                if item.isdigit():
                    n += 1
            if months == 1 and n > 1:
                month_words2 = count_month_words(lst)[0]
                if len(month_words2) == months:
                    pass
                else:
                    msg = open_message(
                        root,
                        dates_msg[3],
                        "Day Input Without Month",
                        "OK")
                    msg[0].grab_set()
                    msg[2].config(
                        command=lambda widg=widg, dlg=msg[0]: err_done0(
                            widg, dlg))
                    return
            elif months == 0 and n == 1:
                pass
            elif months == 0 and n > 1:
                msg = open_message(
                    root,
                    dates_msg[3],
                    "Day Input Without Month",
                    "OK")
                msg[0].grab_set()
                msg[2].config(
                    command=lambda widg=widg, dlg=msg[0]: err_done0(widg, dlg))
                return
    # Each single date may carry at most one prefix and one suffix.
    for lst in (comp1, comp2):
        prefixes = 0
        suffixes = 0
        for elem in lst:
            if elem.lower() in OK_PREFIXES:
                prefixes += 1
            elif elem.upper() in OK_SUFFIXES:
                suffixes += 1
        if prefixes > 1 or suffixes > 1:
            msg = open_message(
                root,
                dates_msg[4],
                "Too Many Prefixes or Suffixes",
                "OK")
            msg[0].grab_set()
            msg[2].config(
                command=lambda widg=widg, dlg=msg[0]: err_done0(widg, dlg))
            return
    return comp1, comp2, compound_date_link, compound
def standardize_month(term):
    """Collapse a month spelling to its canonical OK_MONTHS key (e.g.
    "january" -> "ja"); a term matching no month passes through as-is."""
    for key in OK_MONTHS:
        if term.startswith(key):
            return key
    return term
def find_number_errors(compounds):
    """Reject a partitioned input whose single dates contain too many
    numbers (at most one year plus one day), too many terms, or a lone
    alphabetic term with no number at all.

    compounds is (comp1, comp2, compound_date_link, compound); only the
    two term lists are checked.  Returns compounds unchanged on success,
    None after opening an error dialog.
    """
    for lst in compounds[0:2]:
        nums = 0
        over_two_digits = 0
        lenlist = len(lst)
        for item in lst:
            if item.isdigit() is True:
                if len(item) > 2:
                    # A number longer than two digits reads as a year;
                    # two of those in one date is one year too many.
                    if over_two_digits > 0:
                        msg = open_message(
                            root,
                            dates_msg[5],
                            "Too Many Years Input",
                            "OK")
                        msg[0].grab_set()
                        msg[2].config(
                            command=lambda widg=widg, dlg=msg[0]: err_done0(
                                widg, dlg))
                        return
                    else:
                        over_two_digits += 1
                nums += 1
            # A single date holds at most two numbers: a day and a year.
            if nums >= 3:
                msg = open_message(
                    root,
                    dates_msg[6],
                    "Too Many Numerical Terms Input",
                    "OK")
                msg[0].grab_set()
                msg[2].config(
                    command=lambda widg=widg, dlg=msg[0]: err_done0(widg, dlg))
                return
            elif lenlist > 5:
                # prefix + day + month + year + suffix is the longest
                # legal single date.
                msg = open_message(
                    root,
                    dates_msg[7],
                    "Too Many Terms Input",
                    "OK")
                msg[0].grab_set()
                msg[2].config(
                    command=lambda widg=widg, dlg=msg[0]: err_done0(widg, dlg))
                return
        # A date that is one purely alphabetic term has no year at all.
        if lenlist == 1 and lst[0].isalpha() is True:
            msg = open_message(
                root,
                dates_msg[8],
                "Numerical Terms Input Lacking",
                "OK")
            msg[0].grab_set()
            msg[2].config(
                command=lambda widg=widg, dlg=msg[0]: err_done0(widg, dlg))
            return
    return compounds
def clarify_year(numbers, lst):
    '''
    For years < 100 if user types without preceding zeroes.

    Opens a dialog asking which of the two ambiguous numbers is the year
    (typed zero-padded to four digits); the other number is taken to be
    the day.  Returns (year, day, lst) with the chosen number replaced by
    the 4-digit year, or None if the answer was not four characters.
    '''
    # NOTE(review): this binds an alias, not a copy -- mutating lst below
    # also changes "copy"; confirm a real copy was not intended.
    copy = lst
    head2 = "{} or {}?".format(numbers[0], numbers[1])
    msg = InputMessage(
        root, root=root, title="Clarify Year", ok_txt="OK",
        cancel_txt="CANCEL", head1=dates_msg[11], head2=head2,
        grab=True, entry=True, wraplength=300)
    year = msg.show().strip()
    if len(year) != 4:
        msg = open_message(
            root,
            dates_msg[12],
            "No Year Entered",
            "OK")
        msg[0].grab_set()
        root.wait_window(msg[0])
        widg.delete(0, 'end')
        widg.focus_set()
        return
    a = 0
    # Find which input number the user chose as the year; the other
    # becomes the day, and the year slot in lst is overwritten with the
    # fully qualified 4-digit year.
    for num in numbers:
        if int(num) == int(year):
            if a == 1:
                day = numbers[0]
            elif a == 0:
                day = numbers[1]
            x = 0
            for item in copy:
                if item.isalpha() is False and item != day:
                    lst[x] = year
                x += 1
            break
        a += 1
    # NOTE(review): if the typed year matches neither number, "day" is
    # never bound and this return raises NameError -- confirm handling.
    return year, day, lst
def make_date_dict(final):
    """Extract month, year, day, prefix, suffix and epoch from the two
    partitioned term lists.

    final is [comp1, comp2, compound_date_link, compound] as produced by
    the validators.  Returns (date_dict, order, compound_date_link) where
    date_dict is a two-element list of per-date dicts and order gives the
    epoch ("ad"/"bc") of each date, or None on a validation failure.
    """

    def find_month(lst, b):
        # The first alphabetic term starting with a month key sets the
        # canonical month for date b.
        g = 0
        for item in lst:
            if item.isalpha():
                if item.lower().startswith(OK_MONTHS):
                    for mo in OK_MONTHS:
                        if item.lower().startswith(mo):
                            month_key = mo
                            break
                    date_dict[b]["month"] = month_key
                    break
            g += 1
        return lst

    def find_year(lst, b):

        def add_zeros(lst, the_one):
            # Left-pad a lone short number to four digits so it reads as
            # a year rather than a day.
            fixed = the_one[0]
            length = len(the_one[0])
            idx = the_one[1]
            if length == 2:
                fixed = "00" + the_one[0]
            elif length == 3:
                fixed = "0" + the_one[0]
            lst[idx] = fixed
            return lst

        num_count = []
        u = 0
        for item in lst:
            if item.isdigit():
                num_count.append((item, u))
            u += 1
        if len(num_count) == 1:
            the_one = num_count[0]
            lst = add_zeros(lst, the_one)
        under_two = 0
        nums = []
        for item in lst:
            if item.isdigit():
                nums.append(item)
                if len(item) < 3:
                    if under_two > 0:
                        # Two short numbers are ambiguous: ask the user
                        # which is the year.  Call clarify_year ONCE and
                        # reuse the result -- the original called it
                        # twice, which popped the dialog twice.
                        result = clarify_year(nums, lst)
                        if result is None:
                            return
                        year, day, lst = result
                        date_dict[b]["year"] = year
                    else:
                        under_two += 1
                elif 5 > len(item) > 2:
                    date_dict[b]["year"] = item
                    break
                elif len(item) > 4:
                    msg = open_message(
                        root,
                        dates_msg[13],
                        "Year Greater than 9999",
                        "OK")
                    msg[0].grab_set()
                    msg[2].config(
                        command=lambda widg=widg, dlg=msg[0]: err_done0(
                            widg, dlg))
                    return
        return lst

    def find_day(lst, b):
        # The first remaining number of one or two digits is the day.
        if lst is None: return
        i = 0
        for item in lst:
            if item.isdigit():
                if len(item) > 2:
                    i += 1
                    continue
                elif len(item) <= 2:
                    date_dict[b]["day"] = item
                    break
            i += 1
        return lst

    compound_date_link, compound = final[2:]
    date_dict = [{}, {}]
    if len(final) == 1:
        comps = [final[0]]
    elif len(final) > 1:
        comps = [final[0], final[1]]
    b = 0
    for lst in comps:
        lst = find_month(lst, b)
        lst = find_year(lst, b)
        lst = find_day(lst, b)
        comps[b] = lst
        b += 1
    # Warns via dialog on an impossible day-of-month; return value unused.
    check_days_in_months(date_dict)
    order = ["ad", "ad"]
    e = 0
    for lst in comps:
        if lst is None: return
        for item in lst:
            if item.upper() in BC:
                order[e] = "bc"
            elif item.upper() in AD:
                order[e] = "ad"
        e += 1
    f = 0
    # Anything that is neither a number nor a month must be a prefix or
    # suffix by now; record its storable form.
    for lst in comps:
        for item in lst:
            if not item.isdigit() and not item.lower().startswith(OK_MONTHS):
                if item.lower() in OK_PREFIXES:
                    date_dict = assign_prefixes(date_dict, item, f)
                elif (item in OK_SUFFIXES or
                        item.upper() in OK_SUFFIXES or
                        item.title() in OK_SUFFIXES):
                    date_dict = assign_suffixes(date_dict, item, f)
        f += 1
    if compound is True:
        # Both halves of a compound date parsed to the same values.
        if date_dict[0] == date_dict[1]:
            msg = open_message(
                root,
                dates_msg[9],
                "Indistinct Compound Date",
                "OK")
            msg[0].grab_set()
            msg[2].config(
                command=lambda widg=widg, dlg=msg[0]: err_done0(widg, dlg))
            return
    return date_dict, order, compound_date_link
def assign_prefixes(date_dict, item, f):
    """Record the storable prefix ("abt"/"est"/"cal"/"bef"/"aft") for
    date f, translating any accepted user spelling of that prefix."""
    spellings = ((ABT, "abt"), (EST, "est"), (CAL, "cal"),
                 (BEF, "bef"), (AFT, "aft"))
    lowered = item.lower()
    for accepted, storable in spellings:
        if lowered in accepted:
            term = storable
            break
    date_dict[f]["prefix"] = term
    return date_dict
def assign_suffixes(date_dict, item, f):
    """Record the storable suffix ("bc"/"ad"/"os"/"ns") for date f,
    matching the term as typed, uppercased, or title-cased.

    Callers only pass terms already known to be in OK_SUFFIXES in one of
    those three casings, so a match is guaranteed to be found.
    """
    groups = ((BC, "bc"), (AD, "ad"), (JULIAN, "os"), (GREGORIAN, "ns"))
    for variant in (item, item.upper(), item.title()):
        found = None
        for accepted, storable in groups:
            if variant in accepted:
                found = storable
                break
        if found is not None:
            term = found
            break
    date_dict[f]["suffix"] = term
    return date_dict
def check_days_in_months(date_dict):
    """Open a warning dialog when a parsed day number exceeds the length
    of its month.  The return value is unused by callers.

    Fixes the leap-year test: the original accepted any year divisible
    by 4, wrongly allowing 29 Feb in century years such as 1900.
    """
    for dkt in date_dict:
        # Without a month there is nothing to check for this date.
        if dkt.get("month") is None:
            continue
        if len(dkt) != 0:
            maxdays = 31
            if dkt["month"] == "f":
                maxdays = 28
                if dkt.get("year") is not None:
                    year = int(dkt["year"])
                    # Full Gregorian rule: century years are leap only
                    # when divisible by 400.
                    if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
                        maxdays = 29
                else:
                    # February without a year: can't judge 28 vs 29, so
                    # give up (this also skips any remaining dates, as
                    # in the original).
                    return
            elif dkt["month"] in DAYS_30:
                maxdays = 30
            if dkt.get("day") and int(dkt["day"]) > maxdays:
                msg = open_message(
                    root,
                    dates_msg[10],
                    "Too Many Days for the Month",
                    "OK")
                msg[0].grab_set()
                msg[2].config(
                    command=lambda widg=widg, dlg=msg[0]: err_done0(widg, dlg))
                return
def order_compound_dates(final, order, compound_date_link):
    """Arrange the two date dicts chronologically and splice the link
    word between them: returns [earlier, link, later].

    final is the two-element date_dict list; order holds each date's
    epoch ("ad"/"bc").  A single date (empty second dict) gets an empty
    link so make_date_string() takes its single-date path.
    """
    if len(final[1]) == 0:
        final.insert(1, "")
        return final
    sort1 = []
    sort2 = [[], []]
    u = 0
    # Build numeric sort keys: sort1 is the year; sort2 is [month index,
    # day] for breaking ties within the same year.
    for dkt in final:
        sort1.append(int(dkt["year"]))
        w = 1
        for mo in OK_MONTHS:
            if dkt.get("month") and dkt["month"] == mo:
                sort2[u].append(w)
                # NOTE(review): "continue" skips the w += 1 below; the
                # result is unaffected since a month matches at most
                # once, but "break" appears to be what was intended.
                continue
            w += 1
        if dkt.get("day"):
            sort2[u].append(int(dkt["day"]))
        dkt["sort1"] = sort1[u]
        dkt["sort2"] = sort2[u]
        u += 1
    if order == ["ad", "ad"]:
        # Both AD: ascending years; equal years fall back to month/day.
        fwd = sorted(final, key=lambda i: i["sort1"])
        sort_again = fwd
        if sort1[0] == sort1[1]:
            sort_again = sorted(fwd, key=lambda i: i["sort2"])
        sort_again.insert(1, compound_date_link)
        return sort_again
    elif order == ["bc", "bc"]:
        # Both BC: larger year numbers are earlier, so sort descending.
        rev = sorted(final, key=lambda i: i["sort1"], reverse=True)
        sort_again = rev
        if sort1[0] == sort1[1]:
            sort_again = sorted(rev, key=lambda i: i["sort2"])
        sort_again.insert(1, compound_date_link)
        return sort_again
    elif order == ["ad", "bc"]:
        # A BC date always precedes an AD date: swap the two.
        right = [final[1], final[0]]
        right.insert(1, compound_date_link)
        return right
    elif order == ["bc", "ad"]:
        # Already in chronological order.
        final.insert(1, compound_date_link)
        return final
def make_date_string(final):
    """Serialize the ordered [date1, link, date2] structure into the
    eleven-field, hyphen-delimited storage string:

        prefix1-year1-month1-day1-suffix1-link-prefix2-year2-month2-day2-suffix2

    Fields a date lacks are stored as empty strings.  (The original's
    no-op "link = link" statement has been removed.)
    """

    def concat_parts(
            prefix1="", year1="0000", month1="00", day1="00", suffix1="",
            link="", prefix2="", year2="", month2="", day2="", suffix2=""):
        # Join all eleven fields with hyphens; empty fields collapse to
        # consecutive hyphens.
        date_string = "{}-{}-{}-{}-{}-{}-{}-{}-{}-{}-{}".format(
            prefix1, year1, month1, day1, suffix1,
            link, prefix2, year2, month2, day2, suffix2)
        return date_string

    comp1 = final[0]
    link = final[1]
    comp2 = final[2]
    prefix1 = comp1.get("prefix", "")
    year1 = comp1.get("year", "")
    month1 = comp1.get("month", "")
    day1 = comp1.get("day", "")
    suffix1 = comp1.get("suffix", "")
    if len(link) == 0:
        # Single date: the six trailing fields stay empty.
        return concat_parts(prefix1, year1, month1, day1, suffix1)
    prefix2 = comp2.get("prefix", "")
    year2 = comp2.get("year", "")
    month2 = comp2.get("month", "")
    day2 = comp2.get("day", "")
    suffix2 = comp2.get("suffix", "")
    return concat_parts(
        prefix1, year1, month1, day1, suffix1,
        link, prefix2, year2, month2, day2, suffix2)
def format_stored_date(stored_date, date_prefs=date_prefs):
    ''' Also used in events_table.py.

    Expand an eleven-field storage string (see make_date_string) into
    display text per the user's preferences: prefix/suffix spellings,
    month form, day/month/year order, and span/range wording for
    compound dates.
    '''
    # The stored form of "nothing was entered".
    if stored_date == "-0000-00-00-------":
        return ""
    # date_prefs[0] is the overall format code, e.g. "dmy" or "mdy_abb".
    dateform = date_prefs[0]
    formatted_date = ""
    preprefix = ""
    prefix1 = ""
    year1 = ""
    month1 = ""
    day1 = ""
    suffix1 = ""
    link = ""
    prefix2 = ""
    year2 = ""
    month2 = ""
    day2 = ""
    suffix2 = ""
    span = False
    ranje = False
    compound = False
    parts = stored_date.split("-")
    if 'to' in parts:
        span = True
        compound = True
    elif 'and' in parts:
        ranje = True
        compound = True
    # Field positions: 0/6 prefix, 1/7 year, 2/8 month, 3/9 day,
    # 4/10 suffix, 5 link word.
    y = 0
    for part in parts:
        if len(part) == 0:
            pass
        elif y in (0, 6):
            part = find_prefix(part, date_prefs)
            if y == 0:
                prefix1 = part
            elif y == 6:
                prefix2 = part
        elif y in (1, 7):
            part = part.lstrip("0")
            if y == 1:
                year1 = part
            elif y == 7:
                year2 = part
        elif y in (2, 8):
            part = convert_month(part, dateform)
            if y == 2:
                month1 = part
            elif y == 8:
                month2 = part
        elif y in (3, 9):
            part = part.lstrip("0")
            if y == 3:
                day1 = part
            elif y == 9:
                day2 = part
        elif y in (4, 10):
            part = find_suffix(part, date_prefs)
            if y == 4:
                suffix1 = part
            elif y == 10:
                suffix2 = part
        elif y == 5:
            if compound is False:
                break
            # Compound wording is stored like "from_to": the first word
            # goes before the whole date, the second replaces the link.
            if span is True:
                part = date_prefs[7].split("_")
                preprefix = part[0]
                link = part[1]
            elif ranje is True:
                part = date_prefs[8].split("_")
                preprefix = part[0]
                link = part[1]
        y += 1
    # Suppress the epoch suffix for AD years above 99.
    # NOTE(review): the test "suffix in AD" only matches when the display
    # suffix is one of the canonical AD spellings; confirm behavior for
    # customized epoch preferences.
    t = 0
    for tup in ((suffix1, year1), (suffix2, year2)):
        suffix = tup[0]
        year = tup[1]
        if suffix in AD:
            if int(year) > 99:
                suffix = ""
        if t == 0:
            suffix1 = suffix
        elif t == 1:
            suffix2 = suffix
        t += 1
    # Candidate word orders; empty fields are squeezed out at the end.
    month_first_commas2 = (
        preprefix, prefix1, month1, day1 + ",", year1, suffix1,
        link, prefix2, month2, day2 + ",", year2, suffix2)
    month_first_comma_a = (
        preprefix, prefix1, month1, day1 + ",", year1, suffix1,
        link, prefix2, month2, day2, year2, suffix2)
    month_first_comma_b = (
        preprefix, prefix1, month1, day1, year1, suffix1,
        link, prefix2, month2, day2 + ",", year2, suffix2)
    month_first_no_comma = (
        preprefix, prefix1, month1, day1, year1, suffix1,
        link, prefix2, month2, day2, year2, suffix2)
    day_first = (
        preprefix, prefix1, day1, month1, year1, suffix1,
        link, prefix2, day2, month2, year2, suffix2)
    len1 = len(day1)
    len2 = len(day2)
    if "dm" in dateform:
        order = day_first
    elif "md" in dateform:
        # Month-first formats take a comma after each day that exists.
        if compound is True:
            if len1 > 0 and len2 > 0:
                order = month_first_commas2
            elif len1 > 0 and len2 == 0:
                order = month_first_comma_a
            elif len1 == 0 and len2 > 0:
                order = month_first_comma_b
            else:
                order = month_first_no_comma
        else:
            if len1 > 0:
                order = month_first_comma_a
            else:
                order = month_first_no_comma
    formatted_date = "{} {} {} {} {} {} {} {} {} {} {} {}".format(*order)
    # Collapse the gaps left by empty fields into single spaces.
    formatted_date = " ".join(formatted_date.split())
    return formatted_date
def find_prefix(part, date_prefs):
    """Translate a stored prefix key into the user's preferred display
    spelling; unknown keys display as an empty string."""
    simple = {'abt': 1, 'est': 2, 'cal': 3}
    if part in simple:
        return date_prefs[simple[part]]
    if part in ('bef', 'aft'):
        # The before/after preference is stored as one "bef/aft" string.
        bef_aft = date_prefs[4].split("/")
        return bef_aft[0] if part == 'bef' else bef_aft[1]
    return ""
def find_suffix(part, date_prefs):
    """Translate a stored suffix key ("bc"/"ad"/"os"/"ns") into the
    user's preferred display spelling; anything else displays as "".

    Fixes two defects: the original membership tests used single strings
    ('part in ("bc, ad")'), which is a substring test that also matched
    "" or "c"; and the old-style/new-style branch read the undefined
    name bc_ad (NameError) instead of its own split preference.
    """
    if part in ("bc", "ad"):
        # Epoch preference is stored as one "bc/ad" string.
        bc_ad = date_prefs[5].split("/")
        suffix = bc_ad[0] if part == "bc" else bc_ad[1]
    elif part in ("os", "ns"):
        # Calendar-style preference is stored as one "os/ns" string.
        os_ns = date_prefs[6].split("/")
        suffix = os_ns[0] if part == "os" else os_ns[1]
    else:
        suffix = ""
    return suffix
def convert_month(part, dateform):
    """Map a stored month key to its display form per the date format
    preference: full name, abbreviation, or dotted abbreviation.  An
    unknown key displays as an empty string."""
    if 'abb' in dateform:
        idx = 1
    elif 'dot' in dateform:
        idx = 2
    else:
        idx = 0
    forms = MONTH_CONVERSIONS.get(part)
    return forms[idx] if forms is not None else ""
class DatePreferences(Frame):
    """Panel that lets the user try out date input and set the per-tree
    date display preferences stored in the date_format table."""

    def __init__(self, master, *args, **kwargs):
        Frame.__init__(self, master, *args, **kwargs)
        self.master = master
        # heading text -> Combobox of sample spellings for that preference
        self.prefcombos = {}
        self.make_widgets_top()
        self.make_widgets_bottom()

    def revert_to_default(self):
        """Replace the tree's stored date formats with the defaults and
        clear every preference combobox."""
        current_file = get_current_file()[0]
        conn = sqlite3.connect(current_file)
        conn.execute('PRAGMA foreign_keys = 1')
        cur = conn.cursor()
        cur.execute(delete_date_format_all)
        conn.commit()
        cur.execute(insert_date_format_default)
        conn.commit()
        cur.close()
        conn.close()
        for combo in self.prefcombos.values():
            combo.entry.delete(0, 'end')

    def get_new_date_prefs(self):
        """Collect whatever the user picked in each preference combobox,
        translating display samples into storable format codes, then hand
        everything to set_new_date_prefs()."""
        date_form = None
        est_form = None
        abt_form = None
        cal_form = None
        befaft_form = None
        epoch_form = None
        julegreg_form = None
        span_form = None
        range_form = None
        for combo in self.prefcombos.values():
            if len(combo.entry.get()) != 0:
                var_form = combo.entry.get()
                if combo == self.prefcombos['General']:
                    date_form = var_form
                    # Translate the display sample into its format code.
                    for k,v in DATE_FORMAT_LOOKUP.items():
                        if date_form == k:
                            date_form = v
                elif combo == self.prefcombos['Estimated']:
                    est_form = var_form
                elif combo == self.prefcombos['Approximate']:
                    abt_form = var_form
                elif combo == self.prefcombos['Calculated']:
                    cal_form = var_form
                elif combo == self.prefcombos['Before/After']:
                    befaft_form = var_form
                elif combo == self.prefcombos['Epoch']:
                    epoch_form = var_form
                elif combo == self.prefcombos['Julian/Gregorian']:
                    julegreg_form = var_form
                elif combo == self.prefcombos['From...To...']:
                    span_form = var_form
                    for k,v in SPAN_FORMAT_LOOKUP.items():
                        if span_form == k:
                            span_form = v
                elif combo == self.prefcombos['Between...And...']:
                    range_form = var_form
                    for k,v in RANGE_FORMAT_LOOKUP.items():
                        if range_form == k:
                            range_form = v
        self.set_new_date_prefs(
            date_form, est_form, abt_form, cal_form, befaft_form, epoch_form,
            julegreg_form, span_form, range_form)

    def set_new_date_prefs(self,
            date_form, est_form, abt_form, cal_form, befaft_form, epoch_form,
            julegreg_form, span_form, range_form):
        """Write each non-None preference to the current tree's database
        and clear the comboboxes.

        NOTE(review): a fresh connection is opened and closed once per
        combobox in this loop; one shared connection would suffice.
        """
        for combo in self.prefcombos.values():
            current_file = get_current_file()[0]
            conn = sqlite3.connect(current_file)
            conn.execute('PRAGMA foreign_keys = 1')
            cur = conn.cursor()
            if date_form and combo is self.prefcombos['General']:
                cur.execute(update_date_format_date_formats, (date_form,))
            elif est_form and combo is self.prefcombos['Estimated']:
                cur.execute(update_date_format_est, (est_form,))
            elif abt_form and combo is self.prefcombos['Approximate']:
                cur.execute(update_date_format_abt, (abt_form,))
            elif cal_form and combo is self.prefcombos['Calculated']:
                cur.execute(update_date_format_cal, (cal_form,))
            elif befaft_form and combo is self.prefcombos['Before/After']:
                cur.execute(update_date_format_befaft, (befaft_form,))
            elif epoch_form and combo is self.prefcombos['Epoch']:
                cur.execute(update_date_format_epoch, (epoch_form,))
            elif julegreg_form and combo is self.prefcombos['Julian/Gregorian']:
                cur.execute(update_date_format_julegreg, (julegreg_form,))
            elif span_form and combo is self.prefcombos['From...To...']:
                cur.execute(update_date_format_span, (span_form,))
            elif range_form and combo is self.prefcombos['Between...And...']:
                cur.execute(update_date_format_range, (range_form,))
            conn.commit()
            cur.close()
            conn.close()
            combo.entry.delete(0, 'end')

    def show_test_date_formatted(self, evt):
        """FocusOut handler for the demo entries: validate the typed date
        and redisplay it formatted per the tree's preferences.

        NOTE(review): if validate_date() rejects the input it returns
        None, which format_stored_date() then tries to split -- confirm
        this path is handled upstream.
        """
        widg = evt.widget
        storable_date = validate_date(
            self.master,
            widg,
            widg.get())
        date_prefs = get_date_formats(tree_is_open=1)
        formatted_date = format_stored_date(
            storable_date, date_prefs=date_prefs)
        widg.delete(0, 'end')
        widg.insert(0, formatted_date)

    def make_widgets_top(self):
        """Build the date-entry demo area: a heading plus three labeled
        autofill entries bound to the validator on FocusOut."""
        self.test_frm = Frame(self)
        self.tester_head = LabelH3(
            self.test_frm,
            text="Date Entry Demo (doesn't affect your tree)")
        DATE_ENTRIES = ['Date Input I', 'Date Input II', 'Date Input III']
        self.date_test = {}
        g = 0
        for lab in DATE_ENTRIES:
            lbl = Label(self.test_frm, text=DATE_ENTRIES[g])
            lbl.grid(column=0, row= g+1, padx=24, sticky='e')
            dent = EntryAutoHilited(self.test_frm)
            dent.grid(column=1, row=g+1, sticky='ew')
            dent.config(width=64)
            dent.bind("<FocusOut>", self.show_test_date_formatted)
            self.date_test[lab] = dent
            g += 1

    def make_widgets_bottom(self):
        """Build the preference comboboxes (one per category in
        DATE_PREF_COMBOS) and the submit/revert buttons, then lay
        everything out.

        NOTE(review): the Combobox constructor receives the module-global
        root -- confirm it is set before this panel is created.
        """
        prefs_area = Frame(self)
        buttonbox = Frame(self)
        self.pref_head = LabelH2(
            prefs_area, text='Set Date Display Preferences')
        pref_head2 = Label(
            prefs_area,
            text='first value in each dropdown list is default')
        pfx_lab = LabelH3(prefs_area, text='Prefixes')
        sfx_lab = LabelH3(prefs_area, text='Suffixes')
        cmpd_lab = LabelH3(prefs_area, text='Compound Dates')
        PREF_HEADS = (
            "General", "Estimated", "Approximate", "Calculated",
            "Before/After", "Epoch", "Julian/Gregorian",
            "From...To...", "Between...And...")
        date_pref_heads = {}
        p = 0
        for heading in PREF_HEADS:
            lab = LabelH3(prefs_area, text=PREF_HEADS[p])
            date_pref_heads[heading] = lab
            combo = Combobox(
                prefs_area,
                root,
                height=300,
                values=DATE_PREF_COMBOS[p])
            self.prefcombos[heading] = combo
            p += 1
        self.submit = Button(
            buttonbox,
            text='SUBMIT PREFERENCES',
            command=self.get_new_date_prefs,
            width=30)
        self.revert = Button(
            buttonbox,
            text='REVERT TO DEFAULT VALUES',
            command=self.revert_to_default,
            width=30)
        # children of self
        self.test_frm.grid(column=0, row=0, pady=12)
        prefs_area.grid(column=0, row=1, pady=12)
        buttonbox.grid(column=0, row=2, pady=12)
        # children of self.test_frm
        self.tester_head.grid(column=1, row=0, columnspan=4, sticky='we')
        # children of prefs_area
        self.pref_head.grid(column=0, row=0, columnspan=3, sticky='w', padx=(12,0))
        pref_head2.grid(
            column=0, row=1, columnspan=3, sticky='w', padx=(12,0))
        date_pref_heads['General'].grid(column=3, row=0, padx=12)
        self.prefcombos['General'].grid(column=3, row=1, padx=12, pady=(0,12))
        pfx_lab.grid(column=0, row=2, sticky='w', pady=12, padx=12)
        sfx_lab.grid(column=0, row=5, sticky='w', pady=12, padx=12)
        cmpd_lab.grid(column=2, row=5, sticky='w', pady=12, padx=12)
        date_pref_heads['Estimated'].grid(column=0, row=3, padx=12)
        self.prefcombos['Estimated'].grid(column=0, row=4, padx=12, pady=(0,18))
        date_pref_heads['Approximate'].grid(column=1, row=3, padx=12)
        self.prefcombos['Approximate'].grid(column=1, row=4, padx=12, pady=(0,18))
        date_pref_heads['Calculated'].grid(column=2, row=3, padx=12)
        self.prefcombos['Calculated'].grid(column=2, row=4, padx=12, pady=(0,18))
        date_pref_heads['Before/After'].grid(column=3, row=3, padx=12)
        self.prefcombos['Before/After'].grid(column=3, row=4, padx=12, pady=(0,18))
        date_pref_heads['Epoch'].grid(column=0, row=6, padx=12)
        self.prefcombos['Epoch'].grid(column=0, row=7, padx=12, pady=(0,12))
        date_pref_heads['Julian/Gregorian'].grid(column=1, row=6, padx=12)
        self.prefcombos['Julian/Gregorian'].grid(
            column=1, row=7, padx=12, pady=(0,12))
        date_pref_heads['From...To...'].grid(column=2, row=6, padx=12)
        self.prefcombos['From...To...'].grid(column=2, row=7, padx=12, pady=(0,12))
        date_pref_heads['Between...And...'].grid(column=3, row=6, padx=12)
        self.prefcombos['Between...And...'].grid(
            column=3, row=7, padx=12, pady=(0,12))
        # children of buttonbox
        self.submit.grid(column=0, row=0, padx=(0,12))
        self.revert.grid(column=1, row=0, padx=(12,0))
if __name__ == "__main__":
# this doesn't do anything yet
from autofill import EntryAuto
from widgets import Entry
root = tk.Tk()
inwidg = EntryAuto(root)
inwidg.grid()
inwidg.focus_set()
traverse = Entry(root)
traverse.grid()
root.mainloop() | app/python/dates.py |
import tkinter as tk
import sqlite3
from files import get_current_file, global_db_path
from widgets import Frame, LabelH3, Label, FrameHilited, LabelH2, Button
from custom_combobox_widget import Combobox
from autofill import EntryAuto, EntryAutoHilited
from styles import make_formats_dict
from messages import open_message, dates_msg, InputMessage
from query_strings import (
select_date_format, select_default_date_format, delete_date_format_all,
insert_date_format_default, update_date_format_date_formats,
update_date_format_est, update_date_format_abt, update_date_format_cal,
update_date_format_befaft, update_date_format_epoch,
update_date_format_julegreg, update_date_format_span,
update_date_format_range)
import dev_tools as dt
from dev_tools import looky, seeline
'''
Treebard's policy is to let the user input dates with lots of freedom while
displaying dates without regard for how the date was input, but rather
in reference to stored user preferences. So this module is kinda complex
but since it's written strictly in accordance with the needs of Treebard,
there's nothing here that doesn't need to be here.
Another policy is to have no pop-up calendars and the like when the user
tries to input a date. These encumbrances only slow down input which is
more easily typed, and while coders might like calendar pop-ups because
they're cute or something, users find them annoying if trying to do any
significant amount of data input. In Treebard, a date is quickly input as a
typed string, with several choices of delimiter between date elements, and
with the elements typed in almost any order.
The policy of not allowing numerical month input makes it easy for Treebard
to tell which number is the year and which number is the day, except for
years less than 100. In this case a simple dialog coaches the user to input
short years with leading zeroes. So the only time a user has to worry about
inputting date elements in a fixed order is when typing "and" or "to"
between two compound dates. For example, the user can type "1852 ja 3 est
and bc 14 f 1901 abt" and Treebard will know that this means "between about
14 Feb 1901 BC and estimated 3 Jan 1852 AD". This allows the user to just
start typing, and as long as the "and" or "to" is in the right place, the
input will be correctly interpreted.
Another policy is to allow no bad date input and no ambiguous date input.
Treebard is meant to be easily sharable. Allowing numerical month input
would be good for some parts of our program by increasing flexibility of
input to infinity and beyond, but would bloat the code and open up the
possibility of easily misinterpreted dates when trees are shared from one
country to another. It would also mean more dialogs for clarification as to
which number is the month, day or year.
Another policy is to ignore that period of time when the Gregorian Calendar
was being adopted in lieu of the ancient Julian Calendar. Some genieware
uglifies these dates according to when western cultures were supposedly
making this transition. Treebard uglifies no date. The transition took
place at different times in different countries, in fact it has only
recently taken place in some countries. The user can mark his dates
"old style" or "new style" in whatever formatting he prefers, but dates like "14 Oct 1752/1753" don't exist in Treebard.
Globals are used for `root` and `widg` because we are validating a single
string found in a single Entry in a single app and none of that will ever
change. These values are set once per use and don't change during the
procedure.
I assume that everything this module does could be imported from any
number of libraries but I enjoyed writing this module three times and I
like having easy access to the procedures I'm using and knowing that the
code was custom-written for my needs and doesn't contain a bunch of extra
stuff that I don't need. DATES is a huge topic and no doubt the available
libraries for dealing with them are over my head.
I've tried making this a class, but a big class to validate one string? The
result is a bunch of instance variables that can be changed all over a big
class, which can have the same confusing effect as global variables, all to
validate one single string. I like classes but in this case, working the
code out procedurally seemed like a better approach, after trying it both
ways.
'''
formats = make_formats_dict()
def get_date_formats(tree_is_open=0):
    '''
    Return the user's stored date-format preferences as one DB row.

    This runs on load in case the user wants to use the date calculator
    without opening a tree. It runs again when a tree loads so the user
    preferences for that tree will be used.

    tree_is_open=0 reads the app-wide defaults database; any other
    (truthy) value reads the currently open tree's database.
    '''
    if tree_is_open == 0:
        current_file = global_db_path
        query = select_default_date_format
    else:
        # BUG FIX: this was `elif tree_is_open == 1`, which left
        # current_file and query unbound (NameError) for any other value.
        current_file = get_current_file()[0]
        query = select_date_format
    conn = sqlite3.connect(current_file)
    try:
        cur = conn.cursor()
        cur.execute(query)
        date_prefs = cur.fetchone()
        cur.close()
    finally:
        # Release the connection even if the query raises.
        conn.close()
    return date_prefs
# Cached at import time; refreshed per-tree by callers of get_date_formats.
date_prefs = get_date_formats()
# "." can't be used as a separator as it would prevent the user
# from using a dot to denote an abbreviation e.g. "A.D."
SEPTORS = (" ", "/", "-", "*", "_")
# Canonical month keys: the shortest unambiguous prefix of each month name.
OK_MONTHS = (
    'ja', 'f', 'mar', 'ap', 'may', 'jun',
    'jul', 'au', 's', 'oc', 'no', 'd')
MONTH_ABBS = (
    'ja.', 'jan.', 'f.', 'fe.', 'feb.', 'mar.', 'ap.', 'apr.',
    'jun.', 'jul.', 'au.', 'aug.', 's.', 'se.', 'sep.', 'sept.',
    'oc.', 'oct.', 'no.', 'nov.', 'd.', 'de.', 'dec.',
    'jan', 'feb', 'mar', 'apr', 'jun', 'jul', 'aug', 'sep', 'sept',
    'oct', 'nov', 'dec', 'fe', 'se', 'de')
FULL_MONTHS = (
    "january", "february", "march", "april", "may", "june",
    "july", "august", "september", "october", "november", "december")
# Every accepted month spelling in one flat list.
ALL_MONTHS = list(OK_MONTHS + MONTH_ABBS + FULL_MONTHS)
DAYS_30 = ('ap', 'jun', 's', 'no')
STORE_PFX = ['est', 'cal', 'abt', 'bef', 'aft']
STORE_SFX = ['ad', 'bc', 'os', 'ns', 'ce', 'bce']
OK_ABBS = STORE_PFX + STORE_SFX
# month key -> [full name, abbreviation, dotted abbreviation]
MONTH_CONVERSIONS = {
    'ja': ['January', 'Jan', 'Jan.'],
    'f': ['February', 'Feb', 'Feb.'],
    'mar': ['March', 'Mar', 'Mar.'],
    'ap': ['April', 'Apr', 'Apr.'],
    'may': ['May', 'May', 'May'],
    'jun': ['June', 'June', 'June'],
    'jul': ['July', 'July', 'July'],
    'au': ['August', 'Aug', 'Aug.'],
    's': ['September', 'Sep', 'Sep.'],
    'oc': ['October', 'Oct', 'Oct.'],
    'no': ['November', 'Nov', 'Nov.'],
    'd': ['December', 'Dec', 'Dec.']}
# Accepted input spellings for each prefix/suffix, grouped by meaning.
EST = ["est", "est.", "est'd"]
ABT = ["abt", "about", "circa", "ca", "ca.", "approx."]
CAL = ["cal", "calc", "calc.", "cal.", "calc'd"]
BEF = ["bef", "bef.", "before"]
AFT = ["aft", "aft.", "after"]
BC = ["BCE", "BC", "B.C.E.", "B.C."]
AD = ["CE", "AD", "C.E.", "A.D."]
JULIAN = ["OS", "O.S.", "old style", "Old Style"]
GREGORIAN = ["NS", "N.S.", "new style", "New Style"]
PAIRS = ((BEF, AFT), (BC, AD), (JULIAN, GREGORIAN))
# Build display choices such as "BC/AD" by pairing each spelling with the
# matching spelling of its counterpart.
# (Removed an unused loop counter `q` that was incremented but never read.)
ABB_PAIRS = []
for pair in PAIRS:
    paired = []
    for r, s in zip(pair[0], pair[1]):
        paired.append('{}/{}'.format(r, s))
    ABB_PAIRS.append(paired)
DATE_PREF_COMBOS = (
    ("18 April 1906", "18 Apr 1906", "18 Apr. 1906", "April 18, 1906",
     "Apr 18, 1906", "Apr. 18, 1906"),
    EST, ABT, CAL,
    ABB_PAIRS[0], ABB_PAIRS[1], ABB_PAIRS[2],
    ("from [date 1] to [date 2]", "fr. [date 1] to [date 2]",
     "frm [date 1] to [date 2]", "fr [date 1] to [date 2]"),
    ("btwn [date 1] & [date 2]", "btwn [date 1] and [date 2]",
     "bet [date 1] & [date 2]", "bet [date 1] and [date 2]",
     "bet. [date 1] & [date 2]", "bet. [date 1] and [date 2]"))
DATE_FORMATS = (
    'dmy', 'dmy_abb', 'dmy_dot', 'mdy', 'mdy_abb', 'mdy_dot')
SPAN_FORMATS = ("from_to", "fr._to", "frm_to", "fr_to")
RANGE_FORMATS = (
    "btwn_&", "btwn_and", "bet_&", "bet_and", "bet._&", "bet._and")
FORMAT_TO_STRIP = ("from", "fr.", "frm", "fr", "btwn", "bet", "bet.", ",", "between")
# Map each sample display string to its stored format code.
DATE_FORMAT_LOOKUP = dict(zip(DATE_PREF_COMBOS[0], DATE_FORMATS))
SPAN_FORMAT_LOOKUP = dict(zip(DATE_PREF_COMBOS[7], SPAN_FORMATS))
RANGE_FORMAT_LOOKUP = dict(zip(DATE_PREF_COMBOS[8], RANGE_FORMATS))
OK_PREFIXES = ABT+EST+CAL+BEF+AFT
OK_SUFFIXES = BC+AD+JULIAN+GREGORIAN
# Set once per validation by validate_date(); see module docstring.
root = None
widg = None
def validate_date(parent, inwidg, final):
    """Validate the raw date string *final* typed into Entry *inwidg*.

    Returns the storable dash-delimited date string, or None when any
    stage rejected the input (the failing stage opens its own dialog).
    Sets the module globals `root` and `widg` so those dialogs can be
    parented and can clear the entry.
    """
    global root, widg
    root = parent
    widg = inwidg
    # Pipeline: each stage hands its result to the next; a None result
    # anywhere means validation failed and we stop immediately.
    stripped = find_bad_dates(final)
    if stripped is None:
        return None
    results = make_date_dict(list(stripped))
    if not results:
        return None
    date_dict, order, link = results
    if date_dict is None:
        return None
    ordered = order_compound_dates(date_dict, order, link)
    if ordered is None:
        return None
    return make_date_string(ordered)
def find_bad_dates(final):
    """Normalize the raw date string and run the word/number validators.

    Returns the validator output (comp1, comp2, link, compound) or None
    if validation failed (an error dialog will already have been shown).
    """
    final = final.replace("&", "and")
    # Strip span/range framing words and commas, then collapse every
    # accepted separator to a space.
    # NOTE(review): str.replace is substring-blind, so e.g. "fr" is also
    # removed from inside unrelated words -- confirm this is intended.
    for mark in FORMAT_TO_STRIP:
        final = final.replace(mark, "")
    for sep in SEPTORS:
        final = final.replace(sep, " ")
    terms = final.split()
    # Removed a dead loop that called term.strip() and discarded the
    # result; str.split() already removes surrounding whitespace.
    compounds = find_word_errors(terms)
    if not compounds:
        return
    final = find_number_errors(compounds)
    return final
def count_month_words(info):
    """Scan *info* for month words and a compound-date link word.

    Returns (month_words, compound): every term that is a recognized
    month spelling, and True when a "to"/"and" link word was seen.
    """
    linked = False
    found = []
    for word in info:
        lowered = word.lower()
        if lowered in ALL_MONTHS:
            found.append(word)
        elif lowered in ("to", "and"):
            linked = True
    return found, linked
def err_done0(widg, dlg):
    """Dismiss an error dialog: clear the offending entry, destroy the
    dialog, and return focus to the entry so the user can retry."""
    widg.delete(0, 'end')
    dlg.destroy()
    widg.focus_set()
def find_word_errors(terms):
    """Split *terms* into the two halves of a (possible) compound date and
    reject inputs with misused words.

    Returns (comp1, comp2, compound_date_link, compound) on success, or
    None after opening an error dialog. comp1/comp2 are the terms before/
    after the "to"/"and" link word; comp2 stays empty for a single date.
    """
    month_words, compound = count_month_words(terms)
    compound_date_link = None
    comp1 = []
    comp2 = []
    # Partition terms around the first link word; a second link word is
    # an error.
    for term in terms:
        if term.lower() in ("and", "to"):
            if compound_date_link is not None:
                msg = open_message(
                    root,
                    dates_msg[0],
                    "Repeated Compound Date Link",
                    "OK")
                msg[0].grab_set()
                msg[2].config(
                    command=lambda widg=widg, dlg=msg[0]: err_done0(widg, dlg))
                return
            compound_date_link = term
        elif compound_date_link is None:
            comp1.append(term)
        elif compound_date_link is not None:
            comp2.append(term)
        else:
            print("line", looky(seeline()).lineno, "case not handled:")
    months = len(month_words)
    # Two month words are only legal when joined by a link word.
    if months > 1 and compound_date_link is None:
        msg = open_message(
            root,
            dates_msg[1],
            "Too Many Months Input",
            "OK")
        msg[0].grab_set()
        msg[2].config(
            command=lambda widg=widg, dlg=msg[0]: err_done0(widg, dlg))
        return
    if months > 2:
        msg = open_message(
            root,
            dates_msg[2],
            "Too Many Months Input",
            "OK")
        msg[0].grab_set()
        msg[2].config(
            command=lambda widg=widg, dlg=msg[0]: err_done0(widg, dlg))
        return
    elif months == 2:
        pass
    elif months <= 1:
        # With at most one month word, more than one number in a half
        # would mean a day was typed with no month to attach it to.
        for lst in (comp1, comp2):
            n = 0
            for item in lst:
                if item.isdigit():
                    n += 1
            if months == 1 and n > 1:
                month_words2 = count_month_words(lst)[0]
                if len(month_words2) == months:
                    pass
                else:
                    msg = open_message(
                        root,
                        dates_msg[3],
                        "Day Input Without Month",
                        "OK")
                    msg[0].grab_set()
                    msg[2].config(
                        command=lambda widg=widg, dlg=msg[0]: err_done0(
                            widg, dlg))
                    return
            elif months == 0 and n == 1:
                pass
            elif months == 0 and n > 1:
                msg = open_message(
                    root,
                    dates_msg[3],
                    "Day Input Without Month",
                    "OK")
                msg[0].grab_set()
                msg[2].config(
                    command=lambda widg=widg, dlg=msg[0]: err_done0(widg, dlg))
                return
    # Each half may carry at most one prefix (est/abt/...) and one
    # suffix (bc/ad/os/ns).
    for lst in (comp1, comp2):
        prefixes = 0
        suffixes = 0
        for elem in lst:
            if elem.lower() in OK_PREFIXES:
                prefixes += 1
            elif elem.upper() in OK_SUFFIXES:
                suffixes += 1
        if prefixes > 1 or suffixes > 1:
            msg = open_message(
                root,
                dates_msg[4],
                "Too Many Prefixes or Suffixes",
                "OK")
            msg[0].grab_set()
            msg[2].config(
                command=lambda widg=widg, dlg=msg[0]: err_done0(widg, dlg))
            return
    return comp1, comp2, compound_date_link, compound
def standardize_month(term):
    """Collapse any recognized month spelling to its canonical OK_MONTHS
    key; return *term* unchanged when no key is a prefix of it."""
    return next((key for key in OK_MONTHS if term.startswith(key)), term)
def find_number_errors(compounds):
    """Reject inputs whose numeric terms can't form a date.

    *compounds* is (comp1, comp2, link, compound) from find_word_errors;
    only the two term lists are inspected. Returns *compounds* unchanged
    on success, or None after opening an error dialog.
    """
    for lst in compounds[0:2]:
        nums = 0
        over_two_digits = 0
        lenlist = len(lst)
        for item in lst:
            if item.isdigit() is True:
                # A number longer than two digits is taken as a year;
                # two of them in one half is one year too many.
                if len(item) > 2:
                    if over_two_digits > 0:
                        msg = open_message(
                            root,
                            dates_msg[5],
                            "Too Many Years Input",
                            "OK")
                        msg[0].grab_set()
                        msg[2].config(
                            command=lambda widg=widg, dlg=msg[0]: err_done0(
                                widg, dlg))
                        return
                    else:
                        over_two_digits += 1
                nums += 1
                # More than two numbers (day + year) in a half is invalid.
                if nums >= 3:
                    msg = open_message(
                        root,
                        dates_msg[6],
                        "Too Many Numerical Terms Input",
                        "OK")
                    msg[0].grab_set()
                    msg[2].config(
                        command=lambda widg=widg, dlg=msg[0]: err_done0(widg, dlg))
                    return
                elif lenlist > 5:
                    msg = open_message(
                        root,
                        dates_msg[7],
                        "Too Many Terms Input",
                        "OK")
                    msg[0].grab_set()
                    msg[2].config(
                        command=lambda widg=widg, dlg=msg[0]: err_done0(widg, dlg))
                    return
        # A single all-alphabetic term (e.g. just "March") has no number
        # at all and can't be stored as a date.
        if lenlist == 1 and lst[0].isalpha() is True:
            msg = open_message(
                root,
                dates_msg[8],
                "Numerical Terms Input Lacking",
                "OK")
            msg[0].grab_set()
            msg[2].config(
                command=lambda widg=widg, dlg=msg[0]: err_done0(widg, dlg))
            return
    return compounds
def clarify_year(numbers, lst):
    '''
    For years < 100 if user types without preceding zeroes.

    Opens an InputMessage asking which of the two ambiguous numbers is
    the year; the other is taken to be the day. Returns
    (year, day, lst) with the year substituted into *lst*, or None if
    the user's answer was not a 4-digit year.
    '''
    # NOTE(review): `copy` is an alias of lst, not a copy -- the loop
    # below mutates lst while iterating the same object; confirm intended.
    copy = lst
    head2 = "{} or {}?".format(numbers[0], numbers[1])
    msg = InputMessage(
        root, root=root, title="Clarify Year", ok_txt="OK",
        cancel_txt="CANCEL", head1=dates_msg[11], head2=head2,
        grab=True, entry=True, wraplength=300)
    year = msg.show().strip()
    if len(year) != 4:
        msg = open_message(
            root,
            dates_msg[12],
            "No Year Entered",
            "OK")
        msg[0].grab_set()
        root.wait_window(msg[0])
        widg.delete(0, 'end')
        widg.focus_set()
        return
    # Whichever input number the user echoed back is the year; the other
    # number becomes the day.
    # NOTE(review): if the typed year matches neither number, `day` is
    # never bound and the final return raises NameError -- confirm the
    # dialog guarantees one of the two values.
    a = 0
    for num in numbers:
        if int(num) == int(year):
            if a == 1:
                day = numbers[0]
            elif a == 0:
                day = numbers[1]
            x = 0
            for item in copy:
                if item.isalpha() is False and item != day:
                    lst[x] = year
                x += 1
            break
        a += 1
    return year, day, lst
def make_date_dict(final):
    """Convert validated term lists into per-date dicts.

    *final* is [comp1, comp2, compound_date_link, compound] from
    find_bad_dates. Returns (date_dict, order, compound_date_link) where
    date_dict is a two-element list of dicts carrying "month", "year",
    "day", "prefix", "suffix" keys (as found) and order marks each half
    "ad" or "bc". Returns None after an error dialog on failure.
    """
    def find_month(lst, b):
        # Record the canonical month key for the first alphabetic term
        # that starts with a recognized month prefix.
        g = 0
        for item in lst:
            if item.isalpha():
                if item.lower().startswith(OK_MONTHS):
                    for mo in OK_MONTHS:
                        if item.lower().startswith(mo):
                            month_key = mo
                            break
                    date_dict[b]["month"] = month_key
                    break
            g += 1
        return lst
    def find_year(lst, b):
        def add_zeros(lst, the_one):
            # Left-pad a lone 2- or 3-digit number to four digits so it
            # reads unambiguously as a year.
            fixed = the_one[0]
            length = len(the_one[0])
            idx = the_one[1]
            if length == 2:
                fixed = "00" + the_one[0]
            elif length == 3:
                fixed = "0" + the_one[0]
            lst[idx] = fixed
            return lst
        num_count = []
        u = 0
        for item in lst:
            if item.isdigit():
                num_count.append((item, u))
            u += 1
        # With exactly one number present it must be the year.
        if len(num_count) == 1:
            the_one = num_count[0]
            lst = add_zeros(lst, the_one)
        under_two = 0
        nums = []
        for item in lst:
            if item.isdigit():
                nums.append(item)
                if len(item) < 3:
                    # Two short numbers are ambiguous (day vs. year) --
                    # ask the user which is which.
                    # NOTE(review): clarify_year is called twice on
                    # success, so its dialog opens twice -- confirm.
                    if under_two > 0:
                        if clarify_year(nums, lst) is None:
                            return
                        else:
                            year, day, lst = clarify_year(nums, lst)
                            date_dict[b]["year"] = year
                    else:
                        under_two += 1
                elif 5 > len(item) > 2 :
                    date_dict[b]["year"] = item
                    break
                elif len(item) > 4:
                    msg = open_message(
                        root,
                        dates_msg[13],
                        "Year Greater than 9999",
                        "OK")
                    msg[0].grab_set()
                    msg[2].config(
                        command=lambda widg=widg, dlg=msg[0]: err_done0(
                            widg, dlg))
                    return
        return lst
    def find_day(lst, b):
        # The first remaining 1-2 digit number is the day; longer numbers
        # (the year) are skipped.
        if lst is None: return
        i = 0
        for item in lst:
            if item.isdigit():
                if len(item) > 2:
                    i += 1
                    continue
                elif len(item) <= 2:
                    date_dict[b]["day"] = item
                    break
            i += 1
        return lst
    compound_date_link, compound = final[2:]
    date_dict = [{}, {}]
    if len(final) == 1:
        comps = [final[0]]
    elif len(final) > 1:
        comps = [final[0], final[1]]
    b = 0
    for lst in comps:
        lst = find_month(lst, b)
        lst = find_year(lst, b)
        lst = find_day(lst, b)
        comps[b] = lst
        b += 1
    check_days_in_months(date_dict)
    # Default each half to the common era unless a BC suffix was typed.
    order = ["ad", "ad"]
    e = 0
    for lst in comps:
        if lst is None: return
        for item in lst:
            if item.upper() in BC:
                order[e] = "bc"
            elif item.upper() in AD:
                order[e] = "ad"
        e += 1
    # Remaining non-number, non-month terms are prefixes/suffixes.
    f = 0
    for lst in comps:
        for item in lst:
            if not item.isdigit() and not item.lower().startswith(OK_MONTHS):
                if item.lower() in OK_PREFIXES:
                    date_dict = assign_prefixes(date_dict, item, f)
                elif (item in OK_SUFFIXES or
                        item.upper() in OK_SUFFIXES or item.title() in OK_SUFFIXES):
                    date_dict = assign_suffixes(date_dict, item, f)
        f += 1
    # A compound date whose halves are identical is meaningless.
    if compound is True:
        if date_dict[0] == date_dict[1]:
            msg = open_message(
                root,
                dates_msg[9],
                "Indistinct Compound Date",
                "OK")
            msg[0].grab_set()
            msg[2].config(
                command=lambda widg=widg, dlg=msg[0]: err_done0(widg, dlg))
            return
    return date_dict, order, compound_date_link
def assign_prefixes(date_dict, item, f):
    """Store the canonical prefix code for *item* in date half *f*.

    *item* must be one of the spellings in OK_PREFIXES (the caller
    checks); the matching canonical code ("abt"/"est"/"cal"/"bef"/"aft")
    is recorded under the "prefix" key.
    """
    lowered = item.lower()
    for canonical, spellings in (
            ("abt", ABT), ("est", EST), ("cal", CAL),
            ("bef", BEF), ("aft", AFT)):
        if lowered in spellings:
            term = canonical
            break
    date_dict[f]["prefix"] = term
    return date_dict
def assign_suffixes(date_dict, item, f):
    """Store the canonical suffix code for *item* in date half *f*.

    Tries the term as typed, uppercased, and title-cased against each
    suffix spelling group; the first hit wins and is stored under the
    "suffix" key as "bc"/"ad"/"os"/"ns".
    """
    groups = ((BC, "bc"), (AD, "ad"), (JULIAN, "os"), (GREGORIAN, "ns"))
    for candidate in (item, item.upper(), item.title()):
        matched = next(
            (code for spellings, code in groups if candidate in spellings),
            None)
        if matched is not None:
            term = matched
            break
    date_dict[f]["suffix"] = term
    return date_dict
def check_days_in_months(date_dict):
    """Open an error dialog if any date's day exceeds its month's length.

    *date_dict* is the two-element list of per-date dicts; entries
    without a "month" key are skipped. Returns None in all cases.
    """
    for dkt in date_dict:
        if dkt.get("month") is None:
            continue
        if len(dkt) != 0:
            leap_year = False
            maxdays = 31
            if dkt["month"] == "f":
                maxdays = 28
                if dkt.get("year") is not None:
                    # NOTE(review): divisible-by-4 only -- century years
                    # (1900 etc.) are misclassified as leap years; confirm
                    # whether the full Gregorian rule is wanted here.
                    if int(dkt["year"]) % 4 == 0:
                        maxdays = 29
                else:
                    # NOTE(review): February with no year returns from the
                    # whole function, skipping any remaining dict -- confirm.
                    return
            elif dkt["month"] in DAYS_30:
                maxdays = 30
            if dkt.get("day") and int(dkt["day"]) > maxdays:
                msg = open_message(
                    root,
                    dates_msg[10],
                    "Too Many Days for the Month",
                    "OK")
                msg[0].grab_set()
                msg[2].config(
                    command=lambda widg=widg, dlg=msg[0]: err_done0(widg, dlg))
                return
def order_compound_dates(final, order, compound_date_link):
    """Arrange the two date dicts chronologically around their link word.

    *final* is the two-element date_dict list; *order* marks each half
    "ad" or "bc". Returns [first, link, second] (the link slot is "" for
    a single date). BC years sort descending since larger BC years are
    earlier.
    """
    # Single date: keep it first, pad the link slot.
    if len(final[1]) == 0:
        final.insert(1, "")
        return final
    # sort1: numeric year. sort2: [month ordinal, day] for tie-breaking
    # when both halves share a year.
    sort1 = []
    sort2 = [[], []]
    u = 0
    for dkt in final:
        sort1.append(int(dkt["year"]))
        w = 1
        for mo in OK_MONTHS:
            if dkt.get("month") and dkt["month"] == mo:
                sort2[u].append(w)
                continue
            w += 1
        if dkt.get("day"):
            sort2[u].append(int(dkt["day"]))
        dkt["sort1"] = sort1[u]
        dkt["sort2"] = sort2[u]
        u += 1
    if order == ["ad", "ad"]:
        fwd = sorted(final, key=lambda i: i["sort1"])
        sort_again = fwd
        if sort1[0] == sort1[1]:
            sort_again = sorted(fwd, key=lambda i: i["sort2"])
        sort_again.insert(1, compound_date_link)
        return sort_again
    elif order == ["bc", "bc"]:
        rev = sorted(final, key=lambda i: i["sort1"], reverse=True)
        sort_again = rev
        if sort1[0] == sort1[1]:
            sort_again = sorted(rev, key=lambda i: i["sort2"])
        sort_again.insert(1, compound_date_link)
        return sort_again
    elif order == ["ad", "bc"]:
        # BC always precedes AD, so swap the halves.
        right = [final[1], final[0]]
        right.insert(1, compound_date_link)
        return right
    elif order == ["bc", "ad"]:
        final.insert(1, compound_date_link)
        return final
def make_date_string(final):
    """Serialize [comp1, link, comp2] into the stored date string.

    The stored form is eleven dash-joined fields:
    prefix-year-month-day-suffix-link-prefix-year-month-day-suffix,
    with missing values left empty. For a single date (empty link) the
    last six fields are empty.
    """
    PART_KEYS = ("prefix", "year", "month", "day", "suffix")
    first, link, second = final[0], final[1], final[2]
    fields = [first.get(key, "") for key in PART_KEYS]
    if len(link) == 0:
        # Single date: blank link plus five blank second-date fields.
        fields.extend([""] * 6)
    else:
        fields.append(link)
        fields.extend(second.get(key, "") for key in PART_KEYS)
    return "-".join(fields)
def format_stored_date(stored_date, date_prefs=date_prefs):
    ''' Also used in events_table.py.

    Render a stored dash-delimited date string for display according to
    the user's preferences. date_prefs defaults to the module-level row
    loaded at import time; callers may pass a fresher row.
    '''
    # Sentinel for an empty single date.
    if stored_date == "-0000-00-00-------":
        return ""
    dateform = date_prefs[0]
    formatted_date = ""
    preprefix = ""
    prefix1 = ""
    year1 = ""
    month1 = ""
    day1 = ""
    suffix1 = ""
    link = ""
    prefix2 = ""
    year2 = ""
    month2 = ""
    day2 = ""
    suffix2 = ""
    span = False
    ranje = False
    compound = False
    parts = stored_date.split("-")
    # The stored link word distinguishes a span (to) from a range (and).
    if 'to' in parts:
        span = True
        compound = True
    elif 'and' in parts:
        ranje = True
        compound = True
    # Decode by field position: 0-4 first date, 5 link, 6-10 second date.
    y = 0
    for part in parts:
        if len(part) == 0:
            pass
        elif y in (0, 6):
            part = find_prefix(part, date_prefs)
            if y == 0:
                prefix1 = part
            elif y == 6:
                prefix2 = part
        elif y in (1, 7):
            part = part.lstrip("0")
            if y == 1:
                year1 = part
            elif y == 7:
                year2 = part
        elif y in (2, 8):
            part = convert_month(part, dateform)
            if y == 2:
                month1 = part
            elif y == 8:
                month2 = part
        elif y in (3, 9):
            part = part.lstrip("0")
            if y == 3:
                day1 = part
            elif y == 9:
                day2 = part
        elif y in (4, 10):
            part = find_suffix(part, date_prefs)
            if y == 4:
                suffix1 = part
            elif y == 10:
                suffix2 = part
        elif y == 5:
            if compound is False:
                break
            # The span/range preference stores "frameword_linkword".
            if span is True:
                part = date_prefs[7].split("_")
                preprefix = part[0]
                link = part[1]
            elif ranje is True:
                part = date_prefs[8].split("_")
                preprefix = part[0]
                link = part[1]
        y += 1
    # Drop an AD-style suffix on 3+ digit years, where it adds nothing.
    t = 0
    for tup in ((suffix1, year1), (suffix2, year2)):
        suffix = tup[0]
        year = tup[1]
        if suffix in AD:
            if int(year) > 99:
                suffix = ""
            if t == 0:
                suffix1 = suffix
            elif t == 1:
                suffix2 = suffix
        t += 1
    # Candidate orderings; commas follow the day only in month-first
    # formats and only when that date actually has a day.
    month_first_commas2 = (
        preprefix, prefix1, month1, day1 + ",", year1, suffix1,
        link, prefix2, month2, day2 + ",", year2, suffix2)
    month_first_comma_a = (
        preprefix, prefix1, month1, day1 + ",", year1, suffix1,
        link, prefix2, month2, day2, year2, suffix2)
    month_first_comma_b = (
        preprefix, prefix1, month1, day1, year1, suffix1,
        link, prefix2, month2, day2 + ",", year2, suffix2)
    month_first_no_comma = (
        preprefix, prefix1, month1, day1, year1, suffix1,
        link, prefix2, month2, day2, year2, suffix2)
    day_first = (
        preprefix, prefix1, day1, month1, year1, suffix1,
        link, prefix2, day2, month2, year2, suffix2)
    len1 = len(day1)
    len2 = len(day2)
    # NOTE(review): if dateform contains neither "dm" nor "md", `order`
    # is unbound below -- confirm date_prefs[0] is always one of
    # DATE_FORMATS.
    if "dm" in dateform:
        order = day_first
    elif "md" in dateform:
        if compound is True:
            if len1 > 0 and len2 > 0:
                order = month_first_commas2
            elif len1 > 0 and len2 == 0:
                order = month_first_comma_a
            elif len1 == 0 and len2 > 0:
                order = month_first_comma_b
            else:
                order = month_first_no_comma
        else:
            if len1 > 0:
                order = month_first_comma_a
            else:
                order = month_first_no_comma
    # Collapse the blanks left by empty fields into single spaces.
    formatted_date = "{} {} {} {} {} {} {} {} {} {} {} {}".format(*order)
    formatted_date = " ".join(formatted_date.split())
    return formatted_date
def find_prefix(part, date_prefs):
    """Translate a stored prefix code into the user's display form.

    date_prefs[1..3] hold the abt/est/cal display strings and
    date_prefs[4] the "before/after" pair as one slash-joined string.
    Unrecognized codes yield "".
    """
    simple = {'abt': 1, 'est': 2, 'cal': 3}
    if part in simple:
        return date_prefs[simple[part]]
    if part in ('bef', 'aft'):
        pair = date_prefs[4].split("/")
        return pair[0] if part == 'bef' else pair[1]
    return ""
def find_suffix(part, date_prefs):
    """Translate a stored suffix code into the user's display form.

    date_prefs[5] holds the BC/AD pair and date_prefs[6] the OS/NS pair,
    each stored as one slash-joined string (e.g. "B.C./A.D.").
    Unrecognized codes yield "".
    """
    if part in ("bc", "ad"):
        # BUG FIX: was `part in ("bc, ad")` -- parentheses around a single
        # string don't make a tuple, so this tested substring membership
        # in the string "bc, ad" (matching stray parts like "c" and then
        # crashing on an unbound `suffix`).
        bc_ad = date_prefs[5].split("/")
        suffix = bc_ad[0] if part == "bc" else bc_ad[1]
    elif part in ("os", "ns"):
        os_ns = date_prefs[6].split("/")
        # BUG FIX: this branch indexed `bc_ad`, which is unbound here, so
        # every os/ns suffix raised NameError.
        suffix = os_ns[0] if part == "os" else os_ns[1]
    else:
        suffix = ""
    return suffix
def convert_month(part, dateform):
    """Render canonical month key *part* in the style *dateform* selects:
    full name (default), abbreviation ('abb'), or dotted ('dot').
    Unknown keys yield ""."""
    if 'abb' in dateform:
        column = 1
    elif 'dot' in dateform:
        column = 2
    else:
        column = 0
    forms = MONTH_CONVERSIONS.get(part)
    return forms[column] if forms is not None else ""
class DatePreferences(Frame):
    """Tab/frame for trying out date entry and setting per-tree date
    display preferences (stored via the update_date_format_* queries)."""
    def __init__(self, master, *args, **kwargs):
        Frame.__init__(self, master, *args, **kwargs)
        self.master = master
        # heading -> Combobox, filled in make_widgets_bottom().
        self.prefcombos = {}
        self.make_widgets_top()
        self.make_widgets_bottom()
    def revert_to_default(self):
        """Delete the tree's stored date formats, re-insert the defaults,
        and clear every preference combobox."""
        current_file = get_current_file()[0]
        conn = sqlite3.connect(current_file)
        conn.execute('PRAGMA foreign_keys = 1')
        cur = conn.cursor()
        cur.execute(delete_date_format_all)
        conn.commit()
        cur.execute(insert_date_format_default)
        conn.commit()
        cur.close()
        conn.close()
        for combo in self.prefcombos.values():
            combo.entry.delete(0, 'end')
    def get_new_date_prefs(self):
        """Read each non-empty preference combobox, translating the
        sample display strings to stored format codes, then persist."""
        date_form = None
        est_form = None
        abt_form = None
        cal_form = None
        befaft_form = None
        epoch_form = None
        julegreg_form = None
        span_form = None
        range_form = None
        for combo in self.prefcombos.values():
            if len(combo.entry.get()) != 0:
                var_form = combo.entry.get()
                if combo == self.prefcombos['General']:
                    date_form = var_form
                    # Sample like "18 April 1906" -> code like "dmy".
                    for k,v in DATE_FORMAT_LOOKUP.items():
                        if date_form == k:
                            date_form = v
                elif combo == self.prefcombos['Estimated']:
                    est_form = var_form
                elif combo == self.prefcombos['Approximate']:
                    abt_form = var_form
                elif combo == self.prefcombos['Calculated']:
                    cal_form = var_form
                elif combo == self.prefcombos['Before/After']:
                    befaft_form = var_form
                elif combo == self.prefcombos['Epoch']:
                    epoch_form = var_form
                elif combo == self.prefcombos['Julian/Gregorian']:
                    julegreg_form = var_form
                elif combo == self.prefcombos['From...To...']:
                    span_form = var_form
                    for k,v in SPAN_FORMAT_LOOKUP.items():
                        if span_form == k:
                            span_form = v
                elif combo == self.prefcombos['Between...And...']:
                    range_form = var_form
                    for k,v in RANGE_FORMAT_LOOKUP.items():
                        if range_form == k:
                            range_form = v
        self.set_new_date_prefs(
            date_form, est_form, abt_form, cal_form, befaft_form, epoch_form,
            julegreg_form, span_form, range_form)
    def set_new_date_prefs(self,
            date_form, est_form, abt_form, cal_form, befaft_form, epoch_form,
            julegreg_form, span_form, range_form):
        """Write each provided (non-None) preference to the current
        tree's database and clear the comboboxes."""
        for combo in self.prefcombos.values():
            current_file = get_current_file()[0]
            conn = sqlite3.connect(current_file)
            conn.execute('PRAGMA foreign_keys = 1')
            cur = conn.cursor()
            if date_form and combo is self.prefcombos['General']:
                cur.execute(update_date_format_date_formats, (date_form,))
            elif est_form and combo is self.prefcombos['Estimated']:
                cur.execute(update_date_format_est, (est_form,))
            elif abt_form and combo is self.prefcombos['Approximate']:
                cur.execute(update_date_format_abt, (abt_form,))
            elif cal_form and combo is self.prefcombos['Calculated']:
                cur.execute(update_date_format_cal, (cal_form,))
            elif befaft_form and combo is self.prefcombos['Before/After']:
                cur.execute(update_date_format_befaft, (befaft_form,))
            elif epoch_form and combo is self.prefcombos['Epoch']:
                cur.execute(update_date_format_epoch, (epoch_form,))
            elif julegreg_form and combo is self.prefcombos['Julian/Gregorian']:
                cur.execute(update_date_format_julegreg, (julegreg_form,))
            elif span_form and combo is self.prefcombos['From...To...']:
                cur.execute(update_date_format_span, (span_form,))
            elif range_form and combo is self.prefcombos['Between...And...']:
                cur.execute(update_date_format_range, (range_form,))
            conn.commit()
            cur.close()
            conn.close()
            combo.entry.delete(0, 'end')
    def show_test_date_formatted(self, evt):
        """On focus-out of a demo entry, validate its text and replace it
        with the formatted display form.

        NOTE(review): if validation fails, validate_date returns None and
        format_stored_date is still called with it -- confirm that path.
        """
        widg = evt.widget
        storable_date = validate_date(
            self.master,
            widg,
            widg.get())
        date_prefs = get_date_formats(tree_is_open=1)
        formatted_date = format_stored_date(
            storable_date, date_prefs=date_prefs)
        widg.delete(0, 'end')
        widg.insert(0, formatted_date)
    def make_widgets_top(self):
        """Build the demo area: three labeled entries that reformat their
        contents on focus-out."""
        self.test_frm = Frame(self)
        self.tester_head = LabelH3(
            self.test_frm,
            text="Date Entry Demo (doesn't affect your tree)")
        DATE_ENTRIES = ['Date Input I', 'Date Input II', 'Date Input III']
        self.date_test = {}
        g = 0
        for lab in DATE_ENTRIES:
            lbl = Label(self.test_frm, text=DATE_ENTRIES[g])
            lbl.grid(column=0, row= g+1, padx=24, sticky='e')
            dent = EntryAutoHilited(self.test_frm)
            dent.grid(column=1, row=g+1, sticky='ew')
            dent.config(width=64)
            dent.bind("<FocusOut>", self.show_test_date_formatted)
            self.date_test[lab] = dent
            g += 1
    def make_widgets_bottom(self):
        """Build the preferences area (one combobox per setting) and the
        submit/revert buttons, then grid everything."""
        prefs_area = Frame(self)
        buttonbox = Frame(self)
        self.pref_head = LabelH2(
            prefs_area, text='Set Date Display Preferences')
        pref_head2 = Label(
            prefs_area,
            text='first value in each dropdown list is default')
        pfx_lab = LabelH3(prefs_area, text='Prefixes')
        sfx_lab = LabelH3(prefs_area, text='Suffixes')
        cmpd_lab = LabelH3(prefs_area, text='Compound Dates')
        # Order matches DATE_PREF_COMBOS so combo p shows combos[p].
        PREF_HEADS = (
            "General", "Estimated", "Approximate", "Calculated",
            "Before/After", "Epoch", "Julian/Gregorian",
            "From...To...", "Between...And...")
        date_pref_heads = {}
        p = 0
        for heading in PREF_HEADS:
            lab = LabelH3(prefs_area, text=PREF_HEADS[p])
            date_pref_heads[heading] = lab
            combo = Combobox(
                prefs_area,
                root,
                height=300,
                values=DATE_PREF_COMBOS[p])
            self.prefcombos[heading] = combo
            p += 1
        self.submit = Button(
            buttonbox,
            text='SUBMIT PREFERENCES',
            command=self.get_new_date_prefs,
            width=30)
        self.revert = Button(
            buttonbox,
            text='REVERT TO DEFAULT VALUES',
            command=self.revert_to_default,
            width=30)
        # children of self
        self.test_frm.grid(column=0, row=0, pady=12)
        prefs_area.grid(column=0, row=1, pady=12)
        buttonbox.grid(column=0, row=2, pady=12)
        # children of self.test_frm
        self.tester_head.grid(column=1, row=0, columnspan=4, sticky='we')
        # children of prefs_area
        self.pref_head.grid(column=0, row=0, columnspan=3, sticky='w', padx=(12,0))
        pref_head2.grid(
            column=0, row=1, columnspan=3, sticky='w', padx=(12,0))
        date_pref_heads['General'].grid(column=3, row=0, padx=12)
        self.prefcombos['General'].grid(column=3, row=1, padx=12, pady=(0,12))
        pfx_lab.grid(column=0, row=2, sticky='w', pady=12, padx=12)
        sfx_lab.grid(column=0, row=5, sticky='w', pady=12, padx=12)
        cmpd_lab.grid(column=2, row=5, sticky='w', pady=12, padx=12)
        date_pref_heads['Estimated'].grid(column=0, row=3, padx=12)
        self.prefcombos['Estimated'].grid(column=0, row=4, padx=12, pady=(0,18))
        date_pref_heads['Approximate'].grid(column=1, row=3, padx=12)
        self.prefcombos['Approximate'].grid(column=1, row=4, padx=12, pady=(0,18))
        date_pref_heads['Calculated'].grid(column=2, row=3, padx=12)
        self.prefcombos['Calculated'].grid(column=2, row=4, padx=12, pady=(0,18))
        date_pref_heads['Before/After'].grid(column=3, row=3, padx=12)
        self.prefcombos['Before/After'].grid(column=3, row=4, padx=12, pady=(0,18))
        date_pref_heads['Epoch'].grid(column=0, row=6, padx=12)
        self.prefcombos['Epoch'].grid(column=0, row=7, padx=12, pady=(0,12))
        date_pref_heads['Julian/Gregorian'].grid(column=1, row=6, padx=12)
        self.prefcombos['Julian/Gregorian'].grid(
            column=1, row=7, padx=12, pady=(0,12))
        date_pref_heads['From...To...'].grid(column=2, row=6, padx=12)
        self.prefcombos['From...To...'].grid(column=2, row=7, padx=12, pady=(0,12))
        date_pref_heads['Between...And...'].grid(column=3, row=6, padx=12)
        self.prefcombos['Between...And...'].grid(
            column=3, row=7, padx=12, pady=(0,12))
        # children of buttonbox
        self.submit.grid(column=0, row=0, padx=(0,12))
        self.revert.grid(column=1, row=0, padx=(12,0))
if __name__ == "__main__":
    # this doesn't do anything yet
    # Minimal manual harness: shows one autofill entry (focused) and one
    # plain entry, then runs the Tk main loop.
    from autofill import EntryAuto
    from widgets import Entry
    root = tk.Tk()
    inwidg = EntryAuto(root)
    inwidg.grid()
    inwidg.focus_set()
    traverse = Entry(root)
    traverse.grid()
    root.mainloop()
import os
from subprocess import PIPE, STDOUT
from mock import Mock
import pytest
from tests.utils import CorrectedCommand, Rule
from theplease import const
from theplease.exceptions import EmptyCommand
from theplease.system import Path
from theplease.types import Command
class TestCorrectedCommand(object):
    """Tests for CorrectedCommand: equality/hash ignore priority, repr
    shape, and the shell string printed by run()."""
    def test_equality(self):
        # Priority is excluded from equality; side_effect is not.
        assert (CorrectedCommand('ls', None, 100) ==
                CorrectedCommand('ls', None, 200))
        assert (CorrectedCommand('ls', None, 100) !=
                CorrectedCommand('ls', lambda *_: _, 100))
    def test_hashable(self):
        # Equal commands with different priorities collapse in a set.
        assert {CorrectedCommand('ls', None, 100),
                CorrectedCommand('ls', None, 200)} == {CorrectedCommand('ls')}
    def test_representable(self):
        assert '{}'.format(CorrectedCommand('ls', None, 100)) == \
            'CorrectedCommand(script=ls, side_effect=None, priority=100)'
        # Non-ASCII scripts must survive formatting unchanged.
        assert u'{}'.format(CorrectedCommand(u'echo café', None, 100)) == \
            u'CorrectedCommand(script=echo café, side_effect=None, priority=100)'
    @pytest.mark.parametrize('script, printed, override_settings', [
        ('git branch', 'git branch', {'repeat': False, 'debug': False}),
        ('git brunch',
         "git brunch || please --repeat --force-command 'git brunch'",
         {'repeat': True, 'debug': False}),
        ('git brunch',
         "git brunch || please --repeat --debug --force-command 'git brunch'",
         {'repeat': True, 'debug': True})])
    def test_run(self, capsys, settings, script, printed, override_settings):
        # repeat/debug settings add the fallback re-invocation suffix.
        settings.update(override_settings)
        CorrectedCommand(script, None, 1000).run(Command(script, ''))
        out, _ = capsys.readouterr()
        assert out == printed
class TestRule(object):
    """Tests for Rule: loading from a path, enablement resolution,
    matching, and expansion into CorrectedCommands."""
    def test_from_path_rule_exception(self, mocker):
        # A rule module that fails to import yields None, not a crash.
        load_source = mocker.patch('theplease.types.load_source',
                                   side_effect=ImportError("No module named foo..."))
        assert Rule.from_path(Path('git.py')) is None
        load_source.assert_called_once_with('git', 'git.py')
    def test_from_path(self, mocker):
        match = object()
        get_new_command = object()
        load_source = mocker.patch(
            'theplease.types.load_source',
            return_value=Mock(match=match,
                              get_new_command=get_new_command,
                              enabled_by_default=True,
                              priority=900,
                              requires_output=True))
        rule_path = os.path.join(os.sep, 'rules', 'bash.py')
        assert (Rule.from_path(Path(rule_path))
                == Rule('bash', match, get_new_command, priority=900))
        load_source.assert_called_once_with('bash', rule_path)
    def test_from_path_excluded_rule(self, mocker, settings):
        # Excluded rules are skipped without even importing the module.
        load_source = mocker.patch('theplease.types.load_source')
        settings.update(exclude_rules=['git'])
        rule_path = os.path.join(os.sep, 'rules', 'git.py')
        assert Rule.from_path(Path(rule_path)) is None
        assert not load_source.called
    @pytest.mark.parametrize('rules, rule, is_enabled', [
        (const.DEFAULT_RULES, Rule('git', enabled_by_default=True), True),
        (const.DEFAULT_RULES, Rule('git', enabled_by_default=False), False),
        ([], Rule('git', enabled_by_default=False), False),
        ([], Rule('git', enabled_by_default=True), False),
        (const.DEFAULT_RULES + ['git'], Rule('git', enabled_by_default=False), True),
        (['git'], Rule('git', enabled_by_default=False), True)])
    def test_is_enabled(self, settings, rules, rule, is_enabled):
        # Enablement combines the rules setting with enabled_by_default.
        settings.update(rules=rules)
        assert rule.is_enabled == is_enabled
    def test_isnt_match(self):
        assert not Rule('', lambda _: False).is_match(
            Command('ls', ''))
    def test_is_match(self):
        rule = Rule('', lambda x: x.script == 'cd ..')
        assert rule.is_match(Command('cd ..', ''))
    @pytest.mark.usefixtures('no_colors')
    def test_isnt_match_when_rule_failed(self, capsys):
        # An exception inside match() is caught, logged, and treated as
        # a non-match.
        rule = Rule('test', Mock(side_effect=OSError('Denied')),
                    requires_output=False)
        assert not rule.is_match(Command('ls', ''))
        assert capsys.readouterr()[1].split('\n')[0] == '[WARN] Rule test:'
    def test_get_corrected_commands_with_rule_returns_list(self):
        # A list result yields one CorrectedCommand per item with
        # escalating priority multiples.
        rule = Rule(get_new_command=lambda x: [x.script + '!', x.script + '@'],
                    priority=100)
        assert (list(rule.get_corrected_commands(Command('test', '')))
                == [CorrectedCommand(script='test!', priority=100),
                    CorrectedCommand(script='test@', priority=200)])
    def test_get_corrected_commands_with_rule_returns_command(self):
        # A bare string result yields a single CorrectedCommand.
        rule = Rule(get_new_command=lambda x: x.script + '!',
                    priority=100)
        assert (list(rule.get_corrected_commands(Command('test', '')))
                == [CorrectedCommand(script='test!', priority=100)])
class TestCommand(object):
    """Tests for Command.from_raw_script: Popen wiring and script
    normalization / empty-input rejection."""
    @pytest.fixture(autouse=True)
    def Popen(self, monkeypatch):
        # Replace the real subprocess launcher so no command is run.
        Popen = Mock()
        Popen.return_value.stdout.read.return_value = b'output'
        monkeypatch.setattr('theplease.output_readers.rerun.Popen', Popen)
        return Popen
    @pytest.fixture(autouse=True)
    def prepare(self, monkeypatch):
        # Pretend output is always ready so tests never block.
        monkeypatch.setattr('theplease.output_readers.rerun._wait_output',
                            lambda *_: True)
    def test_from_script_calls(self, Popen, settings, os_environ):
        settings.env = {}
        assert Command.from_raw_script(
            ['apt-get', 'search', 'vim']) == Command(
            'apt-get search vim', 'output')
        Popen.assert_called_once_with('apt-get search vim',
                                      shell=True,
                                      stdin=PIPE,
                                      stdout=PIPE,
                                      stderr=STDOUT,
                                      env=os_environ)
    @pytest.mark.parametrize('script, result', [
        ([], None),
        ([''], None),
        (['', ''], None),
        (['ls', '-la'], 'ls -la'),
        (['ls'], 'ls')])
    def test_from_script(self, script, result):
        # Empty/blank argv raises EmptyCommand; otherwise args are joined.
        if result:
            assert Command.from_raw_script(script).script == result
        else:
            with pytest.raises(EmptyCommand):
                # BUG FIX: the original last line had dataset-extraction
                # residue ("| tests/test_types.py |") fused onto it,
                # making the file a syntax error.
                Command.from_raw_script(script)
import os
from subprocess import PIPE, STDOUT
from mock import Mock
import pytest
from tests.utils import CorrectedCommand, Rule
from theplease import const
from theplease.exceptions import EmptyCommand
from theplease.system import Path
from theplease.types import Command
class TestCorrectedCommand(object):
    """Tests for CorrectedCommand (duplicate copy of the class defined
    earlier in this chunk): equality/hash ignore priority, repr shape,
    and the shell string printed by run()."""
    def test_equality(self):
        assert (CorrectedCommand('ls', None, 100) ==
                CorrectedCommand('ls', None, 200))
        assert (CorrectedCommand('ls', None, 100) !=
                CorrectedCommand('ls', lambda *_: _, 100))
    def test_hashable(self):
        assert {CorrectedCommand('ls', None, 100),
                CorrectedCommand('ls', None, 200)} == {CorrectedCommand('ls')}
    def test_representable(self):
        assert '{}'.format(CorrectedCommand('ls', None, 100)) == \
            'CorrectedCommand(script=ls, side_effect=None, priority=100)'
        assert u'{}'.format(CorrectedCommand(u'echo café', None, 100)) == \
            u'CorrectedCommand(script=echo café, side_effect=None, priority=100)'
    @pytest.mark.parametrize('script, printed, override_settings', [
        ('git branch', 'git branch', {'repeat': False, 'debug': False}),
        ('git brunch',
         "git brunch || please --repeat --force-command 'git brunch'",
         {'repeat': True, 'debug': False}),
        ('git brunch',
         "git brunch || please --repeat --debug --force-command 'git brunch'",
         {'repeat': True, 'debug': True})])
    def test_run(self, capsys, settings, script, printed, override_settings):
        # repeat/debug settings add the fallback re-invocation suffix.
        settings.update(override_settings)
        CorrectedCommand(script, None, 1000).run(Command(script, ''))
        out, _ = capsys.readouterr()
        assert out == printed
class TestRule(object):
    """Tests for Rule loading, enabling, matching and command generation."""

    def test_from_path_rule_exception(self, mocker):
        # A rule module that fails to import is skipped (returns None).
        load_source = mocker.patch('theplease.types.load_source',
                                   side_effect=ImportError("No module named foo..."))
        assert Rule.from_path(Path('git.py')) is None
        load_source.assert_called_once_with('git', 'git.py')

    def test_from_path(self, mocker):
        # A loadable module becomes a Rule carrying the module's attributes.
        match = object()
        get_new_command = object()
        load_source = mocker.patch(
            'theplease.types.load_source',
            return_value=Mock(match=match,
                              get_new_command=get_new_command,
                              enabled_by_default=True,
                              priority=900,
                              requires_output=True))
        rule_path = os.path.join(os.sep, 'rules', 'bash.py')
        assert (Rule.from_path(Path(rule_path))
                == Rule('bash', match, get_new_command, priority=900))
        load_source.assert_called_once_with('bash', rule_path)

    def test_from_path_excluded_rule(self, mocker, settings):
        # Excluded rules are rejected before the module is even imported.
        load_source = mocker.patch('theplease.types.load_source')
        settings.update(exclude_rules=['git'])
        rule_path = os.path.join(os.sep, 'rules', 'git.py')
        assert Rule.from_path(Path(rule_path)) is None
        assert not load_source.called

    @pytest.mark.parametrize('rules, rule, is_enabled', [
        (const.DEFAULT_RULES, Rule('git', enabled_by_default=True), True),
        (const.DEFAULT_RULES, Rule('git', enabled_by_default=False), False),
        ([], Rule('git', enabled_by_default=False), False),
        ([], Rule('git', enabled_by_default=True), False),
        (const.DEFAULT_RULES + ['git'], Rule('git', enabled_by_default=False), True),
        (['git'], Rule('git', enabled_by_default=False), True)])
    def test_is_enabled(self, settings, rules, rule, is_enabled):
        # Explicitly listing a rule overrides its enabled_by_default flag.
        settings.update(rules=rules)
        assert rule.is_enabled == is_enabled

    def test_isnt_match(self):
        assert not Rule('', lambda _: False).is_match(
            Command('ls', ''))

    def test_is_match(self):
        rule = Rule('', lambda x: x.script == 'cd ..')
        assert rule.is_match(Command('cd ..', ''))

    @pytest.mark.usefixtures('no_colors')
    def test_isnt_match_when_rule_failed(self, capsys):
        # A rule whose match() raises is treated as a non-match and logged
        # as a warning on stderr instead of crashing.
        rule = Rule('test', Mock(side_effect=OSError('Denied')),
                    requires_output=False)
        assert not rule.is_match(Command('ls', ''))
        assert capsys.readouterr()[1].split('\n')[0] == '[WARN] Rule test:'

    def test_get_corrected_commands_with_rule_returns_list(self):
        # A list result yields several commands with increasing priority
        # (base priority multiplied by the 1-based position).
        rule = Rule(get_new_command=lambda x: [x.script + '!', x.script + '@'],
                    priority=100)
        assert (list(rule.get_corrected_commands(Command('test', '')))
                == [CorrectedCommand(script='test!', priority=100),
                    CorrectedCommand(script='test@', priority=200)])

    def test_get_corrected_commands_with_rule_returns_command(self):
        # A bare string result is wrapped into a single CorrectedCommand.
        rule = Rule(get_new_command=lambda x: x.script + '!',
                    priority=100)
        assert (list(rule.get_corrected_commands(Command('test', '')))
                == [CorrectedCommand(script='test!', priority=100)])
class TestCommand(object):
    """Tests for Command.from_raw_script and its subprocess plumbing."""

    @pytest.fixture(autouse=True)
    def Popen(self, monkeypatch):
        # Stub the subprocess so no real command is ever executed; the
        # fake process always produces b'output'.
        Popen = Mock()
        Popen.return_value.stdout.read.return_value = b'output'
        monkeypatch.setattr('theplease.output_readers.rerun.Popen', Popen)
        return Popen

    @pytest.fixture(autouse=True)
    def prepare(self, monkeypatch):
        # Pretend the output reader sees the command finish immediately.
        monkeypatch.setattr('theplease.output_readers.rerun._wait_output',
                            lambda *_: True)

    def test_from_script_calls(self, Popen, settings, os_environ):
        settings.env = {}
        assert Command.from_raw_script(
            ['apt-get', 'search', 'vim']) == Command(
            'apt-get search vim', 'output')
        # The raw argv is joined into one shell string and run with the
        # caller's environment.
        Popen.assert_called_once_with('apt-get search vim',
                                      shell=True,
                                      stdin=PIPE,
                                      stdout=PIPE,
                                      stderr=STDOUT,
                                      env=os_environ)

    @pytest.mark.parametrize('script, result', [
        ([], None),
        ([''], None),
        (['', ''], None),
        (['ls', '-la'], 'ls -la'),
        (['ls'], 'ls')])
    def test_from_script(self, script, result):
        # Empty / all-blank argv must raise EmptyCommand rather than
        # build a command from nothing.
        if result:
            assert Command.from_raw_script(script).script == result
        else:
            with pytest.raises(EmptyCommand):
                Command.from_raw_script(script)
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
    """Initial schema for the pages app: one MPTT-managed Page model."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Page',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=150, verbose_name='Title')),
                ('slug', models.SlugField(help_text='A url friendly slug.', unique=True, verbose_name='Slug')),
                ('short_title', models.CharField(blank=True, help_text='A shorter title which can be used in menus etc. If this is not supplied then the normal title field will be used.', max_length=50, null=True, verbose_name='Short title')),
                ('template', models.CharField(max_length=250, verbose_name='Template')),
                ('redirect', models.CharField(blank=True, help_text='Use this to point to redirect to another page or website.', max_length=200, null=True, verbose_name='Redirect')),
                ('show_in_nav', models.BooleanField(default=True, verbose_name='Show in nav')),
                ('show_in_sitemap', models.BooleanField(default=True, verbose_name='Show in sitemap')),
                ('state', models.IntegerField(choices=[(1, b'Private'), (5, b'Public')], default=5, verbose_name='State')),
                ('created_date', models.DateTimeField(blank=True, null=True, verbose_name='Created date')),
                ('modified_date', models.DateTimeField(blank=True, null=True, verbose_name='Modified date')),
                ('publish_date', models.DateTimeField(blank=True, null=True, verbose_name='Published date')),
                # MPTT bookkeeping columns (maintained by django-mptt; never
                # edited directly).
                ('lft', models.PositiveIntegerField(db_index=True, editable=False)),
                ('rght', models.PositiveIntegerField(db_index=True, editable=False)),
                ('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
                ('level', models.PositiveIntegerField(db_index=True, editable=False)),
                ('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='page_children', to='ostinato_pages.Page', verbose_name='Parent')),
            ],
            options={
                'verbose_name': 'Page',
                'verbose_name_plural': 'Pages',
                'permissions': (('private_view_page', '[Private] Can View Page'), ('private_edit_page', '[Private] Can Edit Page'), ('private_delete_page', '[Private] Can Delete Page'), ('can_make_public_page', 'Can Make_public Page'), ('public_view_page', '[Public] Can View Page'), ('public_edit_page', '[Public] Can Edit Page'), ('public_delete_page', '[Public] Can Delete Page'), ('can_make_private_page', 'Can Make_private Page')),
            },
        ),
    ]
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
    """Initial schema for the pages app: one MPTT-managed Page model."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Page',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=150, verbose_name='Title')),
                ('slug', models.SlugField(help_text='A url friendly slug.', unique=True, verbose_name='Slug')),
                ('short_title', models.CharField(blank=True, help_text='A shorter title which can be used in menus etc. If this is not supplied then the normal title field will be used.', max_length=50, null=True, verbose_name='Short title')),
                ('template', models.CharField(max_length=250, verbose_name='Template')),
                ('redirect', models.CharField(blank=True, help_text='Use this to point to redirect to another page or website.', max_length=200, null=True, verbose_name='Redirect')),
                ('show_in_nav', models.BooleanField(default=True, verbose_name='Show in nav')),
                ('show_in_sitemap', models.BooleanField(default=True, verbose_name='Show in sitemap')),
                ('state', models.IntegerField(choices=[(1, b'Private'), (5, b'Public')], default=5, verbose_name='State')),
                ('created_date', models.DateTimeField(blank=True, null=True, verbose_name='Created date')),
                ('modified_date', models.DateTimeField(blank=True, null=True, verbose_name='Modified date')),
                ('publish_date', models.DateTimeField(blank=True, null=True, verbose_name='Published date')),
                # MPTT bookkeeping columns (maintained by django-mptt; never
                # edited directly).
                ('lft', models.PositiveIntegerField(db_index=True, editable=False)),
                ('rght', models.PositiveIntegerField(db_index=True, editable=False)),
                ('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
                ('level', models.PositiveIntegerField(db_index=True, editable=False)),
                ('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='page_children', to='ostinato_pages.Page', verbose_name='Parent')),
            ],
            options={
                'verbose_name': 'Page',
                'verbose_name_plural': 'Pages',
                'permissions': (('private_view_page', '[Private] Can View Page'), ('private_edit_page', '[Private] Can Edit Page'), ('private_delete_page', '[Private] Can Delete Page'), ('can_make_public_page', 'Can Make_public Page'), ('public_view_page', '[Public] Can View Page'), ('public_edit_page', '[Public] Can Edit Page'), ('public_delete_page', '[Public] Can Delete Page'), ('can_make_private_page', 'Can Make_private Page')),
            },
        ),
    ]
import itertools
import time
import flask
from dash import Dash
from dash.dependencies import Input, Output
import dash_html_components as html
import dash_core_components as dcc
from .IntegrationTests import IntegrationTests
from .utils import wait_for
class Tests(IntegrationTests):
    """Container class; race-condition test methods are attached dynamically below."""

    def setUp(self):
        # Deliberately skip the parent setUp: each generated test builds
        # and starts its own Dash app/server.
        pass


# Base delay (seconds) applied per matched endpoint in the before_request hook.
DELAY_TIME = 1
def create_race_conditions_test(endpoints):
    """Build a test method that delays the given endpoints, in order, to
    simulate responses arriving in an adversarial order.

    The i-th endpoint in ``endpoints`` is delayed by (i + 1) * DELAY_TIME
    seconds; the test then asserts the app still renders correctly.
    """
    def test(self):
        app = Dash()
        app.layout = html.Div([
            html.Div('Hello world', id='output'),
            dcc.Input(id='input', value='initial value')
        ])
        app.scripts.config.serve_locally = True

        @app.callback(
            Output('output', 'children'),
            [Input('input', 'value')])
        def update(value):
            # Identity callback: output mirrors the input value.
            return value

        def delay():
            # Flask before_request hook: sleep longer for endpoints later
            # in the permutation so responses complete out of order.
            for i, route in enumerate(endpoints):
                if route in flask.request.path:
                    time.sleep((DELAY_TIME * i) + DELAY_TIME)

        def element_text(id):
            # Best-effort read of an element's text; '' if not present yet.
            try:
                return self.driver.find_element_by_id(id).text
            except:
                return ''

        app.server.before_request(delay)
        self.startServer(app)
        # Wait out the worst-case accumulated delay before asserting.
        # NOTE(review): this loop iterates the module-level ``routes``,
        # not the ``endpoints`` argument — equivalent in length here since
        # permutations preserve length, but verify if that is intentional.
        total_delay = 0
        for i in routes:
            total_delay += DELAY_TIME*2 + DELAY_TIME
        time.sleep(total_delay + DELAY_TIME)
        wait_for(
            lambda: element_text('output') == 'initial value',
            lambda: '"{}" != "initial value"\nbody text: {}'.format(
                element_text('output'),
                element_text('react-entry-point')
            )
        )
        self.assertTrue(self.is_console_clean())
    return test
# Endpoints whose responses are artificially delayed; one test is generated
# for every ordering of delays.
routes = [
    'layout',
    'dependencies',
    'update-component',
    '_config'
    # routes and component-suites
    # are other endpoints but are excluded to speed up tests
]

# Attach one test method per permutation, named after the delay order,
# e.g. test_delayed_layout_dependencies_update_component__config.
for route_list in itertools.permutations(routes, len(routes)):
    setattr(
        Tests,
        'test_delayed_{}'.format(
            '_'.join([
                r.replace('-', '_') for r in route_list
            ])),
        create_race_conditions_test(route_list)
    )
import time
import flask
from dash import Dash
from dash.dependencies import Input, Output
import dash_html_components as html
import dash_core_components as dcc
from .IntegrationTests import IntegrationTests
from .utils import wait_for
class Tests(IntegrationTests):
    """Container class; race-condition test methods are attached dynamically below."""

    def setUp(self):
        # Deliberately skip the parent setUp: each generated test builds
        # and starts its own Dash app/server.
        pass


# Base delay (seconds) applied per matched endpoint in the before_request hook.
DELAY_TIME = 1
def create_race_conditions_test(endpoints):
    """Build a test method that delays the given endpoints, in order, to
    simulate responses arriving in an adversarial order.

    The i-th endpoint in ``endpoints`` is delayed by (i + 1) * DELAY_TIME
    seconds; the test then asserts the app still renders correctly.
    """
    def test(self):
        app = Dash()
        app.layout = html.Div([
            html.Div('Hello world', id='output'),
            dcc.Input(id='input', value='initial value')
        ])
        app.scripts.config.serve_locally = True

        @app.callback(
            Output('output', 'children'),
            [Input('input', 'value')])
        def update(value):
            # Identity callback: output mirrors the input value.
            return value

        def delay():
            # Flask before_request hook: sleep longer for endpoints later
            # in the permutation so responses complete out of order.
            for i, route in enumerate(endpoints):
                if route in flask.request.path:
                    time.sleep((DELAY_TIME * i) + DELAY_TIME)

        def element_text(id):
            # Best-effort read of an element's text; '' if not present yet.
            try:
                return self.driver.find_element_by_id(id).text
            except:
                return ''

        app.server.before_request(delay)
        self.startServer(app)
        # Wait out the worst-case accumulated delay before asserting.
        # NOTE(review): this loop iterates the module-level ``routes``,
        # not the ``endpoints`` argument — equivalent in length here since
        # permutations preserve length, but verify if that is intentional.
        total_delay = 0
        for i in routes:
            total_delay += DELAY_TIME*2 + DELAY_TIME
        time.sleep(total_delay + DELAY_TIME)
        wait_for(
            lambda: element_text('output') == 'initial value',
            lambda: '"{}" != "initial value"\nbody text: {}'.format(
                element_text('output'),
                element_text('react-entry-point')
            )
        )
        self.assertTrue(self.is_console_clean())
    return test
# Endpoints whose responses are artificially delayed; one test is generated
# for every ordering of delays.
routes = [
    'layout',
    'dependencies',
    'update-component',
    '_config'
    # routes and component-suites
    # are other endpoints but are excluded to speed up tests
]

# Attach one test method per permutation, named after the delay order,
# e.g. test_delayed_layout_dependencies_update_component__config.
for route_list in itertools.permutations(routes, len(routes)):
    setattr(
        Tests,
        'test_delayed_{}'.format(
            '_'.join([
                r.replace('-', '_') for r in route_list
            ])),
        create_race_conditions_test(route_list)
    )
from .tool.func import *
def func_upload_2(conn):
    """Handle the wiki file-upload page.

    POST: validate each uploaded file (ACL, captcha, size, extension,
    name charset, duplicates, filename filters), store it on disk under a
    hashed name, create its 'file:' wiki page and history entry, then
    redirect to the new page. GET: render the upload form.

    ``conn`` is an open DB connection; committed only after all files in
    a POST succeed.
    """
    curs = conn.cursor()
    if acl_check(None, 'upload') == 1:
        # Caller is banned from uploading at all.
        return re_error('/ban')
    if flask.request.method == 'POST':
        if captcha_post(flask.request.form.get('g-recaptcha-response', flask.request.form.get('g-recaptcha', ''))) == 1:
            return re_error('/error/13')
        else:
            captcha_post('', 0)
        file_data = flask.request.files.getlist("f_data[]", None)
        if not file_data:
            return re_error('/error/9')
        file_len = len(file_data)
        # wiki_set(3) is the per-file size limit in MB; reject when the whole
        # request exceeds limit * number of files.
        if int(wiki_set(3)) * 1024 * 1024 * file_len < flask.request.content_length:
            return re_error('/error/17')
        if file_len == 1:
            file_num = None
        else:
            # Multi-file upload needs a separate permission; each file gets
            # a numeric suffix appended to the supplied name.
            if acl_check(None, 'many_upload') == 1:
                return re_error('/ban')
            file_num = 1
        for data in file_data:
            value = os.path.splitext(data.filename)[1]
            # Extension whitelist from the html_filter table.
            curs.execute(db_change("select html from html_filter where kind = 'extension'"))
            extension = [i[0].lower() for i in curs.fetchall()]
            if not re.sub(r'^\.', '', value).lower() in extension:
                return re_error('/error/14')
            if flask.request.form.get('f_name', None):
                name = flask.request.form.get('f_name', None) + (' ' + str(file_num) if file_num else '') + value
            else:
                name = data.filename
            piece = os.path.splitext(name)
            # Only Hangul, alphanumerics, underscore, hyphen and space are
            # allowed in the page name.
            if re.search(r'[^ㄱ-힣0-9a-zA-Z_\- ]', piece[0]):
                return re_error('/error/22')
            # On-disk name is a hash of the page name plus the extension.
            e_data = sha224_replace(piece[0]) + piece[1]
            curs.execute(db_change("select title from data where title = ?"), ['file:' + name])
            if curs.fetchall():
                # A page with this file name already exists.
                return re_error('/error/16')
            # Regex-based filename blacklist.
            curs.execute(db_change("select html from html_filter where kind = 'file'"))
            db_data = curs.fetchall()
            for i in db_data:
                t_re = re.compile(i[0])
                if t_re.search(name):
                    return redirect('/file_filter')
            if os.path.exists(os.path.join(app_var['path_data_image'], e_data)):
                # Overwrite a stale file with the same hashed name.
                os.remove(os.path.join(app_var['path_data_image'], e_data))
                data.save(os.path.join(app_var['path_data_image'], e_data))
            else:
                data.save(os.path.join(app_var['path_data_image'], e_data))
            ip = ip_check()
            g_lice = flask.request.form.get('f_lice', '')
            file_size = os.stat(os.path.join(app_var['path_data_image'], e_data)).st_size
            # Page body format depends on the configured markup engine.
            curs.execute(db_change("select data from other where name = 'markup'"))
            db_data = curs.fetchall()
            if db_data and db_data[0][0] == 'namumark':
                file_d = '' + \
                    '[[file:' + name + ']]\n' + \
                    '{{{[[file:' + name + ']]}}}\n\n' + \
                    (g_lice + '\n' if g_lice != '' else '') + \
                    flask.request.form.get('f_lice_sel', 'direct_input') + '\n' + \
                    (ip if ip_or_user(ip) != 0 else '[[user:' + ip + ']]') + '\n' + \
                    str(file_size) + ' Byte\n' + \
                    '[[category:' + re.sub(r'\]', '_', flask.request.form.get('f_lice_sel', '')) + ']]' + \
                    ''
            else:
                file_d = '' + \
                    '/image/' + e_data + '\n\n' + \
                    (g_lice + '\n' if g_lice != '' else '') + \
                    flask.request.form.get('f_lice_sel', 'direct_input') + '\n' + \
                    ip + \
                    str(file_size) + ' Byte\n' + \
                    ''
            curs.execute(db_change("insert into data (title, data) values (?, ?)"), ['file:' + name, file_d])
            # New file pages start admin-locked for edits.
            curs.execute(db_change("insert into acl (title, decu, dis, why, view) values (?, 'admin', '', '', '')"), ['file:' + name])
            render_set(
                title = 'file:' + name,
                data = file_d,
                num = 1
            )
            history_plus(
                'file:' + name,
                file_d,
                get_time(),
                ip,
                ip,
                '0',
                'upload'
            )
            if file_num:
                file_num += 1
        conn.commit()
        # Redirect to the last uploaded file's page.
        return redirect('/w/file:' + name)
    else:
        # GET: build the license <select> options and render the form.
        license_list = '''
<option value="direct_input">''' + load_lang('direct_input') + '''</option>
'''
        curs.execute(db_change("select html from html_filter where kind = 'image_license'"))
        db_data = curs.fetchall()
        for i in db_data:
            license_list += '''
<option value="''' + i[0] + '''">''' + i[0] + '''</option>
'''
        return easy_minify(flask.render_template(skin_check(),
            imp = [load_lang('upload'), wiki_set(), custom(), other2([0, 0])],
            data = '''
<a href="/file_filter">(''' + load_lang('file_filter_list') + ''')</a>
<hr class=\"main_hr\">
''' + load_lang('max_file_size') + ''' : ''' + wiki_set(3) + '''MB
<hr class=\"main_hr\">
<form method="post" enctype="multipart/form-data" accept-charset="utf8">
<input multiple="multiple" type="file" name="f_data[]">
<hr class=\"main_hr\">
<input placeholder="''' + load_lang('file_name') + '''" name="f_name" value="''' + flask.request.args.get('name', '') + '''">
<hr class=\"main_hr\">
<select name="f_lice_sel">
''' + license_list + '''
</select>
<hr class=\"main_hr\">
<textarea rows="10" placeholder="''' + load_lang('other') + '''" name="f_lice"></textarea>
<hr class=\"main_hr\">
''' + captcha_get() + '''
<button id="save" type="submit">''' + load_lang('save') + '''</button>
</form>
''',
            menu = [['other', load_lang('return')]]
        ))
def func_upload_2(conn):
    """Handle the wiki file-upload page.

    POST: validate each uploaded file (ACL, captcha, size, extension,
    name charset, duplicates, filename filters), store it on disk under a
    hashed name, create its 'file:' wiki page and history entry, then
    redirect to the new page. GET: render the upload form.

    ``conn`` is an open DB connection; committed only after all files in
    a POST succeed.
    """
    curs = conn.cursor()
    if acl_check(None, 'upload') == 1:
        # Caller is banned from uploading at all.
        return re_error('/ban')
    if flask.request.method == 'POST':
        if captcha_post(flask.request.form.get('g-recaptcha-response', flask.request.form.get('g-recaptcha', ''))) == 1:
            return re_error('/error/13')
        else:
            captcha_post('', 0)
        file_data = flask.request.files.getlist("f_data[]", None)
        if not file_data:
            return re_error('/error/9')
        file_len = len(file_data)
        # wiki_set(3) is the per-file size limit in MB; reject when the whole
        # request exceeds limit * number of files.
        if int(wiki_set(3)) * 1024 * 1024 * file_len < flask.request.content_length:
            return re_error('/error/17')
        if file_len == 1:
            file_num = None
        else:
            # Multi-file upload needs a separate permission; each file gets
            # a numeric suffix appended to the supplied name.
            if acl_check(None, 'many_upload') == 1:
                return re_error('/ban')
            file_num = 1
        for data in file_data:
            value = os.path.splitext(data.filename)[1]
            # Extension whitelist from the html_filter table.
            curs.execute(db_change("select html from html_filter where kind = 'extension'"))
            extension = [i[0].lower() for i in curs.fetchall()]
            if not re.sub(r'^\.', '', value).lower() in extension:
                return re_error('/error/14')
            if flask.request.form.get('f_name', None):
                name = flask.request.form.get('f_name', None) + (' ' + str(file_num) if file_num else '') + value
            else:
                name = data.filename
            piece = os.path.splitext(name)
            # Only Hangul, alphanumerics, underscore, hyphen and space are
            # allowed in the page name.
            if re.search(r'[^ㄱ-힣0-9a-zA-Z_\- ]', piece[0]):
                return re_error('/error/22')
            # On-disk name is a hash of the page name plus the extension.
            e_data = sha224_replace(piece[0]) + piece[1]
            curs.execute(db_change("select title from data where title = ?"), ['file:' + name])
            if curs.fetchall():
                # A page with this file name already exists.
                return re_error('/error/16')
            # Regex-based filename blacklist.
            curs.execute(db_change("select html from html_filter where kind = 'file'"))
            db_data = curs.fetchall()
            for i in db_data:
                t_re = re.compile(i[0])
                if t_re.search(name):
                    return redirect('/file_filter')
            if os.path.exists(os.path.join(app_var['path_data_image'], e_data)):
                # Overwrite a stale file with the same hashed name.
                os.remove(os.path.join(app_var['path_data_image'], e_data))
                data.save(os.path.join(app_var['path_data_image'], e_data))
            else:
                data.save(os.path.join(app_var['path_data_image'], e_data))
            ip = ip_check()
            g_lice = flask.request.form.get('f_lice', '')
            file_size = os.stat(os.path.join(app_var['path_data_image'], e_data)).st_size
            # Page body format depends on the configured markup engine.
            curs.execute(db_change("select data from other where name = 'markup'"))
            db_data = curs.fetchall()
            if db_data and db_data[0][0] == 'namumark':
                file_d = '' + \
                    '[[file:' + name + ']]\n' + \
                    '{{{[[file:' + name + ']]}}}\n\n' + \
                    (g_lice + '\n' if g_lice != '' else '') + \
                    flask.request.form.get('f_lice_sel', 'direct_input') + '\n' + \
                    (ip if ip_or_user(ip) != 0 else '[[user:' + ip + ']]') + '\n' + \
                    str(file_size) + ' Byte\n' + \
                    '[[category:' + re.sub(r'\]', '_', flask.request.form.get('f_lice_sel', '')) + ']]' + \
                    ''
            else:
                file_d = '' + \
                    '/image/' + e_data + '\n\n' + \
                    (g_lice + '\n' if g_lice != '' else '') + \
                    flask.request.form.get('f_lice_sel', 'direct_input') + '\n' + \
                    ip + \
                    str(file_size) + ' Byte\n' + \
                    ''
            curs.execute(db_change("insert into data (title, data) values (?, ?)"), ['file:' + name, file_d])
            # New file pages start admin-locked for edits.
            curs.execute(db_change("insert into acl (title, decu, dis, why, view) values (?, 'admin', '', '', '')"), ['file:' + name])
            render_set(
                title = 'file:' + name,
                data = file_d,
                num = 1
            )
            history_plus(
                'file:' + name,
                file_d,
                get_time(),
                ip,
                ip,
                '0',
                'upload'
            )
            if file_num:
                file_num += 1
        conn.commit()
        # Redirect to the last uploaded file's page.
        return redirect('/w/file:' + name)
    else:
        # GET: build the license <select> options and render the form.
        license_list = '''
<option value="direct_input">''' + load_lang('direct_input') + '''</option>
'''
        curs.execute(db_change("select html from html_filter where kind = 'image_license'"))
        db_data = curs.fetchall()
        for i in db_data:
            license_list += '''
<option value="''' + i[0] + '''">''' + i[0] + '''</option>
'''
        return easy_minify(flask.render_template(skin_check(),
            imp = [load_lang('upload'), wiki_set(), custom(), other2([0, 0])],
            data = '''
<a href="/file_filter">(''' + load_lang('file_filter_list') + ''')</a>
<hr class=\"main_hr\">
''' + load_lang('max_file_size') + ''' : ''' + wiki_set(3) + '''MB
<hr class=\"main_hr\">
<form method="post" enctype="multipart/form-data" accept-charset="utf8">
<input multiple="multiple" type="file" name="f_data[]">
<hr class=\"main_hr\">
<input placeholder="''' + load_lang('file_name') + '''" name="f_name" value="''' + flask.request.args.get('name', '') + '''">
<hr class=\"main_hr\">
<select name="f_lice_sel">
''' + license_list + '''
</select>
<hr class=\"main_hr\">
<textarea rows="10" placeholder="''' + load_lang('other') + '''" name="f_lice"></textarea>
<hr class=\"main_hr\">
''' + captcha_get() + '''
<button id="save" type="submit">''' + load_lang('save') + '''</button>
</form>
''',
            menu = [['other', load_lang('return')]]
        ))
import vField
import numpy as np
def transformField( field, scale, translate ):
    '''Performs a transformation on the domain of a vector field.
    @param field An instance of vField.VectorField. This will be changed
    IN PLACE.
    @param scale A 2-tuple of floats. The x- and y-scale values.
    @param translate A 2-tuple of floats. The x- and y-translate values.
    '''
    print "Transforming field:"
    print "\tMin point:", field.minPoint
    print "\tSize:", field.size
    print "\tCell size:", field.cellSize
    if ( scale[0] != 1.0 or scale[1] != 1.0 ):
        print "Scaling", scale
        # cache magnitudes
        field.minPoint[0] *= scale[0]
        field.minPoint[1] *= scale[1]
        # Cells are square, so |scale x| == |scale y| (validated by main()).
        field.cellSize *= abs( scale[0] )
        field.size[0] *= abs(scale[0])
        field.size[1] *= abs(scale[1])
        if ( scale[0] < 0 ):
            # reverse all x-directions: negate x-components and mirror the
            # grid along the column axis.
            field.data[ :, :, 0 ] = -field.data[ :, :, 0 ]
            field.data[ :, :, : ] = field.data[ :, ::-1, : ]
            # NOTE(review): subtracts size[1] from the x min-point while the
            # y-branch below subtracts size[0]. Confirm whether VectorField.size
            # is stored (height, width); otherwise these indices look swapped.
            field.minPoint[0] -= field.size[1]
        if ( scale[1] < 0 ):
            # reverse all y-directions: negate y-components and mirror the
            # grid along the row axis.
            field.data[ :, :, 1 ] = -field.data[ :, :, 1 ]
            field.data[ :, :, : ] = field.data[ ::-1, :, : ]
            field.minPoint[1] -= field.size[0]
    if ( translate[0] != 0.0 or translate[1] != 0.0 ):
        # Translation is applied after scaling (see main()'s description).
        field.minPoint[0] += translate[0]
        field.minPoint[1] += translate[1]
    print "Transformed field:"
    print "\tMin point:", field.minPoint
    print "\tSize:", field.size
    print "\tCell size:", field.cellSize
def main():
parser = optparse.OptionParser()
parser.set_description( 'Transform the DOMAIN of a vector field. Although negatively scaling a vector field WILL reverse the vector directions. Finally, scale is applied before transformation' )
parser.add_option( '-i', '--input', help='The name of the vector field file to transform',
action='store', dest='inFileName', default=None )
parser.add_option( '-o', '--output', help='The name of the output vectof field file to write.',
action='store', dest='outFileName', default=None )
parser.add_option( '-s', '--scale', help='A pair of values indicating the x and y scale values. NOTE: this merely transforms the domain; vector values will maintain previous magnitude. The field only supports SQUARE cells so |scale x| == |scale y| must be true.',
nargs=2, action='store', dest='scale', type='float', default=(1.0,1.0) )
parser.add_option( '-t', '--transoate', help='A pair of values indicating the x and y translate values.',
nargs=2, action='store', dest='translate', type='float', default=(0.0,0.0) )
options, args = parser.parse_args()
# validate
if ( options.inFileName is None ):
parser.print_help()
print '\n!!! You must specify an input file'
sys.exit(1)
if ( options.inFileName is None ):
parser.print_help()
print '\n!!! You must specify an output file'
sys.exit(1)
if ( abs( options.scale[0] ) != abs( options.scale[1] ) ):
parser.print_help()
print '\n!!! The scale values must have the same magnitude to maintain square grid cells!'
sys.exit(1)
field = vField.VectorField( (0,0), (1, 1), 1 )
field.read( options.inFileName )
# transform the data
transformField( field, options.scale, options.translate )
# export the data
field.writeAscii( options.outFileName )
if __name__ == '__main__':
import sys
import optparse
main() | out/xformVectorField.py |
import vField
import numpy as np
def transformField( field, scale, translate ):
    '''Performs a transformation on the domain of a vector field.
    @param field An instance of vField.VectorField. This will be changed
    IN PLACE.
    @param scale A 2-tuple of floats. The x- and y-scale values.
    @param translate A 2-tuple of floats. The x- and y-translate values.
    '''
    print "Transforming field:"
    print "\tMin point:", field.minPoint
    print "\tSize:", field.size
    print "\tCell size:", field.cellSize
    if ( scale[0] != 1.0 or scale[1] != 1.0 ):
        print "Scaling", scale
        # cache magnitudes
        field.minPoint[0] *= scale[0]
        field.minPoint[1] *= scale[1]
        # Cells are square, so |scale x| == |scale y| (validated by main()).
        field.cellSize *= abs( scale[0] )
        field.size[0] *= abs(scale[0])
        field.size[1] *= abs(scale[1])
        if ( scale[0] < 0 ):
            # reverse all x-directions: negate x-components and mirror the
            # grid along the column axis.
            field.data[ :, :, 0 ] = -field.data[ :, :, 0 ]
            field.data[ :, :, : ] = field.data[ :, ::-1, : ]
            # NOTE(review): subtracts size[1] from the x min-point while the
            # y-branch below subtracts size[0]. Confirm whether VectorField.size
            # is stored (height, width); otherwise these indices look swapped.
            field.minPoint[0] -= field.size[1]
        if ( scale[1] < 0 ):
            # reverse all y-directions: negate y-components and mirror the
            # grid along the row axis.
            field.data[ :, :, 1 ] = -field.data[ :, :, 1 ]
            field.data[ :, :, : ] = field.data[ ::-1, :, : ]
            field.minPoint[1] -= field.size[0]
    if ( translate[0] != 0.0 or translate[1] != 0.0 ):
        # Translation is applied after scaling (see main()'s description).
        field.minPoint[0] += translate[0]
        field.minPoint[1] += translate[1]
    print "Transformed field:"
    print "\tMin point:", field.minPoint
    print "\tSize:", field.size
    print "\tCell size:", field.cellSize
def main():
parser = optparse.OptionParser()
parser.set_description( 'Transform the DOMAIN of a vector field. Although negatively scaling a vector field WILL reverse the vector directions. Finally, scale is applied before transformation' )
parser.add_option( '-i', '--input', help='The name of the vector field file to transform',
action='store', dest='inFileName', default=None )
parser.add_option( '-o', '--output', help='The name of the output vectof field file to write.',
action='store', dest='outFileName', default=None )
parser.add_option( '-s', '--scale', help='A pair of values indicating the x and y scale values. NOTE: this merely transforms the domain; vector values will maintain previous magnitude. The field only supports SQUARE cells so |scale x| == |scale y| must be true.',
nargs=2, action='store', dest='scale', type='float', default=(1.0,1.0) )
parser.add_option( '-t', '--transoate', help='A pair of values indicating the x and y translate values.',
nargs=2, action='store', dest='translate', type='float', default=(0.0,0.0) )
options, args = parser.parse_args()
# validate
if ( options.inFileName is None ):
parser.print_help()
print '\n!!! You must specify an input file'
sys.exit(1)
if ( options.inFileName is None ):
parser.print_help()
print '\n!!! You must specify an output file'
sys.exit(1)
if ( abs( options.scale[0] ) != abs( options.scale[1] ) ):
parser.print_help()
print '\n!!! The scale values must have the same magnitude to maintain square grid cells!'
sys.exit(1)
field = vField.VectorField( (0,0), (1, 1), 1 )
field.read( options.inFileName )
# transform the data
transformField( field, options.scale, options.translate )
# export the data
field.writeAscii( options.outFileName )
if __name__ == '__main__':
import sys
import optparse
main() | 0.548432 | 0.605099 |
import math
import tkinter as tk
from dusted import geom, utils
class LevelView(tk.Canvas):
    """Canvas widget that draws the level geometry and the recorded path.

    Supports panning (left-drag), zooming (mouse wheel / buttons 4-5) and
    seeking the cursor to the path point nearest a right-click.
    """

    def __init__(self, parent, level, cursor):
        super().__init__(parent, height=0)
        self.level = level
        self.level.subscribe(self.on_level_change)
        self.cursor = cursor
        self.cursor.subscribe(self.on_cursor_move)
        self.bind("<Button-4>", self.on_scroll)  # Linux
        self.bind("<Button-5>", self.on_scroll)
        self.bind("<MouseWheel>", self.on_scroll)  # Windows
        self.bind("<Button-1>", self.on_click)
        self.bind("<B1-Motion>", self.on_drag)
        self.bind("<Button-3>", self.on_right_click)
        self.bind("<B3-Motion>", self.on_right_click)
        # Shift + right click/drag keeps the current selection while seeking.
        self.bind("<Shift-Button-3>", lambda e: self.on_right_click(e, True))
        self.bind("<Shift-B3-Motion>", lambda e: self.on_right_click(e, True))
        self.reset()

    def reset(self):
        # Drop all view state and every drawn canvas item.
        self.zoom_level = 1
        self.offset_x = self.offset_y = 0
        self.prev_mx = self.prev_my = 0
        self.coords = []          # (x, y) per frame, in level coordinates
        self.path_objects = []    # canvas line ids joining consecutive frames
        self.position_object = None  # rectangle marking the selected frame
        self.delete("all")

    def on_level_change(self):
        # Rebuild the whole scene from the newly-loaded level.
        self.reset()
        level_data = utils.load_level(self.level.get())
        # Layer 19 tiles are the ones rendered; presumably the solid
        # foreground layer — TODO confirm against the level format.
        tiles = {(x, y) for (l, x, y), t in level_data.tiles.items() if l == 19}
        outlines = geom.tile_outlines(tiles)
        for outline in outlines:
            # First ring is the outer boundary; the rest are holes drawn in
            # the background color. Tiles are 48x48 units.
            self.create_polygon(*[(48 * x, 48 * y) for x, y in outline[0]], fill="#bbb")
            for hole in outline[1:]:
                self.create_polygon(*[(48 * x, 48 * y) for x, y in hole], fill="#d9d9d9")
        # Pan to level start
        start = level_data.start_position()
        width = self.winfo_width()
        height = self.winfo_height()
        self.pan(width // 2 - start.x, height // 2 - start.y)

    def select_frame(self, frame):
        # Move the selection rectangle to the given frame (or hide it when
        # the frame has no recorded coordinate).
        if self.position_object is not None:
            self.delete(self.position_object)
        if 0 <= frame < len(self.coords):
            x, y = self.coords[frame]
            self.position_object = self.create_rectangle(x - 24, y - 48, x + 24, y + 48)
            self.fix_object(self.position_object)
        else:
            self.position_object = None

    def add_coordinate(self, frame, x, y):
        # Record the position for `frame`, truncating any now-stale future
        # path or padding when a state beyond the current end was loaded.
        if frame < len(self.coords):  # Clear suffix
            for i in self.path_objects[max(0, frame - 1):]:
                self.delete(i)
            self.path_objects = self.path_objects[:max(0, frame - 1)]
            self.coords = self.coords[:frame]
        elif frame > len(self.coords):  # Loaded state in the future, pad values
            # -1 is a placeholder id: no canvas line exists for padded frames.
            self.path_objects.extend([-1] * (frame - min(1, len(self.coords)) + 1))
            self.coords.extend([(x, y)] * (frame - len(self.coords) + 1))
            return
        self.coords.append((x, y))
        if frame > 0:
            i = self.create_line(*self.coords[frame - 1], *self.coords[frame])
            self.fix_object(i)
            self.path_objects.append(i)

    def fix_object(self, i):
        # Bring a freshly-created item (drawn in level coordinates) into
        # the current zoom/pan transform.
        self.scale(i, 0, 0, self.zoom_level, self.zoom_level)
        self.move(i, self.offset_x, self.offset_y)

    def zoom(self, x, y, scale):
        # Zoom about screen point (x, y), keeping that point fixed.
        self.zoom_level *= scale
        self.offset_x = (self.offset_x - x) * scale + x
        self.offset_y = (self.offset_y - y) * scale + y
        self.scale("all", x, y, scale, scale)

    def pan(self, dx, dy):
        self.offset_x += dx
        self.offset_y += dy
        self.move("all", dx, dy)

    def on_cursor_move(self):
        self.select_frame(self.cursor.current_col)

    def on_scroll(self, event):
        # Button-4/5 are X11 wheel events; event.delta is the Windows wheel.
        if event.num == 4:
            scale = 1.25
        elif event.num == 5:
            scale = 0.8
        else:
            scale = pow(1.25, event.delta // 120)
        self.zoom(event.x, event.y, scale)

    def on_click(self, event):
        # Remember the drag origin for on_drag.
        self.prev_mx = event.x
        self.prev_my = event.y

    def on_drag(self, event):
        dx = event.x - self.prev_mx
        dy = event.y - self.prev_my
        self.pan(dx, dy)
        self.prev_mx = event.x
        self.prev_my = event.y

    def on_right_click(self, event, keep_selection=False):
        # Convert the click back to level coordinates and seek the cursor
        # to the nearest recorded frame.
        cx = (event.x - self.offset_x) / self.zoom_level
        cy = (event.y - self.offset_y) / self.zoom_level
        closest = None
        dist = 1e10
        for i, (x, y) in enumerate(self.coords):
            d = math.hypot(cx - x, cy - y)
            if d < dist:
                dist = d
                closest = i
        if closest is not None:
            row, _ = self.cursor.position()
            self.cursor.set(row, closest, keep_selection)
import tkinter as tk
from dusted import geom, utils
class LevelView(tk.Canvas):
    """Canvas that renders the level geometry plus the recorded replay path,
    with mouse-driven zoom, pan and frame selection.

    The view keeps its own ``zoom_level`` / ``offset_x`` / ``offset_y`` so
    that newly created items can be transformed into the current view via
    fix_object().
    """

    def __init__(self, parent, level, cursor):
        super().__init__(parent, height=0)
        self.level = level
        self.level.subscribe(self.on_level_change)
        self.cursor = cursor
        self.cursor.subscribe(self.on_cursor_move)
        self.bind("<Button-4>", self.on_scroll)  # Linux
        self.bind("<Button-5>", self.on_scroll)
        self.bind("<MouseWheel>", self.on_scroll)  # Windows
        self.bind("<Button-1>", self.on_click)
        self.bind("<B1-Motion>", self.on_drag)
        self.bind("<Button-3>", self.on_right_click)
        self.bind("<B3-Motion>", self.on_right_click)
        self.bind("<Shift-Button-3>", lambda e: self.on_right_click(e, True))
        self.bind("<Shift-B3-Motion>", lambda e: self.on_right_click(e, True))
        self.reset()

    def reset(self):
        """Clear the canvas and all view/replay state."""
        self.zoom_level = 1
        self.offset_x = self.offset_y = 0
        self.prev_mx = self.prev_my = 0
        # NOTE(review): self.coords shadows tk.Canvas.coords(); kept because
        # existing code relies on the attribute name.
        self.coords = []
        self.path_objects = []
        self.position_object = None
        self.delete("all")

    def on_level_change(self):
        """Redraw the level outline and center the view on the start position."""
        self.reset()
        level_data = utils.load_level(self.level.get())
        # Layer 19 appears to hold the solid tiles — TODO confirm.
        tiles = {(x, y) for (l, x, y), t in level_data.tiles.items() if l == 19}
        outlines = geom.tile_outlines(tiles)
        for outline in outlines:
            # Tiles are 48px; outline[0] is the outer ring, the rest are holes.
            self.create_polygon(*[(48 * x, 48 * y) for x, y in outline[0]], fill="#bbb")
            for hole in outline[1:]:
                self.create_polygon(*[(48 * x, 48 * y) for x, y in hole], fill="#d9d9d9")
        # Pan to level start
        start = level_data.start_position()
        width = self.winfo_width()
        height = self.winfo_height()
        self.pan(width // 2 - start.x, height // 2 - start.y)

    def select_frame(self, frame):
        """Highlight the recorded position of the given frame, if it exists."""
        if self.position_object is not None:
            self.delete(self.position_object)
        if 0 <= frame < len(self.coords):
            x, y = self.coords[frame]
            self.position_object = self.create_rectangle(x - 24, y - 48, x + 24, y + 48)
            self.fix_object(self.position_object)
        else:
            self.position_object = None

    def add_coordinate(self, frame, x, y):
        """Record position (x, y) for ``frame`` and extend the drawn path."""
        if frame < len(self.coords):  # Clear suffix
            for i in self.path_objects[max(0, frame - 1):]:
                self.delete(i)
            self.path_objects = self.path_objects[:max(0, frame - 1)]
            self.coords = self.coords[:frame]
        elif frame > len(self.coords):  # Loaded state in the future, pad values
            # NOTE(review): min(1, len(self.coords)) looks asymmetric with the
            # coords padding below — confirm the intended path_objects length.
            self.path_objects.extend([-1] * (frame - min(1, len(self.coords)) + 1))
            self.coords.extend([(x, y)] * (frame - len(self.coords) + 1))
            return
        self.coords.append((x, y))
        if frame > 0:
            i = self.create_line(*self.coords[frame - 1], *self.coords[frame])
            self.fix_object(i)
            self.path_objects.append(i)

    def fix_object(self, i):
        """Transform a newly created item into the current zoom/pan view."""
        self.scale(i, 0, 0, self.zoom_level, self.zoom_level)
        self.move(i, self.offset_x, self.offset_y)

    def zoom(self, x, y, scale):
        """Scale the view by ``scale`` about screen point (x, y)."""
        self.zoom_level *= scale
        self.offset_x = (self.offset_x - x) * scale + x
        self.offset_y = (self.offset_y - y) * scale + y
        self.scale("all", x, y, scale, scale)

    def pan(self, dx, dy):
        """Shift the viewport and every canvas item by (dx, dy) pixels."""
        self.offset_x += dx
        self.offset_y += dy
        self.move("all", dx, dy)

    def on_cursor_move(self):
        """Keep the highlighted frame marker in sync with the cursor column."""
        self.select_frame(self.cursor.current_col)

    def on_scroll(self, event):
        """Zoom around the pointer: buttons 4/5 on Linux, wheel delta on Windows."""
        if event.num == 4:
            scale = 1.25
        elif event.num == 5:
            scale = 0.8
        else:
            scale = pow(1.25, event.delta // 120)
        self.zoom(event.x, event.y, scale)

    def on_click(self, event):
        """Record the pointer position for subsequent drag-panning."""
        self.prev_mx = event.x
        self.prev_my = event.y

    def on_drag(self, event):
        """Pan by the pointer movement since the previous event."""
        dx = event.x - self.prev_mx
        dy = event.y - self.prev_my
        self.pan(dx, dy)
        self.prev_mx = event.x
        self.prev_my = event.y

    def on_right_click(self, event, keep_selection=False):
        """Jump the cursor to the recorded frame nearest the clicked point."""
        cx = (event.x - self.offset_x) / self.zoom_level
        cy = (event.y - self.offset_y) / self.zoom_level
        closest = None
        dist = 1e10
        for i, (x, y) in enumerate(self.coords):
            d = math.hypot(cx - x, cy - y)
            if d < dist:
                dist = d
                closest = i
        if closest is not None:
            row, _ = self.cursor.position()
            self.cursor.set(row, closest, keep_selection)
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import Tuple
import numpy as np
import pandapower as pp
import pandas as pd
from pandapower.control import ConstControl
from pandapower.timeseries import DFData, OutputWriter, run_timeseries
from tqdm import tqdm
from conf.conf import SIM_DIR
from src.simulation.network import NetData
"""
Implementation of the PandaPower simulation tool, to generate voltages and currents from network and loads.
Copyright @donelef, @jbrouill on GitHub
"""
@dataclass
class SimulationResult(object):
    """
    Data class to store voltages, loads and loading percentage of the grid.
    This is generated by the PandaPower simulation and can be read/written to files.
    """
    vm_pu: pd.DataFrame            # per-bus voltage magnitudes [p.u.]
    va_degree: pd.DataFrame        # per-bus voltage angles [degrees]
    p_mw: pd.DataFrame             # per-bus active power [MW]
    q_mvar: pd.DataFrame           # per-bus reactive power [MVar]
    loading_percent: pd.DataFrame  # per-line loading [%]
    result_path: Path              # directory the CSV files were read from

    @staticmethod
    def from_dir(dir_path: Path):
        """Load a result from a directory; each CSV stem must match a field name."""
        return SimulationResult(
            result_path=dir_path,
            **{f.stem: pd.read_csv(f, sep=";", index_col=0) for f in dir_path.rglob("*.csv")}
        )
class SimulatedNet(NetData):
    """Network that can be simulated with PandaPower to produce voltage/current data."""

    def __init__(self, ns: list = None, ls: list = None, other=None):
        NetData.__init__(self, ns, ls, other)
        self.sim_result = None  # SimulationResult, populated by run()

    def run(self, load_p: np.array, load_q: np.array, output_path: Path = SIM_DIR,
            verbose: bool = True, **kwargs):
        """
        Runs the simulation of a PandaPower network to obtain grid voltages based on loads.
        It saves the results in a file as well.
        :param load_p: T-by-n array of active loads as numpy array
        :param load_q: T-by-n array of reactive loads as numpy array
        :param output_path: path of the file to generate with the data
        :param verbose: verbose on/off
        :param kwargs: additional arguments for PandaPower
        :return: self, with sim_result populated
        """
        # NOTE(review): 'variables' is never used below — left as-is.
        variables = ['p_a_mw', 'p_b_mw', 'p_c_mw', 'q_a_mvar', 'q_b_mvar', 'q_c_mvar']
        controlled_net = self.deepcopy()
        # Constant controllers replay the given load profiles over the time series.
        load_df = DFData(pd.DataFrame(load_p, columns=self.load.index))
        ConstControl(controlled_net, element='load', element_index=self.load.index,
                     variable='p_mw', data_source=load_df, profile_name=self.load.index)
        load_df = DFData(pd.DataFrame(load_q, columns=self.load.index))
        ConstControl(controlled_net, element='load', element_index=self.load.index,
                     variable='q_mvar', data_source=load_df, profile_name=self.load.index)
        # Create file path and writer
        timed_out_path = output_path / f"sim_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
        ow = OutputWriter(controlled_net, output_path=timed_out_path, output_file_type=".csv")
        # Define variables to save
        ow.log_variable("res_line", "loading_percent")
        for v in ["vm_pu", "va_degree", "p_mw", "q_mvar"]:
            ow.log_variable("res_bus", v)
        # Run network simulation
        run_timeseries(controlled_net, verbose=verbose, numba=False, **kwargs)
        self.sim_result = SimulationResult.from_dir(timed_out_path)
        return self

    def get_current_and_voltage(self) -> Tuple[np.array, np.array]:
        """
        Extracts the results of a simulation into usable matrices of voltages and currents.
        :return: tuple of two T-by-n numpy arrays of voltages and currents,
            or (None, None) if run() has not been called yet
        """
        if self.sim_result is None:
            return None, None
        # Convert magnitude/angle to complex phasors, then I = V @ Y.
        va_rad = self.sim_result.va_degree.values * np.pi / 180
        voltage = self.sim_result.vm_pu.values * (np.cos(va_rad) + 1j * np.sin(va_rad))
        current = voltage @ self.make_y_bus()
        return voltage, current
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import Tuple
import numpy as np
import pandapower as pp
import pandas as pd
from pandapower.control import ConstControl
from pandapower.timeseries import DFData, OutputWriter, run_timeseries
from tqdm import tqdm
from conf.conf import SIM_DIR
from src.simulation.network import NetData
"""
Implementation of the PandaPower simulation tool, to generate voltages and currents from network and loads.
Copyright @donelef, @jbrouill on GitHub
"""
@dataclass
class SimulationResult(object):
    """
    Data class to store voltages, loads and loading percentage of the grid.
    This is generated by the PandaPower simulation and can be read/written to files.
    """
    vm_pu: pd.DataFrame            # per-bus voltage magnitudes [p.u.]
    va_degree: pd.DataFrame        # per-bus voltage angles [degrees]
    p_mw: pd.DataFrame             # per-bus active power [MW]
    q_mvar: pd.DataFrame           # per-bus reactive power [MVar]
    loading_percent: pd.DataFrame  # per-line loading [%]
    result_path: Path              # directory the CSV files were read from

    @staticmethod
    def from_dir(dir_path: Path):
        """Load a result from a directory; each CSV stem must match a field name."""
        return SimulationResult(
            result_path=dir_path,
            **{f.stem: pd.read_csv(f, sep=";", index_col=0) for f in dir_path.rglob("*.csv")}
        )
class SimulatedNet(NetData):
    """Network that can be simulated with PandaPower to produce voltage/current data."""

    def __init__(self, ns: list = None, ls: list = None, other=None):
        NetData.__init__(self, ns, ls, other)
        self.sim_result = None  # SimulationResult, populated by run()

    def run(self, load_p: np.array, load_q: np.array, output_path: Path = SIM_DIR,
            verbose: bool = True, **kwargs):
        """
        Runs the simulation of a PandaPower network to obtain grid voltages based on loads.
        It saves the results in a file as well.
        :param load_p: T-by-n array of active loads as numpy array
        :param load_q: T-by-n array of reactive loads as numpy array
        :param output_path: path of the file to generate with the data
        :param verbose: verbose on/off
        :param kwargs: additional arguments for PandaPower
        :return: self, with sim_result populated
        """
        controlled_net = self.deepcopy()
        # Constant controllers replay the given load profiles over the time series.
        load_df = DFData(pd.DataFrame(load_p, columns=self.load.index))
        ConstControl(controlled_net, element='load', element_index=self.load.index,
                     variable='p_mw', data_source=load_df, profile_name=self.load.index)
        load_df = DFData(pd.DataFrame(load_q, columns=self.load.index))
        ConstControl(controlled_net, element='load', element_index=self.load.index,
                     variable='q_mvar', data_source=load_df, profile_name=self.load.index)
        # Create file path and writer
        timed_out_path = output_path / f"sim_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
        ow = OutputWriter(controlled_net, output_path=timed_out_path, output_file_type=".csv")
        # Define variables to save
        ow.log_variable("res_line", "loading_percent")
        for v in ["vm_pu", "va_degree", "p_mw", "q_mvar"]:
            ow.log_variable("res_bus", v)
        # Run network simulation
        run_timeseries(controlled_net, verbose=verbose, numba=False, **kwargs)
        self.sim_result = SimulationResult.from_dir(timed_out_path)
        return self

    def get_current_and_voltage(self) -> Tuple[np.array, np.array]:
        """
        Extracts the results of a simulation into usable matrices of voltages and currents.
        :return: tuple of two T-by-n numpy arrays of voltages and currents,
            or (None, None) if run() has not been called yet
        """
        if self.sim_result is None:
            return None, None
        # Convert magnitude/angle to complex phasors, then I = V @ Y.
        va_rad = self.sim_result.va_degree.values * np.pi / 180
        voltage = self.sim_result.vm_pu.values * (np.cos(va_rad) + 1j * np.sin(va_rad))
        current = voltage @ self.make_y_bus()
        return voltage, current
import copy
import itertools
from typing import List, Iterable
# Mandatory argument roles per relation type; consulted by
# extract_relations_from_smart_sample(only_mandatory=True) to filter arguments.
relation_mandatory_args = {
    "Accident": {"location", "trigger"},
    "CanceledRoute": {"location", "trigger"},
    "CanceledStop": {"location", "trigger"},
    "Delay": {"location", "trigger"},
    "Disaster": {"type", "location"},
    "Obstruction": {"location", "trigger"},
    "RailReplacementService": {"location", "trigger"},
    "TrafficJam": {"location", "trigger"},
    "Acquisition": {"buyer", "acquired"},
    "Insolvency": {"company", "trigger"},
    "Layoffs": {"company", "trigger"},
    "Merger": {"old", "new"},
    "OrganizationLeadership": {"organization", "person"},
    "SpinOff": {"parent", "child"},
    "Strike": {"company", "trigger"}
}
class Span:
    """A token span (start, end), end-inclusive, with an optional role and text."""

    def __init__(self, pos1, pos2, role: str = None, text: str = None):
        self.span = (int(pos1), int(pos2))
        self.role = role
        self.text = text

    @property
    def start(self):
        """First token index of the span."""
        return self.span[0]

    @property
    def end(self):
        """Last token index of the span (inclusive)."""
        return self.span[1]

    def to_josn(self):
        # NOTE(review): method name is a typo for to_json; kept because callers
        # (e.g. Relation.to_json) use this spelling.
        return {
            "start": self.span[0],
            "end": self.span[1],
            "name": self.role if self.role else "",
        }

    def __repr__(self):
        return f"[{repr(self.span)}, {self.role if self.role is not None else ''}]"

    def __eq__(self, other):
        return self.role == other.role and self.span == other.span

    def __hash__(self):
        return hash((self.span, self.role))
class Relation:
    """A labelled relation made up of role-tagged token Spans."""

    def __init__(self, spans: List[Span], label: str = None, allow_overlaps=False):
        if not len(spans) >= 1:
            raise AttributeError("Tried to create Relation with no spans")
        # TODO ist das gut so? Review
        # (original author note: "is this good? review")
        if not allow_overlaps:
            # Drop one span of every overlapping pair, keeping the one that
            # starts earlier (tie-break: ends earlier, then smaller role).
            # NOTE(review): a span conflicting with several others could be
            # removed twice, raising ValueError — confirm inputs avoid this.
            for s1, s2 in itertools.combinations(spans, 2):
                if s2.start <= s1.start <= s2.end or s2.start <= s1.end <= s2.end:
                    # Found conflicting spans!
                    if s1.start < s2.start:
                        spans.remove(s2)
                    elif s2.start < s1.start:
                        spans.remove(s1)
                    elif s1.end < s2.end:
                        spans.remove(s2)
                    elif s2.end < s1.end:
                        spans.remove(s1)
                    elif s1.role < s2.role:
                        spans.remove(s2)
                    else:
                        spans.remove(s1)
        self.spans = spans
        self.label = label

    @property
    def start(self):
        """Smallest start index over all argument spans."""
        return min((s.start for s in self.spans))

    @property
    def end(self):
        """Largest end index over all argument spans."""
        return max((s.end for s in self.spans))

    def get_bio_tags(self, n: int, mode: str = None) -> Iterable[str]:
        """Return a length-n BIO tag list ('B-[mode-]label-role', 'I-...', 'O')."""
        if not mode:
            mode = ""
        n_m = max(n, self.end + 1)
        tags = ["O"] * n_m
        for span in self.spans:
            # A span may only cover positions still tagged 'O'.
            if tags[span.start: span.end + 1] != ["O"] * (1 + span.end - span.start):
                raise RuntimeError(f"Nested argument types for relation {self}, "
                                   f"cannot build well-defined tag sequence.")
            tag_post = f"{self.label}-{span.role}"
            if mode != "":
                tag_post = f"{mode}-{tag_post}"
            # NOTE(review): both branches of this conditional are identical.
            tags[span.start] = f"B-{tag_post}" if mode == "" else f"B-{tag_post}"
            if span.end > span.start:
                tags[span.start + 1:span.end + 1] = [f"I-{tag_post}"] * (span.end - span.start)
        return tags[:n]

    @property
    def entities(self):
        """Copies of the spans with roles prefixed by the relation label."""
        entities = copy.deepcopy(self.spans)
        for ent, span in zip(entities, self.spans):
            ent.role = f"{self.label}-{span.role}"
        return entities

    def to_json(self):
        return {
            "name": self.label,
            "ents": [s.to_josn() for s in self.spans],
        }

    def __repr__(self):
        return f"{{({repr(self.spans)}, {self.label if self.label is not None else ''})}}"

    def __eq__(self, other):
        return {s for s in self.spans} == {s for s in other.spans} and self.label == other.label

    def __hash__(self):
        # Only the label is hashed; equal-label relations collide in hash but
        # are still distinguished by __eq__.
        return hash(self.label)
def extract_relations_from_smart_sample(sample, only_mandatory=False, include_trigger=True):
    """Build sorted Relation objects from a SmartData-style sample dict.

    :param sample: dict with "tokens" and "relationMentions" keys
    :param only_mandatory: keep only roles listed in relation_mandatory_args
    :param include_trigger: if False, drop "trigger" arguments
    :return: relations sorted by (start, end, label)
    """
    token_borders = [(t["span"]["start"], t["span"]["end"]) for t in sample["tokens"]]
    relations = []
    for relation in sample["relationMentions"]:
        relation_name = relation["name"]
        spans = []
        for argument in relation["args"]:
            if only_mandatory and argument["role"] not in relation_mandatory_args[relation_name]:
                continue
            if not include_trigger and argument["role"] == "trigger":
                continue
            argument_role = argument["role"]
            entity = argument["conceptMention"]["span"]
            # Map character offsets to token indices.
            start, end = get_token_index_from_char_index(entity["start"], entity["end"], token_borders)
            spans += [Span(start, end, role=argument_role)]
        if spans:
            relations += [Relation(spans, label=relation_name)]
    relations = sorted(relations, key=lambda r: (r.start, r.end, r.label))
    return relations
def get_token_index_from_char_index(start, end, token_borders, fuzzy=False):
    """Map character offsets to (first, last) token indices.

    :param token_borders: iterable of (char_start, char_end) per token
    :param fuzzy: if True, match the first token at-or-after each offset
        instead of requiring an exact border match
    :raises AttributeError: if no token matches the offsets
    """
    token_borders = sorted(token_borders, key=lambda x: x[0])
    if fuzzy:
        token_start = [i[0] for i in enumerate(token_borders) if i[1][0] >= start]
        token_end = [i[0] for i in enumerate(token_borders) if i[1][1] >= end]
    else:
        token_start = [i[0] for i in enumerate(token_borders) if i[1][0] == start]
        token_end = [i[0] for i in enumerate(token_borders) if i[1][1] == end]
    try:
        return token_start[0], token_end[0]
    except IndexError:
        raise AttributeError(f"Tokenborders {token_borders} are not valid "
                             f"for char indizies {start, end} with fuzzy={fuzzy}")
def extract_entities(sample):
    """Return a Span per concept mention whose offsets align exactly with token borders.

    Mentions that do not match any token border are silently skipped.
    """
    token_borders = [(t["span"]["start"], t["span"]["end"]) for t in sample["tokens"]]
    entities = []
    for concept_mention in sample["conceptMentions"]:
        start = concept_mention["span"]["start"]
        end = concept_mention["span"]["end"]
        # Rebind start/end from char offsets to matching token index lists.
        start = [i[0] for i in enumerate(token_borders) if i[1][0] == start]
        end = [i[0] for i in enumerate(token_borders) if i[1][1] == end]
        if len(start) > 0 and len(end) > 0:
            entities += [Span(start[0], end[0], role=concept_mention["type"])]
    return entities
import copy
import itertools
from typing import List, Iterable
# Mandatory argument roles per relation type; consulted by
# extract_relations_from_smart_sample(only_mandatory=True) to filter arguments.
relation_mandatory_args = {
    "Accident": {"location", "trigger"},
    "CanceledRoute": {"location", "trigger"},
    "CanceledStop": {"location", "trigger"},
    "Delay": {"location", "trigger"},
    "Disaster": {"type", "location"},
    "Obstruction": {"location", "trigger"},
    "RailReplacementService": {"location", "trigger"},
    "TrafficJam": {"location", "trigger"},
    "Acquisition": {"buyer", "acquired"},
    "Insolvency": {"company", "trigger"},
    "Layoffs": {"company", "trigger"},
    "Merger": {"old", "new"},
    "OrganizationLeadership": {"organization", "person"},
    "SpinOff": {"parent", "child"},
    "Strike": {"company", "trigger"}
}
class Span:
    """A token span (start, end), end-inclusive, with an optional role and text."""

    def __init__(self, pos1, pos2, role: str = None, text: str = None):
        self.span = (int(pos1), int(pos2))
        self.role = role
        self.text = text

    @property
    def start(self):
        """First token index of the span."""
        return self.span[0]

    @property
    def end(self):
        """Last token index of the span (inclusive)."""
        return self.span[1]

    def to_josn(self):
        """Serialize to a plain dict (name is empty when role is falsy)."""
        return {
            "start": self.span[0],
            "end": self.span[1],
            "name": self.role if self.role else "",
        }

    # Correctly spelled alias for the historical typo above; existing callers
    # that use to_josn() keep working.
    to_json = to_josn

    def __repr__(self):
        return f"[{repr(self.span)}, {self.role if self.role is not None else ''}]"

    def __eq__(self, other):
        return self.role == other.role and self.span == other.span

    def __hash__(self):
        return hash((self.span, self.role))
class Relation:
    """A labelled relation made up of role-tagged token Spans."""

    def __init__(self, spans: List[Span], label: str = None, allow_overlaps=False):
        if not len(spans) >= 1:
            raise AttributeError("Tried to create Relation with no spans")
        # TODO ist das gut so? Review
        if not allow_overlaps:
            # Drop one span of every overlapping pair, keeping the one that
            # starts earlier (tie-break: ends earlier, then smaller role).
            # NOTE(review): a span conflicting with several others could be
            # removed twice, raising ValueError — confirm inputs avoid this.
            for s1, s2 in itertools.combinations(spans, 2):
                if s2.start <= s1.start <= s2.end or s2.start <= s1.end <= s2.end:
                    # Found conflicting spans!
                    if s1.start < s2.start:
                        spans.remove(s2)
                    elif s2.start < s1.start:
                        spans.remove(s1)
                    elif s1.end < s2.end:
                        spans.remove(s2)
                    elif s2.end < s1.end:
                        spans.remove(s1)
                    elif s1.role < s2.role:
                        spans.remove(s2)
                    else:
                        spans.remove(s1)
        self.spans = spans
        self.label = label

    @property
    def start(self):
        """Smallest start index over all argument spans."""
        return min((s.start for s in self.spans))

    @property
    def end(self):
        """Largest end index over all argument spans."""
        return max((s.end for s in self.spans))

    def get_bio_tags(self, n: int, mode: str = None) -> Iterable[str]:
        """Return a length-n BIO tag list ('B-[mode-]label-role', 'I-...', 'O').

        :raises RuntimeError: if two spans overlap (nested argument types)
        """
        if not mode:
            mode = ""
        n_m = max(n, self.end + 1)
        tags = ["O"] * n_m
        for span in self.spans:
            # A span may only cover positions still tagged 'O'.
            if tags[span.start: span.end + 1] != ["O"] * (1 + span.end - span.start):
                raise RuntimeError(f"Nested argument types for relation {self}, "
                                   f"cannot build well-defined tag sequence.")
            tag_post = f"{self.label}-{span.role}"
            if mode != "":
                tag_post = f"{mode}-{tag_post}"
            # Both branches of the original conditional here were identical
            # (dead condition); a plain assignment is equivalent.
            tags[span.start] = f"B-{tag_post}"
            if span.end > span.start:
                tags[span.start + 1:span.end + 1] = [f"I-{tag_post}"] * (span.end - span.start)
        return tags[:n]

    @property
    def entities(self):
        """Copies of the spans with roles prefixed by the relation label."""
        entities = copy.deepcopy(self.spans)
        for ent, span in zip(entities, self.spans):
            ent.role = f"{self.label}-{span.role}"
        return entities

    def to_json(self):
        return {
            "name": self.label,
            "ents": [s.to_josn() for s in self.spans],
        }

    def __repr__(self):
        return f"{{({repr(self.spans)}, {self.label if self.label is not None else ''})}}"

    def __eq__(self, other):
        return {s for s in self.spans} == {s for s in other.spans} and self.label == other.label

    def __hash__(self):
        # Only the label is hashed; equal-label relations collide in hash but
        # are still distinguished by __eq__.
        return hash(self.label)
def extract_relations_from_smart_sample(sample, only_mandatory=False, include_trigger=True):
    """Build sorted Relation objects from a SmartData-style sample dict.

    :param sample: dict with "tokens" and "relationMentions" keys
    :param only_mandatory: keep only roles listed in relation_mandatory_args
    :param include_trigger: if False, drop "trigger" arguments
    :return: relations sorted by (start, end, label)
    """
    borders = [(tok["span"]["start"], tok["span"]["end"]) for tok in sample["tokens"]]
    result = []
    for mention in sample["relationMentions"]:
        label = mention["name"]
        arg_spans = []
        for arg in mention["args"]:
            if only_mandatory and arg["role"] not in relation_mandatory_args[label]:
                continue
            if not include_trigger and arg["role"] == "trigger":
                continue
            char_span = arg["conceptMention"]["span"]
            # Map character offsets to token indices.
            tok_start, tok_end = get_token_index_from_char_index(
                char_span["start"], char_span["end"], borders)
            arg_spans.append(Span(tok_start, tok_end, role=arg["role"]))
        if arg_spans:
            result.append(Relation(arg_spans, label=label))
    return sorted(result, key=lambda rel: (rel.start, rel.end, rel.label))
def get_token_index_from_char_index(start, end, token_borders, fuzzy=False):
    """Map character offsets to (first, last) token indices.

    :param start: character index of the span start
    :param end: character index of the span end
    :param token_borders: iterable of (char_start, char_end) per token
    :param fuzzy: if True, match the first token at-or-after each offset
        instead of requiring an exact border match
    :raises AttributeError: if no token matches the given offsets
    """
    token_borders = sorted(token_borders, key=lambda x: x[0])
    # Stop at the first match instead of materializing every matching index.
    if fuzzy:
        token_start = next((i for i, (s, _) in enumerate(token_borders) if s >= start), None)
        token_end = next((i for i, (_, e) in enumerate(token_borders) if e >= end), None)
    else:
        token_start = next((i for i, (s, _) in enumerate(token_borders) if s == start), None)
        token_end = next((i for i, (_, e) in enumerate(token_borders) if e == end), None)
    if token_start is None or token_end is None:
        # Same exception type as before so existing callers keep working.
        raise AttributeError(f"Tokenborders {token_borders} are not valid "
                             f"for char indices {start, end} with fuzzy={fuzzy}")
    return token_start, token_end
def extract_entities(sample):
    """Return a Span per concept mention whose offsets align exactly with token borders.

    Mentions that do not match any token border are silently skipped.
    """
    token_borders = [(t["span"]["start"], t["span"]["end"]) for t in sample["tokens"]]
    entities = []
    for concept_mention in sample["conceptMentions"]:
        start = concept_mention["span"]["start"]
        end = concept_mention["span"]["end"]
        # Rebind start/end from char offsets to matching token index lists.
        start = [i[0] for i in enumerate(token_borders) if i[1][0] == start]
        end = [i[0] for i in enumerate(token_borders) if i[1][1] == end]
        if len(start) > 0 and len(end) > 0:
            entities += [Span(start[0], end[0], role=concept_mention["type"])]
    return entities
import pytest
from app.emails import send_email
@pytest.mark.parametrize('sender, config_set_arg, config_set, tags', [
    # Default sender / no configuration set.
    (None, None, '', []),
    # Explicit sender with a configuration set and matching message tag.
    ('<EMAIL>', 'foo', 'foo', [{'Name': 'foo', 'Value': 'foo'}])
])
def test_send_email(test_app, mocker, sender, config_set_arg,
                    config_set, tags):
    """Tests the send_email function."""
    mocked_boto3_client = mocker.patch('app.emails.boto3.client')
    mocked_boto3_client_instance = mocked_boto3_client.return_value
    mocked_render_template = mocker.patch('app.emails.render_template')
    mocked_template_html = mocked_render_template.return_value
    test_app.config['NO_EMAIL'] = False
    with test_app.app_context():
        test_app.config['SES_REGION_NAME'] = 'foo'
        test_app.config['AWS_ACCESS_KEY_ID'] = 'bar'
        test_app.config['AWS_SECRET_ACCESS_KEY'] = 'baz'
        test_app.config['SES_DEFAULT_EMAIL_SOURCE'] = '<EMAIL>'
        send_email('foo', ['bar'], 'foo.html', {'baz': 'qux'},
                   sender=sender,
                   configuration_set_name=config_set_arg)
    # The SES client must be built from the app's AWS configuration.
    mocked_boto3_client.assert_called_with(
        'ses',
        region_name='foo',
        aws_access_key_id='bar',
        aws_secret_access_key='baz')
    # The template is rendered with the supplied context.
    mocked_render_template.assert_called_with('foo.html', baz='qux')
    mocked_boto3_client_instance.send_email.assert_called_with(
        Source='<EMAIL>',
        Destination={'ToAddresses': ['bar']},
        Message={
            'Subject': {'Data': 'foo'},
            'Body': {
                'Html': {'Data': mocked_template_html}
            }
        },
        ConfigurationSetName=config_set,
        Tags=tags
    )
def test_send_error_email_or_email_disabled(test_app, mocker, caplog):
    """Tests the send_email function for an error email or if NO_EMAIL is set."""
    mocked_boto3_client = mocker.patch('app.emails.boto3.client')
    mocker.patch('app.emails.render_template')
    # NO_EMAIL should suppress sending entirely and only log the parameters.
    test_app.config['NO_EMAIL'] = True
    with test_app.app_context():
        test_app.config['SES_REGION_NAME'] = 'foo'
        test_app.config['AWS_ACCESS_KEY_ID'] = 'bar'
        test_app.config['AWS_SECRET_ACCESS_KEY'] = 'baz'
        test_app.config['SES_DEFAULT_EMAIL_SOURCE'] = '<EMAIL>'
        send_email('foo', ['bar'], 'foo.html', {})
    log_text = ('NO_EMAIL environment variable set. '
                'Suppressing an email with the following params: '
                'Sender: {}. Recipients: {}. Subject: {}.'.format(
                    '<EMAIL>', ['bar'], 'foo'))
    assert log_text in caplog.text
    mocked_boto3_client.return_value.send_email.assert_not_called()
import pytest
from app.emails import send_email
@pytest.mark.parametrize('sender, config_set_arg, config_set, tags', [
    # Default sender / no configuration set.
    (None, None, '', []),
    # Explicit sender with a configuration set and matching message tag.
    ('<EMAIL>', 'foo', 'foo', [{'Name': 'foo', 'Value': 'foo'}])
])
def test_send_email(test_app, mocker, sender, config_set_arg,
                    config_set, tags):
    """Tests the send_email function."""
    mocked_boto3_client = mocker.patch('app.emails.boto3.client')
    mocked_boto3_client_instance = mocked_boto3_client.return_value
    mocked_render_template = mocker.patch('app.emails.render_template')
    mocked_template_html = mocked_render_template.return_value
    test_app.config['NO_EMAIL'] = False
    with test_app.app_context():
        test_app.config['SES_REGION_NAME'] = 'foo'
        test_app.config['AWS_ACCESS_KEY_ID'] = 'bar'
        test_app.config['AWS_SECRET_ACCESS_KEY'] = 'baz'
        test_app.config['SES_DEFAULT_EMAIL_SOURCE'] = '<EMAIL>'
        send_email('foo', ['bar'], 'foo.html', {'baz': 'qux'},
                   sender=sender,
                   configuration_set_name=config_set_arg)
    # The SES client must be built from the app's AWS configuration.
    mocked_boto3_client.assert_called_with(
        'ses',
        region_name='foo',
        aws_access_key_id='bar',
        aws_secret_access_key='baz')
    # The template is rendered with the supplied context.
    mocked_render_template.assert_called_with('foo.html', baz='qux')
    mocked_boto3_client_instance.send_email.assert_called_with(
        Source='<EMAIL>',
        Destination={'ToAddresses': ['bar']},
        Message={
            'Subject': {'Data': 'foo'},
            'Body': {
                'Html': {'Data': mocked_template_html}
            }
        },
        ConfigurationSetName=config_set,
        Tags=tags
    )
def test_send_error_email_or_email_disabled(test_app, mocker, caplog):
    """Tests the send_email function for an error email or if NO_EMAIL is set."""
    mocked_boto3_client = mocker.patch('app.emails.boto3.client')
    mocker.patch('app.emails.render_template')
    # NO_EMAIL should suppress sending entirely and only log the parameters.
    test_app.config['NO_EMAIL'] = True
    with test_app.app_context():
        test_app.config['SES_REGION_NAME'] = 'foo'
        test_app.config['AWS_ACCESS_KEY_ID'] = 'bar'
        test_app.config['AWS_SECRET_ACCESS_KEY'] = 'baz'
        test_app.config['SES_DEFAULT_EMAIL_SOURCE'] = '<EMAIL>'
        send_email('foo', ['bar'], 'foo.html', {})
    log_text = ('NO_EMAIL environment variable set. '
                'Suppressing an email with the following params: '
                'Sender: {}. Recipients: {}. Subject: {}.'.format(
                    '<EMAIL>', ['bar'], 'foo'))
    assert log_text in caplog.text
    mocked_boto3_client.return_value.send_email.assert_not_called()
import unittest
from follower_maze import clients
from tests import factories
from tests import mocks
from tests.helpers import async_test
# Shared test fixtures: payload bytes and distinct client ids reused across cases.
PAYLOAD1 = b"1"
PAYLOAD2 = b"2"
CLIENT1 = 1
CLIENT2 = 13
CLIENT3 = 87
class TestClientRegistry(unittest.TestCase):
"""
Test ClientRegistry: correct mapping, notifies and auto-cleaning.
"""
@async_test
async def tearDown(self):
    """Reset the global client registry so tests stay independent."""
    await clients.registry.Clients.reset()
@async_test
async def test_register_one(self):
    """A registered client's writer can be looked up by its id."""
    client_id, client_writer = factories.get_client()
    await clients.handle_new_client(client_id=client_id, writer=client_writer)
    self.assertEqual(await clients.maybe_get_client_writer(client_id=client_id), client_writer)
@async_test
async def test_register_two(self):
    """Two registered clients each map to their own writer."""
    client_id1, client_writer1 = factories.get_client(CLIENT1)
    client_id2, client_writer2 = factories.get_client(CLIENT2)
    await clients.handle_new_client(client_id=client_id1, writer=client_writer1)
    await clients.handle_new_client(client_id=client_id2, writer=client_writer2)
    self.assertEqual(await clients.maybe_get_client_writer(client_id=client_id1), client_writer1)
    self.assertEqual(await clients.maybe_get_client_writer(client_id=client_id2), client_writer2)
@async_test
async def test_get_closed_writer(self):
    """A closing writer is auto-cleaned: lookups return None afterwards."""
    client_id, client_writer = factories.get_client()
    await clients.handle_new_client(client_id=client_id, writer=client_writer)
    self.assertEqual(await clients.maybe_get_client_writer(client_id=client_id), client_writer)
    client_writer.set_closing()
    self.assertIsNone(await clients.maybe_get_client_writer(client_id=client_id))
@async_test
async def test_broadcast_one_client(self):
    """A broadcast is delivered to the single registered client."""
    client_id, client_writer = factories.get_client()
    await clients.handle_new_client(client_id=client_id, writer=client_writer)
    await clients.broadcast(PAYLOAD1)
    self.assertEqual(len(client_writer.get_mailbox()), 1)
    self.assertIn(PAYLOAD1, client_writer.get_mailbox())
@async_test
async def test_broadcast_two_clients(self):
    """A broadcast is delivered to every registered client."""
    client_id1, client_writer1 = factories.get_client(CLIENT1)
    client_id2, client_writer2 = factories.get_client(CLIENT2)
    await clients.handle_new_client(client_id=client_id1, writer=client_writer1)
    await clients.handle_new_client(client_id=client_id2, writer=client_writer2)
    await clients.broadcast(PAYLOAD1)
    self.assertEqual(len(client_writer1.get_mailbox()), 1)
    self.assertIn(PAYLOAD1, client_writer1.get_mailbox())
    self.assertEqual(len(client_writer2.get_mailbox()), 1)
    self.assertIn(PAYLOAD1, client_writer2.get_mailbox())
@async_test
async def test_broadcast_two_clients_one_closed(self):
    """A broadcast skips clients whose writer is closing."""
    client_id1, client_writer1 = factories.get_client(CLIENT1)
    client_id2, client_writer2 = factories.get_client(CLIENT2)
    await clients.handle_new_client(client_id=client_id1, writer=client_writer1)
    await clients.handle_new_client(client_id=client_id2, writer=client_writer2)
    client_writer2.set_closing()
    await clients.broadcast(PAYLOAD1)
    self.assertEqual(len(client_writer1.get_mailbox()), 1)
    self.assertIn(PAYLOAD1, client_writer1.get_mailbox())
    self.assertEqual(len(client_writer2.get_mailbox()), 0)
@async_test
async def test_send_private_message_one_client(self):
    """A private message is delivered to the addressed client."""
    client_id, client_writer = factories.get_client()
    await clients.handle_new_client(client_id=client_id, writer=client_writer)
    await clients.send_private_message(client_id=client_id, payload=PAYLOAD1)
    self.assertEqual(len(client_writer.get_mailbox()), 1)
    self.assertIn(PAYLOAD1, client_writer.get_mailbox())
@async_test
async def test_send_private_message_different_client(self):
    """A private message addressed to an unknown id reaches nobody."""
    client_id, client_writer = factories.get_client()
    await clients.handle_new_client(client_id=client_id, writer=client_writer)
    await clients.send_private_message(client_id=client_id + 1, payload=PAYLOAD1)
    self.assertEqual(len(client_writer.get_mailbox()), 0)
@async_test
async def test_follow(self):
    """A follow event is delivered only to the followed client."""
    client_id1, client_writer1 = factories.get_client(CLIENT1)
    client_id2, client_writer2 = factories.get_client(CLIENT2)
    await clients.handle_new_client(client_id=client_id1, writer=client_writer1)
    await clients.handle_new_client(client_id=client_id2, writer=client_writer2)
    await clients.follow(from_client=client_id1, to_client=client_id2, payload=PAYLOAD1)
    self.assertEqual(len(client_writer1.get_mailbox()), 0)
    self.assertEqual(len(client_writer2.get_mailbox()), 1)
    self.assertIn(PAYLOAD1, client_writer2.get_mailbox())
@async_test
async def test_follow_closed(self):
    """A follow event to a closing writer is dropped."""
    client_id1, client_writer1 = factories.get_client(CLIENT1)
    client_id2, client_writer2 = factories.get_client(CLIENT2)
    await clients.handle_new_client(client_id=client_id1, writer=client_writer1)
    await clients.handle_new_client(client_id=client_id2, writer=client_writer2)
    client_writer2.set_closing()
    await clients.follow(from_client=client_id1, to_client=client_id2, payload=PAYLOAD1)
    self.assertEqual(len(client_writer1.get_mailbox()), 0)
    self.assertEqual(len(client_writer2.get_mailbox()), 0)
@async_test
async def test_unfollow(self):
    """An unfollow produces no notifications to either side."""
    client_id1, client_writer1 = factories.get_client(CLIENT1)
    client_id2, client_writer2 = factories.get_client(CLIENT2)
    await clients.handle_new_client(client_id=client_id1, writer=client_writer1)
    await clients.handle_new_client(client_id=client_id2, writer=client_writer2)
    await clients.unfollow(from_client=client_id1, to_client=client_id2)
    self.assertEqual(len(client_writer1.get_mailbox()), 0)
    self.assertEqual(len(client_writer2.get_mailbox()), 0)
@async_test
async def test_unfollow_closed(self):
    """An unfollow involving a closing writer also notifies nobody."""
    client_id1, client_writer1 = factories.get_client(CLIENT1)
    client_id2, client_writer2 = factories.get_client(CLIENT2)
    await clients.handle_new_client(client_id=client_id1, writer=client_writer1)
    await clients.handle_new_client(client_id=client_id2, writer=client_writer2)
    client_writer2.set_closing()
    await clients.unfollow(from_client=client_id1, to_client=client_id2)
    self.assertEqual(len(client_writer1.get_mailbox()), 0)
    self.assertEqual(len(client_writer2.get_mailbox()), 0)
@async_test
async def test_status_update_no_followers(self):
    """A status update from a client with no followers reaches nobody."""
    client_id, client_writer = factories.get_client()
    await clients.handle_new_client(client_id=client_id, writer=client_writer)
    await clients.status_update(client_id=client_id, payload=PAYLOAD1)
    self.assertEqual(len(client_writer.get_mailbox()), 0)
@async_test
async def test_status_update_one_follower(self):
    """The follow event reaches the followee; the status update reaches the follower."""
    client_id1, client_writer1 = factories.get_client(CLIENT1)
    client_id2, client_writer2 = factories.get_client(CLIENT2)
    await clients.handle_new_client(client_id=client_id1, writer=client_writer1)
    await clients.handle_new_client(client_id=client_id2, writer=client_writer2)
    await clients.follow(from_client=client_id2, to_client=client_id1, payload=PAYLOAD1)
    await clients.status_update(client_id=client_id1, payload=PAYLOAD2)
    self.assertEqual(len(client_writer1.get_mailbox()), 1)
    self.assertIn(PAYLOAD1, client_writer1.get_mailbox())
    self.assertEqual(len(client_writer2.get_mailbox()), 1)
    self.assertIn(PAYLOAD2, client_writer2.get_mailbox())
@async_test
async def test_status_update_two_followers(self):
    """A status update is delivered to both followers; follows reach the followee."""
    # Use the shared factory and id constants, consistent with every sibling test
    # (the original built MockClientWriter instances and raw ids inline).
    client_id1, client_writer1 = factories.get_client(CLIENT1)
    client_id2, client_writer2 = factories.get_client(CLIENT2)
    client_id3, client_writer3 = factories.get_client(CLIENT3)
    await clients.handle_new_client(client_id=client_id1, writer=client_writer1)
    await clients.handle_new_client(client_id=client_id2, writer=client_writer2)
    await clients.handle_new_client(client_id=client_id3, writer=client_writer3)
    await clients.follow(from_client=client_id2, to_client=client_id1, payload=PAYLOAD1)
    await clients.follow(from_client=client_id3, to_client=client_id1, payload=PAYLOAD2)
    await clients.status_update(client_id=client_id1, payload=PAYLOAD2)
    self.assertEqual(len(client_writer1.get_mailbox()), 2)
    self.assertIn(PAYLOAD1, client_writer1.get_mailbox())
    self.assertEqual(len(client_writer2.get_mailbox()), 1)
    self.assertIn(PAYLOAD2, client_writer2.get_mailbox())
    self.assertEqual(len(client_writer3.get_mailbox()), 1)
    self.assertIn(PAYLOAD2, client_writer3.get_mailbox())
@async_test
async def test_status_update_two_followers_one_closed(self):
client_id1, client_writer1 = factories.get_client(CLIENT1)
client_id2, client_writer2 = factories.get_client(CLIENT2)
client_id3, client_writer3 = factories.get_client(CLIENT3)
await clients.handle_new_client(client_id=client_id1, writer=client_writer1)
await clients.handle_new_client(client_id=client_id2, writer=client_writer2)
await clients.handle_new_client(client_id=client_id3, writer=client_writer3)
await clients.follow(from_client=client_id2, to_client=client_id1, payload=PAYLOAD1)
await clients.follow(from_client=client_id3, to_client=client_id1, payload=PAYLOAD1)
client_writer3.set_closing()
await clients.status_update(client_id=client_id1, payload=PAYLOAD2)
self.assertEqual(len(client_writer1.get_mailbox()), 2)
self.assertIn(PAYLOAD1, client_writer1.get_mailbox())
self.assertEqual(len(client_writer2.get_mailbox()), 1)
self.assertIn(PAYLOAD2, client_writer2.get_mailbox())
self.assertEqual(len(client_writer3.get_mailbox()), 0)
@async_test
async def test_no_status_update_after_unfollow(self):
client_id1, client_writer1 = factories.get_client(CLIENT1)
client_id2, client_writer2 = factories.get_client(CLIENT2)
client_id3, client_writer3 = factories.get_client(CLIENT3)
await clients.handle_new_client(client_id=client_id1, writer=client_writer1)
await clients.handle_new_client(client_id=client_id2, writer=client_writer2)
await clients.handle_new_client(client_id=client_id3, writer=client_writer3)
await clients.follow(from_client=client_id2, to_client=client_id1, payload=PAYLOAD1)
await clients.follow(from_client=client_id3, to_client=client_id1, payload=PAYLOAD1)
await clients.unfollow(from_client=client_id3, to_client=client_id1)
await clients.status_update(client_id=client_id1, payload=PAYLOAD2)
self.assertEqual(len(client_writer1.get_mailbox()), 2)
self.assertIn(PAYLOAD1, client_writer1.get_mailbox())
self.assertEqual(len(client_writer2.get_mailbox()), 1)
self.assertIn(PAYLOAD2, client_writer2.get_mailbox())
self.assertEqual(len(client_writer3.get_mailbox()), 0)
if __name__ == '__main__':
unittest.main() | tests/test_client_registry.py | import unittest
from follower_maze import clients
from tests import factories
from tests import mocks
from tests.helpers import async_test
PAYLOAD1 = b"1"
PAYLOAD2 = b"2"
CLIENT1 = 1
CLIENT2 = 13
CLIENT3 = 87
class TestClientRegistry(unittest.TestCase):
"""
Test ClientRegistry: correct mapping, notifies and auto-cleaning.
"""
@async_test
async def tearDown(self):
await clients.registry.Clients.reset()
@async_test
async def test_register_one(self):
client_id, client_writer = factories.get_client()
await clients.handle_new_client(client_id=client_id, writer=client_writer)
self.assertEqual(await clients.maybe_get_client_writer(client_id=client_id), client_writer)
@async_test
async def test_register_two(self):
client_id1, client_writer1 = factories.get_client(CLIENT1)
client_id2, client_writer2 = factories.get_client(CLIENT2)
await clients.handle_new_client(client_id=client_id1, writer=client_writer1)
await clients.handle_new_client(client_id=client_id2, writer=client_writer2)
self.assertEqual(await clients.maybe_get_client_writer(client_id=client_id1), client_writer1)
self.assertEqual(await clients.maybe_get_client_writer(client_id=client_id2), client_writer2)
@async_test
async def test_get_closed_writer(self):
client_id, client_writer = factories.get_client()
await clients.handle_new_client(client_id=client_id, writer=client_writer)
self.assertEqual(await clients.maybe_get_client_writer(client_id=client_id), client_writer)
client_writer.set_closing()
self.assertIsNone(await clients.maybe_get_client_writer(client_id=client_id))
@async_test
async def test_broadcast_one_client(self):
client_id, client_writer = factories.get_client()
await clients.handle_new_client(client_id=client_id, writer=client_writer)
await clients.broadcast(PAYLOAD1)
self.assertEqual(len(client_writer.get_mailbox()), 1)
self.assertIn(PAYLOAD1, client_writer.get_mailbox())
@async_test
async def test_broadcast_two_clients(self):
client_id1, client_writer1 = factories.get_client(CLIENT1)
client_id2, client_writer2 = factories.get_client(CLIENT2)
await clients.handle_new_client(client_id=client_id1, writer=client_writer1)
await clients.handle_new_client(client_id=client_id2, writer=client_writer2)
await clients.broadcast(PAYLOAD1)
self.assertEqual(len(client_writer1.get_mailbox()), 1)
self.assertIn(PAYLOAD1, client_writer1.get_mailbox())
self.assertEqual(len(client_writer2.get_mailbox()), 1)
self.assertIn(PAYLOAD1, client_writer2.get_mailbox())
@async_test
async def test_broadcast_two_clients_one_closed(self):
client_id1, client_writer1 = factories.get_client(CLIENT1)
client_id2, client_writer2 = factories.get_client(CLIENT2)
await clients.handle_new_client(client_id=client_id1, writer=client_writer1)
await clients.handle_new_client(client_id=client_id2, writer=client_writer2)
client_writer2.set_closing()
await clients.broadcast(PAYLOAD1)
self.assertEqual(len(client_writer1.get_mailbox()), 1)
self.assertIn(PAYLOAD1, client_writer1.get_mailbox())
self.assertEqual(len(client_writer2.get_mailbox()), 0)
@async_test
async def test_send_private_message_one_client(self):
client_id, client_writer = factories.get_client()
await clients.handle_new_client(client_id=client_id, writer=client_writer)
await clients.send_private_message(client_id=client_id, payload=PAYLOAD1)
self.assertEqual(len(client_writer.get_mailbox()), 1)
self.assertIn(PAYLOAD1, client_writer.get_mailbox())
@async_test
async def test_send_private_message_different_client(self):
client_id, client_writer = factories.get_client()
await clients.handle_new_client(client_id=client_id, writer=client_writer)
await clients.send_private_message(client_id=client_id + 1, payload=PAYLOAD1)
self.assertEqual(len(client_writer.get_mailbox()), 0)
@async_test
async def test_follow(self):
client_id1, client_writer1 = factories.get_client(CLIENT1)
client_id2, client_writer2 = factories.get_client(CLIENT2)
await clients.handle_new_client(client_id=client_id1, writer=client_writer1)
await clients.handle_new_client(client_id=client_id2, writer=client_writer2)
await clients.follow(from_client=client_id1, to_client=client_id2, payload=PAYLOAD1)
self.assertEqual(len(client_writer1.get_mailbox()), 0)
self.assertEqual(len(client_writer2.get_mailbox()), 1)
self.assertIn(PAYLOAD1, client_writer2.get_mailbox())
@async_test
async def test_follow_closed(self):
client_id1, client_writer1 = factories.get_client(CLIENT1)
client_id2, client_writer2 = factories.get_client(CLIENT2)
await clients.handle_new_client(client_id=client_id1, writer=client_writer1)
await clients.handle_new_client(client_id=client_id2, writer=client_writer2)
client_writer2.set_closing()
await clients.follow(from_client=client_id1, to_client=client_id2, payload=PAYLOAD1)
self.assertEqual(len(client_writer1.get_mailbox()), 0)
self.assertEqual(len(client_writer2.get_mailbox()), 0)
@async_test
async def test_unfollow(self):
client_id1, client_writer1 = factories.get_client(CLIENT1)
client_id2, client_writer2 = factories.get_client(CLIENT2)
await clients.handle_new_client(client_id=client_id1, writer=client_writer1)
await clients.handle_new_client(client_id=client_id2, writer=client_writer2)
await clients.unfollow(from_client=client_id1, to_client=client_id2)
self.assertEqual(len(client_writer1.get_mailbox()), 0)
self.assertEqual(len(client_writer2.get_mailbox()), 0)
@async_test
async def test_unfollow_closed(self):
client_id1, client_writer1 = factories.get_client(CLIENT1)
client_id2, client_writer2 = factories.get_client(CLIENT2)
await clients.handle_new_client(client_id=client_id1, writer=client_writer1)
await clients.handle_new_client(client_id=client_id2, writer=client_writer2)
client_writer2.set_closing()
await clients.unfollow(from_client=client_id1, to_client=client_id2)
self.assertEqual(len(client_writer1.get_mailbox()), 0)
self.assertEqual(len(client_writer2.get_mailbox()), 0)
@async_test
async def test_status_update_no_followers(self):
client_id, client_writer = factories.get_client()
await clients.handle_new_client(client_id=client_id, writer=client_writer)
await clients.status_update(client_id=client_id, payload=PAYLOAD1)
self.assertEqual(len(client_writer.get_mailbox()), 0)
@async_test
async def test_status_update_one_follower(self):
client_id1, client_writer1 = factories.get_client(CLIENT1)
client_id2, client_writer2 = factories.get_client(CLIENT2)
await clients.handle_new_client(client_id=client_id1, writer=client_writer1)
await clients.handle_new_client(client_id=client_id2, writer=client_writer2)
await clients.follow(from_client=client_id2, to_client=client_id1, payload=PAYLOAD1)
await clients.status_update(client_id=client_id1, payload=PAYLOAD2)
self.assertEqual(len(client_writer1.get_mailbox()), 1)
self.assertIn(PAYLOAD1, client_writer1.get_mailbox())
self.assertEqual(len(client_writer2.get_mailbox()), 1)
self.assertIn(PAYLOAD2, client_writer2.get_mailbox())
@async_test
async def test_status_update_two_followers(self):
client_id1, client_writer1 = 1, mocks.MockClientWriter()
client_id2, client_writer2 = 13, mocks.MockClientWriter()
client_id3, client_writer3 = 17, mocks.MockClientWriter()
await clients.handle_new_client(client_id=client_id1, writer=client_writer1)
await clients.handle_new_client(client_id=client_id2, writer=client_writer2)
await clients.handle_new_client(client_id=client_id3, writer=client_writer3)
await clients.follow(from_client=client_id2, to_client=client_id1, payload=PAYLOAD1)
await clients.follow(from_client=client_id3, to_client=client_id1, payload=PAYLOAD2)
await clients.status_update(client_id=client_id1, payload=PAYLOAD2)
self.assertEqual(len(client_writer1.get_mailbox()), 2)
self.assertIn(PAYLOAD1, client_writer1.get_mailbox())
self.assertEqual(len(client_writer2.get_mailbox()), 1)
self.assertIn(PAYLOAD2, client_writer2.get_mailbox())
self.assertEqual(len(client_writer3.get_mailbox()), 1)
self.assertIn(PAYLOAD2, client_writer3.get_mailbox())
@async_test
async def test_status_update_two_followers_one_closed(self):
client_id1, client_writer1 = factories.get_client(CLIENT1)
client_id2, client_writer2 = factories.get_client(CLIENT2)
client_id3, client_writer3 = factories.get_client(CLIENT3)
await clients.handle_new_client(client_id=client_id1, writer=client_writer1)
await clients.handle_new_client(client_id=client_id2, writer=client_writer2)
await clients.handle_new_client(client_id=client_id3, writer=client_writer3)
await clients.follow(from_client=client_id2, to_client=client_id1, payload=PAYLOAD1)
await clients.follow(from_client=client_id3, to_client=client_id1, payload=PAYLOAD1)
client_writer3.set_closing()
await clients.status_update(client_id=client_id1, payload=PAYLOAD2)
self.assertEqual(len(client_writer1.get_mailbox()), 2)
self.assertIn(PAYLOAD1, client_writer1.get_mailbox())
self.assertEqual(len(client_writer2.get_mailbox()), 1)
self.assertIn(PAYLOAD2, client_writer2.get_mailbox())
self.assertEqual(len(client_writer3.get_mailbox()), 0)
@async_test
async def test_no_status_update_after_unfollow(self):
client_id1, client_writer1 = factories.get_client(CLIENT1)
client_id2, client_writer2 = factories.get_client(CLIENT2)
client_id3, client_writer3 = factories.get_client(CLIENT3)
await clients.handle_new_client(client_id=client_id1, writer=client_writer1)
await clients.handle_new_client(client_id=client_id2, writer=client_writer2)
await clients.handle_new_client(client_id=client_id3, writer=client_writer3)
await clients.follow(from_client=client_id2, to_client=client_id1, payload=PAYLOAD1)
await clients.follow(from_client=client_id3, to_client=client_id1, payload=PAYLOAD1)
await clients.unfollow(from_client=client_id3, to_client=client_id1)
await clients.status_update(client_id=client_id1, payload=PAYLOAD2)
self.assertEqual(len(client_writer1.get_mailbox()), 2)
self.assertIn(PAYLOAD1, client_writer1.get_mailbox())
self.assertEqual(len(client_writer2.get_mailbox()), 1)
self.assertIn(PAYLOAD2, client_writer2.get_mailbox())
self.assertEqual(len(client_writer3.get_mailbox()), 0)
if __name__ == '__main__':
unittest.main() | 0.429908 | 0.225385 |
import smtplib
import sys
def load_emails(filename):
"""
Load the target email addresses from a file.
"""
emails = []
print('[*] Loading email addresses.')
with open(filename) as f:
for line in f:
line = line.rstrip()
if line.startswith('#'):
continue
if line == '':
continue
emails.append(line)
return emails
def usage():
"""
Print usage statement and exit.
"""
print('Usage: smtp_enum.py mx_server port email_file')
sys.exit()
if __name__ == '__main__':
"""
Enumerate the target email addresses.
Use the EXPN, VRFY, or RCPT TO method to enumerate email addresses.
"""
if len(sys.argv) != 4:
usage()
debug = False
helo = 'mail.example.com'
mail_from = '<EMAIL>'
mx = sys.argv[1]
port = int(sys.argv[2])
emails = load_emails(sys.argv[3])
try:
smtp = smtplib.SMTP()
smtp.set_debuglevel(debug)
smtp.connect(mx, port)
smtp.ehlo(helo)
if smtp.has_extn('vrfy') is True:
print('[*] Using VRFY to enumerate email addresses.')
check = smtp.vrfy
elif smtp.has_extn('expn') is True:
print('[*] Using EXPN to enumerate email addresses.')
check = smtp.expn
else:
print('[*] Using RCPT to enumerate email addresses.')
smtp.mail(mail_from)
check = smtp.rcpt
for email in emails:
code, _ = check(email)
if code == 250:
print('[+] {0}'.format(email))
else:
print('[-] {0}'.format(email))
smtp.quit()
except smtplib.SMTPDataError as e:
print('[-] {0}'.format(str(e[1])))
except smtplib.SMTPServerDisconnected as e:
print('[-] {0}'.format(str(e)))
except smtplib.SMTPConnectError as e:
print('[-] {0}'.format(str(e[1])))
except smtplib.SMTPSenderRefused as e:
print('[-] {0}'.format(str(e))) | smtp_enum.py |
import smtplib
import sys
def load_emails(filename):
"""
Load the target email addresses from a file.
"""
emails = []
print('[*] Loading email addresses.')
with open(filename) as f:
for line in f:
line = line.rstrip()
if line.startswith('#'):
continue
if line == '':
continue
emails.append(line)
return emails
def usage():
"""
Print usage statement and exit.
"""
print('Usage: smtp_enum.py mx_server port email_file')
sys.exit()
if __name__ == '__main__':
"""
Enumerate the target email addresses.
Use the EXPN, VRFY, or RCPT TO method to enumerate email addresses.
"""
if len(sys.argv) != 4:
usage()
debug = False
helo = 'mail.example.com'
mail_from = '<EMAIL>'
mx = sys.argv[1]
port = int(sys.argv[2])
emails = load_emails(sys.argv[3])
try:
smtp = smtplib.SMTP()
smtp.set_debuglevel(debug)
smtp.connect(mx, port)
smtp.ehlo(helo)
if smtp.has_extn('vrfy') is True:
print('[*] Using VRFY to enumerate email addresses.')
check = smtp.vrfy
elif smtp.has_extn('expn') is True:
print('[*] Using EXPN to enumerate email addresses.')
check = smtp.expn
else:
print('[*] Using RCPT to enumerate email addresses.')
smtp.mail(mail_from)
check = smtp.rcpt
for email in emails:
code, _ = check(email)
if code == 250:
print('[+] {0}'.format(email))
else:
print('[-] {0}'.format(email))
smtp.quit()
except smtplib.SMTPDataError as e:
print('[-] {0}'.format(str(e[1])))
except smtplib.SMTPServerDisconnected as e:
print('[-] {0}'.format(str(e)))
except smtplib.SMTPConnectError as e:
print('[-] {0}'.format(str(e[1])))
except smtplib.SMTPSenderRefused as e:
print('[-] {0}'.format(str(e))) | 0.307774 | 0.205097 |
import json
from otcextensions.tests.functional.osclient.nat.v2 import common
class TestSnat(common.NatTestCase):
"""Functional Tests for NAT Gateway"""
def setUp(self):
super(TestSnat, self).setUp()
def test_snat_rule_list(self):
json_output = json.loads(self.openstack(
'nat snat rule list -f json '
))
self.assertIsNotNone(json_output)
def test_snat_rule_list_filters(self):
json_output = json.loads(self.openstack(
'nat snat rule list -f json '
'--limit 1 --id 2 '
'--project-id 3 '
'--nat-gateway-id 4 '
'--network-id 5 '
'--cidr 6 '
'--source-type 7 '
'--floating-ip-id 8 '
'--floating-ip-address 9 '
'--status 10 '
'--admin-state-up true '
'--created-at "{}"'.format(self.CURR_TIME)
))
self.assertIsNotNone(json_output)
def test_nat_snat_rule(self):
json_output = self.create_snat_rule()
self.addCleanup(self.delete_snat_rule)
snat_rule_id = json_output['id']
nat_id = json_output['nat_gateway_id']
# List Snat Rule by Snat Id filter
json_output = json.loads(self.openstack(
'nat snat rule list -f json '
'--id ' + snat_rule_id
))
self.assertIsNotNone(json_output)
self.assertEqual(next(iter(json_output))['Id'], snat_rule_id)
self.assertEqual(
next(iter(json_output))['Nat Gateway Id'], nat_id)
# List Snat Rule by nat-gateway-id filter
json_output = json.loads(self.openstack(
'nat snat rule list -f json '
'--nat-gateway-id ' + nat_id
))
self.assertIsNotNone(json_output)
self.assertEqual(
next(iter(json_output))['Nat Gateway Id'], nat_id)
# Show Snat Rule by Id
self.assertIsNotNone(self.SNAT_RULE_ID)
json_output = json.loads(self.openstack(
'nat snat rule show '
' -f json ' + self.SNAT_RULE_ID
))
self.assertIsNotNone(json_output)
self.assertEqual(json_output['id'], self.SNAT_RULE_ID) | otcextensions/tests/functional/osclient/nat/v2/test_snat.py |
import json
from otcextensions.tests.functional.osclient.nat.v2 import common
class TestSnat(common.NatTestCase):
"""Functional Tests for NAT Gateway"""
def setUp(self):
super(TestSnat, self).setUp()
def test_snat_rule_list(self):
json_output = json.loads(self.openstack(
'nat snat rule list -f json '
))
self.assertIsNotNone(json_output)
def test_snat_rule_list_filters(self):
json_output = json.loads(self.openstack(
'nat snat rule list -f json '
'--limit 1 --id 2 '
'--project-id 3 '
'--nat-gateway-id 4 '
'--network-id 5 '
'--cidr 6 '
'--source-type 7 '
'--floating-ip-id 8 '
'--floating-ip-address 9 '
'--status 10 '
'--admin-state-up true '
'--created-at "{}"'.format(self.CURR_TIME)
))
self.assertIsNotNone(json_output)
def test_nat_snat_rule(self):
json_output = self.create_snat_rule()
self.addCleanup(self.delete_snat_rule)
snat_rule_id = json_output['id']
nat_id = json_output['nat_gateway_id']
# List Snat Rule by Snat Id filter
json_output = json.loads(self.openstack(
'nat snat rule list -f json '
'--id ' + snat_rule_id
))
self.assertIsNotNone(json_output)
self.assertEqual(next(iter(json_output))['Id'], snat_rule_id)
self.assertEqual(
next(iter(json_output))['Nat Gateway Id'], nat_id)
# List Snat Rule by nat-gateway-id filter
json_output = json.loads(self.openstack(
'nat snat rule list -f json '
'--nat-gateway-id ' + nat_id
))
self.assertIsNotNone(json_output)
self.assertEqual(
next(iter(json_output))['Nat Gateway Id'], nat_id)
# Show Snat Rule by Id
self.assertIsNotNone(self.SNAT_RULE_ID)
json_output = json.loads(self.openstack(
'nat snat rule show '
' -f json ' + self.SNAT_RULE_ID
))
self.assertIsNotNone(json_output)
self.assertEqual(json_output['id'], self.SNAT_RULE_ID) | 0.474875 | 0.225768 |
calls."""
from typing import List, Union
from mythril.disassembler.disassembly import Disassembly
from mythril.laser.ethereum.cfg import Node, Edge, JumpType
from mythril.laser.ethereum.state.calldata import ConcreteCalldata
from mythril.laser.ethereum.state.global_state import GlobalState
from mythril.laser.ethereum.transaction.transaction_models import (
MessageCallTransaction,
get_next_transaction_id,
)
def execute_message_call(
laser_evm,
callee_address,
caller_address,
origin_address,
code,
data,
gas_limit,
gas_price,
value,
track_gas=False,
) -> Union[None, List[GlobalState]]:
"""Execute a message call transaction from all open states.
:param laser_evm:
:param callee_address:
:param caller_address:
:param origin_address:
:param code:
:param data:
:param gas_limit:
:param gas_price:
:param value:
:param track_gas:
:return:
"""
# TODO: Resolve circular import between .transaction and ..svm to import LaserEVM here
open_states = laser_evm.open_states[:]
del laser_evm.open_states[:]
for open_world_state in open_states:
next_transaction_id = get_next_transaction_id()
transaction = MessageCallTransaction(
world_state=open_world_state,
identifier=next_transaction_id,
gas_price=gas_price,
gas_limit=gas_limit,
origin=origin_address,
code=Disassembly(code),
caller=caller_address,
callee_account=open_world_state[callee_address],
call_data=ConcreteCalldata(next_transaction_id, data),
call_value=value,
)
_setup_global_state_for_execution(laser_evm, transaction)
return laser_evm.exec(track_gas=track_gas)
def _setup_global_state_for_execution(laser_evm, transaction) -> None:
"""Set up global state and cfg for a transactions execution.
:param laser_evm:
:param transaction:
"""
# TODO: Resolve circular import between .transaction and ..svm to import LaserEVM here
global_state = transaction.initial_global_state()
global_state.transaction_stack.append((transaction, None))
new_node = Node(
global_state.environment.active_account.contract_name,
function_name=global_state.environment.active_function_name,
)
if laser_evm.requires_statespace:
laser_evm.nodes[new_node.uid] = new_node
if transaction.world_state.node and laser_evm.requires_statespace:
laser_evm.edges.append(
Edge(
transaction.world_state.node.uid,
new_node.uid,
edge_type=JumpType.Transaction,
condition=None,
)
)
new_node.constraints = global_state.world_state.constraints
global_state.world_state.transaction_sequence.append(transaction)
global_state.node = new_node
new_node.states.append(global_state)
laser_evm.work_list.append(global_state) | mythril/laser/ethereum/transaction/concolic.py | calls."""
from typing import List, Union
from mythril.disassembler.disassembly import Disassembly
from mythril.laser.ethereum.cfg import Node, Edge, JumpType
from mythril.laser.ethereum.state.calldata import ConcreteCalldata
from mythril.laser.ethereum.state.global_state import GlobalState
from mythril.laser.ethereum.transaction.transaction_models import (
MessageCallTransaction,
get_next_transaction_id,
)
def execute_message_call(
laser_evm,
callee_address,
caller_address,
origin_address,
code,
data,
gas_limit,
gas_price,
value,
track_gas=False,
) -> Union[None, List[GlobalState]]:
"""Execute a message call transaction from all open states.
:param laser_evm:
:param callee_address:
:param caller_address:
:param origin_address:
:param code:
:param data:
:param gas_limit:
:param gas_price:
:param value:
:param track_gas:
:return:
"""
# TODO: Resolve circular import between .transaction and ..svm to import LaserEVM here
open_states = laser_evm.open_states[:]
del laser_evm.open_states[:]
for open_world_state in open_states:
next_transaction_id = get_next_transaction_id()
transaction = MessageCallTransaction(
world_state=open_world_state,
identifier=next_transaction_id,
gas_price=gas_price,
gas_limit=gas_limit,
origin=origin_address,
code=Disassembly(code),
caller=caller_address,
callee_account=open_world_state[callee_address],
call_data=ConcreteCalldata(next_transaction_id, data),
call_value=value,
)
_setup_global_state_for_execution(laser_evm, transaction)
return laser_evm.exec(track_gas=track_gas)
def _setup_global_state_for_execution(laser_evm, transaction) -> None:
"""Set up global state and cfg for a transactions execution.
:param laser_evm:
:param transaction:
"""
# TODO: Resolve circular import between .transaction and ..svm to import LaserEVM here
global_state = transaction.initial_global_state()
global_state.transaction_stack.append((transaction, None))
new_node = Node(
global_state.environment.active_account.contract_name,
function_name=global_state.environment.active_function_name,
)
if laser_evm.requires_statespace:
laser_evm.nodes[new_node.uid] = new_node
if transaction.world_state.node and laser_evm.requires_statespace:
laser_evm.edges.append(
Edge(
transaction.world_state.node.uid,
new_node.uid,
edge_type=JumpType.Transaction,
condition=None,
)
)
new_node.constraints = global_state.world_state.constraints
global_state.world_state.transaction_sequence.append(transaction)
global_state.node = new_node
new_node.states.append(global_state)
laser_evm.work_list.append(global_state) | 0.74872 | 0.295795 |
import sqlite3
from .validations import type_pos_int, type_non_neg_int, expect_in, expect_type, expect_len_range, cast_expect_type
from .shared_exceptions import StatusMessageException
def _process_values(values, processors, column_list, context):
if not values or not processors:
return values
if isinstance(values, (list, tuple)):
if isinstance(values[0], (list, tuple, dict)):
return [_process_values(v, processors, column_list, context) for v in values]
processors = {i: processors[c] for i, c in enumerate(column_list) if c in processors}
iterator = range(len(values))
values = list(values)
else:
iterator = values.keys()
for key in iterator:
if key in processors:
p_list = processors[key]
if not isinstance(p_list, (list, tuple)):
p_list = [p_list]
for p in p_list:
values[key] = p(values[key], **context)
return values
class SQLResult():
"""Result object from SQLite queries"""
def __init__(self, result_cursor, postprocessors=None, column_list=None):
self.result_cursor = result_cursor
self.postprocessors = postprocessors
self.column_list = column_list
def _postprocess_values(self, values):
return _process_values(values, self.postprocessors, self.column_list, context={})
# This can be used as a iterator, WILL run the postprocessors
def __iter__(self):
return self
def __next__(self):
return self._postprocess_values(next(self.result_cursor))
# Convenience functions for getting certain numbers of results, WILL run the postprocessors
def one_or_none(self):
return self.one(True)
def one(self, none_ok=False):
first_row = self.result_cursor.fetchone()
if not first_row:
if not none_ok:
raise SQLCompositorBadResult('Did not find any results for query, where exactly one was expected')
return None
if self.result_cursor.fetchone():
raise SQLCompositorBadResult('Found too many results for query, where at most one was expected')
return self._postprocess_values(first_row)
def all(self):
return self._postprocess_values(self.result_cursor.fetchall())
# Built-in functions in sqlite3, note that these DO NOT run the postprocessors, for raw data access
def lastrowid(self):
return self.result_cursor.lastrowid
def fetchone(self):
return self.result_cursor.fetchone()
def fetchmany(self, size=None):
if not size:
return self.result_cursor.fetchmany()
return self.result_cursor.fetchmany(size)
def fetchall(self):
return self.result_cursor.fetchall()
class SQLiteDB():
"""SQLite Database interface to auto-generate queries and results"""
def __init__(self, db_path, table_mappers, preprocessors=None, postprocessors=None,
enable_foreign_key_constraints=False):
self.db_path = db_path
if not table_mappers:
raise SQLCompositorBadInput('Must define table_mappers to use this interface')
self.table_mappers = table_mappers
self.current_connection = None
self.current_cursor = None
self.enable_foreign_key_constraints = enable_foreign_key_constraints
self.preprocessors = preprocessors
self.postprocessors = postprocessors
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.close()
def __del__(self):
self.close()
def is_valid_table(self, table_name):
return table_name in self.table_mappers
def get_preprocessors(self, table_name):
if not self.preprocessors:
return None
return self.preprocessors.get(table_name)
def get_postprocessors(self, table_name):
if not self.postprocessors:
return None
return self.postprocessors.get(table_name)
def select_all(self, table_name):
return SQLQuery('SELECT', table_name, self).column_list(self.table_mappers[table_name])
def select(self, table_name, columns):
return SQLQuery('SELECT', table_name, self).column_list(columns)
def update(self, table_name):
return SQLQuery('UPDATE', table_name, self)
def update_mapped(self, table_name, set_values):
return self.update(table_name).set_values(set_values)
def insert(self, table_name, columns):
return self.insert_into(table_name, columns)
def insert_mapped(self, table_name, data, autorun=True):
return self.insert_into_mapped(table_name, data, autorun=autorun)
def insert_into(self, table_name, columns):
return SQLQuery('INSERT INTO', table_name, self).column_list(columns)
def insert_into_mapped(self, table_name, data, autorun=True):
if isinstance(data, (list, tuple)):
# Auto-detect column list
columns = set()
for row in data:
columns.update(list(row.keys()))
return SQLQuery('INSERT INTO', table_name, self).column_list(list(columns)).values_mapped(data, autorun=autorun)
else:
return SQLQuery('INSERT INTO', table_name, self).column_list(list(data.keys())).values_mapped(data, autorun=autorun)
def delete(self, table_name):
# TODO: Delete-all protection?
return SQLQuery('DELETE', table_name, self)
def connection(self):
if not self.current_connection:
self.current_connection = sqlite3.connect(self.db_path)
return self.current_connection
def cursor(self):
if not self.current_cursor:
self.current_cursor = self.connection().cursor()
if self.enable_foreign_key_constraints:
self.current_cursor.execute('PRAGMA foreign_keys = ON')
return self.current_cursor
def execute(self, query_str, fill_values=None, postprocessors=None, column_list=None):
cur = self.cursor()
if not fill_values:
cur.execute(query_str)
else:
cur.execute(query_str, fill_values)
return SQLResult(cur, postprocessors, column_list)
def executemany(self, query_str, fill_values, postprocessors=None, column_list=None):
cur = self.cursor()
cur.executemany(query_str, fill_values)
return SQLResult(cur, postprocessors, column_list)
def rollback(self):
if not self.current_connection:
return
self.current_connection.rollback()
def in_transaction(self):
if not self.current_connection:
return False
return self.current_connection.in_transaction
def commit(self, no_changes_ok=False):
if not self.in_transaction():
if no_changes_ok:
return
raise RuntimeError('No changes in a transaction open for SQLiteDB instance, cannot commit nothing')
self.current_connection.commit()
def close(self):
if not self.current_connection:
return
# Default is to NOT commit any changes, be sure to call commit first!
self.current_connection.close()
self.current_cursor = None
self.current_connection = None
class SQLCompositorBadInput(StatusMessageException):
def __init__(self, message, status_code=None, additional_information=None):
StatusMessageException.__init__(self, message, status_code, additional_information)
class SQLCompositorBadResult(StatusMessageException):
def __init__(self, message, status_code=None, additional_information=None):
StatusMessageException.__init__(self, message, status_code, additional_information)
def _process_single_column_values(values, column, processors, context):
if not values or not processors:
return values
if column in processors:
p_list = processors[column]
if not isinstance(p_list, (list, tuple)):
p_list = [p_list]
for p in p_list:
if isinstance(values, (list, tuple)):
values = [p(v, **context) for v in values]
else:
values = p(values, **context)
return values
def generate_selector(selector, fill_values, valid_columns, processors, context):
"""Generates a selector from the given JSON-style selector"""
if isinstance(selector, dict):
if len(selector) != 1:
raise SQLCompositorBadInput('Each nested selector must have only one logical operator: AND or OR')
for kind, s_list in selector.items():
kind = kind.upper()
expect_type(s_list, (list, tuple), 'selector')
expect_in(kind, ('AND', 'OR'), 'logical operator')
return f' {kind} '.join([f'({generate_selector(s, fill_values, valid_columns, processors, context)})' for s in s_list])
expect_type(selector, (list, tuple), 'selector')
expect_len_range(selector, 2, 3, 'selector')
column = selector[0]
expect_in(column, valid_columns, 'column name')
operator = selector[1].lower()
value = None
value_required = True
value_expected_types = None
value_list = False
if operator in ('eq', '=', '=='):
sql_operator = '='
elif operator in ('lt', '<'):
sql_operator = '<'
elif operator in ('gt', '>'):
sql_operator = '>'
elif operator in ('lte', '<='):
sql_operator = '<='
elif operator in ('gte', '>='):
sql_operator = '>='
elif operator in ('in'):
sql_operator = 'IN'
value_expected_types = (list, tuple)
value_list = True
elif operator in ('notin', 'not_in'):
sql_operator = 'NOT IN'
value_expected_types = (list, tuple)
value_list = True
elif operator in ('like'):
sql_operator = 'LIKE'
value_expected_types = str
elif operator in ('isnull', 'is_null'):
# TODO: 'is', 'null' or 'is', None options?
sql_operator = 'IS NULL'
value_required = False
elif operator in ('isnotnull', 'is_not_null'):
sql_operator = 'IS NOT NULL'
value_required = False
else:
raise SQLCompositorBadInput('Unsupported Operator')
if value_required:
if len(selector) != 3 or selector[2] is None:
raise SQLCompositorBadInput(f'Must provide a non-null value for comparison for operator {operator}')
value = _process_single_column_values(selector[2], column, processors, context)
if value_expected_types:
expect_type(value, value_expected_types, f'value for {operator} operator')
if value_list:
fill_values.extend(value) # As this can be user-supplied
return f'"{column}" {sql_operator} (' + ','.join(['?'] * len(value)) + ')'
fill_values.append(value) # As this can be user-supplied
return f'"{column}" {sql_operator} ?'
return f'"{column}" {sql_operator}'
# Table to dict mapper for results (multiple rows)
def map_index(index_names, values):
mapped_values = []
if not values:
return mapped_values
for row in values:
mapped_values.append(map_index_one_row(index_names, row))
return mapped_values
def map_index_one_row(index_names, row):
if row is None:
return None
mapped_row = {}
for i, name in enumerate(index_names):
if len(row) <= i:
break
mapped_row[name] = row[i]
return mapped_row
# Dict to index mapper for input values (one row at a time)
def unmap_index(index_names, mapped_values):
values = [None] * len(index_names)
if not mapped_values:
return values
for key, value in mapped_values.items():
expect_in(key, index_names, 'column name')
values[index_names.index(key)] = value
return values
class SQLQuery():
"""Base class for handling and compositing SQL queries"""
def __init__(self, kind, table_name, db):
self.kind = kind.upper().strip()
expect_in(self.kind, ('UPDATE', 'SELECT', 'INSERT INTO', 'DELETE'), 'query kind')
self.table_name = table_name.strip()
if not db.is_valid_table(self.table_name):
raise SQLCompositorBadInput(f'Unknown table: {self.table_name}')
self.valid_columns = db.table_mappers[self.table_name]
self.data = {
'column_list': None,
'where': None,
'order_by': None,
'set_values': None,
'values': None,
'limit': None,
'offset': None,
}
self.count_mode = False
self.many_query = False
self.fill_values = []
self.db = db
def _get_table_map(self):
return self.db.table_mappers[self.table_name]
# Validations
def _validate_clause(self, clause, fill_values):
if clause.count('?') != len(fill_values):
raise RuntimeError('Internal Error: Expected equal number of ? substitution elements and fill_values')
def expect_kind(self, kinds, func_name):
if not isinstance(kinds, (list, tuple)):
kinds = [kinds]
if self.kind not in kinds:
raise SQLCompositorBadInput(f'Expected kind to be in {kinds} for function {func_name}')
def _set_query_data_only_once(self, key, value):
if self.data.get(key) is not None:
raise SQLCompositorBadInput(f'This query has {key} already set!')
self.data[key] = value
return self
# Functions to be called to compose a query
def column_list(self, column_list):
self.expect_kind(('SELECT', 'INSERT INTO'), 'column_list')
if not isinstance(column_list, (list, tuple)):
if column_list != '*':
raise SQLCompositorBadInput('Column list must be a list or *')
return self._set_query_data_only_once('column_list', self.valid_columns) # Use the original list here to ensure no ordering mismatches
for c in column_list:
if c not in self.valid_columns:
raise SQLCompositorBadInput(f'Unknown column: {c}')
self._set_query_data_only_once('column_list', column_list)
return self
def where(self, selector):
"""
Selectors are in the format:
['col', 'eq', 5] -> col = 5
{'and': [
['col', 'lt', 6],
['a', 'like', '%b%']
]} -> col < 6 AND a LIKE %b%
"""
self.expect_kind(('SELECT', 'UPDATE', 'DELETE'), 'where')
new_fill_values = []
clause = generate_selector(selector, new_fill_values, self.valid_columns, self.db.get_preprocessors(self.table_name),
{'db': self.db, 'mode': 'WHERE'})
self._validate_clause(clause, new_fill_values)
self._set_query_data_only_once('where', clause)
self.fill_values.extend(new_fill_values)
return self
def get_id(self, row_id):
# Shortcut for getting a particular ID
return self.where(['id', 'eq', row_id])
def _preprocess_values(self, values, mode='INSERT INTO'):
return _process_values(values, self.db.get_preprocessors(self.table_name), self.data['column_list'],
{'db': self.db, 'mode': mode})
def set_values(self, set_values):
self.expect_kind('UPDATE', 'set_values')
if not isinstance(set_values, dict):
raise SQLCompositorBadInput('Must provide a dictionary of column names to values for set_values')
for c in set_values.keys():
if c not in self.valid_columns:
raise SQLCompositorBadInput(f'Unknown column: {c}')
self._set_query_data_only_once('set_values', self._preprocess_values(set_values, mode='UPDATE'))
return self
def values(self, values, autorun=True):
self.expect_kind('INSERT INTO', 'values')
if not self.data['column_list']:
raise SQLCompositorBadInput('Must set column_list first to the list of columns you wish to insert')
if not isinstance(values, (list, tuple)):
raise SQLCompositorBadInput('Must provide a list of values for values in insert statments')
if isinstance(values[0], (list, tuple)):
for row in values:
if len(row) != len(self.data['column_list']):
raise SQLCompositorBadInput('Must provide a list of lists each with the same length as the columns provided to insert')
self.many_query = True
elif len(values) != len(self.data['column_list']):
raise SQLCompositorBadInput('Must provide a list with the same length as the columns provided to insert')
self._set_query_data_only_once('values', self._preprocess_values(values))
if autorun:
# Since all data is now available
return self.run()
else:
return self
def values_mapped(self, values, autorun=True):
self.expect_kind('INSERT INTO', 'values')
if not self.data['column_list']:
raise SQLCompositorBadInput('Must set column_list first to the list of columns you wish to insert')
if isinstance(values, dict):
# unmap_index performs column name checking
values = unmap_index(self.data['column_list'], values)
elif isinstance(values, (list, tuple)):
for v in values:
expect_type(v, dict, 'values_mapped input row dictionary')
# unmap_index performs column name checking
values = [unmap_index(self.data['column_list'], v) for v in values]
self.many_query = True
else:
raise SQLCompositorBadInput('Must provide a list of value dicts or one value dict for values_mapped in insert statments')
self._set_query_data_only_once('values', self._preprocess_values(values))
if autorun:
# Since all data is now available
return self.run()
else:
return self
def order_by(self, columns):
self.expect_kind('SELECT', 'order_by')
expect_type(columns, (list, tuple, dict, str), 'order by column(s)')
if not columns:
raise SQLCompositorBadInput('Must specify one or more columns to order by')
if isinstance(columns, (dict, str)):
columns = [columns]
order_by_tuples = []
for c_obj in columns:
if isinstance(c_obj, str):
c_obj = {'column': c_obj, 'direction': 'ASC'}
expect_type(c_obj, dict, 'order_by column/direction object')
if 'column' not in c_obj:
raise SQLCompositorBadInput('Complex order_by request must contain a column key '
'and an optional direction key in the input dictionary')
column = c_obj['column']
direction = c_obj.get('direction', 'ASC').upper()
if column not in self.valid_columns:
raise SQLCompositorBadInput(f'Unknown column: {column}')
order_by_tuples.append((column, direction))
self._set_query_data_only_once('order_by', order_by_tuples)
return self
def limit(self, value):
self.expect_kind('SELECT', 'limit')
value = cast_expect_type(value, type_pos_int, 'LIMIT')
self._set_query_data_only_once('limit', value)
return self
def offset(self, value):
self.expect_kind('SELECT', 'offset')
value = cast_expect_type(value, type_non_neg_int, 'OFFSET')
self._set_query_data_only_once('offset', value)
return self
def count(self):
self.expect_kind('SELECT', 'count')
if self.count_mode:
raise SQLCompositorBadInput('Count already set on this query')
self.count_mode = True
return self
# Run and return the result of the query
def result(self):
column_list = self.data['column_list']
escaped_column_list = None
if column_list:
escaped_column_list = ','.join([f'"{c}"' for c in column_list])
where = self.data['where']
order_by = self.data['order_by']
limit = self.data['limit']
offset = self.data['offset']
values = self.data['values']
postprocessors = None
if self.kind == 'SELECT':
query_str = 'SELECT ' + escaped_column_list + ' FROM ' + self.table_name
postprocessors = self.db.get_postprocessors(self.table_name)
elif self.kind == 'UPDATE':
query_str = 'UPDATE ' + self.table_name + ' SET '
set_phrases = []
add_fill_values = []
for column, value in self.data['set_values'].items():
set_phrases.append(f'"{column}" = ?')
add_fill_values.append(value)
query_str += ','.join(set_phrases)
add_fill_values.extend(self.fill_values)
self.fill_values = add_fill_values
elif self.kind == 'INSERT INTO':
query_str = 'INSERT INTO ' + self.table_name + '(' + escaped_column_list + ')'
elif self.kind == 'DELETE':
query_str = 'DELETE FROM ' + self.table_name
if where:
query_str += ' WHERE ' + where
if order_by:
query_str += ' ORDER BY ' + ','.join([f'"{c}" {d}' for c, d in order_by])
if limit:
query_str += f' LIMIT {limit}'
if offset is not None:
query_str += f' OFFSET {offset}'
if values:
query_str += ' VALUES (' + ','.join(['?'] * len(column_list)) + ')'
self.fill_values = values
if self.count_mode:
query_str = 'SELECT COUNT(*) FROM (' + query_str + ')'
if ';' in query_str:
raise SQLCompositorBadInput('Composite statements are not allowed')
self._validate_clause(query_str, self.fill_values)
if self.many_query:
return self.db.executemany(query_str, self.fill_values, postprocessors, column_list)
return self.db.execute(query_str, self.fill_values, postprocessors, column_list)
# For executing a query directly (used for update().set_values().where().run() etc.
# Insert into can autorun, and all, one, one_or_none forms are used for select
def run(self):
return self.result()
# Convenience functions for different kinds and numbers of results
def all(self):
return self.result().all()
def one(self):
return self.result().one()
def scalar(self):
row = self.result().one()
if len(row) != 1:
raise SQLCompositorBadResult(f'Expected a scalar output, got {len(row)} results instead')
return row[0]
def one_or_none(self):
return self.result().one_or_none()
def all_mapped(self):
return map_index(self._get_table_map(), self.result())
def one_mapped(self):
return map_index_one_row(self._get_table_map(), self.result().one())
def one_or_none_mapped(self):
return map_index_one_row(self._get_table_map(), self.result().one_or_none()) | restomatic/json_sql_compositor.py | import sqlite3
from .validations import type_pos_int, type_non_neg_int, expect_in, expect_type, expect_len_range, cast_expect_type
from .shared_exceptions import StatusMessageException
def _process_values(values, processors, column_list, context):
if not values or not processors:
return values
if isinstance(values, (list, tuple)):
if isinstance(values[0], (list, tuple, dict)):
return [_process_values(v, processors, column_list, context) for v in values]
processors = {i: processors[c] for i, c in enumerate(column_list) if c in processors}
iterator = range(len(values))
values = list(values)
else:
iterator = values.keys()
for key in iterator:
if key in processors:
p_list = processors[key]
if not isinstance(p_list, (list, tuple)):
p_list = [p_list]
for p in p_list:
values[key] = p(values[key], **context)
return values
class SQLResult():
"""Result object from SQLite queries"""
def __init__(self, result_cursor, postprocessors=None, column_list=None):
self.result_cursor = result_cursor
self.postprocessors = postprocessors
self.column_list = column_list
def _postprocess_values(self, values):
return _process_values(values, self.postprocessors, self.column_list, context={})
# This can be used as a iterator, WILL run the postprocessors
def __iter__(self):
return self
def __next__(self):
return self._postprocess_values(next(self.result_cursor))
# Convenience functions for getting certain numbers of results, WILL run the postprocessors
def one_or_none(self):
return self.one(True)
def one(self, none_ok=False):
first_row = self.result_cursor.fetchone()
if not first_row:
if not none_ok:
raise SQLCompositorBadResult('Did not find any results for query, where exactly one was expected')
return None
if self.result_cursor.fetchone():
raise SQLCompositorBadResult('Found too many results for query, where at most one was expected')
return self._postprocess_values(first_row)
def all(self):
return self._postprocess_values(self.result_cursor.fetchall())
# Built-in functions in sqlite3, note that these DO NOT run the postprocessors, for raw data access
def lastrowid(self):
return self.result_cursor.lastrowid
def fetchone(self):
return self.result_cursor.fetchone()
def fetchmany(self, size=None):
if not size:
return self.result_cursor.fetchmany()
return self.result_cursor.fetchmany(size)
def fetchall(self):
return self.result_cursor.fetchall()
class SQLiteDB():
"""SQLite Database interface to auto-generate queries and results"""
def __init__(self, db_path, table_mappers, preprocessors=None, postprocessors=None,
enable_foreign_key_constraints=False):
self.db_path = db_path
if not table_mappers:
raise SQLCompositorBadInput('Must define table_mappers to use this interface')
self.table_mappers = table_mappers
self.current_connection = None
self.current_cursor = None
self.enable_foreign_key_constraints = enable_foreign_key_constraints
self.preprocessors = preprocessors
self.postprocessors = postprocessors
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.close()
def __del__(self):
self.close()
def is_valid_table(self, table_name):
return table_name in self.table_mappers
def get_preprocessors(self, table_name):
if not self.preprocessors:
return None
return self.preprocessors.get(table_name)
def get_postprocessors(self, table_name):
if not self.postprocessors:
return None
return self.postprocessors.get(table_name)
def select_all(self, table_name):
return SQLQuery('SELECT', table_name, self).column_list(self.table_mappers[table_name])
def select(self, table_name, columns):
return SQLQuery('SELECT', table_name, self).column_list(columns)
def update(self, table_name):
return SQLQuery('UPDATE', table_name, self)
def update_mapped(self, table_name, set_values):
return self.update(table_name).set_values(set_values)
def insert(self, table_name, columns):
return self.insert_into(table_name, columns)
def insert_mapped(self, table_name, data, autorun=True):
return self.insert_into_mapped(table_name, data, autorun=autorun)
def insert_into(self, table_name, columns):
return SQLQuery('INSERT INTO', table_name, self).column_list(columns)
def insert_into_mapped(self, table_name, data, autorun=True):
if isinstance(data, (list, tuple)):
# Auto-detect column list
columns = set()
for row in data:
columns.update(list(row.keys()))
return SQLQuery('INSERT INTO', table_name, self).column_list(list(columns)).values_mapped(data, autorun=autorun)
else:
return SQLQuery('INSERT INTO', table_name, self).column_list(list(data.keys())).values_mapped(data, autorun=autorun)
def delete(self, table_name):
# TODO: Delete-all protection?
return SQLQuery('DELETE', table_name, self)
def connection(self):
if not self.current_connection:
self.current_connection = sqlite3.connect(self.db_path)
return self.current_connection
def cursor(self):
if not self.current_cursor:
self.current_cursor = self.connection().cursor()
if self.enable_foreign_key_constraints:
self.current_cursor.execute('PRAGMA foreign_keys = ON')
return self.current_cursor
def execute(self, query_str, fill_values=None, postprocessors=None, column_list=None):
cur = self.cursor()
if not fill_values:
cur.execute(query_str)
else:
cur.execute(query_str, fill_values)
return SQLResult(cur, postprocessors, column_list)
def executemany(self, query_str, fill_values, postprocessors=None, column_list=None):
cur = self.cursor()
cur.executemany(query_str, fill_values)
return SQLResult(cur, postprocessors, column_list)
def rollback(self):
if not self.current_connection:
return
self.current_connection.rollback()
def in_transaction(self):
if not self.current_connection:
return False
return self.current_connection.in_transaction
def commit(self, no_changes_ok=False):
if not self.in_transaction():
if no_changes_ok:
return
raise RuntimeError('No changes in a transaction open for SQLiteDB instance, cannot commit nothing')
self.current_connection.commit()
def close(self):
if not self.current_connection:
return
# Default is to NOT commit any changes, be sure to call commit first!
self.current_connection.close()
self.current_cursor = None
self.current_connection = None
class SQLCompositorBadInput(StatusMessageException):
def __init__(self, message, status_code=None, additional_information=None):
StatusMessageException.__init__(self, message, status_code, additional_information)
class SQLCompositorBadResult(StatusMessageException):
def __init__(self, message, status_code=None, additional_information=None):
StatusMessageException.__init__(self, message, status_code, additional_information)
def _process_single_column_values(values, column, processors, context):
if not values or not processors:
return values
if column in processors:
p_list = processors[column]
if not isinstance(p_list, (list, tuple)):
p_list = [p_list]
for p in p_list:
if isinstance(values, (list, tuple)):
values = [p(v, **context) for v in values]
else:
values = p(values, **context)
return values
def generate_selector(selector, fill_values, valid_columns, processors, context):
"""Generates a selector from the given JSON-style selector"""
if isinstance(selector, dict):
if len(selector) != 1:
raise SQLCompositorBadInput('Each nested selector must have only one logical operator: AND or OR')
for kind, s_list in selector.items():
kind = kind.upper()
expect_type(s_list, (list, tuple), 'selector')
expect_in(kind, ('AND', 'OR'), 'logical operator')
return f' {kind} '.join([f'({generate_selector(s, fill_values, valid_columns, processors, context)})' for s in s_list])
expect_type(selector, (list, tuple), 'selector')
expect_len_range(selector, 2, 3, 'selector')
column = selector[0]
expect_in(column, valid_columns, 'column name')
operator = selector[1].lower()
value = None
value_required = True
value_expected_types = None
value_list = False
if operator in ('eq', '=', '=='):
sql_operator = '='
elif operator in ('lt', '<'):
sql_operator = '<'
elif operator in ('gt', '>'):
sql_operator = '>'
elif operator in ('lte', '<='):
sql_operator = '<='
elif operator in ('gte', '>='):
sql_operator = '>='
elif operator in ('in'):
sql_operator = 'IN'
value_expected_types = (list, tuple)
value_list = True
elif operator in ('notin', 'not_in'):
sql_operator = 'NOT IN'
value_expected_types = (list, tuple)
value_list = True
elif operator in ('like'):
sql_operator = 'LIKE'
value_expected_types = str
elif operator in ('isnull', 'is_null'):
# TODO: 'is', 'null' or 'is', None options?
sql_operator = 'IS NULL'
value_required = False
elif operator in ('isnotnull', 'is_not_null'):
sql_operator = 'IS NOT NULL'
value_required = False
else:
raise SQLCompositorBadInput('Unsupported Operator')
if value_required:
if len(selector) != 3 or selector[2] is None:
raise SQLCompositorBadInput(f'Must provide a non-null value for comparison for operator {operator}')
value = _process_single_column_values(selector[2], column, processors, context)
if value_expected_types:
expect_type(value, value_expected_types, f'value for {operator} operator')
if value_list:
fill_values.extend(value) # As this can be user-supplied
return f'"{column}" {sql_operator} (' + ','.join(['?'] * len(value)) + ')'
fill_values.append(value) # As this can be user-supplied
return f'"{column}" {sql_operator} ?'
return f'"{column}" {sql_operator}'
# Table to dict mapper for results (multiple rows)
def map_index(index_names, values):
mapped_values = []
if not values:
return mapped_values
for row in values:
mapped_values.append(map_index_one_row(index_names, row))
return mapped_values
def map_index_one_row(index_names, row):
if row is None:
return None
mapped_row = {}
for i, name in enumerate(index_names):
if len(row) <= i:
break
mapped_row[name] = row[i]
return mapped_row
# Dict to index mapper for input values (one row at a time)
def unmap_index(index_names, mapped_values):
values = [None] * len(index_names)
if not mapped_values:
return values
for key, value in mapped_values.items():
expect_in(key, index_names, 'column name')
values[index_names.index(key)] = value
return values
class SQLQuery():
"""Base class for handling and compositing SQL queries"""
def __init__(self, kind, table_name, db):
self.kind = kind.upper().strip()
expect_in(self.kind, ('UPDATE', 'SELECT', 'INSERT INTO', 'DELETE'), 'query kind')
self.table_name = table_name.strip()
if not db.is_valid_table(self.table_name):
raise SQLCompositorBadInput(f'Unknown table: {self.table_name}')
self.valid_columns = db.table_mappers[self.table_name]
self.data = {
'column_list': None,
'where': None,
'order_by': None,
'set_values': None,
'values': None,
'limit': None,
'offset': None,
}
self.count_mode = False
self.many_query = False
self.fill_values = []
self.db = db
def _get_table_map(self):
return self.db.table_mappers[self.table_name]
# Validations
def _validate_clause(self, clause, fill_values):
if clause.count('?') != len(fill_values):
raise RuntimeError('Internal Error: Expected equal number of ? substitution elements and fill_values')
def expect_kind(self, kinds, func_name):
if not isinstance(kinds, (list, tuple)):
kinds = [kinds]
if self.kind not in kinds:
raise SQLCompositorBadInput(f'Expected kind to be in {kinds} for function {func_name}')
def _set_query_data_only_once(self, key, value):
if self.data.get(key) is not None:
raise SQLCompositorBadInput(f'This query has {key} already set!')
self.data[key] = value
return self
# Functions to be called to compose a query
def column_list(self, column_list):
self.expect_kind(('SELECT', 'INSERT INTO'), 'column_list')
if not isinstance(column_list, (list, tuple)):
if column_list != '*':
raise SQLCompositorBadInput('Column list must be a list or *')
return self._set_query_data_only_once('column_list', self.valid_columns) # Use the original list here to ensure no ordering mismatches
for c in column_list:
if c not in self.valid_columns:
raise SQLCompositorBadInput(f'Unknown column: {c}')
self._set_query_data_only_once('column_list', column_list)
return self
def where(self, selector):
"""
Selectors are in the format:
['col', 'eq', 5] -> col = 5
{'and': [
['col', 'lt', 6],
['a', 'like', '%b%']
]} -> col < 6 AND a LIKE %b%
"""
self.expect_kind(('SELECT', 'UPDATE', 'DELETE'), 'where')
new_fill_values = []
clause = generate_selector(selector, new_fill_values, self.valid_columns, self.db.get_preprocessors(self.table_name),
{'db': self.db, 'mode': 'WHERE'})
self._validate_clause(clause, new_fill_values)
self._set_query_data_only_once('where', clause)
self.fill_values.extend(new_fill_values)
return self
def get_id(self, row_id):
# Shortcut for getting a particular ID
return self.where(['id', 'eq', row_id])
def _preprocess_values(self, values, mode='INSERT INTO'):
return _process_values(values, self.db.get_preprocessors(self.table_name), self.data['column_list'],
{'db': self.db, 'mode': mode})
def set_values(self, set_values):
self.expect_kind('UPDATE', 'set_values')
if not isinstance(set_values, dict):
raise SQLCompositorBadInput('Must provide a dictionary of column names to values for set_values')
for c in set_values.keys():
if c not in self.valid_columns:
raise SQLCompositorBadInput(f'Unknown column: {c}')
self._set_query_data_only_once('set_values', self._preprocess_values(set_values, mode='UPDATE'))
return self
def values(self, values, autorun=True):
self.expect_kind('INSERT INTO', 'values')
if not self.data['column_list']:
raise SQLCompositorBadInput('Must set column_list first to the list of columns you wish to insert')
if not isinstance(values, (list, tuple)):
raise SQLCompositorBadInput('Must provide a list of values for values in insert statments')
if isinstance(values[0], (list, tuple)):
for row in values:
if len(row) != len(self.data['column_list']):
raise SQLCompositorBadInput('Must provide a list of lists each with the same length as the columns provided to insert')
self.many_query = True
elif len(values) != len(self.data['column_list']):
raise SQLCompositorBadInput('Must provide a list with the same length as the columns provided to insert')
self._set_query_data_only_once('values', self._preprocess_values(values))
if autorun:
# Since all data is now available
return self.run()
else:
return self
def values_mapped(self, values, autorun=True):
self.expect_kind('INSERT INTO', 'values')
if not self.data['column_list']:
raise SQLCompositorBadInput('Must set column_list first to the list of columns you wish to insert')
if isinstance(values, dict):
# unmap_index performs column name checking
values = unmap_index(self.data['column_list'], values)
elif isinstance(values, (list, tuple)):
for v in values:
expect_type(v, dict, 'values_mapped input row dictionary')
# unmap_index performs column name checking
values = [unmap_index(self.data['column_list'], v) for v in values]
self.many_query = True
else:
raise SQLCompositorBadInput('Must provide a list of value dicts or one value dict for values_mapped in insert statments')
self._set_query_data_only_once('values', self._preprocess_values(values))
if autorun:
# Since all data is now available
return self.run()
else:
return self
def order_by(self, columns):
self.expect_kind('SELECT', 'order_by')
expect_type(columns, (list, tuple, dict, str), 'order by column(s)')
if not columns:
raise SQLCompositorBadInput('Must specify one or more columns to order by')
if isinstance(columns, (dict, str)):
columns = [columns]
order_by_tuples = []
for c_obj in columns:
if isinstance(c_obj, str):
c_obj = {'column': c_obj, 'direction': 'ASC'}
expect_type(c_obj, dict, 'order_by column/direction object')
if 'column' not in c_obj:
raise SQLCompositorBadInput('Complex order_by request must contain a column key '
'and an optional direction key in the input dictionary')
column = c_obj['column']
direction = c_obj.get('direction', 'ASC').upper()
if column not in self.valid_columns:
raise SQLCompositorBadInput(f'Unknown column: {column}')
order_by_tuples.append((column, direction))
self._set_query_data_only_once('order_by', order_by_tuples)
return self
def limit(self, value):
self.expect_kind('SELECT', 'limit')
value = cast_expect_type(value, type_pos_int, 'LIMIT')
self._set_query_data_only_once('limit', value)
return self
def offset(self, value):
self.expect_kind('SELECT', 'offset')
value = cast_expect_type(value, type_non_neg_int, 'OFFSET')
self._set_query_data_only_once('offset', value)
return self
def count(self):
self.expect_kind('SELECT', 'count')
if self.count_mode:
raise SQLCompositorBadInput('Count already set on this query')
self.count_mode = True
return self
# Run and return the result of the query
def result(self):
column_list = self.data['column_list']
escaped_column_list = None
if column_list:
escaped_column_list = ','.join([f'"{c}"' for c in column_list])
where = self.data['where']
order_by = self.data['order_by']
limit = self.data['limit']
offset = self.data['offset']
values = self.data['values']
postprocessors = None
if self.kind == 'SELECT':
query_str = 'SELECT ' + escaped_column_list + ' FROM ' + self.table_name
postprocessors = self.db.get_postprocessors(self.table_name)
elif self.kind == 'UPDATE':
query_str = 'UPDATE ' + self.table_name + ' SET '
set_phrases = []
add_fill_values = []
for column, value in self.data['set_values'].items():
set_phrases.append(f'"{column}" = ?')
add_fill_values.append(value)
query_str += ','.join(set_phrases)
add_fill_values.extend(self.fill_values)
self.fill_values = add_fill_values
elif self.kind == 'INSERT INTO':
query_str = 'INSERT INTO ' + self.table_name + '(' + escaped_column_list + ')'
elif self.kind == 'DELETE':
query_str = 'DELETE FROM ' + self.table_name
if where:
query_str += ' WHERE ' + where
if order_by:
query_str += ' ORDER BY ' + ','.join([f'"{c}" {d}' for c, d in order_by])
if limit:
query_str += f' LIMIT {limit}'
if offset is not None:
query_str += f' OFFSET {offset}'
if values:
query_str += ' VALUES (' + ','.join(['?'] * len(column_list)) + ')'
self.fill_values = values
if self.count_mode:
query_str = 'SELECT COUNT(*) FROM (' + query_str + ')'
if ';' in query_str:
raise SQLCompositorBadInput('Composite statements are not allowed')
self._validate_clause(query_str, self.fill_values)
if self.many_query:
return self.db.executemany(query_str, self.fill_values, postprocessors, column_list)
return self.db.execute(query_str, self.fill_values, postprocessors, column_list)
# For executing a query directly (used for update().set_values().where().run() etc.
# Insert into can autorun, and all, one, one_or_none forms are used for select
def run(self):
return self.result()
# Convenience functions for different kinds and numbers of results
def all(self):
return self.result().all()
def one(self):
return self.result().one()
def scalar(self):
row = self.result().one()
if len(row) != 1:
raise SQLCompositorBadResult(f'Expected a scalar output, got {len(row)} results instead')
return row[0]
def one_or_none(self):
return self.result().one_or_none()
def all_mapped(self):
return map_index(self._get_table_map(), self.result())
def one_mapped(self):
return map_index_one_row(self._get_table_map(), self.result().one())
def one_or_none_mapped(self):
return map_index_one_row(self._get_table_map(), self.result().one_or_none()) | 0.485844 | 0.32603 |
from __future__ import print_function
import time
import sys
from GeneticAlgorithm.fitness import assess_chromosome_fitness
from GeneticAlgorithm.genetic_engine import *
import subprocess
import unittest
import logging
from logging import config
image_size = 28
num_labels = 10
img_rows = 28
img_cols = 28
def unpack_testing_data(num_labels):
    """Load MNIST, scale pixels to [0, 1] and one-hot encode the labels.

    Returns a dict with train/test datasets and labels; the validation
    entries are intentionally left as None.
    """
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # The channel axis position depends on the Keras backend image format.
    if K.image_data_format() == 'channels_first':
        x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
        x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    else:
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    x_train = x_train.astype('float32') / 255
    x_test = x_test.astype('float32') / 255
    y_train = to_categorical(y_train, num_labels)
    y_test = to_categorical(y_test, num_labels)
    return {"train_dataset": x_train, "train_labels": y_train,
            "valid_dataset": None, "valid_labels": None,
            "test_dataset": x_test, "test_labels": y_test}
def get_git_hash():
    """Return the current git revision (short hash) as bytes."""
    revision = subprocess.check_output(["git", "describe", "--always"])
    return revision.strip()
def set_seed(logger):
    """Seed the global PRNG with a fresh random seed and log it.

    The seed is logged so a failing run can be reproduced. Returns the
    seed (backward-compatible addition; previous version returned None).
    """
    seed = random.randrange(sys.maxsize)
    # Bug fix: the old code built a local random.Random(seed) instance and
    # discarded it, so the global RNG was never actually seeded.
    random.seed(seed)
    # %d, not %f: the seed is an integer.
    logger.info("Seed was: %d", seed)
    return seed
class MNISTTest(unittest.TestCase):
    """End-to-end check: evolve a CNN for MNIST and verify its fitness."""

    def test_encoding(self):
        # Logging configuration ships with the GeneticAlgorithm package.
        logging.config.fileConfig('GeneticAlgorithm/logs/logging.conf')
        logger = logging.getLogger('testFile')
        logger.info("Setting seed")
        set_seed(logger)
        logger.info("starting test...")
        # Record the git revision so results can be traced to a commit.
        logger.info(get_git_hash())
        start = time.time()
        # Run the genetic search for 3 generations on 28x28x1 inputs.
        best = get_best(3, (28, 28, 1), unpack_testing_data(10))
        end = time.time()
        logger.info("time to best %f", end-start)
        # NOTE(review): assumes element [0] of the fitness tuple is accuracy
        # — TODO confirm against assess_chromosome_fitness.
        self.assertTrue(assess_chromosome_fitness(best, evaluate_best=True, eval_epochs=100, **unpack_testing_data(10))[0] > 0.99)
if __name__ == '__main__':
unittest.main() | examples/MNIST.py | from __future__ import print_function
import time
import sys
from GeneticAlgorithm.fitness import assess_chromosome_fitness
from GeneticAlgorithm.genetic_engine import *
import subprocess
import unittest
import logging
from logging import config
# MNIST geometry and label count.
image_size = 28
num_labels = 10
img_rows = 28
img_cols = 28
def unpack_testing_data(num_labels):
    """Load MNIST, scale pixels to [0, 1] and one-hot encode the labels.

    Returns a dict with train/test datasets and labels; the validation
    entries are intentionally left as None.
    """
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # The channel axis position depends on the Keras backend image format.
    if K.image_data_format() == 'channels_first':
        x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
        x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    else:
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    x_train = x_train.astype('float32') / 255
    x_test = x_test.astype('float32') / 255
    y_train = to_categorical(y_train, num_labels)
    y_test = to_categorical(y_test, num_labels)
    return {"train_dataset": x_train, "train_labels": y_train,
            "valid_dataset": None, "valid_labels": None,
            "test_dataset": x_test, "test_labels": y_test}
def get_git_hash():
    """Return the current git revision (short hash) as bytes."""
    revision = subprocess.check_output(["git", "describe", "--always"])
    return revision.strip()
def set_seed(logger):
    """Seed the global PRNG with a fresh random seed and log it.

    The seed is logged so a failing run can be reproduced. Returns the
    seed (backward-compatible addition; previous version returned None).
    """
    seed = random.randrange(sys.maxsize)
    # Bug fix: the old code built a local random.Random(seed) instance and
    # discarded it, so the global RNG was never actually seeded.
    random.seed(seed)
    # %d, not %f: the seed is an integer.
    logger.info("Seed was: %d", seed)
    return seed
class MNISTTest(unittest.TestCase):
    """End-to-end check: evolve a CNN for MNIST and verify its fitness."""

    def test_encoding(self):
        # Logging configuration ships with the GeneticAlgorithm package.
        logging.config.fileConfig('GeneticAlgorithm/logs/logging.conf')
        logger = logging.getLogger('testFile')
        logger.info("Setting seed")
        set_seed(logger)
        logger.info("starting test...")
        # Record the git revision so results can be traced to a commit.
        logger.info(get_git_hash())
        start = time.time()
        # Run the genetic search for 3 generations on 28x28x1 inputs.
        best = get_best(3, (28, 28, 1), unpack_testing_data(10))
        end = time.time()
        logger.info("time to best %f", end-start)
        # NOTE(review): assumes element [0] of the fitness tuple is accuracy
        # — TODO confirm against assess_chromosome_fitness.
        self.assertTrue(assess_chromosome_fitness(best, evaluate_best=True, eval_epochs=100, **unpack_testing_data(10))[0] > 0.99)
if __name__ == '__main__':
unittest.main() | 0.474631 | 0.403449 |
from telethon.sync import TelegramClient
from telethon.tl.types import InputPeerChannel
from telethon.errors.rpcerrorlist import PeerFloodError, UserPrivacyRestrictedError, PhoneNumberBannedError, ChatAdminRequiredError
from telethon.errors.rpcerrorlist import ChatWriteForbiddenError, UserBannedInChannelError, UserAlreadyParticipantError, FloodWaitError
from telethon.tl.functions.channels import InviteToChannelRequest
import sys
from telethon.tl.functions.messages import ImportChatInviteRequest, AddChatUserRequest
from telethon.tl.functions.channels import JoinChannelRequest
from telethon.tl.types import UserStatusRecently
import time
import random
from colorama import init, Fore
import os
import pickle
init()

# Colorama shortcuts for coloured console output.
r = Fore.RED
lg = Fore.GREEN
rs = Fore.RESET
w = Fore.WHITE
grey = '\033[97m'  # bright-white ANSI code (colorama has no grey)
cy = Fore.CYAN
ye = Fore.YELLOW
colors = [r, lg, w, ye, cy]

# Pre-built status prefixes: [i]nfo, [!] error, [*] success, [~] input, [+], [-].
info = lg + '[' + w + 'i' + lg + ']' + rs
error = lg + '[' + r + '!' + lg + ']' + rs
success = w + '[' + lg + '*' + w + ']' + rs
INPUT = lg + '[' + cy + '~' + lg + ']' + rs
plus = w + '[' + lg + '+' + w + ']' + rs
minus = w + '[' + lg + '-' + w + ']' + rs
def banner():
    """Render the ASCII-art banner, colouring each line at random."""
    art = [
        ' ',
        ' ░█▀▄▀█ █▀▀█ █── █── █▀▀█ ▀▀█▀▀ █▀▀ █── █▀▀ █▀▀▀ █▀▀█ █▀▀█ █▀▄▀█ ',
        ' ░█░█░█ █▄▄█ █── █── █▄▄█ ─░█── █▀▀ █── █▀▀ █─▀█ █▄▄▀ █▄▄█ █─▀─█ ',
        ' ░█──░█ ▀──▀ ▀▀▀ ▀▀▀ ▀──▀ ─░█── ▀▀▀ ▀▀▀ ▀▀▀ ▀▀▀▀ ▀─▀▀ ▀──▀ ▀───▀ ',
        ' 〸🝗㇄🝗Ꮆ尺闩爪 ',
        ' /=\ | | ' ,
        ' \ /=| /= =/ /=\ /=| /=\ /== ' ,
        ' \=/ \=| | |\ \=/ \=| \= ==/ ' ,
        ' ========================================================'
    ]
    for line in art:
        print(f'{random.choice(colors)}{line}{rs}')
def clr():
    """Clear the terminal: 'cls' on Windows, 'clear' elsewhere."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
# Load every saved account tuple (phone number first) pickled into OTP.txt.
accounts = []
f = open('OTP.txt', 'rb')
while True:
    try:
        accounts.append(pickle.load(f))
    except EOFError:
        break
# TODO:
print('\n' + info + lg + '🔍 Cek akun yang dibanned...' + rs)
# Probe each account: a code request failing with PhoneNumberBannedError
# marks the account as banned and drops it from the in-memory list.
for a in accounts:
    phn = a[0]
    print(f'{plus}{grey} Checking {lg}{phn}')
    clnt = TelegramClient(f'sessions/{phn}', 19286603, '3f459e22ac139db64f0ddcd2c70ab1ba')
    clnt.connect()
    banned = []  # NOTE(review): reset per account, only ever holds the current one
    if not clnt.is_user_authorized():
        try:
            clnt.send_code_request(phn)
            print('OK')
        except PhoneNumberBannedError:
            print(f'{error} {w}{phn} {r}⛔️ is banned!{rs}')
            banned.append(a)
            # NOTE(review): mutates `accounts` while the outer loop iterates it.
            for z in banned:
                accounts.remove(z)
            print(info+lg+'❌ Kembali Ke Setting.bat Untuk Menghapus Akun Yang DiBanned'+rs)
    time.sleep(0.5)
    clnt.disconnect()
print(info+' Sessions created!')
clr()
banner()
def log_status(scraped, index):
    """Persist scraping progress (source group, next member index) to status.dat.

    The checkpoint is read back at startup to offer a resume option.
    """
    with open('status.dat', 'wb') as f:
        pickle.dump([scraped, int(index)], f)
        # Fix: removed the explicit f.close() that sat inside the `with`
        # block — the context manager already closes the file.
    print(f'{info}{lg} Session stored in {w}status.dat{lg}')
def exit_window():
    """Wait for ENTER, redraw a clean banner, then terminate via SystemExit."""
    input(f'\n{cy} 📍 Tekan ENTER Untuk Keluar...')
    clr()
    banner()
    sys.exit()
# Try to resume a previous session from status.dat; otherwise prompt for the
# source group link and start from index 0.
try:
    with open('status.dat', 'rb') as f:
        status = pickle.load(f)
        f.close()  # redundant inside `with`, kept as-is
    lol = input(f'{INPUT}{cy} ✍️ Resume scraping members from {w}{status[0]}{lg}? [y/n]: {r}')
    if 'y' in lol:
        scraped_grp = status[0] ; index = int(status[1])
    else:
        # Discard the stale checkpoint file.
        if os.name == 'nt':
            os.system('del status.dat')
        else:
            os.system('rm status.dat')
        scraped_grp = input(f'{INPUT}{cy} ⚙️ Masukkan Link Group Target Dengan Format [https://t.me/NamaGroup] Atau [https://t.me/joinchat/xxxxxxxxx] : {r}')
        index = 0
except:
    # No (or unreadable) checkpoint: fresh start.
    scraped_grp = input(f'{INPUT}{cy} ⚙️ Masukkan Link Group Target Dengan Format [https://t.me/NamaGroup] Atau [https://t.me/joinchat/xxxxxxxxx] : {r}')
    index = 0
# Reload the pickled account tuples.
accounts = []
f = open('OTP.txt', 'rb')
while True:
    try:
        accounts.append(pickle.load(f))
    except EOFError:
        break
print(f'{info}{lg} ✂️ Total Akun: {w}{len(accounts)}')
number_of_accs = int(input(f'{INPUT}{cy} ✍️ Masukkan Jumlah Akun Yang DI Gunakan : {r}'))
print(f'{info}{cy} 📍 Pilih {lg}')
print(f'{cy}[0]{lg}📍 Tambah Ke Group Public')
print(f'{cy}[1]{lg}📍 Tambah Ke Group Private')
choice = int(input(f'{INPUT}{cy} 📍 Masukkan Pilihan: {r}'))
if choice == 0:
    target = str(input(f'{INPUT}{cy} 📍 Masukkan link grup public : {r}'))
else:
    target = str(input(f'{INPUT}{cy} 📍 Masukkan link grup private : {r}'))
print(f'{grey}_'*50)
# Rotate accounts: the first N are used now and re-pickled at the end of
# OTP.txt so the next run starts with different ones.
to_use = [x for x in accounts[:number_of_accs]]
for l in to_use: accounts.remove(l)
with open('OTP.txt', 'wb') as f:
    for a in accounts:
        pickle.dump(a, f)
    for ab in to_use:
        pickle.dump(ab, f)
    f.close()  # redundant inside `with`, kept as-is
sleep_time = int(input(f'{INPUT}{cy} Masukkan Waktu Delay {w}[{lg}0 for None{w}]: {r}'))
print(f'{success}{lg} 📌 Tambah Member Dari {w}{len(to_use)}{lg} Akun(s) --')
adding_status = 0         # total members added across all accounts
approx_members_count = 0  # member count of the source group (set per account)
# Main add loop: each account invites at most 60 members (index..stop) from
# the scraped source group into the target group.
for acc in to_use:
    stop = index + 60  # per-account batch limit
    c = TelegramClient(f'sessions/{acc[0]}', 19286603 , '3f459e22ac139db64f0ddcd2c70ab1ba')
    print(f'{plus}{grey} User: {cy}{acc[0]}{lg} -- {cy}Starting session... ')
    c.start(acc[0])
    acc_name = c.get_me().first_name
    try:
        # Join the source ("scrape") group, by invite hash or public link.
        if '/joinchat/' in scraped_grp:
            g_hash = scraped_grp.split('/joinchat/')[1]
            try:
                c(ImportChatInviteRequest(g_hash))
                print(f'{plus}{grey} User: {cy}{acc_name}{lg} 📌 Bergabung dengan grup untuk Scrape')
            except UserAlreadyParticipantError:
                pass
        else:
            c(JoinChannelRequest(scraped_grp))
            print(f'{plus}{grey} User: {cy}{acc_name}{lg} 📌 Bergabung dengan grup untuk Scrape')
        scraped_grp_entity = c.get_entity(scraped_grp)
        # Join the target group: public channel (choice 0) or private invite.
        if choice == 0:
            c(JoinChannelRequest(target))
            print(f'{plus}{grey} User: {cy}{acc_name}{lg} 📌 Bergabung dengan grup untuk ADD')
            target_entity = c.get_entity(target)
            target_details = InputPeerChannel(target_entity.id, target_entity.access_hash)
        else:
            try:
                grp_hash = target.split('/joinchat/')[1]
                c(ImportChatInviteRequest(grp_hash))
                print(f'{plus}{grey} User: {cy}{acc_name}{lg} 📌 Bergabung dengan grup untuk ADD')
            except UserAlreadyParticipantError:
                pass
            target_entity = c.get_entity(target)
            target_details = target_entity
    except Exception as e:
        # Could not join either group with this account; move to the next.
        print(f'{error}{r} User: {cy}{acc_name}{lg}📌 Gagal bergabung dengan grup')
        print(f'{error} {r}{e}')
        continue
    print(f'{plus}{grey} User: {cy}{acc_name}{lg} 📌 {cy}Retrieving entities...')
    try:
        members = []
        members = c.get_participants(scraped_grp_entity, aggressive=False)
    except Exception as e:
        print(f'{error}{r} Tidak Dapat Scrape Member')
        print(f'{error}{r} {e}')
        continue
    approx_members_count = len(members)
    assert approx_members_count != 0
    if index >= approx_members_count:
        print(f'{error}{lg}Tidak Ada Members Untuk Di ADD!')
        continue
    print(f'{info}{lg} Start: {w}{index}')
    peer_flood_status = 0  # counts PeerFloodError hits; 10 aborts this account
    for user in members[index:stop]:
        index += 1
        if peer_flood_status == 10:
            print(f'{error}{r} 📌 Ada begitu banyak kesalahan! Sesi terakhir ...')
            break
        try:
            if choice == 0:
                c(InviteToChannelRequest(target_details, [user]))
            else:
                c(AddChatUserRequest(target_details.id, user, 42))
            user_id = user.first_name
            target_title = target_entity.title
            print(f'{plus}{grey} User: {cy}{acc_name}{lg} -- {cy}{user_id} {lg}--> {cy}{target_title}')
            adding_status += 1
            print(f'{info}{grey} ❌ User: {cy}{acc_name}{lg} -- Sleep {w}{sleep_time} {lg}second(s)')
            time.sleep(sleep_time)
        except UserPrivacyRestrictedError:
            # Member's privacy settings forbid invitations.
            print(f'{minus}{grey} ❌ User: {cy}{acc_name}{lg} -- {r}📌 Member Memakai Privacy ')
            continue
        except PeerFloodError:
            print(f'{error}{grey} ❌ User: {cy}{acc_name}{lg} -- {r}📌 Terdeteksi Flood.')
            peer_flood_status += 1
            continue
        except ChatWriteForbiddenError:
            # Adding is disabled in the target group: checkpoint and quit.
            print(f'{error}{r} ❌ Tidak dapat menambahkan ke grup. Hubungi admin grup untuk mengaktifkan ADD Member')
            if index < approx_members_count:
                log_status(scraped_grp, index)
            exit_window()
        except UserBannedInChannelError:
            print(f'{error}{grey} User: {cy}{acc_name}{lg} -- {r}DiBanned Nulis Chat DI Group')
            break
        except ChatAdminRequiredError:
            print(f'{error}{grey} User: {cy}{acc_name}{lg} -- {r}Chat Admin Untuk ADD Member')
            break
        except UserAlreadyParticipantError:
            print(f'{minus}{grey} User: {cy}{acc_name}{lg} -- {r}Member Sudah ADA ')
            continue
        except FloodWaitError as e:
            print(f'{error}{r} {e}')
            break
        except ValueError:
            print(f'{error}{r} ❌ Kesalahan dalam Entitas')
            continue
        except KeyboardInterrupt:
            # Manual stop: checkpoint progress, then exit.
            print(f'{error}{r} ---- Proses Di Hentikan ----')
            if index < len(members):
                log_status(scraped_grp, index)
            exit_window()
        except Exception as e:
            print(f'{error} {e}')
            continue
if adding_status != 0:
    print(f"\n{info}{lg} ❤️ Proses Selesai")
try:
if index < approx_members_count:
log_status(scraped_grp, index)
exit_window()
except:
exit_window() | start.py | from telethon.sync import TelegramClient
from telethon.tl.types import InputPeerChannel
from telethon.errors.rpcerrorlist import PeerFloodError, UserPrivacyRestrictedError, PhoneNumberBannedError, ChatAdminRequiredError
from telethon.errors.rpcerrorlist import ChatWriteForbiddenError, UserBannedInChannelError, UserAlreadyParticipantError, FloodWaitError
from telethon.tl.functions.channels import InviteToChannelRequest
import sys
from telethon.tl.functions.messages import ImportChatInviteRequest, AddChatUserRequest
from telethon.tl.functions.channels import JoinChannelRequest
from telethon.tl.types import UserStatusRecently
import time
import random
from colorama import init, Fore
import os
import pickle
init()

# Colorama shortcuts for coloured console output.
r = Fore.RED
lg = Fore.GREEN
rs = Fore.RESET
w = Fore.WHITE
grey = '\033[97m'  # bright-white ANSI code (colorama has no grey)
cy = Fore.CYAN
ye = Fore.YELLOW
colors = [r, lg, w, ye, cy]

# Pre-built status prefixes: [i]nfo, [!] error, [*] success, [~] input, [+], [-].
info = lg + '[' + w + 'i' + lg + ']' + rs
error = lg + '[' + r + '!' + lg + ']' + rs
success = w + '[' + lg + '*' + w + ']' + rs
INPUT = lg + '[' + cy + '~' + lg + ']' + rs
plus = w + '[' + lg + '+' + w + ']' + rs
minus = w + '[' + lg + '-' + w + ']' + rs
def banner():
    """Render the ASCII-art banner, colouring each line at random."""
    art = [
        ' ',
        ' ░█▀▄▀█ █▀▀█ █── █── █▀▀█ ▀▀█▀▀ █▀▀ █── █▀▀ █▀▀▀ █▀▀█ █▀▀█ █▀▄▀█ ',
        ' ░█░█░█ █▄▄█ █── █── █▄▄█ ─░█── █▀▀ █── █▀▀ █─▀█ █▄▄▀ █▄▄█ █─▀─█ ',
        ' ░█──░█ ▀──▀ ▀▀▀ ▀▀▀ ▀──▀ ─░█── ▀▀▀ ▀▀▀ ▀▀▀ ▀▀▀▀ ▀─▀▀ ▀──▀ ▀───▀ ',
        ' 〸🝗㇄🝗Ꮆ尺闩爪 ',
        ' /=\ | | ' ,
        ' \ /=| /= =/ /=\ /=| /=\ /== ' ,
        ' \=/ \=| | |\ \=/ \=| \= ==/ ' ,
        ' ========================================================'
    ]
    for line in art:
        print(f'{random.choice(colors)}{line}{rs}')
def clr():
    """Clear the terminal: 'cls' on Windows, 'clear' elsewhere."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
# Load every saved account tuple (phone number first) pickled into OTP.txt.
accounts = []
f = open('OTP.txt', 'rb')
while True:
    try:
        accounts.append(pickle.load(f))
    except EOFError:
        break
# TODO:
print('\n' + info + lg + '🔍 Cek akun yang dibanned...' + rs)
# Probe each account: a code request failing with PhoneNumberBannedError
# marks the account as banned and drops it from the in-memory list.
for a in accounts:
    phn = a[0]
    print(f'{plus}{grey} Checking {lg}{phn}')
    clnt = TelegramClient(f'sessions/{phn}', 19286603, '3f459e22ac139db64f0ddcd2c70ab1ba')
    clnt.connect()
    banned = []  # NOTE(review): reset per account, only ever holds the current one
    if not clnt.is_user_authorized():
        try:
            clnt.send_code_request(phn)
            print('OK')
        except PhoneNumberBannedError:
            print(f'{error} {w}{phn} {r}⛔️ is banned!{rs}')
            banned.append(a)
            # NOTE(review): mutates `accounts` while the outer loop iterates it.
            for z in banned:
                accounts.remove(z)
            print(info+lg+'❌ Kembali Ke Setting.bat Untuk Menghapus Akun Yang DiBanned'+rs)
    time.sleep(0.5)
    clnt.disconnect()
print(info+' Sessions created!')
clr()
banner()
def log_status(scraped, index):
    """Persist scraping progress (source group, next member index) to status.dat.

    The checkpoint is read back at startup to offer a resume option.
    """
    with open('status.dat', 'wb') as f:
        pickle.dump([scraped, int(index)], f)
        # Fix: removed the explicit f.close() that sat inside the `with`
        # block — the context manager already closes the file.
    print(f'{info}{lg} Session stored in {w}status.dat{lg}')
def exit_window():
    """Wait for ENTER, redraw a clean banner, then terminate via SystemExit."""
    input(f'\n{cy} 📍 Tekan ENTER Untuk Keluar...')
    clr()
    banner()
    sys.exit()
# Try to resume a previous session from status.dat; otherwise prompt for the
# source group link and start from index 0.
try:
    with open('status.dat', 'rb') as f:
        status = pickle.load(f)
        f.close()  # redundant inside `with`, kept as-is
    lol = input(f'{INPUT}{cy} ✍️ Resume scraping members from {w}{status[0]}{lg}? [y/n]: {r}')
    if 'y' in lol:
        scraped_grp = status[0] ; index = int(status[1])
    else:
        # Discard the stale checkpoint file.
        if os.name == 'nt':
            os.system('del status.dat')
        else:
            os.system('rm status.dat')
        scraped_grp = input(f'{INPUT}{cy} ⚙️ Masukkan Link Group Target Dengan Format [https://t.me/NamaGroup] Atau [https://t.me/joinchat/xxxxxxxxx] : {r}')
        index = 0
except:
    # No (or unreadable) checkpoint: fresh start.
    scraped_grp = input(f'{INPUT}{cy} ⚙️ Masukkan Link Group Target Dengan Format [https://t.me/NamaGroup] Atau [https://t.me/joinchat/xxxxxxxxx] : {r}')
    index = 0
# Reload the pickled account tuples.
accounts = []
f = open('OTP.txt', 'rb')
while True:
    try:
        accounts.append(pickle.load(f))
    except EOFError:
        break
print(f'{info}{lg} ✂️ Total Akun: {w}{len(accounts)}')
number_of_accs = int(input(f'{INPUT}{cy} ✍️ Masukkan Jumlah Akun Yang DI Gunakan : {r}'))
print(f'{info}{cy} 📍 Pilih {lg}')
print(f'{cy}[0]{lg}📍 Tambah Ke Group Public')
print(f'{cy}[1]{lg}📍 Tambah Ke Group Private')
choice = int(input(f'{INPUT}{cy} 📍 Masukkan Pilihan: {r}'))
if choice == 0:
    target = str(input(f'{INPUT}{cy} 📍 Masukkan link grup public : {r}'))
else:
    target = str(input(f'{INPUT}{cy} 📍 Masukkan link grup private : {r}'))
print(f'{grey}_'*50)
# Rotate accounts: the first N are used now and re-pickled at the end of
# OTP.txt so the next run starts with different ones.
to_use = [x for x in accounts[:number_of_accs]]
for l in to_use: accounts.remove(l)
with open('OTP.txt', 'wb') as f:
    for a in accounts:
        pickle.dump(a, f)
    for ab in to_use:
        pickle.dump(ab, f)
    f.close()  # redundant inside `with`, kept as-is
sleep_time = int(input(f'{INPUT}{cy} Masukkan Waktu Delay {w}[{lg}0 for None{w}]: {r}'))
print(f'{success}{lg} 📌 Tambah Member Dari {w}{len(to_use)}{lg} Akun(s) --')
adding_status = 0         # total members added across all accounts
approx_members_count = 0  # member count of the source group (set per account)
# Main add loop: each account invites at most 60 members (index..stop) from
# the scraped source group into the target group.
for acc in to_use:
    stop = index + 60  # per-account batch limit
    c = TelegramClient(f'sessions/{acc[0]}', 19286603 , '3f459e22ac139db64f0ddcd2c70ab1ba')
    print(f'{plus}{grey} User: {cy}{acc[0]}{lg} -- {cy}Starting session... ')
    c.start(acc[0])
    acc_name = c.get_me().first_name
    try:
        # Join the source ("scrape") group, by invite hash or public link.
        if '/joinchat/' in scraped_grp:
            g_hash = scraped_grp.split('/joinchat/')[1]
            try:
                c(ImportChatInviteRequest(g_hash))
                print(f'{plus}{grey} User: {cy}{acc_name}{lg} 📌 Bergabung dengan grup untuk Scrape')
            except UserAlreadyParticipantError:
                pass
        else:
            c(JoinChannelRequest(scraped_grp))
            print(f'{plus}{grey} User: {cy}{acc_name}{lg} 📌 Bergabung dengan grup untuk Scrape')
        scraped_grp_entity = c.get_entity(scraped_grp)
        # Join the target group: public channel (choice 0) or private invite.
        if choice == 0:
            c(JoinChannelRequest(target))
            print(f'{plus}{grey} User: {cy}{acc_name}{lg} 📌 Bergabung dengan grup untuk ADD')
            target_entity = c.get_entity(target)
            target_details = InputPeerChannel(target_entity.id, target_entity.access_hash)
        else:
            try:
                grp_hash = target.split('/joinchat/')[1]
                c(ImportChatInviteRequest(grp_hash))
                print(f'{plus}{grey} User: {cy}{acc_name}{lg} 📌 Bergabung dengan grup untuk ADD')
            except UserAlreadyParticipantError:
                pass
            target_entity = c.get_entity(target)
            target_details = target_entity
    except Exception as e:
        # Could not join either group with this account; move to the next.
        print(f'{error}{r} User: {cy}{acc_name}{lg}📌 Gagal bergabung dengan grup')
        print(f'{error} {r}{e}')
        continue
    print(f'{plus}{grey} User: {cy}{acc_name}{lg} 📌 {cy}Retrieving entities...')
    try:
        members = []
        members = c.get_participants(scraped_grp_entity, aggressive=False)
    except Exception as e:
        print(f'{error}{r} Tidak Dapat Scrape Member')
        print(f'{error}{r} {e}')
        continue
    approx_members_count = len(members)
    assert approx_members_count != 0
    if index >= approx_members_count:
        print(f'{error}{lg}Tidak Ada Members Untuk Di ADD!')
        continue
    print(f'{info}{lg} Start: {w}{index}')
    peer_flood_status = 0  # counts PeerFloodError hits; 10 aborts this account
    for user in members[index:stop]:
        index += 1
        if peer_flood_status == 10:
            print(f'{error}{r} 📌 Ada begitu banyak kesalahan! Sesi terakhir ...')
            break
        try:
            if choice == 0:
                c(InviteToChannelRequest(target_details, [user]))
            else:
                c(AddChatUserRequest(target_details.id, user, 42))
            user_id = user.first_name
            target_title = target_entity.title
            print(f'{plus}{grey} User: {cy}{acc_name}{lg} -- {cy}{user_id} {lg}--> {cy}{target_title}')
            adding_status += 1
            print(f'{info}{grey} ❌ User: {cy}{acc_name}{lg} -- Sleep {w}{sleep_time} {lg}second(s)')
            time.sleep(sleep_time)
        except UserPrivacyRestrictedError:
            # Member's privacy settings forbid invitations.
            print(f'{minus}{grey} ❌ User: {cy}{acc_name}{lg} -- {r}📌 Member Memakai Privacy ')
            continue
        except PeerFloodError:
            print(f'{error}{grey} ❌ User: {cy}{acc_name}{lg} -- {r}📌 Terdeteksi Flood.')
            peer_flood_status += 1
            continue
        except ChatWriteForbiddenError:
            # Adding is disabled in the target group: checkpoint and quit.
            print(f'{error}{r} ❌ Tidak dapat menambahkan ke grup. Hubungi admin grup untuk mengaktifkan ADD Member')
            if index < approx_members_count:
                log_status(scraped_grp, index)
            exit_window()
        except UserBannedInChannelError:
            print(f'{error}{grey} User: {cy}{acc_name}{lg} -- {r}DiBanned Nulis Chat DI Group')
            break
        except ChatAdminRequiredError:
            print(f'{error}{grey} User: {cy}{acc_name}{lg} -- {r}Chat Admin Untuk ADD Member')
            break
        except UserAlreadyParticipantError:
            print(f'{minus}{grey} User: {cy}{acc_name}{lg} -- {r}Member Sudah ADA ')
            continue
        except FloodWaitError as e:
            print(f'{error}{r} {e}')
            break
        except ValueError:
            print(f'{error}{r} ❌ Kesalahan dalam Entitas')
            continue
        except KeyboardInterrupt:
            # Manual stop: checkpoint progress, then exit.
            print(f'{error}{r} ---- Proses Di Hentikan ----')
            if index < len(members):
                log_status(scraped_grp, index)
            exit_window()
        except Exception as e:
            print(f'{error} {e}')
            continue
if adding_status != 0:
    print(f"\n{info}{lg} ❤️ Proses Selesai")
try:
    # Save progress if anything remains, then leave via the standard prompt.
    if index < approx_members_count:
        log_status(scraped_grp, index)
    exit_window()
except:
    # NOTE(review): the bare except also catches the SystemExit raised by
    # exit_window(), causing a second prompt — kept as-is.
    exit_window()
import json
import platform
import warnings
import requests
from . import __version__, exceptions, models, API_CONFIG, DEFAULT_API_VERSION, DEFAULT_API_ENDPOINT
class SearchFilters:
    """Constants naming the evidence types accepted by the search endpoint."""

    SYMPTOMS = "symptom"
    RISK_FACTORS = "risk_factor"
    LAB_TESTS = "lab_test"

    # Every known filter, used for validating caller-supplied filters.
    ALL = [SYMPTOMS, RISK_FACTORS, LAB_TESTS]


# Shared, stateless instance exposed as the public entry point.
SEARCH_FILTERS = SearchFilters()
class API:
"""Class which handles requests to the Infermedica API."""
# User-Agent for HTTP request
library_details = f"requests {requests.__version__}; python {platform.python_version()}"
user_agent = f"Infermedica-API-Python {__version__} ({library_details})"
def __init__(self, **kwargs):
    """
    Initialize API object.

    Mandatory kwargs: ``app_id`` and ``app_key``. Optional: ``endpoint``,
    ``api_version``, ``api_definitions``, ``default_headers``, ``model``,
    ``dev_mode``.

    Usage::

        >>> import infermedica_api
        >>> api = infermedica_api.API(app_id='YOUR_APP_ID', app_key='YOUR_APP_KEY')
    """
    self.endpoint = kwargs.get("endpoint", DEFAULT_API_ENDPOINT)
    self.api_version = kwargs.get("api_version", DEFAULT_API_VERSION)
    self.app_id = kwargs["app_id"]  # Mandatory parameter, so not using `dict.get`
    self.app_key = kwargs["app_key"]  # Mandatory parameter, so not using `dict.get`
    self.default_headers = self.__calculate_headers(kwargs)

    # Bug fix: the old test read `x in d or {}`, which parses as
    # `(x in d) or {}` and raises TypeError when api_definitions=None is
    # passed explicitly. Normalise to a dict before the membership test.
    api_definitions = kwargs.get("api_definitions") or {}
    if self.api_version in api_definitions:
        self.api_methods = api_definitions[self.api_version]['methods']
    elif self.api_version in API_CONFIG:
        self.api_methods = API_CONFIG[self.api_version]['methods']
    else:
        # Unknown version: fall back to the bundled default definition.
        self.api_methods = API_CONFIG[DEFAULT_API_VERSION]['methods']
def __calculate_headers(self, parameters):
    """Build extra default headers from constructor kwargs (model, dev mode)."""
    headers = parameters.get("default_headers", {})
    model = parameters.get("model", None)
    if model:
        headers["Model"] = model
    dev_mode = parameters.get("dev_mode", None)
    if dev_mode and dev_mode == True:
        headers["Dev-Mode"] = "true"
    return headers
def __get_url(self, method):
    """Join endpoint, API version and method path into a full URL."""
    return f"{self.endpoint}{self.api_version}{method}"
def __get_headers(self, override):
    """Return the default HTTP headers merged with instance and call overrides."""
    merged = {
        "Content-Type": "application/json",
        "Accept": "application/json",
        "User-Agent": self.user_agent,
        "App-Id": self.app_id,
        "App-Key": self.app_key
    }
    # Instance-level defaults win over the base set; call-level overrides win over both.
    merged.update(self.default_headers)
    merged.update(override)
    return merged
def __api_call(self, url, method, **kwargs):
    """Perform the HTTP request and translate the response (or raise)."""
    override = kwargs['headers'] or {}
    kwargs['headers'] = self.__get_headers(override)
    response = requests.request(method, url, **kwargs)
    return self.__handle_response(response)
def __handle_response(self, response):
    """
    Validate the HTTP response; decode JSON on success, raise on failure.

    :returns: dict or list with response data
    :raises: BadRequest, UnauthorizedAccess, ForbiddenAccess,
        ResourceNotFound, MethodNotAllowed, ServerError, ConnectionError
        (all from infermedica_api.exceptions)
    """
    status = response.status_code
    content = response.content.decode('utf-8')

    if 200 <= status <= 299:
        # Some endpoints legitimately return an empty body on success.
        return json.loads(content) if content else {}

    # Client errors map one-to-one onto dedicated exception types.
    by_status = {
        400: exceptions.BadRequest,
        401: exceptions.UnauthorizedAccess,
        403: exceptions.ForbiddenAccess,
        404: exceptions.ResourceNotFound,
        405: exceptions.MethodNotAllowed,
    }
    if status in by_status:
        raise by_status[status](response, content)
    if 500 <= status <= 599:
        raise exceptions.ServerError(response, content)
    raise exceptions.ConnectionError(response, content)
def __get(self, method, params=None, headers=None):
    """Wrapper for a GET API call."""
    url = self.__get_url(method)
    return self.__api_call(url, "GET", headers=headers, params=params)
def __post(self, method, data, params=None, headers=None):
    """Wrapper for a POST API call."""  # fixed docstring: previously said GET
    return self.__api_call(self.__get_url(method), "POST", headers=headers, data=data, params=params)
def __get_method(self, name):
    """Resolve a method path for the configured API version.

    :raises: infermedica_api.exceptions.MethodNotAvailableInAPIVersion
        when this API version does not define the method.
    """
    try:
        return self.api_methods[name]
    except KeyError as e:
        # Chain the original KeyError (was silently dropped) for easier debugging.
        raise exceptions.MethodNotAvailableInAPIVersion(self.api_version, name) from e
def __get_interview_id_headers(self, diagnosis_request=None, interview_id=None):
    """Build the optional Interview-Id header; an explicit id wins over the request's."""
    if interview_id:
        return {'Interview-Id': interview_id}
    if isinstance(diagnosis_request, models.Diagnosis) and diagnosis_request.interview_id:
        return {'Interview-Id': diagnosis_request.interview_id}
    return {}
def info(self):
    """Makes an API request and returns basic API model information."""
    method = self.__get_method('info')
    return self.__get(method)
def search(self, phrase, sex=None, max_results=8, filters=None, **kwargs):
    """
    Makes an API search request and returns list of dicts containing keys: 'id', 'label' and 'type'.
    Each dict represent an evidence (symptom, lab test or risk factor).
    By default only symptoms are returned, to include other evidence types use filters.

    :param phrase: Phrase to look for.
    :param sex: Sex of the patient 'female' or 'male'.
    :param max_results: Maximum number of result to return, default is 8.
    :param filters: A single filter string or any iterable of filters,
        values taken from SEARCH_FILTERS.ALL.

    :returns: A List of dicts with 'id' and 'label' keys.
    :raises: infermedica_api.exceptions.InvalidSearchFilter for an unknown filter.
    """
    method = self.__get_method('search')
    params = kwargs.pop('params', {})
    params.update({
        'phrase': phrase,
        'max_results': max_results
    })
    if sex:
        params['sex'] = sex
    if filters:
        # Normalise: a single string becomes a one-element list; any other
        # iterable (list, tuple, set, ...) is materialised as a list. The
        # old code only handled list/tuple/str and crashed with a KeyError
        # on params['type'] for other iterables.
        if isinstance(filters, str):
            requested = [filters]
        else:
            requested = list(filters)
        # Validate before issuing the request. Renamed the loop variable to
        # avoid shadowing the builtin `filter`.
        for search_filter in requested:
            if search_filter not in SEARCH_FILTERS.ALL:
                raise exceptions.InvalidSearchFilter(search_filter)
        params['type'] = requested
    return self.__get(method, params=params)
def lookup(self, phrase, sex=None):
    """
    Makes an API lookup request and returns evidence details object.

    :param phrase: Phrase to look for.
    :param sex: Sex of the patient 'female' or 'male'.
    :returns: Dictionary with details.
    """
    params = {'phrase': phrase}
    if sex:
        params['sex'] = sex
    return self.__get(self.__get_method('lookup'), params=params)
def suggest(self, diagnosis_request, max_results=8, interview_id=None, **kwargs):
    """
    Makes an API suggest request and returns a list of suggested evidence.

    :param diagnosis_request: :class:`infermedica_api.models.Diagnosis` or dict
    :returns: A list of suggestions, dicts with 'id', 'name' and 'common_name' keys.
    """
    method = self.__get_method('suggest')
    headers = self.__get_interview_id_headers(interview_id=interview_id)
    params = kwargs.pop('params', {})
    params.update({'max_results': max_results})
    # Accept either a Diagnosis object or a ready-made request dict.
    if isinstance(diagnosis_request, models.Diagnosis):
        payload = diagnosis_request.get_api_request()
    else:
        payload = diagnosis_request
    return self.__post(method, params=params, headers=headers, data=json.dumps(payload))
def parse(self, text, include_tokens=False, interview_id=None, **kwargs):
    """
    Makes a parse API request and returns detailed mentions found in the text.

    :param text: Text to parse.
    :param include_tokens: Switch to manipulate the include_tokens parameter.
    :returns: :class:`infermedica_api.models.ParseResults`
    """
    method = self.__get_method('parse')
    headers = self.__get_interview_id_headers(interview_id=interview_id)
    params = kwargs.pop('params', {})
    payload = json.dumps({
        'text': text,
        'include_tokens': include_tokens
    })
    response = self.__post(method, payload, params=params, headers=headers)
    return models.ParseResults.from_json(response)
def diagnosis(self, diagnosis_request, interview_id=None, **kwargs):
    """
    Makes a diagnosis API request and returns the next question with
    possible conditions.

    :param diagnosis_request: :class:`infermedica_api.models.Diagnosis` or dict
    :param interview_id: Unique interview id for diagnosis
    :returns: The updated Diagnosis object, or the raw response dict when a
        dict request was supplied.
    """
    method = self.__get_method('diagnosis')
    headers = self.__get_interview_id_headers(
        diagnosis_request=diagnosis_request,
        interview_id=interview_id
    )
    params = kwargs.pop('params', {})
    # Raw dict in -> raw dict out.
    if not isinstance(diagnosis_request, models.Diagnosis):
        return self.__post(method, json.dumps(diagnosis_request), params=params, headers=headers)
    # Diagnosis object in -> object updated in place and returned.
    response = self.__post(
        method,
        json.dumps(diagnosis_request.get_api_request()),
        params=params,
        headers=headers
    )
    diagnosis_request.update_from_api(response)
    return diagnosis_request
def explain(self, diagnosis_request, target_id, interview_id=None, **kwargs):
    """
    Makes an explain API request for the given target condition and returns
    supporting and conflicting evidence.

    :param diagnosis_request: :class:`infermedica_api.models.Diagnosis` or dict
    :param target_id: Condition id for which explain shall be calculated.
    :returns: :class:`infermedica_api.models.ExplainResults`
    """
    method = self.__get_method('explain')
    headers = self.__get_interview_id_headers(diagnosis_request=diagnosis_request, interview_id=interview_id)
    params = kwargs.pop('params', {})
    if isinstance(diagnosis_request, models.Diagnosis):
        payload = diagnosis_request.get_explain_request(target_id)
    else:
        # Merge the target into a copy of the raw request dict.
        payload = dict(diagnosis_request, **{'target': target_id})
    response = self.__post(method, json.dumps(payload), params=params, headers=headers)
    return models.ExplainResults.from_json(response)
def triage(self, diagnosis_request, interview_id=None, **kwargs):
    """
    Makes a triage API request and returns the triage results dict.
    See the docs: https://developer.infermedica.com/docs/triage.

    :param diagnosis_request: :class:`infermedica_api.models.Diagnosis` or dict
    :returns: A dict object with api response
    """
    method = self.__get_method('triage')
    headers = self.__get_interview_id_headers(diagnosis_request=diagnosis_request, interview_id=interview_id)
    params = kwargs.pop('params', {})
    if isinstance(diagnosis_request, models.Diagnosis):
        payload = diagnosis_request.get_api_request()
    else:
        payload = diagnosis_request
    return self.__post(method, json.dumps(payload), params=params, headers=headers)
def condition_details(self, _id):
    """
    Fetch details of a single condition.

    :param _id: Condition id
    :returns: :class:`infermedica_api.models.Condition`
    """
    endpoint = self.__get_method('condition_details').format(id=_id)
    return models.Condition.from_json(self.__get(endpoint))
def conditions_list(self):
    """
    Fetch the full list of conditions.

    :returns: :class:`infermedica_api.models.ConditionList`
    """
    payload = self.__get(self.__get_method('conditions'))
    return models.ConditionList.from_json(payload)
def symptom_details(self, _id):
    """
    Fetch details of a single symptom.

    :param _id: Symptom id
    :returns: :class:`infermedica_api.models.Symptom`
    """
    endpoint = self.__get_method('symptom_details').format(id=_id)
    return models.Symptom.from_json(self.__get(endpoint))
def symptoms_list(self):
    """
    Fetch the full list of symptoms.

    :returns: :class:`infermedica_api.models.SymptomList`
    """
    payload = self.__get(self.__get_method('symptoms'))
    return models.SymptomList.from_json(payload)
def lab_test_details(self, _id):
    """
    Fetch details of a single lab test.

    :param _id: LabTest id
    :returns: :class:`infermedica_api.models.LabTest`
    """
    endpoint = self.__get_method('lab_test_details').format(id=_id)
    return models.LabTest.from_json(self.__get(endpoint))
def lab_tests_list(self):
"""
Makes an API request and returns list of lab_test details objects.
:returns: A LabTestList list object with LabTest objects
:rtype: :class:`infermedica_api.models.LabTestList`
"""
response = self.__get(self.__get_method('lab_tests'))
return models.LabTestList.from_json(response)
def risk_factor_details(self, _id):
"""
Makes an API request and returns risk factor details object.
:param _id: risk factor id
:type _id: str
:returns: A RiskFactor object
:rtype: :class:`infermedica_api.models.RiskFactor`
"""
method = self.__get_method('risk_factor_details')
response = self.__get(method.format(**{'id': _id}))
return models.RiskFactor.from_json(response)
def risk_factors_list(self):
"""
Makes an API request and returns list of risk factors details objects.
:returns: A RiskFactorList list object with RiskFactor objects
:rtype: :class:`infermedica_api.models.RiskFactorList`
"""
response = self.__get(self.__get_method('risk_factors'))
return models.RiskFactorList.from_json(response)
def red_flags(self, diagnosis_request, max_results=8, interview_id=None, **kwargs):
"""
Makes an API request with provided diagnosis data and returns a list
of evidence that may be related to potentially life-threatening
conditions.
:param diagnosis_request: Diagnosis request object or diagnosis json.
:type diagnosis_request: :class:`infermedica_api.models.Diagnosis` or dict
:param interview_id: Unique interview id for diagnosis
:type interview_id: str
:returns: A list of RedFlag objects
:rtype: :class:`infermedica_api.models.RedFlagList`
"""
method = self.__get_method('red_flags')
headers = self.__get_interview_id_headers(
diagnosis_request=diagnosis_request,
interview_id=interview_id,
)
params = kwargs.pop('params', {})
params.update({'max_results': max_results})
request = diagnosis_request
if isinstance(diagnosis_request, models.Diagnosis):
request = diagnosis_request.get_api_request()
response = self.__post(
method,
json.dumps(request),
params=params,
headers=headers
)
return models.RedFlagList.from_json(response)
def rationale(self, diagnosis_request, interview_id=None, **kwargs):
"""
Makes an API request with provided diagnosis data and returns
an explenation of why the given question has been selected by
the reasoning engine.
:param diagnosis_request: Diagnosis request object or diagnosis json.
:type diagnosis_request: :class:`infermedica_api.models.Diagnosis` or dict
:param interview_id: Unique interview id for diagnosis
:type interview_id: str
:returns: An instance of the RationaleResult
:rtype: :class:`infermedica_api.models.RationaleResult`
"""
method = self.__get_method('rationale')
headers = self.__get_interview_id_headers(
diagnosis_request=diagnosis_request,
interview_id=interview_id,
)
params = kwargs.pop('params', {})
request = diagnosis_request
if isinstance(diagnosis_request, models.Diagnosis):
request = diagnosis_request.get_api_request()
response = self.__post(
method,
json.dumps(request),
params=params,
headers=headers
)
return models.RationaleResult.from_json(response)
__api__ = None
__api_aliased__ = {}
def get_api(alias=None):
"""
Returns global API object and if present,
otherwise raise MissingConfiguration exception.
:param alias: Alias of the API to retrieve
:type alias: str
:returns: An API object
:rtype: :class:`infermedica_api.webservice.API`
:raises: :class:`infermedica_api.exceptions.MissingConfiguration`
"""
global __api__
global __api_aliased__
if isinstance(alias, str):
try:
return __api_aliased__[alias]
except KeyError:
raise exceptions.MissingConfiguration(alias)
if __api__ is None:
raise exceptions.MissingConfiguration()
return __api__
def configure(options=None, **config):
"""
Configure and create new global API object with given configuration.
Configuration can be passed as a dict or separate arguments.
Returns newly created object.
Usage:
>>> import infermedica_api
>>> infermedica_api.configure({'app_id': 'YOUR_APP_ID', 'app_key': 'YOUR_APP_KEY'})
... or:
>>> import infermedica_api
>>> infermedica_api.configure(app_id='YOUR_APP_ID', app_key='YOUR_APP_KEY')
:param options: Dict with configuration data
:type options: dict
:returns: An API object
:rtype: :class:`infermedica_api.webservice.API`
"""
global __api__
global __api_aliased__
configuration = dict(options or {}, **config)
if 'alias' in configuration and isinstance(configuration['alias'], str):
__api_aliased__[configuration['alias']] = API(**configuration)
if configuration.get('default', False):
__api__ = __api_aliased__[configuration['alias']]
return __api_aliased__[configuration['alias']]
__api__ = API(**configuration)
return __api__ | infermedica_api/webservice.py | import json
import platform
import warnings
import requests
from . import __version__, exceptions, models, API_CONFIG, DEFAULT_API_VERSION, DEFAULT_API_ENDPOINT
class SearchFilters:
"""Simple class to hold search filter constants."""
SYMPTOMS = "symptom"
RISK_FACTORS = "risk_factor"
LAB_TESTS = "lab_test"
ALL = [SYMPTOMS, RISK_FACTORS, LAB_TESTS]
SEARCH_FILTERS = SearchFilters()
class API:
"""Class which handles requests to the Infermedica API."""
# User-Agent for HTTP request
library_details = f"requests {requests.__version__}; python {platform.python_version()}"
user_agent = f"Infermedica-API-Python {__version__} ({library_details})"
def __init__(self, **kwargs):
"""
Initialize API object.
Usage::
>>> import infermedica_api
>>> api = infermedica_api.API(app_id='YOUR_APP_ID', app_key='YOUR_APP_KEY')
"""
self.endpoint = kwargs.get("endpoint", DEFAULT_API_ENDPOINT)
self.api_version = kwargs.get("api_version", DEFAULT_API_VERSION)
self.app_id = kwargs["app_id"] # Mandatory parameter, so not using `dict.get`
self.app_key = kwargs["app_key"] # Mandatory parameter, so not using `dict.get`
self.default_headers = self.__calculate_headers(kwargs)
if self.api_version in kwargs.get("api_definitions", {}) or {}:
self.api_methods = kwargs["api_definitions"][self.api_version]['methods']
elif self.api_version in API_CONFIG:
self.api_methods = API_CONFIG[self.api_version]['methods']
else:
self.api_methods = API_CONFIG[DEFAULT_API_VERSION]['methods']
def __calculate_headers(self, parameters):
headers = parameters.get("default_headers", {})
if parameters.get("model", None):
headers.update({
"Model": parameters["model"]
})
if parameters.get("dev_mode", None) and parameters["dev_mode"] == True:
headers.update({
"Dev-Mode": "true"
})
return headers
def __get_url(self, method):
return self.endpoint + self.api_version + method
def __get_headers(self, override):
"""Returns default HTTP headers."""
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
"User-Agent": self.user_agent,
"App-Id": self.app_id,
"App-Key": self.app_key
}
headers.update(self.default_headers)
headers.update(override)
return headers
def __api_call(self, url, method, **kwargs):
kwargs['headers'] = self.__get_headers(kwargs['headers'] or {})
response = requests.request(method, url, **kwargs)
return self.__handle_response(response)
def __handle_response(self, response):
"""
Validates HTTP response, if response is correct decode json data and returns dict object.
If response is not correct raise appropriate exception.
:returns: dict or list with response data
:rtype: dict or list
:raises:
infermedica_api.exceptions.BadRequest,
infermedica_api.exceptions.UnauthorizedAccess,
infermedica_api.exceptions.ForbiddenAccess,
infermedica_api.exceptions.ResourceNotFound,
infermedica_api.exceptions.MethodNotAllowed,
infermedica_api.exceptions.ServerError,
infermedica_api.exceptions.ConnectionError
"""
status = response.status_code
content = response.content.decode('utf-8')
if 200 <= status <= 299:
return json.loads(content) if content else {}
elif status == 400:
raise exceptions.BadRequest(response, content)
elif status == 401:
raise exceptions.UnauthorizedAccess(response, content)
elif status == 403:
raise exceptions.ForbiddenAccess(response, content)
elif status == 404:
raise exceptions.ResourceNotFound(response, content)
elif status == 405:
raise exceptions.MethodNotAllowed(response, content)
elif 500 <= status <= 599:
raise exceptions.ServerError(response, content)
else:
raise exceptions.ConnectionError(response, content)
def __get(self, method, params=None, headers=None):
"""Wrapper for a GET API call."""
return self.__api_call(self.__get_url(method), "GET", headers=headers, params=params)
def __post(self, method, data, params=None, headers=None):
"""Wrapper for a GET API call."""
return self.__api_call(self.__get_url(method), "POST", headers=headers, data=data, params=params)
def __get_method(self, name):
try:
return self.api_methods[name]
except KeyError as e:
raise exceptions.MethodNotAvailableInAPIVersion(self.api_version, name)
def __get_interview_id_headers(self, diagnosis_request=None, interview_id=None):
headers = {}
if interview_id:
headers['Interview-Id'] = interview_id
elif isinstance(diagnosis_request, models.Diagnosis) and diagnosis_request.interview_id:
headers['Interview-Id'] = diagnosis_request.interview_id
return headers
def info(self):
"""Makes an API request and returns basic API model information."""
return self.__get(self.__get_method('info'))
def search(self, phrase, sex=None, max_results=8, filters=None, **kwargs):
"""
Makes an API search request and returns list of dicts containing keys: 'id', 'label' and 'type'.
Each dict represent an evidence (symptom, lab test or risk factor).
By default only symptoms are returned, to include other evidence types use filters.
:param phrase: Phrase to look for.
:type phrase: str
:param sex: Sex of the patient 'female' or 'male'.
:type sex: str
:param max_results: Maximum number of result to return, default is 8.
:type max_results: int
:param filters: List of search filters, taken from SEARCH_FILTERS variable.
:type filters: list
:returns: A List of dicts with 'id' and 'label' keys.
:rtype: list
"""
method = self.__get_method('search')
params = kwargs.pop('params', {})
params.update({
'phrase': phrase,
'max_results': max_results
})
if sex:
params['sex'] = sex
if filters:
if isinstance(filters, (list, tuple)):
params['type'] = filters
elif isinstance(filters, str):
params['type'] = [filters]
for filter in params['type']:
if filter not in SEARCH_FILTERS.ALL:
raise exceptions.InvalidSearchFilter(filter)
return self.__get(method, params=params)
def lookup(self, phrase, sex=None):
"""
Makes an API lookup request and returns evidence details object.
:param phrase: Phrase to look for.
:type phrase: str
:param sex: Sex of the patient 'female' or 'male'.
:type sex: str
:returns: Dictionary with details.
:rtype: dict
"""
method = self.__get_method('lookup')
params = {
'phrase': phrase
}
if sex:
params['sex'] = sex
return self.__get(method, params=params)
def suggest(self, diagnosis_request, max_results=8, interview_id=None, **kwargs):
"""
Makes an API suggest request and returns a list of suggested evidence.
:param diagnosis_request: Diagnosis request object or json request for diagnosis method.
:type diagnosis_request: :class:`infermedica_api.models.Diagnosis` or dict
:returns: A list of suggestions, dicts with 'id', 'name' and 'common_name' keys.
:rtype: list
"""
method = self.__get_method('suggest')
headers = self.__get_interview_id_headers(interview_id=interview_id)
params = kwargs.pop('params', {})
params.update({'max_results': max_results})
request = diagnosis_request
if isinstance(diagnosis_request, models.Diagnosis):
request = diagnosis_request.get_api_request()
return self.__post(
method,
params=params,
headers=headers,
data=json.dumps(request)
)
def parse(self, text, include_tokens=False, interview_id=None, **kwargs):
"""
Makes an parse API request with provided text and include_tokens parameter.
Returns parse results with detailed list of mentions found in the text.
:param phrase: Text to parse.
:type phrase: str
:param include_tokens: Switch to manipulate the include_tokens parameter.
:type include_tokens: bool
:returns: A ParseResults object
:rtype: :class:`infermedica_api.models.ParseResults`
"""
method = self.__get_method('parse')
headers = self.__get_interview_id_headers(interview_id=interview_id)
params = kwargs.pop('params', {})
request = {
'text': text,
'include_tokens': include_tokens
}
response = self.__post(
method,
json.dumps(request),
params=params,
headers=headers
)
return models.ParseResults.from_json(response)
def diagnosis(self, diagnosis_request, interview_id=None, **kwargs):
"""
Makes a diagnosis API request with provided diagnosis data
and returns diagnosis question with possible conditions.
:param diagnosis_request: Diagnosis request object or json request for diagnosis method.
:type diagnosis_request: :class:`infermedica_api.models.Diagnosis` or dict
:param interview_id: Unique interview id for diagnosis
:type interview_id: str
:returns: A Diagnosis object with api response
:rtype: :class:`infermedica_api.models.Diagnosis`
"""
method = self.__get_method('diagnosis')
headers = self.__get_interview_id_headers(
diagnosis_request=diagnosis_request,
interview_id=interview_id
)
params = kwargs.pop('params', {})
if isinstance(diagnosis_request, models.Diagnosis):
response = self.__post(
method,
json.dumps(diagnosis_request.get_api_request()),
params=params,
headers=headers
)
diagnosis_request.update_from_api(response)
return diagnosis_request
return self.__post(
method,
json.dumps(diagnosis_request),
params=params,
headers=headers
)
def explain(self, diagnosis_request, target_id, interview_id=None, **kwargs):
"""
Makes an explain API request with provided diagnosis data and target condition.
Returns explain results with supporting and conflicting evidences.
:param diagnosis_request: Diagnosis request object or json request for diagnosis method.
:type diagnosis_request: :class:`infermedica_api.models.Diagnosis` or dict
:param target_id: Condition id for which explain shall be calculated.
:type target_id: str
:returns: A Diagnosis object with api response
:rtype: :class:`infermedica_api.models.Diagnosis`
"""
method = self.__get_method('explain')
headers = self.__get_interview_id_headers(diagnosis_request=diagnosis_request, interview_id=interview_id)
params = kwargs.pop('params', {})
if isinstance(diagnosis_request, models.Diagnosis):
request = diagnosis_request.get_explain_request(target_id)
else:
request = dict(diagnosis_request, **{'target': target_id})
response = self.__post(
method,
json.dumps(request),
params=params,
headers=headers
)
return models.ExplainResults.from_json(response)
def triage(self, diagnosis_request, interview_id=None, **kwargs):
"""
Makes a triage API request with provided diagnosis data.
Returns triage results dict.
See the docs: https://developer.infermedica.com/docs/triage.
:param diagnosis_request: Diagnosis request object or json request for diagnosis method.
:type diagnosis_request: :class:`infermedica_api.models.Diagnosis` or dict
:returns: A dict object with api response
:rtype: dict
"""
method = self.__get_method('triage')
headers = self.__get_interview_id_headers(diagnosis_request=diagnosis_request, interview_id=interview_id)
params = kwargs.pop('params', {})
request = diagnosis_request
if isinstance(diagnosis_request, models.Diagnosis):
request = diagnosis_request.get_api_request()
return self.__post(
method,
json.dumps(request),
params=params,
headers=headers
)
def condition_details(self, _id):
"""
Makes an API request and returns condition details object.
:param _id: Condition id
:type _id: str
:returns:A Condition object
:rtype: :class:`infermedica_api.models.Condition`
"""
method = self.__get_method('condition_details')
response = self.__get(method.format(**{'id': _id}))
return models.Condition.from_json(response)
def conditions_list(self):
"""
Makes an API request and returns list of condition details objects.
:returns: A ConditionList list object with Condition objects
:rtype: :class:`infermedica_api.models.ConditionList`
"""
response = self.__get(self.__get_method('conditions'))
return models.ConditionList.from_json(response)
def symptom_details(self, _id):
"""
Makes an API request and returns symptom details object.
:param _id: Symptom id
:type _id: str
:returns: A Symptom object
:rtype: :class:`infermedica_api.models.Symptom`
"""
method = self.__get_method('symptom_details')
response = self.__get(method.format(**{'id': _id}))
return models.Symptom.from_json(response)
def symptoms_list(self):
"""
Makes an API request and returns list of symptom details objects.
:returns: A SymptomList list object with Symptom objects
:rtype: :class:`infermedica_api.models.SymptomList`
"""
response = self.__get(self.__get_method('symptoms'))
return models.SymptomList.from_json(response)
def lab_test_details(self, _id):
"""
Makes an API request and returns lab_test details object.
:param _id: LabTest id
:type _id: str
:returns: A LabTest object
:rtype: :class:`infermedica_api.models.LabTest`
"""
method = self.__get_method('lab_test_details')
response = self.__get(method.format(**{'id': _id}))
return models.LabTest.from_json(response)
def lab_tests_list(self):
"""
Makes an API request and returns list of lab_test details objects.
:returns: A LabTestList list object with LabTest objects
:rtype: :class:`infermedica_api.models.LabTestList`
"""
response = self.__get(self.__get_method('lab_tests'))
return models.LabTestList.from_json(response)
def risk_factor_details(self, _id):
"""
Makes an API request and returns risk factor details object.
:param _id: risk factor id
:type _id: str
:returns: A RiskFactor object
:rtype: :class:`infermedica_api.models.RiskFactor`
"""
method = self.__get_method('risk_factor_details')
response = self.__get(method.format(**{'id': _id}))
return models.RiskFactor.from_json(response)
def risk_factors_list(self):
"""
Makes an API request and returns list of risk factors details objects.
:returns: A RiskFactorList list object with RiskFactor objects
:rtype: :class:`infermedica_api.models.RiskFactorList`
"""
response = self.__get(self.__get_method('risk_factors'))
return models.RiskFactorList.from_json(response)
def red_flags(self, diagnosis_request, max_results=8, interview_id=None, **kwargs):
"""
Makes an API request with provided diagnosis data and returns a list
of evidence that may be related to potentially life-threatening
conditions.
:param diagnosis_request: Diagnosis request object or diagnosis json.
:type diagnosis_request: :class:`infermedica_api.models.Diagnosis` or dict
:param interview_id: Unique interview id for diagnosis
:type interview_id: str
:returns: A list of RedFlag objects
:rtype: :class:`infermedica_api.models.RedFlagList`
"""
method = self.__get_method('red_flags')
headers = self.__get_interview_id_headers(
diagnosis_request=diagnosis_request,
interview_id=interview_id,
)
params = kwargs.pop('params', {})
params.update({'max_results': max_results})
request = diagnosis_request
if isinstance(diagnosis_request, models.Diagnosis):
request = diagnosis_request.get_api_request()
response = self.__post(
method,
json.dumps(request),
params=params,
headers=headers
)
return models.RedFlagList.from_json(response)
def rationale(self, diagnosis_request, interview_id=None, **kwargs):
"""
Makes an API request with provided diagnosis data and returns
an explenation of why the given question has been selected by
the reasoning engine.
:param diagnosis_request: Diagnosis request object or diagnosis json.
:type diagnosis_request: :class:`infermedica_api.models.Diagnosis` or dict
:param interview_id: Unique interview id for diagnosis
:type interview_id: str
:returns: An instance of the RationaleResult
:rtype: :class:`infermedica_api.models.RationaleResult`
"""
method = self.__get_method('rationale')
headers = self.__get_interview_id_headers(
diagnosis_request=diagnosis_request,
interview_id=interview_id,
)
params = kwargs.pop('params', {})
request = diagnosis_request
if isinstance(diagnosis_request, models.Diagnosis):
request = diagnosis_request.get_api_request()
response = self.__post(
method,
json.dumps(request),
params=params,
headers=headers
)
return models.RationaleResult.from_json(response)
__api__ = None
__api_aliased__ = {}
def get_api(alias=None):
"""
Returns global API object and if present,
otherwise raise MissingConfiguration exception.
:param alias: Alias of the API to retrieve
:type alias: str
:returns: An API object
:rtype: :class:`infermedica_api.webservice.API`
:raises: :class:`infermedica_api.exceptions.MissingConfiguration`
"""
global __api__
global __api_aliased__
if isinstance(alias, str):
try:
return __api_aliased__[alias]
except KeyError:
raise exceptions.MissingConfiguration(alias)
if __api__ is None:
raise exceptions.MissingConfiguration()
return __api__
def configure(options=None, **config):
"""
Configure and create new global API object with given configuration.
Configuration can be passed as a dict or separate arguments.
Returns newly created object.
Usage:
>>> import infermedica_api
>>> infermedica_api.configure({'app_id': 'YOUR_APP_ID', 'app_key': 'YOUR_APP_KEY'})
... or:
>>> import infermedica_api
>>> infermedica_api.configure(app_id='YOUR_APP_ID', app_key='YOUR_APP_KEY')
:param options: Dict with configuration data
:type options: dict
:returns: An API object
:rtype: :class:`infermedica_api.webservice.API`
"""
global __api__
global __api_aliased__
configuration = dict(options or {}, **config)
if 'alias' in configuration and isinstance(configuration['alias'], str):
__api_aliased__[configuration['alias']] = API(**configuration)
if configuration.get('default', False):
__api__ = __api_aliased__[configuration['alias']]
return __api_aliased__[configuration['alias']]
__api__ = API(**configuration)
return __api__ | 0.768646 | 0.111822 |
import asyncio
import pathlib
from pathlib import Path
from typing import Dict, List, Optional, Union, cast
from playwright._impl._api_structures import (
Geolocation,
HttpCredentials,
ProxySettings,
ViewportSize,
)
from playwright._impl._api_types import Error
from playwright._impl._browser import Browser, normalize_context_params
from playwright._impl._browser_context import BrowserContext
from playwright._impl._connection import (
ChannelOwner,
Connection,
from_channel,
from_nullable_channel,
)
from playwright._impl._helper import (
ColorScheme,
Env,
ReducedMotion,
locals_to_params,
not_installed_error,
)
from playwright._impl._transport import WebSocketTransport
from playwright._impl._wait_helper import throw_on_timeout
class BrowserType(ChannelOwner):
def __init__(
self, parent: ChannelOwner, type: str, guid: str, initializer: Dict
) -> None:
super().__init__(parent, type, guid, initializer)
def __repr__(self) -> str:
return f"<BrowserType name={self.name} executable_path={self.executable_path}>"
@property
def name(self) -> str:
return self._initializer["name"]
@property
def executable_path(self) -> str:
return self._initializer["executablePath"]
async def launch(
self,
executablePath: Union[str, Path] = None,
channel: str = None,
args: List[str] = None,
ignoreDefaultArgs: Union[bool, List[str]] = None,
handleSIGINT: bool = None,
handleSIGTERM: bool = None,
handleSIGHUP: bool = None,
timeout: float = None,
env: Env = None,
headless: bool = None,
devtools: bool = None,
proxy: ProxySettings = None,
downloadsPath: Union[str, Path] = None,
slowMo: float = None,
tracesDir: Union[pathlib.Path, str] = None,
chromiumSandbox: bool = None,
firefoxUserPrefs: Dict[str, Union[str, float, bool]] = None,
) -> Browser:
params = locals_to_params(locals())
normalize_launch_params(params)
try:
return from_channel(await self._channel.send("launch", params))
except Exception as e:
if "because executable doesn't exist" in str(e):
raise not_installed_error(f'"{self.name}" browser was not found.')
raise e
async def launch_persistent_context(
self,
userDataDir: Union[str, Path],
channel: str = None,
executablePath: Union[str, Path] = None,
args: List[str] = None,
ignoreDefaultArgs: Union[bool, List[str]] = None,
handleSIGINT: bool = None,
handleSIGTERM: bool = None,
handleSIGHUP: bool = None,
timeout: float = None,
env: Env = None,
headless: bool = None,
devtools: bool = None,
proxy: ProxySettings = None,
downloadsPath: Union[str, Path] = None,
slowMo: float = None,
viewport: ViewportSize = None,
screen: ViewportSize = None,
noViewport: bool = None,
ignoreHTTPSErrors: bool = None,
javaScriptEnabled: bool = None,
bypassCSP: bool = None,
userAgent: str = None,
locale: str = None,
timezoneId: str = None,
geolocation: Geolocation = None,
permissions: List[str] = None,
extraHTTPHeaders: Dict[str, str] = None,
offline: bool = None,
httpCredentials: HttpCredentials = None,
deviceScaleFactor: float = None,
isMobile: bool = None,
hasTouch: bool = None,
colorScheme: ColorScheme = None,
reducedMotion: ReducedMotion = None,
acceptDownloads: bool = None,
tracesDir: Union[pathlib.Path, str] = None,
chromiumSandbox: bool = None,
recordHarPath: Union[Path, str] = None,
recordHarOmitContent: bool = None,
recordVideoDir: Union[Path, str] = None,
recordVideoSize: ViewportSize = None,
) -> BrowserContext:
userDataDir = str(Path(userDataDir))
params = locals_to_params(locals())
await normalize_context_params(self._connection._is_sync, params)
normalize_launch_params(params)
try:
context = from_channel(
await self._channel.send("launchPersistentContext", params)
)
context._options = params
return context
except Exception as e:
if "because executable doesn't exist" in str(e):
raise not_installed_error(f'"{self.name}" browser was not found.')
raise e
async def connect_over_cdp(
self,
endpointURL: str,
timeout: float = None,
slow_mo: float = None,
headers: Dict[str, str] = None,
) -> Browser:
params = locals_to_params(locals())
params["sdkLanguage"] = (
"python" if self._connection._is_sync else "python-async"
)
response = await self._channel.send_return_as_dict("connectOverCDP", params)
browser = cast(Browser, from_channel(response["browser"]))
browser._is_remote = True
default_context = cast(
Optional[BrowserContext],
from_nullable_channel(response.get("defaultContext")),
)
if default_context:
browser._contexts.append(default_context)
default_context._browser = browser
return browser
async def connect(
self,
ws_endpoint: str,
timeout: float = None,
slow_mo: float = None,
headers: Dict[str, str] = None,
) -> Browser:
if timeout is None:
timeout = 30000
transport = WebSocketTransport(
self._connection._loop, ws_endpoint, headers, slow_mo
)
connection = Connection(
self._connection._dispatcher_fiber,
self._connection._object_factory,
transport,
)
connection._is_sync = self._connection._is_sync
connection._loop = self._connection._loop
connection._loop.create_task(connection.run())
future = connection._loop.create_task(
connection.wait_for_object_with_known_name("Playwright")
)
timeout_future = throw_on_timeout(timeout, Error("Connection timed out"))
done, pending = await asyncio.wait(
{transport.on_error_future, future, timeout_future},
return_when=asyncio.FIRST_COMPLETED,
)
if not future.done():
future.cancel()
if not timeout_future.done():
timeout_future.cancel()
playwright = next(iter(done)).result()
self._connection._child_ws_connections.append(connection)
pre_launched_browser = playwright._initializer.get("preLaunchedBrowser")
assert pre_launched_browser
browser = cast(Browser, from_channel(pre_launched_browser))
browser._is_remote = True
browser._is_connected_over_websocket = True
def handle_transport_close() -> None:
for context in browser.contexts:
for page in context.pages:
page._on_close()
context._on_close()
browser._on_close()
transport.once("close", handle_transport_close)
return browser
def normalize_launch_params(params: Dict) -> None:
if "env" in params:
params["env"] = [
{"name": name, "value": str(value)}
for [name, value] in params["env"].items()
]
if "ignoreDefaultArgs" in params:
if params["ignoreDefaultArgs"] is True:
params["ignoreAllDefaultArgs"] = True
del params["ignoreDefaultArgs"]
if "executablePath" in params:
params["executablePath"] = str(Path(params["executablePath"]))
if "downloadsPath" in params:
params["downloadsPath"] = str(Path(params["downloadsPath"])) | playwright/_impl/_browser_type.py |
import asyncio
import pathlib
from pathlib import Path
from typing import Dict, List, Optional, Union, cast
from playwright._impl._api_structures import (
Geolocation,
HttpCredentials,
ProxySettings,
ViewportSize,
)
from playwright._impl._api_types import Error
from playwright._impl._browser import Browser, normalize_context_params
from playwright._impl._browser_context import BrowserContext
from playwright._impl._connection import (
ChannelOwner,
Connection,
from_channel,
from_nullable_channel,
)
from playwright._impl._helper import (
ColorScheme,
Env,
ReducedMotion,
locals_to_params,
not_installed_error,
)
from playwright._impl._transport import WebSocketTransport
from playwright._impl._wait_helper import throw_on_timeout
class BrowserType(ChannelOwner):
def __init__(
self, parent: ChannelOwner, type: str, guid: str, initializer: Dict
) -> None:
super().__init__(parent, type, guid, initializer)
def __repr__(self) -> str:
return f"<BrowserType name={self.name} executable_path={self.executable_path}>"
@property
def name(self) -> str:
return self._initializer["name"]
@property
def executable_path(self) -> str:
return self._initializer["executablePath"]
async def launch(
self,
executablePath: Union[str, Path] = None,
channel: str = None,
args: List[str] = None,
ignoreDefaultArgs: Union[bool, List[str]] = None,
handleSIGINT: bool = None,
handleSIGTERM: bool = None,
handleSIGHUP: bool = None,
timeout: float = None,
env: Env = None,
headless: bool = None,
devtools: bool = None,
proxy: ProxySettings = None,
downloadsPath: Union[str, Path] = None,
slowMo: float = None,
tracesDir: Union[pathlib.Path, str] = None,
chromiumSandbox: bool = None,
firefoxUserPrefs: Dict[str, Union[str, float, bool]] = None,
) -> Browser:
params = locals_to_params(locals())
normalize_launch_params(params)
try:
return from_channel(await self._channel.send("launch", params))
except Exception as e:
if "because executable doesn't exist" in str(e):
raise not_installed_error(f'"{self.name}" browser was not found.')
raise e
async def launch_persistent_context(
self,
userDataDir: Union[str, Path],
channel: str = None,
executablePath: Union[str, Path] = None,
args: List[str] = None,
ignoreDefaultArgs: Union[bool, List[str]] = None,
handleSIGINT: bool = None,
handleSIGTERM: bool = None,
handleSIGHUP: bool = None,
timeout: float = None,
env: Env = None,
headless: bool = None,
devtools: bool = None,
proxy: ProxySettings = None,
downloadsPath: Union[str, Path] = None,
slowMo: float = None,
viewport: ViewportSize = None,
screen: ViewportSize = None,
noViewport: bool = None,
ignoreHTTPSErrors: bool = None,
javaScriptEnabled: bool = None,
bypassCSP: bool = None,
userAgent: str = None,
locale: str = None,
timezoneId: str = None,
geolocation: Geolocation = None,
permissions: List[str] = None,
extraHTTPHeaders: Dict[str, str] = None,
offline: bool = None,
httpCredentials: HttpCredentials = None,
deviceScaleFactor: float = None,
isMobile: bool = None,
hasTouch: bool = None,
colorScheme: ColorScheme = None,
reducedMotion: ReducedMotion = None,
acceptDownloads: bool = None,
tracesDir: Union[pathlib.Path, str] = None,
chromiumSandbox: bool = None,
recordHarPath: Union[Path, str] = None,
recordHarOmitContent: bool = None,
recordVideoDir: Union[Path, str] = None,
recordVideoSize: ViewportSize = None,
) -> BrowserContext:
userDataDir = str(Path(userDataDir))
params = locals_to_params(locals())
await normalize_context_params(self._connection._is_sync, params)
normalize_launch_params(params)
try:
context = from_channel(
await self._channel.send("launchPersistentContext", params)
)
context._options = params
return context
except Exception as e:
if "because executable doesn't exist" in str(e):
raise not_installed_error(f'"{self.name}" browser was not found.')
raise e
async def connect_over_cdp(
    self,
    endpointURL: str,
    timeout: float = None,
    slow_mo: float = None,
    headers: Dict[str, str] = None,
) -> Browser:
    """Attach to an already-running Chromium over its CDP endpoint.

    Returns the wrapped remote Browser; if the server reports an existing
    default context, it is adopted into the browser's context list.
    """
    params = locals_to_params(locals())
    # Tell the server which SDK flavor is driving it.
    sdk_language = "python" if self._connection._is_sync else "python-async"
    params["sdkLanguage"] = sdk_language
    response = await self._channel.send_return_as_dict("connectOverCDP", params)
    browser = cast(Browser, from_channel(response["browser"]))
    browser._is_remote = True
    maybe_context = from_nullable_channel(response.get("defaultContext"))
    default_context = cast(Optional[BrowserContext], maybe_context)
    if default_context:
        default_context._browser = browser
        browser._contexts.append(default_context)
    return browser
async def connect(
    self,
    ws_endpoint: str,
    timeout: float = None,
    slow_mo: float = None,
    headers: Dict[str, str] = None,
) -> Browser:
    """Connect to a remote Playwright server over WebSocket.

    Spins up a dedicated Connection on the websocket transport, races the
    handshake against *timeout*, and returns the server's pre-launched
    browser wrapped as a remote Browser.
    """
    if timeout is None:
        timeout = 30000  # default handshake timeout, in milliseconds
    transport = WebSocketTransport(
        self._connection._loop, ws_endpoint, headers, slow_mo
    )
    # Secondary connection that owns the remote browser's object tree.
    connection = Connection(
        self._connection._dispatcher_fiber,
        self._connection._object_factory,
        transport,
    )
    connection._is_sync = self._connection._is_sync
    connection._loop = self._connection._loop
    connection._loop.create_task(connection.run())
    future = connection._loop.create_task(
        connection.wait_for_object_with_known_name("Playwright")
    )
    timeout_future = throw_on_timeout(timeout, Error("Connection timed out"))
    # Race transport failure vs. handshake completion vs. timeout.
    done, pending = await asyncio.wait(
        {transport.on_error_future, future, timeout_future},
        return_when=asyncio.FIRST_COMPLETED,
    )
    if not future.done():
        future.cancel()
    if not timeout_future.done():
        timeout_future.cancel()
    # .result() re-raises if the completed task failed (error or timeout).
    playwright = next(iter(done)).result()
    self._connection._child_ws_connections.append(connection)
    pre_launched_browser = playwright._initializer.get("preLaunchedBrowser")
    assert pre_launched_browser
    browser = cast(Browser, from_channel(pre_launched_browser))
    browser._is_remote = True
    browser._is_connected_over_websocket = True

    def handle_transport_close() -> None:
        # Propagate a dropped websocket as close events through pages,
        # contexts, and finally the browser itself.
        for context in browser.contexts:
            for page in context.pages:
                page._on_close()
            context._on_close()
        browser._on_close()

    transport.once("close", handle_transport_close)
    return browser
def normalize_launch_params(params: Dict) -> None:
    """Rewrite launch options in place into the protocol's wire shape."""
    if "env" in params:
        # env mapping -> list of {name, value} records with stringified values.
        params["env"] = [
            {"name": key, "value": str(val)} for key, val in params["env"].items()
        ]
    if "ignoreDefaultArgs" in params and params["ignoreDefaultArgs"] is True:
        # Boolean True means "ignore every default arg"; the wire protocol
        # expresses that with a dedicated flag instead of the list field.
        params["ignoreAllDefaultArgs"] = True
        del params["ignoreDefaultArgs"]
    for path_key in ("executablePath", "downloadsPath"):
        if path_key in params:
            params[path_key] = str(Path(params[path_key]))
import argparse
import os
import numpy as np
import tensorflow as tf
from PIL import Image
from matplotlib import pyplot as plt
from tensorflow import keras
from data import dateset_fashion_mnist
from model import VAE

# ---------------------------------------------------------------------------
# Command-line configuration.  NOTE: the misspelled '--auther' flag is kept
# for backward compatibility with existing invocations.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--auther', help='author name', default="TuringEmmy", type=str)
parser.add_argument('--DATA', help='dataset location', default=None, type=str)
parser.add_argument('--IMAGE', help='output directory for generated images',
                    default='images', type=str)  # was None, which crashed os.path.exists
parser.add_argument('--BATCH_SIZE', help='training batch size', default=100, type=int)
parser.add_argument('--EPOCHS', help='number of training epochs', default=2, type=int)
parser.add_argument('--LEARNING_RATE', help='Adam learning rate', default=1e-3, type=float)
parser.add_argument('--IS_DRAW', help='whether to show the image grids',
                    action='store_true', default=False)
args = parser.parse_args()

# Reproducibility and logging noise reduction.
tf.random.set_seed(22)
np.random.seed(22)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
assert tf.__version__.startswith('2.')

# Shared 10x10 canvas of 28x28 grayscale tiles.
new_im = Image.new('L', (280, 280))
image_size = 28 * 28  # flattened Fashion-MNIST image
z_dim = 20            # latent dimensionality fed to the decoder

model = VAE()
model.build(input_shape=(4, image_size))
model.summary()
optimizer = keras.optimizers.Adam(args.LEARNING_RATE)
dataset, num_batches = dateset_fashion_mnist(args)


def _paste_grid(tiles):
    """Paste the first 100 (28, 28) uint8 tiles into the shared canvas."""
    index = 0
    for i in range(0, 280, 28):
        for j in range(0, 280, 28):
            new_im.paste(Image.fromarray(tiles[index], mode='L'), (i, j))
            index += 1


for epoch in range(args.EPOCHS):
    for step, x in enumerate(dataset):
        x = tf.reshape(x, [-1, image_size])
        with tf.GradientTape() as tape:
            x_reconstruction_logits, mu, log_var = model(x)
            # Reconstruction term: per-pixel binary cross-entropy on logits,
            # summed over pixels and averaged per sample.
            reconstruction_loss = tf.nn.sigmoid_cross_entropy_with_logits(
                labels=x, logits=x_reconstruction_logits)
            reconstruction_loss = tf.reduce_sum(reconstruction_loss) / args.BATCH_SIZE
            # KL(q(z|x) || N(0, I)); see Appendix B of the VAE paper or
            # https://stats.stackexchange.com/questions/7440
            kl_div = -0.5 * tf.reduce_sum(
                1. + log_var - tf.square(mu) - tf.exp(log_var), axis=-1)
            kl_div = tf.reduce_mean(kl_div)
            loss = reconstruction_loss + kl_div
        gradients = tape.gradient(loss, model.trainable_variables)
        # BUG FIX: tf.clip_by_norm returns a new tensor; the previous code
        # discarded the result, so gradients were never actually clipped.
        gradients = [tf.clip_by_norm(g, 15) for g in gradients]
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        if (step + 1) % 50 == 0:
            print("Epoch[{}/{}], Step [{}/{}], Reconst Loss: {:.4f}, KL Div: {:.4f}"
                  .format(epoch + 1, args.EPOCHS, step + 1, num_batches,
                          float(reconstruction_loss), float(kl_div)))

    # ---- sample from the prior and decode ---------------------------------
    z = tf.random.normal((args.BATCH_SIZE, z_dim))
    out = model.decode(z)  # decode with sigmoid
    out = (tf.reshape(out, [-1, 28, 28]).numpy() * 255).astype(np.uint8)
    _paste_grid(out)
    if not os.path.exists(args.IMAGE):
        os.mkdir(args.IMAGE)
    new_im.save(args.IMAGE + '/vae_sampled_epoch_%d.png' % (epoch + 1))
    if args.IS_DRAW:
        plt.imshow(np.asarray(new_im))
        plt.show()

    # ---- reconstruct half of the last training batch ----------------------
    out_logits, _, _ = model(x[:args.BATCH_SIZE // 2])
    out = tf.nn.sigmoid(out_logits)  # out is just the logits, use sigmoid
    # BUG FIX: the previous code scaled `out` by 255 both before and after
    # the concat, wrapping the reconstructed half when cast to uint8.
    # Scale exactly once, after concatenation.
    out = tf.reshape(out, [-1, 28, 28])
    x_half = tf.reshape(x[:args.BATCH_SIZE // 2], [-1, 28, 28])
    x_concat = (tf.concat([x_half, out], axis=0).numpy() * 255.).astype(np.uint8)
    _paste_grid(x_concat)
    new_im.save(args.IMAGE + '/vae_reconstructed_epoch_%d.png' % (epoch + 1))
    if args.IS_DRAW:
        plt.imshow(np.asarray(new_im))
        plt.show()
    print('New images saved !')
import argparse  # restored: argparse is used below but was missing here
import os
import numpy as np
import tensorflow as tf
from PIL import Image
from matplotlib import pyplot as plt
from tensorflow import keras
from data import dateset_fashion_mnist
from model import VAE

# ---------------------------------------------------------------------------
# Command-line configuration.  NOTE: the misspelled '--auther' flag is kept
# for backward compatibility with existing invocations.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--auther', help='author name', default="TuringEmmy", type=str)
parser.add_argument('--DATA', help='dataset location', default=None, type=str)
parser.add_argument('--IMAGE', help='output directory for generated images',
                    default='images', type=str)  # was None, which crashed os.path.exists
parser.add_argument('--BATCH_SIZE', help='training batch size', default=100, type=int)
parser.add_argument('--EPOCHS', help='number of training epochs', default=2, type=int)
parser.add_argument('--LEARNING_RATE', help='Adam learning rate', default=1e-3, type=float)
parser.add_argument('--IS_DRAW', help='whether to show the image grids',
                    action='store_true', default=False)
args = parser.parse_args()

# Reproducibility and logging noise reduction.
tf.random.set_seed(22)
np.random.seed(22)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
assert tf.__version__.startswith('2.')

# Shared 10x10 canvas of 28x28 grayscale tiles.
new_im = Image.new('L', (280, 280))
image_size = 28 * 28  # flattened Fashion-MNIST image
z_dim = 20            # latent dimensionality fed to the decoder

model = VAE()
model.build(input_shape=(4, image_size))
model.summary()
optimizer = keras.optimizers.Adam(args.LEARNING_RATE)
dataset, num_batches = dateset_fashion_mnist(args)


def _paste_grid(tiles):
    """Paste the first 100 (28, 28) uint8 tiles into the shared canvas."""
    index = 0
    for i in range(0, 280, 28):
        for j in range(0, 280, 28):
            new_im.paste(Image.fromarray(tiles[index], mode='L'), (i, j))
            index += 1


for epoch in range(args.EPOCHS):
    for step, x in enumerate(dataset):
        x = tf.reshape(x, [-1, image_size])
        with tf.GradientTape() as tape:
            x_reconstruction_logits, mu, log_var = model(x)
            # Reconstruction term: per-pixel binary cross-entropy on logits,
            # summed over pixels and averaged per sample.
            reconstruction_loss = tf.nn.sigmoid_cross_entropy_with_logits(
                labels=x, logits=x_reconstruction_logits)
            reconstruction_loss = tf.reduce_sum(reconstruction_loss) / args.BATCH_SIZE
            # KL(q(z|x) || N(0, I)); see Appendix B of the VAE paper or
            # https://stats.stackexchange.com/questions/7440
            kl_div = -0.5 * tf.reduce_sum(
                1. + log_var - tf.square(mu) - tf.exp(log_var), axis=-1)
            kl_div = tf.reduce_mean(kl_div)
            loss = reconstruction_loss + kl_div
        gradients = tape.gradient(loss, model.trainable_variables)
        # BUG FIX: tf.clip_by_norm returns a new tensor; the previous code
        # discarded the result, so gradients were never actually clipped.
        gradients = [tf.clip_by_norm(g, 15) for g in gradients]
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        if (step + 1) % 50 == 0:
            print("Epoch[{}/{}], Step [{}/{}], Reconst Loss: {:.4f}, KL Div: {:.4f}"
                  .format(epoch + 1, args.EPOCHS, step + 1, num_batches,
                          float(reconstruction_loss), float(kl_div)))

    # ---- sample from the prior and decode ---------------------------------
    z = tf.random.normal((args.BATCH_SIZE, z_dim))
    out = model.decode(z)  # decode with sigmoid
    out = (tf.reshape(out, [-1, 28, 28]).numpy() * 255).astype(np.uint8)
    _paste_grid(out)
    if not os.path.exists(args.IMAGE):
        os.mkdir(args.IMAGE)
    new_im.save(args.IMAGE + '/vae_sampled_epoch_%d.png' % (epoch + 1))
    if args.IS_DRAW:
        plt.imshow(np.asarray(new_im))
        plt.show()

    # ---- reconstruct half of the last training batch ----------------------
    out_logits, _, _ = model(x[:args.BATCH_SIZE // 2])
    out = tf.nn.sigmoid(out_logits)  # out is just the logits, use sigmoid
    # BUG FIX: the previous code scaled `out` by 255 both before and after
    # the concat, wrapping the reconstructed half when cast to uint8.
    # Scale exactly once, after concatenation.
    out = tf.reshape(out, [-1, 28, 28])
    x_half = tf.reshape(x[:args.BATCH_SIZE // 2], [-1, 28, 28])
    x_concat = (tf.concat([x_half, out], axis=0).numpy() * 255.).astype(np.uint8)
    _paste_grid(x_concat)
    new_im.save(args.IMAGE + '/vae_reconstructed_epoch_%d.png' % (epoch + 1))
    if args.IS_DRAW:
        plt.imshow(np.asarray(new_im))
        plt.show()
    print('New images saved !')
import re
import os
from fabkit import api, run, sudo, filer, env, user
from fablib import git
from fablib.base import SimpleBase
class Python(SimpleBase):
    """Provision a Python runtime (system-wide or virtualenv) on remote hosts.

    ``prefix`` selects the install root: '/usr' targets the system Python;
    any other path is created as a virtualenv by :meth:`setup`.
    """

    def __init__(self, prefix='/usr'):
        self.prefix = prefix
        # Build dependencies needed so pip can compile packages from source,
        # keyed by a pattern matched against the remote OS release.
        self.packages = {
            'CentOS Linux 7.*': [
                'python-devel',
                'libxml2-devel',
                'libxslt-devel',
                'libffi-devel',
                'postgresql-devel',
                'openssl-devel',
                'blas-devel',
                'lapack-devel',
                'atlas-devel',
                'gcc',
                'gcc-gfortran',
                'wget',
            ],
            'Ubuntu 14.*': [
                'python-dev',
                'libxml2-dev',
                'libxslt-dev',
                'libffi-dev',
                'libssl-dev',
                'libblas-dev',
                'liblapack-dev',
                'libatlas-dev',
                'gcc',
                'gfortran',
                'wget',
            ]
        }

    def get_prefix(self):
        # Install root of this Python environment.
        return self.prefix

    def get_cmd(self):
        # Path of the interpreter executable under the prefix.
        return '{0}/bin/python'.format(self.prefix)  # noqa

    def setup(self):
        """Install easy_install and pip on the remote host.

        pip compiles packages from source at install time, so the required
        -devel packages are installed first.  (Translated from the original
        Japanese docstring.)
        """
        self.init()
        git.setup()
        self.install_packages()
        with api.warn_only():
            result = run('which easy_install')
            if result.return_code != 0:
                sudo('sh -c "cd /tmp/ && wget https://bootstrap.pypa.io/ez_setup.py -O - | python"')
        with api.warn_only():
            result = run('which pip')
            if result.return_code != 0:
                sudo('easy_install pip')
        if self.prefix != '/usr':
            # Non-system prefix: create a virtualenv there (only once).
            sudo('pip install virtualenv')
            if not filer.exists(self.prefix):
                sudo('virtualenv {0} --system-site-packages'.format(self.prefix))

    def install(self, package_name=None, requirements=None):
        # Install either a single package or a requirements file into the
        # prefix's pip; `package_name` wins when both are given.
        if package_name:
            sudo("{0}/bin/pip install '{1}'".format(self.prefix, package_name))
        elif requirements:
            sudo("{0}/bin/pip install -r {1}".format(self.prefix, requirements))

    def pip_show(self, package_name):
        """Parse ``pip show <package>`` output into a (name, version) tuple.

        Returns None when the package is not installed or the output cannot
        be parsed.  (Translated from the original Japanese docstring.)
        """
        with api.warn_only():
            result = run('pip show {0}'.format(package_name))
            if result == '':
                return None
            RE_NAME = re.compile('Name: (.+)\r')
            RE_VERSION = re.compile('Version: (.+)\r')
            finded_name = RE_NAME.findall(result)
            if len(finded_name) == 0:
                return None
            name = finded_name[0]
            finded_version = RE_VERSION.findall(result)
            if len(finded_version) == 0:
                return None
            version = finded_version[0]
            return (name, version)

    def setup_package(self, name, git_repos=[], exec_user='root', branch=None, is_develop=False,
                      mk_links=[], mk_dirs=[], cp_files=[], services=[], requirements=[], **kwargs):
        """Check out, build and install a package plus its systemd services.

        NOTE(review): the mutable default arguments are shared across calls;
        they are only iterated here, so this is currently harmless.
        """
        user.add(exec_user)
        for git_repo in git_repos:
            git_dir = os.path.join(self.prefix, 'src', git_repo['name'])
            git_dir_parent = os.path.dirname(git_dir)
            filer.mkdir(git_dir_parent, mode='777')
            git.sync(git_repo['url'], branch=git_repo['branch'], dest=git_dir)
            requirements_txt = '{0}/requirements.txt'.format(git_dir)
            if filer.exists(requirements_txt):
                self.install(requirements=requirements_txt)
            if is_develop:
                sudo('sh -c "cd {0} && {1}/bin/python setup.py develop"'.format(
                    git_dir, self.prefix))
            else:
                sudo('sh -c "cd {0} && {1}/bin/python setup.py install"'.format(
                    git_dir, self.prefix))
        for mkdir in mk_dirs:
            filer.mkdir(mkdir['path'], owner=mkdir.get('owner', exec_user))
        for cpfile in cp_files:
            if cpfile and filer.exists(cpfile['dest']):
                continue
            # NOTE(review): git_dir here is whatever repo was synced last.
            sudo('cp -r {0} {1}'.format(
                os.path.join(git_dir, cpfile['src']), cpfile['dest']))
        for service in services:
            service['user'] = exec_user
            filer.template(
                '/etc/systemd/system/{0}.service'.format(service['name']),
                src='systemd.service.j2', mode='755',
                data=service)
        for requirement in requirements:
            self.install(requirement)
        for link in mk_links:
            if not filer.exists(link['dest']):
                sudo('ln -s {0} {1}'.format(link['src'], link['dest']))

    def get_site_packages(self):
        # Ask the remote interpreter for its site-packages directory.
        return run('{0}/bin/python -c "from distutils.sysconfig import get_python_lib; print get_python_lib()"'.format(self.prefix))  # noqa
import re
import os
from fabkit import api, run, sudo, filer, env, user
from fablib import git
from fablib.base import SimpleBase
class Python(SimpleBase):
    """Provision a Python runtime (system-wide or virtualenv) on remote hosts.

    ``prefix`` selects the install root: '/usr' targets the system Python;
    any other path is created as a virtualenv by :meth:`setup`.
    """

    def __init__(self, prefix='/usr'):
        self.prefix = prefix
        # Build dependencies needed so pip can compile packages from source,
        # keyed by a pattern matched against the remote OS release.
        self.packages = {
            'CentOS Linux 7.*': [
                'python-devel',
                'libxml2-devel',
                'libxslt-devel',
                'libffi-devel',
                'postgresql-devel',
                'openssl-devel',
                'blas-devel',
                'lapack-devel',
                'atlas-devel',
                'gcc',
                'gcc-gfortran',
                'wget',
            ],
            'Ubuntu 14.*': [
                'python-dev',
                'libxml2-dev',
                'libxslt-dev',
                'libffi-dev',
                'libssl-dev',
                'libblas-dev',
                'liblapack-dev',
                'libatlas-dev',
                'gcc',
                'gfortran',
                'wget',
            ]
        }

    def get_prefix(self):
        # Install root of this Python environment.
        return self.prefix

    def get_cmd(self):
        # Path of the interpreter executable under the prefix.
        return '{0}/bin/python'.format(self.prefix)  # noqa

    def setup(self):
        """Install easy_install and pip on the remote host.

        pip compiles packages from source at install time, so the required
        -devel packages are installed first.  (Translated from the original
        Japanese docstring.)
        """
        self.init()
        git.setup()
        self.install_packages()
        with api.warn_only():
            result = run('which easy_install')
            if result.return_code != 0:
                sudo('sh -c "cd /tmp/ && wget https://bootstrap.pypa.io/ez_setup.py -O - | python"')
        with api.warn_only():
            result = run('which pip')
            if result.return_code != 0:
                sudo('easy_install pip')
        if self.prefix != '/usr':
            # Non-system prefix: create a virtualenv there (only once).
            sudo('pip install virtualenv')
            if not filer.exists(self.prefix):
                sudo('virtualenv {0} --system-site-packages'.format(self.prefix))

    def install(self, package_name=None, requirements=None):
        # Install either a single package or a requirements file into the
        # prefix's pip; `package_name` wins when both are given.
        if package_name:
            sudo("{0}/bin/pip install '{1}'".format(self.prefix, package_name))
        elif requirements:
            sudo("{0}/bin/pip install -r {1}".format(self.prefix, requirements))

    def pip_show(self, package_name):
        """Parse ``pip show <package>`` output into a (name, version) tuple.

        Returns None when the package is not installed or the output cannot
        be parsed.  (Translated from the original Japanese docstring.)
        """
        with api.warn_only():
            result = run('pip show {0}'.format(package_name))
            if result == '':
                return None
            RE_NAME = re.compile('Name: (.+)\r')
            RE_VERSION = re.compile('Version: (.+)\r')
            finded_name = RE_NAME.findall(result)
            if len(finded_name) == 0:
                return None
            name = finded_name[0]
            finded_version = RE_VERSION.findall(result)
            if len(finded_version) == 0:
                return None
            version = finded_version[0]
            return (name, version)

    def setup_package(self, name, git_repos=[], exec_user='root', branch=None, is_develop=False,
                      mk_links=[], mk_dirs=[], cp_files=[], services=[], requirements=[], **kwargs):
        """Check out, build and install a package plus its systemd services.

        NOTE(review): the mutable default arguments are shared across calls;
        they are only iterated here, so this is currently harmless.
        """
        user.add(exec_user)
        for git_repo in git_repos:
            git_dir = os.path.join(self.prefix, 'src', git_repo['name'])
            git_dir_parent = os.path.dirname(git_dir)
            filer.mkdir(git_dir_parent, mode='777')
            git.sync(git_repo['url'], branch=git_repo['branch'], dest=git_dir)
            requirements_txt = '{0}/requirements.txt'.format(git_dir)
            if filer.exists(requirements_txt):
                self.install(requirements=requirements_txt)
            if is_develop:
                sudo('sh -c "cd {0} && {1}/bin/python setup.py develop"'.format(
                    git_dir, self.prefix))
            else:
                sudo('sh -c "cd {0} && {1}/bin/python setup.py install"'.format(
                    git_dir, self.prefix))
        for mkdir in mk_dirs:
            filer.mkdir(mkdir['path'], owner=mkdir.get('owner', exec_user))
        for cpfile in cp_files:
            if filer.exists(cpfile['dest']):
                continue
            # NOTE(review): git_dir here is whatever repo was synced last.
            sudo('cp -r {0} {1}'.format(
                os.path.join(git_dir, cpfile['src']), cpfile['dest']))
        for service in services:
            service['user'] = exec_user
            filer.template(
                '/etc/systemd/system/{0}.service'.format(service['name']),
                src='systemd.service.j2', mode='755',
                data=service)
        for requirement in requirements:
            self.install(requirement)
        for link in mk_links:
            if not filer.exists(link['dest']):
                sudo('ln -s {0} {1}'.format(link['src'], link['dest']))

    def get_site_packages(self):
        # Ask the remote interpreter for its site-packages directory.
        return run('{0}/bin/python -c "from distutils.sysconfig import get_python_lib; print get_python_lib()"'.format(self.prefix))  # noqa
from soa import token
EOF_CHAR = chr(0)
class Lexer():
    """State-machine lexer for the soa language.

    Each ``lex_*`` method consumes part of the input and returns the next
    state function to run (or ``None`` to stop); :meth:`run` drives the loop.
    """

    def __init__(self, input_text):
        # A NUL sentinel is appended so states can detect EOF via peek().
        self.input_text = input_text + EOF_CHAR
        self.pos = 0        # index of the next character to read
        self.buffer = []    # characters consumed since the last emit/ignore
        self.tokens = []    # emitted tokens as {"Pos", "Typ", "Val"} dicts

    def next_char(self):
        "next moves the position over and returns the next character"
        if self.pos >= len(self.input_text):
            return chr(0)
        char = self.input_text[self.pos]
        self.pos += 1
        self.buffer.append(char)
        return char

    def backup(self):
        "backup removes the last character from the buffer and sets the position back 1"
        self.pos -= 1
        self.buffer = self.buffer[:-1]

    def ignore(self):
        "ignore resets the buffer"
        self.buffer = []

    def peek(self):
        "peek returns the next character without moving the position or adding to the buffer"
        char = self.next_char()
        self.backup()
        return char

    def emit_with_value(self, token_type, value):
        "emitWithValue adds a new token to the tokens array"
        self.tokens.append({
            "Pos": self.pos,
            "Typ": token_type,
            "Val": value
        })

    def emit(self, token_type):
        "emit uses the buffer when adding to the tokens array"
        self.emit_with_value(token_type, "".join(self.buffer))

    def follow(self, expected):
        "follow determines if the expected string matches the input_text"
        if len(expected) > len(self.input_text[self.pos:]):
            return False
        got = "".join(self.input_text[self.pos:self.pos + len(expected)])
        return got == expected

    def forward(self, times):
        "forward moves the position forward and adds to the buffer times times"
        for _ in range(times):
            self.next_char()

    def lex_main(self):
        "lex_main is the main loop for the lexer"
        while True:
            next_char = self.peek()
            if next_char == "#":
                return self.lex_comment
            if self.follow("set"):
                return self.lex_set
            elif self.follow("out"):
                return self.lex_out
            elif self.follow("add"):
                return self.lex_add
            elif self.follow("exit"):
                return self.lex_exit
            elif self.follow("if"):
                return self.lex_if
            elif self.follow("fi"):
                return self.lex_fi
            if token.is_eol(next_char):
                return self.lex_eol
            if next_char == EOF_CHAR:
                self.ignore()
                self.emit(token.EOF)
                break
            # Unrecognized character: skip it and keep scanning.
            self.next_char()

    def lex_values(self):
        "lex_values lexes registers and ints"
        while True:
            next_char = self.peek()
            if next_char == "R":
                return self.lex_register
            elif next_char == "#":
                return self.lex_comment
            if token.is_digit(next_char):
                return self.lex_int
            if token.is_eol(next_char):
                return self.lex_eol
            if next_char == EOF_CHAR:
                self.ignore()
                self.emit(token.EOF)
                break
            # Unrecognized character: skip it and keep scanning.
            self.next_char()

    def lex_comment(self):
        "lex_comment just ignores the rest of the line"
        x = self.peek()
        while x != EOF_CHAR:
            self.next_char()
            x = self.peek()
        self.ignore()
        return self.lex_main

    def lex_register(self):
        "lex_register produces a register token"
        self.ignore()
        self.next_char()  # consume the leading 'R' (kept in the token value)
        x = self.peek()
        while token.is_digit(x):
            self.next_char()
            x = self.peek()
        self.emit(token.REGISTER)
        return self.lex_values

    def lex_set(self):
        "lex_set produces a set token"
        self.ignore()
        self.forward(3)
        self.emit(token.SET)
        return self.lex_values

    def lex_out(self):
        "lex_out produces an out token"
        self.ignore()
        self.forward(3)
        self.emit(token.OUT)
        return self.lex_values

    def lex_add(self):
        "lex_add produces an add token"
        self.ignore()
        self.forward(3)
        self.emit(token.ADD)
        return self.lex_values

    def lex_if(self):
        "lex_if produces an if token"
        self.ignore()
        self.forward(2)
        self.emit(token.IF)
        return self.lex_values

    def lex_fi(self):
        "lex_fi produces a fi token"
        self.ignore()
        self.forward(2)
        self.emit(token.FI)
        return self.lex_main

    def lex_exit(self):
        "lex_exit produces an exit token"
        self.ignore()
        self.forward(4)
        self.emit(token.EXIT)
        return self.lex_values

    def lex_int(self):
        "lex_int produces an int token"
        self.ignore()
        x = self.peek()
        while token.is_digit(x):
            self.next_char()
            x = self.peek()
        self.emit(token.INT)
        return self.lex_values

    def lex_eol(self):
        "lex_eol produces an eol token"
        self.ignore()
        # Handle CRLF: when a carriage return is seen, also consume the LF.
        if self.next_char() == "\r":
            self.next_char()
        self.emit(token.EOL)
        return self.lex_main

    def run(self):
        "run starts up the lexer loop"
        state = self.lex_main()
        while state:
            state = state()
def lex_soa(input_text):
    """Tokenize *input_text* with a fresh Lexer and return its token list."""
    machine = Lexer(input_text)
    machine.run()
    return machine.tokens
EOF_CHAR = chr(0)
class Lexer():
    """State-machine lexer for the soa language.

    Each ``lex_*`` method consumes part of the input and returns the next
    state function to run (or ``None`` to stop); :meth:`run` drives the loop.
    """

    def __init__(self, input_text):
        # A NUL sentinel is appended so states can detect EOF via peek().
        self.input_text = input_text + EOF_CHAR
        self.pos = 0        # index of the next character to read
        self.buffer = []    # characters consumed since the last emit/ignore
        self.tokens = []    # emitted tokens as {"Pos", "Typ", "Val"} dicts

    def next_char(self):
        "next moves the position over and returns the next character"
        if self.pos >= len(self.input_text):
            return chr(0)
        char = self.input_text[self.pos]
        self.pos += 1
        self.buffer.append(char)
        return char

    def backup(self):
        "backup removes the last character from the buffer and sets the position back 1"
        self.pos -= 1
        self.buffer = self.buffer[:-1]

    def ignore(self):
        "ignore resets the buffer"
        self.buffer = []

    def peek(self):
        "peek returns the next character without moving the position or adding to the buffer"
        char = self.next_char()
        self.backup()
        return char

    def emit_with_value(self, token_type, value):
        "emitWithValue adds a new token to the tokens array"
        self.tokens.append({
            "Pos": self.pos,
            "Typ": token_type,
            "Val": value
        })

    def emit(self, token_type):
        "emit uses the buffer when adding to the tokens array"
        self.emit_with_value(token_type, "".join(self.buffer))

    def follow(self, expected):
        "follow determines if the expected string matches the input_text"
        if len(expected) > len(self.input_text[self.pos:]):
            return False
        got = "".join(self.input_text[self.pos:self.pos + len(expected)])
        return got == expected

    def forward(self, times):
        "forward moves the position forward and adds to the buffer times times"
        for _ in range(times):
            self.next_char()

    def lex_main(self):
        "lex_main is the main loop for the lexer"
        while True:
            next_char = self.peek()
            if next_char == "#":
                return self.lex_comment
            if self.follow("set"):
                return self.lex_set
            elif self.follow("out"):
                return self.lex_out
            elif self.follow("add"):
                return self.lex_add
            elif self.follow("exit"):
                return self.lex_exit
            elif self.follow("if"):
                return self.lex_if
            elif self.follow("fi"):
                return self.lex_fi
            if token.is_eol(next_char):
                return self.lex_eol
            if next_char == EOF_CHAR:
                self.ignore()
                self.emit(token.EOF)
                break
            # Unrecognized character: skip it and keep scanning.
            self.next_char()

    def lex_values(self):
        "lex_values lexes registers and ints"
        while True:
            next_char = self.peek()
            if next_char == "R":
                return self.lex_register
            elif next_char == "#":
                return self.lex_comment
            if token.is_digit(next_char):
                return self.lex_int
            if token.is_eol(next_char):
                return self.lex_eol
            if next_char == EOF_CHAR:
                self.ignore()
                self.emit(token.EOF)
                break
            # Unrecognized character: skip it and keep scanning.
            self.next_char()

    def lex_comment(self):
        "lex_comment just ignores the rest of the line"
        x = self.peek()
        while x != EOF_CHAR:
            self.next_char()
            x = self.peek()
        self.ignore()
        return self.lex_main

    def lex_register(self):
        "lex_register produces a register token"
        self.ignore()
        self.next_char()  # consume the leading 'R' (kept in the token value)
        x = self.peek()
        while token.is_digit(x):
            self.next_char()
            x = self.peek()
        self.emit(token.REGISTER)
        return self.lex_values

    def lex_set(self):
        "lex_set produces a set token"
        self.ignore()
        self.forward(3)
        self.emit(token.SET)
        return self.lex_values

    def lex_out(self):
        "lex_out produces an out token"
        self.ignore()
        self.forward(3)
        self.emit(token.OUT)
        return self.lex_values

    def lex_add(self):
        "lex_add produces an add token"
        self.ignore()
        self.forward(3)
        self.emit(token.ADD)
        return self.lex_values

    def lex_if(self):
        "lex_if produces an if token"
        self.ignore()
        self.forward(2)
        self.emit(token.IF)
        return self.lex_values

    def lex_fi(self):
        "lex_fi produces a fi token"
        self.ignore()
        self.forward(2)
        self.emit(token.FI)
        return self.lex_main

    def lex_exit(self):
        "lex_exit produces an exit token"
        self.ignore()
        self.forward(4)
        self.emit(token.EXIT)
        return self.lex_values

    def lex_int(self):
        "lex_int produces an int token"
        self.ignore()
        x = self.peek()
        while token.is_digit(x):
            self.next_char()
            x = self.peek()
        self.emit(token.INT)
        return self.lex_values

    def lex_eol(self):
        "lex_eol produces an eol token"
        self.ignore()
        # Handle CRLF: when a carriage return is seen, also consume the LF.
        if self.next_char() == "\r":
            self.next_char()
        self.emit(token.EOL)
        return self.lex_main

    def run(self):
        "run starts up the lexer loop"
        state = self.lex_main()
        while state:
            state = state()
def lex_soa(input_text):
    """Build a Lexer over *input_text*, run it, and return the tokens."""
    lx = Lexer(input_text)
    lx.run()
    return lx.tokens
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from analysis.util import stimulus_names
from sklearn.decomposition import PCA
def stretch_axes(points):
    """Project onto the first five principal components and equalize variance.

    Each PC coordinate is divided by the standard deviation of the values
    along that axis, giving every axis unit variance.  This leaves the radial
    distribution of the points unchanged; only their distance from the origin
    in different directions is affected.
    """
    n_components = 5
    # Fit PCA and project the data into the 5-dimensional PC space.
    projected = PCA(n_components=n_components).fit_transform(points)
    # Rescale one axis at a time so each has unit (population) variance.
    for axis in range(n_components):
        projected[:, axis] = projected[:, axis] / np.std(projected[:, axis])
    return projected
def scatterplots_2d_annotated(subject_name, subject_exp_data, pc1=1, pc2=2):
    """Scatter-plot two principal components with a stimulus label per point.

    pc1/pc2 are 1-based column indices into subject_exp_data.
    Assumes one row per stimulus, in stimulus_names() order -- TODO confirm.
    """
    sns.set_style('darkgrid')
    stimuli = stimulus_names()
    fig, ax = plt.subplots()
    plt.scatter(subject_exp_data[:, pc1 - 1], subject_exp_data[:, pc2 - 1], c="#31505A", marker='.')
    # add labels to points
    label_idx = 0
    for x, y in zip(subject_exp_data[:, pc1 - 1], subject_exp_data[:, pc2 - 1]):
        plt.annotate(stimuli[label_idx],  # this is the text
                     (x, y),  # this is the point to label
                     textcoords="offset points",  # how to position the text
                     xytext=(0, 1.5),  # distance from text to points (x,y)
                     size=10,
                     ha='center')  # horizontal alignment can be left, right or center
        label_idx += 1
    plt.xlabel('Principal Component {}'.format(pc1))
    plt.ylabel('Principal Component {}'.format(pc2))
    plt.title(subject_name)
    # Square aspect with fixed limits so plots are comparable across subjects.
    plt.axis('square')
    ax.set_xlim(-2, 3.5)
    ax.set_ylim(-2, 3.5)
    plt.show()
if __name__ == '__main__':
    # Interactive entry point: prompt for a saved 5-D embedding and subject id,
    # normalize per-axis variance, then plot PC1 vs PC2 with stimulus labels.
    PATH_TO_NPY_FILE = input("Path to npy file containing 5D coordinates "
                             "(e.g., ./sample-materials/subject-data/model-fitting/S7/"
                             "S7_word_anchored_points_sigma_0.18_dim_5.npy): ")
    NAME = input("Subject name or ID (e.g., S7): ")
    data = np.load(PATH_TO_NPY_FILE)
    data = stretch_axes(data)
    scatterplots_2d_annotated(NAME, data)
import seaborn as sns
import matplotlib.pyplot as plt
from analysis.util import stimulus_names
from sklearn.decomposition import PCA
def stretch_axes(points):
    """
    Run PCA.
    Stretch axes so as to make the variance along each axis the same.
    Do so by dividing the values of the coordinate by the standard deviation of values along that axis.
    This way points along each axis will have unit variance. This does not affect the radial
    distribution of points, only their distance from the origin in different directions.
    """
    n_components = 5
    pca = PCA(n_components=n_components)
    # obtain the 5 PC directions and project data onto that space
    temp = pca.fit_transform(points)
    # normalize each axis by its standard deviation to make sd across each axis the same
    # (np.std uses ddof=0, i.e. the population standard deviation)
    for i in range(n_components):
        temp[:, i] = temp[:, i] / np.std(temp[:, i])
    points = temp
    return points
def scatterplots_2d_annotated(subject_name, subject_exp_data, pc1=1, pc2=2):
    """Scatter-plot two principal components with a stimulus label per point.

    pc1/pc2 are 1-based column indices into subject_exp_data.
    Assumes one row per stimulus, in stimulus_names() order -- TODO confirm.
    """
    sns.set_style('darkgrid')
    stimuli = stimulus_names()
    fig, ax = plt.subplots()
    plt.scatter(subject_exp_data[:, pc1 - 1], subject_exp_data[:, pc2 - 1], c="#31505A", marker='.')
    # add labels to points
    label_idx = 0
    for x, y in zip(subject_exp_data[:, pc1 - 1], subject_exp_data[:, pc2 - 1]):
        plt.annotate(stimuli[label_idx],  # this is the text
                     (x, y),  # this is the point to label
                     textcoords="offset points",  # how to position the text
                     xytext=(0, 1.5),  # distance from text to points (x,y)
                     size=10,
                     ha='center')  # horizontal alignment can be left, right or center
        label_idx += 1
    plt.xlabel('Principal Component {}'.format(pc1))
    plt.ylabel('Principal Component {}'.format(pc2))
    plt.title(subject_name)
    # Square aspect with fixed limits so plots are comparable across subjects.
    plt.axis('square')
    ax.set_xlim(-2, 3.5)
    ax.set_ylim(-2, 3.5)
    plt.show()
if __name__ == '__main__':
    # Interactive entry point: prompt for a saved 5-D embedding and subject id,
    # normalize per-axis variance, then plot PC1 vs PC2 with stimulus labels.
    PATH_TO_NPY_FILE = input("Path to npy file containing 5D coordinates "
                             "(e.g., ./sample-materials/subject-data/model-fitting/S7/"
                             "S7_word_anchored_points_sigma_0.18_dim_5.npy): ")
    NAME = input("Subject name or ID (e.g., S7): ")
    data = np.load(PATH_TO_NPY_FILE)
    data = stretch_axes(data)
    scatterplots_2d_annotated(NAME, data)
import torch
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
def evaluate_pose(x, att):
    """Attention-weighted centroid of the points for each part.

    x:   (B, 3, N) point coordinates.
    att: (B, 1, K, N, 1) attention over the N points for each of K parts.
    Returns (B, 3, K, 1) centroids.
    """
    weight_sum = att.sum(dim=3, keepdim=True)           # (B, 1, K, 1, 1)
    weights = att / torch.clamp(weight_sum, min=1e-3)   # guard near-empty parts
    expanded = x[:, :, None, :, None]                   # (B, 3, 1, N, 1)
    return torch.sum(weights * expanded, dim=3)         # (B, 3, K, 1)
def spatial_variance(x, att, norm_type="l2"):
    """Trace of the attention-weighted covariance per part, reduced to a scalar.

    x:   (B, 3, N) point coordinates.
    att: (B, 1, K, N, 1) attention over the N points for each of K parts.
    The per-part traces (B, K) are combined across parts with an l2 or l1
    norm and averaged over the batch.
    """
    weight_sum = att.sum(dim=3, keepdim=True)               # (B, 1, K, 1, 1)
    weights = att / torch.clamp(weight_sum, min=1e-3)
    centers = torch.sum(
        weights * x[:, :, None, :, None], dim=3)            # (B, 3, K, 1)
    centered = (x[:, :, None] - centers).permute(0, 2, 3, 1)  # (B, K, N, 3)
    weights = weights.squeeze(1)                            # (B, K, N, 1)
    cov = torch.matmul(
        centered.transpose(3, 2), weights * centered)       # (B, K, 3, 3)
    trace = torch.diagonal(cov, dim1=-2, dim2=-1).sum(2)    # (B, K)
    if norm_type == "l2":
        return trace.norm(dim=1).mean()
    if norm_type == "l1":
        return trace.sum(dim=1).mean()
    raise NotImplementedError
def reg_att(att, x, config, **kwargs):
    """Attention regularization terms.

    att: (B, 1, K, N, 1) attention over N input points for K parts.
    x:   (B, 3, N) point coordinates.
    Returns a dict of individually weighted loss terms plus their total
    under the key "sum".
    """
    loss_dict = {}
    # Localization loss
    if config.loss_volume > 0:
        loss_volume = spatial_variance(
            x, att, norm_type=config.spatial_var_norm)
        loss_dict["loss_volume"] = loss_volume * config.loss_volume
    # Equilibrium loss: keep total attention mass similar across parts.
    if config.loss_att_amount > 0:
        pai = att.sum(dim=3, keepdim=True)  # B1K11
        loss_att_amount = torch.var(pai.reshape(pai.shape[0], -1), dim=1).mean()
        loss_dict["loss_att_amount"] = loss_att_amount * config.loss_att_amount
    # Equivariance loss: keypoints must transform with the ground-truth pose.
    if config.loss_2cps > 0:
        rt_rels = kwargs["rt_rels"]  # gt relative pose among two views
        # get kps
        if not "kps" in kwargs:
            if config.att_type_out in ["gmm", "None"]:
                pai = att.sum(dim=3, keepdim=True)  # B1K11
                att = att / torch.clamp(pai, min=1e-3)
                kps = torch.sum(
                    att * x[:, :, None, :, None], dim=3).squeeze(-1)  # B3K
            # batch is laid out as num_view stacked views of the same scenes
            assert kps.shape[0] % config.num_view == 0
            bs = kps.shape[0] // config.num_view
            kps0 = kps[:bs]
            kps1s = [kps[bs*i:bs*(i+1)] for i in range(1, config.num_view)]
        else:
            kps0, kps1s = kwargs["kps"]
        loss_2cps = []
        for i in range(config.num_view - 1):
            rt_rel = rt_rels[i]
            R_gt, T_gt = rt_rel
            # Map view-0 keypoints into view i+1's frame and compare.
            kps0_can = torch.matmul(R_gt, kps0) + T_gt
            kps1 = kps1s[i]
            loss_2cps += [((kps0_can - kps1) ** 2).sum(1).mean()]
        loss_2cps = torch.stack(loss_2cps).mean()
        loss_dict["loss_2cps"] = loss_2cps * config.loss_2cps
    # to sum up losses
    loss_dict["sum"] = torch.stack(list(loss_dict.values())).sum()
    return loss_dict
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
def evaluate_pose(x, att):
# x: B3N, att: B1KN1
# ts: B3k1
pai = att.sum(dim=3, keepdim=True) # B1K11
att = att / torch.clamp(pai, min=1e-3)
ts = torch.sum(
att * x[:, :, None, :, None], dim=3) # B3K1
return ts
def spatial_variance(x, att, norm_type="l2"):
pai = att.sum(dim=3, keepdim=True) # B1K11
att = att / torch.clamp(pai, min=1e-3)
ts = torch.sum(
att * x[:, :, None, :, None], dim=3) # B3K1
x_centered = x[:, :, None] - ts # B3KN
x_centered = x_centered.permute(0, 2, 3, 1) # BKN3
att = att.squeeze(1) # BKN1
cov = torch.matmul(
x_centered.transpose(3, 2), att * x_centered) # BK33
# l2 norm
vol = torch.diagonal(cov, dim1=-2, dim2=-1).sum(2) # BK
if norm_type == "l2":
vol = vol.norm(dim=1).mean()
elif norm_type == "l1":
vol = vol.sum(dim=1).mean()
else:
# vol, _ = torch.diagonal(cov, dim1=-2, dim2=-1).sum(2).max(dim=1)
raise NotImplementedError
return vol
def reg_att(att, x, config, **kwargs):
loss_dict = {}
# Localization loss
if config.loss_volume > 0:
loss_volume = spatial_variance(
x, att, norm_type=config.spatial_var_norm)
loss_dict["loss_volume"] = loss_volume * config.loss_volume
# Equilibrium loss
if config.loss_att_amount > 0:
pai = att.sum(dim=3, keepdim=True) # B1K11
loss_att_amount = torch.var(pai.reshape(pai.shape[0], -1), dim=1).mean()
loss_dict["loss_att_amount"] = loss_att_amount * config.loss_att_amount
# Equivariance loss
if config.loss_2cps > 0:
rt_rels = kwargs["rt_rels"] # gt relative pose among two views
# get kps
if not "kps" in kwargs:
if config.att_type_out in ["gmm", "None"]:
pai = att.sum(dim=3, keepdim=True) # B1K11
att = att / torch.clamp(pai, min=1e-3)
kps = torch.sum(
att * x[:, :, None, :, None], dim=3).squeeze(-1)# B3K
assert kps.shape[0] % config.num_view == 0
bs = kps.shape[0] // config.num_view
kps0 = kps[:bs]
kps1s = [kps[bs*i:bs*(i+1)] for i in range(1, config.num_view)]
else:
kps0, kps1s = kwargs["kps"]
loss_2cps = []
for i in range(config.num_view - 1):
rt_rel = rt_rels[i]
R_gt, T_gt = rt_rel
kps0_can = torch.matmul(R_gt, kps0) + T_gt
kps1 = kps1s[i]
loss_2cps += [((kps0_can - kps1) ** 2).sum(1).mean()]
loss_2cps = torch.stack(loss_2cps).mean()
loss_dict["loss_2cps"] = loss_2cps * config.loss_2cps
# to sum up losses
loss_dict["sum"] = torch.stack(list(loss_dict.values())).sum()
return loss_dict | 0.539469 | 0.569254 |
from types import TracebackType
from typing import Any, Optional, Type
from scrapli.channel import Channel
from scrapli.driver.base_driver import ASYNCIO_TRANSPORTS, ScrapeBase
from scrapli.exceptions import TransportPluginError
class Scrape(ScrapeBase):
def __init__(self, **kwargs: Any) -> None:
"""
Scrape Object
Scrape is the base class for NetworkDriver, and subsequent platform specific drivers (i.e.
IOSXEDriver). Scrape can be used on its own and offers a semi-pexpect like experience in
that it doesn't know or care about privilege levels, platform types, and things like that.
*Note* most arguments passed to Scrape do not actually get assigned to the scrape object
itself, but instead are used to construct the Transport and Channel classes that Scrape
relies on, see Transport and Channel docs for details.
Args:
kwargs: Keyword arguments to pass to `ScrapeBase` -- see `ScrapeBase` for available args
Returns:
N/A # noqa: DAR202
Raises:
TransportPluginError: if attempting to use an asyncio transport plugin
"""
super().__init__(**kwargs)
if self._transport in ASYNCIO_TRANSPORTS:
raise TransportPluginError(
f"Attempting to use transport type {self._transport} with a sync driver, "
"must use a non-asyncio transport"
)
self.channel = Channel(transport=self.transport, **self.channel_args)
def __enter__(self) -> "Scrape":
"""
Enter method for context manager
Args:
N/A
Returns:
self: instance of self
Raises:
N/A
"""
self.open()
return self
def __exit__(
self,
exception_type: Optional[Type[BaseException]],
exception_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
"""
Exit method to cleanup for context manager
Args:
exception_type: exception type being raised
exception_value: message from exception being raised
traceback: traceback from exception being raised
Returns:
N/A # noqa: DAR202
Raises:
N/A
"""
self.close()
def open(self) -> None:
"""
Open Transport (socket/session) and establish channel
If on_open callable provided, execute that callable after opening connection
Args:
N/A
Returns:
N/A # noqa: DAR202
Raises:
N/A
"""
self.logger.info(f"Opening connection to {self._initialization_args['host']}")
self.transport.open()
if self.on_open:
self.on_open(self)
self.logger.info(f"Connection to {self._initialization_args['host']} opened successfully")
def close(self) -> None:
"""
Close Transport (socket/session)
Args:
N/A
Returns:
N/A # noqa: DAR202
Raises:
N/A
"""
self.logger.info(f"Closing connection to {self._initialization_args['host']}")
if self.on_close:
self.on_close(self)
self.transport.close()
self.logger.info(f"Connection to {self._initialization_args['host']} closed successfully") | venv/Lib/site-packages/scrapli/driver/driver.py | from types import TracebackType
from typing import Any, Optional, Type
from scrapli.channel import Channel
from scrapli.driver.base_driver import ASYNCIO_TRANSPORTS, ScrapeBase
from scrapli.exceptions import TransportPluginError
class Scrape(ScrapeBase):
def __init__(self, **kwargs: Any) -> None:
"""
Scrape Object
Scrape is the base class for NetworkDriver, and subsequent platform specific drivers (i.e.
IOSXEDriver). Scrape can be used on its own and offers a semi-pexpect like experience in
that it doesn't know or care about privilege levels, platform types, and things like that.
*Note* most arguments passed to Scrape do not actually get assigned to the scrape object
itself, but instead are used to construct the Transport and Channel classes that Scrape
relies on, see Transport and Channel docs for details.
Args:
kwargs: Keyword arguments to pass to `ScrapeBase` -- see `ScrapeBase` for available args
Returns:
N/A # noqa: DAR202
Raises:
TransportPluginError: if attempting to use an asyncio transport plugin
"""
super().__init__(**kwargs)
if self._transport in ASYNCIO_TRANSPORTS:
raise TransportPluginError(
f"Attempting to use transport type {self._transport} with a sync driver, "
"must use a non-asyncio transport"
)
self.channel = Channel(transport=self.transport, **self.channel_args)
def __enter__(self) -> "Scrape":
"""
Enter method for context manager
Args:
N/A
Returns:
self: instance of self
Raises:
N/A
"""
self.open()
return self
def __exit__(
self,
exception_type: Optional[Type[BaseException]],
exception_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
"""
Exit method to cleanup for context manager
Args:
exception_type: exception type being raised
exception_value: message from exception being raised
traceback: traceback from exception being raised
Returns:
N/A # noqa: DAR202
Raises:
N/A
"""
self.close()
def open(self) -> None:
"""
Open Transport (socket/session) and establish channel
If on_open callable provided, execute that callable after opening connection
Args:
N/A
Returns:
N/A # noqa: DAR202
Raises:
N/A
"""
self.logger.info(f"Opening connection to {self._initialization_args['host']}")
self.transport.open()
if self.on_open:
self.on_open(self)
self.logger.info(f"Connection to {self._initialization_args['host']} opened successfully")
def close(self) -> None:
"""
Close Transport (socket/session)
Args:
N/A
Returns:
N/A # noqa: DAR202
Raises:
N/A
"""
self.logger.info(f"Closing connection to {self._initialization_args['host']}")
if self.on_close:
self.on_close(self)
self.transport.close()
self.logger.info(f"Connection to {self._initialization_args['host']} closed successfully") | 0.902889 | 0.193814 |
from dlflow.mgr import model, config
from dlflow.models import ModelBase
import tensorflow as tf
class _Embedding(tf.keras.layers.Layer):
def __init__(self, input_dim, output_dim):
super(_Embedding, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
def build(self, input_shape):
self.embedding = self.add_weight(name="emb_w",
shape=[self.input_dim,
self.output_dim],
initializer='uniform')
def call(self, inputs, **kwargs):
emb = tf.nn.embedding_lookup(self.embedding, inputs)
out_dim = inputs.shape[-1] * self.output_dim
return tf.reshape(emb, [-1, out_dim])
@model.reg("DNNBinaryClassifier")
class DNNBinaryClassifier(ModelBase):
cfg = config.setting(
config.req("MODEL.layers"),
config.opt("MODEL.learning_rate", 0.001),
config.opt("MODEL.batch_size", 128)
)
def __init__(self, fmap):
super(DNNBinaryClassifier, self).__init__(fmap)
self.optimizer = tf.keras.optimizers.Adam(
learning_rate=config.MODEL.learning_rate)
self.compute_loss = tf.keras.losses.BinaryCrossentropy(
from_logits=True)
self.mean_loss = tf.keras.metrics.Mean()
self.acc = tf.keras.metrics.BinaryAccuracy()
self.auc = tf.keras.metrics.AUC()
self.metrics = {
"mean_loss": self.mean_loss,
"acc": self.acc,
"auc": self.auc
}
def build(self):
concat_list = self.get_inputs(tp="nums")
for ctg_inp, depth in self.get_inputs(tp="ctgs", with_depth=True):
_emb = _Embedding(depth, 6)(ctg_inp)
concat_list.append(_emb)
net = tf.concat(concat_list, axis=1)
for size in config.MODEL.layers:
net = tf.keras.layers.Dense(size, activation=tf.nn.relu)(net)
logits = tf.keras.layers.Dense(1)(net)
sigmoid = tf.nn.sigmoid(logits)
self.set_output(logits, "logits")
self.set_output(sigmoid, "sigmoid")
@tf.function
def train(self, feature, label):
_label = label["label"]
with tf.GradientTape() as tape:
logits, sigmoid = self.model(feature)
loss = self.compute_loss(_label, logits)
grads = tape.gradient(loss, self.model.trainable_variables)
self.optimizer.apply_gradients(
zip(grads, self.model.trainable_variables))
self.mean_loss(loss)
self.acc(_label, sigmoid)
self.auc(_label, sigmoid)
@tf.function
def evaluate(self, feature, label):
_label = label["label"]
logits, sigmoid = self.model(feature)
loss = self.compute_loss(_label, logits)
self.mean_loss(loss)
self.acc(_label, sigmoid)
self.auc(_label, sigmoid)
@tf.function
def predict(self, feature):
pred = self.model(feature)
return pred | dlflow/models/internal/DNNBinaryClassifier.py | from dlflow.mgr import model, config
from dlflow.models import ModelBase
import tensorflow as tf
class _Embedding(tf.keras.layers.Layer):
def __init__(self, input_dim, output_dim):
super(_Embedding, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
def build(self, input_shape):
self.embedding = self.add_weight(name="emb_w",
shape=[self.input_dim,
self.output_dim],
initializer='uniform')
def call(self, inputs, **kwargs):
emb = tf.nn.embedding_lookup(self.embedding, inputs)
out_dim = inputs.shape[-1] * self.output_dim
return tf.reshape(emb, [-1, out_dim])
@model.reg("DNNBinaryClassifier")
class DNNBinaryClassifier(ModelBase):
cfg = config.setting(
config.req("MODEL.layers"),
config.opt("MODEL.learning_rate", 0.001),
config.opt("MODEL.batch_size", 128)
)
def __init__(self, fmap):
super(DNNBinaryClassifier, self).__init__(fmap)
self.optimizer = tf.keras.optimizers.Adam(
learning_rate=config.MODEL.learning_rate)
self.compute_loss = tf.keras.losses.BinaryCrossentropy(
from_logits=True)
self.mean_loss = tf.keras.metrics.Mean()
self.acc = tf.keras.metrics.BinaryAccuracy()
self.auc = tf.keras.metrics.AUC()
self.metrics = {
"mean_loss": self.mean_loss,
"acc": self.acc,
"auc": self.auc
}
def build(self):
concat_list = self.get_inputs(tp="nums")
for ctg_inp, depth in self.get_inputs(tp="ctgs", with_depth=True):
_emb = _Embedding(depth, 6)(ctg_inp)
concat_list.append(_emb)
net = tf.concat(concat_list, axis=1)
for size in config.MODEL.layers:
net = tf.keras.layers.Dense(size, activation=tf.nn.relu)(net)
logits = tf.keras.layers.Dense(1)(net)
sigmoid = tf.nn.sigmoid(logits)
self.set_output(logits, "logits")
self.set_output(sigmoid, "sigmoid")
@tf.function
def train(self, feature, label):
_label = label["label"]
with tf.GradientTape() as tape:
logits, sigmoid = self.model(feature)
loss = self.compute_loss(_label, logits)
grads = tape.gradient(loss, self.model.trainable_variables)
self.optimizer.apply_gradients(
zip(grads, self.model.trainable_variables))
self.mean_loss(loss)
self.acc(_label, sigmoid)
self.auc(_label, sigmoid)
@tf.function
def evaluate(self, feature, label):
_label = label["label"]
logits, sigmoid = self.model(feature)
loss = self.compute_loss(_label, logits)
self.mean_loss(loss)
self.acc(_label, sigmoid)
self.auc(_label, sigmoid)
@tf.function
def predict(self, feature):
pred = self.model(feature)
return pred | 0.922024 | 0.371707 |
from armstrong.dev.tests.utils.backports import override_settings
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.template.response import TemplateResponse
from django.test.client import Client
from functools import wraps
import fudge
import os
import random
from ._utils import TestCase
from .. import constants
from .. import forms
from .. import models
from .. import views
def failed_purchase(func):
    """Decorator for test methods that exercise a *failed* purchase.

    Swaps ``views.backends`` for a stub whose purchase fails with a
    randomized reason string, POSTs the donation form, then invokes the
    wrapped test with the response plus ``random_text`` (the failure
    reason) and ``backend_response`` keyword arguments.
    """
    @wraps(func)  # preserve test name/docstring; consistent with the file's other decorators
    def inner(self):
        # Randomized reason so a test can't accidentally pass against a
        # hard-coded string baked into the view.
        random_text = "Some Random Text (%d)" % random.randint(1000, 2000)
        backend = self.get_backend_stub(successful=False, reason=random_text)
        # NOTE(review): this overwrites the patches installed by setUp
        # without restoring them first -- presumably tearDown handles both;
        # verify patches do not leak between tests.
        self.patches = [
            fudge.patch_object(views, "backends", backend),
        ]
        fudge.clear_calls()
        data = self.random_post_data
        response = self.client.post(self.url, data)
        backend_response = backend.get_backend().purchase()["response"]
        func(self, response, random_text=random_text,
                backend_response=backend_response)
    return inner
class BaseDonationFormViewTestCase(TestCase):
    """Shared fixture and custom assertions for ``DonationFormView`` tests.

    ``setUp`` patches ``views.backends`` with a stub (via fudge) so no real
    payment backend is contacted, and points TEMPLATE_DIRS at the test-only
    templates directory.
    """
    # View class under test; subclasses may override view_name to hit a
    # different named URL (e.g. the confirm variant).
    view_class = views.DonationFormView
    view_name = "donations_form"
    @property
    def url(self):
        """Resolved URL for ``view_name``."""
        # TODO: move this into armstrong.dev
        return reverse(self.view_name)
    def setUp(self):
        super(BaseDonationFormViewTestCase, self).setUp()
        # TODO: move this to armstrong.dev
        self.client = Client()
        # TODO: make this based off of class name and move into armstrong.dev
        settings.TEMPLATE_DIRS = (
            os.path.join(os.path.dirname(__file__), "_templates"),
        )
        self.client  # NOTE(review): no-op statement -- looks like leftover code
        # Replace the payment backend with a stub for the duration of the test.
        self.patches = [
            fudge.patch_object(views, "backends", self.get_backend_stub())
        ]
        fudge.clear_calls()
    def tearDown(self):
        # NOTE(review): ``self.patches`` is never restored here -- presumably
        # handled elsewhere; verify fudge patches don't leak between tests.
        super(BaseDonationFormViewTestCase, self).tearDown()
    def assert_in_context(self, response, name):
        """Assert ``name`` is a key in the response's template context."""
        # TODO: move this into armstrong.dev
        # Client responses expose ``context``; TemplateResponse objects
        # expose ``context_data`` instead.
        context = (response.context if hasattr(response, "context")
                else response.context_data)
        self.assertTrue(name in context,
                msg="%s was not in the context" % name)
    def assert_type_in_context(self, response, name, expected_type):
        """Assert the context value ``name`` is an ``expected_type`` instance."""
        self.assert_in_context(response, name)
        context = (response.context if hasattr(response, "context")
                else response.context_data)
        self.assertTrue(isinstance(context[name], expected_type),
                msg="%s in the context, but does not have a class of %s" % (
                    name, expected_type.__name__))
    def assert_value_in_context(self, response, name, expected_value):
        """Assert the context value ``name`` equals ``expected_value``."""
        self.assert_in_context(response, name)
        context = (response.context if hasattr(response, "context")
                else response.context_data)
        self.assertEqual(context[name], expected_value,
                msg="%s in the context, but not equal to '%s'" % (
                    name, expected_value))
    def assert_template(self, template, response):
        """Assert ``template`` was among the templates used to render."""
        template_names = [a.name for a in response.templates]
        self.assertTrue(template in template_names,
                msg="%s not found in templates: %s" % (
                    template, response.templates))
    def assert_form_has_errors(self, response, form_name, error_fields=None):
        """Assert the named context form has errors (optionally on fields)."""
        self.assert_in_context(response, form_name)
        form = response.context[form_name]
        self.assertNotEqual(form.errors, [],
                msg="%s.errors was empty?" % form_name)
        if error_fields:
            for field in error_fields:
                self.assertTrue(field in form.errors,
                        msg="%s not in the errors" % field)
    def assert_subform_has_errors(self, response, subform_name,
            error_fields=None):
        """Assert a subform attribute of ``donation_form`` has field errors."""
        form = response.context["donation_form"]
        self.assertTrue(hasattr(form, subform_name))
        subform = getattr(form, subform_name)
        if error_fields:
            for field in error_fields:
                self.assertTrue(field in subform.errors,
                        msg="%s not in the errors" % field)
    def get_view_object(self):
        """Return a view instance with a GET request already attached."""
        view = self.view_class()
        view.request = self.factory.get(self.url)
        return view
    def get_response(self):
        """GET the view's URL and sanity-check a 200 status."""
        response = self.client.get(self.url)
        self.assertEqual(200, response.status_code, msg="sanity check")
        return response
    def get_fake_post_request(self, confirmed=False):
        """Build a POST request, including ``confirmed=1`` when requested."""
        d = {} if not confirmed else {"confirmed": u"1"}
        return self.factory.post(self.url, d)
    @property
    def fake_get_request(self):
        return self.factory.get(self.url)
    def get_post_view(self, confirmed=False):
        """Return a confirm-enabled view bound to a fake POST request."""
        v = views.DonationFormView(confirm=True)
        v.request = self.get_fake_post_request(confirmed=confirmed)
        return v
    post_view = property(get_post_view)
# TODO: move to armstrong.dev
def get_response(func):
    """Decorator: run ``self.get_response()`` first and hand the result to
    the wrapped test method as its only positional argument."""
    @wraps(func)
    def wrapper(self):
        response = self.get_response()
        func(self, response)
    return wrapper
class DonationFormViewGetTestCase(BaseDonationFormViewTestCase):
    """GET behavior of ``DonationFormView`` plus kwargs plumbing through
    ``post``/``form_is_valid``/``purchase_failed`` (verified with fudge)."""
    @get_response
    def test_adds_form_action_url_to_context(self, response):
        self.assert_value_in_context(response, "form_action_url", "")
    @get_response
    def test_adds_donation_formset_to_context(self, response):
        self.assert_type_in_context(response, "donation_form",
                forms.BaseDonationForm)
    def test_get_donation_form_returns_credit_card_form_by_default(self):
        # TODO: make sure in "default" state
        view = self.get_view_object()
        donation_form = view.get_donation_form()
        self.assertIsA(donation_form, forms.CreditCardDonationForm)
    def test_get_context_turns_kwargs_into_params(self):
        """Arbitrary kwargs must surface as ``context["params"]`` entries."""
        r = lambda: random.randint(100, 200)
        random_kwargs = {
            "slug%d" % r(): "foo-%d" % r(),
        }
        view = self.get_view_object()
        context = view.get_context_data(**random_kwargs)
        self.assertEqual(len(context["params"]), len(random_kwargs),
                msg="verify context.params is the same length")
        for key in context["params"].keys():
            self.assert_(key in random_kwargs)
    def test_form_is_invalid_passes_kwargs_to_get_context_data(self):
        """``post`` with an invalid form forwards kwargs to get_context_data."""
        r = lambda: random.randint(100, 200)
        random_kwargs = {
            "slug%d" % r(): "foo-%d" % r(),
        }
        # Expectation is verified by fudge.verify() at the end of the test.
        get_context_data = fudge.Fake()
        get_context_data.expects_call().with_args(**random_kwargs)
        view = self.post_view
        with fudge.patched_context(view, "get_context_data", get_context_data):
            view.post({}, **random_kwargs)
        fudge.verify()
    def test_form_is_valid_passes_kwargs_to_get_context_data(self):
        donation, donation_form = self.random_donation_and_form
        r = lambda: random.randint(100, 200)
        random_kwargs = {
            "slug%d" % r(): "foo-%d" % r(),
        }
        get_context_data = fudge.Fake()
        get_context_data.expects_call().with_args(**random_kwargs)
        view = self.post_view
        with fudge.patched_context(view, "get_context_data", get_context_data):
            view.form_is_valid(donation_form, **random_kwargs)
        fudge.verify()
    def test_form_is_valid_passes_kwargs_to_purchase_failed(self):
        """On a failed backend purchase, kwargs reach ``purchase_failed``."""
        donation, donation_form = self.random_donation_and_form
        r = lambda: random.randint(100, 200)
        random_kwargs = {
            "slug%d" % r(): "foo-%d" % r(),
        }
        view = self.post_view
        view.confirm = False
        backends = self.get_backend_stub(successful=False)
        backend_response = backends.get_backend().purchase()
        purchase_failed = fudge.Fake()
        purchase_failed.expects_call().with_args(backend_response,
                **random_kwargs)
        with fudge.patched_context(views, "backends", backends):
            with fudge.patched_context(view, "purchase_failed",
                    purchase_failed):
                view.form_is_valid(donation_form, **random_kwargs)
        fudge.verify()
    def test_purchase_failed_passes_kwargs_to_get_context_data(self):
        backend_response = {
            "reason": "Some Random Reason",
            "response": "Some Random Response",
        }
        r = lambda: random.randint(100, 200)
        random_kwargs = {
            "slug%d" % r(): "foo-%d" % r(),
        }
        get_context_data = fudge.Fake()
        (get_context_data.expects_call()
            .with_args(**random_kwargs)
            .returns({}))
        view = self.post_view
        with fudge.patched_context(view, "get_context_data", get_context_data):
            view.purchase_failed(backend_response, **random_kwargs)
        fudge.verify()
def form_is_valid_response(confirmed=False):
    """Decorator factory: drive ``form_is_valid`` against a stubbed form
    and pass the resulting response to the wrapped test.

    The form's ``save`` is replaced with a fudge fake that returns a canned
    donation, so no persistence happens during the call.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            donation, form = self.random_donation_and_form
            form.save = fudge.Fake().is_callable().returns(donation)
            view = self.get_post_view(confirmed=confirmed)
            func(self, view.form_is_valid(form))
        return wrapper
    return decorator
class DonationFormViewPostWithConfirmTestCase(BaseDonationFormViewTestCase):
    """Template selection and confirmation flow when the view requires an
    explicit "confirmed" POST before processing the donation."""
    view_name = "donations_form_confirm"
    def test_use_confirm_template_false_by_default(self):
        v = views.DonationFormView()
        self.assertFalse(v.use_confirm_template)
    def test_use_confirm_template_true_if_confirmation_required(self):
        v = self.post_view
        self.assertTrue(v.use_confirm_template)
    def test_use_confirm_template_false_if_confirmed(self):
        v = self.get_post_view(confirmed=True)
        self.assertFalse(v.use_confirm_template)
    def test_use_confirm_template_false_if_confirmation_failed(self):
        v = self.post_view
        v.form_validation_failed = True
        self.assertFalse(v.use_confirm_template)
    def test_swaps_templates_on_confirmation(self):
        v = self.post_view
        self.assertEqual(v.confirm_template_name, v.get_template_names()[0])
    def test_uses_regular_template_when_confirmation_not_required(self):
        v = self.get_post_view(confirmed=True)
        self.assertEqual(v.template_name, v.get_template_names()[0])
    def test_uses_regular_template_on_get_request(self):
        v = views.DonationFormView(confirm=True)
        v.request = self.fake_get_request
        self.assertEqual(v.template_name, v.get_template_names()[0])
    def test_uses_regular_template_on_invalid_request(self):
        v = self.post_view
        v.form_validation_failed = True
        self.assertEqual(v.template_name, v.get_template_names()[0])
    def test_form_is_invalid_uses_regular_template(self):
        v = self.post_view
        response = v.form_is_invalid()
        self.assertEqual(v.template_name, response.template_name[0])
    def test_requires_confirmation_is_true_by_default_on_posts(self):
        self.assertTrue(self.post_view.requires_confirmation)
    def test_requires_confirmation_is_false_if_confirmed(self):
        v = self.get_post_view(confirmed=True)
        self.assertFalse(v.requires_confirmation)
    @form_is_valid_response()
    def test_form_is_valid_re_renders_if_confirmation_is_required(self, r):
        self.assertIsA(r, TemplateResponse)
    @form_is_valid_response()
    def test_contains_confirmation_required_in_context(self, r):
        self.assert_value_in_context(r, "confirmation_required", True)
    @form_is_valid_response(confirmed=True)
    def test_redirects_on_confirmed(self, r):
        self.assertIsA(r, HttpResponseRedirect)
    def test_form_is_invalid_receives_kwargs_from_post(self):
        """``post`` forwards its kwargs to ``form_is_invalid`` (fudge-verified)."""
        r = lambda: random.randint(100, 200)
        random_kwargs = {
            "slug%d" % r(): "foo-%d" % r(),
        }
        donation_form = fudge.Fake()
        donation_form.provides("is_valid").returns(False)
        get_donation_form = fudge.Fake()
        get_donation_form.is_callable().returns(donation_form)
        form_is_invalid = fudge.Fake()
        form_is_invalid.expects_call().with_args(**random_kwargs)
        view = self.post_view
        with fudge.patched_context(view, "get_donation_form",
                get_donation_form):
            with fudge.patched_context(view, "form_is_invalid",
                    form_is_invalid):
                view.post({}, **random_kwargs)
        fudge.verify()
    def test_post_passes_kwargs_to_form_is_valid(self):
        """``post`` forwards its kwargs to ``form_is_valid`` (fudge-verified)."""
        r = lambda: random.randint(100, 200)
        random_kwargs = {
            "slug%d" % r(): "foo-%d" % r(),
        }
        donation_form = fudge.Fake()
        donation_form.provides("is_valid").returns(True)
        get_donation_form = fudge.Fake()
        get_donation_form.is_callable().returns(donation_form)
        form_is_valid = fudge.Fake()
        form_is_valid.expects_call().with_args(donation_form=donation_form,
                **random_kwargs)
        view = self.post_view
        with fudge.patched_context(view, "get_donation_form",
                get_donation_form):
            with fudge.patched_context(view, "form_is_valid", form_is_valid):
                view.post({}, **random_kwargs)
        fudge.verify()
class DonationFormViewPostTestCase(BaseDonationFormViewTestCase):
    """End-to-end POST behavior: persisting donors/donations/addresses,
    promo codes, redirects, validation errors, and failed purchases."""
    @property
    def random_post_data(self):
        """Randomized form POST data including billing-prefixed address fields."""
        data = self.get_base_random_data()
        address_kwargs = self.random_address_kwargs
        prefixed_address_kwargs = self.prefix_data(address_kwargs,
                prefix="billing")
        data.update(prefixed_address_kwargs)
        return data
    def test_requires_confirmation_is_false(self):
        self.assertFalse(self.get_view_object().requires_confirmation)
    def test_saves_donation_on_post_with_minimal_information(self):
        name_kwargs = self.random_donor_kwargs
        random_amount = self.random_amount
        data = self.get_base_random_data(amount=random_amount,
                **name_kwargs)
        data.update(self.get_data_as_formset())
        # sanity check
        self.assertRaises(models.Donor.DoesNotExist,
                models.Donor.objects.get, **name_kwargs)
        with override_settings(ARMSTRONG_DONATION_FORM="SimpleDonationForm"):
            self.client.post(self.url, data)
        donor = models.Donor.objects.get(**name_kwargs)
        self.assertEqual(str(donor), " ".join(name_kwargs.values()))
        donation = models.Donation.objects.get(donor=donor)
        self.assertEqual(donation.amount, random_amount)
    def test_uses_promo_code_if_available(self):
        """A promo code is stored on the donation and discounts its amount."""
        promo_code = self.random_discount
        name_kwargs = self.random_donor_kwargs
        random_amount = self. random_amount
        data = self.get_base_random_data(amount=random_amount,
                promo_code=promo_code.code, **name_kwargs)
        data.update(self.prefix_data(self.random_address_kwargs,
                prefix="billing"))
        self.client.post(self.url, data)
        donor = models.Donor.objects.get(**name_kwargs)
        donation = models.Donation.objects.get(donor=donor)
        self.assertEqual(promo_code, donation.code)
        # Stand-in object with just the attribute ``calculate`` needs.
        d = fudge.Fake().has_attr(amount=random_amount)
        self.assertAlmostEqual(promo_code.calculate(d),
                donation.amount, places=2)
    def test_saves_address_if_present(self):
        name_kwargs = self.random_donor_kwargs
        address_kwargs = self.random_address_kwargs
        data = self.get_base_random_data(**name_kwargs)
        data.update(self.prefix_data(address_kwargs, prefix="billing"))
        self.client.post(self.url, data)
        donor = models.Donor.objects.get(**name_kwargs)
        address = models.DonorAddress.objects.get(**address_kwargs)
        self.assertEqual(address, donor.address)
        self.assertEqual(address, donor.mailing_address)
    def test_saves_mailing_address_if_present(self):
        """Distinct billing/mailing addresses create two DonorAddress rows."""
        name_kwargs = self.random_donor_kwargs
        address_kwargs = self.random_address_kwargs
        mailing_address_kwargs = self.random_address_kwargs
        data = self.get_base_random_data(**name_kwargs)
        data.update(self.prefix_data(address_kwargs, prefix="billing"))
        data.update(self.prefix_data(mailing_address_kwargs, prefix="mailing"))
        del data[constants.MAILING_SAME_AS_BILLING]
        self.assertEqual(0, len(models.DonorAddress.objects.all()),
                msg="sanity check")
        self.client.post(self.url, data)
        self.assertEqual(2, len(models.DonorAddress.objects.all()))
        address = models.DonorAddress.objects.get(**address_kwargs)
        mailing_address = models.DonorAddress.objects.get(
                **mailing_address_kwargs)
        self.assertNotEqual(address, mailing_address)
        donor = models.Donor.objects.get(**name_kwargs)
        self.assertEqual(address, donor.address)
        self.assertEqual(mailing_address, donor.mailing_address)
    def test_only_saves_donor_once(self):
        """
        Verify the number of queries that are run.
        This assumes that the tests are run in isolation from the backend.
        This will pass if #17594 is merged in.
        """
        data = self.random_post_data
        with self.assertNumQueries(3):
            self.client.post(self.url, data)
    def test_saves_mailing_address_if_same_as_billing_is_checked(self):
        data = self.random_post_data
        data["mailing_same_as_billing"] = u"1"
        self.client.post(self.url, data)
        donor = models.Donor.objects.get(first_name=data["first_name"],
                last_name=data["last_name"])
        self.assertEqual(donor.address, donor.mailing_address)
    def test_same_as_billing_overrides_second_address(self):
        data = self.random_post_data
        data.update(self.prefix_data(self.random_address_kwargs,
                prefix="billing"))
        data.update(self.prefix_data(self.random_address_kwargs,
                prefix="mailing"))
        data["mailing_same_as_billing"] = u"1"
        self.client.post(self.url, data)
        donor = models.Donor.objects.get(first_name=data["first_name"],
                last_name=data["last_name"])
        self.assertEqual(donor.address, donor.mailing_address)
    def test_redirects_to_success_url_after_successful_save(self):
        data = self.random_post_data
        response = self.client.post(self.url, data)
        self.assertRedirects(response, reverse("donations_thanks"))
    def test_displays_error_on_donation_form_validation_error(self):
        data = self.random_post_data
        del data["ccv_code"]
        response = self.client.post(self.url, data)
        self.assert_template("armstrong/donations/donation.html", response)
        self.assert_form_has_errors(response, "donation_form", ["ccv_code", ])
    def test_displays_errors_on_address_validation_error(self):
        data = self.random_post_data
        data["billing-address"] = ""
        response = self.client.post(self.url, data)
        self.assert_template("armstrong/donations/donation.html", response)
        self.assert_subform_has_errors(response, "billing_address_form")
    def test_displays_errors_on_mailing_address_validation_error(self):
        data = self.random_post_data
        data.update(self.prefix_data(self.random_address_kwargs,
                prefix="billing"))
        data.update(self.prefix_data(self.random_address_kwargs,
                prefix="mailing"))
        del data["mailing_same_as_billing"]
        data["mailing-address"] = ""
        response = self.client.post(self.url, data)
        self.assert_template("armstrong/donations/donation.html", response)
        self.assert_subform_has_errors(response, "mailing_address_form")
    @failed_purchase
    def test_does_redisplays_form_on_failed_donation(self, response, **kwargs):
        self.assertEqual(200, response.status_code)
        self.assert_template("armstrong/donations/donation.html", response)
    @failed_purchase
    def test_error_msg_in_context_on_failed_purchase(self, response, **kwargs):
        self.assert_value_in_context(response, "error_msg",
                "Unable to process payment")
    @failed_purchase
    def test_reason_in_context_on_failed_purchase(self, response, random_text,
            **kwargs):
        self.assert_value_in_context(response, "reason", random_text)
    @failed_purchase
    def test_response_in_context_on_failed_purchase(self, response,
            backend_response, **kwargs):
        # NOTE(review): the trailing " | path | import" text on the next line
        # is a dataset-row separator fused into the source, not Python code.
        self.assert_value_in_context(response, "response", backend_response) | armstrong/apps/donations/tests/views.py | from armstrong.dev.tests.utils.backports import override_settings
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.template.response import TemplateResponse
from django.test.client import Client
from functools import wraps
import fudge
import os
import random
from ._utils import TestCase
from .. import constants
from .. import forms
from .. import models
from .. import views
def failed_purchase(func):
    """Decorator for test methods that exercise a *failed* purchase.

    Swaps ``views.backends`` for a stub whose purchase fails with a
    randomized reason string, POSTs the donation form, then invokes the
    wrapped test with the response plus ``random_text`` (the failure
    reason) and ``backend_response`` keyword arguments.
    """
    @wraps(func)  # preserve test name/docstring; consistent with the file's other decorators
    def inner(self):
        # Randomized reason so a test can't accidentally pass against a
        # hard-coded string baked into the view.
        random_text = "Some Random Text (%d)" % random.randint(1000, 2000)
        backend = self.get_backend_stub(successful=False, reason=random_text)
        # NOTE(review): this overwrites the patches installed by setUp
        # without restoring them first -- presumably tearDown handles both;
        # verify patches do not leak between tests.
        self.patches = [
            fudge.patch_object(views, "backends", backend),
        ]
        fudge.clear_calls()
        data = self.random_post_data
        response = self.client.post(self.url, data)
        backend_response = backend.get_backend().purchase()["response"]
        func(self, response, random_text=random_text,
                backend_response=backend_response)
    return inner
class BaseDonationFormViewTestCase(TestCase):
    """Shared fixture and custom assertions for ``DonationFormView`` tests.

    ``setUp`` patches ``views.backends`` with a stub (via fudge) so no real
    payment backend is contacted, and points TEMPLATE_DIRS at the test-only
    templates directory.
    """
    # View class under test; subclasses may override view_name to hit a
    # different named URL (e.g. the confirm variant).
    view_class = views.DonationFormView
    view_name = "donations_form"
    @property
    def url(self):
        """Resolved URL for ``view_name``."""
        # TODO: move this into armstrong.dev
        return reverse(self.view_name)
    def setUp(self):
        super(BaseDonationFormViewTestCase, self).setUp()
        # TODO: move this to armstrong.dev
        self.client = Client()
        # TODO: make this based off of class name and move into armstrong.dev
        settings.TEMPLATE_DIRS = (
            os.path.join(os.path.dirname(__file__), "_templates"),
        )
        self.client  # NOTE(review): no-op statement -- looks like leftover code
        # Replace the payment backend with a stub for the duration of the test.
        self.patches = [
            fudge.patch_object(views, "backends", self.get_backend_stub())
        ]
        fudge.clear_calls()
    def tearDown(self):
        # NOTE(review): ``self.patches`` is never restored here -- presumably
        # handled elsewhere; verify fudge patches don't leak between tests.
        super(BaseDonationFormViewTestCase, self).tearDown()
    def assert_in_context(self, response, name):
        """Assert ``name`` is a key in the response's template context."""
        # TODO: move this into armstrong.dev
        # Client responses expose ``context``; TemplateResponse objects
        # expose ``context_data`` instead.
        context = (response.context if hasattr(response, "context")
                else response.context_data)
        self.assertTrue(name in context,
                msg="%s was not in the context" % name)
    def assert_type_in_context(self, response, name, expected_type):
        """Assert the context value ``name`` is an ``expected_type`` instance."""
        self.assert_in_context(response, name)
        context = (response.context if hasattr(response, "context")
                else response.context_data)
        self.assertTrue(isinstance(context[name], expected_type),
                msg="%s in the context, but does not have a class of %s" % (
                    name, expected_type.__name__))
    def assert_value_in_context(self, response, name, expected_value):
        """Assert the context value ``name`` equals ``expected_value``."""
        self.assert_in_context(response, name)
        context = (response.context if hasattr(response, "context")
                else response.context_data)
        self.assertEqual(context[name], expected_value,
                msg="%s in the context, but not equal to '%s'" % (
                    name, expected_value))
    def assert_template(self, template, response):
        """Assert ``template`` was among the templates used to render."""
        template_names = [a.name for a in response.templates]
        self.assertTrue(template in template_names,
                msg="%s not found in templates: %s" % (
                    template, response.templates))
    def assert_form_has_errors(self, response, form_name, error_fields=None):
        """Assert the named context form has errors (optionally on fields)."""
        self.assert_in_context(response, form_name)
        form = response.context[form_name]
        self.assertNotEqual(form.errors, [],
                msg="%s.errors was empty?" % form_name)
        if error_fields:
            for field in error_fields:
                self.assertTrue(field in form.errors,
                        msg="%s not in the errors" % field)
    def assert_subform_has_errors(self, response, subform_name,
            error_fields=None):
        """Assert a subform attribute of ``donation_form`` has field errors."""
        form = response.context["donation_form"]
        self.assertTrue(hasattr(form, subform_name))
        subform = getattr(form, subform_name)
        if error_fields:
            for field in error_fields:
                self.assertTrue(field in subform.errors,
                        msg="%s not in the errors" % field)
    def get_view_object(self):
        """Return a view instance with a GET request already attached."""
        view = self.view_class()
        view.request = self.factory.get(self.url)
        return view
    def get_response(self):
        """GET the view's URL and sanity-check a 200 status."""
        response = self.client.get(self.url)
        self.assertEqual(200, response.status_code, msg="sanity check")
        return response
    def get_fake_post_request(self, confirmed=False):
        """Build a POST request, including ``confirmed=1`` when requested."""
        d = {} if not confirmed else {"confirmed": u"1"}
        return self.factory.post(self.url, d)
    @property
    def fake_get_request(self):
        return self.factory.get(self.url)
    def get_post_view(self, confirmed=False):
        """Return a confirm-enabled view bound to a fake POST request."""
        v = views.DonationFormView(confirm=True)
        v.request = self.get_fake_post_request(confirmed=confirmed)
        return v
    post_view = property(get_post_view)
# TODO: move to armstrong.dev
def get_response(func):
@wraps(func)
def inner(self):
func(self, self.get_response())
return inner
class DonationFormViewGetTestCase(BaseDonationFormViewTestCase):
@get_response
def test_adds_form_action_url_to_context(self, response):
self.assert_value_in_context(response, "form_action_url", "")
@get_response
def test_adds_donation_formset_to_context(self, response):
self.assert_type_in_context(response, "donation_form",
forms.BaseDonationForm)
def test_get_donation_form_returns_credit_card_form_by_default(self):
# TODO: make sure in "default" state
view = self.get_view_object()
donation_form = view.get_donation_form()
self.assertIsA(donation_form, forms.CreditCardDonationForm)
def test_get_context_turns_kwargs_into_params(self):
r = lambda: random.randint(100, 200)
random_kwargs = {
"slug%d" % r(): "foo-%d" % r(),
}
view = self.get_view_object()
context = view.get_context_data(**random_kwargs)
self.assertEqual(len(context["params"]), len(random_kwargs),
msg="verify context.params is the same length")
for key in context["params"].keys():
self.assert_(key in random_kwargs)
def test_form_is_invalid_passes_kwargs_to_get_context_data(self):
r = lambda: random.randint(100, 200)
random_kwargs = {
"slug%d" % r(): "foo-%d" % r(),
}
get_context_data = fudge.Fake()
get_context_data.expects_call().with_args(**random_kwargs)
view = self.post_view
with fudge.patched_context(view, "get_context_data", get_context_data):
view.post({}, **random_kwargs)
fudge.verify()
def test_form_is_valid_passes_kwargs_to_get_context_data(self):
donation, donation_form = self.random_donation_and_form
r = lambda: random.randint(100, 200)
random_kwargs = {
"slug%d" % r(): "foo-%d" % r(),
}
get_context_data = fudge.Fake()
get_context_data.expects_call().with_args(**random_kwargs)
view = self.post_view
with fudge.patched_context(view, "get_context_data", get_context_data):
view.form_is_valid(donation_form, **random_kwargs)
fudge.verify()
def test_form_is_valid_passes_kwargs_to_purchase_failed(self):
donation, donation_form = self.random_donation_and_form
r = lambda: random.randint(100, 200)
random_kwargs = {
"slug%d" % r(): "foo-%d" % r(),
}
view = self.post_view
view.confirm = False
backends = self.get_backend_stub(successful=False)
backend_response = backends.get_backend().purchase()
purchase_failed = fudge.Fake()
purchase_failed.expects_call().with_args(backend_response,
**random_kwargs)
with fudge.patched_context(views, "backends", backends):
with fudge.patched_context(view, "purchase_failed",
purchase_failed):
view.form_is_valid(donation_form, **random_kwargs)
fudge.verify()
def test_purchase_failed_passes_kwargs_to_get_context_data(self):
backend_response = {
"reason": "Some Random Reason",
"response": "Some Random Response",
}
r = lambda: random.randint(100, 200)
random_kwargs = {
"slug%d" % r(): "foo-%d" % r(),
}
get_context_data = fudge.Fake()
(get_context_data.expects_call()
.with_args(**random_kwargs)
.returns({}))
view = self.post_view
with fudge.patched_context(view, "get_context_data", get_context_data):
view.purchase_failed(backend_response, **random_kwargs)
fudge.verify()
def form_is_valid_response(confirmed=False):
def outer(func):
@wraps(func)
def inner(self, *args, **kwargs):
donation, form = self.random_donation_and_form
fake_save = fudge.Fake().is_callable().returns(donation)
setattr(form, "save", fake_save)
v = self.get_post_view(confirmed=confirmed)
response = v.form_is_valid(form)
func(self, response)
return inner
return outer
class DonationFormViewPostWithConfirmTestCase(BaseDonationFormViewTestCase):
view_name = "donations_form_confirm"
def test_use_confirm_template_false_by_default(self):
v = views.DonationFormView()
self.assertFalse(v.use_confirm_template)
def test_use_confirm_template_true_if_confirmation_required(self):
v = self.post_view
self.assertTrue(v.use_confirm_template)
def test_use_confirm_template_false_if_confirmed(self):
v = self.get_post_view(confirmed=True)
self.assertFalse(v.use_confirm_template)
def test_use_confirm_template_false_if_confirmation_failed(self):
v = self.post_view
v.form_validation_failed = True
self.assertFalse(v.use_confirm_template)
def test_swaps_templates_on_confirmation(self):
v = self.post_view
self.assertEqual(v.confirm_template_name, v.get_template_names()[0])
def test_uses_regular_template_when_confirmation_not_required(self):
v = self.get_post_view(confirmed=True)
self.assertEqual(v.template_name, v.get_template_names()[0])
def test_uses_regular_template_on_get_request(self):
v = views.DonationFormView(confirm=True)
v.request = self.fake_get_request
self.assertEqual(v.template_name, v.get_template_names()[0])
def test_uses_regular_template_on_invalid_request(self):
v = self.post_view
v.form_validation_failed = True
self.assertEqual(v.template_name, v.get_template_names()[0])
def test_form_is_invalid_uses_regular_template(self):
v = self.post_view
response = v.form_is_invalid()
self.assertEqual(v.template_name, response.template_name[0])
def test_requires_confirmation_is_true_by_default_on_posts(self):
self.assertTrue(self.post_view.requires_confirmation)
def test_requires_confirmation_is_false_if_confirmed(self):
v = self.get_post_view(confirmed=True)
self.assertFalse(v.requires_confirmation)
@form_is_valid_response()
def test_form_is_valid_re_renders_if_confirmation_is_required(self, r):
self.assertIsA(r, TemplateResponse)
@form_is_valid_response()
def test_contains_confirmation_required_in_context(self, r):
self.assert_value_in_context(r, "confirmation_required", True)
@form_is_valid_response(confirmed=True)
def test_redirects_on_confirmed(self, r):
self.assertIsA(r, HttpResponseRedirect)
def test_form_is_invalid_receives_kwargs_from_post(self):
r = lambda: random.randint(100, 200)
random_kwargs = {
"slug%d" % r(): "foo-%d" % r(),
}
donation_form = fudge.Fake()
donation_form.provides("is_valid").returns(False)
get_donation_form = fudge.Fake()
get_donation_form.is_callable().returns(donation_form)
form_is_invalid = fudge.Fake()
form_is_invalid.expects_call().with_args(**random_kwargs)
view = self.post_view
with fudge.patched_context(view, "get_donation_form",
get_donation_form):
with fudge.patched_context(view, "form_is_invalid",
form_is_invalid):
view.post({}, **random_kwargs)
fudge.verify()
def test_post_passes_kwargs_to_form_is_valid(self):
r = lambda: random.randint(100, 200)
random_kwargs = {
"slug%d" % r(): "foo-%d" % r(),
}
donation_form = fudge.Fake()
donation_form.provides("is_valid").returns(True)
get_donation_form = fudge.Fake()
get_donation_form.is_callable().returns(donation_form)
form_is_valid = fudge.Fake()
form_is_valid.expects_call().with_args(donation_form=donation_form,
**random_kwargs)
view = self.post_view
with fudge.patched_context(view, "get_donation_form",
get_donation_form):
with fudge.patched_context(view, "form_is_valid", form_is_valid):
view.post({}, **random_kwargs)
fudge.verify()
class DonationFormViewPostTestCase(BaseDonationFormViewTestCase):
@property
def random_post_data(self):
data = self.get_base_random_data()
address_kwargs = self.random_address_kwargs
prefixed_address_kwargs = self.prefix_data(address_kwargs,
prefix="billing")
data.update(prefixed_address_kwargs)
return data
def test_requires_confirmation_is_false(self):
self.assertFalse(self.get_view_object().requires_confirmation)
def test_saves_donation_on_post_with_minimal_information(self):
name_kwargs = self.random_donor_kwargs
random_amount = self.random_amount
data = self.get_base_random_data(amount=random_amount,
**name_kwargs)
data.update(self.get_data_as_formset())
# sanity check
self.assertRaises(models.Donor.DoesNotExist,
models.Donor.objects.get, **name_kwargs)
with override_settings(ARMSTRONG_DONATION_FORM="SimpleDonationForm"):
self.client.post(self.url, data)
donor = models.Donor.objects.get(**name_kwargs)
self.assertEqual(str(donor), " ".join(name_kwargs.values()))
donation = models.Donation.objects.get(donor=donor)
self.assertEqual(donation.amount, random_amount)
def test_uses_promo_code_if_available(self):
promo_code = self.random_discount
name_kwargs = self.random_donor_kwargs
random_amount = self. random_amount
data = self.get_base_random_data(amount=random_amount,
promo_code=promo_code.code, **name_kwargs)
data.update(self.prefix_data(self.random_address_kwargs,
prefix="billing"))
self.client.post(self.url, data)
donor = models.Donor.objects.get(**name_kwargs)
donation = models.Donation.objects.get(donor=donor)
self.assertEqual(promo_code, donation.code)
d = fudge.Fake().has_attr(amount=random_amount)
self.assertAlmostEqual(promo_code.calculate(d),
donation.amount, places=2)
def test_saves_address_if_present(self):
name_kwargs = self.random_donor_kwargs
address_kwargs = self.random_address_kwargs
data = self.get_base_random_data(**name_kwargs)
data.update(self.prefix_data(address_kwargs, prefix="billing"))
self.client.post(self.url, data)
donor = models.Donor.objects.get(**name_kwargs)
address = models.DonorAddress.objects.get(**address_kwargs)
self.assertEqual(address, donor.address)
self.assertEqual(address, donor.mailing_address)
def test_saves_mailing_address_if_present(self):
name_kwargs = self.random_donor_kwargs
address_kwargs = self.random_address_kwargs
mailing_address_kwargs = self.random_address_kwargs
data = self.get_base_random_data(**name_kwargs)
data.update(self.prefix_data(address_kwargs, prefix="billing"))
data.update(self.prefix_data(mailing_address_kwargs, prefix="mailing"))
del data[constants.MAILING_SAME_AS_BILLING]
self.assertEqual(0, len(models.DonorAddress.objects.all()),
msg="sanity check")
self.client.post(self.url, data)
self.assertEqual(2, len(models.DonorAddress.objects.all()))
address = models.DonorAddress.objects.get(**address_kwargs)
mailing_address = models.DonorAddress.objects.get(
**mailing_address_kwargs)
self.assertNotEqual(address, mailing_address)
donor = models.Donor.objects.get(**name_kwargs)
self.assertEqual(address, donor.address)
self.assertEqual(mailing_address, donor.mailing_address)
def test_only_saves_donor_once(self):
"""
Verify the number of queries that are run.
This assumes that the tests are run in isolation from the backend.
This will pass if #17594 is merged in.
"""
data = self.random_post_data
with self.assertNumQueries(3):
self.client.post(self.url, data)
def test_saves_mailing_address_if_same_as_billing_is_checked(self):
data = self.random_post_data
data["mailing_same_as_billing"] = u"1"
self.client.post(self.url, data)
donor = models.Donor.objects.get(first_name=data["first_name"],
last_name=data["last_name"])
self.assertEqual(donor.address, donor.mailing_address)
def test_same_as_billing_overrides_second_address(self):
data = self.random_post_data
data.update(self.prefix_data(self.random_address_kwargs,
prefix="billing"))
data.update(self.prefix_data(self.random_address_kwargs,
prefix="mailing"))
data["mailing_same_as_billing"] = u"1"
self.client.post(self.url, data)
donor = models.Donor.objects.get(first_name=data["first_name"],
last_name=data["last_name"])
self.assertEqual(donor.address, donor.mailing_address)
def test_redirects_to_success_url_after_successful_save(self):
data = self.random_post_data
response = self.client.post(self.url, data)
self.assertRedirects(response, reverse("donations_thanks"))
def test_displays_error_on_donation_form_validation_error(self):
data = self.random_post_data
del data["ccv_code"]
response = self.client.post(self.url, data)
self.assert_template("armstrong/donations/donation.html", response)
self.assert_form_has_errors(response, "donation_form", ["ccv_code", ])
def test_displays_errors_on_address_validation_error(self):
data = self.random_post_data
data["billing-address"] = ""
response = self.client.post(self.url, data)
self.assert_template("armstrong/donations/donation.html", response)
self.assert_subform_has_errors(response, "billing_address_form")
def test_displays_errors_on_mailing_address_validation_error(self):
data = self.random_post_data
data.update(self.prefix_data(self.random_address_kwargs,
prefix="billing"))
data.update(self.prefix_data(self.random_address_kwargs,
prefix="mailing"))
del data["mailing_same_as_billing"]
data["mailing-address"] = ""
response = self.client.post(self.url, data)
self.assert_template("armstrong/donations/donation.html", response)
self.assert_subform_has_errors(response, "mailing_address_form")
@failed_purchase
def test_does_redisplays_form_on_failed_donation(self, response, **kwargs):
self.assertEqual(200, response.status_code)
self.assert_template("armstrong/donations/donation.html", response)
@failed_purchase
def test_error_msg_in_context_on_failed_purchase(self, response, **kwargs):
self.assert_value_in_context(response, "error_msg",
"Unable to process payment")
@failed_purchase
def test_reason_in_context_on_failed_purchase(self, response, random_text,
**kwargs):
self.assert_value_in_context(response, "reason", random_text)
@failed_purchase
def test_response_in_context_on_failed_purchase(self, response,
backend_response, **kwargs):
self.assert_value_in_context(response, "response", backend_response) | 0.340924 | 0.185228 |
from contact import Contact
def create_contact(fname,lname,phone,email):
'''
Function to create a new contact
'''
new_contact = Contact(fname,lname,phone,email)
return new_contact
def save_contacts(contact):
'''
Function to save contact
'''
contact.save_contact()
def del_contact(contact):
'''
Function to delete a contact
'''
contact.delete_contact()
def find_contact(number):
'''
Function that finds a contact by number and returns the contact
'''
return Contact.find_by_number(number)
def check_existing_contacts(number):
'''
Function that check if a contact exists with that number and return a Boolean
'''
return Contact.contact_exist(number)
def display_contacts():
'''
Function that returns all the saved contacts
'''
return Contact.display_contacts()
def main():
print("Hello Welcome to your contact list. What is your name?")
user_name = input()
print(f"Hello {user_name}. what would you like to do?")
print('\n')
while True:
print("Use these short codes : cc - create a new contact, dc - display contacts, fc -find a contact, ex -exit the contact list ")
short_code = input().lower()
if short_code == 'cc':
print("New Contact")
print("-"*10)
print ("First name ....")
f_name = input()
print("Last name ...")
l_name = input()
print("Phone number ...")
p_number = input()
print("Email address ...")
e_address = input()
save_contacts(create_contact(f_name,l_name,p_number,e_address)) # create and save new contact.
print ('\n')
print(f"New Contact {f_name} {l_name} created")
print ('\n')
elif short_code == 'dc':
if display_contacts():
print("Here is a list of all your contacts")
print('\n')
for contact in display_contacts():
print(f"{contact.first_name} {contact.last_name} .....{contact.phone_number}")
print('\n')
else:
print('\n')
print("You dont seem to have any contacts saved yet")
print('\n')
elif short_code == 'fc':
print("Enter the number you want to search for")
search_number = input()
if check_existing_contacts(search_number):
search_contact = find_contact(search_number)
print(f"{search_contact.first_name} {search_contact.last_name}")
print('-' * 20)
print(f"Phone number.......{search_contact.phone_number}")
print(f"Email address.......{search_contact.email}")
else:
print("That contact does not exist")
elif short_code == "ex":
print("Bye .......")
break
else:
print("I really didn't get that. Please use the short codes")
if __name__ == '__main__':
main() | run.py | from contact import Contact
def create_contact(fname,lname,phone,email):
'''
Function to create a new contact
'''
new_contact = Contact(fname,lname,phone,email)
return new_contact
def save_contacts(contact):
'''
Function to save contact
'''
contact.save_contact()
def del_contact(contact):
'''
Function to delete a contact
'''
contact.delete_contact()
def find_contact(number):
'''
Function that finds a contact by number and returns the contact
'''
return Contact.find_by_number(number)
def check_existing_contacts(number):
'''
Function that check if a contact exists with that number and return a Boolean
'''
return Contact.contact_exist(number)
def display_contacts():
'''
Function that returns all the saved contacts
'''
return Contact.display_contacts()
def main():
print("Hello Welcome to your contact list. What is your name?")
user_name = input()
print(f"Hello {user_name}. what would you like to do?")
print('\n')
while True:
print("Use these short codes : cc - create a new contact, dc - display contacts, fc -find a contact, ex -exit the contact list ")
short_code = input().lower()
if short_code == 'cc':
print("New Contact")
print("-"*10)
print ("First name ....")
f_name = input()
print("Last name ...")
l_name = input()
print("Phone number ...")
p_number = input()
print("Email address ...")
e_address = input()
save_contacts(create_contact(f_name,l_name,p_number,e_address)) # create and save new contact.
print ('\n')
print(f"New Contact {f_name} {l_name} created")
print ('\n')
elif short_code == 'dc':
if display_contacts():
print("Here is a list of all your contacts")
print('\n')
for contact in display_contacts():
print(f"{contact.first_name} {contact.last_name} .....{contact.phone_number}")
print('\n')
else:
print('\n')
print("You dont seem to have any contacts saved yet")
print('\n')
elif short_code == 'fc':
print("Enter the number you want to search for")
search_number = input()
if check_existing_contacts(search_number):
search_contact = find_contact(search_number)
print(f"{search_contact.first_name} {search_contact.last_name}")
print('-' * 20)
print(f"Phone number.......{search_contact.phone_number}")
print(f"Email address.......{search_contact.email}")
else:
print("That contact does not exist")
elif short_code == "ex":
print("Bye .......")
break
else:
print("I really didn't get that. Please use the short codes")
if __name__ == '__main__':
main() | 0.300335 | 0.118615 |
from __future__ import print_function
import argparse
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from datasets import PartDataset
from pointnet import PointNetDenseCls
import torch.nn.functional as F
if torch.cuda.is_available():
import torch.backends.cudnn as cudnn
parser = argparse.ArgumentParser()
parser.add_argument('--batchSize', type=int, default=32, help='input batch size')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--nepoch', type=int, default=25, help='number of epochs to train for')
parser.add_argument('--outf', type=str, default='seg', help='output folder')
parser.add_argument('--model', type=str, default = '', help='model path')
opt = parser.parse_args()
print (opt)
opt.manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
dataset = PartDataset(root = 'shapenetcore_partanno_segmentation_benchmark_v0', classification = False, class_choice = ['Chair'])
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
shuffle=True, num_workers=int(opt.workers))
test_dataset = PartDataset(root = 'shapenetcore_partanno_segmentation_benchmark_v0', classification = False, class_choice = ['Chair'], train = False)
testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=opt.batchSize,
shuffle=True, num_workers=int(opt.workers))
print(len(dataset), len(test_dataset))
num_classes = dataset.num_seg_classes
print('classes', num_classes)
try:
os.makedirs(opt.outf)
except OSError:
pass
blue = lambda x:'\033[94m' + x + '\033[0m'
classifier = PointNetDenseCls(k = num_classes)
if opt.model != '':
classifier.load_state_dict(torch.load(opt.model))
optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
if torch.cuda.is_available():
classifier.cuda()
num_batch = len(dataset)/opt.batchSize
for epoch in range(opt.nepoch):
for i, data in enumerate(dataloader, 0):
points, target = data
points, target = Variable(points), Variable(target)
points = points.transpose(2,1)
if torch.cuda.is_available():
points, target = points.cuda(), target.cuda()
optimizer.zero_grad()
classifier = classifier.train()
pred, _ = classifier(points)
pred = pred.view(-1, num_classes)
target = target.view(-1,1)[:,0] - 1
#print(pred.size(), target.size())
loss = F.nll_loss(pred, target)
loss.backward()
optimizer.step()
pred_choice = pred.data.max(1)[1]
correct = pred_choice.eq(target.data).cpu().sum()
print('[%d: %d/%d] train loss: %f accuracy: %f' %(epoch, i, num_batch, loss.item(), correct.item()/float(opt.batchSize * 2500)))
if i % 10 == 0:
j, data = next(enumerate(testdataloader, 0))
points, target = data
points, target = Variable(points), Variable(target)
points = points.transpose(2,1)
if torch.cuda.is_available():
points, target = points.cuda(), target.cuda()
classifier = classifier.eval()
pred, _ = classifier(points)
pred = pred.view(-1, num_classes)
target = target.view(-1,1)[:,0] - 1
loss = F.nll_loss(pred, target)
pred_choice = pred.data.max(1)[1]
correct = pred_choice.eq(target.data).cpu().sum()
print('[%d: %d/%d] %s loss: %f accuracy: %f' %(epoch, i, num_batch, blue('test'), loss.item(), correct.item()/float(opt.batchSize * 2500)))
torch.save(classifier.state_dict(), '%s/seg_model_%d.pth' % (opt.outf, epoch)) | train_segmentation.py | from __future__ import print_function
import argparse
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from datasets import PartDataset
from pointnet import PointNetDenseCls
import torch.nn.functional as F
if torch.cuda.is_available():
import torch.backends.cudnn as cudnn
parser = argparse.ArgumentParser()
parser.add_argument('--batchSize', type=int, default=32, help='input batch size')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--nepoch', type=int, default=25, help='number of epochs to train for')
parser.add_argument('--outf', type=str, default='seg', help='output folder')
parser.add_argument('--model', type=str, default = '', help='model path')
opt = parser.parse_args()
print (opt)
opt.manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
dataset = PartDataset(root = 'shapenetcore_partanno_segmentation_benchmark_v0', classification = False, class_choice = ['Chair'])
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
shuffle=True, num_workers=int(opt.workers))
test_dataset = PartDataset(root = 'shapenetcore_partanno_segmentation_benchmark_v0', classification = False, class_choice = ['Chair'], train = False)
testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=opt.batchSize,
shuffle=True, num_workers=int(opt.workers))
print(len(dataset), len(test_dataset))
num_classes = dataset.num_seg_classes
print('classes', num_classes)
try:
os.makedirs(opt.outf)
except OSError:
pass
blue = lambda x:'\033[94m' + x + '\033[0m'
classifier = PointNetDenseCls(k = num_classes)
if opt.model != '':
classifier.load_state_dict(torch.load(opt.model))
optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
if torch.cuda.is_available():
classifier.cuda()
num_batch = len(dataset)/opt.batchSize
for epoch in range(opt.nepoch):
for i, data in enumerate(dataloader, 0):
points, target = data
points, target = Variable(points), Variable(target)
points = points.transpose(2,1)
if torch.cuda.is_available():
points, target = points.cuda(), target.cuda()
optimizer.zero_grad()
classifier = classifier.train()
pred, _ = classifier(points)
pred = pred.view(-1, num_classes)
target = target.view(-1,1)[:,0] - 1
#print(pred.size(), target.size())
loss = F.nll_loss(pred, target)
loss.backward()
optimizer.step()
pred_choice = pred.data.max(1)[1]
correct = pred_choice.eq(target.data).cpu().sum()
print('[%d: %d/%d] train loss: %f accuracy: %f' %(epoch, i, num_batch, loss.item(), correct.item()/float(opt.batchSize * 2500)))
if i % 10 == 0:
j, data = next(enumerate(testdataloader, 0))
points, target = data
points, target = Variable(points), Variable(target)
points = points.transpose(2,1)
if torch.cuda.is_available():
points, target = points.cuda(), target.cuda()
classifier = classifier.eval()
pred, _ = classifier(points)
pred = pred.view(-1, num_classes)
target = target.view(-1,1)[:,0] - 1
loss = F.nll_loss(pred, target)
pred_choice = pred.data.max(1)[1]
correct = pred_choice.eq(target.data).cpu().sum()
print('[%d: %d/%d] %s loss: %f accuracy: %f' %(epoch, i, num_batch, blue('test'), loss.item(), correct.item()/float(opt.batchSize * 2500)))
torch.save(classifier.state_dict(), '%s/seg_model_%d.pth' % (opt.outf, epoch)) | 0.53777 | 0.249493 |
import os
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
import numpy as np
import pandas as pd
from scipy import exp
from scipy.linalg import eigh
from scipy.spatial.distance import (
pdist,
squareform,
)
from sklearn.cross_validation import train_test_split
from sklearn.datasets import (
make_circles,
make_moons,
)
from sklearn.decomposition import (
KernelPCA,
PCA,
)
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from visualization import plot_decision_regions
def plot_manual_lda_transformation(X, y):
np.set_printoptions(precision=4)
print("Class label distribution: %s" % np.bincount(y)[1:])
num_features = 13
mean_vectors = []
for label in range(1, 4):
mean_vectors.append(
np.mean(X[y == label], axis=0).reshape(num_features, 1)
)
print("MV %s: %s\n" % (label, mean_vectors[label-1].T))
mean_overall = np.mean(X, axis=0).reshape(num_features, 1)
S_W_unscaled = np.zeros((num_features, num_features))
S_W = np.zeros((num_features, num_features))
S_B = np.zeros((num_features, num_features))
for label, mean_vector in zip(range(1, 4), mean_vectors):
class_scatter = np.zeros((num_features, num_features))
for row in X[y == label]:
row = row.reshape(num_features, 1)
class_scatter += (row - mean_vector).dot((row - mean_vector).T)
S_W_unscaled += class_scatter
S_W += np.cov(X[y == label].T)
n = X[y == label, :].shape[0]
S_B += n * (mean_vector - mean_overall).dot(
(mean_vector - mean_overall).T
)
print(
"Unscaled within-class scatter matrix: %sx%s" %
(S_W_unscaled.shape[0], S_W_unscaled.shape[1])
)
print(
"Scaled within-class scatter matrix: %sx%s" %
(S_W.shape[0], S_W.shape[1])
)
print(
"Between-class scatter matrix: %sx%s" %
(S_B.shape[0], S_B.shape[1])
)
eigenvalues, eigenvectors = np.linalg.eig(np.linalg.inv(S_W).dot(S_B))
eigenpairs = [
(np.abs(eigenvalue), eigenvectors[:, index])
for index, eigenvalue
in enumerate(eigenvalues)
]
eigenpairs = sorted(eigenpairs, key=lambda k: k[0], reverse=True)
print("Eigenvalues in decreasing order: \n")
for eigenpair in eigenpairs:
print(eigenpair[0])
tot = sum(eigenvalues.real)
discr = [i/tot for i in map(lambda p: p[0], eigenpairs)]
cum_discr = np.cumsum(discr)
plt.bar(
range(1, 14),
discr,
alpha=0.5,
align='center',
label='individual "discriminability"',
)
plt.step(
range(1, 14),
cum_discr,
where='mid',
label='cumulative "discriminability"',
)
plt.ylabel('"discriminability" ratio')
plt.xlabel('Linear Discriminants')
plt.ylim([-0.1, 1.1])
plt.legend(loc='best')
plt.show()
w = np.hstack((
eigenpairs[0][1][:, np.newaxis].real,
eigenpairs[1][1][:, np.newaxis].real,
))
print('Matrix W:\n', w)
X_lda = X.dot(w)
colors = ['r', 'b', 'g']
markers = ['s', 'x', 'o']
for label, color, marker in zip(np.unique(y), colors, markers):
plt.scatter(
X_lda[y == label, 0],
X_lda[y == label, 1],
c=color,
label=label,
marker=marker,
)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='upper right')
plt.show()
def plot_sklearn_lda_with_lr(X_train, X_test, y_train, y_test):
lda = LDA(n_components=2)
X_train_lda = lda.fit_transform(X_train, y_train)
lr = LogisticRegression()
lr = lr.fit(X_train_lda, y_train)
plot_decision_regions(X_train_lda, y_train, classifier=lr)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='lower left')
plt.show()
X_test_lda = lda.transform(X_test)
plot_decision_regions(X_test_lda, y_test, classifier=lr)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='lower left')
plt.show()
def plot_manual_pca_transformation(X, y):
cov_mat = np.cov(X.T)
eigenvalues, eigenvectors = np.linalg.eig(cov_mat)
print("\nEigenvalues \n%s" % eigenvalues)
tot = sum(eigenvalues)
var_exp = [i/tot for i in sorted(eigenvalues, reverse=True)]
cum_var_exp = np.cumsum(var_exp)
plt.bar(
range(1, 14),
var_exp,
alpha=0.5,
align='center',
label='individual explained variance',
)
plt.step(
range(1, 14),
cum_var_exp,
where='mid',
label='cumulative explained variance',
)
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal components')
plt.legend(loc='best')
plt.show()
eigenpairs = [
(np.abs(eigenvalue), eigenvectors[:, index])
for index, eigenvalue
in enumerate(eigenvalues)
]
eigenpairs.sort(reverse=True)
w = np.hstack((
eigenpairs[0][1][:, np.newaxis],
eigenpairs[1][1][:, np.newaxis],
))
print('Matrix W:\n%s\n' % w)
X_pca = X.dot(w)
colors = ['r', 'b', 'g']
markers = ['s', 'x', 'o']
for label, color, marker in zip(np.unique(y), colors, markers):
plt.scatter(
X_pca[y == label, 0],
X_pca[y == label, 1],
c=color,
label=label,
marker=marker,
)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.show()
print(X_pca[0])
def plot_sklearn_pca_with_lr(X_train, X_test, y_train, y_test):
pca = PCA()
pca.fit(X_train)
print(pca.explained_variance_ratio_)
plt.bar(
range(1, 14),
pca.explained_variance_ratio_,
alpha=0.5,
align='center',
)
plt.step(
range(1, 14),
np.cumsum(pca.explained_variance_ratio_),
where='mid',
)
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal components')
plt.show()
pca = PCA(n_components=2)
X_train_pca = pca.fit_transform(X_train)
X_test_pca = pca.transform(X_test)
plt.scatter(X_train_pca[:, 0], X_train_pca[:, 1])
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.show()
lr = LogisticRegression()
lr = lr.fit(X_train_pca, y_train)
plot_decision_regions(X_train_pca, y_train, classifier=lr)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.show()
plot_decision_regions(X_test_pca, y_test, classifier=lr)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.show()
def get_standardized_wine_data():
df = pd.read_csv(os.path.join('datasets', 'wine.data'), header=None)
df.columns = [
'Class label', 'Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash',
'Magnesium', 'Total phenols', 'Flavanoids', 'Nonflavanoid phenols',
'Proanthocyanins', 'Color intensity', 'Hue',
'OD280/OD315 of diluted wines', 'Proline',
]
X = df.iloc[:, 1:].values
y = df.iloc[:, 0].values
X_train, X_test, y_train, y_test = train_test_split(
X,
y,
test_size=0.3,
random_state=0,
)
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
X_test_std = sc.transform(X_test)
return X_train_std, X_test_std, y_train, y_test
def rbf_kernel_pca(X, gamma, n_components):
sq_dists = pdist(X, 'sqeuclidean')
mat_sq_dists = squareform(sq_dists)
K = exp(-gamma * mat_sq_dists)
N = K.shape[0]
one_n = np.ones((N, N)) / N
K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
eigenvalues, eigenvectors = eigh(K)
alphas = np.column_stack((
eigenvectors[:, -i] for i in range(1, n_components+1)
))
lambdas = [eigenvalues[-i] for i in range(1, n_components+1)]
return alphas, lambdas
def plot_pca_for_data(data_type, n_samples):
if data_type == 'half_circles':
X, y = make_moons(n_samples=n_samples, random_state=123)
format_x_axis = True
elif data_type == 'concentric_circles':
X, y = make_circles(
n_samples=n_samples,
random_state=123,
noise=0.1,
factor=0.2,
)
format_x_axis = False
plt.scatter(
X[y == 0, 0],
X[y == 0, 1],
color='red',
marker='^',
alpha=0.5,
)
plt.scatter(
X[y == 1, 0],
X[y == 1, 1],
color='blue',
marker='o',
alpha=0.5,
)
plt.show()
X_spca = PCA(n_components=2).fit_transform(X)
X_kpca, _ = rbf_kernel_pca(X, gamma=15, n_components=2)
X_skernpca = KernelPCA(
n_components=2,
kernel='rbf',
gamma=15,
).fit_transform(X)
for index, X_pca in enumerate((X_spca, X_kpca, X_skernpca)):
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(
X_pca[y == 0, 0],
X_pca[y == 0, 1],
color='red',
marker='^',
alpha=0.5,
)
ax[0].scatter(
X_pca[y == 1, 0],
X_pca[y == 1, 1],
color='blue',
marker='o',
alpha=0.5,
)
ax[1].scatter(
X_pca[y == 0, 0],
np.zeros((n_samples/2, 1))+0.02,
color='red',
marker='^',
alpha=0.5,
)
ax[1].scatter(
X_pca[y == 1, 0],
np.zeros((n_samples/2, 1))-0.02,
color='blue',
marker='o',
alpha=0.5,
)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
if format_x_axis and (index == 1):
ax[0].xaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
ax[1].xaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
plt.show()
def project_x(x_new, X, gamma, alphas, lambdas):
pair_dist = np.array([np.sum((x_new - row)**2) for row in X])
k = np.exp(-gamma * pair_dist)
return k.dot(alphas / lambdas)
def plot_new_data_with_kernel_pca():
X, y = make_moons(n_samples=100, random_state=123)
alphas, lambdas = rbf_kernel_pca(X, gamma=15, n_components=1)
x_new = X[25]
print("x_new: %s" % x_new)
x_proj = alphas[25]
print("x_proj: %s" % x_proj)
x_reproj = project_x(x_new, X, gamma=15, alphas=alphas, lambdas=lambdas)
print("x_reproj: %s" % x_reproj)
plt.scatter(
alphas[y == 0, 0],
np.zeros(50),
color='red',
marker='^',
alpha=0.5,
)
plt.scatter(
alphas[y == 1, 0],
np.zeros(50),
color='blue',
marker='o',
alpha=0.5,
)
plt.scatter(
x_proj,
0,
color='black',
label='original projection of point X[25]',
marker='^',
s=100,
)
plt.scatter(
x_reproj,
0,
color='green',
label='remapped point X[25]',
marker='x',
s=500,
)
plt.legend(scatterpoints=1)
plt.show()
if __name__ == '__main__':
X_train, X_test, y_train, y_test = get_standardized_wine_data()
# plot_manual_pca_transformation(X_train, y_train)
# plot_sklearn_pca_with_lr(X_train, X_test, y_train, y_test)
# plot_manual_lda_transformation(X_train, y_train)
# plot_sklearn_lda_with_lr(X_train, X_test, y_train, y_test)
plot_pca_for_data(data_type='half_circles', n_samples=100)
# plot_pca_for_data(data_type='concentric_circles', n_samples=1000)
# plot_new_data_with_kernel_pca() | chapter_5.py | import os
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
import numpy as np
import pandas as pd
from scipy import exp
from scipy.linalg import eigh
from scipy.spatial.distance import (
pdist,
squareform,
)
from sklearn.cross_validation import train_test_split
from sklearn.datasets import (
make_circles,
make_moons,
)
from sklearn.decomposition import (
KernelPCA,
PCA,
)
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from visualization import plot_decision_regions
def plot_manual_lda_transformation(X, y):
np.set_printoptions(precision=4)
print("Class label distribution: %s" % np.bincount(y)[1:])
num_features = 13
mean_vectors = []
for label in range(1, 4):
mean_vectors.append(
np.mean(X[y == label], axis=0).reshape(num_features, 1)
)
print("MV %s: %s\n" % (label, mean_vectors[label-1].T))
mean_overall = np.mean(X, axis=0).reshape(num_features, 1)
S_W_unscaled = np.zeros((num_features, num_features))
S_W = np.zeros((num_features, num_features))
S_B = np.zeros((num_features, num_features))
for label, mean_vector in zip(range(1, 4), mean_vectors):
class_scatter = np.zeros((num_features, num_features))
for row in X[y == label]:
row = row.reshape(num_features, 1)
class_scatter += (row - mean_vector).dot((row - mean_vector).T)
S_W_unscaled += class_scatter
S_W += np.cov(X[y == label].T)
n = X[y == label, :].shape[0]
S_B += n * (mean_vector - mean_overall).dot(
(mean_vector - mean_overall).T
)
print(
"Unscaled within-class scatter matrix: %sx%s" %
(S_W_unscaled.shape[0], S_W_unscaled.shape[1])
)
print(
"Scaled within-class scatter matrix: %sx%s" %
(S_W.shape[0], S_W.shape[1])
)
print(
"Between-class scatter matrix: %sx%s" %
(S_B.shape[0], S_B.shape[1])
)
eigenvalues, eigenvectors = np.linalg.eig(np.linalg.inv(S_W).dot(S_B))
eigenpairs = [
(np.abs(eigenvalue), eigenvectors[:, index])
for index, eigenvalue
in enumerate(eigenvalues)
]
eigenpairs = sorted(eigenpairs, key=lambda k: k[0], reverse=True)
print("Eigenvalues in decreasing order: \n")
for eigenpair in eigenpairs:
print(eigenpair[0])
tot = sum(eigenvalues.real)
discr = [i/tot for i in map(lambda p: p[0], eigenpairs)]
cum_discr = np.cumsum(discr)
plt.bar(
range(1, 14),
discr,
alpha=0.5,
align='center',
label='individual "discriminability"',
)
plt.step(
range(1, 14),
cum_discr,
where='mid',
label='cumulative "discriminability"',
)
plt.ylabel('"discriminability" ratio')
plt.xlabel('Linear Discriminants')
plt.ylim([-0.1, 1.1])
plt.legend(loc='best')
plt.show()
w = np.hstack((
eigenpairs[0][1][:, np.newaxis].real,
eigenpairs[1][1][:, np.newaxis].real,
))
print('Matrix W:\n', w)
X_lda = X.dot(w)
colors = ['r', 'b', 'g']
markers = ['s', 'x', 'o']
for label, color, marker in zip(np.unique(y), colors, markers):
plt.scatter(
X_lda[y == label, 0],
X_lda[y == label, 1],
c=color,
label=label,
marker=marker,
)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='upper right')
plt.show()
def plot_sklearn_lda_with_lr(X_train, X_test, y_train, y_test):
lda = LDA(n_components=2)
X_train_lda = lda.fit_transform(X_train, y_train)
lr = LogisticRegression()
lr = lr.fit(X_train_lda, y_train)
plot_decision_regions(X_train_lda, y_train, classifier=lr)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='lower left')
plt.show()
X_test_lda = lda.transform(X_test)
plot_decision_regions(X_test_lda, y_test, classifier=lr)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='lower left')
plt.show()
def plot_manual_pca_transformation(X, y):
cov_mat = np.cov(X.T)
eigenvalues, eigenvectors = np.linalg.eig(cov_mat)
print("\nEigenvalues \n%s" % eigenvalues)
tot = sum(eigenvalues)
var_exp = [i/tot for i in sorted(eigenvalues, reverse=True)]
cum_var_exp = np.cumsum(var_exp)
plt.bar(
range(1, 14),
var_exp,
alpha=0.5,
align='center',
label='individual explained variance',
)
plt.step(
range(1, 14),
cum_var_exp,
where='mid',
label='cumulative explained variance',
)
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal components')
plt.legend(loc='best')
plt.show()
eigenpairs = [
(np.abs(eigenvalue), eigenvectors[:, index])
for index, eigenvalue
in enumerate(eigenvalues)
]
eigenpairs.sort(reverse=True)
w = np.hstack((
eigenpairs[0][1][:, np.newaxis],
eigenpairs[1][1][:, np.newaxis],
))
print('Matrix W:\n%s\n' % w)
X_pca = X.dot(w)
colors = ['r', 'b', 'g']
markers = ['s', 'x', 'o']
for label, color, marker in zip(np.unique(y), colors, markers):
plt.scatter(
X_pca[y == label, 0],
X_pca[y == label, 1],
c=color,
label=label,
marker=marker,
)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.show()
print(X_pca[0])
def plot_sklearn_pca_with_lr(X_train, X_test, y_train, y_test):
pca = PCA()
pca.fit(X_train)
print(pca.explained_variance_ratio_)
plt.bar(
range(1, 14),
pca.explained_variance_ratio_,
alpha=0.5,
align='center',
)
plt.step(
range(1, 14),
np.cumsum(pca.explained_variance_ratio_),
where='mid',
)
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal components')
plt.show()
pca = PCA(n_components=2)
X_train_pca = pca.fit_transform(X_train)
X_test_pca = pca.transform(X_test)
plt.scatter(X_train_pca[:, 0], X_train_pca[:, 1])
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.show()
lr = LogisticRegression()
lr = lr.fit(X_train_pca, y_train)
plot_decision_regions(X_train_pca, y_train, classifier=lr)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.show()
plot_decision_regions(X_test_pca, y_test, classifier=lr)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.show()
def get_standardized_wine_data():
df = pd.read_csv(os.path.join('datasets', 'wine.data'), header=None)
df.columns = [
'Class label', 'Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash',
'Magnesium', 'Total phenols', 'Flavanoids', 'Nonflavanoid phenols',
'Proanthocyanins', 'Color intensity', 'Hue',
'OD280/OD315 of diluted wines', 'Proline',
]
X = df.iloc[:, 1:].values
y = df.iloc[:, 0].values
X_train, X_test, y_train, y_test = train_test_split(
X,
y,
test_size=0.3,
random_state=0,
)
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
X_test_std = sc.transform(X_test)
return X_train_std, X_test_std, y_train, y_test
def rbf_kernel_pca(X, gamma, n_components):
sq_dists = pdist(X, 'sqeuclidean')
mat_sq_dists = squareform(sq_dists)
K = exp(-gamma * mat_sq_dists)
N = K.shape[0]
one_n = np.ones((N, N)) / N
K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
eigenvalues, eigenvectors = eigh(K)
alphas = np.column_stack((
eigenvectors[:, -i] for i in range(1, n_components+1)
))
lambdas = [eigenvalues[-i] for i in range(1, n_components+1)]
return alphas, lambdas
def plot_pca_for_data(data_type, n_samples):
if data_type == 'half_circles':
X, y = make_moons(n_samples=n_samples, random_state=123)
format_x_axis = True
elif data_type == 'concentric_circles':
X, y = make_circles(
n_samples=n_samples,
random_state=123,
noise=0.1,
factor=0.2,
)
format_x_axis = False
plt.scatter(
X[y == 0, 0],
X[y == 0, 1],
color='red',
marker='^',
alpha=0.5,
)
plt.scatter(
X[y == 1, 0],
X[y == 1, 1],
color='blue',
marker='o',
alpha=0.5,
)
plt.show()
X_spca = PCA(n_components=2).fit_transform(X)
X_kpca, _ = rbf_kernel_pca(X, gamma=15, n_components=2)
X_skernpca = KernelPCA(
n_components=2,
kernel='rbf',
gamma=15,
).fit_transform(X)
for index, X_pca in enumerate((X_spca, X_kpca, X_skernpca)):
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(
X_pca[y == 0, 0],
X_pca[y == 0, 1],
color='red',
marker='^',
alpha=0.5,
)
ax[0].scatter(
X_pca[y == 1, 0],
X_pca[y == 1, 1],
color='blue',
marker='o',
alpha=0.5,
)
ax[1].scatter(
X_pca[y == 0, 0],
np.zeros((n_samples/2, 1))+0.02,
color='red',
marker='^',
alpha=0.5,
)
ax[1].scatter(
X_pca[y == 1, 0],
np.zeros((n_samples/2, 1))-0.02,
color='blue',
marker='o',
alpha=0.5,
)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
if format_x_axis and (index == 1):
ax[0].xaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
ax[1].xaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
plt.show()
def project_x(x_new, X, gamma, alphas, lambdas):
pair_dist = np.array([np.sum((x_new - row)**2) for row in X])
k = np.exp(-gamma * pair_dist)
return k.dot(alphas / lambdas)
def plot_new_data_with_kernel_pca():
X, y = make_moons(n_samples=100, random_state=123)
alphas, lambdas = rbf_kernel_pca(X, gamma=15, n_components=1)
x_new = X[25]
print("x_new: %s" % x_new)
x_proj = alphas[25]
print("x_proj: %s" % x_proj)
x_reproj = project_x(x_new, X, gamma=15, alphas=alphas, lambdas=lambdas)
print("x_reproj: %s" % x_reproj)
plt.scatter(
alphas[y == 0, 0],
np.zeros(50),
color='red',
marker='^',
alpha=0.5,
)
plt.scatter(
alphas[y == 1, 0],
np.zeros(50),
color='blue',
marker='o',
alpha=0.5,
)
plt.scatter(
x_proj,
0,
color='black',
label='original projection of point X[25]',
marker='^',
s=100,
)
plt.scatter(
x_reproj,
0,
color='green',
label='remapped point X[25]',
marker='x',
s=500,
)
plt.legend(scatterpoints=1)
plt.show()
if __name__ == '__main__':
X_train, X_test, y_train, y_test = get_standardized_wine_data()
# plot_manual_pca_transformation(X_train, y_train)
# plot_sklearn_pca_with_lr(X_train, X_test, y_train, y_test)
# plot_manual_lda_transformation(X_train, y_train)
# plot_sklearn_lda_with_lr(X_train, X_test, y_train, y_test)
plot_pca_for_data(data_type='half_circles', n_samples=100)
# plot_pca_for_data(data_type='concentric_circles', n_samples=1000)
# plot_new_data_with_kernel_pca() | 0.586641 | 0.528412 |
# Standard libraries
import os
import sys
import logging
import inspect
import hashlib
from pathlib import Path
from multiprocessing import cpu_count, Pool
# Third party libraries
import joblib
import numpy as np
import pandas as pd
# User defined libraries
from .file import mkdir
class ProgressBar:
def __init__(self, n_batch, bar_len=80):
"""Brief description.
Detailed description.
Parameters
----------
bar_len: int
The length you want to display your bar.
n_batch: int
Total rounds to iterate.
Returns
-------
None
Examples
--------
import time
progressBar = ProgressBar(100)
for i in range(100):
progressBar.step(i)
time.sleep(0.1)
"""
self.bar_len = bar_len
self.progress_used = 0
self.progress_remanent = bar_len
self.n_batch = n_batch
def step(self, i):
self.progress_used = int(round(i * self.bar_len / self.n_batch))
self.progress_remanent = self.bar_len - self.progress_used
sys.stdout.write(
"\r"
+ ">" * self.progress_used
+ "Epoch Progress: "
+ "{:.2%}".format((i) / self.n_batch)
+ "=" * self.progress_remanent
)
sys.stdout.flush()
def csv_2_pickle(paths):
""" Convert csv files into pickle format files.
Parameters
----------
paths: list
Csv file paths.
Examples
--------
PATH = Path("data/raw/")
CSV = [str(i) for i in list(PATH.glob("*.csv"))]
csv_2_pickle(CSV)
"""
PATH = Path("data/raw/")
paths = [str(i) for i in list(PATH.glob("*.csv"))]
for path in paths:
data = pd.read_csv(path)
data.columns = list(map(str.lower, data.columns))
joblib.dump(data, path.split("csv")[0] + "p")
def generate_md5_token_from_dict(input_params):
""" Generate distinct md5 token from a dictionary.
初衷是为了将输入一个函数的输入参数内容编码为独一无二的md5编码, 方便在其变动的时候进行检测.
Parameters
----------
input_params : dict
Dictionary to be encoded.
Returns
-------
str
Encoded md5 token from input_params.
"""
input_params_token = ""
# print(">>"*88)
# print(input_params)
# print(">>"*88)
for v in list(input_params["kwargs"].values()) + list(input_params["args"]) + list(input_params["feature_path"]):
if type(v) in [pd.DataFrame, pd.Series]:
input_params_token += "pandas_" + str(v.memory_usage().sum()) + "_" + str(v.shape) + "_"
elif type(v) in [np.ndarray]:
input_params_token += "numpy_" + str(v.mean()) + "_" + str(v.shape) + "_"
elif type(v) in [list, tuple, set]:
input_params_token += "list_" + str(v) + "_"
elif type(v) == str:
input_params_token += "str_" + v + "_"
elif type(v) in [int, float]:
input_params_token += "numeric_" + str(v) + "_"
elif type(v) == bool:
input_params_token += "bool_" + str(v) + "_"
elif type(v) == dict:
input_params_token += "dict_" + str(v) + "_"
else:
raise "Add type {}".format(type(v))
m = hashlib.md5(input_params_token.encode("gb2312")).hexdigest()
return m
def cache(feature_path="data/features/"):
# https://foofish.net/python-decorator.html
def decorator(func):
def wrapper(*args, **kwargs):
# =========================================================================================================
# 将输入所有输入参数的内容编码为md5, 以后每次检测是否变动, 缺陷(pandas类型)只检查了[内存]和[形状]).
# =========================================================================================================
input_params = locals().copy()
feature_code_input_params_this_time = generate_md5_token_from_dict(input_params)
# ==============================================================================================================
# 检测保存数据和代码的文件夹是否存在, 如不存在则进行新建.
# ==============================================================================================================
function_name = func.__name__
data_folder_path = feature_path + "data/"
code_folder_path = feature_path + "code/"
mkdir(data_folder_path)
mkdir(code_folder_path)
# 生成数据文件路径
feature_data_path = data_folder_path + function_name + "_" + feature_code_input_params_this_time + ".p"
is_data_cached = os.path.exists(feature_data_path)
# 生成代码文件路径
feature_code_path = code_folder_path + function_name + "_" + feature_code_input_params_this_time + ".p"
is_code_cached = os.path.exists(feature_code_path)
# 获取本次代码的内容
feature_code_this_time = inspect.getsource(func)
# 如果探测变动代码目录下没有 "函数代码文件" --> 重新运行一次这个函数, 并存储所有校正信息.
if not is_code_cached:
print("{} code file is not exist!".format(func.__name__))
feature_data = func(*args, **kwargs)
joblib.dump(feature_data, feature_data_path)
joblib.dump(feature_code_this_time, feature_code_path)
return feature_data
# 如果 "函数代码文件" 变动 --> 重新运行一次这个函数, 并存储所有校正信息.
feature_code_last_time = joblib.load(feature_code_path)
flag_code_changed = feature_code_this_time != feature_code_last_time
if flag_code_changed:
print("{} code file has been changed!".format(func.__name__))
feature_data = func(*args, **kwargs)
joblib.dump(feature_data, feature_data_path)
joblib.dump(feature_code_this_time, feature_code_path)
return feature_data
# 如果 "操作对象生成数据"不存在 --> 存储这次 "操作对象代码" 并生成 "操作对象生成数据"
if not is_data_cached:
print("{} feature file is not exist!".format(func.__name__))
feature_data = func(*args, **kwargs)
joblib.dump(feature_data, feature_data_path)
joblib.dump(feature_code_this_time, feature_code_path)
return feature_data
feature_data = joblib.load(feature_data_path)
print("Restore feature from {}".format(feature_data_path))
return feature_data
return wrapper
return decorator
def parallelize(df, func):
""" Split data into max core partitions and execute func in parallel.
https://www.machinelearningplus.com/python/parallel-processing-python/
Parameters
----------
df : pandas Dataframe
func : any functions
Returns
-------
data : pandas Dataframe
Returned dataframe of func.
"""
cores = cpu_count()
data_split = np.array_split(df, cores)
pool = Pool(cores)
data = pd.concat(pool.map(func, data_split), ignore_index=1)
pool.close()
pool.join()
return data
def get_logger(logger_name, filename=None):
""" Logger that can print message to console and file.
Reference: https://juejin.im/post/5bc2bd3a5188255c94465d31
Parameters
----------
logger_name : str
Logger name, can be any string.
filename : str, optional default None
Path of log file
Returns
-------
logger : RootLogger
Python logger instance.
"""
# NOTSET(0)、DEBUG(10)、INFO(20)、WARNING(30)、ERROR(40)、CRITICAL(50)
logger = logging.getLogger(logger_name)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"%(asctime)s %(filename)s : %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %A %H:%M:%S"
)
# Output to console
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
# Output to file Add handler to logger, left log can to print to both console and file.
if filename is None:
file_handler = logging.FileHandler(filename=filename, mode="w")
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger | tabular_buddy/utils/helper.py |
# Standard libraries
import os
import sys
import logging
import inspect
import hashlib
from pathlib import Path
from multiprocessing import cpu_count, Pool
# Third party libraries
import joblib
import numpy as np
import pandas as pd
# User defined libraries
from .file import mkdir
class ProgressBar:
def __init__(self, n_batch, bar_len=80):
"""Brief description.
Detailed description.
Parameters
----------
bar_len: int
The length you want to display your bar.
n_batch: int
Total rounds to iterate.
Returns
-------
None
Examples
--------
import time
progressBar = ProgressBar(100)
for i in range(100):
progressBar.step(i)
time.sleep(0.1)
"""
self.bar_len = bar_len
self.progress_used = 0
self.progress_remanent = bar_len
self.n_batch = n_batch
def step(self, i):
self.progress_used = int(round(i * self.bar_len / self.n_batch))
self.progress_remanent = self.bar_len - self.progress_used
sys.stdout.write(
"\r"
+ ">" * self.progress_used
+ "Epoch Progress: "
+ "{:.2%}".format((i) / self.n_batch)
+ "=" * self.progress_remanent
)
sys.stdout.flush()
def csv_2_pickle(paths):
""" Convert csv files into pickle format files.
Parameters
----------
paths: list
Csv file paths.
Examples
--------
PATH = Path("data/raw/")
CSV = [str(i) for i in list(PATH.glob("*.csv"))]
csv_2_pickle(CSV)
"""
PATH = Path("data/raw/")
paths = [str(i) for i in list(PATH.glob("*.csv"))]
for path in paths:
data = pd.read_csv(path)
data.columns = list(map(str.lower, data.columns))
joblib.dump(data, path.split("csv")[0] + "p")
def generate_md5_token_from_dict(input_params):
""" Generate distinct md5 token from a dictionary.
初衷是为了将输入一个函数的输入参数内容编码为独一无二的md5编码, 方便在其变动的时候进行检测.
Parameters
----------
input_params : dict
Dictionary to be encoded.
Returns
-------
str
Encoded md5 token from input_params.
"""
input_params_token = ""
# print(">>"*88)
# print(input_params)
# print(">>"*88)
for v in list(input_params["kwargs"].values()) + list(input_params["args"]) + list(input_params["feature_path"]):
if type(v) in [pd.DataFrame, pd.Series]:
input_params_token += "pandas_" + str(v.memory_usage().sum()) + "_" + str(v.shape) + "_"
elif type(v) in [np.ndarray]:
input_params_token += "numpy_" + str(v.mean()) + "_" + str(v.shape) + "_"
elif type(v) in [list, tuple, set]:
input_params_token += "list_" + str(v) + "_"
elif type(v) == str:
input_params_token += "str_" + v + "_"
elif type(v) in [int, float]:
input_params_token += "numeric_" + str(v) + "_"
elif type(v) == bool:
input_params_token += "bool_" + str(v) + "_"
elif type(v) == dict:
input_params_token += "dict_" + str(v) + "_"
else:
raise "Add type {}".format(type(v))
m = hashlib.md5(input_params_token.encode("gb2312")).hexdigest()
return m
def cache(feature_path="data/features/"):
# https://foofish.net/python-decorator.html
def decorator(func):
def wrapper(*args, **kwargs):
# =========================================================================================================
# 将输入所有输入参数的内容编码为md5, 以后每次检测是否变动, 缺陷(pandas类型)只检查了[内存]和[形状]).
# =========================================================================================================
input_params = locals().copy()
feature_code_input_params_this_time = generate_md5_token_from_dict(input_params)
# ==============================================================================================================
# 检测保存数据和代码的文件夹是否存在, 如不存在则进行新建.
# ==============================================================================================================
function_name = func.__name__
data_folder_path = feature_path + "data/"
code_folder_path = feature_path + "code/"
mkdir(data_folder_path)
mkdir(code_folder_path)
# 生成数据文件路径
feature_data_path = data_folder_path + function_name + "_" + feature_code_input_params_this_time + ".p"
is_data_cached = os.path.exists(feature_data_path)
# 生成代码文件路径
feature_code_path = code_folder_path + function_name + "_" + feature_code_input_params_this_time + ".p"
is_code_cached = os.path.exists(feature_code_path)
# 获取本次代码的内容
feature_code_this_time = inspect.getsource(func)
# 如果探测变动代码目录下没有 "函数代码文件" --> 重新运行一次这个函数, 并存储所有校正信息.
if not is_code_cached:
print("{} code file is not exist!".format(func.__name__))
feature_data = func(*args, **kwargs)
joblib.dump(feature_data, feature_data_path)
joblib.dump(feature_code_this_time, feature_code_path)
return feature_data
# 如果 "函数代码文件" 变动 --> 重新运行一次这个函数, 并存储所有校正信息.
feature_code_last_time = joblib.load(feature_code_path)
flag_code_changed = feature_code_this_time != feature_code_last_time
if flag_code_changed:
print("{} code file has been changed!".format(func.__name__))
feature_data = func(*args, **kwargs)
joblib.dump(feature_data, feature_data_path)
joblib.dump(feature_code_this_time, feature_code_path)
return feature_data
# 如果 "操作对象生成数据"不存在 --> 存储这次 "操作对象代码" 并生成 "操作对象生成数据"
if not is_data_cached:
print("{} feature file is not exist!".format(func.__name__))
feature_data = func(*args, **kwargs)
joblib.dump(feature_data, feature_data_path)
joblib.dump(feature_code_this_time, feature_code_path)
return feature_data
feature_data = joblib.load(feature_data_path)
print("Restore feature from {}".format(feature_data_path))
return feature_data
return wrapper
return decorator
def parallelize(df, func):
""" Split data into max core partitions and execute func in parallel.
https://www.machinelearningplus.com/python/parallel-processing-python/
Parameters
----------
df : pandas Dataframe
func : any functions
Returns
-------
data : pandas Dataframe
Returned dataframe of func.
"""
cores = cpu_count()
data_split = np.array_split(df, cores)
pool = Pool(cores)
data = pd.concat(pool.map(func, data_split), ignore_index=1)
pool.close()
pool.join()
return data
def get_logger(logger_name, filename=None):
""" Logger that can print message to console and file.
Reference: https://juejin.im/post/5bc2bd3a5188255c94465d31
Parameters
----------
logger_name : str
Logger name, can be any string.
filename : str, optional default None
Path of log file
Returns
-------
logger : RootLogger
Python logger instance.
"""
# NOTSET(0)、DEBUG(10)、INFO(20)、WARNING(30)、ERROR(40)、CRITICAL(50)
logger = logging.getLogger(logger_name)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"%(asctime)s %(filename)s : %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %A %H:%M:%S"
)
# Output to console
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
# Output to file Add handler to logger, left log can to print to both console and file.
if filename is None:
file_handler = logging.FileHandler(filename=filename, mode="w")
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger | 0.366817 | 0.120051 |
from tkinter import Toplevel, Frame, BOTH
from tkinter.scrolledtext import ScrolledText
from io import StringIO
import re
import sys
import traceback
class DebugConsole:
def __init__(self, root, title='', locals={}, destroy=None):
self.root = Toplevel(root)
self.root.wm_title("Debug console")
self.root.wm_geometry("480x640")
self.locals = locals
self.title = title
self.indent = 0
self.block = ''
self.pos = None
if destroy is None:
self.root.bind("<Key-Escape>", self._quit)
else:
self.root.bind("<Key-Escape>", destroy)
self.orig_stdout = sys.stdout
sys.stdout = StringIO()
self.orig_stderr = sys.stderr
sys.stderr = StringIO()
self.initFrame()
def _quit(self):
self.root.quit()
self.root.destroy()
def initFrame(self):
self.frame = Frame(self.root)
self.frame.pack(fill=BOTH, expand=True)
self.console = ScrolledText(self.frame,
bg="black", fg='orange', font=('Courier', 15),
insertbackground='orange')
self.console.pack(side="left", expand=True, fill=BOTH)
if self.title:
self.console.insert("insert", '='*(2+len(self.title))+'\n')
self.console.insert("insert", '= '+self.title+'\n')
self.console.insert("insert", '='*(2+len(self.title))+'\n')
self.console.insert("insert", '>')
self.setLineStart()
self.history = []
self.history_index = -1
self.console.bind("<Key>", lambda e: self.insChar(e))
self.console.bind("<Key-Return>", lambda e: self.run())
self.console.bind("<Control-a>", lambda e: self.ctrlA())
self.console.bind("<Control-A>", lambda e: self.ctrlA())
self.console.bind("<Control-u>", lambda e: self.ctrlU())
self.console.bind("<Control-U>", lambda e: self.ctrlU())
self.console.bind("<Control-l>", lambda e: self.ctrlL())
self.console.bind("<Control-L>", lambda e: self.ctrlL())
self.console.bind("<BackSpace>", lambda e: self.delChar())
self.console.bind("<Button-1>", lambda e: self.click())
self.console.bind("<ButtonRelease-1>", lambda e: self.clack())
self.console.bind("<B1-Motion>", self.cmove)
self.console.bind("<ButtonRelease-2>", lambda e: "break")
self.after_id = self.console.after(500, self.monitor)
def setLineStart(self, offset=0):
r, c = self.console.index('end').split('.')
self.lineStart = '{}.{}'.format(int(r)-1, int(c)+1+offset)
def runEnd(self, insert, offset=0, error=None):
self.console.insert('end', '\n'+insert)
sys.stdout = StringIO()
sys.stderr = StringIO()
self.after_id = self.console.after(500, self.monitor)
if error is not None:
raise error
self.setLineStart(offset)
self.console.mark_set('insert', 'end')
self.console.see('end')
return 'break'
def runBlock(self, cmd):
if len(cmd):
if self.indent or cmd.endswith(':'):
self.block += self.indent*' '+cmd+'\n'
if cmd.endswith(':'):
self.indent += 1
dots = '...'*self.indent
return self.runEnd(dots, len(dots)-1)
if self.block:
self.indent -= 1
if self.indent:
dots = '...'*self.indent
return self.runEnd(dots, len(dots)-1)
cmd = self.block
self.block = ''
return cmd
def run(self):
self.console.after_cancel(self.after_id)
self.currentLine = ""
try:
cmd = self.console.get(self.lineStart, 'end').strip()
self.console.replace(self.lineStart, 'end', cmd)
cmd = re.sub(r'\s', ' ', cmd)
self.history.append(cmd)
self.history_index = len(self.history)
cmd = self.runBlock(cmd)
if cmd == "break":
return "break"
if cmd:
locs = locals()
locs.update(self.locals)
# In case we want to override self when debugging.
s = self.locals.get('self', self)
if cmd in locs:
cmd = 'print({})'.format(cmd)
if re.match(r'(import|for|while|if|from) |[a-zA-Z_]\w*\s*=', cmd):
ret = exec(cmd, globals(), locs)
else:
ret = eval(cmd, globals(), locs)
if ret is not None:
print(ret)
# More trickery
self.locals.update(locals())
self.locals['self'] = s
if re.match(r'self\s*[+*/-|&]?=', cmd):
self.locals['self'] = eval(cmd.split('=', 1)[1].strip(), globals(), self.locals)
except Exception:
traceback.print_exc()
return self.runEnd(sys.stdout.getvalue()+sys.stderr.getvalue()+'>')
def monitor(self):
if sys.stdout.getvalue() or sys.stderr.getvalue():
self.run()
self.after_id = self.console.after(500, self.monitor)
def delChar(self):
c = self.console
try:
if c.tag_ranges('sel') or c.index('insert') == self.lineStart:
return "break"
except Exception:
pass
def ctrlA(self):
self.console.mark_set('insert', self.lineStart)
return "break"
def ctrlU(self):
self.console.replace(self.lineStart, 'end', '')
return "break"
def ctrlL(self):
self.console.replace('1.0 + {} chars'.format(3*(len(self.title)+3)), 'end - 1 chars', '>')
self.setLineStart()
self.console.mark_set('insert', self.lineStart)
return "break"
def insChar(self, event):
if self.pos is not None:
return "break"
c = self.console
if event.keysym == "Up":
if self.history_index < 0:
return "break"
if self.history_index:
self.history_index -= 1
c.replace(self.lineStart, 'end', self.history[self.history_index])
c.mark_set('insert', 'end')
return "break"
if event.keysym == "Down":
if self.history_index < 0:
return "break"
if self.history_index < len(self.history):
self.history_index += 1
if self.history_index == len(self.history):
c.replace(self.lineStart, 'end', self.currentLine.strip('\n'))
return "break"
c.replace(self.lineStart, 'end', self.history[self.history_index])
c.mark_set('insert', 'end')
return "break"
if event.keysym == "Left" and c.index('insert') == self.lineStart:
return "break"
self.currentLine = c.get(self.lineStart, 'end')
if (event.char or not event.keysym) and c.tag_ranges('sel'):
c.selection_clear()
def mark(self, x=None, y=None):
if x is None:
i = 'current'
else:
i = '@{}, {}'.format(x, y)
r, c = [int(x) for x in self.console.index(i).split('.')]
if r == self.click_pos[0] and r == self.click_pos[1]:
return
if r < self.click_pos[0] or (r == self.click_pos[0] and c < self.click_pos[1]):
self.console.tag_add('sel', i, '{}.{}'.format(*self.click_pos))
else:
self.console.tag_add('sel', '{}.{}'.format(*self.click_pos), i)
def cmove(self, event):
if self.pos is None:
return "break"
self.mark(event.x, event.y)
return "break"
def click(self):
self.console.selection_clear()
self.pos = self.console.index('insert')
self.click_pos = [int(x) for x in self.console.index('current').split('.')]
self.console.focus()
return "break"
def clack(self):
self.console.mark_set('insert', self.pos)
self.mark()
self.pos = None
return "break" | fitter/gui/debugConsole.py | from tkinter import Toplevel, Frame, BOTH
from tkinter.scrolledtext import ScrolledText
from io import StringIO
import re
import sys
import traceback
class DebugConsole:
def __init__(self, root, title='', locals={}, destroy=None):
self.root = Toplevel(root)
self.root.wm_title("Debug console")
self.root.wm_geometry("480x640")
self.locals = locals
self.title = title
self.indent = 0
self.block = ''
self.pos = None
if destroy is None:
self.root.bind("<Key-Escape>", self._quit)
else:
self.root.bind("<Key-Escape>", destroy)
self.orig_stdout = sys.stdout
sys.stdout = StringIO()
self.orig_stderr = sys.stderr
sys.stderr = StringIO()
self.initFrame()
def _quit(self):
self.root.quit()
self.root.destroy()
def initFrame(self):
self.frame = Frame(self.root)
self.frame.pack(fill=BOTH, expand=True)
self.console = ScrolledText(self.frame,
bg="black", fg='orange', font=('Courier', 15),
insertbackground='orange')
self.console.pack(side="left", expand=True, fill=BOTH)
if self.title:
self.console.insert("insert", '='*(2+len(self.title))+'\n')
self.console.insert("insert", '= '+self.title+'\n')
self.console.insert("insert", '='*(2+len(self.title))+'\n')
self.console.insert("insert", '>')
self.setLineStart()
self.history = []
self.history_index = -1
self.console.bind("<Key>", lambda e: self.insChar(e))
self.console.bind("<Key-Return>", lambda e: self.run())
self.console.bind("<Control-a>", lambda e: self.ctrlA())
self.console.bind("<Control-A>", lambda e: self.ctrlA())
self.console.bind("<Control-u>", lambda e: self.ctrlU())
self.console.bind("<Control-U>", lambda e: self.ctrlU())
self.console.bind("<Control-l>", lambda e: self.ctrlL())
self.console.bind("<Control-L>", lambda e: self.ctrlL())
self.console.bind("<BackSpace>", lambda e: self.delChar())
self.console.bind("<Button-1>", lambda e: self.click())
self.console.bind("<ButtonRelease-1>", lambda e: self.clack())
self.console.bind("<B1-Motion>", self.cmove)
self.console.bind("<ButtonRelease-2>", lambda e: "break")
self.after_id = self.console.after(500, self.monitor)
def setLineStart(self, offset=0):
r, c = self.console.index('end').split('.')
self.lineStart = '{}.{}'.format(int(r)-1, int(c)+1+offset)
def runEnd(self, insert, offset=0, error=None):
self.console.insert('end', '\n'+insert)
sys.stdout = StringIO()
sys.stderr = StringIO()
self.after_id = self.console.after(500, self.monitor)
if error is not None:
raise error
self.setLineStart(offset)
self.console.mark_set('insert', 'end')
self.console.see('end')
return 'break'
def runBlock(self, cmd):
if len(cmd):
if self.indent or cmd.endswith(':'):
self.block += self.indent*' '+cmd+'\n'
if cmd.endswith(':'):
self.indent += 1
dots = '...'*self.indent
return self.runEnd(dots, len(dots)-1)
if self.block:
self.indent -= 1
if self.indent:
dots = '...'*self.indent
return self.runEnd(dots, len(dots)-1)
cmd = self.block
self.block = ''
return cmd
def run(self):
self.console.after_cancel(self.after_id)
self.currentLine = ""
try:
cmd = self.console.get(self.lineStart, 'end').strip()
self.console.replace(self.lineStart, 'end', cmd)
cmd = re.sub(r'\s', ' ', cmd)
self.history.append(cmd)
self.history_index = len(self.history)
cmd = self.runBlock(cmd)
if cmd == "break":
return "break"
if cmd:
locs = locals()
locs.update(self.locals)
# In case we want to override self when debugging.
s = self.locals.get('self', self)
if cmd in locs:
cmd = 'print({})'.format(cmd)
if re.match(r'(import|for|while|if|from) |[a-zA-Z_]\w*\s*=', cmd):
ret = exec(cmd, globals(), locs)
else:
ret = eval(cmd, globals(), locs)
if ret is not None:
print(ret)
# More trickery
self.locals.update(locals())
self.locals['self'] = s
if re.match(r'self\s*[+*/-|&]?=', cmd):
self.locals['self'] = eval(cmd.split('=', 1)[1].strip(), globals(), self.locals)
except Exception:
traceback.print_exc()
return self.runEnd(sys.stdout.getvalue()+sys.stderr.getvalue()+'>')
def monitor(self):
if sys.stdout.getvalue() or sys.stderr.getvalue():
self.run()
self.after_id = self.console.after(500, self.monitor)
def delChar(self):
c = self.console
try:
if c.tag_ranges('sel') or c.index('insert') == self.lineStart:
return "break"
except Exception:
pass
def ctrlA(self):
self.console.mark_set('insert', self.lineStart)
return "break"
def ctrlU(self):
self.console.replace(self.lineStart, 'end', '')
return "break"
def ctrlL(self):
self.console.replace('1.0 + {} chars'.format(3*(len(self.title)+3)), 'end - 1 chars', '>')
self.setLineStart()
self.console.mark_set('insert', self.lineStart)
return "break"
def insChar(self, event):
if self.pos is not None:
return "break"
c = self.console
if event.keysym == "Up":
if self.history_index < 0:
return "break"
if self.history_index:
self.history_index -= 1
c.replace(self.lineStart, 'end', self.history[self.history_index])
c.mark_set('insert', 'end')
return "break"
if event.keysym == "Down":
if self.history_index < 0:
return "break"
if self.history_index < len(self.history):
self.history_index += 1
if self.history_index == len(self.history):
c.replace(self.lineStart, 'end', self.currentLine.strip('\n'))
return "break"
c.replace(self.lineStart, 'end', self.history[self.history_index])
c.mark_set('insert', 'end')
return "break"
if event.keysym == "Left" and c.index('insert') == self.lineStart:
return "break"
self.currentLine = c.get(self.lineStart, 'end')
if (event.char or not event.keysym) and c.tag_ranges('sel'):
c.selection_clear()
def mark(self, x=None, y=None):
if x is None:
i = 'current'
else:
i = '@{}, {}'.format(x, y)
r, c = [int(x) for x in self.console.index(i).split('.')]
if r == self.click_pos[0] and r == self.click_pos[1]:
return
if r < self.click_pos[0] or (r == self.click_pos[0] and c < self.click_pos[1]):
self.console.tag_add('sel', i, '{}.{}'.format(*self.click_pos))
else:
self.console.tag_add('sel', '{}.{}'.format(*self.click_pos), i)
def cmove(self, event):
if self.pos is None:
return "break"
self.mark(event.x, event.y)
return "break"
def click(self):
self.console.selection_clear()
self.pos = self.console.index('insert')
self.click_pos = [int(x) for x in self.console.index('current').split('.')]
self.console.focus()
return "break"
def clack(self):
self.console.mark_set('insert', self.pos)
self.mark()
self.pos = None
return "break" | 0.218169 | 0.081849 |
import os
import sys
from pathlib import Path
import json
# Linking params
# Global vars for linking and their default values
LINKABLE_FILES_EXTENSION = ".js"
OUTPUT_FILE = "../main.js"
LINKING_MAP_FILE = "default_linked_dirs.json"
LINKING_FOR_FINAL_FILES = False
def linker():
printHeader()
toLinkAccordingArgv()
printFooter()
def printHeader():
print(" --- Start linking ---")
print("\n Files have been linked: \n")
def printFooter():
print("\n --- Linking complete ---")
def toLinkAccordingArgv():
setLinkingParamFromArgv()
toLink()
def setLinkingParamFromArgv():
global LINKABLE_FILES_EXTENSION
global OUTPUT_FILE
global LINKING_MAP_FILE
global LINKING_FOR_FINAL_FILES
if len(sys.argv) >= 4:
LINKABLE_FILES_EXTENSION = sys.argv[1]
OUTPUT_FILE = sys.argv[2]
LINKING_MAP_FILE = sys.argv[3]
if len(sys.argv) >= 5:
if sys.argv[4] == "f":
LINKING_FOR_FINAL_FILES = True
def toLink():
if LINKING_FOR_FINAL_FILES:
linkForFinalFiles(getLinkingMapFromFile(LINKING_MAP_FILE))
else:
linkForFilesInDirs(getLinkingMapFromFile(LINKING_MAP_FILE))
def getLinkingMapFromFile(filePath):
try:
return tryGetLinkingMap(filePath)
except ValueError:
logJSONDecodeError()
except FileNotFoundError:
logFileNotFoundError()
return {}
def tryGetLinkingMap(filePath):
finalFiles = getDataFromJSON(filePath)
return finalFiles
def logJSONDecodeError():
print(" Error: Wrong format of JSON file")
def logFileNotFoundError():
print(" JSON with linking map not found")
def getDataFromJSON(JSON):
with open(JSON, 'r') as file:
return(json.load(file))
def linkForFinalFiles(finalFiles):
outPutFile = open(OUTPUT_FILE, mode = 'w', encoding = "UTF-8")
for fileAnnotation, filePath in finalFiles.items():
linkFinalFileWithOutPutFile(filePath, outPutFile)
printAnnotationAndPathWithSplit(fileAnnotation, filePath, 30)
outPutFile.close()
def linkForFilesInDirs(linkedDirs):
outPutFile = open(OUTPUT_FILE, mode = 'w', encoding = "UTF-8")
for dir in linkedDirs.values(): # iteration for all directories in linkedDirs{}
for subDir in os.walk(dir): # iteration for all subdirectories
for finalFile in subDir[2]: # iteration for all destination files
filePath = getFilePathForDirAndName(subDir, finalFile)
linkFinalFileWithOutPutFile(filePath, outPutFile)
printRelativeFilePath(filePath)
outPutFile.close()
def getFilePathForDirAndName(subDir, fileName):
return str(str(subDir[0]) + '/' + str(fileName))
def linkFinalFileWithOutPutFile(filePath, outPutFile):
if ((getFileExtension(filePath) == LINKABLE_FILES_EXTENSION) and (os.path.basename(filePath) != OUTPUT_FILE)):
outPutFile.write(getTextFromFile(filePath))
def getFileExtension(filePath):
return Path(filePath).suffix
def getTextFromFile(filePath):
linkableFile = open(filePath, mode='r', encoding = "UTF-8")
fileText = linkableFile.read() + '\n' # String with all text from file
linkableFile.close()
return fileText
def printAnnotationAndPathWithSplit(fileAnnotation, filePath, split):
printingString = " - " + fileAnnotation
for i in range(0, split-len(fileAnnotation)):
printingString += " "
printingString += filePath
print(printingString)
def printRelativeFilePath(filePath):
print(" " + filePath)
linker() | linker.py | import os
import sys
from pathlib import Path
import json
# Linking params
# Global vars for linking and their default values
LINKABLE_FILES_EXTENSION = ".js"
OUTPUT_FILE = "../main.js"
LINKING_MAP_FILE = "default_linked_dirs.json"
LINKING_FOR_FINAL_FILES = False
def linker():
printHeader()
toLinkAccordingArgv()
printFooter()
def printHeader():
print(" --- Start linking ---")
print("\n Files have been linked: \n")
def printFooter():
print("\n --- Linking complete ---")
def toLinkAccordingArgv():
setLinkingParamFromArgv()
toLink()
def setLinkingParamFromArgv():
global LINKABLE_FILES_EXTENSION
global OUTPUT_FILE
global LINKING_MAP_FILE
global LINKING_FOR_FINAL_FILES
if len(sys.argv) >= 4:
LINKABLE_FILES_EXTENSION = sys.argv[1]
OUTPUT_FILE = sys.argv[2]
LINKING_MAP_FILE = sys.argv[3]
if len(sys.argv) >= 5:
if sys.argv[4] == "f":
LINKING_FOR_FINAL_FILES = True
def toLink():
if LINKING_FOR_FINAL_FILES:
linkForFinalFiles(getLinkingMapFromFile(LINKING_MAP_FILE))
else:
linkForFilesInDirs(getLinkingMapFromFile(LINKING_MAP_FILE))
def getLinkingMapFromFile(filePath):
try:
return tryGetLinkingMap(filePath)
except ValueError:
logJSONDecodeError()
except FileNotFoundError:
logFileNotFoundError()
return {}
def tryGetLinkingMap(filePath):
finalFiles = getDataFromJSON(filePath)
return finalFiles
def logJSONDecodeError():
print(" Error: Wrong format of JSON file")
def logFileNotFoundError():
print(" JSON with linking map not found")
def getDataFromJSON(JSON):
with open(JSON, 'r') as file:
return(json.load(file))
def linkForFinalFiles(finalFiles):
outPutFile = open(OUTPUT_FILE, mode = 'w', encoding = "UTF-8")
for fileAnnotation, filePath in finalFiles.items():
linkFinalFileWithOutPutFile(filePath, outPutFile)
printAnnotationAndPathWithSplit(fileAnnotation, filePath, 30)
outPutFile.close()
def linkForFilesInDirs(linkedDirs):
outPutFile = open(OUTPUT_FILE, mode = 'w', encoding = "UTF-8")
for dir in linkedDirs.values(): # iteration for all directories in linkedDirs{}
for subDir in os.walk(dir): # iteration for all subdirectories
for finalFile in subDir[2]: # iteration for all destination files
filePath = getFilePathForDirAndName(subDir, finalFile)
linkFinalFileWithOutPutFile(filePath, outPutFile)
printRelativeFilePath(filePath)
outPutFile.close()
def getFilePathForDirAndName(subDir, fileName):
return str(str(subDir[0]) + '/' + str(fileName))
def linkFinalFileWithOutPutFile(filePath, outPutFile):
if ((getFileExtension(filePath) == LINKABLE_FILES_EXTENSION) and (os.path.basename(filePath) != OUTPUT_FILE)):
outPutFile.write(getTextFromFile(filePath))
def getFileExtension(filePath):
return Path(filePath).suffix
def getTextFromFile(filePath):
linkableFile = open(filePath, mode='r', encoding = "UTF-8")
fileText = linkableFile.read() + '\n' # String with all text from file
linkableFile.close()
return fileText
def printAnnotationAndPathWithSplit(fileAnnotation, filePath, split):
printingString = " - " + fileAnnotation
for i in range(0, split-len(fileAnnotation)):
printingString += " "
printingString += filePath
print(printingString)
def printRelativeFilePath(filePath):
print(" " + filePath)
linker() | 0.04691 | 0.050611 |
from server import utils
from server import cache
import requests
import config
class General:
@classmethod
@cache.memoize(timeout=config.cache)
def _calc_supply(cls, height):
snapshot = 443863973624633
supply = 0
for height in range(0, height + 1):
supply += utils.reward(height)
return {
"supply": snapshot + supply,
"mining": supply,
"height": height
}
@classmethod
def info(cls):
data = utils.make_request("getblockchaininfo")
if data["error"] is None:
data["result"]["supply"] = cls._calc_supply(data["result"]["blocks"])["supply"]
data["result"]["reward"] = utils.reward(data["result"]["blocks"])
data["result"].pop("verificationprogress")
data["result"].pop("initialblockdownload")
data["result"].pop("pruned")
data["result"].pop("softforks")
data["result"].pop("bip9_softforks")
data["result"].pop("warnings")
data["result"].pop("size_on_disk")
nethash = utils.make_request("getnetworkhashps", [120, data["result"]["blocks"]])
if nethash["error"] is None:
data["result"]["nethash"] = int(nethash["result"])
return data
@classmethod
@cache.memoize(timeout=config.cache)
def supply(cls):
data = utils.make_request("getblockchaininfo")
height = data["result"]["blocks"]
return cls._calc_supply(height)
@classmethod
def fee(cls):
data = utils.make_request("estimatesmartfee", [6])
if "errors" in data["result"]:
return utils.response({
"feerate": utils.satoshis(0.0001),
"blocks": 6
})
data["result"]["feerate"] = utils.satoshis(data["result"]["feerate"])
return data
@classmethod
def mempool(cls):
data = utils.make_request("getmempoolinfo")
if data["error"] is None:
if data["result"]["size"] > 0:
mempool = utils.make_request("getrawmempool")["result"]
data["result"]["tx"] = mempool
else:
data["result"]["tx"] = []
return data
@classmethod
def price(cls):
link = "https://api.coingecko.com/api/v3/simple/price?ids=microbitcoin&vs_currencies=usd,btc,krw"
return requests.get(link).json() | server/methods/general.py | from server import utils
from server import cache
import requests
import config
class General:
@classmethod
@cache.memoize(timeout=config.cache)
def _calc_supply(cls, height):
snapshot = 443863973624633
supply = 0
for height in range(0, height + 1):
supply += utils.reward(height)
return {
"supply": snapshot + supply,
"mining": supply,
"height": height
}
@classmethod
def info(cls):
data = utils.make_request("getblockchaininfo")
if data["error"] is None:
data["result"]["supply"] = cls._calc_supply(data["result"]["blocks"])["supply"]
data["result"]["reward"] = utils.reward(data["result"]["blocks"])
data["result"].pop("verificationprogress")
data["result"].pop("initialblockdownload")
data["result"].pop("pruned")
data["result"].pop("softforks")
data["result"].pop("bip9_softforks")
data["result"].pop("warnings")
data["result"].pop("size_on_disk")
nethash = utils.make_request("getnetworkhashps", [120, data["result"]["blocks"]])
if nethash["error"] is None:
data["result"]["nethash"] = int(nethash["result"])
return data
@classmethod
@cache.memoize(timeout=config.cache)
def supply(cls):
data = utils.make_request("getblockchaininfo")
height = data["result"]["blocks"]
return cls._calc_supply(height)
@classmethod
def fee(cls):
data = utils.make_request("estimatesmartfee", [6])
if "errors" in data["result"]:
return utils.response({
"feerate": utils.satoshis(0.0001),
"blocks": 6
})
data["result"]["feerate"] = utils.satoshis(data["result"]["feerate"])
return data
@classmethod
def mempool(cls):
data = utils.make_request("getmempoolinfo")
if data["error"] is None:
if data["result"]["size"] > 0:
mempool = utils.make_request("getrawmempool")["result"]
data["result"]["tx"] = mempool
else:
data["result"]["tx"] = []
return data
@classmethod
def price(cls):
link = "https://api.coingecko.com/api/v3/simple/price?ids=microbitcoin&vs_currencies=usd,btc,krw"
return requests.get(link).json() | 0.496094 | 0.139309 |
import os
import argparse
def get_cmd(task, sub_task, model_tag, gpu, data_num, bs, lr, source_length, target_length, patience, epoch, warmup,
gpu_type, res_fn):
cmd_str = 'bash exp_with_args.sh %s %s %s %d %d %d %d %d %d %d %d %d %s %s' % \
(task, sub_task, model_tag, gpu, data_num, bs, lr, source_length, target_length, patience, epoch,
warmup, gpu_type, res_fn)
return cmd_str
def get_args_by_task_model(task, sub_task, model_tag):
if task == 'translate':
# java-cs: Read 10300 examples, avg src len: 13, avg trg len: 15, max src len: 136, max trg len: 118
# [TOKENIZE] avg src len: 45, avg trg len: 56, max src len: 391, max trg len: 404
src_len = 320
trg_len = 256
epoch = 100
patience = 5
elif task == 'summarize':
# ruby: Read 24927 examples, avg src len: 66, avg trg len: 12, max src len: 501, max trg len: 146
# [TOKENIZE] avg src len: 100, avg trg len: 13, max src len: 1250, max trg len: 161
# Python: Read 251820 examples, avg src len: 100, avg trg len: 11, max src len: 512, max trg len: 222
# [TOKENIZE] avg src len: 142, avg trg len: 12, max src len: 2016, max trg len: 245
# Javascript: Read 58025 examples, avg src len: 114, avg trg len: 11, max src len: 512, max trg len: 165
# [TOKENIZE] avg src len: 136, avg trg len: 12, max src len: 3016, max trg len: 177
src_len = 256
trg_len = 128
epoch = 15
patience = 2
elif task == 'refine':
# small: Read 46680 examples, avg src len: 31, avg trg len: 28, max src len: 50, max trg len: 50
# [TOKENIZE] avg src len: 50, avg trg len: 45, max src len: 129, max trg len: 121
# medium: Read 52364 examples, avg src len: 74, avg trg len: 73, max src len: 100, max trg len: 100
# [TOKENIZE] avg src len: 117, avg trg len: 114, max src len: 238, max trg len: 238
if sub_task == 'small':
src_len = 130
trg_len = 120
elif sub_task == 'medium':
src_len = 240
trg_len = 240
epoch = 50
patience = 5
elif task == 'concode':
# Read 100000 examples, avg src len: 71, avg trg len: 26, max src len: 567, max trg len: 140
# [TOKENIZE] avg src len: 213, avg trg len: 33, max src len: 2246, max trg len: 264
src_len = 320
trg_len = 150
epoch = 30
patience = 3
elif task == 'defect':
# Read 21854 examples, avg src len: 187, avg trg len: 1, max src len: 12195, max trg len: 1
# [TOKENIZE] avg src len: 597, avg trg len: 1, max src len: 41447, max trg len: 1
src_len = 512
trg_len = 3
epoch = 10
patience = 2
elif task == 'clone':
# Read 901028 examples, avg src len: 120, avg trg len: 123, max src len: 5270, max trg len: 5270
# [TOKENIZE] avg src len: 318, avg trg len: 323, max src len: 15111, max trg len: 15111
src_len = 400
trg_len = 400
epoch = 2
patience = 1
if 'codet5_small' in model_tag:
bs = 32
if task == 'summarize' or task == 'translate' or (task == 'refine' and sub_task == 'small'):
bs = 64
else:
bs = 32
if task == 'translate':
bs = 25
elif task == 'summarize':
bs = 48
lr = 5
if task == 'concode':
lr = 10
elif task == 'defect':
lr = 2
return bs, lr, src_len, trg_len, patience, epoch
def run_one_exp(args):
bs, lr, src_len, trg_len, patience, epoch = get_args_by_task_model(args.task, args.sub_task, args.model_tag)
print('============================Start Running==========================')
cmd_str = get_cmd(task=args.task, sub_task=args.sub_task, model_tag=args.model_tag, gpu=args.gpu,
data_num=args.data_num, bs=bs, lr=lr, source_length=src_len, target_length=trg_len,
patience=patience, epoch=epoch, warmup=1000, gpu_type=args.gpu_type,
res_fn='{}/{}_{}.txt'.format(args.res_dir, args.task, args.model_tag))
print('%s\n' % cmd_str)
os.system(cmd_str)
def get_sub_tasks(task):
if task == 'summarize':
sub_tasks = ['ruby', 'javascript', 'go', 'python', 'java', 'php']
elif task == 'translate':
sub_tasks = ['java-cs', 'cs-java']
elif task == 'refine':
sub_tasks = ['small', 'medium']
else:
sub_tasks = ['none']
return sub_tasks
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--model_tag", type=str, default='codet5_base',
choices=['roberta', 'codebert', 'bart_base', 'codet5_small', 'codet5_base'])
parser.add_argument("--task", type=str, default='summarize', choices=['summarize', 'concode', 'translate',
'refine', 'defect', 'clone'])
parser.add_argument("--sub_task", type=str, default='ruby')
parser.add_argument("--res_dir", type=str, default='results')
parser.add_argument("--gpu_type", type=str, default='a100', choices=['v100', 'a100'])
parser.add_argument("--data_num", type=int, default=-1)
parser.add_argument("--gpu", type=int, default=0)
args = parser.parse_args()
if not os.path.exists(args.res_dir):
os.makedirs(args.res_dir)
assert args.sub_task in get_sub_tasks(args.task)
run_one_exp(args) | sh/run_exp.py | import os
import argparse
def get_cmd(task, sub_task, model_tag, gpu, data_num, bs, lr, source_length, target_length, patience, epoch, warmup,
gpu_type, res_fn):
cmd_str = 'bash exp_with_args.sh %s %s %s %d %d %d %d %d %d %d %d %d %s %s' % \
(task, sub_task, model_tag, gpu, data_num, bs, lr, source_length, target_length, patience, epoch,
warmup, gpu_type, res_fn)
return cmd_str
def get_args_by_task_model(task, sub_task, model_tag):
if task == 'translate':
# java-cs: Read 10300 examples, avg src len: 13, avg trg len: 15, max src len: 136, max trg len: 118
# [TOKENIZE] avg src len: 45, avg trg len: 56, max src len: 391, max trg len: 404
src_len = 320
trg_len = 256
epoch = 100
patience = 5
elif task == 'summarize':
# ruby: Read 24927 examples, avg src len: 66, avg trg len: 12, max src len: 501, max trg len: 146
# [TOKENIZE] avg src len: 100, avg trg len: 13, max src len: 1250, max trg len: 161
# Python: Read 251820 examples, avg src len: 100, avg trg len: 11, max src len: 512, max trg len: 222
# [TOKENIZE] avg src len: 142, avg trg len: 12, max src len: 2016, max trg len: 245
# Javascript: Read 58025 examples, avg src len: 114, avg trg len: 11, max src len: 512, max trg len: 165
# [TOKENIZE] avg src len: 136, avg trg len: 12, max src len: 3016, max trg len: 177
src_len = 256
trg_len = 128
epoch = 15
patience = 2
elif task == 'refine':
# small: Read 46680 examples, avg src len: 31, avg trg len: 28, max src len: 50, max trg len: 50
# [TOKENIZE] avg src len: 50, avg trg len: 45, max src len: 129, max trg len: 121
# medium: Read 52364 examples, avg src len: 74, avg trg len: 73, max src len: 100, max trg len: 100
# [TOKENIZE] avg src len: 117, avg trg len: 114, max src len: 238, max trg len: 238
if sub_task == 'small':
src_len = 130
trg_len = 120
elif sub_task == 'medium':
src_len = 240
trg_len = 240
epoch = 50
patience = 5
elif task == 'concode':
# Read 100000 examples, avg src len: 71, avg trg len: 26, max src len: 567, max trg len: 140
# [TOKENIZE] avg src len: 213, avg trg len: 33, max src len: 2246, max trg len: 264
src_len = 320
trg_len = 150
epoch = 30
patience = 3
elif task == 'defect':
# Read 21854 examples, avg src len: 187, avg trg len: 1, max src len: 12195, max trg len: 1
# [TOKENIZE] avg src len: 597, avg trg len: 1, max src len: 41447, max trg len: 1
src_len = 512
trg_len = 3
epoch = 10
patience = 2
elif task == 'clone':
# Read 901028 examples, avg src len: 120, avg trg len: 123, max src len: 5270, max trg len: 5270
# [TOKENIZE] avg src len: 318, avg trg len: 323, max src len: 15111, max trg len: 15111
src_len = 400
trg_len = 400
epoch = 2
patience = 1
if 'codet5_small' in model_tag:
bs = 32
if task == 'summarize' or task == 'translate' or (task == 'refine' and sub_task == 'small'):
bs = 64
else:
bs = 32
if task == 'translate':
bs = 25
elif task == 'summarize':
bs = 48
lr = 5
if task == 'concode':
lr = 10
elif task == 'defect':
lr = 2
return bs, lr, src_len, trg_len, patience, epoch
def run_one_exp(args):
bs, lr, src_len, trg_len, patience, epoch = get_args_by_task_model(args.task, args.sub_task, args.model_tag)
print('============================Start Running==========================')
cmd_str = get_cmd(task=args.task, sub_task=args.sub_task, model_tag=args.model_tag, gpu=args.gpu,
data_num=args.data_num, bs=bs, lr=lr, source_length=src_len, target_length=trg_len,
patience=patience, epoch=epoch, warmup=1000, gpu_type=args.gpu_type,
res_fn='{}/{}_{}.txt'.format(args.res_dir, args.task, args.model_tag))
print('%s\n' % cmd_str)
os.system(cmd_str)
def get_sub_tasks(task):
if task == 'summarize':
sub_tasks = ['ruby', 'javascript', 'go', 'python', 'java', 'php']
elif task == 'translate':
sub_tasks = ['java-cs', 'cs-java']
elif task == 'refine':
sub_tasks = ['small', 'medium']
else:
sub_tasks = ['none']
return sub_tasks
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--model_tag", type=str, default='codet5_base',
choices=['roberta', 'codebert', 'bart_base', 'codet5_small', 'codet5_base'])
parser.add_argument("--task", type=str, default='summarize', choices=['summarize', 'concode', 'translate',
'refine', 'defect', 'clone'])
parser.add_argument("--sub_task", type=str, default='ruby')
parser.add_argument("--res_dir", type=str, default='results')
parser.add_argument("--gpu_type", type=str, default='a100', choices=['v100', 'a100'])
parser.add_argument("--data_num", type=int, default=-1)
parser.add_argument("--gpu", type=int, default=0)
args = parser.parse_args()
if not os.path.exists(args.res_dir):
os.makedirs(args.res_dir)
assert args.sub_task in get_sub_tasks(args.task)
run_one_exp(args) | 0.404743 | 0.364156 |
from __future__ import annotations
import re
from typing import Callable, ClassVar, List, Optional, Pattern, Sequence, Tuple, Union, cast
import discord
from discord.ext import commands
_ID_RE = re.compile(r"([0-9]{15,20})$")
_USER_MENTION_RE = re.compile(r"<@!?([0-9]{15,20})>$")
_CHAN_MENTION_RE = re.compile(r"<#([0-9]{15,20})>$")
_ROLE_MENTION_RE = re.compile(r"<@&([0-9]{15,20})>$")
class MessagePredicate(Callable[[discord.Message], bool]):
"""A simple collection of predicates for message events.
These predicates intend to help simplify checks in message events
and reduce boilerplate code.
This class should be created through the provided classmethods.
Instances of this class are callable message predicates, i.e. they
return ``True`` if a message matches the criteria.
All predicates are combined with :meth:`MessagePredicate.same_context`.
Examples
--------
Waiting for a response in the same channel and from the same
author::
await bot.wait_for("message", check=MessagePredicate.same_context(ctx))
Waiting for a response to a yes or no question::
pred = MessagePredicate.yes_or_no(ctx)
await bot.wait_for("message", check=pred)
if pred.result is True:
# User responded "yes"
...
Getting a member object from a user's response::
pred = MessagePredicate.valid_member(ctx)
await bot.wait_for("message", check=pred)
member = pred.result
Attributes
----------
result : Any
The object which the message content matched with. This is
dependent on the predicate used - see each predicate's
documentation for details, not every method will assign this
attribute. Defaults to ``None``.
"""
def __init__(self, predicate: Callable[["MessagePredicate", discord.Message], bool]) -> None:
self._pred: Callable[["MessagePredicate", discord.Message], bool] = predicate
self.result = None
def __call__(self, message: discord.Message) -> bool:
return self._pred(self, message)
@classmethod
def same_context(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the message fits the described context.
Parameters
----------
ctx : Optional[Context]
The current invocation context.
channel : Optional[discord.TextChannel]
The channel we expect a message in. If unspecified,
defaults to ``ctx.channel``. If ``ctx`` is unspecified
too, the message's channel will be ignored.
user : Optional[discord.abc.User]
The user we expect a message from. If unspecified,
defaults to ``ctx.author``. If ``ctx`` is unspecified
too, the message's author will be ignored.
Returns
-------
MessagePredicate
The event predicate.
"""
if ctx is not None:
channel = channel or ctx.channel
user = user or ctx.author
return cls(
lambda self, m: (user is None or user.id == m.author.id)
and (channel is None or channel.id == m.channel.id)
)
@classmethod
def cancelled(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the message is ``[p]cancel``.
Parameters
----------
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
return cls(
lambda self, m: (same_context(m) and m.content.lower() == f"{ctx.prefix}cancel")
)
@classmethod
def yes_or_no(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the message is "yes"/"y" or "no"/"n".
This will assign ``True`` for *yes*, or ``False`` for *no* to
the `result` attribute.
Parameters
----------
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
content = m.content.lower()
if content in ("yes", "y"):
self.result = True
elif content in ("no", "n"):
self.result = False
else:
return False
return True
return cls(predicate)
@classmethod
def valid_int(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response is an integer.
Assigns the response to `result` as an `int`.
Parameters
----------
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
try:
self.result = int(m.content)
except ValueError:
return False
else:
return True
return cls(predicate)
@classmethod
def valid_float(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response is a float.
Assigns the response to `result` as a `float`.
Parameters
----------
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
try:
self.result = float(m.content)
except ValueError:
return False
else:
return True
return cls(predicate)
@classmethod
def positive(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response is a positive number.
Assigns the response to `result` as a `float`.
Parameters
----------
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
try:
number = float(m.content)
except ValueError:
return False
else:
if number > 0:
self.result = number
return True
else:
return False
return cls(predicate)
@classmethod
def valid_role(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response refers to a role in the current guild.
Assigns the matching `discord.Role` object to `result`.
This predicate cannot be used in DM.
Parameters
----------
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
guild = cls._get_guild(ctx, channel, cast(discord.Member, user))
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
role = self._find_role(guild, m.content)
if role is None:
return False
self.result = role
return True
return cls(predicate)
@classmethod
def valid_member(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response refers to a member in the current guild.
Assigns the matching `discord.Member` object to `result`.
This predicate cannot be used in DM.
Parameters
----------
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
guild = cls._get_guild(ctx, channel, cast(discord.Member, user))
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
match = _ID_RE.match(m.content) or _USER_MENTION_RE.match(m.content)
if match:
result = guild.get_member(int(match.group(1)))
else:
result = guild.get_member_named(m.content)
if result is None:
return False
self.result = result
return True
return cls(predicate)
@classmethod
def valid_text_channel(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response refers to a text channel in the current guild.
Assigns the matching `discord.TextChannel` object to `result`.
This predicate cannot be used in DM.
Parameters
----------
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
guild = cls._get_guild(ctx, channel, cast(discord.Member, user))
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
match = _ID_RE.match(m.content) or _CHAN_MENTION_RE.match(m.content)
if match:
result = guild.get_channel(int(match.group(1)))
else:
result = discord.utils.get(guild.text_channels, name=m.content)
if not isinstance(result, discord.TextChannel):
return False
self.result = result
return True
return cls(predicate)
@classmethod
def has_role(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response refers to a role which the author has.
Assigns the matching `discord.Role` object to `result`.
One of ``user`` or ``ctx`` must be supplied. This predicate
cannot be used in DM.
Parameters
----------
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
guild = cls._get_guild(ctx, channel, cast(discord.Member, user))
if user is None:
if ctx is None:
raise TypeError(
"One of `user` or `ctx` must be supplied to `MessagePredicate.has_role`."
)
user = ctx.author
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
role = self._find_role(guild, m.content)
if role is None or role not in user.roles:
return False
self.result = role
return True
return cls(predicate)
@classmethod
def equal_to(
cls,
value: str,
ctx: Optional[commands.Context] = None,
channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response is equal to the specified value.
Parameters
----------
value : str
The value to compare the response with.
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
return cls(lambda self, m: same_context(m) and m.content == value)
@classmethod
def lower_equal_to(
cls,
value: str,
ctx: Optional[commands.Context] = None,
channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response *as lowercase* is equal to the specified value.
Parameters
----------
value : str
The value to compare the response with.
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
return cls(lambda self, m: same_context(m) and m.content.lower() == value)
@classmethod
def less(
cls,
value: Union[int, float],
ctx: Optional[commands.Context] = None,
channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response is less than the specified value.
Parameters
----------
value : Union[int, float]
The value to compare the response with.
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
valid_int = cls.valid_int(ctx, channel, user)
valid_float = cls.valid_float(ctx, channel, user)
return cls(lambda self, m: (valid_int(m) or valid_float(m)) and float(m.content) < value)
@classmethod
def greater(
cls,
value: Union[int, float],
ctx: Optional[commands.Context] = None,
channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response is greater than the specified value.
Parameters
----------
value : Union[int, float]
The value to compare the response with.
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
valid_int = cls.valid_int(ctx, channel, user)
valid_float = cls.valid_float(ctx, channel, user)
return cls(lambda self, m: (valid_int(m) or valid_float(m)) and float(m.content) > value)
@classmethod
def length_less(
cls,
length: int,
ctx: Optional[commands.Context] = None,
channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response's length is less than the specified length.
Parameters
----------
length : int
The value to compare the response's length with.
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
return cls(lambda self, m: same_context(m) and len(m.content) <= length)
@classmethod
def length_greater(
cls,
length: int,
ctx: Optional[commands.Context] = None,
channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response's length is greater than the specified length.
Parameters
----------
length : int
The value to compare the response's length with.
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
return cls(lambda self, m: same_context(m) and len(m.content) >= length)
@classmethod
def contained_in(
cls,
collection: Sequence[str],
ctx: Optional[commands.Context] = None,
channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response is contained in the specified collection.
The index of the response in the ``collection`` sequence is
assigned to the `result` attribute.
Parameters
----------
collection : Sequence[str]
The collection containing valid responses.
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
try:
self.result = collection.index(m.content)
except ValueError:
return False
else:
return True
return cls(predicate)
@classmethod
def lower_contained_in(
cls,
collection: Sequence[str],
ctx: Optional[commands.Context] = None,
channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Same as :meth:`contained_in`, but the response is set to lowercase before matching.
Parameters
----------
collection : Sequence[str]
The collection containing valid lowercase responses.
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
try:
self.result = collection.index(m.content.lower())
except ValueError:
return False
else:
return True
return cls(predicate)
@classmethod
def regex(
cls,
pattern: Union[Pattern[str], str],
ctx: Optional[commands.Context] = None,
channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response matches the specified regex pattern.
This predicate will use `re.search` to find a match. The
resulting `match object <match-objects>` will be assigned
to `result`.
Parameters
----------
pattern : Union[`pattern object <re-objects>`, str]
The pattern to search for in the response.
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
if isinstance(pattern, str):
pattern_obj = re.compile(pattern)
else:
pattern_obj = pattern
match = pattern_obj.search(m.content)
if match:
self.result = match
return True
return False
return cls(predicate)
@staticmethod
def _find_role(guild: discord.Guild, argument: str) -> Optional[discord.Role]:
match = _ID_RE.match(argument) or _ROLE_MENTION_RE.match(argument)
if match:
result = guild.get_role(int(match.group(1)))
else:
result = discord.utils.get(guild.roles, name=argument)
return result
@staticmethod
def _get_guild(
ctx: commands.Context, channel: discord.TextChannel, user: discord.Member
) -> discord.Guild:
if ctx is not None:
return ctx.guild
elif channel is not None:
return channel.guild
elif user is not None:
return user.guild
class ReactionPredicate(Callable[[discord.Reaction, discord.abc.User], bool]):
    """A collection of predicates for reaction events.

    All checks are combined with :meth:`ReactionPredicate.same_context`.

    Examples
    --------
    Confirming a yes/no question with a tick/cross reaction::

        from redbot.core.utils.predicates import ReactionPredicate
        from redbot.core.utils.menus import start_adding_reactions

        msg = await ctx.send("Yes or no?")
        start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)

        pred = ReactionPredicate.yes_or_no(msg, ctx.author)
        await ctx.bot.wait_for("reaction_add", check=pred)
        if pred.result is True:
            # User responded with tick
            ...
        else:
            # User responded with cross
            ...

    Waiting for the first reaction from any user with one of the first
    5 letters of the alphabet::

        from redbot.core.utils.predicates import ReactionPredicate
        from redbot.core.utils.menus import start_adding_reactions

        msg = await ctx.send("React to me!")
        emojis = ReactionPredicate.ALPHABET_EMOJIS[:5]
        start_adding_reactions(msg, emojis)

        pred = ReactionPredicate.with_emojis(emojis, msg)
        await ctx.bot.wait_for("reaction_add", check=pred)
        # pred.result is now the index of the letter in `emojis`

    Attributes
    ----------
    result : Any
        The object which the reaction matched with. This is
        dependent on the predicate used - see each predicate's
        documentation for details, not every method will assign this
        attribute. Defaults to ``None``.

    """

    YES_OR_NO_EMOJIS: ClassVar[Tuple[str, str]] = (
        "\N{WHITE HEAVY CHECK MARK}",
        "\N{NEGATIVE SQUARED CROSS MARK}",
    )
    """Tuple[str, str] : A tuple containing the tick emoji and cross emoji, in that order."""

    ALPHABET_EMOJIS: ClassVar[List[str]] = [
        chr(code)
        for code in range(
            ord("\N{REGIONAL INDICATOR SYMBOL LETTER A}"),
            ord("\N{REGIONAL INDICATOR SYMBOL LETTER Z}") + 1,
        )
    ]
    """List[str] : A list of all 26 alphabetical letter emojis."""

    NUMBER_EMOJIS: ClassVar[List[str]] = [
        chr(code) + "\N{COMBINING ENCLOSING KEYCAP}" for code in range(ord("0"), ord("9") + 1)
    ]
    """List[str] : A list of all single-digit number emojis, 0 through 9."""

    def __init__(
        self, predicate: Callable[["ReactionPredicate", discord.Reaction, discord.abc.User], bool]
    ) -> None:
        # The wrapped check receives this instance first so it can assign
        # to ``self.result`` on a successful match.
        self._pred: Callable[
            ["ReactionPredicate", discord.Reaction, discord.abc.User], bool
        ] = predicate
        # Set by individual predicates on a successful match; None until then.
        self.result = None

    def __call__(self, reaction: discord.Reaction, user: discord.abc.User) -> bool:
        return self._pred(self, reaction, user)

    # noinspection PyUnusedLocal
    @classmethod
    def same_context(
        cls, message: Optional[discord.Message] = None, user: Optional[discord.abc.User] = None
    ) -> "ReactionPredicate":
        """Match if a reaction fits the described context.

        This will ignore reactions added by the bot user, regardless
        of whether or not ``user`` is supplied. (When ``message`` is
        unspecified, the bot user's ID cannot be determined and this
        filtering is skipped.)

        Parameters
        ----------
        message : Optional[discord.Message]
            The message which we expect a reaction to. If unspecified,
            the reaction's message will be ignored.
        user : Optional[discord.abc.User]
            The user we expect to react. If unspecified, the user who
            added the reaction will be ignored.

        Returns
        -------
        ReactionPredicate
            The event predicate.

        """
        # Fix: previously ``message._state`` was read unconditionally, which
        # raised AttributeError when ``message`` was omitted despite the
        # parameter being documented as optional.
        # noinspection PyProtectedMember
        me_id = message._state.self_id if message is not None else None
        return cls(
            lambda self, r, u: (me_id is None or u.id != me_id)
            and (message is None or r.message.id == message.id)
            and (user is None or u.id == user.id)
        )

    @classmethod
    def with_emojis(
        cls,
        emojis: Sequence[Union[str, discord.Emoji, discord.PartialEmoji]],
        message: Optional[discord.Message] = None,
        user: Optional[discord.abc.User] = None,
    ) -> "ReactionPredicate":
        """Match if the reaction is one of the specified emojis.

        On a match, the index of the emoji within ``emojis`` is assigned
        to `result`.

        Parameters
        ----------
        emojis : Sequence[Union[str, discord.Emoji, discord.PartialEmoji]]
            The emojis of which one we expect to be reacted.
        message : Optional[discord.Message]
            Same as ``message`` in :meth:`same_context`.
        user : Optional[discord.abc.User]
            Same as ``user`` in :meth:`same_context`.

        Returns
        -------
        ReactionPredicate
            The event predicate.

        """
        same_context = cls.same_context(message, user)

        def predicate(self: ReactionPredicate, r: discord.Reaction, u: discord.abc.User):
            if not same_context(r, u):
                return False
            try:
                self.result = emojis.index(r.emoji)
            except ValueError:
                return False
            else:
                return True

        return cls(predicate)

    @classmethod
    def yes_or_no(
        cls, message: Optional[discord.Message] = None, user: Optional[discord.abc.User] = None
    ) -> "ReactionPredicate":
        """Match if the reaction is a tick or cross emoji.

        The emojis used are in
        `ReactionPredicate.YES_OR_NO_EMOJIS`.

        This will assign ``True`` for *yes*, or ``False`` for *no* to
        the `result` attribute.

        Parameters
        ----------
        message : Optional[discord.Message]
            Same as ``message`` in :meth:`same_context`.
        user : Optional[discord.abc.User]
            Same as ``user`` in :meth:`same_context`.

        Returns
        -------
        ReactionPredicate
            The event predicate.

        """
        same_context = cls.same_context(message, user)

        def predicate(self: ReactionPredicate, r: discord.Reaction, u: discord.abc.User) -> bool:
            if not same_context(r, u):
                return False
            try:
                # Index 0 is the tick (-> True), index 1 the cross (-> False).
                self.result = not bool(self.YES_OR_NO_EMOJIS.index(r.emoji))
            except ValueError:
                return False
            else:
                return True

        return cls(predicate)
import re
from typing import Callable, ClassVar, List, Optional, Pattern, Sequence, Tuple, Union, cast
import discord
from discord.ext import commands
# Patterns for recognising raw snowflake IDs and Discord mention syntax in
# user responses. All are anchored at end-of-string via ``$`` and used with
# ``.match``, so the whole response must be the ID/mention (group 1 captures
# the numeric ID in each case).
_ID_RE = re.compile(r"([0-9]{15,20})$")
_USER_MENTION_RE = re.compile(r"<@!?([0-9]{15,20})>$")
_CHAN_MENTION_RE = re.compile(r"<#([0-9]{15,20})>$")
_ROLE_MENTION_RE = re.compile(r"<@&([0-9]{15,20})>$")
class MessagePredicate(Callable[[discord.Message], bool]):
"""A simple collection of predicates for message events.
These predicates intend to help simplify checks in message events
and reduce boilerplate code.
This class should be created through the provided classmethods.
Instances of this class are callable message predicates, i.e. they
return ``True`` if a message matches the criteria.
All predicates are combined with :meth:`MessagePredicate.same_context`.
Examples
--------
Waiting for a response in the same channel and from the same
author::
await bot.wait_for("message", check=MessagePredicate.same_context(ctx))
Waiting for a response to a yes or no question::
pred = MessagePredicate.yes_or_no(ctx)
await bot.wait_for("message", check=pred)
if pred.result is True:
# User responded "yes"
...
Getting a member object from a user's response::
pred = MessagePredicate.valid_member(ctx)
await bot.wait_for("message", check=pred)
member = pred.result
Attributes
----------
result : Any
The object which the message content matched with. This is
dependent on the predicate used - see each predicate's
documentation for details, not every method will assign this
attribute. Defaults to ``None``.
"""
    def __init__(self, predicate: Callable[["MessagePredicate", discord.Message], bool]) -> None:
        # The wrapped check receives this instance first so it can assign
        # to ``self.result`` on a successful match.
        self._pred: Callable[["MessagePredicate", discord.Message], bool] = predicate
        # Set by individual predicates on a successful match; None until then.
        self.result = None
    def __call__(self, message: discord.Message) -> bool:
        """Return ``True`` if ``message`` satisfies this predicate."""
        return self._pred(self, message)
@classmethod
def same_context(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the message fits the described context.
Parameters
----------
ctx : Optional[Context]
The current invocation context.
channel : Optional[discord.TextChannel]
The channel we expect a message in. If unspecified,
defaults to ``ctx.channel``. If ``ctx`` is unspecified
too, the message's channel will be ignored.
user : Optional[discord.abc.User]
The user we expect a message from. If unspecified,
defaults to ``ctx.author``. If ``ctx`` is unspecified
too, the message's author will be ignored.
Returns
-------
MessagePredicate
The event predicate.
"""
if ctx is not None:
channel = channel or ctx.channel
user = user or ctx.author
return cls(
lambda self, m: (user is None or user.id == m.author.id)
and (channel is None or channel.id == m.channel.id)
)
@classmethod
def cancelled(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the message is ``[p]cancel``.
Parameters
----------
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
return cls(
lambda self, m: (same_context(m) and m.content.lower() == f"{ctx.prefix}cancel")
)
@classmethod
def yes_or_no(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the message is "yes"/"y" or "no"/"n".
This will assign ``True`` for *yes*, or ``False`` for *no* to
the `result` attribute.
Parameters
----------
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
content = m.content.lower()
if content in ("yes", "y"):
self.result = True
elif content in ("no", "n"):
self.result = False
else:
return False
return True
return cls(predicate)
@classmethod
def valid_int(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response is an integer.
Assigns the response to `result` as an `int`.
Parameters
----------
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
try:
self.result = int(m.content)
except ValueError:
return False
else:
return True
return cls(predicate)
@classmethod
def valid_float(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response is a float.
Assigns the response to `result` as a `float`.
Parameters
----------
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
try:
self.result = float(m.content)
except ValueError:
return False
else:
return True
return cls(predicate)
@classmethod
def positive(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response is a positive number.
Assigns the response to `result` as a `float`.
Parameters
----------
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
try:
number = float(m.content)
except ValueError:
return False
else:
if number > 0:
self.result = number
return True
else:
return False
return cls(predicate)
@classmethod
def valid_role(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response refers to a role in the current guild.
Assigns the matching `discord.Role` object to `result`.
This predicate cannot be used in DM.
Parameters
----------
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
guild = cls._get_guild(ctx, channel, cast(discord.Member, user))
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
role = self._find_role(guild, m.content)
if role is None:
return False
self.result = role
return True
return cls(predicate)
@classmethod
def valid_member(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response refers to a member in the current guild.
Assigns the matching `discord.Member` object to `result`.
This predicate cannot be used in DM.
Parameters
----------
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
guild = cls._get_guild(ctx, channel, cast(discord.Member, user))
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
match = _ID_RE.match(m.content) or _USER_MENTION_RE.match(m.content)
if match:
result = guild.get_member(int(match.group(1)))
else:
result = guild.get_member_named(m.content)
if result is None:
return False
self.result = result
return True
return cls(predicate)
@classmethod
def valid_text_channel(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response refers to a text channel in the current guild.
Assigns the matching `discord.TextChannel` object to `result`.
This predicate cannot be used in DM.
Parameters
----------
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
guild = cls._get_guild(ctx, channel, cast(discord.Member, user))
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
match = _ID_RE.match(m.content) or _CHAN_MENTION_RE.match(m.content)
if match:
result = guild.get_channel(int(match.group(1)))
else:
result = discord.utils.get(guild.text_channels, name=m.content)
if not isinstance(result, discord.TextChannel):
return False
self.result = result
return True
return cls(predicate)
@classmethod
def has_role(
    cls,
    ctx: Optional[commands.Context] = None,
    channel: Optional[discord.TextChannel] = None,
    user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
    """Match if the response names a role which the author has.

    The matched `discord.Role` object is stored in `result`. One of
    ``user`` or ``ctx`` must be supplied, and this predicate cannot be
    used in DM.

    Parameters
    ----------
    ctx : Optional[Context]
        Same as ``ctx`` in :meth:`same_context`.
    channel : Optional[discord.TextChannel]
        Same as ``channel`` in :meth:`same_context`.
    user : Optional[discord.abc.User]
        Same as ``user`` in :meth:`same_context`.

    Returns
    -------
    MessagePredicate
        The event predicate.

    """
    context_check = cls.same_context(ctx, channel, user)
    guild = cls._get_guild(ctx, channel, cast(discord.Member, user))
    if user is None:
        if ctx is None:
            raise TypeError(
                "One of `user` or `ctx` must be supplied to `MessagePredicate.has_role`."
            )
        user = ctx.author

    def predicate(self: MessagePredicate, m: discord.Message) -> bool:
        if not context_check(m):
            return False
        matched_role = self._find_role(guild, m.content)
        if matched_role is not None and matched_role in user.roles:
            self.result = matched_role
            return True
        return False

    return cls(predicate)
@classmethod
def equal_to(
    cls,
    value: str,
    ctx: Optional[commands.Context] = None,
    channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
    user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
    """Match if the response is exactly equal to ``value``.

    Parameters
    ----------
    value : str
        The value to compare the response with.
    ctx : Optional[Context]
        Same as ``ctx`` in :meth:`same_context`.
    channel : Optional[discord.TextChannel]
        Same as ``channel`` in :meth:`same_context`.
    user : Optional[discord.abc.User]
        Same as ``user`` in :meth:`same_context`.

    Returns
    -------
    MessagePredicate
        The event predicate.

    """
    context_check = cls.same_context(ctx, channel, user)

    def predicate(self: MessagePredicate, m: discord.Message) -> bool:
        return context_check(m) and m.content == value

    return cls(predicate)
@classmethod
def lower_equal_to(
    cls,
    value: str,
    ctx: Optional[commands.Context] = None,
    channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
    user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
    """Match if the lowercased response is equal to ``value``.

    Parameters
    ----------
    value : str
        The value to compare the lowercased response with.
    ctx : Optional[Context]
        Same as ``ctx`` in :meth:`same_context`.
    channel : Optional[discord.TextChannel]
        Same as ``channel`` in :meth:`same_context`.
    user : Optional[discord.abc.User]
        Same as ``user`` in :meth:`same_context`.

    Returns
    -------
    MessagePredicate
        The event predicate.

    """
    context_check = cls.same_context(ctx, channel, user)

    def predicate(self: MessagePredicate, m: discord.Message) -> bool:
        return context_check(m) and m.content.lower() == value

    return cls(predicate)
@classmethod
def less(
    cls,
    value: Union[int, float],
    ctx: Optional[commands.Context] = None,
    channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
    user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
    """Match if the numeric response is strictly less than ``value``.

    Parameters
    ----------
    value : Union[int, float]
        The value to compare the response with.
    ctx : Optional[Context]
        Same as ``ctx`` in :meth:`same_context`.
    channel : Optional[discord.TextChannel]
        Same as ``channel`` in :meth:`same_context`.
    user : Optional[discord.abc.User]
        Same as ``user`` in :meth:`same_context`.

    Returns
    -------
    MessagePredicate
        The event predicate.

    """
    # Reuse the numeric-format predicates so the context checks and
    # parsing rules stay consistent with valid_int/valid_float.
    int_check = cls.valid_int(ctx, channel, user)
    float_check = cls.valid_float(ctx, channel, user)

    def predicate(self: MessagePredicate, m: discord.Message) -> bool:
        if not (int_check(m) or float_check(m)):
            return False
        return float(m.content) < value

    return cls(predicate)
@classmethod
def greater(
    cls,
    value: Union[int, float],
    ctx: Optional[commands.Context] = None,
    channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
    user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
    """Match if the numeric response is strictly greater than ``value``.

    Parameters
    ----------
    value : Union[int, float]
        The value to compare the response with.
    ctx : Optional[Context]
        Same as ``ctx`` in :meth:`same_context`.
    channel : Optional[discord.TextChannel]
        Same as ``channel`` in :meth:`same_context`.
    user : Optional[discord.abc.User]
        Same as ``user`` in :meth:`same_context`.

    Returns
    -------
    MessagePredicate
        The event predicate.

    """
    # Reuse the numeric-format predicates so the context checks and
    # parsing rules stay consistent with valid_int/valid_float.
    int_check = cls.valid_int(ctx, channel, user)
    float_check = cls.valid_float(ctx, channel, user)

    def predicate(self: MessagePredicate, m: discord.Message) -> bool:
        if not (int_check(m) or float_check(m)):
            return False
        return float(m.content) > value

    return cls(predicate)
@classmethod
def length_less(
    cls,
    length: int,
    ctx: Optional[commands.Context] = None,
    channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
    user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
    """Match if the response's length is less than or equal to ``length``.

    NOTE(review): the docstring previously said "less than", but the
    implementation has always used ``<=``. The documentation is
    corrected here instead of changing the operator, so existing
    callers keep their behaviour.

    Parameters
    ----------
    length : int
        The value to compare the response's length with.
    ctx : Optional[Context]
        Same as ``ctx`` in :meth:`same_context`.
    channel : Optional[discord.TextChannel]
        Same as ``channel`` in :meth:`same_context`.
    user : Optional[discord.abc.User]
        Same as ``user`` in :meth:`same_context`.

    Returns
    -------
    MessagePredicate
        The event predicate.

    """
    same_context = cls.same_context(ctx, channel, user)
    return cls(lambda self, m: same_context(m) and len(m.content) <= length)
@classmethod
def length_greater(
    cls,
    length: int,
    ctx: Optional[commands.Context] = None,
    channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
    user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
    """Match if the response's length is greater than or equal to ``length``.

    NOTE(review): the docstring previously said "greater than", but the
    implementation has always used ``>=``. The documentation is
    corrected here instead of changing the operator, so existing
    callers keep their behaviour.

    Parameters
    ----------
    length : int
        The value to compare the response's length with.
    ctx : Optional[Context]
        Same as ``ctx`` in :meth:`same_context`.
    channel : Optional[discord.TextChannel]
        Same as ``channel`` in :meth:`same_context`.
    user : Optional[discord.abc.User]
        Same as ``user`` in :meth:`same_context`.

    Returns
    -------
    MessagePredicate
        The event predicate.

    """
    same_context = cls.same_context(ctx, channel, user)
    return cls(lambda self, m: same_context(m) and len(m.content) >= length)
@classmethod
def contained_in(
    cls,
    collection: Sequence[str],
    ctx: Optional[commands.Context] = None,
    channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
    user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
    """Match if the response is one of the entries in ``collection``.

    On a match, the index of the response within ``collection`` is
    stored in the `result` attribute.

    Parameters
    ----------
    collection : Sequence[str]
        The collection containing valid responses.
    ctx : Optional[Context]
        Same as ``ctx`` in :meth:`same_context`.
    channel : Optional[discord.TextChannel]
        Same as ``channel`` in :meth:`same_context`.
    user : Optional[discord.abc.User]
        Same as ``user`` in :meth:`same_context`.

    Returns
    -------
    MessagePredicate
        The event predicate.

    """
    context_check = cls.same_context(ctx, channel, user)

    def predicate(self: MessagePredicate, m: discord.Message) -> bool:
        if not context_check(m):
            return False
        if m.content not in collection:
            return False
        self.result = collection.index(m.content)
        return True

    return cls(predicate)
@classmethod
def lower_contained_in(
    cls,
    collection: Sequence[str],
    ctx: Optional[commands.Context] = None,
    channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
    user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
    """Same as :meth:`contained_in`, but the response is lowercased first.

    Parameters
    ----------
    collection : Sequence[str]
        The collection containing valid lowercase responses.
    ctx : Optional[Context]
        Same as ``ctx`` in :meth:`same_context`.
    channel : Optional[discord.TextChannel]
        Same as ``channel`` in :meth:`same_context`.
    user : Optional[discord.abc.User]
        Same as ``user`` in :meth:`same_context`.

    Returns
    -------
    MessagePredicate
        The event predicate.

    """
    context_check = cls.same_context(ctx, channel, user)

    def predicate(self: MessagePredicate, m: discord.Message) -> bool:
        if not context_check(m):
            return False
        lowered = m.content.lower()
        if lowered not in collection:
            return False
        self.result = collection.index(lowered)
        return True

    return cls(predicate)
@classmethod
def regex(
    cls,
    pattern: Union[Pattern[str], str],
    ctx: Optional[commands.Context] = None,
    channel: Optional[Union[discord.TextChannel, discord.DMChannel]] = None,
    user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
    """Match if the response matches the specified regex pattern.

    This predicate will use `re.search` to find a match. The
    resulting `match object <match-objects>` will be assigned
    to `result`.

    Parameters
    ----------
    pattern : Union[`pattern object <re-objects>`, str]
        The pattern to search for in the response.
    ctx : Optional[Context]
        Same as ``ctx`` in :meth:`same_context`.
    channel : Optional[discord.TextChannel]
        Same as ``channel`` in :meth:`same_context`.
    user : Optional[discord.abc.User]
        Same as ``user`` in :meth:`same_context`.

    Returns
    -------
    MessagePredicate
        The event predicate.

    """
    same_context = cls.same_context(ctx, channel, user)
    # Compile once here instead of on every incoming message; the
    # previous version ran the isinstance check and re.compile inside
    # the predicate, i.e. on the hot path of every event.
    pattern_obj = re.compile(pattern) if isinstance(pattern, str) else pattern

    def predicate(self: MessagePredicate, m: discord.Message) -> bool:
        if not same_context(m):
            return False
        match = pattern_obj.search(m.content)
        if match:
            self.result = match
            return True
        return False

    return cls(predicate)
@staticmethod
def _find_role(guild: discord.Guild, argument: str) -> Optional[discord.Role]:
    """Resolve *argument* (raw ID, role mention, or exact name) to a role in *guild*."""
    id_match = _ID_RE.match(argument) or _ROLE_MENTION_RE.match(argument)
    if id_match is not None:
        return guild.get_role(int(id_match.group(1)))
    return discord.utils.get(guild.roles, name=argument)
@staticmethod
def _get_guild(
    ctx: commands.Context, channel: discord.TextChannel, user: discord.Member
) -> discord.Guild:
    """Pick the guild from the first non-None source, preferring ctx, then channel, then user.

    Returns ``None`` implicitly when all three arguments are ``None``;
    callers such as :meth:`has_role` handle that case themselves.
    """
    for source in (ctx, channel, user):
        if source is not None:
            return source.guild
class ReactionPredicate(Callable[[discord.Reaction, discord.abc.User], bool]):
    """A collection of predicates for reaction events.

    All checks are combined with :meth:`ReactionPredicate.same_context`.

    Examples
    --------
    Confirming a yes/no question with a tick/cross reaction::

        from redbot.core.utils.predicates import ReactionPredicate
        from redbot.core.utils.menus import start_adding_reactions

        msg = await ctx.send("Yes or no?")
        start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)

        pred = ReactionPredicate.yes_or_no(msg, ctx.author)
        await ctx.bot.wait_for("reaction_add", check=pred)
        if pred.result is True:
            # User responded with tick
            ...
        else:
            # User responded with cross
            ...

    Waiting for the first reaction from any user with one of the first
    5 letters of the alphabet::

        from redbot.core.utils.predicates import ReactionPredicate
        from redbot.core.utils.menus import start_adding_reactions

        msg = await ctx.send("React to me!")
        emojis = ReactionPredicate.ALPHABET_EMOJIS[:5]
        start_adding_reactions(msg, emojis)

        pred = ReactionPredicate.with_emojis(emojis, msg)
        await ctx.bot.wait_for("reaction_add", check=pred)
        # pred.result is now the index of the letter in `emojis`

    Attributes
    ----------
    result : Any
        The object which the reaction matched with. This is
        dependent on the predicate used - see each predicate's
        documentation for details, not every method will assign this
        attribute. Defaults to ``None``.

    """

    YES_OR_NO_EMOJIS: ClassVar[Tuple[str, str]] = (
        "\N{WHITE HEAVY CHECK MARK}",
        "\N{NEGATIVE SQUARED CROSS MARK}",
    )
    """Tuple[str, str] : A tuple containing the tick emoji and cross emoji, in that order."""

    ALPHABET_EMOJIS: ClassVar[List[str]] = [
        chr(code)
        for code in range(
            ord("\N{REGIONAL INDICATOR SYMBOL LETTER A}"),
            ord("\N{REGIONAL INDICATOR SYMBOL LETTER Z}") + 1,
        )
    ]
    """List[str] : A list of all 26 alphabetical letter emojis."""

    NUMBER_EMOJIS: ClassVar[List[str]] = [
        chr(code) + "\N{COMBINING ENCLOSING KEYCAP}" for code in range(ord("0"), ord("9") + 1)
    ]
    """List[str] : A list of all single-digit number emojis, 0 through 9."""

    def __init__(
        self, predicate: Callable[["ReactionPredicate", discord.Reaction, discord.abc.User], bool]
    ) -> None:
        self._pred: Callable[
            ["ReactionPredicate", discord.Reaction, discord.abc.User], bool
        ] = predicate
        # Set by individual predicates on a successful match; see class docs.
        self.result = None

    def __call__(self, reaction: discord.Reaction, user: discord.abc.User) -> bool:
        return self._pred(self, reaction, user)

    # noinspection PyUnusedLocal
    @classmethod
    def same_context(
        cls, message: Optional[discord.Message] = None, user: Optional[discord.abc.User] = None
    ) -> "ReactionPredicate":
        """Match if a reaction fits the described context.

        This will ignore reactions added by the bot user, regardless
        of whether or not ``user`` is supplied. (The bot user can only
        be identified through ``message``, so when ``message`` is
        omitted, bot reactions cannot be filtered out.)

        Parameters
        ----------
        message : Optional[discord.Message]
            The message which we expect a reaction to. If unspecified,
            the reaction's message will be ignored.
        user : Optional[discord.abc.User]
            The user we expect to react. If unspecified, the user who
            added the reaction will be ignored.

        Returns
        -------
        ReactionPredicate
            The event predicate.

        """
        # Bug fix: `message._state.self_id` was previously read
        # unconditionally, raising AttributeError when `message` was None
        # even though the parameter is documented as optional.
        # noinspection PyProtectedMember
        me_id = message._state.self_id if message is not None else None
        return cls(
            lambda self, r, u: (me_id is None or u.id != me_id)
            and (message is None or r.message.id == message.id)
            and (user is None or u.id == user.id)
        )

    @classmethod
    def with_emojis(
        cls,
        emojis: Sequence[Union[str, discord.Emoji, discord.PartialEmoji]],
        message: Optional[discord.Message] = None,
        user: Optional[discord.abc.User] = None,
    ) -> "ReactionPredicate":
        """Match if the reaction is one of the specified emojis.

        On a match, the index of the emoji within ``emojis`` is stored
        in the `result` attribute.

        Parameters
        ----------
        emojis : Sequence[Union[str, discord.Emoji, discord.PartialEmoji]]
            The emojis of which one we expect to be reacted.
        message : discord.Message
            Same as ``message`` in :meth:`same_context`.
        user : Optional[discord.abc.User]
            Same as ``user`` in :meth:`same_context`.

        Returns
        -------
        ReactionPredicate
            The event predicate.

        """
        same_context = cls.same_context(message, user)

        def predicate(self: ReactionPredicate, r: discord.Reaction, u: discord.abc.User):
            if not same_context(r, u):
                return False
            try:
                self.result = emojis.index(r.emoji)
            except ValueError:
                return False
            else:
                return True

        return cls(predicate)

    @classmethod
    def yes_or_no(
        cls, message: Optional[discord.Message] = None, user: Optional[discord.abc.User] = None
    ) -> "ReactionPredicate":
        """Match if the reaction is a tick or cross emoji.

        The emojis used are in
        `ReactionPredicate.YES_OR_NO_EMOJIS`.

        This will assign ``True`` for *yes*, or ``False`` for *no* to
        the `result` attribute.

        Parameters
        ----------
        message : discord.Message
            Same as ``message`` in :meth:`same_context`.
        user : Optional[discord.abc.User]
            Same as ``user`` in :meth:`same_context`.

        Returns
        -------
        ReactionPredicate
            The event predicate.

        """
        same_context = cls.same_context(message, user)

        def predicate(self: ReactionPredicate, r: discord.Reaction, u: discord.abc.User) -> bool:
            if not same_context(r, u):
                return False
            try:
                # index 0 is the tick -> True, index 1 is the cross -> False.
                self.result = not bool(self.YES_OR_NO_EMOJIS.index(r.emoji))
            except ValueError:
                return False
            else:
                return True

        return cls(predicate)
from __future__ import absolute_import
from sentry.models.projectoption import ProjectOption
from sentry.testutils import TestCase
from sentry.utils.safe import set_path
from sentry.message_filters import (
_localhost_filter, _browser_extensions_filter, _web_crawlers_filter,
_legacy_browsers_filter,
)
class FilterTests(TestCase):
    """Integration tests for the server-side inbound event filters.

    Each test toggles one filter through its project option, posts an
    event crafted to trigger that filter, and asserts on the HTTP
    status: a 4xx response means the event was rejected (filtered).
    """

    def _get_message(self):
        # Minimal valid event payload; the helpers below decorate it with
        # the specific fields each filter inspects.
        return {
        }

    def _set_filter_state(self, flt, state):
        # Filters are configured per-project under the 'filters:<id>' option key.
        ProjectOption.objects.set_value(
            project=self.project,
            key=u'filters:{}'.format(flt.spec.id),
            value=state,
        )

    def _get_message_with_bad_ip(self):
        message = self._get_message()
        set_path(message, 'user', 'ip_address', value='127.0.0.1')
        return message

    def test_should_not_filter_simple_messages(self):
        # baseline test (so we know everything works as expected)
        message = self._get_message()
        resp = self._postWithHeader(message)
        assert resp.status_code < 400  # no http error

    def test_should_filter_local_ip_addresses_when_enabled(self):
        self._set_filter_state(_localhost_filter, '1')
        message = self._get_message_with_bad_ip()
        resp = self._postWithHeader(message)
        assert resp.status_code >= 400  # some http error

    def test_should_not_filter_bad_ip_addresses_when_disabled(self):
        self._set_filter_state(_localhost_filter, '0')
        message = self._get_message_with_bad_ip()
        resp = self._postWithHeader(message)
        assert resp.status_code < 400  # no http error

    def _get_message_with_bad_extension(self):
        message = self._get_message()
        set_path(message, 'platform', value='javascript')
        set_path(message, 'exception', value={
            'values': [
                {
                    'type': 'Error',
                    'value': 'http://loading.retry.widdit.com/',
                }
            ]
        })
        return message

    def test_should_filter_browser_extensions_when_enabled(self):
        # Renamed from "..._when_enbabled" (typo); test methods have no
        # external callers, so the rename is safe.
        self._set_filter_state(_browser_extensions_filter, '1')
        message = self._get_message_with_bad_extension()
        resp = self._postWithHeader(message)
        assert resp.status_code >= 400  # some http error

    def test_should_not_filter_browser_extensions_when_disabled(self):
        self._set_filter_state(_browser_extensions_filter, '0')
        message = self._get_message_with_bad_extension()
        resp = self._postWithHeader(message)
        assert resp.status_code < 400  # no http error

    def _get_message_from_webcrawler(self):
        message = self._get_message()
        set_path(message, 'request', value={
            'url': 'http://example.com',
            'method': 'GET',
            'headers': [
                ['User-Agent', 'Mediapartners-Google'],
            ]
        })
        return message

    def test_should_filter_web_crawlers_when_enabled(self):
        self._set_filter_state(_web_crawlers_filter, '1')
        message = self._get_message_from_webcrawler()
        resp = self._postWithHeader(message)
        assert resp.status_code >= 400  # some http error

    def test_should_not_filter_web_crawlers_when_disabled(self):
        self._set_filter_state(_web_crawlers_filter, '0')
        message = self._get_message_from_webcrawler()
        resp = self._postWithHeader(message)
        assert resp.status_code < 400  # no http error

    def _get_message_from_legacy_browser(self):
        ie_5_user_agent = 'Mozilla/4.0 (compatible; MSIE 5.50; Windows NT; SiteKiosk 4.9; SiteCoach 1.0)'
        message = self._get_message()
        set_path(message, 'platform', value='javascript')
        set_path(message, 'request', value={
            'url': 'http://example.com',
            'method': 'GET',
            'headers': [
                ['User-Agent', ie_5_user_agent],
            ]
        })
        return message

    def test_should_filter_legacy_browsers_all_enabled(self):
        self._set_filter_state(_legacy_browsers_filter, '1')
        message = self._get_message_from_legacy_browser()
        resp = self._postWithHeader(message)
        assert resp.status_code >= 400  # some http error

    def test_should_filter_legacy_browsers_specific_browsers(self):
        self._set_filter_state(_legacy_browsers_filter, {'ie_pre_9', 'safari_5'})
        message = self._get_message_from_legacy_browser()
        resp = self._postWithHeader(message)
        assert resp.status_code >= 400  # some http error

    def test_should_not_filter_legacy_browsers_when_disabled(self):
        self._set_filter_state(_legacy_browsers_filter, '0')
        message = self._get_message_from_legacy_browser()
        resp = self._postWithHeader(message)
        assert resp.status_code < 400  # no http error

    def test_should_not_filter_legacy_browsers_when_current_browser_check_disabled(self):
        self._set_filter_state(_legacy_browsers_filter, {'safari_5'})
        message = self._get_message_from_legacy_browser()
        resp = self._postWithHeader(message)
        assert resp.status_code < 400  # no http error
from sentry.models.projectoption import ProjectOption
from sentry.testutils import TestCase
from sentry.utils.safe import set_path
from sentry.message_filters import (
_localhost_filter, _browser_extensions_filter, _web_crawlers_filter,
_legacy_browsers_filter,
)
class FilterTests(TestCase):
    """Integration tests for the server-side inbound event filters.

    Each test toggles one filter through its project option, posts an
    event crafted to trigger that filter, and asserts on the HTTP
    status: a 4xx response means the event was rejected (filtered).
    """

    def _get_message(self):
        # Minimal valid event payload; the helpers below decorate it with
        # the specific fields each filter inspects.
        return {
        }

    def _set_filter_state(self, flt, state):
        # Filters are configured per-project under the 'filters:<id>' option key.
        ProjectOption.objects.set_value(
            project=self.project,
            key=u'filters:{}'.format(flt.spec.id),
            value=state,
        )

    def _get_message_with_bad_ip(self):
        message = self._get_message()
        set_path(message, 'user', 'ip_address', value='127.0.0.1')
        return message

    def test_should_not_filter_simple_messages(self):
        # baseline test (so we know everything works as expected)
        message = self._get_message()
        resp = self._postWithHeader(message)
        assert resp.status_code < 400  # no http error

    def test_should_filter_local_ip_addresses_when_enabled(self):
        self._set_filter_state(_localhost_filter, '1')
        message = self._get_message_with_bad_ip()
        resp = self._postWithHeader(message)
        assert resp.status_code >= 400  # some http error

    def test_should_not_filter_bad_ip_addresses_when_disabled(self):
        self._set_filter_state(_localhost_filter, '0')
        message = self._get_message_with_bad_ip()
        resp = self._postWithHeader(message)
        assert resp.status_code < 400  # no http error

    def _get_message_with_bad_extension(self):
        message = self._get_message()
        set_path(message, 'platform', value='javascript')
        set_path(message, 'exception', value={
            'values': [
                {
                    'type': 'Error',
                    'value': 'http://loading.retry.widdit.com/',
                }
            ]
        })
        return message

    def test_should_filter_browser_extensions_when_enabled(self):
        # Renamed from "..._when_enbabled" (typo); test methods have no
        # external callers, so the rename is safe.
        self._set_filter_state(_browser_extensions_filter, '1')
        message = self._get_message_with_bad_extension()
        resp = self._postWithHeader(message)
        assert resp.status_code >= 400  # some http error

    def test_should_not_filter_browser_extensions_when_disabled(self):
        self._set_filter_state(_browser_extensions_filter, '0')
        message = self._get_message_with_bad_extension()
        resp = self._postWithHeader(message)
        assert resp.status_code < 400  # no http error

    def _get_message_from_webcrawler(self):
        message = self._get_message()
        set_path(message, 'request', value={
            'url': 'http://example.com',
            'method': 'GET',
            'headers': [
                ['User-Agent', 'Mediapartners-Google'],
            ]
        })
        return message

    def test_should_filter_web_crawlers_when_enabled(self):
        self._set_filter_state(_web_crawlers_filter, '1')
        message = self._get_message_from_webcrawler()
        resp = self._postWithHeader(message)
        assert resp.status_code >= 400  # some http error

    def test_should_not_filter_web_crawlers_when_disabled(self):
        self._set_filter_state(_web_crawlers_filter, '0')
        message = self._get_message_from_webcrawler()
        resp = self._postWithHeader(message)
        assert resp.status_code < 400  # no http error

    def _get_message_from_legacy_browser(self):
        ie_5_user_agent = 'Mozilla/4.0 (compatible; MSIE 5.50; Windows NT; SiteKiosk 4.9; SiteCoach 1.0)'
        message = self._get_message()
        set_path(message, 'platform', value='javascript')
        set_path(message, 'request', value={
            'url': 'http://example.com',
            'method': 'GET',
            'headers': [
                ['User-Agent', ie_5_user_agent],
            ]
        })
        return message

    def test_should_filter_legacy_browsers_all_enabled(self):
        self._set_filter_state(_legacy_browsers_filter, '1')
        message = self._get_message_from_legacy_browser()
        resp = self._postWithHeader(message)
        assert resp.status_code >= 400  # some http error

    def test_should_filter_legacy_browsers_specific_browsers(self):
        self._set_filter_state(_legacy_browsers_filter, {'ie_pre_9', 'safari_5'})
        message = self._get_message_from_legacy_browser()
        resp = self._postWithHeader(message)
        assert resp.status_code >= 400  # some http error

    def test_should_not_filter_legacy_browsers_when_disabled(self):
        self._set_filter_state(_legacy_browsers_filter, '0')
        message = self._get_message_from_legacy_browser()
        resp = self._postWithHeader(message)
        assert resp.status_code < 400  # no http error

    def test_should_not_filter_legacy_browsers_when_current_browser_check_disabled(self):
        self._set_filter_state(_legacy_browsers_filter, {'safari_5'})
        message = self._get_message_from_legacy_browser()
        resp = self._postWithHeader(message)
        assert resp.status_code < 400  # no http error
"""Compare environment variables snapshot with expected and detect changes
"""
import argparse
import difflib
import logging
import pathlib
import re
import os
import sys
import typing
import uuid
def normalize_env_variables(variables: typing.Dict[str, str]) -> typing.Dict[str, str]:
    """Return a copy of *variables* with PATH-like values deduplicated and sorted.

    For the known colon-separated variables, each component is stripped,
    empty components are dropped, duplicates are removed, and the rest
    are rejoined in sorted order; all other values pass through unchanged.
    """
    path_like = {'PATH', 'PYTHONPATH', 'PKG_CONFIG_PATH', 'LD_LIBRARY_PATH', 'LIBRARY_PATH', 'OV_FRONTEND_PATH'}
    normalized = {}
    for key, raw_value in variables.items():
        if key in path_like:
            unique_parts = {part.strip() for part in raw_value.split(':') if part.strip()}
            normalized[key] = ':'.join(sorted(unique_parts))
        else:
            normalized[key] = raw_value
    return normalized
def extract_changed_environment_variables(vars_before: typing.Dict[str, str],
                                          vars_after: typing.Dict[str, str]) -> typing.Dict[str, str]:
    """Return the normalized name/value pairs present in *vars_after* but not in *vars_before*."""
    # dict items views support set difference directly; this keeps pairs
    # whose name OR value changed, matching the original set() arithmetic.
    changed = dict(vars_after.items() - vars_before.items())
    return normalize_env_variables(changed)
def load_variables(path: str, env_prefix: bool = False) -> typing.Dict[str, str]:
    """Load environment variables and their values from a snapshot file.

    Parameters
    ----------
    path : str
        File with one variable per line, either plain ``NAME=value``
        (``env`` output) or dockerfile-like ``ENV NAME=value``.
    env_prefix : bool
        When True, expect the ``ENV`` prefix on every line.

    Returns
    -------
    typing.Dict[str, str]
        Normalized name/value mapping, or an empty dict if ANY non-blank
        line fails to parse (preserved all-or-nothing behaviour).
    """
    # Bug fix: variable names may contain digits (e.g. PYTHON3_PATH,
    # OPENVINO_2022). The previous pattern `[A-Za-z_]+` rejected such
    # names, and because one unmatched line discards the whole file,
    # a single digit-bearing variable emptied the entire snapshot.
    name_re = r'[A-Za-z_][A-Za-z0-9_]*'
    if env_prefix:
        pattern = re.compile(rf'^ENV\s+({name_re})=(.*)$')
    else:
        pattern = re.compile(rf'^({name_re})=(.*)$')
    variables = {}
    with open(path) as file:
        for record in filter(None, map(str.strip, file.readlines())):
            match = pattern.match(record)
            if not match:
                # Preserved behaviour: a malformed line invalidates the
                # whole snapshot.
                return {}
            variables[match.group(1)] = match.group(2)
    return normalize_env_variables(variables)
def save_env_template(path: pathlib.Path, variables: typing.Dict[str, str]):
    """Write *variables* to *path* as dockerfile-style ``ENV NAME=value`` lines."""
    lines = [f'ENV {name}={value}\n' for name, value in variables.items()]
    with open(path, mode='w') as template:
        template.writelines(lines)
def compare_templates(expected_path: pathlib.Path, actual_path: pathlib.Path, image: str, log: str):
    """Diff two template files and write an HTML report into the *log* folder.

    The report gets a unique ``env_<uuid>.html`` name so repeated runs
    never overwrite each other.
    """
    report_path = pathlib.Path(log) / f'env_{uuid.uuid4()}.html'
    with open(expected_path, mode='r') as expected, \
            open(actual_path, mode='r') as actual, \
            open(report_path, mode='w') as html_log:
        differ = difflib.HtmlDiff(wrapcolumn=100)
        html_log.write(differ.make_file(expected.readlines(), actual.readlines(),
                                        'origin', image, context=True))
def main() -> int:
    """Compare environment variables snapshot with expected and create HTML report if different"""
    # CLI: consumes two raw `env` snapshots (before/after the script under
    # test ran), an expected dockerfile-style template, and a log directory.
    parser = argparse.ArgumentParser(prog=os.path.basename(__file__),
                                     description='This is script to extract environment variables changes from '
                                                 'snapshots, compare with expected and create HTML diff report '
                                                 'if different',
                                     add_help=True)
    parser.add_argument(
        '-i',
        '--image',
        metavar='NAME',
        required=True,
        help='Image name',
    )
    parser.add_argument(
        '-e',
        '--expected',
        metavar='PATH',
        required=True,
        help='Path to file with expected environment variable changes from the script',
    )
    parser.add_argument(
        '-b',
        '--before',
        metavar='PATH',
        required=True,
        help='Path to file with environment variables snapshot before script launch',
    )
    parser.add_argument(
        '-a',
        '--after',
        metavar='PATH',
        required=True,
        help='Path to file with environment variables snapshot after script launch',
    )
    parser.add_argument(
        '-l',
        '--logs',
        metavar='PATH',
        default=str(pathlib.Path(os.path.realpath(__file__)).parent),
        help='Log path folder to store logs',
    )
    args = parser.parse_args()
    logging.basicConfig(level='INFO')
    log = logging.getLogger(__name__)
    log.info(f'Parsing inputs...')
    # Raw NAME=value snapshots taken before and after the script ran.
    vars_before = load_variables(args.before)
    vars_after = load_variables(args.after)
    # Variables that did not exist at all before the script ran.
    vars_created = {name: vars_after[name] for name in set(vars_after.keys()) - set(vars_before.keys())}
    # The expected template uses the dockerfile-like `ENV NAME=value` format.
    vars_expected = load_variables(args.expected, True)
    # Post-script values of the variables the template declares.
    vars_expected_updated = {name: vars_after[name] for name in vars_after if name in vars_expected}
    # Merge order matters: created and updated values override the template.
    vars_current = {**vars_expected, **vars_created, **vars_expected_updated}
    log.info('Generate updated environment variables template and search for changes:')
    output_path = pathlib.Path(args.logs) / os.path.basename(args.expected)
    save_env_template(output_path, vars_current)
    if vars_expected != vars_current:
        exit_code = 1
        vars_changed_script = extract_changed_environment_variables(vars_before, vars_after)
        vars_changed = extract_changed_environment_variables(vars_expected, vars_current)
        log.error('FAILED: changes detected')
        log.error(f'    after script launch {vars_changed_script}')
        log.error(f'    with expected {vars_changed}')
        # Persist an HTML diff so the failure can be inspected later.
        compare_templates(args.expected, output_path, args.image, args.logs)
    else:
        exit_code = 0
        log.info('PASSED')
    # Newly created variables fail the check even when expected values match.
    if vars_created:
        exit_code = 1
        log.error(f'FAILED: new variables are created - {vars_created}')
    if exit_code:
        log.info(f'See logs in {args.logs}')
    return exit_code
if __name__ == '__main__':
sys.exit(main()) | tests/resources/environment_vars/env_vars_changes_compare.py | """Compare environment variables snapshot with expected and detect changes
"""
import argparse
import difflib
import logging
import pathlib
import re
import os
import sys
import typing
import uuid
def normalize_env_variables(variables: typing.Dict[str, str]) -> typing.Dict[str, str]:
    """Return a copy of *variables* with PATH-like values deduplicated and sorted.

    For the known colon-separated variables, each component is stripped,
    empty components are dropped, duplicates are removed, and the rest
    are rejoined in sorted order; all other values pass through unchanged.
    """
    path_like = {'PATH', 'PYTHONPATH', 'PKG_CONFIG_PATH', 'LD_LIBRARY_PATH', 'LIBRARY_PATH', 'OV_FRONTEND_PATH'}
    normalized = {}
    for key, raw_value in variables.items():
        if key in path_like:
            unique_parts = {part.strip() for part in raw_value.split(':') if part.strip()}
            normalized[key] = ':'.join(sorted(unique_parts))
        else:
            normalized[key] = raw_value
    return normalized
def extract_changed_environment_variables(vars_before: typing.Dict[str, str],
                                          vars_after: typing.Dict[str, str]) -> typing.Dict[str, str]:
    """Return the normalized name/value pairs present in *vars_after* but not in *vars_before*."""
    # dict items views support set difference directly; this keeps pairs
    # whose name OR value changed, matching the original set() arithmetic.
    changed = dict(vars_after.items() - vars_before.items())
    return normalize_env_variables(changed)
def load_variables(path: str, env_prefix: bool = False) -> typing.Dict[str, str]:
    """Load environment variables and their values from a snapshot file.

    Parameters
    ----------
    path : str
        File with one variable per line, either plain ``NAME=value``
        (``env`` output) or dockerfile-like ``ENV NAME=value``.
    env_prefix : bool
        When True, expect the ``ENV`` prefix on every line.

    Returns
    -------
    typing.Dict[str, str]
        Normalized name/value mapping, or an empty dict if ANY non-blank
        line fails to parse (preserved all-or-nothing behaviour).
    """
    # Bug fix: variable names may contain digits (e.g. PYTHON3_PATH,
    # OPENVINO_2022). The previous pattern `[A-Za-z_]+` rejected such
    # names, and because one unmatched line discards the whole file,
    # a single digit-bearing variable emptied the entire snapshot.
    name_re = r'[A-Za-z_][A-Za-z0-9_]*'
    if env_prefix:
        pattern = re.compile(rf'^ENV\s+({name_re})=(.*)$')
    else:
        pattern = re.compile(rf'^({name_re})=(.*)$')
    variables = {}
    with open(path) as file:
        for record in filter(None, map(str.strip, file.readlines())):
            match = pattern.match(record)
            if not match:
                # Preserved behaviour: a malformed line invalidates the
                # whole snapshot.
                return {}
            variables[match.group(1)] = match.group(2)
    return normalize_env_variables(variables)
def save_env_template(path: pathlib.Path, variables: typing.Dict[str, str]):
    """Write *variables* to *path* as dockerfile-style ``ENV NAME=value`` lines."""
    lines = [f'ENV {name}={value}\n' for name, value in variables.items()]
    with open(path, mode='w') as template:
        template.writelines(lines)
def compare_templates(expected_path: pathlib.Path, actual_path: pathlib.Path, image: str, log: str):
    """Diff two template files and write an HTML report into the *log* folder.

    The report gets a unique ``env_<uuid>.html`` name so repeated runs
    never overwrite each other.
    """
    report_path = pathlib.Path(log) / f'env_{uuid.uuid4()}.html'
    with open(expected_path, mode='r') as expected, \
            open(actual_path, mode='r') as actual, \
            open(report_path, mode='w') as html_log:
        differ = difflib.HtmlDiff(wrapcolumn=100)
        html_log.write(differ.make_file(expected.readlines(), actual.readlines(),
                                        'origin', image, context=True))
def main() -> int:
    """Compare environment variables snapshot with expected and create HTML report if different"""
    # CLI: consumes two raw `env` snapshots (before/after the script under
    # test ran), an expected dockerfile-style template, and a log directory.
    parser = argparse.ArgumentParser(prog=os.path.basename(__file__),
                                     description='This is script to extract environment variables changes from '
                                                 'snapshots, compare with expected and create HTML diff report '
                                                 'if different',
                                     add_help=True)
    parser.add_argument(
        '-i',
        '--image',
        metavar='NAME',
        required=True,
        help='Image name',
    )
    parser.add_argument(
        '-e',
        '--expected',
        metavar='PATH',
        required=True,
        help='Path to file with expected environment variable changes from the script',
    )
    parser.add_argument(
        '-b',
        '--before',
        metavar='PATH',
        required=True,
        help='Path to file with environment variables snapshot before script launch',
    )
    parser.add_argument(
        '-a',
        '--after',
        metavar='PATH',
        required=True,
        help='Path to file with environment variables snapshot after script launch',
    )
    parser.add_argument(
        '-l',
        '--logs',
        metavar='PATH',
        default=str(pathlib.Path(os.path.realpath(__file__)).parent),
        help='Log path folder to store logs',
    )
    args = parser.parse_args()
    logging.basicConfig(level='INFO')
    log = logging.getLogger(__name__)
    log.info(f'Parsing inputs...')
    # Raw NAME=value snapshots taken before and after the script ran.
    vars_before = load_variables(args.before)
    vars_after = load_variables(args.after)
    # Variables that did not exist at all before the script ran.
    vars_created = {name: vars_after[name] for name in set(vars_after.keys()) - set(vars_before.keys())}
    # The expected template uses the dockerfile-like `ENV NAME=value` format.
    vars_expected = load_variables(args.expected, True)
    # Post-script values of the variables the template declares.
    vars_expected_updated = {name: vars_after[name] for name in vars_after if name in vars_expected}
    # Merge order matters: created and updated values override the template.
    vars_current = {**vars_expected, **vars_created, **vars_expected_updated}
    log.info('Generate updated environment variables template and search for changes:')
    output_path = pathlib.Path(args.logs) / os.path.basename(args.expected)
    save_env_template(output_path, vars_current)
    if vars_expected != vars_current:
        exit_code = 1
        vars_changed_script = extract_changed_environment_variables(vars_before, vars_after)
        vars_changed = extract_changed_environment_variables(vars_expected, vars_current)
        log.error('FAILED: changes detected')
        log.error(f'    after script launch {vars_changed_script}')
        log.error(f'    with expected {vars_changed}')
        # Persist an HTML diff so the failure can be inspected later.
        compare_templates(args.expected, output_path, args.image, args.logs)
    else:
        exit_code = 0
        log.info('PASSED')
    # Newly created variables fail the check even when expected values match.
    if vars_created:
        exit_code = 1
        log.error(f'FAILED: new variables are created - {vars_created}')
    if exit_code:
        log.info(f'See logs in {args.logs}')
    return exit_code
if __name__ == '__main__':
sys.exit(main()) | 0.500488 | 0.37935 |
import torchvision_sunner.transforms as sunnertransforms
import torchvision_sunner.data as sunnerData
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
from networks_stylegan import StyleGenerator, StyleDiscriminator
from loss import gradient_penalty, R1Penalty, R2Penalty
from opts import TrainOptions, INFO
from torchvision.utils import save_image
from tqdm import tqdm
from matplotlib import pyplot as plt
from torch import nn
import torch
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
import numpy as np
import random
import torch
import os
import sys
from datetime import datetime
from collections import defaultdict
import matplotlib.pyplot as plt
from IPython.core.debugger import set_trace
from utils import vis_batch, save_batch, collate_fn, load_config
from dataset import FashionEdgesDataset
# Set random seed for reproducibility
manualSeed = 999
#manualSeed = random.randint(1, 10000) # use if you want new results
print("Random Seed: ", manualSeed)
random.seed(manualSeed)
torch.manual_seed(manualSeed)
# Hyper-parameters
CRITIC_ITER = 5  # the generator is updated once every CRITIC_ITER iterations
SAVE_SAMPLE_FREQ = 100  # save a sample grid from the fixed latent every N iterations
device = torch.cuda.current_device()  # NOTE(review): unused below; opts.device is what is actually used
opts = TrainOptions().parse()
config = load_config('config.yaml')
# Create the model
start_epoch = 0
G = StyleGenerator()
D = StyleDiscriminator()
G.to(opts.device)
D.to(opts.device)
# Create dataset -- edge images only, padded to 1024x1024
train_set = FashionEdgesDataset(config.dataset.paths,
                                config.resolution,
                                only_edge = True,
                                pad_resolution = [1024,1024])
loader = DataLoader(dataset=train_set,
                    batch_size=config.batch_size,
                    collate_fn = collate_fn,  # NOTE(review): collate_fn appears to yield None for bad batches (the loop skips them)
                    shuffle=True)
N = len(loader)  # batches per epoch, used to compute the global step
# Create the criterion, optimizer and scheduler
optim_D = optim.Adam(D.parameters(), lr=0.00001, betas=(0.5, 0.999))
optim_G = optim.Adam(G.parameters(), lr=0.00001, betas=(0.5, 0.999))
scheduler_D = optim.lr_scheduler.ExponentialLR(optim_D, gamma=0.99)
scheduler_G = optim.lr_scheduler.ExponentialLR(optim_G, gamma=0.99)
# Create the timestamped experiment directory tree and the TensorBoard writer
exp_path = './stylegan_logs/' + "@" + datetime.now().strftime("%d.%m.%Y-%H:%M:%S")
checkpoints_path = os.path.join(exp_path, 'checkpoints')
images_path = os.path.join(exp_path, 'images')
writer = SummaryWriter(os.path.join(exp_path, "tb"))
if not os.path.isdir(exp_path):
    os.makedirs(exp_path)
if not os.path.isdir(checkpoints_path):
    os.makedirs(checkpoints_path)
if not os.path.isdir(images_path):
    os.makedirs(images_path)
print ('Experiment directory created:', exp_path)
# Train
# Fixed latent batch used to visualise generator progress over training
fix_z = torch.randn([opts.batch_size, 512]).to(opts.device)
softplus = nn.Softplus()  # softplus(x) = log(1 + e^x); used to build the logistic GAN losses below
for ep in range(start_epoch, opts.epoch):
    bar = tqdm(loader)
    metric_dict = defaultdict(list)  # per-epoch lists of scalar metrics
    for i, real_img in enumerate(bar):
        # collate_fn can return None for an unusable batch -- skip it
        if real_img is None:
            continue
        n_iters_total = ep * N + i  # global step for TensorBoard
        # =======================================================================================================
        # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
        # =======================================================================================================
        # Compute adversarial loss toward discriminator
        D.zero_grad()
        real_img = real_img.to(opts.device)
        real_logit = D(real_img)
        fake_img = G(torch.randn([real_img.size(0), 512]).to(opts.device))
        fake_logit = D(fake_img.detach())
        # Logistic loss: softplus(f) == -log(sigmoid(-f))
        d_loss = softplus(fake_logit).mean()
        d_loss = d_loss + softplus(-real_logit).mean()
        if opts.r1_gamma != 0.0:
            r1_penalty = R1Penalty(real_img.detach(), D)
            d_loss = d_loss + r1_penalty * (opts.r1_gamma * 0.5)
        if opts.r2_gamma != 0.0:
            r2_penalty = R2Penalty(fake_img.detach(), D)
            d_loss = d_loss + r2_penalty * (opts.r2_gamma * 0.5)
        metric_dict['loss_d'].append(d_loss.item())
        # Update discriminator
        d_loss.backward()
        optim_D.step()
        # =======================================================================================================
        # (2) Update G network every CRITIC_ITER steps: maximize log(D(G(z)))
        # =======================================================================================================
        if i % CRITIC_ITER == 0:
            G.zero_grad()
            fake_logit = D(fake_img)
            g_loss = softplus(-fake_logit).mean()
            metric_dict['loss_g'].append(g_loss.item())
            # Update generator
            g_loss.backward()
            optim_G.step()
        # Output training stats.
        # BUG FIX: 'loss_g' can still be empty here (e.g. the first batches were
        # None, so no generator update has run yet); the original
        # metric_dict['loss_g'][-1] raised IndexError and -- being a defaultdict
        # access -- also inserted an empty list that then crashed the
        # add_scalar loop below.  Use .get() and fall back to NaN instead.
        loss_g_hist = metric_dict.get('loss_g')
        last_g = loss_g_hist[-1] if loss_g_hist else float('nan')
        bar.set_description("Epoch {} [{}, {}] [G]: {} [D]: {}".format(
            ep, i + 1, len(loader), last_g, metric_dict['loss_d'][-1]))
        for title, value in metric_dict.items():
            if value:  # guard against empty metric lists
                writer.add_scalar(f"train/{title}", value[-1], n_iters_total)
        # Check how the generator is doing by saving G's output on fixed noise
        if i % SAVE_SAMPLE_FREQ == 0:
            with torch.no_grad():
                fake_img = G(fix_z).detach().cpu()
                save_image(fake_img, os.path.join(images_path, str(n_iters_total) + '.png'), nrow=4, normalize=True)
    # Save model checkpoint (overwritten each epoch)
    state = {
        'G': G.state_dict(),
        'D': D.state_dict(),
        'G_opt': optim_G.state_dict(),
        'D_opt': optim_D.state_dict(),
        'start_epoch': ep,
    }
    torch.save(state, os.path.join(checkpoints_path, 'latest.pth'))
    scheduler_D.step()
    scheduler_G.step()
    # Dump per-epoch mean stats to TensorBoard
    for title, value in metric_dict.items():
        if value:  # np.mean([]) would emit NaN with a RuntimeWarning
            writer.add_scalar(f"epoch/{title}_epoch", np.mean(value), ep)
import torchvision_sunner.transforms as sunnertransforms
import torchvision_sunner.data as sunnerData
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
from networks_stylegan import StyleGenerator, StyleDiscriminator
from loss import gradient_penalty, R1Penalty, R2Penalty
from opts import TrainOptions, INFO
from torchvision.utils import save_image
from tqdm import tqdm
from matplotlib import pyplot as plt
from torch import nn
import torch
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
import numpy as np
import random
import torch
import os
import sys
from datetime import datetime
from collections import defaultdict
import matplotlib.pyplot as plt
from IPython.core.debugger import set_trace
from utils import vis_batch, save_batch, collate_fn, load_config
from dataset import FashionEdgesDataset
# Set random seed for reproducibility
manualSeed = 999
#manualSeed = random.randint(1, 10000) # use if you want new results
print("Random Seed: ", manualSeed)
random.seed(manualSeed)
torch.manual_seed(manualSeed)
# Hyper-parameters
CRITIC_ITER = 5  # the generator is updated once every CRITIC_ITER iterations
SAVE_SAMPLE_FREQ = 100  # save a sample grid from the fixed latent every N iterations
device = torch.cuda.current_device()  # NOTE(review): unused below; opts.device is what is actually used
opts = TrainOptions().parse()
config = load_config('config.yaml')
# Create the model
start_epoch = 0
G = StyleGenerator()
D = StyleDiscriminator()
G.to(opts.device)
D.to(opts.device)
# Create dataset -- edge images only, padded to 1024x1024
train_set = FashionEdgesDataset(config.dataset.paths,
                                config.resolution,
                                only_edge = True,
                                pad_resolution = [1024,1024])
loader = DataLoader(dataset=train_set,
                    batch_size=config.batch_size,
                    collate_fn = collate_fn,  # NOTE(review): collate_fn appears to yield None for bad batches (the loop skips them)
                    shuffle=True)
N = len(loader)  # batches per epoch, used to compute the global step
# Create the criterion, optimizer and scheduler
optim_D = optim.Adam(D.parameters(), lr=0.00001, betas=(0.5, 0.999))
optim_G = optim.Adam(G.parameters(), lr=0.00001, betas=(0.5, 0.999))
scheduler_D = optim.lr_scheduler.ExponentialLR(optim_D, gamma=0.99)
scheduler_G = optim.lr_scheduler.ExponentialLR(optim_G, gamma=0.99)
# Create the timestamped experiment directory tree and the TensorBoard writer
exp_path = './stylegan_logs/' + "@" + datetime.now().strftime("%d.%m.%Y-%H:%M:%S")
checkpoints_path = os.path.join(exp_path, 'checkpoints')
images_path = os.path.join(exp_path, 'images')
writer = SummaryWriter(os.path.join(exp_path, "tb"))
if not os.path.isdir(exp_path):
    os.makedirs(exp_path)
if not os.path.isdir(checkpoints_path):
    os.makedirs(checkpoints_path)
if not os.path.isdir(images_path):
    os.makedirs(images_path)
print ('Experiment directory created:', exp_path)
# Train
# Fixed latent batch used to visualise generator progress over training
fix_z = torch.randn([opts.batch_size, 512]).to(opts.device)
softplus = nn.Softplus()  # softplus(x) = log(1 + e^x); used to build the logistic GAN losses below
for ep in range(start_epoch, opts.epoch):
    bar = tqdm(loader)
    metric_dict = defaultdict(list)  # per-epoch lists of scalar metrics
    for i, real_img in enumerate(bar):
        # collate_fn can return None for an unusable batch -- skip it
        if real_img is None:
            continue
        n_iters_total = ep * N + i  # global step for TensorBoard
        # =======================================================================================================
        # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
        # =======================================================================================================
        # Compute adversarial loss toward discriminator
        D.zero_grad()
        real_img = real_img.to(opts.device)
        real_logit = D(real_img)
        fake_img = G(torch.randn([real_img.size(0), 512]).to(opts.device))
        fake_logit = D(fake_img.detach())
        # Logistic loss: softplus(f) == -log(sigmoid(-f))
        d_loss = softplus(fake_logit).mean()
        d_loss = d_loss + softplus(-real_logit).mean()
        if opts.r1_gamma != 0.0:
            r1_penalty = R1Penalty(real_img.detach(), D)
            d_loss = d_loss + r1_penalty * (opts.r1_gamma * 0.5)
        if opts.r2_gamma != 0.0:
            r2_penalty = R2Penalty(fake_img.detach(), D)
            d_loss = d_loss + r2_penalty * (opts.r2_gamma * 0.5)
        metric_dict['loss_d'].append(d_loss.item())
        # Update discriminator
        d_loss.backward()
        optim_D.step()
        # =======================================================================================================
        # (2) Update G network every CRITIC_ITER steps: maximize log(D(G(z)))
        # =======================================================================================================
        if i % CRITIC_ITER == 0:
            G.zero_grad()
            fake_logit = D(fake_img)
            g_loss = softplus(-fake_logit).mean()
            metric_dict['loss_g'].append(g_loss.item())
            # Update generator
            g_loss.backward()
            optim_G.step()
        # Output training stats.
        # BUG FIX: 'loss_g' can still be empty here (e.g. the first batches were
        # None, so no generator update has run yet); the original
        # metric_dict['loss_g'][-1] raised IndexError and -- being a defaultdict
        # access -- also inserted an empty list that then crashed the
        # add_scalar loop below.  Use .get() and fall back to NaN instead.
        loss_g_hist = metric_dict.get('loss_g')
        last_g = loss_g_hist[-1] if loss_g_hist else float('nan')
        bar.set_description("Epoch {} [{}, {}] [G]: {} [D]: {}".format(
            ep, i + 1, len(loader), last_g, metric_dict['loss_d'][-1]))
        for title, value in metric_dict.items():
            if value:  # guard against empty metric lists
                writer.add_scalar(f"train/{title}", value[-1], n_iters_total)
        # Check how the generator is doing by saving G's output on fixed noise
        if i % SAVE_SAMPLE_FREQ == 0:
            with torch.no_grad():
                fake_img = G(fix_z).detach().cpu()
                save_image(fake_img, os.path.join(images_path, str(n_iters_total) + '.png'), nrow=4, normalize=True)
    # Save model checkpoint (overwritten each epoch)
    state = {
        'G': G.state_dict(),
        'D': D.state_dict(),
        'G_opt': optim_G.state_dict(),
        'D_opt': optim_D.state_dict(),
        'start_epoch': ep,
    }
    torch.save(state, os.path.join(checkpoints_path, 'latest.pth'))
    scheduler_D.step()
    scheduler_G.step()
    # Dump per-epoch mean stats to TensorBoard
    for title, value in metric_dict.items():
        if value:  # np.mean([]) would emit NaN with a RuntimeWarning
            writer.add_scalar(f"epoch/{title}_epoch", np.mean(value), ep)
from collections import defaultdict
from chroma_core.services.job_scheduler.job_scheduler_client import JobSchedulerClient
from django.db.models import Q
from chroma_core.models import ManagedFilesystem, ManagedTarget
from chroma_core.models import ManagedOst, ManagedMdt, ManagedMgs
from chroma_core.models import Volume, VolumeNode
from chroma_core.models import Command
from chroma_core.models.filesystem import HSM_CONTROL_KEY, HSM_CONTROL_PARAMS
import tastypie.http as http
from tastypie import fields
from tastypie.validation import Validation
from tastypie.authorization import DjangoAuthorization
from chroma_api.authentication import AnonymousAuthentication
from chroma_api.utils import custom_response, ConfParamResource, MetricResource, dehydrate_command
from chroma_api.validation_utils import validate
from chroma_core.lib import conf_param
class FilesystemValidation(Validation):
    """Validate FilesystemResource payloads: PUT (conf_param updates) and
    POST (file system creation) before they reach the job scheduler."""

    def _validate_put(self, bundle, request):
        """Validate an update; only conf_params changes are checked."""
        errors = defaultdict(list)
        if 'conf_params' in bundle.data and bundle.data['conf_params'] is not None:
            try:
                fs = ManagedFilesystem.objects.get(pk = bundle.data['id'])
            except ManagedFilesystem.DoesNotExist:
                errors['id'] = "Filesystem with id %s not found" % bundle.data['id']
            except KeyError:
                errors['id'] = "Field is mandatory"
            else:
                if fs.immutable_state:
                    # Immutable objects may not have conf_params modified; an
                    # identical dict is allowed so idempotent PUTs succeed.
                    if not conf_param.compare(bundle.data['conf_params'], conf_param.get_conf_params(fs)):
                        errors['conf_params'].append("Cannot modify conf_params on immutable_state objects")
                else:
                    conf_param_errors = conf_param.validate_conf_params(ManagedFilesystem, bundle.data['conf_params'])
                    if conf_param_errors:
                        errors['conf_params'] = conf_param_errors
        return errors

    def _validate_post(self, bundle, request):
        """Validate a creation request.

        Returns a (possibly nested) dict mapping field names to error
        messages; an empty dict means the bundle is valid.
        """
        errors = defaultdict(list)
        targets = defaultdict(list)
        # Check 'mgt', 'mdts', 'osts' are present and compose
        # a record of targets which will be formatted
        try:
            # Check that client hasn't specified an existing MGT
            # *and* a volume to format.
            if 'id' in bundle.data['mgt'] and 'volume_id' in bundle.data['mgt']:
                errors['mgt'].append("id and volume_id are mutually exclusive")
            mgt = bundle.data['mgt']
            if 'volume_id' in mgt:
                targets['mgt'].append(mgt)
        except KeyError:
            errors['mgt'].append("This field is mandatory")
        try:
            targets['mdts'].extend(bundle.data['mdts'])
        except KeyError:
            errors['mdts'].append("This field is mandatory")
        try:
            targets['osts'].extend(bundle.data['osts'])
        except KeyError:
            errors['osts'].append("This field is mandatory")
        if 'conf_params' not in bundle.data:
            errors['conf_params'].append("This field is mandatory")
        if 'name' not in bundle.data:
            errors['name'].append("This field is mandatory")
        # Return if some of the things we're going to validate in detail are absent
        if len(errors):
            return errors
        # As all fields are present we can be more specific about the errors.
        errors['mgt'] = defaultdict(list)
        errors['mdts'] = defaultdict(list)
        errors['osts'] = defaultdict(list)
        # Validate filesystem name
        if len(bundle.data['name']) > 8:
            errors['name'].append("Name '%s' too long (max 8 characters)" % bundle.data['name'])
        if len(bundle.data['name']) < 1:
            errors['name'].append("Name '%s' too short (min 1 character)" % bundle.data['name'])
        if bundle.data['name'].find(" ") != -1:
            errors['name'].append("Name may not contain spaces")
        # Check volume IDs are present and correct
        used_volume_ids = set()

        def check_volume(field, volume_id):
            """Return an error string if volume_id is reused, unknown or busy; else None."""
            # Check we haven't tried to use the same volume twice
            if volume_id in used_volume_ids:
                return "Volume ID %s specified for multiple targets!" % volume_id
            try:
                # Check the volume exists
                volume = Volume.objects.get(id = volume_id)
                try:
                    # Check the volume isn't in use
                    target = ManagedTarget.objects.get(volume = volume)
                    return "Volume with ID %s is already in use by target %s" % (volume_id, target)
                except ManagedTarget.DoesNotExist:
                    pass
            except Volume.DoesNotExist:
                return "Volume with ID %s not found" % volume_id
            used_volume_ids.add(volume_id)

        try:
            mgt_volume_id = bundle.data['mgt']['volume_id']
            error = check_volume('mgt', mgt_volume_id)
            if error:
                errors['mgt']['volume_id'].append(error)
        except KeyError:
            # No volume_id: the client must be referencing an existing MGT by id
            mgt_volume_id = None
            try:
                mgt = ManagedMgs.objects.get(id = bundle.data['mgt']['id'])
                if mgt.immutable_state:
                    errors['mgt']['id'].append("MGT is unmanaged")
                try:
                    ManagedFilesystem.objects.get(name = bundle.data['name'], mgs = mgt)
                    errors['mgt']['name'].append("A file system with name '%s' already exists for this MGT" % bundle.data['name'])
                except ManagedFilesystem.DoesNotExist:
                    pass
            except KeyError:
                errors['mgt']['id'].append("One of id or volume_id must be set")
            except ManagedMgs.DoesNotExist:
                errors['mgt']['id'].append("MGT with ID %s not found" % (bundle.data['mgt']['id']))
        for mdt in bundle.data['mdts']:
            try:
                mdt_volume_id = mdt['volume_id']
                # BUG FIX: check_volume()'s return value was previously
                # discarded, silently dropping duplicate/unknown/busy-volume
                # errors for MDTs.
                error = check_volume('mdts', mdt_volume_id)
                if error:
                    errors['mdts']['volume_id'].append(error)
            except KeyError:
                # BUG FIX: the original message had no %s placeholder so the
                # '%' operator raised TypeError; .get() also tolerates a
                # missing 'id' key here.
                errors['mdts']['volume_id'].append("volume_id attribute is mandatory for mdt %s" % mdt.get('id'))
        for ost in bundle.data['osts']:
            try:
                volume_id = ost['volume_id']
                # BUG FIX: as above, report check_volume() errors instead of
                # discarding them.
                error = check_volume('osts', volume_id)
                if error:
                    errors['osts']['volume_id'].append(error)
            except KeyError:
                # BUG FIX: missing %s placeholder (TypeError) in the original.
                errors['osts']['volume_id'].append("volume_id attribute is mandatory for ost %s" % ost.get('id'))
        # If formatting an MGS, check its not on a host already used as an MGS
        # If this is an MGS, there may not be another MGS on
        # this host
        if mgt_volume_id:
            mgt_volume = Volume.objects.get(id = mgt_volume_id)
            hosts = [vn.host for vn in VolumeNode.objects.filter(volume = mgt_volume, use = True)]
            conflicting_mgs_count = ManagedTarget.objects.filter(~Q(managedmgs = None), managedtargetmount__host__in = hosts).count()
            if conflicting_mgs_count > 0:
                errors['mgt']['volume_id'].append("Volume %s cannot be used for MGS (only one MGS is allowed per server)" % mgt_volume.label)

        def validate_target(klass, target):
            """Validate per-target settings (inode options, conf_params)."""
            target_errors = defaultdict(list)
            volume = Volume.objects.get(id = target['volume_id'])
            if 'inode_count' in target and 'bytes_per_inode' in target:
                target_errors['inode_count'].append("inode_count and bytes_per_inode are mutually exclusive")
            if 'conf_params' in target:
                conf_param_errors = conf_param.validate_conf_params(klass, target['conf_params'])
                if conf_param_errors:
                    # FIXME: not really representing target-specific validations cleanly,
                    # will sort out while fixing HYD-1077.
                    target_errors['conf_params'] = conf_param_errors
            for setting in ['inode_count', 'inode_size', 'bytes_per_inode']:
                if setting in target:
                    if target[setting] is not None and not isinstance(target[setting], int):
                        target_errors[setting].append("Must be an integer")
            # If they specify an inode size and a bytes_per_inode, check the
            # inode fits within the ratio
            try:
                inode_size = target['inode_size']
                bytes_per_inode = target['bytes_per_inode']
                if inode_size >= bytes_per_inode:
                    target_errors['inode_size'].append("inode_size must be less than bytes_per_inode")
            except KeyError:
                pass
            # If they specify an inode count, check it will fit on the device
            try:
                inode_count = target['inode_count']
            except KeyError:
                # If no inode_count is specified, no need to check it against inode_size
                pass
            else:
                try:
                    inode_size = target['inode_size']
                except KeyError:
                    # Default inode sizes per target type (bytes)
                    inode_size = {ManagedMgs: 128, ManagedMdt: 512, ManagedOst: 256}[klass]
                if inode_size is not None and inode_count is not None:
                    if inode_count * inode_size > volume.size:
                        target_errors['inode_count'].append("%d %d-byte inodes too large for %s-byte device" % (
                            inode_count, inode_size, volume.size))
            return target_errors

        # Validate generic target settings.  The loop variable is renamed so it
        # no longer shadows the `targets` dict being iterated.
        for attr, target_list in targets.items():
            for target in target_list:
                klass = ManagedTarget.managed_target_of_type(attr[0:3])  # We get osts, mdts, mgt so just take the first 3 letters.
                target_errors = validate_target(klass, target)
                if target_errors:
                    errors[attr].update(target_errors)
        conf_param_errors = conf_param.validate_conf_params(ManagedFilesystem, bundle.data['conf_params'])
        if conf_param_errors:
            errors['conf_params'] = conf_param_errors

        def recursive_count(o):
            """Count the number of non-empty dicts/lists or other objects"""
            if isinstance(o, dict):
                c = 0
                for v in o.values():
                    c += recursive_count(v)
                return c
            elif isinstance(o, list):
                c = 0
                for v in o:
                    c += recursive_count(v)
                return c
            else:
                return 1

        # The nested error dicts were created eagerly above, so only report
        # errors when a leaf message actually exists.
        if not recursive_count(errors):
            errors = {}
        return errors

    def is_valid(self, bundle, request=None):
        """Dispatch to the method-specific validator (POST/PUT only)."""
        if request.method == "POST":
            return self._validate_post(bundle, request)
        elif request.method == "PUT":
            return self._validate_put(bundle, request)
        else:
            return {}
class FilesystemResource(MetricResource, ConfParamResource):
    """
    A Lustre file system, associated with exactly one MGT and consisting of
    one or mode MDTs and one or more OSTs.
    When using POST to create a file system, specify volumes to use like this:
    ::
    {osts: [{volume_id: 22}],
    mdt: {volume_id: 23},
    mgt: {volume_id: 24}}
    To create a file system using an existing MGT instead of creating a new
    MGT, set the `id` attribute instead of the `volume_id` attribute for
    that target (i.e. `mgt: {id: 123}`).
    Note: A Lustre file system is owned by an MGT, and the ``name`` of the file system
    is unique within that MGT. Do not use ``name`` as a globally unique identifier
    for a file system in your application.
    """
    # Derived, read-only statistics; values are computed by the
    # dehydrate_* methods below from target metrics.
    bytes_free = fields.IntegerField()
    bytes_total = fields.IntegerField()
    files_free = fields.IntegerField()
    files_total = fields.IntegerField()
    client_count = fields.IntegerField(help_text = "Number of Lustre clients which are connected to this file system")
    mount_command = fields.CharField(null = True, help_text = "Example command for\
mounting this file system on a Lustre client, e.g. \"mount -t lustre 192.168.0.1:/testfs /mnt/testfs\"")
    mount_path = fields.CharField(null = True, help_text = "Path for mounting the file system\
on a Lustre client, e.g. \"192.168.0.1:/testfs\"")
    # Related targets, resolved by filtering on this filesystem
    osts = fields.ToManyField('chroma_api.target.TargetResource', null = True,
                              attribute = lambda bundle: ManagedOst.objects.filter(filesystem = bundle.obj),
                              help_text = "List of OSTs which belong to this file system")
    mdts = fields.ToManyField('chroma_api.target.TargetResource', null = True, full = True,
                              attribute = lambda bundle: ManagedMdt.objects.filter(filesystem = bundle.obj),
                              help_text = "List of MDTs in this file system, should be at least 1 unless the "
                              "file system is in the process of being deleted")
    mgt = fields.ToOneField('chroma_api.target.TargetResource', attribute = 'mgs', full = True,
                            help_text = "The MGT on which this file system is registered")

    def _get_stat_simple(self, bundle, klass, stat_name, factor = 1.0):
        """Return the latest value of `stat_name` aggregated over targets of
        `klass`, scaled by `factor`, or None when the metric is unavailable."""
        try:
            return bundle.obj.metrics.fetch_last(klass, fetch_metrics=[stat_name])[1][stat_name] * factor
        except (KeyError, IndexError, TypeError):
            # Metric missing or malformed -- expose as null rather than erroring
            return None

    def dehydrate_mount_path(self, bundle):
        # Delegates to the model; may return None if not yet known
        return bundle.obj.mount_path()

    def dehydrate_mount_command(self, bundle):
        """Build an example client mount command from the mount path, or None."""
        path = self.dehydrate_mount_path(bundle)
        if path:
            return "mount -t lustre %s /mnt/%s" % (path, bundle.obj.name)
        else:
            return None

    def dehydrate_bytes_free(self, bundle):
        # OST stats are in kilobytes ('kbytesfree'); scale to bytes
        return self._get_stat_simple(bundle, ManagedOst, 'kbytesfree', 1024)

    def dehydrate_bytes_total(self, bundle):
        return self._get_stat_simple(bundle, ManagedOst, 'kbytestotal', 1024)

    def dehydrate_files_free(self, bundle):
        return self._get_stat_simple(bundle, ManagedMdt, 'filesfree')

    def dehydrate_files_total(self, bundle):
        return self._get_stat_simple(bundle, ManagedMdt, 'filestotal')

    def get_hsm_control_params(self, mdt, bundle):
        """List the HSM control transitions available from the current
        cdt_status, each carrying a stripped-down copy of the controlling MDT."""
        all_params = set(HSM_CONTROL_PARAMS.keys())
        # Everything except the current state is an available transition
        available_params = all_params - set([bundle.data['cdt_status']])
        bundle_params = []
        # Strip the mdt down for brevity of transport and also to
        # avoid problems with the PUT.
        (resource, id) = mdt.data['resource_uri'].split('/')[-3:-1]
        safe_mdt = dict(
            kind = mdt.data['kind'],
            resource = resource,
            id = id,
            conf_params = mdt.data['conf_params']
        )
        for param in available_params:
            bundle_params.append(dict(
                mdt = safe_mdt,
                param_key = HSM_CONTROL_KEY,
                param_value = param,
                verb = HSM_CONTROL_PARAMS[param]['verb'],
                long_description = HSM_CONTROL_PARAMS[param]['long_description']
            ))
        return bundle_params

    def dehydrate(self, bundle):
        """Attach HSM state (from the controlling MDT, if any) and the
        computed client count to the serialized bundle."""
        # Have to do this here because we can't guarantee ordering during
        # full_dehydrate to ensure that the mdt bundles are available.
        try:
            mdt = next(m for m in bundle.data['mdts'] if 'mdt.hsm_control' in m.data['conf_params'])
            bundle.data['cdt_status'] = mdt.data['conf_params']['mdt.hsm_control']
            bundle.data['cdt_mdt'] = mdt.data['resource_uri']
            bundle.data['hsm_control_params'] = self.get_hsm_control_params(mdt, bundle)
        except StopIteration:
            # No MDT carries mdt.hsm_control: HSM fields are simply omitted
            pass
        # Now the number of MDTs is known, calculate the client count: the
        # number of connections divided by the number of MDTs.  During creation
        # and deletion of filesystems the MDT count may be 0, in which case the
        # connected client count must be zero.
        if len(bundle.data['mdts']) == 0:
            bundle.data['client_count'] = 0
        else:
            bundle.data['client_count'] = self._get_stat_simple(bundle, ManagedMdt, 'client_count', factor=1.0 / len(bundle.data['mdts']))
        return bundle

    class Meta:
        queryset = ManagedFilesystem.objects.all()
        resource_name = 'filesystem'
        authorization = DjangoAuthorization()
        authentication = AnonymousAuthentication()
        excludes = ['not_deleted', 'ost_next_index', 'mdt_next_index']
        ordering = ['name']
        filtering = {'id': ['exact', 'in'], 'name': ['exact']}
        list_allowed_methods = ['get', 'post']
        detail_allowed_methods = ['get', 'delete', 'put']
        readonly = ['bytes_free', 'bytes_total', 'files_free', 'files_total', 'client_count', 'mount_command', 'mount_path']
        validation = FilesystemValidation()
        always_return_data = True

    @validate
    def obj_create(self, bundle, **kwargs):
        """Create the file system via the job scheduler and respond with
        202 Accepted carrying both the command and the new filesystem."""
        request = bundle.request
        filesystem_id, command_id = JobSchedulerClient.create_filesystem(bundle.data)
        filesystem = ManagedFilesystem.objects.get(pk = filesystem_id)
        command = Command.objects.get(pk = command_id)
        fs_bundle = self.full_dehydrate(self.build_bundle(obj = filesystem))
        filesystem_data = self.alter_detail_data_to_serialize(request,
                                                              fs_bundle).data
        # custom_response raises an immediate HTTP response (tastypie idiom),
        # short-circuiting the normal tastypie create flow.
        raise custom_response(self, request, http.HttpAccepted,
                              {
                                  'command': dehydrate_command(command),
                                  'filesystem': filesystem_data
                              })
from collections import defaultdict
from chroma_core.services.job_scheduler.job_scheduler_client import JobSchedulerClient
from django.db.models import Q
from chroma_core.models import ManagedFilesystem, ManagedTarget
from chroma_core.models import ManagedOst, ManagedMdt, ManagedMgs
from chroma_core.models import Volume, VolumeNode
from chroma_core.models import Command
from chroma_core.models.filesystem import HSM_CONTROL_KEY, HSM_CONTROL_PARAMS
import tastypie.http as http
from tastypie import fields
from tastypie.validation import Validation
from tastypie.authorization import DjangoAuthorization
from chroma_api.authentication import AnonymousAuthentication
from chroma_api.utils import custom_response, ConfParamResource, MetricResource, dehydrate_command
from chroma_api.validation_utils import validate
from chroma_core.lib import conf_param
class FilesystemValidation(Validation):
    """Validate FilesystemResource payloads: PUT (conf_param updates) and
    POST (file system creation) before they reach the job scheduler."""

    def _validate_put(self, bundle, request):
        """Validate an update; only conf_params changes are checked."""
        errors = defaultdict(list)
        if 'conf_params' in bundle.data and bundle.data['conf_params'] is not None:
            try:
                fs = ManagedFilesystem.objects.get(pk = bundle.data['id'])
            except ManagedFilesystem.DoesNotExist:
                errors['id'] = "Filesystem with id %s not found" % bundle.data['id']
            except KeyError:
                errors['id'] = "Field is mandatory"
            else:
                if fs.immutable_state:
                    # Immutable objects may not have conf_params modified; an
                    # identical dict is allowed so idempotent PUTs succeed.
                    if not conf_param.compare(bundle.data['conf_params'], conf_param.get_conf_params(fs)):
                        errors['conf_params'].append("Cannot modify conf_params on immutable_state objects")
                else:
                    conf_param_errors = conf_param.validate_conf_params(ManagedFilesystem, bundle.data['conf_params'])
                    if conf_param_errors:
                        errors['conf_params'] = conf_param_errors
        return errors

    def _validate_post(self, bundle, request):
        """Validate a creation request.

        Returns a (possibly nested) dict mapping field names to error
        messages; an empty dict means the bundle is valid.
        """
        errors = defaultdict(list)
        targets = defaultdict(list)
        # Check 'mgt', 'mdts', 'osts' are present and compose
        # a record of targets which will be formatted
        try:
            # Check that client hasn't specified an existing MGT
            # *and* a volume to format.
            if 'id' in bundle.data['mgt'] and 'volume_id' in bundle.data['mgt']:
                errors['mgt'].append("id and volume_id are mutually exclusive")
            mgt = bundle.data['mgt']
            if 'volume_id' in mgt:
                targets['mgt'].append(mgt)
        except KeyError:
            errors['mgt'].append("This field is mandatory")
        try:
            targets['mdts'].extend(bundle.data['mdts'])
        except KeyError:
            errors['mdts'].append("This field is mandatory")
        try:
            targets['osts'].extend(bundle.data['osts'])
        except KeyError:
            errors['osts'].append("This field is mandatory")
        if 'conf_params' not in bundle.data:
            errors['conf_params'].append("This field is mandatory")
        if 'name' not in bundle.data:
            errors['name'].append("This field is mandatory")
        # Return if some of the things we're going to validate in detail are absent
        if len(errors):
            return errors
        # As all fields are present we can be more specific about the errors.
        errors['mgt'] = defaultdict(list)
        errors['mdts'] = defaultdict(list)
        errors['osts'] = defaultdict(list)
        # Validate filesystem name
        if len(bundle.data['name']) > 8:
            errors['name'].append("Name '%s' too long (max 8 characters)" % bundle.data['name'])
        if len(bundle.data['name']) < 1:
            errors['name'].append("Name '%s' too short (min 1 character)" % bundle.data['name'])
        if bundle.data['name'].find(" ") != -1:
            errors['name'].append("Name may not contain spaces")
        # Check volume IDs are present and correct
        used_volume_ids = set()

        def check_volume(field, volume_id):
            """Return an error string if volume_id is reused, unknown or busy; else None."""
            # Check we haven't tried to use the same volume twice
            if volume_id in used_volume_ids:
                return "Volume ID %s specified for multiple targets!" % volume_id
            try:
                # Check the volume exists
                volume = Volume.objects.get(id = volume_id)
                try:
                    # Check the volume isn't in use
                    target = ManagedTarget.objects.get(volume = volume)
                    return "Volume with ID %s is already in use by target %s" % (volume_id, target)
                except ManagedTarget.DoesNotExist:
                    pass
            except Volume.DoesNotExist:
                return "Volume with ID %s not found" % volume_id
            used_volume_ids.add(volume_id)

        try:
            mgt_volume_id = bundle.data['mgt']['volume_id']
            error = check_volume('mgt', mgt_volume_id)
            if error:
                errors['mgt']['volume_id'].append(error)
        except KeyError:
            # No volume_id: the client must be referencing an existing MGT by id
            mgt_volume_id = None
            try:
                mgt = ManagedMgs.objects.get(id = bundle.data['mgt']['id'])
                if mgt.immutable_state:
                    errors['mgt']['id'].append("MGT is unmanaged")
                try:
                    ManagedFilesystem.objects.get(name = bundle.data['name'], mgs = mgt)
                    errors['mgt']['name'].append("A file system with name '%s' already exists for this MGT" % bundle.data['name'])
                except ManagedFilesystem.DoesNotExist:
                    pass
            except KeyError:
                errors['mgt']['id'].append("One of id or volume_id must be set")
            except ManagedMgs.DoesNotExist:
                errors['mgt']['id'].append("MGT with ID %s not found" % (bundle.data['mgt']['id']))
        for mdt in bundle.data['mdts']:
            try:
                mdt_volume_id = mdt['volume_id']
                # BUG FIX: check_volume()'s return value was previously
                # discarded, silently dropping duplicate/unknown/busy-volume
                # errors for MDTs.
                error = check_volume('mdts', mdt_volume_id)
                if error:
                    errors['mdts']['volume_id'].append(error)
            except KeyError:
                # BUG FIX: the original message had no %s placeholder so the
                # '%' operator raised TypeError; .get() also tolerates a
                # missing 'id' key here.
                errors['mdts']['volume_id'].append("volume_id attribute is mandatory for mdt %s" % mdt.get('id'))
        for ost in bundle.data['osts']:
            try:
                volume_id = ost['volume_id']
                # BUG FIX: as above, report check_volume() errors instead of
                # discarding them.
                error = check_volume('osts', volume_id)
                if error:
                    errors['osts']['volume_id'].append(error)
            except KeyError:
                # BUG FIX: missing %s placeholder (TypeError) in the original.
                errors['osts']['volume_id'].append("volume_id attribute is mandatory for ost %s" % ost.get('id'))
        # If formatting an MGS, check its not on a host already used as an MGS
        # If this is an MGS, there may not be another MGS on
        # this host
        if mgt_volume_id:
            mgt_volume = Volume.objects.get(id = mgt_volume_id)
            hosts = [vn.host for vn in VolumeNode.objects.filter(volume = mgt_volume, use = True)]
            conflicting_mgs_count = ManagedTarget.objects.filter(~Q(managedmgs = None), managedtargetmount__host__in = hosts).count()
            if conflicting_mgs_count > 0:
                errors['mgt']['volume_id'].append("Volume %s cannot be used for MGS (only one MGS is allowed per server)" % mgt_volume.label)

        def validate_target(klass, target):
            """Validate per-target settings (inode options, conf_params)."""
            target_errors = defaultdict(list)
            volume = Volume.objects.get(id = target['volume_id'])
            if 'inode_count' in target and 'bytes_per_inode' in target:
                target_errors['inode_count'].append("inode_count and bytes_per_inode are mutually exclusive")
            if 'conf_params' in target:
                conf_param_errors = conf_param.validate_conf_params(klass, target['conf_params'])
                if conf_param_errors:
                    # FIXME: not really representing target-specific validations cleanly,
                    # will sort out while fixing HYD-1077.
                    target_errors['conf_params'] = conf_param_errors
            for setting in ['inode_count', 'inode_size', 'bytes_per_inode']:
                if setting in target:
                    if target[setting] is not None and not isinstance(target[setting], int):
                        target_errors[setting].append("Must be an integer")
            # If they specify an inode size and a bytes_per_inode, check the
            # inode fits within the ratio
            try:
                inode_size = target['inode_size']
                bytes_per_inode = target['bytes_per_inode']
                if inode_size >= bytes_per_inode:
                    target_errors['inode_size'].append("inode_size must be less than bytes_per_inode")
            except KeyError:
                pass
            # If they specify an inode count, check it will fit on the device
            try:
                inode_count = target['inode_count']
            except KeyError:
                # If no inode_count is specified, no need to check it against inode_size
                pass
            else:
                try:
                    inode_size = target['inode_size']
                except KeyError:
                    # Default inode sizes per target type (bytes)
                    inode_size = {ManagedMgs: 128, ManagedMdt: 512, ManagedOst: 256}[klass]
                if inode_size is not None and inode_count is not None:
                    if inode_count * inode_size > volume.size:
                        target_errors['inode_count'].append("%d %d-byte inodes too large for %s-byte device" % (
                            inode_count, inode_size, volume.size))
            return target_errors

        # Validate generic target settings.  The loop variable is renamed so it
        # no longer shadows the `targets` dict being iterated.
        for attr, target_list in targets.items():
            for target in target_list:
                klass = ManagedTarget.managed_target_of_type(attr[0:3])  # We get osts, mdts, mgt so just take the first 3 letters.
                target_errors = validate_target(klass, target)
                if target_errors:
                    errors[attr].update(target_errors)
        conf_param_errors = conf_param.validate_conf_params(ManagedFilesystem, bundle.data['conf_params'])
        if conf_param_errors:
            errors['conf_params'] = conf_param_errors

        def recursive_count(o):
            """Count the number of non-empty dicts/lists or other objects"""
            if isinstance(o, dict):
                c = 0
                for v in o.values():
                    c += recursive_count(v)
                return c
            elif isinstance(o, list):
                c = 0
                for v in o:
                    c += recursive_count(v)
                return c
            else:
                return 1

        # The nested error dicts were created eagerly above, so only report
        # errors when a leaf message actually exists.
        if not recursive_count(errors):
            errors = {}
        return errors

    def is_valid(self, bundle, request=None):
        """Dispatch to the method-specific validator (POST/PUT only)."""
        if request.method == "POST":
            return self._validate_post(bundle, request)
        elif request.method == "PUT":
            return self._validate_put(bundle, request)
        else:
            return {}
class FilesystemResource(MetricResource, ConfParamResource):
"""
A Lustre file system, associated with exactly one MGT and consisting of
one or mode MDTs and one or more OSTs.
When using POST to create a file system, specify volumes to use like this:
::
{osts: [{volume_id: 22}],
mdt: {volume_id: 23},
mgt: {volume_id: 24}}
To create a file system using an existing MGT instead of creating a new
MGT, set the `id` attribute instead of the `volume_id` attribute for
that target (i.e. `mgt: {id: 123}`).
Note: A Lustre file system is owned by an MGT, and the ``name`` of the file system
is unique within that MGT. Do not use ``name`` as a globally unique identifier
for a file system in your application.
"""
bytes_free = fields.IntegerField()
bytes_total = fields.IntegerField()
files_free = fields.IntegerField()
files_total = fields.IntegerField()
client_count = fields.IntegerField(help_text = "Number of Lustre clients which are connected to this file system")
mount_command = fields.CharField(null = True, help_text = "Example command for\
mounting this file system on a Lustre client, e.g. \"mount -t lustre 192.168.0.1:/testfs /mnt/testfs\"")
mount_path = fields.CharField(null = True, help_text = "Path for mounting the file system\
on a Lustre client, e.g. \"192.168.0.1:/testfs\"")
osts = fields.ToManyField('chroma_api.target.TargetResource', null = True,
attribute = lambda bundle: ManagedOst.objects.filter(filesystem = bundle.obj),
help_text = "List of OSTs which belong to this file system")
mdts = fields.ToManyField('chroma_api.target.TargetResource', null = True, full = True,
attribute = lambda bundle: ManagedMdt.objects.filter(filesystem = bundle.obj),
help_text = "List of MDTs in this file system, should be at least 1 unless the "
"file system is in the process of being deleted")
mgt = fields.ToOneField('chroma_api.target.TargetResource', attribute = 'mgs', full = True,
help_text = "The MGT on which this file system is registered")
def _get_stat_simple(self, bundle, klass, stat_name, factor = 1.0):
try:
return bundle.obj.metrics.fetch_last(klass, fetch_metrics=[stat_name])[1][stat_name] * factor
except (KeyError, IndexError, TypeError):
return None
def dehydrate_mount_path(self, bundle):
return bundle.obj.mount_path()
def dehydrate_mount_command(self, bundle):
path = self.dehydrate_mount_path(bundle)
if path:
return "mount -t lustre %s /mnt/%s" % (path, bundle.obj.name)
else:
return None
def dehydrate_bytes_free(self, bundle):
return self._get_stat_simple(bundle, ManagedOst, 'kbytesfree', 1024)
def dehydrate_bytes_total(self, bundle):
return self._get_stat_simple(bundle, ManagedOst, 'kbytestotal', 1024)
def dehydrate_files_free(self, bundle):
return self._get_stat_simple(bundle, ManagedMdt, 'filesfree')
def dehydrate_files_total(self, bundle):
return self._get_stat_simple(bundle, ManagedMdt, 'filestotal')
def get_hsm_control_params(self, mdt, bundle):
all_params = set(HSM_CONTROL_PARAMS.keys())
available_params = all_params - set([bundle.data['cdt_status']])
bundle_params = []
# Strip the mdt down for brevity of transport and also to
# avoid problems with the PUT.
(resource, id) = mdt.data['resource_uri'].split('/')[-3:-1]
safe_mdt = dict(
kind = mdt.data['kind'],
resource = resource,
id = id,
conf_params = mdt.data['conf_params']
)
for param in available_params:
bundle_params.append(dict(
mdt = safe_mdt,
param_key = HSM_CONTROL_KEY,
param_value = param,
verb = HSM_CONTROL_PARAMS[param]['verb'],
long_description = HSM_CONTROL_PARAMS[param]['long_description']
))
return bundle_params
def dehydrate(self, bundle):
# Have to do this here because we can't guarantee ordering during
# full_dehydrate to ensure that the mdt bundles are available.
try:
mdt = next(m for m in bundle.data['mdts'] if 'mdt.hsm_control' in m.data['conf_params'])
bundle.data['cdt_status'] = mdt.data['conf_params']['mdt.hsm_control']
bundle.data['cdt_mdt'] = mdt.data['resource_uri']
bundle.data['hsm_control_params'] = self.get_hsm_control_params(mdt, bundle)
except StopIteration:
pass
# Now the number of MDT's is known calculate the client count. The client count is calculated by the number of connections
# divided by the number of MDT's. In the case, that is possible durring creation and deletion of filesystems, where the mdt
# count is 0 then the connected clients must be zero.
if len(bundle.data['mdts']) == 0:
bundle.data['client_count'] = 0
else:
bundle.data['client_count'] = self._get_stat_simple(bundle, ManagedMdt, 'client_count', factor=1.0 / len(bundle.data['mdts']))
return bundle
class Meta:
queryset = ManagedFilesystem.objects.all()
resource_name = 'filesystem'
authorization = DjangoAuthorization()
authentication = AnonymousAuthentication()
excludes = ['not_deleted', 'ost_next_index', 'mdt_next_index']
ordering = ['name']
filtering = {'id': ['exact', 'in'], 'name': ['exact']}
list_allowed_methods = ['get', 'post']
detail_allowed_methods = ['get', 'delete', 'put']
readonly = ['bytes_free', 'bytes_total', 'files_free', 'files_total', 'client_count', 'mount_command', 'mount_path']
validation = FilesystemValidation()
always_return_data = True
@validate
def obj_create(self, bundle, **kwargs):
request = bundle.request
filesystem_id, command_id = JobSchedulerClient.create_filesystem(bundle.data)
filesystem = ManagedFilesystem.objects.get(pk = filesystem_id)
command = Command.objects.get(pk = command_id)
fs_bundle = self.full_dehydrate(self.build_bundle(obj = filesystem))
filesystem_data = self.alter_detail_data_to_serialize(request,
fs_bundle).data
raise custom_response(self, request, http.HttpAccepted,
{
'command': dehydrate_command(command),
'filesystem': filesystem_data
}) | 0.501221 | 0.124452 |
import requests
from bs4 import BeautifulSoup as bs
from itertools import chain, filterfalse
langs = {'python': ['(py)', '(pypy)', '(py3)'],
'ruby': ['(rb)']}
def get_pids(maxpage=17):
""" gatter problem ids and return it one by one """
baseurl = 'https://algospot.com/judge/problem/list/%d'
for pagenum in range(1, maxpage+1):
page = requests.get(baseurl % pagenum, timeout=None)
soup = bs(page.text)
tds = soup.find_all('td', class_='id')
for p in tds:
yield p.find('a').text.strip()
def solved_with(lang):
""" return a filter that checks if provided problem is ever solved with the
language or not
"""
if lang not in langs:
raise
target = langs[lang]
baseurl = 'https://algospot.com/judge/problem/stat/%(pid)s/%(page)d/'
def f(pid):
firstpage = requests.get(baseurl % {'pid': pid, 'page': 1})
soup = bs(firstpage.text)
maxpage = soup.find('span', class_='step-links').find_all('a')[-1].text
for pagenum in range(1, int(maxpage)+1):
page = requests.get(baseurl % {'pid': pid, 'page': pagenum})
soup = bs(page.text)
tds = chain(soup.find_all('td', class_='fastest'),
soup.find_all('td', class_='shortest'))
ans = ''.join(td.text for td in tds)
if any(t in ans for t in target):
return True
return False
return f
def solved_by(uid):
""" return a filter that checks if provided problem is ever solved by the
user or not. user is specified by user id, shown in his profile page url.
for example user fleo0917(https://algospot.com/user/profile/13227)'s user
id is '13227'
"""
solved = set()
baseurl = 'https://algospot.com/judge/problem/list/%(page)d?verdict=solved&user_tried=%(uid)s'
firstpage = requests.get(baseurl % {'uid': uid, 'page': 1})
soup = bs(firstpage.text)
maxpage = soup.find('span', class_='step-links').find_all('a')[-1].text
for pagenum in range(1, int(maxpage)+1):
page = requests.get(baseurl % {'uid': uid, 'page': pagenum})
soup = bs(page.text)
tds = soup.find_all('td', class_='id')
for p in tds:
solved.add(p.find('a').text.strip())
def f(pid):
return pid in solved
return f
def gen_url(pid):
""" return problem definition url """
return 'https://algospot.com/judge/problem/read/%s' % pid
if __name__ == '__main__':
probs = get_pids()
probs = filter(solved_with('python'), probs)
probs = filterfalse(solved_by('13227'), probs)
for p in probs:
print('[%s](%s)' % (p, gen_url(p))) | hard-gists/ef9e5c352e54769c4d43/snippet.py | import requests
from bs4 import BeautifulSoup as bs
from itertools import chain, filterfalse
langs = {'python': ['(py)', '(pypy)', '(py3)'],
'ruby': ['(rb)']}
def get_pids(maxpage=17):
""" gatter problem ids and return it one by one """
baseurl = 'https://algospot.com/judge/problem/list/%d'
for pagenum in range(1, maxpage+1):
page = requests.get(baseurl % pagenum, timeout=None)
soup = bs(page.text)
tds = soup.find_all('td', class_='id')
for p in tds:
yield p.find('a').text.strip()
def solved_with(lang):
""" return a filter that checks if provided problem is ever solved with the
language or not
"""
if lang not in langs:
raise
target = langs[lang]
baseurl = 'https://algospot.com/judge/problem/stat/%(pid)s/%(page)d/'
def f(pid):
firstpage = requests.get(baseurl % {'pid': pid, 'page': 1})
soup = bs(firstpage.text)
maxpage = soup.find('span', class_='step-links').find_all('a')[-1].text
for pagenum in range(1, int(maxpage)+1):
page = requests.get(baseurl % {'pid': pid, 'page': pagenum})
soup = bs(page.text)
tds = chain(soup.find_all('td', class_='fastest'),
soup.find_all('td', class_='shortest'))
ans = ''.join(td.text for td in tds)
if any(t in ans for t in target):
return True
return False
return f
def solved_by(uid):
""" return a filter that checks if provided problem is ever solved by the
user or not. user is specified by user id, shown in his profile page url.
for example user fleo0917(https://algospot.com/user/profile/13227)'s user
id is '13227'
"""
solved = set()
baseurl = 'https://algospot.com/judge/problem/list/%(page)d?verdict=solved&user_tried=%(uid)s'
firstpage = requests.get(baseurl % {'uid': uid, 'page': 1})
soup = bs(firstpage.text)
maxpage = soup.find('span', class_='step-links').find_all('a')[-1].text
for pagenum in range(1, int(maxpage)+1):
page = requests.get(baseurl % {'uid': uid, 'page': pagenum})
soup = bs(page.text)
tds = soup.find_all('td', class_='id')
for p in tds:
solved.add(p.find('a').text.strip())
def f(pid):
return pid in solved
return f
def gen_url(pid):
""" return problem definition url """
return 'https://algospot.com/judge/problem/read/%s' % pid
if __name__ == '__main__':
probs = get_pids()
probs = filter(solved_with('python'), probs)
probs = filterfalse(solved_by('13227'), probs)
for p in probs:
print('[%s](%s)' % (p, gen_url(p))) | 0.435421 | 0.119254 |
import json
import pathlib
import re as regex
from abc import ABC, abstractmethod
from typing import List, Optional
import requests
import semver
import cpo.utils.process
from cpo.config import configuration_manager
from cpo.lib.error import DataGateCLIException
class AbstractDependencyManagerPlugIn(ABC):
"""Base class of all dependency manager plug-in classes"""
@abstractmethod
def download_dependency_version(self, version: semver.VersionInfo):
"""Downloads the given version of the dependency
Parameters
----------
version
version of the dependency to be downloaded
"""
pass
def execute_binary(
self, args: List[str], capture_output=False, check=True, print_captured_output=False
) -> cpo.utils.process.ProcessResult:
"""Executes the binary associated with the dependency
If the dependency does not provide a binary, an exception is raised.
Parameters
----------
args
arguments to be passed to the binary
capture_output
flag indicating whether process output shall be captured
check
flag indicating whether an exception shall be thrown if the binary
returns with a nonzero return code
print_captured_output
flag indicating whether captured process output shall also be written to
stdout/stderr
Returns
-------
ProcessResult
object storing the return code and captured process output (if
requested)
"""
binary_path = self.get_binary_path()
if binary_path is None:
raise DataGateCLIException(f"Dependency '{self.get_dependency_name()} does not provide a binary'")
return cpo.utils.process.execute_command(
binary_path,
args,
capture_output=capture_output,
check=check,
print_captured_output=print_captured_output,
)
def get_binary_name(self) -> Optional[str]:
"""Returns the name of the binary associated with the dependency
Returns
-------
Optional[str]
name of the binary associated with the dependency or None if the
dependency does not provide a binary
"""
return None
def get_binary_path(self) -> Optional[pathlib.Path]:
"""Returns the path of the binary associated with the dependency
Returns
-------
Optional[pathlib.Path]
path of the binary associated with the dependency or None if the
dependency does not provide a binary
"""
binary_name = self.get_binary_name()
return configuration_manager.get_bin_directory_path() / binary_name if binary_name is not None else None
@abstractmethod
def get_dependency_alias(self) -> str:
"""Returns the alias of the dependency
The alias is used as a key in ~/.cpo/binaries.json to store the version
of the downloaded dependency.
Example:
{
[…]
"ibmcloud": "1.2.3",
[…]
}
Returns
-------
str
alias of the dependency
"""
pass
@abstractmethod
def get_dependency_name(self) -> str:
"""Returns the dependency name
Returns
-------
str
dependency name
"""
pass
@abstractmethod
def get_latest_dependency_version(self) -> semver.VersionInfo:
"""Returns the latest version of the dependency available at the
official download location
Returns
-------
semver.VersionInfo
latest version of the dependency available at the official download
location
"""
pass
def _get_latest_dependency_version_on_github(self, owner: str, repo: str) -> Optional[semver.VersionInfo]:
"""Returns the latest version of the dependency on GitHub
This method parses the "name" key of the JSON document returned by the
GitHub Releases API, which has the following structure:
[
{
"url": […],
"html_url": […],
"assets_url": […],
"upload_url": […],
"tarball_url": […],
"zipball_url": […],
"id": […],
"node_id": […],
"tag_name": […],
"target_commitish": […],
"name": "v1.0.0",
"body": […],
"draft": […],
"prerelease": […],
"created_at": […],
"published_at": […],
"author": {
[…]
},
"assets": [
[…]
]
},
{
[…]
"name": "v2.0.0",
[…]
},
[…]
]
GitHub Releases API: https://developer.github.com/v3/repos/releases/
Parameters
----------
owner
GitHub repository owner
repo
GitHub repository name
Returns
-------
Optional[semver.VersionInfo]
latest version of the dependency or None if no release was found
"""
response = requests.get(f"https://api.github.com/repos/{owner}/{repo}/releases")
response.raise_for_status()
response_json = json.loads(response.content)
result: Optional[semver.VersionInfo] = None
for release in response_json:
search_result = regex.search(
"v(\\d+\\.\\d+\\.\\d+)$",
release["name"],
)
if search_result is not None:
result = semver.VersionInfo.parse(search_result.group(1))
break
return result | cpo/lib/dependency_manager/dependency_manager_plugin.py |
import json
import pathlib
import re as regex
from abc import ABC, abstractmethod
from typing import List, Optional
import requests
import semver
import cpo.utils.process
from cpo.config import configuration_manager
from cpo.lib.error import DataGateCLIException
class AbstractDependencyManagerPlugIn(ABC):
"""Base class of all dependency manager plug-in classes"""
@abstractmethod
def download_dependency_version(self, version: semver.VersionInfo):
"""Downloads the given version of the dependency
Parameters
----------
version
version of the dependency to be downloaded
"""
pass
def execute_binary(
self, args: List[str], capture_output=False, check=True, print_captured_output=False
) -> cpo.utils.process.ProcessResult:
"""Executes the binary associated with the dependency
If the dependency does not provide a binary, an exception is raised.
Parameters
----------
args
arguments to be passed to the binary
capture_output
flag indicating whether process output shall be captured
check
flag indicating whether an exception shall be thrown if the binary
returns with a nonzero return code
print_captured_output
flag indicating whether captured process output shall also be written to
stdout/stderr
Returns
-------
ProcessResult
object storing the return code and captured process output (if
requested)
"""
binary_path = self.get_binary_path()
if binary_path is None:
raise DataGateCLIException(f"Dependency '{self.get_dependency_name()} does not provide a binary'")
return cpo.utils.process.execute_command(
binary_path,
args,
capture_output=capture_output,
check=check,
print_captured_output=print_captured_output,
)
def get_binary_name(self) -> Optional[str]:
"""Returns the name of the binary associated with the dependency
Returns
-------
Optional[str]
name of the binary associated with the dependency or None if the
dependency does not provide a binary
"""
return None
def get_binary_path(self) -> Optional[pathlib.Path]:
"""Returns the path of the binary associated with the dependency
Returns
-------
Optional[pathlib.Path]
path of the binary associated with the dependency or None if the
dependency does not provide a binary
"""
binary_name = self.get_binary_name()
return configuration_manager.get_bin_directory_path() / binary_name if binary_name is not None else None
@abstractmethod
def get_dependency_alias(self) -> str:
"""Returns the alias of the dependency
The alias is used as a key in ~/.cpo/binaries.json to store the version
of the downloaded dependency.
Example:
{
[…]
"ibmcloud": "1.2.3",
[…]
}
Returns
-------
str
alias of the dependency
"""
pass
@abstractmethod
def get_dependency_name(self) -> str:
"""Returns the dependency name
Returns
-------
str
dependency name
"""
pass
@abstractmethod
def get_latest_dependency_version(self) -> semver.VersionInfo:
"""Returns the latest version of the dependency available at the
official download location
Returns
-------
semver.VersionInfo
latest version of the dependency available at the official download
location
"""
pass
def _get_latest_dependency_version_on_github(self, owner: str, repo: str) -> Optional[semver.VersionInfo]:
"""Returns the latest version of the dependency on GitHub
This method parses the "name" key of the JSON document returned by the
GitHub Releases API, which has the following structure:
[
{
"url": […],
"html_url": […],
"assets_url": […],
"upload_url": […],
"tarball_url": […],
"zipball_url": […],
"id": […],
"node_id": […],
"tag_name": […],
"target_commitish": […],
"name": "v1.0.0",
"body": […],
"draft": […],
"prerelease": […],
"created_at": […],
"published_at": […],
"author": {
[…]
},
"assets": [
[…]
]
},
{
[…]
"name": "v2.0.0",
[…]
},
[…]
]
GitHub Releases API: https://developer.github.com/v3/repos/releases/
Parameters
----------
owner
GitHub repository owner
repo
GitHub repository name
Returns
-------
Optional[semver.VersionInfo]
latest version of the dependency or None if no release was found
"""
response = requests.get(f"https://api.github.com/repos/{owner}/{repo}/releases")
response.raise_for_status()
response_json = json.loads(response.content)
result: Optional[semver.VersionInfo] = None
for release in response_json:
search_result = regex.search(
"v(\\d+\\.\\d+\\.\\d+)$",
release["name"],
)
if search_result is not None:
result = semver.VersionInfo.parse(search_result.group(1))
break
return result | 0.900115 | 0.340787 |
from pathlib import Path
from typing import Mapping, Optional
from hamcrest import (
assert_that,
contains,
equal_to,
is_,
)
from microcosm_sagemaker.testing.bytes_extractor import ExtractorMatcherPair
def _identity(x):
return x
def _is_hidden(path: Path) -> bool:
return path.name.startswith(".")
def directory_comparison(
gold_dir: Path,
actual_dir: Path,
matchers: Optional[Mapping[Path, ExtractorMatcherPair]] = None,
ignore_hidden: bool = True,
ignore_file_contents: bool = False,
):
"""
Recursively checks the contents of `actual_dir` against the expected
contents in `gold_dir`. It is also possible to leave certain files out of
the gold dir, and instead specify an (extractor, matcher) pair that should
be used to extract and match the contents of the given file instead.
By default, this function ignores hidden files. This functionality is
useful when you expect an empty directory, because git won't allow checking
in an empty directory. In this situation you can add an empty `.keep` file
to the directory to make sure it is checked in.
"""
if matchers is None:
matchers = dict()
assert_that(gold_dir.exists(), is_(True))
assert_that(actual_dir.exists(), is_(True))
actual_paths = sorted([
subpath.relative_to(actual_dir)
for subpath in actual_dir.glob('**/*')
if not (ignore_hidden and _is_hidden(subpath)) # exclude hidden files if ignore_hidden is True
])
gold_paths = sorted([
subpath.relative_to(gold_dir)
for subpath in gold_dir.glob('**/*')
if not (ignore_hidden and _is_hidden(subpath)) # exclude hidden files if ignore_hidden is True
])
assert_that(actual_paths, contains(*gold_paths))
for path in gold_paths:
gold_path = gold_dir / path
actual_path = actual_dir / path
if gold_path.is_dir():
assert_that(actual_path.is_dir(), is_(True))
else:
assert_that(actual_path.is_dir(), is_(False))
if not ignore_file_contents:
if path in matchers:
extractor, matcher_constructor = matchers[path]
else:
extractor, matcher_constructor = ExtractorMatcherPair(
_identity,
lambda x: is_(equal_to(x)),
)
assert_that(
extractor(actual_path.read_bytes()),
matcher_constructor(
extractor(gold_path.read_bytes()),
),
path,
) | microcosm_sagemaker/testing/directory_comparison.py | from pathlib import Path
from typing import Mapping, Optional
from hamcrest import (
assert_that,
contains,
equal_to,
is_,
)
from microcosm_sagemaker.testing.bytes_extractor import ExtractorMatcherPair
def _identity(x):
return x
def _is_hidden(path: Path) -> bool:
return path.name.startswith(".")
def directory_comparison(
gold_dir: Path,
actual_dir: Path,
matchers: Optional[Mapping[Path, ExtractorMatcherPair]] = None,
ignore_hidden: bool = True,
ignore_file_contents: bool = False,
):
"""
Recursively checks the contents of `actual_dir` against the expected
contents in `gold_dir`. It is also possible to leave certain files out of
the gold dir, and instead specify an (extractor, matcher) pair that should
be used to extract and match the contents of the given file instead.
By default, this function ignores hidden files. This functionality is
useful when you expect an empty directory, because git won't allow checking
in an empty directory. In this situation you can add an empty `.keep` file
to the directory to make sure it is checked in.
"""
if matchers is None:
matchers = dict()
assert_that(gold_dir.exists(), is_(True))
assert_that(actual_dir.exists(), is_(True))
actual_paths = sorted([
subpath.relative_to(actual_dir)
for subpath in actual_dir.glob('**/*')
if not (ignore_hidden and _is_hidden(subpath)) # exclude hidden files if ignore_hidden is True
])
gold_paths = sorted([
subpath.relative_to(gold_dir)
for subpath in gold_dir.glob('**/*')
if not (ignore_hidden and _is_hidden(subpath)) # exclude hidden files if ignore_hidden is True
])
assert_that(actual_paths, contains(*gold_paths))
for path in gold_paths:
gold_path = gold_dir / path
actual_path = actual_dir / path
if gold_path.is_dir():
assert_that(actual_path.is_dir(), is_(True))
else:
assert_that(actual_path.is_dir(), is_(False))
if not ignore_file_contents:
if path in matchers:
extractor, matcher_constructor = matchers[path]
else:
extractor, matcher_constructor = ExtractorMatcherPair(
_identity,
lambda x: is_(equal_to(x)),
)
assert_that(
extractor(actual_path.read_bytes()),
matcher_constructor(
extractor(gold_path.read_bytes()),
),
path,
) | 0.92095 | 0.460956 |
import os
import sys
import pexpect
def close_benchmark(benchmark):
benchmark.expect(pexpect.EOF, timeout=None)
benchmark.close()
def check_exit_status(benchmark):
if benchmark.exitstatus != 0 or benchmark.signalstatus is not None:
print(
"Benchmark failed with exit status "
+ str(benchmark.exitstatus)
+ " and signal status "
+ str(benchmark.signalstatus)
)
sys.exit(1)
def check_json(json, argument, error, return_error, difference=None):
if type(json) is not float:
if json != argument:
print("ERROR: " + error + " " + str(json) + " " + str(argument))
return True
else:
if abs(json - argument) > difference:
print("ERROR: " + error + " " + str(json) + " " + str(argument) + " " + str(difference))
return True
return return_error
def initialize(arguments, benchmark_name, verbose):
if len(sys.argv) == 1:
print("Usage: ./scripts/test/" + benchmark_name + "_test.py <build_dir>")
sys.exit(1)
if "--table_path" in arguments and not os.path.isdir(arguments["--table_path"].replace("'", "")):
print(
"Cannot find "
+ arguments["--table_path"]
+ ". Are you running the test suite from the main folder of the Hyrise repository?"
)
sys.exit(1)
if "--query_path" in arguments and not os.path.isdir(arguments["--query_path"].replace("'", "")):
print(
"Cannot find "
+ arguments["--query_path"]
+ ". Are you running the test suite from the main folder of the Hyrise repository?"
)
sys.exit(1)
build_dir = sys.argv[1]
concat_arguments = " ".join(["=".join(map(str, x)) for x in arguments.items()])
benchmark = pexpect.spawn(
build_dir + "/" + benchmark_name + " " + concat_arguments, maxread=1000000, timeout=1000, dimensions=(200, 64)
)
if verbose:
benchmark.logfile = sys.stdout.buffer
return benchmark | scripts/test/hyriseBenchmarkCore.py | import os
import sys
import pexpect
def close_benchmark(benchmark):
benchmark.expect(pexpect.EOF, timeout=None)
benchmark.close()
def check_exit_status(benchmark):
if benchmark.exitstatus != 0 or benchmark.signalstatus is not None:
print(
"Benchmark failed with exit status "
+ str(benchmark.exitstatus)
+ " and signal status "
+ str(benchmark.signalstatus)
)
sys.exit(1)
def check_json(json, argument, error, return_error, difference=None):
if type(json) is not float:
if json != argument:
print("ERROR: " + error + " " + str(json) + " " + str(argument))
return True
else:
if abs(json - argument) > difference:
print("ERROR: " + error + " " + str(json) + " " + str(argument) + " " + str(difference))
return True
return return_error
def initialize(arguments, benchmark_name, verbose):
if len(sys.argv) == 1:
print("Usage: ./scripts/test/" + benchmark_name + "_test.py <build_dir>")
sys.exit(1)
if "--table_path" in arguments and not os.path.isdir(arguments["--table_path"].replace("'", "")):
print(
"Cannot find "
+ arguments["--table_path"]
+ ". Are you running the test suite from the main folder of the Hyrise repository?"
)
sys.exit(1)
if "--query_path" in arguments and not os.path.isdir(arguments["--query_path"].replace("'", "")):
print(
"Cannot find "
+ arguments["--query_path"]
+ ". Are you running the test suite from the main folder of the Hyrise repository?"
)
sys.exit(1)
build_dir = sys.argv[1]
concat_arguments = " ".join(["=".join(map(str, x)) for x in arguments.items()])
benchmark = pexpect.spawn(
build_dir + "/" + benchmark_name + " " + concat_arguments, maxread=1000000, timeout=1000, dimensions=(200, 64)
)
if verbose:
benchmark.logfile = sys.stdout.buffer
return benchmark | 0.189896 | 0.128963 |
from telethon.tl import functions
from telethon.tl.types import MessageEntityMentionName
from . import *
@bot.on(phoenix_cmd(pattern="create (b|g|c) (.*)")) # pylint:disable=E0602
@bot.on(sudo_cmd(pattern="create (b|g|c) (.*)", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
type_of_group = event.pattern_match.group(1)
group_name = event.pattern_match.group(2)
event = await edit_or_reply(event, "Creating wait sar.....")
if type_of_group == "b":
try:
result = await event.client(
functions.messages.CreateChatRequest( # pylint:disable=E0602
users=["@MissRose_Bot"],
# Not enough users (to create a chat, for example)
# Telegram, no longer allows creating a chat with ourselves
title=group_name,
)
)
created_chat_id = result.chats[0].id
await event.client(
functions.messages.DeleteChatUserRequest(
chat_id=created_chat_id, user_id="@MissRose_Bot"
)
)
result = await event.client(
functions.messages.ExportChatInviteRequest(
peer=created_chat_id,
)
)
await event.edit(
"Group `{}` created successfully. Join {}".format(
group_name, result.link
)
)
except Exception as e: # pylint:disable=C0103,W0703
await event.edit(str(e))
elif type_of_group in ["g", "c"]:
try:
r = await event.client(
functions.channels.CreateChannelRequest(
title=group_name,
about="Created By PHOENIX",
megagroup=type_of_group != "c",
)
)
created_chat_id = r.chats[0].id
result = await event.client(
functions.messages.ExportChatInviteRequest(
peer=created_chat_id,
)
)
await event.edit(
"Channel `{}` created successfully. Join {}".format(
group_name, result.link
)
)
except Exception as e: # pylint:disable=C0103,W0703
await event.edit(str(e))
else:
await event.edit(f"Read `{hl}plinfo create` to know how to use me")
@bot.on(phoenix_cmd(pattern="link(?: |$)(.*)", outgoing=True))
@bot.on(sudo_cmd(pattern="link(?: |$)(.*)", allow_sudo=True))
async def permalink(mention):
if mention.fwd_from:
return
""" For .link command, generates a link to the user's PM with a custom text. """
user, custom = await get_user_from_event(mention)
if not user:
return
if custom:
await edit_or_reply(mention, f"[{custom}](tg://user?id={user.id}) \n\n\n ☝️ Tap To See ☝️")
else:
tag = (
user.first_name.replace("\u2060", "") if user.first_name else user.username
)
await edit_or_reply(mention, f"[{tag}](tg://user?id={user.id}) \n\n ☝️ Tap to See ☝️")
async def get_user_from_event(event):
""" Get the user from argument or replied message. """
args = event.pattern_match.group(1).split(":", 1)
extra = None
if event.reply_to_msg_id and not len(args) == 2:
previous_message = await event.get_reply_message()
user_obj = await event.client.get_entity(previous_message.sender_id)
extra = event.pattern_match.group(1)
elif len(args[0]) > 0:
user = args[0]
if len(args) == 2:
extra = args[1]
if user.isnumeric():
user = int(user)
if not user:
await event.edit("`Pass the user's username, id or reply!`")
return
if event.message.entities:
probable_user_mention_entity = event.message.entities[0]
if isinstance(probable_user_mention_entity, MessageEntityMentionName):
user_id = probable_user_mention_entity.user_id
user_obj = await event.client.get_entity(user_id)
return user_obj
try:
user_obj = await event.client.get_entity(user)
except (TypeError, ValueError) as err:
await event.edit(str(err))
return None
return user_obj, extra
async def get_user_from_id(user, event):
if isinstance(user, str):
user = int(user)
try:
user_obj = await event.client.get_entity(user)
except (TypeError, ValueError) as err:
await event.edit(str(err))
return None
return user_obj
CmdHelp("create").add_command(
'create b', 'Name of your grp', 'Creates a super and send you link'
).add_command(
'create g', 'Name of your grp', 'Creates a private grp and send you link'
).add_command(
'create c', 'Name of your channel', 'Creates a channel and sends you link'
).add_command(
'link', '<reply> <text>', 'Makes a permanent link of tagged user with a custom text'
).add_info(
'Creates Groups'
).add_warning(
'✅ Harmless Module'
).add() | Phoenix/plugins/create.py | from telethon.tl import functions
from telethon.tl.types import MessageEntityMentionName
from . import *
@bot.on(phoenix_cmd(pattern="create (b|g|c) (.*)"))  # pylint:disable=E0602
@bot.on(sudo_cmd(pattern="create (b|g|c) (.*)", allow_sudo=True))
async def _(event):
    """Handle ``.create (b|g|c) <name>``: create a basic chat, a private
    megagroup, or a broadcast channel, then reply with its invite link."""
    if event.fwd_from:
        return
    type_of_group = event.pattern_match.group(1)  # "b" basic chat, "g" megagroup, "c" channel
    group_name = event.pattern_match.group(2)
    event = await edit_or_reply(event, "Creating wait sar.....")
    if type_of_group == "b":
        try:
            result = await event.client(
                functions.messages.CreateChatRequest(  # pylint:disable=E0602
                    users=["@MissRose_Bot"],
                    # Not enough users (to create a chat, for example)
                    # Telegram, no longer allows creating a chat with ourselves
                    title=group_name,
                )
            )
            created_chat_id = result.chats[0].id
            # Remove the placeholder member that was only needed so the
            # CreateChatRequest would be accepted.
            await event.client(
                functions.messages.DeleteChatUserRequest(
                    chat_id=created_chat_id, user_id="@MissRose_Bot"
                )
            )
            result = await event.client(
                functions.messages.ExportChatInviteRequest(
                    peer=created_chat_id,
                )
            )
            await event.edit(
                "Group `{}` created successfully. Join {}".format(
                    group_name, result.link
                )
            )
        except Exception as e:  # pylint:disable=C0103,W0703
            await event.edit(str(e))
    elif type_of_group in ["g", "c"]:
        try:
            # megagroup=True -> private supergroup ("g"); False -> channel ("c").
            r = await event.client(
                functions.channels.CreateChannelRequest(
                    title=group_name,
                    about="Created By PHOENIX",
                    megagroup=type_of_group != "c",
                )
            )
            created_chat_id = r.chats[0].id
            result = await event.client(
                functions.messages.ExportChatInviteRequest(
                    peer=created_chat_id,
                )
            )
            await event.edit(
                "Channel `{}` created successfully. Join {}".format(
                    group_name, result.link
                )
            )
        except Exception as e:  # pylint:disable=C0103,W0703
            await event.edit(str(e))
    else:
        await event.edit(f"Read `{hl}plinfo create` to know how to use me")
@bot.on(phoenix_cmd(pattern="link(?: |$)(.*)", outgoing=True))
@bot.on(sudo_cmd(pattern="link(?: |$)(.*)", allow_sudo=True))
async def permalink(mention):
    """ For .link command, generates a link to the user's PM with a custom text. """
    if mention.fwd_from:
        return
    # Resolve the target user and the optional custom link text.
    user, custom = await get_user_from_event(mention)
    if not user:
        return
    if custom:
        await edit_or_reply(mention, f"[{custom}](tg://user?id={user.id}) \n\n\n ☝️ Tap To See ☝️")
    else:
        # Fall back to the first name (with U+2060 word joiners stripped)
        # or, failing that, the username.
        tag = (
            user.first_name.replace("\u2060", "") if user.first_name else user.username
        )
        await edit_or_reply(mention, f"[{tag}](tg://user?id={user.id}) \n\n ☝️ Tap to See ☝️")
async def get_user_from_event(event):
    """Resolve the target user from the command argument or a replied message.

    The argument may be ``user`` or ``user:extra text``.  Always returns a
    ``(user_obj, extra)`` tuple; ``user_obj`` is ``None`` when no target could
    be resolved (an error message is shown in chat in that case).
    """
    args = event.pattern_match.group(1).split(":", 1)
    extra = None
    # BUG FIX: ensure user_obj is defined even when no branch below matches,
    # otherwise the final return raised NameError.
    user_obj = None
    if event.reply_to_msg_id and not len(args) == 2:
        # A replied message takes priority; the whole argument becomes "extra".
        previous_message = await event.get_reply_message()
        user_obj = await event.client.get_entity(previous_message.sender_id)
        extra = event.pattern_match.group(1)
    elif len(args[0]) > 0:
        user = args[0]
        if len(args) == 2:
            extra = args[1]
        if user.isnumeric():
            user = int(user)
        if not user:
            await event.edit("`Pass the user's username, id or reply!`")
            # BUG FIX: callers unpack two values; a bare return broke them.
            return None, None
        if event.message.entities:
            probable_user_mention_entity = event.message.entities[0]
            if isinstance(probable_user_mention_entity, MessageEntityMentionName):
                # A rich-text mention entity carries the target user id directly.
                user_id = probable_user_mention_entity.user_id
                user_obj = await event.client.get_entity(user_id)
                # BUG FIX: was `return user_obj`, which broke tuple unpacking.
                return user_obj, extra
        try:
            user_obj = await event.client.get_entity(user)
        except (TypeError, ValueError) as err:
            await event.edit(str(err))
            # BUG FIX: was `return None`; keep the two-value contract.
            return None, None
    return user_obj, extra
async def get_user_from_id(user, event):
    """Fetch the Telegram entity for *user* (numeric id as int/str, or username).

    Returns the entity object, or ``None`` after reporting the error in chat.
    """
    # BUG FIX: only convert numeric strings; int("username") raised an
    # uncaught ValueError before the try block was ever reached.
    if isinstance(user, str) and user.lstrip("-").isdigit():
        user = int(user)
    try:
        user_obj = await event.client.get_entity(user)
    except (TypeError, ValueError) as err:
        await event.edit(str(err))
        return None
    return user_obj
# Register this plugin's commands with the userbot help system.
CmdHelp("create").add_command(
    'create b', 'Name of your grp', 'Creates a super and send you link'
).add_command(
    'create g', 'Name of your grp', 'Creates a private grp and send you link'
).add_command(
    'create c', 'Name of your channel', 'Creates a channel and sends you link'
).add_command(
    'link', '<reply> <text>', 'Makes a permanent link of tagged user with a custom text'
).add_info(
    'Creates Groups'
).add_warning(
    '✅ Harmless Module'
).add() | 0.421195 | 0.136234
import logging
import os
import pyauto_media
import pyauto
# HTML test path; relative to src/chrome/test/data.
_TEST_HTML_PATH = os.path.join('media', 'html', 'media_basic_playback.html')
# Test videos to play. TODO(dalecurtis): Convert to text matrix parser when we
# have more test videos in the matrix. Code already written, see patch here:
# https://chromiumcodereview.appspot.com/9290008/#ps12
_TEST_VIDEOS = [
'bear.mp4', 'bear.ogv', 'bear.webm', 'bear_silent.mp4', 'bear_silent.ogv',
'bear_silent.webm']
# Expected events for the first iteration and every iteration thereafter.
_EXPECTED_EVENTS_0 = [('ended', 2), ('playing', 2), ('seeked', 1)]
_EXPECTED_EVENTS_n = [('abort', 1), ('emptied', 1)] + _EXPECTED_EVENTS_0
class MediaConstrainedNetworkPerfTest(pyauto.PyUITest):
  """PyAuto test container. See file doc string for more information."""

  def testBasicPlaybackMatrix(self):
    """Launches HTML test which plays each video until end, seeks, and replays.

    Specifically ensures that after the above sequence of events, the following
    are true:
      1. The first video has only 2x playing, 2x ended, and 1x seeked events.
      2. Each subsequent video additionally has 1x abort and 1x emptied due to
         switching of the src attribute.
      3. video.currentTime == video.duration for each video.

    See the HTML file at _TEST_HTML_PATH for more information.
    """
    self.NavigateToURL(self.GetFileURLForDataPath(_TEST_HTML_PATH))
    for i, media in enumerate(_TEST_VIDEOS):
      logging.debug('Running basic playback test for %s', media)
      # Block until the test finishes and notifies us. Upon return the value of
      # video.currentTime == video.duration is provided.
      try:
        self.assertTrue(self.ExecuteJavascript("startTest('%s');" % media))
        # PyAuto has trouble with arrays, so convert to string prior to request.
        events = self.GetDOMValue("events.join(',')").split(',')
        counts = [(item, events.count(item)) for item in sorted(set(events))]
        # The first loop will not have the abort and emptied events triggered by
        # changing the video src.
        if (i == 0):
          self.assertEqual(counts, _EXPECTED_EVENTS_0)
        else:
          self.assertEqual(counts, _EXPECTED_EVENTS_n)
      except:
        # Bare except is deliberate: log the observed events for ANY failure
        # (including assertion failures), then re-raise so the test still fails.
        logging.debug(
            'Test failed with events: %s', self.GetDOMValue("events.join(',')"))
        raise
if __name__ == '__main__':
pyauto_media.Main() | chrome/test/functional/media/media_basic_playback.py | import logging
import os
import pyauto_media
import pyauto
# HTML test path; relative to src/chrome/test/data.
_TEST_HTML_PATH = os.path.join('media', 'html', 'media_basic_playback.html')
# Test videos to play. TODO(dalecurtis): Convert to text matrix parser when we
# have more test videos in the matrix. Code already written, see patch here:
# https://chromiumcodereview.appspot.com/9290008/#ps12
_TEST_VIDEOS = [
'bear.mp4', 'bear.ogv', 'bear.webm', 'bear_silent.mp4', 'bear_silent.ogv',
'bear_silent.webm']
# Expected events for the first iteration and every iteration thereafter.
_EXPECTED_EVENTS_0 = [('ended', 2), ('playing', 2), ('seeked', 1)]
_EXPECTED_EVENTS_n = [('abort', 1), ('emptied', 1)] + _EXPECTED_EVENTS_0
class MediaConstrainedNetworkPerfTest(pyauto.PyUITest):
  """PyAuto test container. See file doc string for more information."""

  def testBasicPlaybackMatrix(self):
    """Launches HTML test which plays each video until end, seeks, and replays.

    Specifically ensures that after the above sequence of events, the following
    are true:
      1. The first video has only 2x playing, 2x ended, and 1x seeked events.
      2. Each subsequent video additionally has 1x abort and 1x emptied due to
         switching of the src attribute.
      3. video.currentTime == video.duration for each video.

    See the HTML file at _TEST_HTML_PATH for more information.
    """
    self.NavigateToURL(self.GetFileURLForDataPath(_TEST_HTML_PATH))
    for i, media in enumerate(_TEST_VIDEOS):
      logging.debug('Running basic playback test for %s', media)
      # Block until the test finishes and notifies us. Upon return the value of
      # video.currentTime == video.duration is provided.
      try:
        self.assertTrue(self.ExecuteJavascript("startTest('%s');" % media))
        # PyAuto has trouble with arrays, so convert to string prior to request.
        events = self.GetDOMValue("events.join(',')").split(',')
        counts = [(item, events.count(item)) for item in sorted(set(events))]
        # The first loop will not have the abort and emptied events triggered by
        # changing the video src.
        if (i == 0):
          self.assertEqual(counts, _EXPECTED_EVENTS_0)
        else:
          self.assertEqual(counts, _EXPECTED_EVENTS_n)
      except:
        # Bare except is deliberate: log the observed events for ANY failure
        # (including assertion failures), then re-raise so the test still fails.
        logging.debug(
            'Test failed with events: %s', self.GetDOMValue("events.join(',')"))
        raise
if __name__ == '__main__':
pyauto_media.Main() | 0.375936 | 0.427815 |
import pprint
import re # noqa: F401
import six
from hubspot.crm.schemas.configuration import Configuration
class ModelProperty(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"updated_at": "datetime",
"created_at": "datetime",
"archived_at": "datetime",
"name": "str",
"label": "str",
"type": "str",
"field_type": "str",
"description": "str",
"group_name": "str",
"options": "list[Option]",
"created_user_id": "str",
"updated_user_id": "str",
"referenced_object_type": "str",
"display_order": "int",
"calculated": "bool",
"external_options": "bool",
"archived": "bool",
"has_unique_value": "bool",
"hidden": "bool",
"hubspot_defined": "bool",
"show_currency_symbol": "bool",
"modification_metadata": "PropertyModificationMetadata",
"form_field": "bool",
}
attribute_map = {
"updated_at": "updatedAt",
"created_at": "createdAt",
"archived_at": "archivedAt",
"name": "name",
"label": "label",
"type": "type",
"field_type": "fieldType",
"description": "description",
"group_name": "groupName",
"options": "options",
"created_user_id": "createdUserId",
"updated_user_id": "updatedUserId",
"referenced_object_type": "referencedObjectType",
"display_order": "displayOrder",
"calculated": "calculated",
"external_options": "externalOptions",
"archived": "archived",
"has_unique_value": "hasUniqueValue",
"hidden": "hidden",
"hubspot_defined": "hubspotDefined",
"show_currency_symbol": "showCurrencySymbol",
"modification_metadata": "modificationMetadata",
"form_field": "formField",
}
    def __init__(
        self,
        updated_at=None,
        created_at=None,
        archived_at=None,
        name=None,
        label=None,
        type=None,
        field_type=None,
        description=None,
        group_name=None,
        options=None,
        created_user_id=None,
        updated_user_id=None,
        referenced_object_type=None,
        display_order=None,
        calculated=None,
        external_options=None,
        archived=None,
        has_unique_value=None,
        hidden=None,
        hubspot_defined=None,
        show_currency_symbol=None,
        modification_metadata=None,
        form_field=None,
        local_vars_configuration=None,
    ):  # noqa: E501
        """ModelProperty - a model defined in OpenAPI"""  # noqa: E501
        # The configuration controls client-side validation in the setters.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        # Backing fields for the generated @property accessors.
        self._updated_at = None
        self._created_at = None
        self._archived_at = None
        self._name = None
        self._label = None
        self._type = None
        self._field_type = None
        self._description = None
        self._group_name = None
        self._options = None
        self._created_user_id = None
        self._updated_user_id = None
        self._referenced_object_type = None
        self._display_order = None
        self._calculated = None
        self._external_options = None
        self._archived = None
        self._has_unique_value = None
        self._hidden = None
        self._hubspot_defined = None
        self._show_currency_symbol = None
        self._modification_metadata = None
        self._form_field = None
        self.discriminator = None
        # Optional fields: assigned only when explicitly provided.
        if updated_at is not None:
            self.updated_at = updated_at
        if created_at is not None:
            self.created_at = created_at
        if archived_at is not None:
            self.archived_at = archived_at
        # Required fields: always assigned; their setters raise ValueError on
        # None when client-side validation is enabled.
        self.name = name
        self.label = label
        self.type = type
        self.field_type = field_type
        self.description = description
        self.group_name = group_name
        self.options = options
        # Remaining optional fields.
        if created_user_id is not None:
            self.created_user_id = created_user_id
        if updated_user_id is not None:
            self.updated_user_id = updated_user_id
        if referenced_object_type is not None:
            self.referenced_object_type = referenced_object_type
        if display_order is not None:
            self.display_order = display_order
        if calculated is not None:
            self.calculated = calculated
        if external_options is not None:
            self.external_options = external_options
        if archived is not None:
            self.archived = archived
        if has_unique_value is not None:
            self.has_unique_value = has_unique_value
        if hidden is not None:
            self.hidden = hidden
        if hubspot_defined is not None:
            self.hubspot_defined = hubspot_defined
        if show_currency_symbol is not None:
            self.show_currency_symbol = show_currency_symbol
        if modification_metadata is not None:
            self.modification_metadata = modification_metadata
        if form_field is not None:
            self.form_field = form_field
@property
def updated_at(self):
"""Gets the updated_at of this ModelProperty. # noqa: E501
:return: The updated_at of this ModelProperty. # noqa: E501
:rtype: datetime
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this ModelProperty.
:param updated_at: The updated_at of this ModelProperty. # noqa: E501
:type: datetime
"""
self._updated_at = updated_at
@property
def created_at(self):
"""Gets the created_at of this ModelProperty. # noqa: E501
When the property was created # noqa: E501
:return: The created_at of this ModelProperty. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this ModelProperty.
When the property was created # noqa: E501
:param created_at: The created_at of this ModelProperty. # noqa: E501
:type: datetime
"""
self._created_at = created_at
@property
def archived_at(self):
"""Gets the archived_at of this ModelProperty. # noqa: E501
When the property was archived. # noqa: E501
:return: The archived_at of this ModelProperty. # noqa: E501
:rtype: datetime
"""
return self._archived_at
@archived_at.setter
def archived_at(self, archived_at):
"""Sets the archived_at of this ModelProperty.
When the property was archived. # noqa: E501
:param archived_at: The archived_at of this ModelProperty. # noqa: E501
:type: datetime
"""
self._archived_at = archived_at
@property
def name(self):
"""Gets the name of this ModelProperty. # noqa: E501
The internal property name, which must be used when referencing the property via the API. # noqa: E501
:return: The name of this ModelProperty. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ModelProperty.
The internal property name, which must be used when referencing the property via the API. # noqa: E501
:param name: The name of this ModelProperty. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def label(self):
"""Gets the label of this ModelProperty. # noqa: E501
A human-readable property label that will be shown in HubSpot. # noqa: E501
:return: The label of this ModelProperty. # noqa: E501
:rtype: str
"""
return self._label
@label.setter
def label(self, label):
"""Sets the label of this ModelProperty.
A human-readable property label that will be shown in HubSpot. # noqa: E501
:param label: The label of this ModelProperty. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and label is None: # noqa: E501
raise ValueError("Invalid value for `label`, must not be `None`") # noqa: E501
self._label = label
@property
def type(self):
"""Gets the type of this ModelProperty. # noqa: E501
The property data type. # noqa: E501
:return: The type of this ModelProperty. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ModelProperty.
The property data type. # noqa: E501
:param type: The type of this ModelProperty. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def field_type(self):
"""Gets the field_type of this ModelProperty. # noqa: E501
Controls how the property appears in HubSpot. # noqa: E501
:return: The field_type of this ModelProperty. # noqa: E501
:rtype: str
"""
return self._field_type
@field_type.setter
def field_type(self, field_type):
"""Sets the field_type of this ModelProperty.
Controls how the property appears in HubSpot. # noqa: E501
:param field_type: The field_type of this ModelProperty. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and field_type is None: # noqa: E501
raise ValueError("Invalid value for `field_type`, must not be `None`") # noqa: E501
self._field_type = field_type
@property
def description(self):
"""Gets the description of this ModelProperty. # noqa: E501
A description of the property that will be shown as help text in HubSpot. # noqa: E501
:return: The description of this ModelProperty. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this ModelProperty.
A description of the property that will be shown as help text in HubSpot. # noqa: E501
:param description: The description of this ModelProperty. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and description is None: # noqa: E501
raise ValueError("Invalid value for `description`, must not be `None`") # noqa: E501
self._description = description
@property
def group_name(self):
"""Gets the group_name of this ModelProperty. # noqa: E501
The name of the property group the property belongs to. # noqa: E501
:return: The group_name of this ModelProperty. # noqa: E501
:rtype: str
"""
return self._group_name
@group_name.setter
def group_name(self, group_name):
"""Sets the group_name of this ModelProperty.
The name of the property group the property belongs to. # noqa: E501
:param group_name: The group_name of this ModelProperty. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and group_name is None: # noqa: E501
raise ValueError("Invalid value for `group_name`, must not be `None`") # noqa: E501
self._group_name = group_name
@property
def options(self):
"""Gets the options of this ModelProperty. # noqa: E501
A list of valid options for the property. This field is required for enumerated properties, but will be empty for other property types. # noqa: E501
:return: The options of this ModelProperty. # noqa: E501
:rtype: list[Option]
"""
return self._options
@options.setter
def options(self, options):
"""Sets the options of this ModelProperty.
A list of valid options for the property. This field is required for enumerated properties, but will be empty for other property types. # noqa: E501
:param options: The options of this ModelProperty. # noqa: E501
:type: list[Option]
"""
if self.local_vars_configuration.client_side_validation and options is None: # noqa: E501
raise ValueError("Invalid value for `options`, must not be `None`") # noqa: E501
self._options = options
@property
def created_user_id(self):
"""Gets the created_user_id of this ModelProperty. # noqa: E501
The internal ID of the user who created the property in HubSpot. This field may not exist if the property was created outside of HubSpot. # noqa: E501
:return: The created_user_id of this ModelProperty. # noqa: E501
:rtype: str
"""
return self._created_user_id
@created_user_id.setter
def created_user_id(self, created_user_id):
"""Sets the created_user_id of this ModelProperty.
The internal ID of the user who created the property in HubSpot. This field may not exist if the property was created outside of HubSpot. # noqa: E501
:param created_user_id: The created_user_id of this ModelProperty. # noqa: E501
:type: str
"""
self._created_user_id = created_user_id
@property
def updated_user_id(self):
"""Gets the updated_user_id of this ModelProperty. # noqa: E501
The internal user ID of the user who updated the property in HubSpot. This field may not exist if the property was updated outside of HubSpot. # noqa: E501
:return: The updated_user_id of this ModelProperty. # noqa: E501
:rtype: str
"""
return self._updated_user_id
@updated_user_id.setter
def updated_user_id(self, updated_user_id):
"""Sets the updated_user_id of this ModelProperty.
The internal user ID of the user who updated the property in HubSpot. This field may not exist if the property was updated outside of HubSpot. # noqa: E501
:param updated_user_id: The updated_user_id of this ModelProperty. # noqa: E501
:type: str
"""
self._updated_user_id = updated_user_id
@property
def referenced_object_type(self):
"""Gets the referenced_object_type of this ModelProperty. # noqa: E501
If this property is related to other object(s), they'll be listed here. # noqa: E501
:return: The referenced_object_type of this ModelProperty. # noqa: E501
:rtype: str
"""
return self._referenced_object_type
@referenced_object_type.setter
def referenced_object_type(self, referenced_object_type):
"""Sets the referenced_object_type of this ModelProperty.
If this property is related to other object(s), they'll be listed here. # noqa: E501
:param referenced_object_type: The referenced_object_type of this ModelProperty. # noqa: E501
:type: str
"""
self._referenced_object_type = referenced_object_type
@property
def display_order(self):
"""Gets the display_order of this ModelProperty. # noqa: E501
The order that this property should be displayed in the HubSpot UI relative to other properties for this object type. Properties are displayed in order starting with the lowest positive integer value. A value of -1 will cause the property to be displayed **after** any positive values. # noqa: E501
:return: The display_order of this ModelProperty. # noqa: E501
:rtype: int
"""
return self._display_order
@display_order.setter
def display_order(self, display_order):
"""Sets the display_order of this ModelProperty.
The order that this property should be displayed in the HubSpot UI relative to other properties for this object type. Properties are displayed in order starting with the lowest positive integer value. A value of -1 will cause the property to be displayed **after** any positive values. # noqa: E501
:param display_order: The display_order of this ModelProperty. # noqa: E501
:type: int
"""
self._display_order = display_order
@property
def calculated(self):
"""Gets the calculated of this ModelProperty. # noqa: E501
For default properties, true indicates that the property is calculated by a HubSpot process. It has no effect for custom properties. # noqa: E501
:return: The calculated of this ModelProperty. # noqa: E501
:rtype: bool
"""
return self._calculated
@calculated.setter
def calculated(self, calculated):
"""Sets the calculated of this ModelProperty.
For default properties, true indicates that the property is calculated by a HubSpot process. It has no effect for custom properties. # noqa: E501
:param calculated: The calculated of this ModelProperty. # noqa: E501
:type: bool
"""
self._calculated = calculated
@property
def external_options(self):
"""Gets the external_options of this ModelProperty. # noqa: E501
For default properties, true indicates that the options are stored externally to the property settings. # noqa: E501
:return: The external_options of this ModelProperty. # noqa: E501
:rtype: bool
"""
return self._external_options
@external_options.setter
def external_options(self, external_options):
"""Sets the external_options of this ModelProperty.
For default properties, true indicates that the options are stored externally to the property settings. # noqa: E501
:param external_options: The external_options of this ModelProperty. # noqa: E501
:type: bool
"""
self._external_options = external_options
@property
def archived(self):
"""Gets the archived of this ModelProperty. # noqa: E501
Whether or not the property is archived. # noqa: E501
:return: The archived of this ModelProperty. # noqa: E501
:rtype: bool
"""
return self._archived
@archived.setter
def archived(self, archived):
"""Sets the archived of this ModelProperty.
Whether or not the property is archived. # noqa: E501
:param archived: The archived of this ModelProperty. # noqa: E501
:type: bool
"""
self._archived = archived
@property
def has_unique_value(self):
"""Gets the has_unique_value of this ModelProperty. # noqa: E501
Whether or not the property's value must be unique. Once set, this can't be changed. # noqa: E501
:return: The has_unique_value of this ModelProperty. # noqa: E501
:rtype: bool
"""
return self._has_unique_value
@has_unique_value.setter
def has_unique_value(self, has_unique_value):
"""Sets the has_unique_value of this ModelProperty.
Whether or not the property's value must be unique. Once set, this can't be changed. # noqa: E501
:param has_unique_value: The has_unique_value of this ModelProperty. # noqa: E501
:type: bool
"""
self._has_unique_value = has_unique_value
@property
def hidden(self):
"""Gets the hidden of this ModelProperty. # noqa: E501
:return: The hidden of this ModelProperty. # noqa: E501
:rtype: bool
"""
return self._hidden
@hidden.setter
def hidden(self, hidden):
"""Sets the hidden of this ModelProperty.
:param hidden: The hidden of this ModelProperty. # noqa: E501
:type: bool
"""
self._hidden = hidden
@property
def hubspot_defined(self):
"""Gets the hubspot_defined of this ModelProperty. # noqa: E501
This will be true for default object properties built into HubSpot. # noqa: E501
:return: The hubspot_defined of this ModelProperty. # noqa: E501
:rtype: bool
"""
return self._hubspot_defined
@hubspot_defined.setter
def hubspot_defined(self, hubspot_defined):
"""Sets the hubspot_defined of this ModelProperty.
This will be true for default object properties built into HubSpot. # noqa: E501
:param hubspot_defined: The hubspot_defined of this ModelProperty. # noqa: E501
:type: bool
"""
self._hubspot_defined = hubspot_defined
@property
def show_currency_symbol(self):
"""Gets the show_currency_symbol of this ModelProperty. # noqa: E501
Whether the property will display the currency symbol set in the account settings. # noqa: E501
:return: The show_currency_symbol of this ModelProperty. # noqa: E501
:rtype: bool
"""
return self._show_currency_symbol
@show_currency_symbol.setter
def show_currency_symbol(self, show_currency_symbol):
"""Sets the show_currency_symbol of this ModelProperty.
Whether the property will display the currency symbol set in the account settings. # noqa: E501
:param show_currency_symbol: The show_currency_symbol of this ModelProperty. # noqa: E501
:type: bool
"""
self._show_currency_symbol = show_currency_symbol
@property
def modification_metadata(self):
"""Gets the modification_metadata of this ModelProperty. # noqa: E501
:return: The modification_metadata of this ModelProperty. # noqa: E501
:rtype: PropertyModificationMetadata
"""
return self._modification_metadata
@modification_metadata.setter
def modification_metadata(self, modification_metadata):
"""Sets the modification_metadata of this ModelProperty.
:param modification_metadata: The modification_metadata of this ModelProperty. # noqa: E501
:type: PropertyModificationMetadata
"""
self._modification_metadata = modification_metadata
@property
def form_field(self):
"""Gets the form_field of this ModelProperty. # noqa: E501
Whether or not the property can be used in a HubSpot form. # noqa: E501
:return: The form_field of this ModelProperty. # noqa: E501
:rtype: bool
"""
return self._form_field
@form_field.setter
def form_field(self, form_field):
"""Sets the form_field of this ModelProperty.
Whether or not the property can be used in a HubSpot form. # noqa: E501
:param form_field: The form_field of this ModelProperty. # noqa: E501
:type: bool
"""
self._form_field = form_field
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items()))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        # Delegates to to_str() so repr() matches the pretty-printed form.
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ModelProperty):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ModelProperty):
return True
return self.to_dict() != other.to_dict() | hubspot/crm/schemas/models/model_property.py | import pprint
import re # noqa: F401
import six
from hubspot.crm.schemas.configuration import Configuration
class ModelProperty(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"updated_at": "datetime",
"created_at": "datetime",
"archived_at": "datetime",
"name": "str",
"label": "str",
"type": "str",
"field_type": "str",
"description": "str",
"group_name": "str",
"options": "list[Option]",
"created_user_id": "str",
"updated_user_id": "str",
"referenced_object_type": "str",
"display_order": "int",
"calculated": "bool",
"external_options": "bool",
"archived": "bool",
"has_unique_value": "bool",
"hidden": "bool",
"hubspot_defined": "bool",
"show_currency_symbol": "bool",
"modification_metadata": "PropertyModificationMetadata",
"form_field": "bool",
}
attribute_map = {
"updated_at": "updatedAt",
"created_at": "createdAt",
"archived_at": "archivedAt",
"name": "name",
"label": "label",
"type": "type",
"field_type": "fieldType",
"description": "description",
"group_name": "groupName",
"options": "options",
"created_user_id": "createdUserId",
"updated_user_id": "updatedUserId",
"referenced_object_type": "referencedObjectType",
"display_order": "displayOrder",
"calculated": "calculated",
"external_options": "externalOptions",
"archived": "archived",
"has_unique_value": "hasUniqueValue",
"hidden": "hidden",
"hubspot_defined": "hubspotDefined",
"show_currency_symbol": "showCurrencySymbol",
"modification_metadata": "modificationMetadata",
"form_field": "formField",
}
def __init__(
self,
updated_at=None,
created_at=None,
archived_at=None,
name=None,
label=None,
type=None,
field_type=None,
description=None,
group_name=None,
options=None,
created_user_id=None,
updated_user_id=None,
referenced_object_type=None,
display_order=None,
calculated=None,
external_options=None,
archived=None,
has_unique_value=None,
hidden=None,
hubspot_defined=None,
show_currency_symbol=None,
modification_metadata=None,
form_field=None,
local_vars_configuration=None,
): # noqa: E501
"""ModelProperty - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._updated_at = None
self._created_at = None
self._archived_at = None
self._name = None
self._label = None
self._type = None
self._field_type = None
self._description = None
self._group_name = None
self._options = None
self._created_user_id = None
self._updated_user_id = None
self._referenced_object_type = None
self._display_order = None
self._calculated = None
self._external_options = None
self._archived = None
self._has_unique_value = None
self._hidden = None
self._hubspot_defined = None
self._show_currency_symbol = None
self._modification_metadata = None
self._form_field = None
self.discriminator = None
if updated_at is not None:
self.updated_at = updated_at
if created_at is not None:
self.created_at = created_at
if archived_at is not None:
self.archived_at = archived_at
self.name = name
self.label = label
self.type = type
self.field_type = field_type
self.description = description
self.group_name = group_name
self.options = options
if created_user_id is not None:
self.created_user_id = created_user_id
if updated_user_id is not None:
self.updated_user_id = updated_user_id
if referenced_object_type is not None:
self.referenced_object_type = referenced_object_type
if display_order is not None:
self.display_order = display_order
if calculated is not None:
self.calculated = calculated
if external_options is not None:
self.external_options = external_options
if archived is not None:
self.archived = archived
if has_unique_value is not None:
self.has_unique_value = has_unique_value
if hidden is not None:
self.hidden = hidden
if hubspot_defined is not None:
self.hubspot_defined = hubspot_defined
if show_currency_symbol is not None:
self.show_currency_symbol = show_currency_symbol
if modification_metadata is not None:
self.modification_metadata = modification_metadata
if form_field is not None:
self.form_field = form_field
@property
def updated_at(self):
    """The updated_at timestamp of this ModelProperty.

    :rtype: datetime
    """
    return self._updated_at

@updated_at.setter
def updated_at(self, value):
    """Set the updated_at timestamp of this ModelProperty.

    :type: datetime
    """
    self._updated_at = value
@property
def created_at(self):
    """When the property was created.

    :rtype: datetime
    """
    return self._created_at

@created_at.setter
def created_at(self, value):
    """Set the creation timestamp of this ModelProperty.

    :type: datetime
    """
    self._created_at = value
@property
def archived_at(self):
    """When the property was archived.

    :rtype: datetime
    """
    return self._archived_at

@archived_at.setter
def archived_at(self, value):
    """Set the archival timestamp of this ModelProperty.

    :type: datetime
    """
    self._archived_at = value
@property
def name(self):
    """The internal property name, used when referencing the property via the API.

    :rtype: str
    """
    return self._name

@name.setter
def name(self, value):
    """Set the internal property name.

    Rejects ``None`` when client-side validation is enabled.

    :type: str
    :raises ValueError: if *value* is ``None`` and validation is on
    """
    if self.local_vars_configuration.client_side_validation and value is None:
        raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
    self._name = value
@property
def label(self):
    """A human-readable property label shown in HubSpot.

    :rtype: str
    """
    return self._label

@label.setter
def label(self, value):
    """Set the human-readable label.

    Rejects ``None`` when client-side validation is enabled.

    :type: str
    :raises ValueError: if *value* is ``None`` and validation is on
    """
    if self.local_vars_configuration.client_side_validation and value is None:
        raise ValueError("Invalid value for `label`, must not be `None`")  # noqa: E501
    self._label = value
@property
def type(self):
    """The property data type.

    :rtype: str
    """
    return self._type

@type.setter
def type(self, value):
    """Set the property data type.

    Rejects ``None`` when client-side validation is enabled.

    :type: str
    :raises ValueError: if *value* is ``None`` and validation is on
    """
    if self.local_vars_configuration.client_side_validation and value is None:
        raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501
    self._type = value
@property
def field_type(self):
    """Controls how the property appears in HubSpot.

    :rtype: str
    """
    return self._field_type

@field_type.setter
def field_type(self, value):
    """Set the field type controlling how the property is rendered.

    Rejects ``None`` when client-side validation is enabled.

    :type: str
    :raises ValueError: if *value* is ``None`` and validation is on
    """
    if self.local_vars_configuration.client_side_validation and value is None:
        raise ValueError("Invalid value for `field_type`, must not be `None`")  # noqa: E501
    self._field_type = value
@property
def description(self):
    """A description of the property, shown as help text in HubSpot.

    :rtype: str
    """
    return self._description

@description.setter
def description(self, value):
    """Set the help-text description.

    Rejects ``None`` when client-side validation is enabled.

    :type: str
    :raises ValueError: if *value* is ``None`` and validation is on
    """
    if self.local_vars_configuration.client_side_validation and value is None:
        raise ValueError("Invalid value for `description`, must not be `None`")  # noqa: E501
    self._description = value
@property
def group_name(self):
    """The name of the property group the property belongs to.

    :rtype: str
    """
    return self._group_name

@group_name.setter
def group_name(self, value):
    """Set the owning property group name.

    Rejects ``None`` when client-side validation is enabled.

    :type: str
    :raises ValueError: if *value* is ``None`` and validation is on
    """
    if self.local_vars_configuration.client_side_validation and value is None:
        raise ValueError("Invalid value for `group_name`, must not be `None`")  # noqa: E501
    self._group_name = value
@property
def options(self):
    """Valid options for the property.

    Required (non-empty) for enumerated properties; empty for other types.

    :rtype: list[Option]
    """
    return self._options

@options.setter
def options(self, value):
    """Set the list of valid options.

    Rejects ``None`` when client-side validation is enabled.

    :type: list[Option]
    :raises ValueError: if *value* is ``None`` and validation is on
    """
    if self.local_vars_configuration.client_side_validation and value is None:
        raise ValueError("Invalid value for `options`, must not be `None`")  # noqa: E501
    self._options = value
@property
def created_user_id(self):
    """The HubSpot user ID of the property's creator.

    May be absent if the property was created outside of HubSpot.

    :rtype: str
    """
    return self._created_user_id

@created_user_id.setter
def created_user_id(self, value):
    """Set the creator's HubSpot user ID.

    :type: str
    """
    self._created_user_id = value
@property
def updated_user_id(self):
    """The HubSpot user ID of the last updater.

    May be absent if the property was updated outside of HubSpot.

    :rtype: str
    """
    return self._updated_user_id

@updated_user_id.setter
def updated_user_id(self, value):
    """Set the last updater's HubSpot user ID.

    :type: str
    """
    self._updated_user_id = value
@property
def referenced_object_type(self):
    """Object type(s) this property is related to, if any.

    :rtype: str
    """
    return self._referenced_object_type

@referenced_object_type.setter
def referenced_object_type(self, value):
    """Set the related object type(s).

    :type: str
    """
    self._referenced_object_type = value
@property
def display_order(self):
    """Display position relative to other properties of this object type.

    Lowest positive integer first; -1 displays after all positive values.

    :rtype: int
    """
    return self._display_order

@display_order.setter
def display_order(self, value):
    """Set the display order (-1 means after all positive values).

    :type: int
    """
    self._display_order = value
@property
def calculated(self):
    """For default properties, whether the value is calculated by HubSpot.

    Has no effect for custom properties.

    :rtype: bool
    """
    return self._calculated

@calculated.setter
def calculated(self, value):
    """Set the calculated flag.

    :type: bool
    """
    self._calculated = value
@property
def external_options(self):
    """For default properties, whether options are stored outside the property settings.

    :rtype: bool
    """
    return self._external_options

@external_options.setter
def external_options(self, value):
    """Set the external-options flag.

    :type: bool
    """
    self._external_options = value
@property
def archived(self):
    """Whether or not the property is archived.

    :rtype: bool
    """
    return self._archived

@archived.setter
def archived(self, value):
    """Set the archived flag.

    :type: bool
    """
    self._archived = value
@property
def has_unique_value(self):
    """Whether the property's value must be unique (immutable once set).

    :rtype: bool
    """
    return self._has_unique_value

@has_unique_value.setter
def has_unique_value(self, value):
    """Set the unique-value flag.

    :type: bool
    """
    self._has_unique_value = value
@property
def hidden(self):
    """The hidden flag of this ModelProperty.

    :rtype: bool
    """
    return self._hidden

@hidden.setter
def hidden(self, value):
    """Set the hidden flag.

    :type: bool
    """
    self._hidden = value
@property
def hubspot_defined(self):
    """True for default object properties built into HubSpot.

    :rtype: bool
    """
    return self._hubspot_defined

@hubspot_defined.setter
def hubspot_defined(self, value):
    """Set the hubspot-defined flag.

    :type: bool
    """
    self._hubspot_defined = value
@property
def show_currency_symbol(self):
    """Whether the property displays the account's configured currency symbol.

    :rtype: bool
    """
    return self._show_currency_symbol

@show_currency_symbol.setter
def show_currency_symbol(self, value):
    """Set the currency-symbol display flag.

    :type: bool
    """
    self._show_currency_symbol = value
@property
def modification_metadata(self):
    """The modification metadata of this ModelProperty.

    :rtype: PropertyModificationMetadata
    """
    return self._modification_metadata

@modification_metadata.setter
def modification_metadata(self, value):
    """Set the modification metadata.

    :type: PropertyModificationMetadata
    """
    self._modification_metadata = value
@property
def form_field(self):
    """Whether or not the property can be used in a HubSpot form.

    :rtype: bool
    """
    return self._form_field

@form_field.setter
def form_field(self, value):
    """Set the form-field flag.

    :type: bool
    """
    self._form_field = value
def to_dict(self):
    """Return the model's attributes as a plain dict.

    Iterates the keys of ``openapi_types`` and serializes nested models
    (anything exposing ``to_dict``) recursively, including inside lists
    and dict values.

    :rtype: dict
    """
    # `dict.items()`/plain iteration works on both Python 2 and 3, so the
    # `six.iteritems` compatibility shim is unnecessary here.
    result = {}
    for attr in self.openapi_types:
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = [v.to_dict() if hasattr(v, "to_dict") else v
                            for v in value]
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                            for k, v in value.items()}
        else:
            result[attr] = value
    return result
def to_str(self):
    """Return a pretty-printed string form of ``to_dict()``."""
    data = self.to_dict()
    return pprint.pformat(data)
def __repr__(self):
    """Debug representation for ``print``/``pprint``; delegates to ``to_str``."""
    return self.to_str()
def __eq__(self, other):
    """Two ModelProperty instances are equal when their dict forms match."""
    return isinstance(other, ModelProperty) and self.to_dict() == other.to_dict()
def __ne__(self, other):
    """Inverse of ``__eq__``: unequal when types or dict forms differ."""
    return not isinstance(other, ModelProperty) or self.to_dict() != other.to_dict()
import copy
import os
import unittest
from collections import OrderedDict
from conans import tools
from conans.client.client_cache import ClientCache
from conans.client.conf.detect import detect_defaults_settings
from conans.paths import CONANFILE_TXT
from conans.test.utils.test_files import temp_folder
from conans.test.utils.tools import TestClient
from conans.util.files import save
class MockOut(object):
    """Minimal stand-in for a conan output object: swallows all messages."""

    def writeln(self, *args, **kwargs):
        """Accept and discard a line of output."""
        return None

    def error(self, *args, **kwargs):
        """Accept and discard an error message."""
        return None
class ConfDefaultSettingsTest(unittest.TestCase):
    """Checks how default-profile settings interact with CLI/env overrides."""

    def test_update_settings(self):
        """An invalid profile value fails install until overridden via CLI or env."""
        default_profile = """[settings]
compiler=Visual Studio
compiler.version=42
arch=x86_64
compiler.runtime=MT
os=Windows
"""
        client = TestClient()
        save(client.client_cache.default_profile_path, default_profile)
        client.save({CONANFILE_TXT: ""})
        error = client.run("install Any/0.2@user/channel", ignore_error=True)
        self.assertTrue(error)
        self.assertIn("'42' is not a valid 'settings.compiler.version' value", client.user_io.out)
        # Overriding the bad value on the command line must succeed.
        client.run('install -s compiler="Visual Studio" -s compiler.version=14')
        self.assertNotIn("'42' is not a valid 'settings.compiler.version' value", client.user_io.out)
        # Same override via environment variable; it must not leak afterwards.
        with tools.environment_append({"CONAN_ENV_COMPILER_VERSION": "14"}):
            client.run('install')
        self.assertIsNone(os.environ.get("CONAN_ENV_COMPILER_VERSION"))

    def env_setting_override_test(self):
        """CONAN_ENV_* variables override detected default-profile settings."""
        tmp_dir = temp_folder()
        out = MockOut()
        cache = ClientCache(tmp_dir, None, out)
        base_settings = OrderedDict(detect_defaults_settings(out))
        with tools.environment_append({"CONAN_ENV_COMPILER_VERSION": "4.6"}):
            expected = copy.copy(base_settings)
            expected["compiler.version"] = "4.6"
            # assertEqual: `assertEquals` is a deprecated alias
            self.assertEqual(cache.default_profile.settings, expected)
        tmp_dir = temp_folder()
        cache = ClientCache(tmp_dir, None, out)
        with tools.environment_append({}):
            self.assertEqual(cache.default_profile.settings, base_settings)
        tmp_dir = temp_folder()
        cache = ClientCache(tmp_dir, None, out)
        # If compiler is overwritten compiler subsettings are not assigned
        with tools.environment_append({"CONAN_ENV_COMPILER": "Visual Studio"}):
            expected = copy.copy(base_settings)
            expected["compiler"] = "Visual Studio"
            self.assertEqual(cache.default_profile.settings, expected)
        tmp_dir = temp_folder()
        cache = ClientCache(tmp_dir, None, out)
        with tools.environment_append({"CONAN_ENV_COMPILER": "Visual Studio",
                                       "CONAN_ENV_COMPILER_VERSION": "14",
                                       "CONAN_ENV_COMPILER_RUNTIME": "MDd"}):
            expected = copy.copy(base_settings)
            expected["compiler"] = "Visual Studio"
            expected["compiler.runtime"] = "MDd"
            expected["compiler.version"] = "14"
            self.assertEqual(cache.default_profile.settings, expected)
import os
import unittest
from collections import OrderedDict
from conans import tools
from conans.client.client_cache import ClientCache
from conans.client.conf.detect import detect_defaults_settings
from conans.paths import CONANFILE_TXT
from conans.test.utils.test_files import temp_folder
from conans.test.utils.tools import TestClient
from conans.util.files import save
class MockOut(object):
    """Minimal stand-in for a conan output object: swallows all messages."""

    def writeln(self, *args, **kwargs):
        """Accept and discard a line of output."""
        return None

    def error(self, *args, **kwargs):
        """Accept and discard an error message."""
        return None
class ConfDefaultSettingsTest(unittest.TestCase):
    """Checks how default-profile settings interact with CLI/env overrides."""

    def test_update_settings(self):
        """An invalid profile value fails install until overridden via CLI or env."""
        default_profile = """[settings]
compiler=Visual Studio
compiler.version=42
arch=x86_64
compiler.runtime=MT
os=Windows
"""
        client = TestClient()
        save(client.client_cache.default_profile_path, default_profile)
        client.save({CONANFILE_TXT: ""})
        error = client.run("install Any/0.2@user/channel", ignore_error=True)
        self.assertTrue(error)
        self.assertIn("'42' is not a valid 'settings.compiler.version' value", client.user_io.out)
        # Overriding the bad value on the command line must succeed.
        client.run('install -s compiler="Visual Studio" -s compiler.version=14')
        self.assertNotIn("'42' is not a valid 'settings.compiler.version' value", client.user_io.out)
        # Same override via environment variable; it must not leak afterwards.
        with tools.environment_append({"CONAN_ENV_COMPILER_VERSION": "14"}):
            client.run('install')
        self.assertIsNone(os.environ.get("CONAN_ENV_COMPILER_VERSION"))

    def env_setting_override_test(self):
        """CONAN_ENV_* variables override detected default-profile settings."""
        tmp_dir = temp_folder()
        out = MockOut()
        cache = ClientCache(tmp_dir, None, out)
        base_settings = OrderedDict(detect_defaults_settings(out))
        with tools.environment_append({"CONAN_ENV_COMPILER_VERSION": "4.6"}):
            expected = copy.copy(base_settings)
            expected["compiler.version"] = "4.6"
            # assertEqual: `assertEquals` is a deprecated alias
            self.assertEqual(cache.default_profile.settings, expected)
        tmp_dir = temp_folder()
        cache = ClientCache(tmp_dir, None, out)
        with tools.environment_append({}):
            self.assertEqual(cache.default_profile.settings, base_settings)
        tmp_dir = temp_folder()
        cache = ClientCache(tmp_dir, None, out)
        # If compiler is overwritten compiler subsettings are not assigned
        with tools.environment_append({"CONAN_ENV_COMPILER": "Visual Studio"}):
            expected = copy.copy(base_settings)
            expected["compiler"] = "Visual Studio"
            self.assertEqual(cache.default_profile.settings, expected)
        tmp_dir = temp_folder()
        cache = ClientCache(tmp_dir, None, out)
        with tools.environment_append({"CONAN_ENV_COMPILER": "Visual Studio",
                                       "CONAN_ENV_COMPILER_VERSION": "14",
                                       "CONAN_ENV_COMPILER_RUNTIME": "MDd"}):
            expected = copy.copy(base_settings)
            expected["compiler"] = "Visual Studio"
            expected["compiler.runtime"] = "MDd"
            expected["compiler.version"] = "14"
            self.assertEqual(cache.default_profile.settings, expected)
from __future__ import unicode_literals
from collections import OrderedDict
from datetime import datetime, timedelta
from dateutil import rrule
from flask import flash, jsonify, request, session
from werkzeug.exceptions import BadRequest
from indico.modules.events.cloning import EventCloner
from indico.modules.events.management.controllers import RHManageEventBase
from indico.modules.events.management.forms import (CLONE_REPEAT_CHOICES, CloneCategorySelectForm, CloneContentsForm,
CloneRepeatabilityForm, CloneRepeatIntervalForm,
CloneRepeatOnceForm, CloneRepeatPatternForm, ImportContentsForm,
ImportSourceEventForm)
from indico.modules.events.operations import clone_event, clone_into_event
from indico.modules.events.util import get_event_from_url
from indico.util.i18n import _
from indico.web.flask.util import url_for
from indico.web.util import jsonify_data, jsonify_template
# Maps the repeatability choice submitted from the clone wizard to the form
# class used to collect that mode's parameters.
REPEAT_FORM_MAP = {
    'once': CloneRepeatOnceForm,
    'interval': CloneRepeatIntervalForm,
    'pattern': CloneRepeatPatternForm
}
# relativedelta attribute name -> rrule frequency constant. The ordering
# matters: `relativedelta_to_rrule_interval` returns the first non-zero unit,
# so larger units must come first.
RRULE_FREQ_MAP = OrderedDict([
    ('years', rrule.YEARLY),
    ('months', rrule.MONTHLY),
    ('weeks', rrule.WEEKLY),
    ('days', rrule.DAILY),
    ('hours', rrule.HOURLY),
    ('minutes', rrule.MINUTELY),
    ('seconds', rrule.SECONDLY)
])
def relativedelta_to_rrule_interval(rdelta):
    """Convert a relativedelta into an ``(rrule frequency, interval)`` pair.

    Walks ``RRULE_FREQ_MAP`` from the largest unit (years) down and returns
    the first unit of *rdelta* with a non-zero value.

    :raises ValueError: if every supported unit of *rdelta* is zero
    """
    # `.items()` is both Python-2 and Python-3 compatible; `.viewitems()` is
    # py2-only and its view semantics were never needed for plain iteration.
    for unit, freq in RRULE_FREQ_MAP.items():
        value = getattr(rdelta, unit)
        if value:
            return freq, value
    raise ValueError('Invalid relativedelta(...) object')
def get_clone_calculator(repeatability, event):
    """Return the date calculator matching the requested repeat mode.

    :raises BadRequest: for any mode other than 'interval' or 'pattern'
    """
    calculators = {'interval': IntervalCloneCalculator,
                   'pattern': PatternCloneCalculator}
    if repeatability not in calculators:
        raise BadRequest
    return calculators[repeatability](event)
class CloneCalculator(object):
    """Base class computing the list of start dates for cloned events."""

    def __init__(self, event):
        self.event = event

    def _naivify(self, dt):
        # Convert to the event's timezone and drop tzinfo; rrule works with
        # naive datetimes.
        return dt.astimezone(self.event.tzinfo).replace(tzinfo=None)

    def _tzify(self, dates):
        # Re-attach the event's timezone to naive datetimes produced by rrule.
        return [self.event.tzinfo.localize(dt) for dt in dates]

    def _calc_stop_criteria(self, form):
        # Build the rrule stop condition: either an end day ('until', combined
        # with the original start time) or a fixed occurrence count ('count').
        args = {}
        if form.stop_criterion.data == 'day':
            args['until'] = datetime.combine(form.until_dt.data, self._naivify(form.start_dt.data).time())
        else:
            args['count'] = form.num_times.data
        return args

    def calculate(self, formdata):
        """Calculate dates of cloned events

        :return: a ``(dates, last_day_of_month)`` tuple
        :raises ValueError: with a list of ``(field label, errors)`` pairs
            when the submitted form data does not validate
        """
        form = self.form_class(self.event, formdata=formdata)
        if form.validate():
            return self._calculate(form)
        else:
            raise ValueError([(unicode(getattr(form, k).label.text), v) for k, v in form.errors.viewitems()])
class PatternCloneCalculator(CloneCalculator):
    """Clone dates recurring on the n-th weekday of every x months."""

    form_class = CloneRepeatPatternForm

    def _calculate(self, form):
        args = {'dtstart': self._naivify(form.start_dt.data)}
        args.update(self._calc_stop_criteria(form))
        # e.g. "2nd Friday every 3 months": byweekday selects the weekday,
        # bysetpos which occurrence of it inside the month.
        dates = self._tzify(rrule.rrule(rrule.MONTHLY,
                                        interval=form.num_months.data,
                                        byweekday=form.week_day.week_day_data,
                                        bysetpos=form.week_day.day_number_data,
                                        **args))
        # The "last day of month" flag never applies to pattern-based repeats.
        return dates, False
class IntervalCloneCalculator(CloneCalculator):
    """Clone dates recurring at a fixed interval (days/weeks/months/...)."""

    form_class = CloneRepeatIntervalForm

    def _calculate(self, form):
        freq, interval = relativedelta_to_rrule_interval(form.recurrence.data)
        # check if last day of month
        dtstart = self._naivify(form.start_dt.data)
        next_day = dtstart + timedelta(days=1)
        if freq == rrule.MONTHLY and next_day.day == 1:
            # Monthly repetition starting on a month's last day: recur on the
            # 1st of the following months instead and step each result back a
            # day, so months of different lengths each yield their own last day.
            kwargs = dict(self._calc_stop_criteria(form), dtstart=next_day)
            dates = rrule.rrule(freq, interval=interval, **kwargs)
            dates = self._tzify([date - timedelta(days=1) for date in dates])
            last_day_of_month = True
        else:
            kwargs = dict(self._calc_stop_criteria(form), dtstart=dtstart)
            dates = self._tzify(rrule.rrule(freq, interval=interval, **kwargs))
            last_day_of_month = False
        return dates, last_day_of_month
class RHClonePreview(RHManageEventBase):
    """AJAX endpoint previewing the dates a clone operation would create."""

    ALLOW_LOCKED = True

    def _process(self):
        form = CloneRepeatabilityForm()
        clone_calculator = get_clone_calculator(form.repeatability.data, self.event)
        try:
            dates, last_day_of_month = clone_calculator.calculate(request.form)
            # Hard cap so a preview cannot request an unbounded clone series.
            if len(dates) > 100:
                raise ValueError(_("You can clone maximum of 100 times at once"))
        except ValueError as e:
            # `e.message` is Python-2-only; this module targets py2.
            return jsonify(error={'message': e.message})
        return jsonify_data(count=len(dates), dates=dates, last_day_of_month=last_day_of_month, flash=False)
class RHCloneEvent(RHManageEventBase):
    """Create copies of the event."""

    ALLOW_LOCKED = True

    def _form_for_step(self, step, set_defaults=True):
        # Wizard step -> form: 1 repeatability, 2 contents, 3 target category,
        # 4 repetition parameters (form class depends on chosen repeatability).
        if step == 1:
            return CloneRepeatabilityForm()
        elif step == 2:
            return CloneContentsForm(self.event, set_defaults=set_defaults)
        elif step == 3:
            default_category = (self.event.category if self.event.category.can_create_events(session.user)
                                else None)
            return CloneCategorySelectForm(self.event, category=default_category)
        elif step == 4:
            return REPEAT_FORM_MAP[request.form['repeatability']](self.event, set_defaults=set_defaults)
        else:
            return None

    def _process(self):
        step = int(request.form.get('step', 1))
        tpl_args = {}
        form = self._form_for_step(step, set_defaults=True)
        prev_form = self._form_for_step(step - 1)
        # Data submitted for the previous step is re-validated; on failure the
        # user is sent back to that step.
        if prev_form and not prev_form.validate():
            form = prev_form
            step = step - 1
        if step == 4:
            tpl_args.update({
                'step_title': dict(CLONE_REPEAT_CHOICES)[request.form['repeatability']],
            })
        elif step > 4:
            # last step - perform actual cloning
            form = REPEAT_FORM_MAP[request.form['repeatability']](self.event)
            if form.validate_on_submit():
                if form.repeatability.data == 'once':
                    dates = [form.start_dt.data]
                else:
                    clone_calculator = get_clone_calculator(form.repeatability.data, self.event)
                    dates = clone_calculator.calculate(request.form)[0]
                clones = [clone_event(self.event, start_dt, set(form.selected_items.data), form.category.data)
                          for start_dt in dates]
                # Single clone redirects to its settings page; multiple clones
                # redirect to the target category.
                if len(clones) == 1:
                    flash(_('Welcome to your cloned event!'), 'success')
                    return jsonify_data(redirect=url_for('event_management.settings', clones[0]), flash=False)
                else:
                    flash(_('{} new events created.').format(len(dates)), 'success')
                    return jsonify_data(redirect=form.category.data.url, flash=False)
            else:
                # back to step 4, since there's been an error
                step = 4
        # Cloner dependency graph, used client-side to toggle options together.
        dependencies = {c.name: {'requires': list(c.requires_deep), 'required_by': list(c.required_by_deep)}
                        for c in EventCloner.get_cloners(self.event)}
        return jsonify_template('events/management/clone_event.html', event=self.event, step=step, form=form,
                                cloner_dependencies=dependencies, **tpl_args)
def _get_import_source_from_url(target_event, url):
    """Resolve *url* to an event usable as an import source for *target_event*.

    :raises ValueError: if the URL points at the target event itself, at an
        event of a different type, or at an event the current user cannot
        manage
    """
    source = get_event_from_url(url)
    if source == target_event:
        raise ValueError(_('Cannot import from the same event'))
    if source.type_ != target_event.type_:
        raise ValueError(_('Cannot import from a different type of event'))
    if not source.can_manage(session.user):
        raise ValueError(_('You do not have management rights to this event'))
    return source
class RHImportFromEvent(RHManageEventBase):
    """Import data from another event."""

    def _process_args(self):
        RHManageEventBase._process_args(self)
        # The source event is resolved from the submitted URL (if any) and
        # validated against the target event and the user's permissions.
        url = request.form.get('source_event_url')
        self.source_event = _get_import_source_from_url(self.event, url) if url else None

    def _form_for_step(self, step, set_defaults=True):
        # Wizard step -> form: 1 source event URL, 2 contents to import.
        if step == 1:
            return ImportSourceEventForm()
        elif step == 2:
            return ImportContentsForm(self.source_event, self.event, set_defaults=set_defaults)
        else:
            return None

    def _process(self):
        step = int(request.form.get('step', 1))
        form = self._form_for_step(step, set_defaults=True)
        prev_form = self._form_for_step(step - 1)
        # Re-validate the previous step's data; on failure send the user back.
        if prev_form and not prev_form.validate():
            form = prev_form
            step = step - 1
        elif step > 2:
            # last step - perform actual cloning
            form = ImportContentsForm(self.source_event, self.event)
            if form.validate_on_submit():
                updated_event = clone_into_event(self.source_event, self.event, set(form.selected_items.data))
                flash(_('Import successful!'), 'success')
                return jsonify_data(redirect=url_for('event_management.settings', updated_event), flash=False)
            else:
                # back to step 2, since there's been an error
                step = 2
        # Cloner dependency graph, used client-side to toggle options together.
        dependencies = {c.name: {'requires': list(c.requires_deep), 'required_by': list(c.required_by_deep)}
                        for c in EventCloner.get_cloners(self.event)}
        return jsonify_template('events/management/import_event.html', event=self.event, step=step, form=form,
                                cloner_dependencies=dependencies)
class RHImportEventDetails(RHManageEventBase):
    """AJAX endpoint returning details of a prospective import-source event."""

    ALLOW_LOCKED = True

    def _process(self):
        from indico.modules.events.schemas import EventDetailsSchema
        schema = EventDetailsSchema()
        form = ImportSourceEventForm()
        try:
            event = _get_import_source_from_url(self.event, form.source_event_url.data)
        except ValueError as e:
            # `e.message` is Python-2-only; this module targets py2.
            return jsonify(error={'message': e.message})
        return jsonify_data(event=schema.dump(event))
from __future__ import unicode_literals
from collections import OrderedDict
from datetime import datetime, timedelta
from dateutil import rrule
from flask import flash, jsonify, request, session
from werkzeug.exceptions import BadRequest
from indico.modules.events.cloning import EventCloner
from indico.modules.events.management.controllers import RHManageEventBase
from indico.modules.events.management.forms import (CLONE_REPEAT_CHOICES, CloneCategorySelectForm, CloneContentsForm,
CloneRepeatabilityForm, CloneRepeatIntervalForm,
CloneRepeatOnceForm, CloneRepeatPatternForm, ImportContentsForm,
ImportSourceEventForm)
from indico.modules.events.operations import clone_event, clone_into_event
from indico.modules.events.util import get_event_from_url
from indico.util.i18n import _
from indico.web.flask.util import url_for
from indico.web.util import jsonify_data, jsonify_template
# Maps the repeatability choice submitted from the clone wizard to the form
# class used to collect that mode's parameters.
REPEAT_FORM_MAP = {
    'once': CloneRepeatOnceForm,
    'interval': CloneRepeatIntervalForm,
    'pattern': CloneRepeatPatternForm
}
# relativedelta attribute name -> rrule frequency constant. The ordering
# matters: `relativedelta_to_rrule_interval` returns the first non-zero unit,
# so larger units must come first.
RRULE_FREQ_MAP = OrderedDict([
    ('years', rrule.YEARLY),
    ('months', rrule.MONTHLY),
    ('weeks', rrule.WEEKLY),
    ('days', rrule.DAILY),
    ('hours', rrule.HOURLY),
    ('minutes', rrule.MINUTELY),
    ('seconds', rrule.SECONDLY)
])
def relativedelta_to_rrule_interval(rdelta):
    """Convert a relativedelta into an ``(rrule frequency, interval)`` pair.

    Walks ``RRULE_FREQ_MAP`` from the largest unit (years) down and returns
    the first unit of *rdelta* with a non-zero value.

    :raises ValueError: if every supported unit of *rdelta* is zero
    """
    # `.items()` is both Python-2 and Python-3 compatible; `.viewitems()` is
    # py2-only and its view semantics were never needed for plain iteration.
    for unit, freq in RRULE_FREQ_MAP.items():
        value = getattr(rdelta, unit)
        if value:
            return freq, value
    raise ValueError('Invalid relativedelta(...) object')
def get_clone_calculator(repeatability, event):
    """Return the date calculator matching the requested repeat mode.

    :raises BadRequest: for any mode other than 'interval' or 'pattern'
    """
    calculators = {'interval': IntervalCloneCalculator,
                   'pattern': PatternCloneCalculator}
    if repeatability not in calculators:
        raise BadRequest
    return calculators[repeatability](event)
class CloneCalculator(object):
    """Base class computing the list of start dates for cloned events."""

    def __init__(self, event):
        self.event = event

    def _naivify(self, dt):
        # Convert to the event's timezone and drop tzinfo; rrule works with
        # naive datetimes.
        return dt.astimezone(self.event.tzinfo).replace(tzinfo=None)

    def _tzify(self, dates):
        # Re-attach the event's timezone to naive datetimes produced by rrule.
        return [self.event.tzinfo.localize(dt) for dt in dates]

    def _calc_stop_criteria(self, form):
        # Build the rrule stop condition: either an end day ('until', combined
        # with the original start time) or a fixed occurrence count ('count').
        args = {}
        if form.stop_criterion.data == 'day':
            args['until'] = datetime.combine(form.until_dt.data, self._naivify(form.start_dt.data).time())
        else:
            args['count'] = form.num_times.data
        return args

    def calculate(self, formdata):
        """Calculate dates of cloned events

        :return: a ``(dates, last_day_of_month)`` tuple
        :raises ValueError: with a list of ``(field label, errors)`` pairs
            when the submitted form data does not validate
        """
        form = self.form_class(self.event, formdata=formdata)
        if form.validate():
            return self._calculate(form)
        else:
            raise ValueError([(unicode(getattr(form, k).label.text), v) for k, v in form.errors.viewitems()])
class PatternCloneCalculator(CloneCalculator):
form_class = CloneRepeatPatternForm
def _calculate(self, form):
args = {'dtstart': self._naivify(form.start_dt.data)}
args.update(self._calc_stop_criteria(form))
dates = self._tzify(rrule.rrule(rrule.MONTHLY,
interval=form.num_months.data,
byweekday=form.week_day.week_day_data,
bysetpos=form.week_day.day_number_data,
**args))
return dates, False
class IntervalCloneCalculator(CloneCalculator):
form_class = CloneRepeatIntervalForm
def _calculate(self, form):
freq, interval = relativedelta_to_rrule_interval(form.recurrence.data)
# check if last day of month
dtstart = self._naivify(form.start_dt.data)
next_day = dtstart + timedelta(days=1)
if freq == rrule.MONTHLY and next_day.day == 1:
kwargs = dict(self._calc_stop_criteria(form), dtstart=next_day)
dates = rrule.rrule(freq, interval=interval, **kwargs)
dates = self._tzify([date - timedelta(days=1) for date in dates])
last_day_of_month = True
else:
kwargs = dict(self._calc_stop_criteria(form), dtstart=dtstart)
dates = self._tzify(rrule.rrule(freq, interval=interval, **kwargs))
last_day_of_month = False
return dates, last_day_of_month
class RHClonePreview(RHManageEventBase):
ALLOW_LOCKED = True
def _process(self):
form = CloneRepeatabilityForm()
clone_calculator = get_clone_calculator(form.repeatability.data, self.event)
try:
dates, last_day_of_month = clone_calculator.calculate(request.form)
if len(dates) > 100:
raise ValueError(_("You can clone maximum of 100 times at once"))
except ValueError as e:
return jsonify(error={'message': e.message})
return jsonify_data(count=len(dates), dates=dates, last_day_of_month=last_day_of_month, flash=False)
class RHCloneEvent(RHManageEventBase):
"""Create copies of the event."""
ALLOW_LOCKED = True
def _form_for_step(self, step, set_defaults=True):
if step == 1:
return CloneRepeatabilityForm()
elif step == 2:
return CloneContentsForm(self.event, set_defaults=set_defaults)
elif step == 3:
default_category = (self.event.category if self.event.category.can_create_events(session.user)
else None)
return CloneCategorySelectForm(self.event, category=default_category)
elif step == 4:
return REPEAT_FORM_MAP[request.form['repeatability']](self.event, set_defaults=set_defaults)
else:
return None
def _process(self):
step = int(request.form.get('step', 1))
tpl_args = {}
form = self._form_for_step(step, set_defaults=True)
prev_form = self._form_for_step(step - 1)
if prev_form and not prev_form.validate():
form = prev_form
step = step - 1
if step == 4:
tpl_args.update({
'step_title': dict(CLONE_REPEAT_CHOICES)[request.form['repeatability']],
})
elif step > 4:
# last step - perform actual cloning
form = REPEAT_FORM_MAP[request.form['repeatability']](self.event)
if form.validate_on_submit():
if form.repeatability.data == 'once':
dates = [form.start_dt.data]
else:
clone_calculator = get_clone_calculator(form.repeatability.data, self.event)
dates = clone_calculator.calculate(request.form)[0]
clones = [clone_event(self.event, start_dt, set(form.selected_items.data), form.category.data)
for start_dt in dates]
if len(clones) == 1:
flash(_('Welcome to your cloned event!'), 'success')
return jsonify_data(redirect=url_for('event_management.settings', clones[0]), flash=False)
else:
flash(_('{} new events created.').format(len(dates)), 'success')
return jsonify_data(redirect=form.category.data.url, flash=False)
else:
# back to step 4, since there's been an error
step = 4
dependencies = {c.name: {'requires': list(c.requires_deep), 'required_by': list(c.required_by_deep)}
for c in EventCloner.get_cloners(self.event)}
return jsonify_template('events/management/clone_event.html', event=self.event, step=step, form=form,
cloner_dependencies=dependencies, **tpl_args)
def _get_import_source_from_url(target_event, url):
event = get_event_from_url(url)
if event == target_event:
raise ValueError(_('Cannot import from the same event'))
if event.type_ != target_event.type_:
raise ValueError(_('Cannot import from a different type of event'))
if not event.can_manage(session.user):
raise ValueError(_('You do not have management rights to this event'))
return event
class RHImportFromEvent(RHManageEventBase):
"""Import data from another event."""
def _process_args(self):
RHManageEventBase._process_args(self)
url = request.form.get('source_event_url')
self.source_event = _get_import_source_from_url(self.event, url) if url else None
def _form_for_step(self, step, set_defaults=True):
if step == 1:
return ImportSourceEventForm()
elif step == 2:
return ImportContentsForm(self.source_event, self.event, set_defaults=set_defaults)
else:
return None
def _process(self):
step = int(request.form.get('step', 1))
form = self._form_for_step(step, set_defaults=True)
prev_form = self._form_for_step(step - 1)
if prev_form and not prev_form.validate():
form = prev_form
step = step - 1
elif step > 2:
# last step - perform actual cloning
form = ImportContentsForm(self.source_event, self.event)
if form.validate_on_submit():
updated_event = clone_into_event(self.source_event, self.event, set(form.selected_items.data))
flash(_('Import successful!'), 'success')
return jsonify_data(redirect=url_for('event_management.settings', updated_event), flash=False)
else:
# back to step 2, since there's been an error
step = 2
dependencies = {c.name: {'requires': list(c.requires_deep), 'required_by': list(c.required_by_deep)}
for c in EventCloner.get_cloners(self.event)}
return jsonify_template('events/management/import_event.html', event=self.event, step=step, form=form,
cloner_dependencies=dependencies)
class RHImportEventDetails(RHManageEventBase):
ALLOW_LOCKED = True
def _process(self):
from indico.modules.events.schemas import EventDetailsSchema
schema = EventDetailsSchema()
form = ImportSourceEventForm()
try:
event = _get_import_source_from_url(self.event, form.source_event_url.data)
except ValueError as e:
return jsonify(error={'message': e.message})
return jsonify_data(event=schema.dump(event)) | 0.696165 | 0.165357 |
import os
try:
__IPYTHON__
import sys
del sys.argv[1:]
except:
pass
import srwl_bl
import srwlib
import srwlpy
import srwl_uti_smp
def set_optics(v=None):
el = []
pp = []
names = ['MOAT_1', 'MOAT_1_MOAT_2', 'MOAT_2', 'MOAT_2_HFM', 'HFM', 'HFM_VFM', 'VFM', 'VFM_VDM', 'VDM', 'VDM_SSA', 'SSA', 'SSA_ES1', 'ES1', 'ES1_CRL', 'CRL', 'CRL_ES2', 'ES2']
for el_name in names:
if el_name == 'MOAT_1':
# MOAT_1: crystal 31.94m
crystal = srwlib.SRWLOptCryst(
_d_sp=v.op_MOAT_1_d_sp,
_psi0r=v.op_MOAT_1_psi0r,
_psi0i=v.op_MOAT_1_psi0i,
_psi_hr=v.op_MOAT_1_psiHr,
_psi_hi=v.op_MOAT_1_psiHi,
_psi_hbr=v.op_MOAT_1_psiHBr,
_psi_hbi=v.op_MOAT_1_psiHBi,
_tc=v.op_MOAT_1_tc,
_ang_as=v.op_MOAT_1_ang_as,
)
crystal.set_orient(
_nvx=v.op_MOAT_1_nvx,
_nvy=v.op_MOAT_1_nvy,
_nvz=v.op_MOAT_1_nvz,
_tvx=v.op_MOAT_1_tvx,
_tvy=v.op_MOAT_1_tvy,
)
el.append(crystal)
pp.append(v.op_MOAT_1_pp)
mirror_file = v.op_MOAT_1_hfn
assert os.path.isfile(mirror_file), \
'Missing input file {}, required by MOAT_1 beamline element'.format(mirror_file)
el.append(srwlib.srwl_opt_setup_surf_height_1d(
srwlib.srwl_uti_read_data_cols(mirror_file, "\t", 0, 1),
_dim=v.op_MOAT_1_dim,
_ang=abs(v.op_MOAT_1_ang),
_amp_coef=v.op_MOAT_1_amp_coef,
))
pp.append([0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0])
elif el_name == 'MOAT_1_MOAT_2':
# MOAT_1_MOAT_2: drift 31.94m
el.append(srwlib.SRWLOptD(
_L=v.op_MOAT_1_MOAT_2_L,
))
pp.append(v.op_MOAT_1_MOAT_2_pp)
elif el_name == 'MOAT_2':
# MOAT_2: crystal 31.99m
crystal = srwlib.SRWLOptCryst(
_d_sp=v.op_MOAT_2_d_sp,
_psi0r=v.op_MOAT_2_psi0r,
_psi0i=v.op_MOAT_2_psi0i,
_psi_hr=v.op_MOAT_2_psiHr,
_psi_hi=v.op_MOAT_2_psiHi,
_psi_hbr=v.op_MOAT_2_psiHBr,
_psi_hbi=v.op_MOAT_2_psiHBi,
_tc=v.op_MOAT_2_tc,
_ang_as=v.op_MOAT_2_ang_as,
)
crystal.set_orient(
_nvx=v.op_MOAT_2_nvx,
_nvy=v.op_MOAT_2_nvy,
_nvz=v.op_MOAT_2_nvz,
_tvx=v.op_MOAT_2_tvx,
_tvy=v.op_MOAT_2_tvy,
)
el.append(crystal)
pp.append(v.op_MOAT_2_pp)
elif el_name == 'MOAT_2_HFM':
# MOAT_2_HFM: drift 31.99m
el.append(srwlib.SRWLOptD(
_L=v.op_MOAT_2_HFM_L,
))
pp.append(v.op_MOAT_2_HFM_pp)
elif el_name == 'HFM':
# HFM: sphericalMirror 34.88244m
el.append(srwlib.SRWLOptMirSph(
_r=v.op_HFM_r,
_size_tang=v.op_HFM_size_tang,
_size_sag=v.op_HFM_size_sag,
_nvx=v.op_HFM_nvx,
_nvy=v.op_HFM_nvy,
_nvz=v.op_HFM_nvz,
_tvx=v.op_HFM_tvx,
_tvy=v.op_HFM_tvy,
_x=v.op_HFM_x,
_y=v.op_HFM_y,
))
pp.append(v.op_HFM_pp)
mirror_file = v.op_HFM_hfn
assert os.path.isfile(mirror_file), \
'Missing input file {}, required by HFM beamline element'.format(mirror_file)
el.append(srwlib.srwl_opt_setup_surf_height_1d(
srwlib.srwl_uti_read_data_cols(mirror_file, "\t", 0, 1),
_dim=v.op_HFM_dim,
_ang=abs(v.op_HFM_ang),
_amp_coef=v.op_HFM_amp_coef,
))
pp.append([0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0])
elif el_name == 'HFM_VFM':
# HFM_VFM: drift 34.88244m
el.append(srwlib.SRWLOptD(
_L=v.op_HFM_VFM_L,
))
pp.append(v.op_HFM_VFM_pp)
elif el_name == 'VFM':
# VFM: sphericalMirror 38.30244m
el.append(srwlib.SRWLOptMirSph(
_r=v.op_VFM_r,
_size_tang=v.op_VFM_size_tang,
_size_sag=v.op_VFM_size_sag,
_nvx=v.op_VFM_nvx,
_nvy=v.op_VFM_nvy,
_nvz=v.op_VFM_nvz,
_tvx=v.op_VFM_tvx,
_tvy=v.op_VFM_tvy,
_x=v.op_VFM_x,
_y=v.op_VFM_y,
))
pp.append(v.op_VFM_pp)
mirror_file = v.op_VFM_hfn
assert os.path.isfile(mirror_file), \
'Missing input file {}, required by VFM beamline element'.format(mirror_file)
el.append(srwlib.srwl_opt_setup_surf_height_1d(
srwlib.srwl_uti_read_data_cols(mirror_file, "\t", 0, 1),
_dim=v.op_VFM_dim,
_ang=abs(v.op_VFM_ang),
_amp_coef=v.op_VFM_amp_coef,
))
pp.append([0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0])
elif el_name == 'VFM_VDM':
# VFM_VDM: drift 38.30244m
el.append(srwlib.SRWLOptD(
_L=v.op_VFM_VDM_L,
))
pp.append(v.op_VFM_VDM_pp)
elif el_name == 'VDM':
# VDM: sphericalMirror 39.0m
el.append(srwlib.SRWLOptMirSph(
_r=v.op_VDM_r,
_size_tang=v.op_VDM_size_tang,
_size_sag=v.op_VDM_size_sag,
_nvx=v.op_VDM_nvx,
_nvy=v.op_VDM_nvy,
_nvz=v.op_VDM_nvz,
_tvx=v.op_VDM_tvx,
_tvy=v.op_VDM_tvy,
_x=v.op_VDM_x,
_y=v.op_VDM_y,
))
pp.append(v.op_VDM_pp)
mirror_file = v.op_VDM_hfn
assert os.path.isfile(mirror_file), \
'Missing input file {}, required by VDM beamline element'.format(mirror_file)
el.append(srwlib.srwl_opt_setup_surf_height_1d(
srwlib.srwl_uti_read_data_cols(mirror_file, "\t", 0, 1),
_dim=v.op_VDM_dim,
_ang=abs(v.op_VDM_ang),
_amp_coef=v.op_VDM_amp_coef,
))
pp.append([0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0])
elif el_name == 'VDM_SSA':
# VDM_SSA: drift 39.0m
el.append(srwlib.SRWLOptD(
_L=v.op_VDM_SSA_L,
))
pp.append(v.op_VDM_SSA_pp)
elif el_name == 'SSA':
# SSA: aperture 47.00244m
el.append(srwlib.SRWLOptA(
_shape=v.op_SSA_shape,
_ap_or_ob='a',
_Dx=v.op_SSA_Dx,
_Dy=v.op_SSA_Dy,
_x=v.op_SSA_x,
_y=v.op_SSA_y,
))
pp.append(v.op_SSA_pp)
elif el_name == 'SSA_ES1':
# SSA_ES1: drift 47.00244m
el.append(srwlib.SRWLOptD(
_L=v.op_SSA_ES1_L,
))
pp.append(v.op_SSA_ES1_pp)
elif el_name == 'ES1':
# ES1: watch 50.9m
pass
elif el_name == 'ES1_CRL':
# ES1_CRL: drift 50.9m
el.append(srwlib.SRWLOptD(
_L=v.op_ES1_CRL_L,
))
pp.append(v.op_ES1_CRL_pp)
elif el_name == 'CRL':
# CRL: crl 57.335m
el.append(srwlib.srwl_opt_setup_CRL(
_foc_plane=v.op_CRL_foc_plane,
_delta=v.op_CRL_delta,
_atten_len=v.op_CRL_atten_len,
_shape=v.op_CRL_shape,
_apert_h=v.op_CRL_apert_h,
_apert_v=v.op_CRL_apert_v,
_r_min=v.op_CRL_r_min,
_n=v.op_CRL_n,
_wall_thick=v.op_CRL_wall_thick,
_xc=v.op_CRL_x,
_yc=v.op_CRL_y,
))
pp.append(v.op_CRL_pp)
elif el_name == 'CRL_ES2':
# CRL_ES2: drift 57.335m
el.append(srwlib.SRWLOptD(
_L=v.op_CRL_ES2_L,
))
pp.append(v.op_CRL_ES2_pp)
elif el_name == 'ES2':
# ES2: watch 59.0m
pass
pp.append(v.op_fin_pp)
return srwlib.SRWLOptC(el, pp)
varParam = srwl_bl.srwl_uti_ext_options([
['name', 's', 'NSLS-II SMI beamline', 'simulation name'],
#---Data Folder
['fdir', 's', '', 'folder (directory) name for reading-in input and saving output data files'],
#---Electron Beam
['ebm_nm', 's', '', 'standard electron beam name'],
['ebm_nms', 's', '', 'standard electron beam name suffix: e.g. can be Day1, Final'],
['ebm_i', 'f', 0.5, 'electron beam current [A]'],
['ebm_e', 'f', 3.0, 'electron beam avarage energy [GeV]'],
['ebm_de', 'f', 0.0, 'electron beam average energy deviation [GeV]'],
['ebm_x', 'f', 0.0, 'electron beam initial average horizontal position [m]'],
['ebm_y', 'f', 0.0, 'electron beam initial average vertical position [m]'],
['ebm_xp', 'f', 0.0, 'electron beam initial average horizontal angle [rad]'],
['ebm_yp', 'f', 0.0, 'electron beam initial average vertical angle [rad]'],
['ebm_z', 'f', 0., 'electron beam initial average longitudinal position [m]'],
['ebm_dr', 'f', -1.4432500000000001, 'electron beam longitudinal drift [m] to be performed before a required calculation'],
['ebm_ens', 'f', 0.00089, 'electron beam relative energy spread'],
['ebm_emx', 'f', 9e-10, 'electron beam horizontal emittance [m]'],
['ebm_emy', 'f', 8e-12, 'electron beam vertical emittance [m]'],
# Definition of the beam through Twiss:
['ebm_betax', 'f', 20.85, 'horizontal beta-function [m]'],
['ebm_betay', 'f', 3.4, 'vertical beta-function [m]'],
['ebm_alphax', 'f', 0.0, 'horizontal alpha-function [rad]'],
['ebm_alphay', 'f', 0.0, 'vertical alpha-function [rad]'],
['ebm_etax', 'f', 0.0, 'horizontal dispersion function [m]'],
['ebm_etay', 'f', 0.0, 'vertical dispersion function [m]'],
['ebm_etaxp', 'f', 0.0, 'horizontal dispersion function derivative [rad]'],
['ebm_etayp', 'f', 0.0, 'vertical dispersion function derivative [rad]'],
#---Undulator
['und_bx', 'f', 0.0, 'undulator horizontal peak magnetic field [T]'],
['und_by', 'f', 0.955, 'undulator vertical peak magnetic field [T]'],
['und_phx', 'f', 0.0, 'initial phase of the horizontal magnetic field [rad]'],
['und_phy', 'f', 0.0, 'initial phase of the vertical magnetic field [rad]'],
['und_b2e', '', '', 'estimate undulator fundamental photon energy (in [eV]) for the amplitude of sinusoidal magnetic field defined by und_b or und_bx, und_by', 'store_true'],
['und_e2b', '', '', 'estimate undulator field amplitude (in [T]) for the photon energy defined by w_e', 'store_true'],
['und_per', 'f', 0.023, 'undulator period [m]'],
['und_len', 'f', 2.7945, 'undulator length [m]'],
['und_zc', 'f', 0.6, 'undulator center longitudinal position [m]'],
['und_sx', 'i', 1, 'undulator horizontal magnetic field symmetry vs longitudinal position'],
['und_sy', 'i', -1, 'undulator vertical magnetic field symmetry vs longitudinal position'],
['und_g', 'f', 6.72, 'undulator gap [mm] (assumes availability of magnetic measurement or simulation data)'],
['und_ph', 'f', 0.0, 'shift of magnet arrays [mm] for which the field should be set up'],
['und_mdir', 's', '', 'name of magnetic measurements sub-folder'],
['und_mfs', 's', '', 'name of magnetic measurements for different gaps summary file'],
#---Calculation Types
# Electron Trajectory
['tr', '', '', 'calculate electron trajectory', 'store_true'],
['tr_cti', 'f', 0.0, 'initial time moment (c*t) for electron trajectory calculation [m]'],
['tr_ctf', 'f', 0.0, 'final time moment (c*t) for electron trajectory calculation [m]'],
['tr_np', 'f', 10000, 'number of points for trajectory calculation'],
['tr_mag', 'i', 1, 'magnetic field to be used for trajectory calculation: 1- approximate, 2- accurate'],
['tr_fn', 's', 'res_trj.dat', 'file name for saving calculated trajectory data'],
['tr_pl', 's', '', 'plot the resulting trajectiry in graph(s): ""- dont plot, otherwise the string should list the trajectory components to plot'],
#Single-Electron Spectrum vs Photon Energy
['ss', '', '', 'calculate single-e spectrum vs photon energy', 'store_true'],
['ss_ei', 'f', 20000.0, 'initial photon energy [eV] for single-e spectrum vs photon energy calculation'],
['ss_ef', 'f', 20400.0, 'final photon energy [eV] for single-e spectrum vs photon energy calculation'],
['ss_ne', 'i', 10000, 'number of points vs photon energy for single-e spectrum vs photon energy calculation'],
['ss_x', 'f', 0.0, 'horizontal position [m] for single-e spectrum vs photon energy calculation'],
['ss_y', 'f', 0.0, 'vertical position [m] for single-e spectrum vs photon energy calculation'],
['ss_meth', 'i', 1, 'method to use for single-e spectrum vs photon energy calculation: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
['ss_prec', 'f', 0.01, 'relative precision for single-e spectrum vs photon energy calculation (nominal value is 0.01)'],
['ss_pol', 'i', 6, 'polarization component to extract after spectrum vs photon energy calculation: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['ss_mag', 'i', 1, 'magnetic field to be used for single-e spectrum vs photon energy calculation: 1- approximate, 2- accurate'],
['ss_ft', 's', 'f', 'presentation/domain: "f"- frequency (photon energy), "t"- time'],
['ss_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'],
['ss_fn', 's', 'res_spec_se.dat', 'file name for saving calculated single-e spectrum vs photon energy'],
['ss_pl', 's', '', 'plot the resulting single-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
#Multi-Electron Spectrum vs Photon Energy (taking into account e-beam emittance, energy spread and collection aperture size)
['sm', '', '', 'calculate multi-e spectrum vs photon energy', 'store_true'],
['sm_ei', 'f', 100.0, 'initial photon energy [eV] for multi-e spectrum vs photon energy calculation'],
['sm_ef', 'f', 20000.0, 'final photon energy [eV] for multi-e spectrum vs photon energy calculation'],
['sm_ne', 'i', 10000, 'number of points vs photon energy for multi-e spectrum vs photon energy calculation'],
['sm_x', 'f', 0.0, 'horizontal center position [m] for multi-e spectrum vs photon energy calculation'],
['sm_rx', 'f', 0.001, 'range of horizontal position / horizontal aperture size [m] for multi-e spectrum vs photon energy calculation'],
['sm_nx', 'i', 1, 'number of points vs horizontal position for multi-e spectrum vs photon energy calculation'],
['sm_y', 'f', 0.0, 'vertical center position [m] for multi-e spectrum vs photon energy calculation'],
['sm_ry', 'f', 0.001, 'range of vertical position / vertical aperture size [m] for multi-e spectrum vs photon energy calculation'],
['sm_ny', 'i', 1, 'number of points vs vertical position for multi-e spectrum vs photon energy calculation'],
['sm_mag', 'i', 1, 'magnetic field to be used for calculation of multi-e spectrum spectrum or intensity distribution: 1- approximate, 2- accurate'],
['sm_hi', 'i', 1, 'initial UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
['sm_hf', 'i', 15, 'final UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
['sm_prl', 'f', 1.0, 'longitudinal integration precision parameter for multi-e spectrum vs photon energy calculation'],
['sm_pra', 'f', 1.0, 'azimuthal integration precision parameter for multi-e spectrum vs photon energy calculation'],
['sm_meth', 'i', -1, 'method to use for spectrum vs photon energy calculation in case of arbitrary input magnetic field: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler", -1- dont use this accurate integration method (rather use approximate if possible)'],
['sm_prec', 'f', 0.01, 'relative precision for spectrum vs photon energy calculation in case of arbitrary input magnetic field (nominal value is 0.01)'],
['sm_nm', 'i', 1, 'number of macro-electrons for calculation of spectrum in case of arbitrary input magnetic field'],
['sm_na', 'i', 5, 'number of macro-electrons to average on each node at parallel (MPI-based) calculation of spectrum in case of arbitrary input magnetic field'],
['sm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons) for intermediate intensity at calculation of multi-electron spectrum in case of arbitrary input magnetic field'],
['sm_type', 'i', 1, 'calculate flux (=1) or flux per unit surface (=2)'],
['sm_pol', 'i', 6, 'polarization component to extract after calculation of multi-e flux or intensity: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['sm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
['sm_fn', 's', 'res_spec_me.dat', 'file name for saving calculated milti-e spectrum vs photon energy'],
['sm_pl', 's', '', 'plot the resulting spectrum-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
#to add options for the multi-e calculation from "accurate" magnetic field
#Power Density Distribution vs horizontal and vertical position
['pw', '', '', 'calculate SR power density distribution', 'store_true'],
['pw_x', 'f', 0.0, 'central horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_rx', 'f', 0.015, 'range of horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_nx', 'i', 100, 'number of points vs horizontal position for calculation of power density distribution'],
['pw_y', 'f', 0.0, 'central vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_ry', 'f', 0.015, 'range of vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_ny', 'i', 100, 'number of points vs vertical position for calculation of power density distribution'],
['pw_pr', 'f', 1.0, 'precision factor for calculation of power density distribution'],
['pw_meth', 'i', 1, 'power density computation method (1- "near field", 2- "far field")'],
['pw_zst', 'f', 0., 'initial longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'],
['pw_zfi', 'f', 0., 'final longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'],
['pw_mag', 'i', 1, 'magnetic field to be used for power density calculation: 1- approximate, 2- accurate'],
['pw_fn', 's', 'res_pow.dat', 'file name for saving calculated power density distribution'],
['pw_pl', 's', '', 'plot the resulting power density distribution in a graph: ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
#Single-Electron Intensity distribution vs horizontal and vertical position
['si', '', '', 'calculate single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position', 'store_true'],
#Single-Electron Wavefront Propagation
['ws', '', '', 'calculate single-electron (/ fully coherent) wavefront propagation', 'store_true'],
#Multi-Electron (partially-coherent) Wavefront Propagation
['wm', '', '', 'calculate multi-electron (/ partially coherent) wavefront propagation', 'store_true'],
['w_e', 'f', 20358.0, 'photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ef', 'f', -1.0, 'final photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ne', 'i', 1, 'number of points vs photon energy for calculation of intensity distribution'],
['w_x', 'f', 0.0, 'central horizontal position [m] for calculation of intensity distribution'],
['w_rx', 'f', 0.0004, 'range of horizontal position [m] for calculation of intensity distribution'],
['w_nx', 'i', 100, 'number of points vs horizontal position for calculation of intensity distribution'],
['w_y', 'f', 0.0, 'central vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ry', 'f', 0.0004, 'range of vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ny', 'i', 100, 'number of points vs vertical position for calculation of intensity distribution'],
['w_smpf', 'f', 1.5, 'sampling factor for calculation of intensity distribution vs horizontal and vertical position'],
['w_meth', 'i', 1, 'method to use for calculation of intensity distribution vs horizontal and vertical position: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
['w_prec', 'f', 0.01, 'relative precision for calculation of intensity distribution vs horizontal and vertical position'],
['w_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'],
['si_pol', 'i', 6, 'polarization component to extract after calculation of intensity distribution: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['si_type', 'i', 0, 'type of a characteristic to be extracted after calculation of intensity distribution: 0- Single-Electron Intensity, 1- Multi-Electron Intensity, 2- Single-Electron Flux, 3- Multi-Electron Flux, 4- Single-Electron Radiation Phase, 5- Re(E): Real part of Single-Electron Electric Field, 6- Im(E): Imaginary part of Single-Electron Electric Field, 7- Single-Electron Intensity, integrated over Time or Photon Energy'],
['w_mag', 'i', 1, 'magnetic field to be used for calculation of intensity distribution vs horizontal and vertical position: 1- approximate, 2- accurate'],
['si_fn', 's', 'res_int_se.dat', 'file name for saving calculated single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position'],
['si_pl', 's', '', 'plot the input intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
['ws_fni', 's', 'res_int_pr_se.dat', 'file name for saving propagated single-e intensity distribution vs horizontal and vertical position'],
['ws_pl', 's', '', 'plot the resulting intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
['wm_nm', 'i', 1000, 'number of macro-electrons (coherent wavefronts) for calculation of multi-electron wavefront propagation'],
['wm_na', 'i', 5, 'number of macro-electrons (coherent wavefronts) to average on each node for parallel (MPI-based) calculation of multi-electron wavefront propagation'],
['wm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons / coherent wavefronts) for intermediate intensity at multi-electron wavefront propagation calculation'],
['wm_ch', 'i', 0, 'type of a characteristic to be extracted after calculation of multi-electron wavefront propagation: #0- intensity (s0); 1- four Stokes components; 2- mutual intensity cut vs x; 3- mutual intensity cut vs y; 40- intensity(s0), mutual intensity cuts and degree of coherence vs X & Y'],
['wm_ap', 'i', 0, 'switch specifying representation of the resulting Stokes parameters: coordinate (0) or angular (1)'],
['wm_x0', 'f', 0, 'horizontal center position for mutual intensity cut calculation'],
['wm_y0', 'f', 0, 'vertical center position for mutual intensity cut calculation'],
['wm_ei', 'i', 0, 'integration over photon energy is required (1) or not (0); if the integration is required, the limits are taken from w_e, w_ef'],
['wm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
['wm_am', 'i', 0, 'multi-electron integration approximation method: 0- no approximation (use the standard 5D integration method), 1- integrate numerically only over e-beam energy spread and use convolution to treat transverse emittance'],
['wm_fni', 's', 'res_int_pr_me.dat', 'file name for saving propagated multi-e intensity distribution vs horizontal and vertical position'],
#to add options
['op_r', 'f', 20.0, 'longitudinal position of the first optical element [m]'],
# Former appParam:
['rs_type', 's', 'u', 'source type, (u) idealized undulator, (t), tabulated undulator, (m) multipole, (g) gaussian beam'],
#---Beamline optics:
# MOAT_1: crystal
['op_MOAT_1_hfn', 's', 'Si_heat204.dat', 'heightProfileFile'],
['op_MOAT_1_dim', 's', 'y', 'orientation'],
['op_MOAT_1_d_sp', 'f', 3.1355713563754857, 'dSpacing'],
['op_MOAT_1_psi0r', 'f', -2.3340005016580434e-06, 'psi0r'],
['op_MOAT_1_psi0i', 'f', 8.597903864165458e-09, 'psi0i'],
['op_MOAT_1_psiHr', 'f', -1.229445079930407e-06, 'psiHr'],
['op_MOAT_1_psiHi', 'f', 6.002829909616196e-09, 'psiHi'],
['op_MOAT_1_psiHBr', 'f', -1.229445079930407e-06, 'psiHBr'],
['op_MOAT_1_psiHBi', 'f', 6.002829909616196e-09, 'psiHBi'],
['op_MOAT_1_tc', 'f', 0.01, 'crystalThickness'],
['op_MOAT_1_ang_as', 'f', 0.0, 'asymmetryAngle'],
['op_MOAT_1_nvx', 'f', -0.0966554453405512, 'nvx'],
['op_MOAT_1_nvy', 'f', 0.9905675873988612, 'nvy'],
['op_MOAT_1_nvz', 'f', -0.097126616747518, 'nvz'],
['op_MOAT_1_tvx', 'f', -0.009432412528249381, 'tvx'],
['op_MOAT_1_tvy', 'f', 0.09666751923327799, 'tvy'],
['op_MOAT_1_ang', 'f', 0.0972799772892332, 'grazingAngle'],
['op_MOAT_1_amp_coef', 'f', 1.0, 'heightAmplification'],
# MOAT_1_MOAT_2: drift
['op_MOAT_1_MOAT_2_L', 'f', 0.04999999999999716, 'length'],
# MOAT_2: crystal
['op_MOAT_2_hfn', 's', 'None', 'heightProfileFile'],
['op_MOAT_2_dim', 's', 'x', 'orientation'],
['op_MOAT_2_d_sp', 'f', 3.1355713563754857, 'dSpacing'],
['op_MOAT_2_psi0r', 'f', -2.3340005016580434e-06, 'psi0r'],
['op_MOAT_2_psi0i', 'f', 8.597903864165458e-09, 'psi0i'],
['op_MOAT_2_psiHr', 'f', -1.229445079930407e-06, 'psiHr'],
['op_MOAT_2_psiHi', 'f', 6.002829909616196e-09, 'psiHi'],
['op_MOAT_2_psiHBr', 'f', -1.229445079930407e-06, 'psiHBr'],
['op_MOAT_2_psiHBi', 'f', 6.002829909616196e-09, 'psiHBi'],
['op_MOAT_2_tc', 'f', 0.01, 'crystalThickness'],
['op_MOAT_2_ang_as', 'f', 0.0, 'asymmetryAngle'],
['op_MOAT_2_nvx', 'f', 0.0966554453405512, 'nvx'],
['op_MOAT_2_nvy', 'f', 0.9905675873988612, 'nvy'],
['op_MOAT_2_nvz', 'f', -0.097126616747518, 'nvz'],
['op_MOAT_2_tvx', 'f', 0.009432412528249381, 'tvx'],
['op_MOAT_2_tvy', 'f', 0.09666751923327799, 'tvy'],
['op_MOAT_2_ang', 'f', 0.0972799772892332, 'grazingAngle'],
['op_MOAT_2_amp_coef', 'f', 1.0, 'heightAmplification'],
# MOAT_2_HFM: drift
['op_MOAT_2_HFM_L', 'f', 2.892440000000004, 'length'],
# HFM: sphericalMirror
['op_HFM_hfn', 's', 'HFM_Rh7.6km.dat', 'heightProfileFile'],
['op_HFM_dim', 's', 'x', 'orientation'],
['op_HFM_r', 'f', 7100.0, 'radius'],
['op_HFM_size_tang', 'f', 0.5, 'tangentialSize'],
['op_HFM_size_sag', 'f', 0.04, 'sagittalSize'],
['op_HFM_ang', 'f', 0.003141592653998904, 'grazingAngle'],
['op_HFM_nvx', 'f', 0.9999950652018569, 'normalVectorX'],
['op_HFM_nvy', 'f', 0.0, 'normalVectorY'],
['op_HFM_nvz', 'f', -0.003141587486288672, 'normalVectorZ'],
['op_HFM_tvx', 'f', 0.003141587486288672, 'tangentialVectorX'],
['op_HFM_tvy', 'f', 0.0, 'tangentialVectorY'],
['op_HFM_amp_coef', 'f', 1.0, 'heightAmplification'],
['op_HFM_x', 'f', 0.0, 'horizontalOffset'],
['op_HFM_y', 'f', 0.0, 'verticalOffset'],
# HFM_VFM: drift
['op_HFM_VFM_L', 'f', 3.4199999999999946, 'length'],
# VFM: sphericalMirror
['op_VFM_hfn', 's', 'VFM_Rh5.4km.dat', 'heightProfileFile'],
['op_VFM_dim', 's', 'y', 'orientation'],
['op_VFM_r', 'f', 6100.0, 'radius'],
['op_VFM_size_tang', 'f', 0.4, 'tangentialSize'],
['op_VFM_size_sag', 'f', 0.04, 'sagittalSize'],
['op_VFM_ang', 'f', 0.003141592653998904, 'grazingAngle'],
['op_VFM_nvx', 'f', 0.0, 'normalVectorX'],
['op_VFM_nvy', 'f', 0.9999950652018569, 'normalVectorY'],
['op_VFM_nvz', 'f', -0.003141587486288672, 'normalVectorZ'],
['op_VFM_tvx', 'f', 0.0, 'tangentialVectorX'],
['op_VFM_tvy', 'f', 0.003141587486288672, 'tangentialVectorY'],
['op_VFM_amp_coef', 'f', 1.0, 'heightAmplification'],
['op_VFM_x', 'f', 0.0, 'horizontalOffset'],
['op_VFM_y', 'f', 0.0, 'verticalOffset'],
# VFM_VDM: drift
['op_VFM_VDM_L', 'f', 0.6975600000000028, 'length'],
# VDM: sphericalMirror
['op_VDM_hfn', 's', 'VDM.dat', 'heightProfileFile'],
['op_VDM_dim', 's', 'y', 'orientation'],
['op_VDM_r', 'f', 300000.0, 'radius'],
['op_VDM_size_tang', 'f', 0.4, 'tangentialSize'],
['op_VDM_size_sag', 'f', 0.04, 'sagittalSize'],
['op_VDM_ang', 'f', 0.0031415926, 'grazingAngle'],
['op_VDM_nvx', 'f', 0.0, 'normalVectorX'],
['op_VDM_nvy', 'f', 0.9999950652020265, 'normalVectorY'],
['op_VDM_nvz', 'f', -0.003141587432290035, 'normalVectorZ'],
['op_VDM_tvx', 'f', 0.0, 'tangentialVectorX'],
['op_VDM_tvy', 'f', 0.003141587432290035, 'tangentialVectorY'],
['op_VDM_amp_coef', 'f', 1.0, 'heightAmplification'],
['op_VDM_x', 'f', 0.0, 'horizontalOffset'],
['op_VDM_y', 'f', 0.0, 'verticalOffset'],
# VDM_SSA: drift
['op_VDM_SSA_L', 'f', 8.00244, 'length'],
# SSA: aperture
['op_SSA_shape', 's', 'r', 'shape'],
['op_SSA_Dx', 'f', 0.0004, 'horizontalSize'],
['op_SSA_Dy', 'f', 0.0004, 'verticalSize'],
['op_SSA_x', 'f', 0.0, 'horizontalOffset'],
['op_SSA_y', 'f', 0.0, 'verticalOffset'],
# SSA_ES1: drift
['op_SSA_ES1_L', 'f', 3.8975599999999986, 'length'],
# ES1_CRL: drift
['op_ES1_CRL_L', 'f', 6.435000000000002, 'length'],
# CRL: crl
['op_CRL_foc_plane', 'f', 3, 'focalPlane'],
['op_CRL_delta', 'f', 8.211821e-07, 'refractiveIndex'],
['op_CRL_atten_len', 'f', 0.028541, 'attenuationLength'],
['op_CRL_shape', 'f', 1, 'shape'],
['op_CRL_apert_h', 'f', 0.001, 'horizontalApertureSize'],
['op_CRL_apert_v', 'f', 0.001, 'verticalApertureSize'],
['op_CRL_r_min', 'f', 5e-05, 'tipRadius'],
['op_CRL_wall_thick', 'f', 3.24e-05, 'tipWallThickness'],
['op_CRL_x', 'f', 0.0, 'horizontalOffset'],
['op_CRL_y', 'f', 0.0, 'verticalOffset'],
['op_CRL_n', 'i', 23, 'numberOfLenses'],
# CRL_ES2: drift
['op_CRL_ES2_L', 'f', 1.6649999999999991, 'length'],
#---Propagation parameters
['op_MOAT_1_pp', 'f', [0, 0, 1.0, 0, 0, 3.0, 1.0, 3.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'MOAT_1'],
['op_MOAT_1_MOAT_2_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'MOAT_1_MOAT_2'],
['op_MOAT_2_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'MOAT_2'],
['op_MOAT_2_HFM_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'MOAT_2_HFM'],
['op_HFM_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'HFM'],
['op_HFM_VFM_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'HFM_VFM'],
['op_VFM_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'VFM'],
['op_VFM_VDM_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'VFM_VDM'],
['op_VDM_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'VDM'],
['op_VDM_SSA_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'VDM_SSA'],
['op_SSA_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'SSA'],
['op_SSA_ES1_pp', 'f', [0, 0, 1.0, 1, 0, 0.5, 5.0, 0.5, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'SSA_ES1'],
['op_ES1_CRL_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'ES1_CRL'],
['op_CRL_pp', 'f', [0, 0, 1.0, 2, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'CRL'],
['op_CRL_ES2_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'CRL_ES2'],
['op_fin_pp', 'f', [0, 0, 1.0, 0, 0, 0.4, 3.0, 0.4, 3.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'final post-propagation (resize) parameters'],
#[ 0]: Auto-Resize (1) or not (0) Before propagation
#[ 1]: Auto-Resize (1) or not (0) After propagation
#[ 2]: Relative Precision for propagation with Auto-Resizing (1. is nominal)
#[ 3]: Allow (1) or not (0) for semi-analytical treatment of the quadratic (leading) phase terms at the propagation
#[ 4]: Do any Resizing on Fourier side, using FFT, (1) or not (0)
#[ 5]: Horizontal Range modification factor at Resizing (1. means no modification)
#[ 6]: Horizontal Resolution modification factor at Resizing
#[ 7]: Vertical Range modification factor at Resizing
#[ 8]: Vertical Resolution modification factor at Resizing
#[ 9]: Type of wavefront Shift before Resizing (not yet implemented)
#[10]: New Horizontal wavefront Center position after Shift (not yet implemented)
#[11]: New Vertical wavefront Center position after Shift (not yet implemented)
#[12]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Horizontal Coordinate
#[13]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Vertical Coordinate
#[14]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Longitudinal Coordinate
#[15]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Horizontal Coordinate
#[16]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Vertical Coordinate
])
def main():
    """Parse the script options, assemble the beamline optics, and run
    every enabled SRW calculation (spectra, power density, intensity,
    trajectory, and single-electron wavefront propagation)."""
    v = srwl_bl.srwl_uti_parse_options(varParam, use_sys_argv=True)
    op = set_optics(v)
    # Switch on each report type and choose its plot axes.
    for flag, plot in (
        ('ss', 'e'),    # single-e spectrum vs photon energy
        ('sm', 'e'),    # multi-e spectrum vs photon energy
        ('pw', 'xy'),   # power density distribution
        ('si', 'xy'),   # single-e intensity distribution
        ('tr', 'xz'),   # electron trajectory
        ('ws', 'xy'),   # single-e wavefront propagation
    ):
        setattr(v, flag, True)
        setattr(v, flag + '_pl', plot)
    mag = None
    if v.rs_type == 'm':
        # Multipole source: build an approximate magnetic field container.
        mag = srwlib.SRWLMagFldC()
        mag.arXc.append(0)
        mag.arYc.append(0)
        mag.arMagFld.append(srwlib.SRWLMagFldM(v.mp_field, v.mp_order, v.mp_distribution, v.mp_len))
        mag.arZc.append(v.mp_zc)
    srwl_bl.SRWLBeamline(_name=v.name, _mag_approx=mag).calc_all(v, op)
main()
import os
# When running inside IPython, sys.argv carries IPython's own invocation
# arguments, which would confuse option parsing below — drop them.
try:
    __IPYTHON__  # only defined inside an IPython session; NameError otherwise
    import sys
    del sys.argv[1:]
except NameError:
    # Plain CPython interpreter: keep sys.argv intact for option parsing.
    pass
import srwl_bl
import srwlib
import srwlpy
import srwl_uti_smp
def set_optics(v=None):
    """Build the SRW optical container for this beamline.

    Iterates over the ordered element identifiers and, for each, appends
    the corresponding optical element to ``el`` and its propagation
    parameters (taken from the matching ``v.op_*_pp`` option) to ``pp``.
    Height-profile error surfaces are appended right after the element
    they belong to, with a fixed unit propagation-parameter list.
    Watchpoint entries ('ES1', 'ES2') contribute no optics.

    :param v: parsed option namespace (attributes ``op_*``) produced by
        ``srwl_bl.srwl_uti_parse_options``; despite the ``None`` default,
        a valid namespace is required.
    :return: ``srwlib.SRWLOptC`` holding the elements and propagation
        parameters, including the final post-propagation entry.
    """
    el = []
    pp = []
    # Ordered element identifiers; order defines the beamline layout.
    names = ['MOAT_1', 'MOAT_1_MOAT_2', 'MOAT_2', 'MOAT_2_HFM', 'HFM', 'HFM_VFM', 'VFM', 'VFM_VDM', 'VDM', 'VDM_SSA', 'SSA', 'SSA_ES1', 'ES1', 'ES1_CRL', 'CRL', 'CRL_ES2', 'ES2']
    for el_name in names:
        if el_name == 'MOAT_1':
            # MOAT_1: crystal 31.94m
            crystal = srwlib.SRWLOptCryst(
                _d_sp=v.op_MOAT_1_d_sp,
                _psi0r=v.op_MOAT_1_psi0r,
                _psi0i=v.op_MOAT_1_psi0i,
                _psi_hr=v.op_MOAT_1_psiHr,
                _psi_hi=v.op_MOAT_1_psiHi,
                _psi_hbr=v.op_MOAT_1_psiHBr,
                _psi_hbi=v.op_MOAT_1_psiHBi,
                _tc=v.op_MOAT_1_tc,
                _ang_as=v.op_MOAT_1_ang_as,
            )
            crystal.set_orient(
                _nvx=v.op_MOAT_1_nvx,
                _nvy=v.op_MOAT_1_nvy,
                _nvz=v.op_MOAT_1_nvz,
                _tvx=v.op_MOAT_1_tvx,
                _tvy=v.op_MOAT_1_tvy,
            )
            el.append(crystal)
            pp.append(v.op_MOAT_1_pp)
            # Thermal-deformation height profile applied after the crystal.
            mirror_file = v.op_MOAT_1_hfn
            assert os.path.isfile(mirror_file), \
                'Missing input file {}, required by MOAT_1 beamline element'.format(mirror_file)
            el.append(srwlib.srwl_opt_setup_surf_height_1d(
                srwlib.srwl_uti_read_data_cols(mirror_file, "\t", 0, 1),
                _dim=v.op_MOAT_1_dim,
                _ang=abs(v.op_MOAT_1_ang),
                _amp_coef=v.op_MOAT_1_amp_coef,
            ))
            pp.append([0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0])
        elif el_name == 'MOAT_1_MOAT_2':
            # MOAT_1_MOAT_2: drift 31.94m
            el.append(srwlib.SRWLOptD(
                _L=v.op_MOAT_1_MOAT_2_L,
            ))
            pp.append(v.op_MOAT_1_MOAT_2_pp)
        elif el_name == 'MOAT_2':
            # MOAT_2: crystal 31.99m
            crystal = srwlib.SRWLOptCryst(
                _d_sp=v.op_MOAT_2_d_sp,
                _psi0r=v.op_MOAT_2_psi0r,
                _psi0i=v.op_MOAT_2_psi0i,
                _psi_hr=v.op_MOAT_2_psiHr,
                _psi_hi=v.op_MOAT_2_psiHi,
                _psi_hbr=v.op_MOAT_2_psiHBr,
                _psi_hbi=v.op_MOAT_2_psiHBi,
                _tc=v.op_MOAT_2_tc,
                _ang_as=v.op_MOAT_2_ang_as,
            )
            crystal.set_orient(
                _nvx=v.op_MOAT_2_nvx,
                _nvy=v.op_MOAT_2_nvy,
                _nvz=v.op_MOAT_2_nvz,
                _tvx=v.op_MOAT_2_tvx,
                _tvy=v.op_MOAT_2_tvy,
            )
            el.append(crystal)
            pp.append(v.op_MOAT_2_pp)
        elif el_name == 'MOAT_2_HFM':
            # MOAT_2_HFM: drift 31.99m
            el.append(srwlib.SRWLOptD(
                _L=v.op_MOAT_2_HFM_L,
            ))
            pp.append(v.op_MOAT_2_HFM_pp)
        elif el_name == 'HFM':
            # HFM: sphericalMirror 34.88244m
            el.append(srwlib.SRWLOptMirSph(
                _r=v.op_HFM_r,
                _size_tang=v.op_HFM_size_tang,
                _size_sag=v.op_HFM_size_sag,
                _nvx=v.op_HFM_nvx,
                _nvy=v.op_HFM_nvy,
                _nvz=v.op_HFM_nvz,
                _tvx=v.op_HFM_tvx,
                _tvy=v.op_HFM_tvy,
                _x=v.op_HFM_x,
                _y=v.op_HFM_y,
            ))
            pp.append(v.op_HFM_pp)
            # Measured figure-error height profile for the HFM.
            mirror_file = v.op_HFM_hfn
            assert os.path.isfile(mirror_file), \
                'Missing input file {}, required by HFM beamline element'.format(mirror_file)
            el.append(srwlib.srwl_opt_setup_surf_height_1d(
                srwlib.srwl_uti_read_data_cols(mirror_file, "\t", 0, 1),
                _dim=v.op_HFM_dim,
                _ang=abs(v.op_HFM_ang),
                _amp_coef=v.op_HFM_amp_coef,
            ))
            pp.append([0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0])
        elif el_name == 'HFM_VFM':
            # HFM_VFM: drift 34.88244m
            el.append(srwlib.SRWLOptD(
                _L=v.op_HFM_VFM_L,
            ))
            pp.append(v.op_HFM_VFM_pp)
        elif el_name == 'VFM':
            # VFM: sphericalMirror 38.30244m
            el.append(srwlib.SRWLOptMirSph(
                _r=v.op_VFM_r,
                _size_tang=v.op_VFM_size_tang,
                _size_sag=v.op_VFM_size_sag,
                _nvx=v.op_VFM_nvx,
                _nvy=v.op_VFM_nvy,
                _nvz=v.op_VFM_nvz,
                _tvx=v.op_VFM_tvx,
                _tvy=v.op_VFM_tvy,
                _x=v.op_VFM_x,
                _y=v.op_VFM_y,
            ))
            pp.append(v.op_VFM_pp)
            # Measured figure-error height profile for the VFM.
            mirror_file = v.op_VFM_hfn
            assert os.path.isfile(mirror_file), \
                'Missing input file {}, required by VFM beamline element'.format(mirror_file)
            el.append(srwlib.srwl_opt_setup_surf_height_1d(
                srwlib.srwl_uti_read_data_cols(mirror_file, "\t", 0, 1),
                _dim=v.op_VFM_dim,
                _ang=abs(v.op_VFM_ang),
                _amp_coef=v.op_VFM_amp_coef,
            ))
            pp.append([0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0])
        elif el_name == 'VFM_VDM':
            # VFM_VDM: drift 38.30244m
            el.append(srwlib.SRWLOptD(
                _L=v.op_VFM_VDM_L,
            ))
            pp.append(v.op_VFM_VDM_pp)
        elif el_name == 'VDM':
            # VDM: sphericalMirror 39.0m
            el.append(srwlib.SRWLOptMirSph(
                _r=v.op_VDM_r,
                _size_tang=v.op_VDM_size_tang,
                _size_sag=v.op_VDM_size_sag,
                _nvx=v.op_VDM_nvx,
                _nvy=v.op_VDM_nvy,
                _nvz=v.op_VDM_nvz,
                _tvx=v.op_VDM_tvx,
                _tvy=v.op_VDM_tvy,
                _x=v.op_VDM_x,
                _y=v.op_VDM_y,
            ))
            pp.append(v.op_VDM_pp)
            # Measured figure-error height profile for the VDM.
            mirror_file = v.op_VDM_hfn
            assert os.path.isfile(mirror_file), \
                'Missing input file {}, required by VDM beamline element'.format(mirror_file)
            el.append(srwlib.srwl_opt_setup_surf_height_1d(
                srwlib.srwl_uti_read_data_cols(mirror_file, "\t", 0, 1),
                _dim=v.op_VDM_dim,
                _ang=abs(v.op_VDM_ang),
                _amp_coef=v.op_VDM_amp_coef,
            ))
            pp.append([0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0])
        elif el_name == 'VDM_SSA':
            # VDM_SSA: drift 39.0m
            el.append(srwlib.SRWLOptD(
                _L=v.op_VDM_SSA_L,
            ))
            pp.append(v.op_VDM_SSA_pp)
        elif el_name == 'SSA':
            # SSA: aperture 47.00244m
            el.append(srwlib.SRWLOptA(
                _shape=v.op_SSA_shape,
                _ap_or_ob='a',
                _Dx=v.op_SSA_Dx,
                _Dy=v.op_SSA_Dy,
                _x=v.op_SSA_x,
                _y=v.op_SSA_y,
            ))
            pp.append(v.op_SSA_pp)
        elif el_name == 'SSA_ES1':
            # SSA_ES1: drift 47.00244m
            el.append(srwlib.SRWLOptD(
                _L=v.op_SSA_ES1_L,
            ))
            pp.append(v.op_SSA_ES1_pp)
        elif el_name == 'ES1':
            # ES1: watch 50.9m — watchpoint only, no optical element
            pass
        elif el_name == 'ES1_CRL':
            # ES1_CRL: drift 50.9m
            el.append(srwlib.SRWLOptD(
                _L=v.op_ES1_CRL_L,
            ))
            pp.append(v.op_ES1_CRL_pp)
        elif el_name == 'CRL':
            # CRL: crl 57.335m
            el.append(srwlib.srwl_opt_setup_CRL(
                _foc_plane=v.op_CRL_foc_plane,
                _delta=v.op_CRL_delta,
                _atten_len=v.op_CRL_atten_len,
                _shape=v.op_CRL_shape,
                _apert_h=v.op_CRL_apert_h,
                _apert_v=v.op_CRL_apert_v,
                _r_min=v.op_CRL_r_min,
                _n=v.op_CRL_n,
                _wall_thick=v.op_CRL_wall_thick,
                _xc=v.op_CRL_x,
                _yc=v.op_CRL_y,
            ))
            pp.append(v.op_CRL_pp)
        elif el_name == 'CRL_ES2':
            # CRL_ES2: drift 57.335m
            el.append(srwlib.SRWLOptD(
                _L=v.op_CRL_ES2_L,
            ))
            pp.append(v.op_CRL_ES2_pp)
        elif el_name == 'ES2':
            # ES2: watch 59.0m — watchpoint only, no optical element
            pass
    # Final post-propagation (resize) parameters close the pp list.
    pp.append(v.op_fin_pp)
    return srwlib.SRWLOptC(el, pp)
varParam = srwl_bl.srwl_uti_ext_options([
['name', 's', 'NSLS-II SMI beamline', 'simulation name'],
#---Data Folder
['fdir', 's', '', 'folder (directory) name for reading-in input and saving output data files'],
#---Electron Beam
['ebm_nm', 's', '', 'standard electron beam name'],
['ebm_nms', 's', '', 'standard electron beam name suffix: e.g. can be Day1, Final'],
['ebm_i', 'f', 0.5, 'electron beam current [A]'],
['ebm_e', 'f', 3.0, 'electron beam avarage energy [GeV]'],
['ebm_de', 'f', 0.0, 'electron beam average energy deviation [GeV]'],
['ebm_x', 'f', 0.0, 'electron beam initial average horizontal position [m]'],
['ebm_y', 'f', 0.0, 'electron beam initial average vertical position [m]'],
['ebm_xp', 'f', 0.0, 'electron beam initial average horizontal angle [rad]'],
['ebm_yp', 'f', 0.0, 'electron beam initial average vertical angle [rad]'],
['ebm_z', 'f', 0., 'electron beam initial average longitudinal position [m]'],
['ebm_dr', 'f', -1.4432500000000001, 'electron beam longitudinal drift [m] to be performed before a required calculation'],
['ebm_ens', 'f', 0.00089, 'electron beam relative energy spread'],
['ebm_emx', 'f', 9e-10, 'electron beam horizontal emittance [m]'],
['ebm_emy', 'f', 8e-12, 'electron beam vertical emittance [m]'],
# Definition of the beam through Twiss:
['ebm_betax', 'f', 20.85, 'horizontal beta-function [m]'],
['ebm_betay', 'f', 3.4, 'vertical beta-function [m]'],
['ebm_alphax', 'f', 0.0, 'horizontal alpha-function [rad]'],
['ebm_alphay', 'f', 0.0, 'vertical alpha-function [rad]'],
['ebm_etax', 'f', 0.0, 'horizontal dispersion function [m]'],
['ebm_etay', 'f', 0.0, 'vertical dispersion function [m]'],
['ebm_etaxp', 'f', 0.0, 'horizontal dispersion function derivative [rad]'],
['ebm_etayp', 'f', 0.0, 'vertical dispersion function derivative [rad]'],
#---Undulator
['und_bx', 'f', 0.0, 'undulator horizontal peak magnetic field [T]'],
['und_by', 'f', 0.955, 'undulator vertical peak magnetic field [T]'],
['und_phx', 'f', 0.0, 'initial phase of the horizontal magnetic field [rad]'],
['und_phy', 'f', 0.0, 'initial phase of the vertical magnetic field [rad]'],
['und_b2e', '', '', 'estimate undulator fundamental photon energy (in [eV]) for the amplitude of sinusoidal magnetic field defined by und_b or und_bx, und_by', 'store_true'],
['und_e2b', '', '', 'estimate undulator field amplitude (in [T]) for the photon energy defined by w_e', 'store_true'],
['und_per', 'f', 0.023, 'undulator period [m]'],
['und_len', 'f', 2.7945, 'undulator length [m]'],
['und_zc', 'f', 0.6, 'undulator center longitudinal position [m]'],
['und_sx', 'i', 1, 'undulator horizontal magnetic field symmetry vs longitudinal position'],
['und_sy', 'i', -1, 'undulator vertical magnetic field symmetry vs longitudinal position'],
['und_g', 'f', 6.72, 'undulator gap [mm] (assumes availability of magnetic measurement or simulation data)'],
['und_ph', 'f', 0.0, 'shift of magnet arrays [mm] for which the field should be set up'],
['und_mdir', 's', '', 'name of magnetic measurements sub-folder'],
['und_mfs', 's', '', 'name of magnetic measurements for different gaps summary file'],
#---Calculation Types
# Electron Trajectory
['tr', '', '', 'calculate electron trajectory', 'store_true'],
['tr_cti', 'f', 0.0, 'initial time moment (c*t) for electron trajectory calculation [m]'],
['tr_ctf', 'f', 0.0, 'final time moment (c*t) for electron trajectory calculation [m]'],
['tr_np', 'f', 10000, 'number of points for trajectory calculation'],
['tr_mag', 'i', 1, 'magnetic field to be used for trajectory calculation: 1- approximate, 2- accurate'],
['tr_fn', 's', 'res_trj.dat', 'file name for saving calculated trajectory data'],
['tr_pl', 's', '', 'plot the resulting trajectiry in graph(s): ""- dont plot, otherwise the string should list the trajectory components to plot'],
#Single-Electron Spectrum vs Photon Energy
['ss', '', '', 'calculate single-e spectrum vs photon energy', 'store_true'],
['ss_ei', 'f', 20000.0, 'initial photon energy [eV] for single-e spectrum vs photon energy calculation'],
['ss_ef', 'f', 20400.0, 'final photon energy [eV] for single-e spectrum vs photon energy calculation'],
['ss_ne', 'i', 10000, 'number of points vs photon energy for single-e spectrum vs photon energy calculation'],
['ss_x', 'f', 0.0, 'horizontal position [m] for single-e spectrum vs photon energy calculation'],
['ss_y', 'f', 0.0, 'vertical position [m] for single-e spectrum vs photon energy calculation'],
['ss_meth', 'i', 1, 'method to use for single-e spectrum vs photon energy calculation: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
['ss_prec', 'f', 0.01, 'relative precision for single-e spectrum vs photon energy calculation (nominal value is 0.01)'],
['ss_pol', 'i', 6, 'polarization component to extract after spectrum vs photon energy calculation: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['ss_mag', 'i', 1, 'magnetic field to be used for single-e spectrum vs photon energy calculation: 1- approximate, 2- accurate'],
['ss_ft', 's', 'f', 'presentation/domain: "f"- frequency (photon energy), "t"- time'],
['ss_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'],
['ss_fn', 's', 'res_spec_se.dat', 'file name for saving calculated single-e spectrum vs photon energy'],
['ss_pl', 's', '', 'plot the resulting single-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
#Multi-Electron Spectrum vs Photon Energy (taking into account e-beam emittance, energy spread and collection aperture size)
['sm', '', '', 'calculate multi-e spectrum vs photon energy', 'store_true'],
['sm_ei', 'f', 100.0, 'initial photon energy [eV] for multi-e spectrum vs photon energy calculation'],
['sm_ef', 'f', 20000.0, 'final photon energy [eV] for multi-e spectrum vs photon energy calculation'],
['sm_ne', 'i', 10000, 'number of points vs photon energy for multi-e spectrum vs photon energy calculation'],
['sm_x', 'f', 0.0, 'horizontal center position [m] for multi-e spectrum vs photon energy calculation'],
['sm_rx', 'f', 0.001, 'range of horizontal position / horizontal aperture size [m] for multi-e spectrum vs photon energy calculation'],
['sm_nx', 'i', 1, 'number of points vs horizontal position for multi-e spectrum vs photon energy calculation'],
['sm_y', 'f', 0.0, 'vertical center position [m] for multi-e spectrum vs photon energy calculation'],
['sm_ry', 'f', 0.001, 'range of vertical position / vertical aperture size [m] for multi-e spectrum vs photon energy calculation'],
['sm_ny', 'i', 1, 'number of points vs vertical position for multi-e spectrum vs photon energy calculation'],
['sm_mag', 'i', 1, 'magnetic field to be used for calculation of multi-e spectrum spectrum or intensity distribution: 1- approximate, 2- accurate'],
['sm_hi', 'i', 1, 'initial UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
['sm_hf', 'i', 15, 'final UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
['sm_prl', 'f', 1.0, 'longitudinal integration precision parameter for multi-e spectrum vs photon energy calculation'],
['sm_pra', 'f', 1.0, 'azimuthal integration precision parameter for multi-e spectrum vs photon energy calculation'],
['sm_meth', 'i', -1, 'method to use for spectrum vs photon energy calculation in case of arbitrary input magnetic field: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler", -1- dont use this accurate integration method (rather use approximate if possible)'],
['sm_prec', 'f', 0.01, 'relative precision for spectrum vs photon energy calculation in case of arbitrary input magnetic field (nominal value is 0.01)'],
['sm_nm', 'i', 1, 'number of macro-electrons for calculation of spectrum in case of arbitrary input magnetic field'],
['sm_na', 'i', 5, 'number of macro-electrons to average on each node at parallel (MPI-based) calculation of spectrum in case of arbitrary input magnetic field'],
['sm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons) for intermediate intensity at calculation of multi-electron spectrum in case of arbitrary input magnetic field'],
['sm_type', 'i', 1, 'calculate flux (=1) or flux per unit surface (=2)'],
['sm_pol', 'i', 6, 'polarization component to extract after calculation of multi-e flux or intensity: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['sm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
['sm_fn', 's', 'res_spec_me.dat', 'file name for saving calculated milti-e spectrum vs photon energy'],
['sm_pl', 's', '', 'plot the resulting spectrum-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
#to add options for the multi-e calculation from "accurate" magnetic field
#Power Density Distribution vs horizontal and vertical position
['pw', '', '', 'calculate SR power density distribution', 'store_true'],
['pw_x', 'f', 0.0, 'central horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_rx', 'f', 0.015, 'range of horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_nx', 'i', 100, 'number of points vs horizontal position for calculation of power density distribution'],
['pw_y', 'f', 0.0, 'central vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_ry', 'f', 0.015, 'range of vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_ny', 'i', 100, 'number of points vs vertical position for calculation of power density distribution'],
['pw_pr', 'f', 1.0, 'precision factor for calculation of power density distribution'],
['pw_meth', 'i', 1, 'power density computation method (1- "near field", 2- "far field")'],
['pw_zst', 'f', 0., 'initial longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'],
['pw_zfi', 'f', 0., 'final longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'],
['pw_mag', 'i', 1, 'magnetic field to be used for power density calculation: 1- approximate, 2- accurate'],
['pw_fn', 's', 'res_pow.dat', 'file name for saving calculated power density distribution'],
['pw_pl', 's', '', 'plot the resulting power density distribution in a graph: ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
#Single-Electron Intensity distribution vs horizontal and vertical position
['si', '', '', 'calculate single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position', 'store_true'],
#Single-Electron Wavefront Propagation
['ws', '', '', 'calculate single-electron (/ fully coherent) wavefront propagation', 'store_true'],
#Multi-Electron (partially-coherent) Wavefront Propagation
['wm', '', '', 'calculate multi-electron (/ partially coherent) wavefront propagation', 'store_true'],
['w_e', 'f', 20358.0, 'photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ef', 'f', -1.0, 'final photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ne', 'i', 1, 'number of points vs photon energy for calculation of intensity distribution'],
['w_x', 'f', 0.0, 'central horizontal position [m] for calculation of intensity distribution'],
['w_rx', 'f', 0.0004, 'range of horizontal position [m] for calculation of intensity distribution'],
['w_nx', 'i', 100, 'number of points vs horizontal position for calculation of intensity distribution'],
['w_y', 'f', 0.0, 'central vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ry', 'f', 0.0004, 'range of vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ny', 'i', 100, 'number of points vs vertical position for calculation of intensity distribution'],
['w_smpf', 'f', 1.5, 'sampling factor for calculation of intensity distribution vs horizontal and vertical position'],
['w_meth', 'i', 1, 'method to use for calculation of intensity distribution vs horizontal and vertical position: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
['w_prec', 'f', 0.01, 'relative precision for calculation of intensity distribution vs horizontal and vertical position'],
['w_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'],
['si_pol', 'i', 6, 'polarization component to extract after calculation of intensity distribution: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['si_type', 'i', 0, 'type of a characteristic to be extracted after calculation of intensity distribution: 0- Single-Electron Intensity, 1- Multi-Electron Intensity, 2- Single-Electron Flux, 3- Multi-Electron Flux, 4- Single-Electron Radiation Phase, 5- Re(E): Real part of Single-Electron Electric Field, 6- Im(E): Imaginary part of Single-Electron Electric Field, 7- Single-Electron Intensity, integrated over Time or Photon Energy'],
['w_mag', 'i', 1, 'magnetic field to be used for calculation of intensity distribution vs horizontal and vertical position: 1- approximate, 2- accurate'],
['si_fn', 's', 'res_int_se.dat', 'file name for saving calculated single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position'],
['si_pl', 's', '', 'plot the input intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
['ws_fni', 's', 'res_int_pr_se.dat', 'file name for saving propagated single-e intensity distribution vs horizontal and vertical position'],
['ws_pl', 's', '', 'plot the resulting intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
['wm_nm', 'i', 1000, 'number of macro-electrons (coherent wavefronts) for calculation of multi-electron wavefront propagation'],
['wm_na', 'i', 5, 'number of macro-electrons (coherent wavefronts) to average on each node for parallel (MPI-based) calculation of multi-electron wavefront propagation'],
['wm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons / coherent wavefronts) for intermediate intensity at multi-electron wavefront propagation calculation'],
['wm_ch', 'i', 0, 'type of a characteristic to be extracted after calculation of multi-electron wavefront propagation: #0- intensity (s0); 1- four Stokes components; 2- mutual intensity cut vs x; 3- mutual intensity cut vs y; 40- intensity(s0), mutual intensity cuts and degree of coherence vs X & Y'],
['wm_ap', 'i', 0, 'switch specifying representation of the resulting Stokes parameters: coordinate (0) or angular (1)'],
['wm_x0', 'f', 0, 'horizontal center position for mutual intensity cut calculation'],
['wm_y0', 'f', 0, 'vertical center position for mutual intensity cut calculation'],
['wm_ei', 'i', 0, 'integration over photon energy is required (1) or not (0); if the integration is required, the limits are taken from w_e, w_ef'],
['wm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
['wm_am', 'i', 0, 'multi-electron integration approximation method: 0- no approximation (use the standard 5D integration method), 1- integrate numerically only over e-beam energy spread and use convolution to treat transverse emittance'],
['wm_fni', 's', 'res_int_pr_me.dat', 'file name for saving propagated multi-e intensity distribution vs horizontal and vertical position'],
#to add options
['op_r', 'f', 20.0, 'longitudinal position of the first optical element [m]'],
# Former appParam:
['rs_type', 's', 'u', 'source type, (u) idealized undulator, (t), tabulated undulator, (m) multipole, (g) gaussian beam'],
#---Beamline optics:
# MOAT_1: crystal
['op_MOAT_1_hfn', 's', 'Si_heat204.dat', 'heightProfileFile'],
['op_MOAT_1_dim', 's', 'y', 'orientation'],
['op_MOAT_1_d_sp', 'f', 3.1355713563754857, 'dSpacing'],
['op_MOAT_1_psi0r', 'f', -2.3340005016580434e-06, 'psi0r'],
['op_MOAT_1_psi0i', 'f', 8.597903864165458e-09, 'psi0i'],
['op_MOAT_1_psiHr', 'f', -1.229445079930407e-06, 'psiHr'],
['op_MOAT_1_psiHi', 'f', 6.002829909616196e-09, 'psiHi'],
['op_MOAT_1_psiHBr', 'f', -1.229445079930407e-06, 'psiHBr'],
['op_MOAT_1_psiHBi', 'f', 6.002829909616196e-09, 'psiHBi'],
['op_MOAT_1_tc', 'f', 0.01, 'crystalThickness'],
['op_MOAT_1_ang_as', 'f', 0.0, 'asymmetryAngle'],
['op_MOAT_1_nvx', 'f', -0.0966554453405512, 'nvx'],
['op_MOAT_1_nvy', 'f', 0.9905675873988612, 'nvy'],
['op_MOAT_1_nvz', 'f', -0.097126616747518, 'nvz'],
['op_MOAT_1_tvx', 'f', -0.009432412528249381, 'tvx'],
['op_MOAT_1_tvy', 'f', 0.09666751923327799, 'tvy'],
['op_MOAT_1_ang', 'f', 0.0972799772892332, 'grazingAngle'],
['op_MOAT_1_amp_coef', 'f', 1.0, 'heightAmplification'],
# MOAT_1_MOAT_2: drift
['op_MOAT_1_MOAT_2_L', 'f', 0.04999999999999716, 'length'],
# MOAT_2: crystal
['op_MOAT_2_hfn', 's', 'None', 'heightProfileFile'],
['op_MOAT_2_dim', 's', 'x', 'orientation'],
['op_MOAT_2_d_sp', 'f', 3.1355713563754857, 'dSpacing'],
['op_MOAT_2_psi0r', 'f', -2.3340005016580434e-06, 'psi0r'],
['op_MOAT_2_psi0i', 'f', 8.597903864165458e-09, 'psi0i'],
['op_MOAT_2_psiHr', 'f', -1.229445079930407e-06, 'psiHr'],
['op_MOAT_2_psiHi', 'f', 6.002829909616196e-09, 'psiHi'],
['op_MOAT_2_psiHBr', 'f', -1.229445079930407e-06, 'psiHBr'],
['op_MOAT_2_psiHBi', 'f', 6.002829909616196e-09, 'psiHBi'],
['op_MOAT_2_tc', 'f', 0.01, 'crystalThickness'],
['op_MOAT_2_ang_as', 'f', 0.0, 'asymmetryAngle'],
['op_MOAT_2_nvx', 'f', 0.0966554453405512, 'nvx'],
['op_MOAT_2_nvy', 'f', 0.9905675873988612, 'nvy'],
['op_MOAT_2_nvz', 'f', -0.097126616747518, 'nvz'],
['op_MOAT_2_tvx', 'f', 0.009432412528249381, 'tvx'],
['op_MOAT_2_tvy', 'f', 0.09666751923327799, 'tvy'],
['op_MOAT_2_ang', 'f', 0.0972799772892332, 'grazingAngle'],
['op_MOAT_2_amp_coef', 'f', 1.0, 'heightAmplification'],
# MOAT_2_HFM: drift
['op_MOAT_2_HFM_L', 'f', 2.892440000000004, 'length'],
# HFM: sphericalMirror
['op_HFM_hfn', 's', 'HFM_Rh7.6km.dat', 'heightProfileFile'],
['op_HFM_dim', 's', 'x', 'orientation'],
['op_HFM_r', 'f', 7100.0, 'radius'],
['op_HFM_size_tang', 'f', 0.5, 'tangentialSize'],
['op_HFM_size_sag', 'f', 0.04, 'sagittalSize'],
['op_HFM_ang', 'f', 0.003141592653998904, 'grazingAngle'],
['op_HFM_nvx', 'f', 0.9999950652018569, 'normalVectorX'],
['op_HFM_nvy', 'f', 0.0, 'normalVectorY'],
['op_HFM_nvz', 'f', -0.003141587486288672, 'normalVectorZ'],
['op_HFM_tvx', 'f', 0.003141587486288672, 'tangentialVectorX'],
['op_HFM_tvy', 'f', 0.0, 'tangentialVectorY'],
['op_HFM_amp_coef', 'f', 1.0, 'heightAmplification'],
['op_HFM_x', 'f', 0.0, 'horizontalOffset'],
['op_HFM_y', 'f', 0.0, 'verticalOffset'],
# HFM_VFM: drift
['op_HFM_VFM_L', 'f', 3.4199999999999946, 'length'],
# VFM: sphericalMirror
['op_VFM_hfn', 's', 'VFM_Rh5.4km.dat', 'heightProfileFile'],
['op_VFM_dim', 's', 'y', 'orientation'],
['op_VFM_r', 'f', 6100.0, 'radius'],
['op_VFM_size_tang', 'f', 0.4, 'tangentialSize'],
['op_VFM_size_sag', 'f', 0.04, 'sagittalSize'],
['op_VFM_ang', 'f', 0.003141592653998904, 'grazingAngle'],
['op_VFM_nvx', 'f', 0.0, 'normalVectorX'],
['op_VFM_nvy', 'f', 0.9999950652018569, 'normalVectorY'],
['op_VFM_nvz', 'f', -0.003141587486288672, 'normalVectorZ'],
['op_VFM_tvx', 'f', 0.0, 'tangentialVectorX'],
['op_VFM_tvy', 'f', 0.003141587486288672, 'tangentialVectorY'],
['op_VFM_amp_coef', 'f', 1.0, 'heightAmplification'],
['op_VFM_x', 'f', 0.0, 'horizontalOffset'],
['op_VFM_y', 'f', 0.0, 'verticalOffset'],
# VFM_VDM: drift
['op_VFM_VDM_L', 'f', 0.6975600000000028, 'length'],
# VDM: sphericalMirror
['op_VDM_hfn', 's', 'VDM.dat', 'heightProfileFile'],
['op_VDM_dim', 's', 'y', 'orientation'],
['op_VDM_r', 'f', 300000.0, 'radius'],
['op_VDM_size_tang', 'f', 0.4, 'tangentialSize'],
['op_VDM_size_sag', 'f', 0.04, 'sagittalSize'],
['op_VDM_ang', 'f', 0.0031415926, 'grazingAngle'],
['op_VDM_nvx', 'f', 0.0, 'normalVectorX'],
['op_VDM_nvy', 'f', 0.9999950652020265, 'normalVectorY'],
['op_VDM_nvz', 'f', -0.003141587432290035, 'normalVectorZ'],
['op_VDM_tvx', 'f', 0.0, 'tangentialVectorX'],
['op_VDM_tvy', 'f', 0.003141587432290035, 'tangentialVectorY'],
['op_VDM_amp_coef', 'f', 1.0, 'heightAmplification'],
['op_VDM_x', 'f', 0.0, 'horizontalOffset'],
['op_VDM_y', 'f', 0.0, 'verticalOffset'],
# VDM_SSA: drift
['op_VDM_SSA_L', 'f', 8.00244, 'length'],
# SSA: aperture
['op_SSA_shape', 's', 'r', 'shape'],
['op_SSA_Dx', 'f', 0.0004, 'horizontalSize'],
['op_SSA_Dy', 'f', 0.0004, 'verticalSize'],
['op_SSA_x', 'f', 0.0, 'horizontalOffset'],
['op_SSA_y', 'f', 0.0, 'verticalOffset'],
# SSA_ES1: drift
['op_SSA_ES1_L', 'f', 3.8975599999999986, 'length'],
# ES1_CRL: drift
['op_ES1_CRL_L', 'f', 6.435000000000002, 'length'],
# CRL: crl
['op_CRL_foc_plane', 'f', 3, 'focalPlane'],
['op_CRL_delta', 'f', 8.211821e-07, 'refractiveIndex'],
['op_CRL_atten_len', 'f', 0.028541, 'attenuationLength'],
['op_CRL_shape', 'f', 1, 'shape'],
['op_CRL_apert_h', 'f', 0.001, 'horizontalApertureSize'],
['op_CRL_apert_v', 'f', 0.001, 'verticalApertureSize'],
['op_CRL_r_min', 'f', 5e-05, 'tipRadius'],
['op_CRL_wall_thick', 'f', 3.24e-05, 'tipWallThickness'],
['op_CRL_x', 'f', 0.0, 'horizontalOffset'],
['op_CRL_y', 'f', 0.0, 'verticalOffset'],
['op_CRL_n', 'i', 23, 'numberOfLenses'],
# CRL_ES2: drift
['op_CRL_ES2_L', 'f', 1.6649999999999991, 'length'],
#---Propagation parameters
['op_MOAT_1_pp', 'f', [0, 0, 1.0, 0, 0, 3.0, 1.0, 3.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'MOAT_1'],
['op_MOAT_1_MOAT_2_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'MOAT_1_MOAT_2'],
['op_MOAT_2_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'MOAT_2'],
['op_MOAT_2_HFM_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'MOAT_2_HFM'],
['op_HFM_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'HFM'],
['op_HFM_VFM_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'HFM_VFM'],
['op_VFM_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'VFM'],
['op_VFM_VDM_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'VFM_VDM'],
['op_VDM_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'VDM'],
['op_VDM_SSA_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'VDM_SSA'],
['op_SSA_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'SSA'],
['op_SSA_ES1_pp', 'f', [0, 0, 1.0, 1, 0, 0.5, 5.0, 0.5, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'SSA_ES1'],
['op_ES1_CRL_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'ES1_CRL'],
['op_CRL_pp', 'f', [0, 0, 1.0, 2, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'CRL'],
['op_CRL_ES2_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'CRL_ES2'],
['op_fin_pp', 'f', [0, 0, 1.0, 0, 0, 0.4, 3.0, 0.4, 3.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'final post-propagation (resize) parameters'],
#[ 0]: Auto-Resize (1) or not (0) Before propagation
#[ 1]: Auto-Resize (1) or not (0) After propagation
#[ 2]: Relative Precision for propagation with Auto-Resizing (1. is nominal)
#[ 3]: Allow (1) or not (0) for semi-analytical treatment of the quadratic (leading) phase terms at the propagation
#[ 4]: Do any Resizing on Fourier side, using FFT, (1) or not (0)
#[ 5]: Horizontal Range modification factor at Resizing (1. means no modification)
#[ 6]: Horizontal Resolution modification factor at Resizing
#[ 7]: Vertical Range modification factor at Resizing
#[ 8]: Vertical Resolution modification factor at Resizing
#[ 9]: Type of wavefront Shift before Resizing (not yet implemented)
#[10]: New Horizontal wavefront Center position after Shift (not yet implemented)
#[11]: New Vertical wavefront Center position after Shift (not yet implemented)
#[12]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Horizontal Coordinate
#[13]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Vertical Coordinate
#[14]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Longitudinal Coordinate
#[15]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Horizontal Coordinate
#[16]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Vertical Coordinate
])
def main():
    """Parse the beamline options, build the optics, and run all requested
    SRW reports (single-/multi-e spectra, power density, intensity,
    trajectory, propagated wavefront)."""
    v = srwl_bl.srwl_uti_parse_options(varParam, use_sys_argv=True)
    op = set_optics(v)
    # Enable each report type together with its plot mode (the meaning of the
    # *_pl strings is documented in the corresponding varParam entries).
    for report, plot in (('ss', 'e'), ('sm', 'e'), ('pw', 'xy'),
                         ('si', 'xy'), ('tr', 'xz'), ('ws', 'xy')):
        setattr(v, report, True)
        setattr(v, report + '_pl', plot)
    mag = None
    if v.rs_type == 'm':
        # Multipole source: build an approximate magnetic-field container.
        mag = srwlib.SRWLMagFldC()
        mag.arXc.append(0)
        mag.arYc.append(0)
        mag.arMagFld.append(srwlib.SRWLMagFldM(
            v.mp_field, v.mp_order, v.mp_distribution, v.mp_len))
        mag.arZc.append(v.mp_zc)
    srwl_bl.SRWLBeamline(_name=v.name, _mag_approx=mag).calc_all(v, op)


main()
from __future__ import unicode_literals
import posixpath
from uuid import uuid4
from sqlalchemy.dialects.postgresql import JSONB, UUID
from indico.core.config import config
from indico.core.db import db
from indico.core.storage import StoredFileMixin
from indico.util.fs import secure_filename
from indico.util.string import format_repr, return_ascii, strict_unicode
class File(StoredFileMixin, db.Model):
    """A stored file that can later be claimed by some other object."""

    __tablename__ = 'files'
    __table_args__ = {'schema': 'indico'}

    id = db.Column(
        db.Integer,
        primary_key=True
    )
    #: Stable public identifier, exposed instead of the numeric PK.
    uuid = db.Column(
        UUID(as_uuid=True),
        index=True,
        unique=True,
        nullable=False,
        default=lambda: unicode(uuid4())
    )
    #: Whether the file has been associated with something.
    #: Unclaimed files may be deleted automatically after a while.
    claimed = db.Column(
        db.Boolean,
        nullable=False,
        default=False
    )
    #: Metadata that may be set when the file gets claimed.
    meta = db.Column(
        JSONB,
        nullable=False,
        default=dict  # callable: a literal {} default would be shared between rows
    )

    # relationship backrefs:
    # - editing_revision_files (EditingRevisionFile.file)

    def claim(self, **meta):
        """Mark the file as claimed by some object it's linked to.

        By claiming a file the linked object takes ownership of it so the
        file will not be automatically deleted.

        :param meta: Arbitrary metadata to store along with the claim.
        """
        self.claimed = True
        # XXX: Should we check for conflicts with existing metadata in case the
        # file was already claimed?
        self.meta = meta

    def _build_storage_path(self):
        # Build `<context...>/<id>-<filename>` inside the attachment storage.
        path_segments = list(map(strict_unicode, self.__context))
        self.assign_id()  # make sure self.id is populated before using it
        filename = '{}-{}'.format(self.id, secure_filename(self.filename, 'file'))
        path = posixpath.join(*(path_segments + [filename]))
        return config.ATTACHMENT_STORAGE, path

    def save(self, context, data):
        # `context` is only needed while building the storage path; stash it
        # temporarily so `_build_storage_path` (called by the mixin) sees it.
        self.__context = context
        super(File, self).save(data)
        del self.__context

    @return_ascii
    def __repr__(self):
        return format_repr(self, 'id', 'uuid', 'content_type', _text=self.filename)
from __future__ import unicode_literals
import posixpath
from uuid import uuid4
from sqlalchemy.dialects.postgresql import JSONB, UUID
from indico.core.config import config
from indico.core.db import db
from indico.core.storage import StoredFileMixin
from indico.util.fs import secure_filename
from indico.util.string import format_repr, return_ascii, strict_unicode
class File(StoredFileMixin, db.Model):
    """A stored file that can later be claimed by some other object."""

    __tablename__ = 'files'
    __table_args__ = {'schema': 'indico'}

    id = db.Column(
        db.Integer,
        primary_key=True
    )
    #: Stable public identifier, exposed instead of the numeric PK.
    uuid = db.Column(
        UUID(as_uuid=True),
        index=True,
        unique=True,
        nullable=False,
        default=lambda: unicode(uuid4())
    )
    #: Whether the file has been associated with something.
    #: Unclaimed files may be deleted automatically after a while.
    claimed = db.Column(
        db.Boolean,
        nullable=False,
        default=False
    )
    #: Metadata that may be set when the file gets claimed.
    meta = db.Column(
        JSONB,
        nullable=False,
        default=dict  # callable: a literal {} default would be shared between rows
    )

    # relationship backrefs:
    # - editing_revision_files (EditingRevisionFile.file)

    def claim(self, **meta):
        """Mark the file as claimed by some object it's linked to.

        By claiming a file the linked object takes ownership of it so the
        file will not be automatically deleted.

        :param meta: Arbitrary metadata to store along with the claim.
        """
        self.claimed = True
        # XXX: Should we check for conflicts with existing metadata in case the
        # file was already claimed?
        self.meta = meta

    def _build_storage_path(self):
        # Build `<context...>/<id>-<filename>` inside the attachment storage.
        path_segments = list(map(strict_unicode, self.__context))
        self.assign_id()  # make sure self.id is populated before using it
        filename = '{}-{}'.format(self.id, secure_filename(self.filename, 'file'))
        path = posixpath.join(*(path_segments + [filename]))
        return config.ATTACHMENT_STORAGE, path

    def save(self, context, data):
        # `context` is only needed while building the storage path; stash it
        # temporarily so `_build_storage_path` (called by the mixin) sees it.
        self.__context = context
        super(File, self).save(data)
        del self.__context

    @return_ascii
    def __repr__(self):
        return format_repr(self, 'id', 'uuid', 'content_type', _text=self.filename)
import unittest
from openprocurement.archivarius.tenders.tests.base import BaseTenderArchivariusWebTest
class TenderArchivariusResourceTest(BaseTenderArchivariusWebTest):
    """Functional tests for the tender dump/delete (archivarius) endpoints."""

    def _assert_error_response(self, response, status, errors):
        # Shared checks for JSON error responses.
        self.assertEqual(response.status, status)
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['status'], 'error')
        self.assertEqual(response.json['errors'], errors)

    def _assert_data_response(self, response):
        # Shared checks for successful JSON responses carrying a `data` key.
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        self.assertIn('data', response.json)

    def test_dump_tender_invalid(self):
        """Dumping an unknown tender 404s; dumping without permission 403s."""
        response = self.app.get('/tenders/some_id/dump', status=404)
        self._assert_error_response(response, '404 Not Found', [
            {u'description': u'Not Found', u'location': u'url', u'name': u'tender_id'}
        ])
        response = self.app.get('/tenders/{}/dump'.format(self.tender_id), status=403)
        self._assert_error_response(response, '403 Forbidden', [
            {u'description': u'Forbidden', u'location': u'url', u'name': u'permission'}
        ])

    def test_dump_tender(self):
        """An archivarius user can dump an existing tender."""
        self.app.authorization = ('Basic', ('archivarius', ''))
        response = self.app.get('/tenders/{}/dump'.format(self.tender_id))
        self._assert_data_response(response)

    def test_delete_tender_invalid(self):
        """Deleting an unknown tender 404s; deleting without permission 403s."""
        response = self.app.delete('/tenders/some_id/dump', status=404)
        self._assert_error_response(response, '404 Not Found', [
            {u'description': u'Not Found', u'location': u'url', u'name': u'tender_id'}
        ])
        response = self.app.delete('/tenders/{}/dump'.format(self.tender_id), status=403)
        self._assert_error_response(response, '403 Forbidden', [
            {u'description': u'Forbidden', u'location': u'url', u'name': u'permission'}
        ])

    def test_delete_tender(self):
        """Deleting (archiving) a tender makes subsequent dumps return 410 Gone."""
        self.app.authorization = ('Basic', ('archivarius', ''))
        response = self.app.delete('/tenders/{}/dump'.format(self.tender_id))
        self._assert_data_response(response)
        response = self.app.get('/tenders/{}/dump'.format(self.tender_id), status=410)
        self._assert_error_response(response, '410 Gone', [
            {u'description': u'Archived', u'location': u'url', u'name': u'tender_id'}
        ])
        # Recreate a tender (presumably to restore fixture state — confirm).
        self.app.authorization = ('Basic', ('broker', ''))
        self.create_tender()
def suite():
    """Build the test suite.

    Uses ``TestLoader.loadTestsFromTestCase`` instead of the deprecated
    ``unittest.makeSuite`` (removed in Python 3.13).
    """
    suite = unittest.TestSuite()
    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TenderArchivariusResourceTest))
    return suite
if __name__ == '__main__':
    # Run the module's suite() when executed directly.
    unittest.main(defaultTest='suite')
from openprocurement.archivarius.tenders.tests.base import BaseTenderArchivariusWebTest
class TenderArchivariusResourceTest(BaseTenderArchivariusWebTest):
    """Functional tests for the tender dump/delete (archivarius) endpoints."""

    def test_dump_tender_invalid(self):
        """Dumping an unknown tender 404s; dumping without permission 403s."""
        response = self.app.get('/tenders/some_id/dump', status=404)
        self.assertEqual(response.status, '404 Not Found')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['status'], 'error')
        self.assertEqual(response.json['errors'], [
            {u'description': u'Not Found', u'location': u'url', u'name': u'tender_id'}
        ])
        # Same endpoint on a real tender, but without the archivarius role.
        response = self.app.get('/tenders/{}/dump'.format(self.tender_id), status=403)
        self.assertEqual(response.status, '403 Forbidden')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['status'], 'error')
        self.assertEqual(response.json['errors'], [
            {u'description': u'Forbidden', u'location': u'url', u'name': u'permission'}
        ])

    def test_dump_tender(self):
        """An archivarius user can dump an existing tender."""
        self.app.authorization = ('Basic', ('archivarius', ''))
        response = self.app.get('/tenders/{}/dump'.format(self.tender_id))
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        self.assertIn('data', response.json)

    def test_delete_tender_invalid(self):
        """Deleting an unknown tender 404s; deleting without permission 403s."""
        response = self.app.delete('/tenders/some_id/dump', status=404)
        self.assertEqual(response.status, '404 Not Found')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['status'], 'error')
        self.assertEqual(response.json['errors'], [
            {u'description': u'Not Found', u'location': u'url', u'name': u'tender_id'}
        ])
        response = self.app.delete('/tenders/{}/dump'.format(self.tender_id), status=403)
        self.assertEqual(response.status, '403 Forbidden')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['status'], 'error')
        self.assertEqual(response.json['errors'], [
            {u'description': u'Forbidden', u'location': u'url', u'name': u'permission'}
        ])

    def test_delete_tender(self):
        """Deleting (archiving) a tender makes subsequent dumps return 410 Gone."""
        self.app.authorization = ('Basic', ('archivarius', ''))
        response = self.app.delete('/tenders/{}/dump'.format(self.tender_id))
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        self.assertIn('data', response.json)
        # Once archived, the tender is no longer dumpable.
        response = self.app.get('/tenders/{}/dump'.format(self.tender_id), status=410)
        self.assertEqual(response.status, '410 Gone')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['status'], 'error')
        self.assertEqual(response.json['errors'], [
            {u'description': u'Archived', u'location': u'url', u'name': u'tender_id'}
        ])
        # Recreate a tender (presumably to restore fixture state — confirm).
        self.app.authorization = ('Basic', ('broker', ''))
        self.create_tender()
def suite():
    """Build the test suite.

    Uses ``TestLoader.loadTestsFromTestCase`` instead of the deprecated
    ``unittest.makeSuite`` (removed in Python 3.13).
    """
    suite = unittest.TestSuite()
    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TenderArchivariusResourceTest))
    return suite
if __name__ == '__main__':
    # Run the module's suite() when executed directly.
    unittest.main(defaultTest='suite')
from __future__ import absolute_import
from oci._vendor import requests # noqa: F401
from oci._vendor import six
from oci import retry # noqa: F401
from oci.base_client import BaseClient
from oci.config import get_config_value_or_default, validate_config
from oci.signer import Signer
from oci.util import Sentinel
from .models import data_flow_type_mapping
missing = Sentinel("Missing")
class DataFlowClient(object):
"""
Use the Data Flow APIs to run any Apache Spark application at any scale without deploying or managing any infrastructure.
"""
def __init__(self, config, **kwargs):
"""
Creates a new service client
:param dict config:
Configuration keys and values as per `SDK and Tool Configuration <https://docs.cloud.oracle.com/Content/API/Concepts/sdkconfig.htm>`__.
The :py:meth:`~oci.config.from_file` method can be used to load configuration from a file. Alternatively, a ``dict`` can be passed. You can validate_config
the dict using :py:meth:`~oci.config.validate_config`
:param str service_endpoint: (optional)
The endpoint of the service to call using this client. For example ``https://iaas.us-ashburn-1.oraclecloud.com``. If this keyword argument is
not provided then it will be derived using the region in the config parameter. You should only provide this keyword argument if you have an explicit
need to specify a service endpoint.
:param timeout: (optional)
The connection and read timeouts for the client. The default values are connection timeout 10 seconds and read timeout 60 seconds. This keyword argument can be provided
as a single float, in which case the value provided is used for both the read and connection timeouts, or as a tuple of two floats. If
a tuple is provided then the first value is used as the connection timeout and the second value as the read timeout.
:type timeout: float or tuple(float, float)
:param signer: (optional)
The signer to use when signing requests made by the service client. The default is to use a :py:class:`~oci.signer.Signer` based on the values
provided in the config parameter.
One use case for this parameter is for `Instance Principals authentication <https://docs.cloud.oracle.com/Content/Identity/Tasks/callingservicesfrominstances.htm>`__
by passing an instance of :py:class:`~oci.auth.signers.InstancePrincipalsSecurityTokenSigner` as the value for this keyword argument
:type signer: :py:class:`~oci.signer.AbstractBaseSigner`
:param obj retry_strategy: (optional)
A retry strategy to apply to all calls made by this service client (i.e. at the client level). There is no retry strategy applied by default.
Retry strategies can also be applied at the operation level by passing a ``retry_strategy`` keyword argument as part of calling the operation.
Any value provided at the operation level will override whatever is specified at the client level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
"""
validate_config(config, signer=kwargs.get('signer'))
if 'signer' in kwargs:
signer = kwargs['signer']
else:
signer = Signer(
tenancy=config["tenancy"],
user=config["user"],
fingerprint=config["fingerprint"],
private_key_file_location=config.get("key_file"),
pass_phrase=get_config_value_or_default(config, "pass_phrase"),
private_key_content=config.get("key_content")
)
base_client_init_kwargs = {
'regional_client': True,
'service_endpoint': kwargs.get('service_endpoint'),
'timeout': kwargs.get('timeout'),
'base_path': '/20200129',
'service_endpoint_template': 'https://dataflow.{region}.oci.{secondLevelDomain}',
'skip_deserialization': kwargs.get('skip_deserialization', False)
}
self.base_client = BaseClient("data_flow", config, signer, data_flow_type_mapping, **base_client_init_kwargs)
self.retry_strategy = kwargs.get('retry_strategy')
def change_application_compartment(self, application_id, change_application_compartment_details, **kwargs):
"""
Moves an application into a different compartment. When provided, If-Match is checked against ETag values of the resource.
Associated resources, like runs, will not be automatically moved.
:param str application_id: (required)
The unique ID for an application.
:param ChangeApplicationCompartmentDetails change_application_compartment_details: (required)
Details for changing an application's compartment.
:param str opc_request_id: (optional)
Unique identifier for the request. If provided, the returned request ID will include this value.
Otherwise, a random request ID will be generated by the service.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource,
set the `if-match` parameter to the value of the etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or server error
without risk of executing that same action again. Retry tokens expire after 24 hours,
but can be invalidated before then due to conflicting operations.
For example, if a resource has been deleted and purged from the system, then a retry of the original creation request may be rejected.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/applications/{applicationId}/actions/changeCompartment"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"if_match",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"change_application_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"applicationId": application_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"if-match": kwargs.get("if_match", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_application_compartment_details)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_application_compartment_details)
def change_run_compartment(self, run_id, change_run_compartment_details, **kwargs):
"""
Moves a run into a different compartment. When provided, If-Match is checked against ETag
values of the resource. Associated resources, like historical metrics, will not be
automatically moved. The run must be in a terminal state (CANCELED, FAILED, SUCCEEDED) in
order for it to be moved to a different compartment
:param str run_id: (required)
The unique ID for the run
:param ChangeRunCompartmentDetails change_run_compartment_details: (required)
Details for changing a run's compartment.
:param str opc_request_id: (optional)
Unique identifier for the request. If provided, the returned request ID will include this value.
Otherwise, a random request ID will be generated by the service.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource,
set the `if-match` parameter to the value of the etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or server error
without risk of executing that same action again. Retry tokens expire after 24 hours,
but can be invalidated before then due to conflicting operations.
For example, if a resource has been deleted and purged from the system, then a retry of the original creation request may be rejected.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/runs/{runId}/actions/changeCompartment"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"if_match",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"change_run_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"runId": run_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"if-match": kwargs.get("if_match", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_run_compartment_details)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_run_compartment_details)
def create_application(self, create_application_details, **kwargs):
    """
    Creates an application.

    :param CreateApplicationDetails create_application_details: (required)
        Details to create an application.
    :param str opc_retry_token: (optional)
        Token that makes this request safely retryable: a retry carrying the
        same token is recognized by the service and not executed twice.
    :param str opc_request_id: (optional)
        Client-supplied identifier included in the returned request ID.
    :param obj retry_strategy: (optional)
        Per-call retry strategy overriding any client-level one; pass an
        instance of :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.
    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_flow.models.Application`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/applications"
    method = "POST"

    # Reject any kwargs this operation does not understand.
    expected_kwargs = [
        "retry_strategy",
        "opc_retry_token",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if unknown:
        raise ValueError(
            "create_application got unknown kwargs: {!r}".format(unknown))

    # Assemble headers, dropping entries the caller did not supply.
    candidate_headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(candidate_headers)
                     if v is not missing and v is not None}

    # A per-call strategy takes precedence over the client-level default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            header_params=header_params,
            body=create_application_details,
            response_type="Application")

    # Retrying path: inject a retry token unless retries are explicitly disabled.
    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_retry_token_if_needed(header_params)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        header_params=header_params,
        body=create_application_details,
        response_type="Application")
def create_run(self, create_run_details, **kwargs):
    """
    Creates a run for an application.

    :param CreateRunDetails create_run_details: (required)
        Details for creating a run of an application.
    :param str opc_retry_token: (optional)
        Token that makes this request safely retryable: a retry carrying the
        same token is recognized by the service and not executed twice.
    :param str opc_request_id: (optional)
        Client-supplied identifier included in the returned request ID.
    :param obj retry_strategy: (optional)
        Per-call retry strategy overriding any client-level one; pass an
        instance of :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.
    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_flow.models.Run`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/runs"
    method = "POST"

    # Reject any kwargs this operation does not understand.
    expected_kwargs = [
        "retry_strategy",
        "opc_retry_token",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if unknown:
        raise ValueError(
            "create_run got unknown kwargs: {!r}".format(unknown))

    # Assemble headers, dropping entries the caller did not supply.
    candidate_headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(candidate_headers)
                     if v is not missing and v is not None}

    # A per-call strategy takes precedence over the client-level default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            header_params=header_params,
            body=create_run_details,
            response_type="Run")

    # Retrying path: inject a retry token unless retries are explicitly disabled.
    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_retry_token_if_needed(header_params)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        header_params=header_params,
        body=create_run_details,
        response_type="Run")
def delete_application(self, application_id, **kwargs):
    """
    Deletes an application using an `applicationId`.

    :param str application_id: (required)
        The unique ID for an application.
    :param str opc_request_id: (optional)
        Client-supplied identifier included in the returned request ID.
    :param str if_match: (optional)
        Etag value for optimistic concurrency control; the delete proceeds
        only when it matches the resource's current etag.
    :param obj retry_strategy: (optional)
        Per-call retry strategy overriding any client-level one; pass an
        instance of :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/applications/{applicationId}"
    method = "DELETE"

    # Reject any kwargs this operation does not understand.
    expected_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "if_match"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if unknown:
        raise ValueError(
            "delete_application got unknown kwargs: {!r}".format(unknown))

    # Path parameters must be present and non-blank.
    path_params = {
        "applicationId": application_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Assemble headers, dropping entries the caller did not supply.
    candidate_headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(candidate_headers)
                     if v is not missing and v is not None}

    # A per-call strategy takes precedence over the client-level default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params)

    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params)
def delete_run(self, run_id, **kwargs):
    """
    Cancels the specified run if it has not already completed or was previously cancelled.
    If a run is in progress, the executing job will be killed.

    :param str run_id: (required)
        The unique ID for the run.
    :param str opc_request_id: (optional)
        Client-supplied identifier included in the returned request ID.
    :param str if_match: (optional)
        Etag value for optimistic concurrency control; the delete proceeds
        only when it matches the resource's current etag.
    :param obj retry_strategy: (optional)
        Per-call retry strategy overriding any client-level one; pass an
        instance of :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/runs/{runId}"
    method = "DELETE"

    # Reject any kwargs this operation does not understand.
    expected_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "if_match"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if unknown:
        raise ValueError(
            "delete_run got unknown kwargs: {!r}".format(unknown))

    # Path parameters must be present and non-blank.
    path_params = {
        "runId": run_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Assemble headers, dropping entries the caller did not supply.
    candidate_headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(candidate_headers)
                     if v is not missing and v is not None}

    # A per-call strategy takes precedence over the client-level default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params)

    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params)
def get_application(self, application_id, **kwargs):
    """
    Retrieves an application using an `applicationId`.

    :param str application_id: (required)
        The unique ID for an application.
    :param str opc_request_id: (optional)
        Client-supplied identifier included in the returned request ID.
    :param obj retry_strategy: (optional)
        Per-call retry strategy overriding any client-level one; pass an
        instance of :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.
    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_flow.models.Application`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/applications/{applicationId}"
    method = "GET"

    # Reject any kwargs this operation does not understand.
    expected_kwargs = [
        "retry_strategy",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if unknown:
        raise ValueError(
            "get_application got unknown kwargs: {!r}".format(unknown))

    # Path parameters must be present and non-blank.
    path_params = {
        "applicationId": application_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Assemble headers, dropping entries the caller did not supply.
    candidate_headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(candidate_headers)
                     if v is not missing and v is not None}

    # A per-call strategy takes precedence over the client-level default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="Application")

    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="Application")
def get_run(self, run_id, **kwargs):
    """
    Retrieves the run for the specified `runId`.

    :param str run_id: (required)
        The unique ID for the run.
    :param str opc_request_id: (optional)
        Client-supplied identifier included in the returned request ID.
    :param obj retry_strategy: (optional)
        Per-call retry strategy overriding any client-level one; pass an
        instance of :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.
    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_flow.models.Run`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/runs/{runId}"
    method = "GET"

    # Reject any kwargs this operation does not understand.
    expected_kwargs = [
        "retry_strategy",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if unknown:
        raise ValueError(
            "get_run got unknown kwargs: {!r}".format(unknown))

    # Path parameters must be present and non-blank.
    path_params = {
        "runId": run_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Assemble headers, dropping entries the caller did not supply.
    candidate_headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(candidate_headers)
                     if v is not missing and v is not None}

    # A per-call strategy takes precedence over the client-level default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="Run")

    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="Run")
def get_run_log(self, run_id, name, **kwargs):
    """
    Retrieves the content of a run log.

    :param str run_id: (required)
        The unique ID for the run.
    :param str name: (required)
        The name of the log. Avoid entering confidential information.
    :param str opc_request_id: (optional)
        Client-supplied identifier included in the returned request ID.
    :param obj retry_strategy: (optional)
        Per-call retry strategy overriding any client-level one; pass an
        instance of :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.
    :return: A :class:`~oci.response.Response` object with data of type stream
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/runs/{runId}/logs/{name}"
    method = "GET"

    # Reject any kwargs this operation does not understand.
    expected_kwargs = [
        "retry_strategy",
        "opc_request_id"
    ]
    unknown = [k for k in six.iterkeys(kwargs) if k not in expected_kwargs]
    if unknown:
        raise ValueError(
            "get_run_log got unknown kwargs: {!r}".format(unknown))

    # Path parameters must be present and non-blank.
    path_params = {
        "runId": run_id,
        "name": name
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (param_name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(param_name))

    # Assemble headers; the body comes back as a raw octet stream.
    candidate_headers = {
        "accept": "application/octet-stream",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(candidate_headers)
                     if v is not missing and v is not None}

    # A per-call strategy takes precedence over the client-level default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="stream")

    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="stream")
def list_applications(self, compartment_id, **kwargs):
    """
    Lists all applications in the specified compartment.

    :param str compartment_id: (required)
        The OCID of the compartment.
    :param str opc_request_id: (optional)
        Client-supplied identifier included in the returned request ID.
    :param int limit: (optional)
        The maximum number of results to return in a paginated `List` call.
    :param str page: (optional)
        The `opc-next-page` or `opc-prev-page` value from the previous
        `List` call, used to fetch the next page of results.
    :param str sort_by: (optional)
        Single field used to sort the results.
        Allowed values are: "timeCreated", "displayName", "language"
    :param str sort_order: (optional)
        Ordering of results, ascending or descending.
        Allowed values are: "ASC", "DESC"
    :param str display_name: (optional)
        The query parameter for the Spark application name.
    :param str owner_principal_id: (optional)
        The OCID of the user who created the resource.
    :param str display_name_starts_with: (optional)
        The displayName prefix.
    :param obj retry_strategy: (optional)
        Per-call retry strategy overriding any client-level one; pass an
        instance of :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.
    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.data_flow.models.ApplicationSummary`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/applications"
    method = "GET"

    # Reject any kwargs this operation does not understand.
    expected_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "limit",
        "page",
        "sort_by",
        "sort_order",
        "display_name",
        "owner_principal_id",
        "display_name_starts_with"
    ]
    unknown = [k for k in six.iterkeys(kwargs) if k not in expected_kwargs]
    if unknown:
        raise ValueError(
            "list_applications got unknown kwargs: {!r}".format(unknown))

    # Enumerated kwargs are validated against their allowed values.
    sort_by_allowed_values = ["timeCreated", "displayName", "language"]
    if 'sort_by' in kwargs and kwargs['sort_by'] not in sort_by_allowed_values:
        raise ValueError(
            "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
        )
    sort_order_allowed_values = ["ASC", "DESC"]
    if 'sort_order' in kwargs and kwargs['sort_order'] not in sort_order_allowed_values:
        raise ValueError(
            "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
        )

    # Assemble query parameters, dropping entries the caller did not supply.
    candidate_query = {
        "compartmentId": compartment_id,
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortBy": kwargs.get("sort_by", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "displayName": kwargs.get("display_name", missing),
        "ownerPrincipalId": kwargs.get("owner_principal_id", missing),
        "displayNameStartsWith": kwargs.get("display_name_starts_with", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(candidate_query)
                    if v is not missing and v is not None}

    candidate_headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(candidate_headers)
                     if v is not missing and v is not None}

    # A per-call strategy takes precedence over the client-level default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            query_params=query_params,
            header_params=header_params,
            response_type="list[ApplicationSummary]")

    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        query_params=query_params,
        header_params=header_params,
        response_type="list[ApplicationSummary]")
def list_run_logs(self, run_id, **kwargs):
    """
    Retrieves summaries of the run's logs.

    :param str run_id: (required)
        The unique ID for the run.
    :param str opc_request_id: (optional)
        Client-supplied identifier included in the returned request ID.
    :param int limit: (optional)
        The maximum number of results to return in a paginated \"List\" call.
    :param str page: (optional)
        The `opc-next-page` or `opc-prev-page` value from the previous
        `List` call, used to fetch the next page of results.
    :param obj retry_strategy: (optional)
        Per-call retry strategy overriding any client-level one; pass an
        instance of :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.
    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.data_flow.models.RunLogSummary`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/runs/{runId}/logs"
    method = "GET"

    # Reject any kwargs this operation does not understand.
    expected_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "limit",
        "page"
    ]
    unknown = [k for k in six.iterkeys(kwargs) if k not in expected_kwargs]
    if unknown:
        raise ValueError(
            "list_run_logs got unknown kwargs: {!r}".format(unknown))

    # Path parameters must be present and non-blank.
    path_params = {
        "runId": run_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Assemble query parameters, dropping entries the caller did not supply.
    candidate_query = {
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(candidate_query)
                    if v is not missing and v is not None}

    candidate_headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(candidate_headers)
                     if v is not missing and v is not None}

    # A per-call strategy takes precedence over the client-level default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            response_type="list[RunLogSummary]")

    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="list[RunLogSummary]")
def list_runs(self, compartment_id, **kwargs):
    """
    Lists all runs of an application in the specified compartment.

    :param str compartment_id: (required)
        The OCID of the compartment.
    :param str opc_request_id: (optional)
        Client-supplied identifier included in the returned request ID.
    :param str application_id: (optional)
        The ID of the application.
    :param str owner_principal_id: (optional)
        The OCID of the user who created the resource.
    :param str display_name_starts_with: (optional)
        The displayName prefix.
    :param str lifecycle_state: (optional)
        The LifecycleState of the run.
        Allowed values are: "ACCEPTED", "IN_PROGRESS", "CANCELING", "CANCELED", "FAILED", "SUCCEEDED"
    :param datetime time_created_greater_than: (optional)
        The epoch time that the resource was created.
    :param int limit: (optional)
        The maximum number of results to return in a paginated `List` call.
    :param str page: (optional)
        The `opc-next-page` or `opc-prev-page` value from the previous
        `List` call, used to fetch the next page of results.
    :param str sort_by: (optional)
        Single field used to sort the results.
        Allowed values are: "timeCreated", "displayName", "language", "runDurationInMilliseconds", "lifecycleState", "totalOCpu", "dataReadInBytes", "dataWrittenInBytes"
    :param str sort_order: (optional)
        Ordering of results, ascending or descending.
        Allowed values are: "ASC", "DESC"
    :param str display_name: (optional)
        The query parameter for the Spark application name.
    :param obj retry_strategy: (optional)
        Per-call retry strategy overriding any client-level one; pass an
        instance of :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.
    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.data_flow.models.RunSummary`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/runs"
    method = "GET"

    # Reject any kwargs this operation does not understand.
    expected_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "application_id",
        "owner_principal_id",
        "display_name_starts_with",
        "lifecycle_state",
        "time_created_greater_than",
        "limit",
        "page",
        "sort_by",
        "sort_order",
        "display_name"
    ]
    unknown = [k for k in six.iterkeys(kwargs) if k not in expected_kwargs]
    if unknown:
        raise ValueError(
            "list_runs got unknown kwargs: {!r}".format(unknown))

    # Enumerated kwargs are validated against their allowed values.
    lifecycle_state_allowed_values = ["ACCEPTED", "IN_PROGRESS", "CANCELING", "CANCELED", "FAILED", "SUCCEEDED"]
    if 'lifecycle_state' in kwargs and kwargs['lifecycle_state'] not in lifecycle_state_allowed_values:
        raise ValueError(
            "Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values)
        )
    sort_by_allowed_values = ["timeCreated", "displayName", "language", "runDurationInMilliseconds", "lifecycleState", "totalOCpu", "dataReadInBytes", "dataWrittenInBytes"]
    if 'sort_by' in kwargs and kwargs['sort_by'] not in sort_by_allowed_values:
        raise ValueError(
            "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
        )
    sort_order_allowed_values = ["ASC", "DESC"]
    if 'sort_order' in kwargs and kwargs['sort_order'] not in sort_order_allowed_values:
        raise ValueError(
            "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
        )

    # Assemble query parameters, dropping entries the caller did not supply.
    candidate_query = {
        "compartmentId": compartment_id,
        "applicationId": kwargs.get("application_id", missing),
        "ownerPrincipalId": kwargs.get("owner_principal_id", missing),
        "displayNameStartsWith": kwargs.get("display_name_starts_with", missing),
        "lifecycleState": kwargs.get("lifecycle_state", missing),
        "timeCreatedGreaterThan": kwargs.get("time_created_greater_than", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortBy": kwargs.get("sort_by", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "displayName": kwargs.get("display_name", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(candidate_query)
                    if v is not missing and v is not None}

    candidate_headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(candidate_headers)
                     if v is not missing and v is not None}

    # A per-call strategy takes precedence over the client-level default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            query_params=query_params,
            header_params=header_params,
            response_type="list[RunSummary]")

    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        query_params=query_params,
        header_params=header_params,
        response_type="list[RunSummary]")
def update_application(self, update_application_details, application_id, **kwargs):
    """
    Updates an application using an `applicationId`.

    :param UpdateApplicationDetails update_application_details: (required)
        Details for updating an application.
    :param str application_id: (required)
        The unique ID for an application.
    :param str opc_request_id: (optional)
        Unique identifier for the request; echoed back in the returned request ID.
    :param str if_match: (optional)
        Etag for optimistic concurrency control; the update happens only if it
        matches the resource's current etag.
    :param obj retry_strategy: (optional)
        Per-operation retry strategy; overrides any client-level strategy. Pass
        an instance of :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.data_flow.models.Application`
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/applications/{applicationId}"
    http_method = "PUT"

    # Fail fast on keyword arguments this operation does not support.
    allowed_kwargs = {"retry_strategy", "opc_request_id", "if_match"}
    unknown = [key for key in six.iterkeys(kwargs) if key not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "update_application got unknown kwargs: {!r}".format(unknown))

    path_params = {"applicationId": application_id}
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    # Drop headers the caller did not actually supply.
    header_params = {name: value for (name, value) in six.iteritems(header_params)
                     if value is not missing and value is not None}

    # A per-call retry strategy takes precedence over the client-level one.
    chosen_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    call_kwargs = dict(
        resource_path=endpoint,
        method=http_method,
        path_params=path_params,
        header_params=header_params,
        body=update_application_details,
        response_type="Application")

    if chosen_strategy:
        return chosen_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def update_run(self, update_run_details, run_id, **kwargs):
    """
    Updates a run using a `runId`.

    :param UpdateRunDetails update_run_details: (required)
        Details for updating a run.
    :param str run_id: (required)
        The unique ID for the run.
    :param str opc_request_id: (optional)
        Unique identifier for the request. If provided, the returned request ID
        will include this value. Otherwise, a random request ID will be
        generated by the service.
    :param str if_match: (optional)
        For optimistic concurrency control. Set to the etag from a previous GET
        or POST response; the resource is updated only if the etag matches its
        current value.
    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call. This
        overrides any client-level strategy. Pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.data_flow.models.Run`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/runs/{runId}"
    method = "PUT"

    # Don't accept unknown kwargs.
    expected_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "if_match"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "update_run got unknown kwargs: {!r}".format(extra_kwargs))

    path_params = {
        "runId": run_id
    }
    # Path parameters must be present and non-blank: they are substituted into the URL.
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    # Omit headers the caller did not supply.
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    # Per-call retry strategy overrides the client-level one.
    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=update_run_details,
            response_type="Run")
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=update_run_details,
            response_type="Run")
from __future__ import absolute_import
from oci._vendor import requests # noqa: F401
from oci._vendor import six
from oci import retry # noqa: F401
from oci.base_client import BaseClient
from oci.config import get_config_value_or_default, validate_config
from oci.signer import Signer
from oci.util import Sentinel
from .models import data_flow_type_mapping
missing = Sentinel("Missing")  # sentinel distinguishing "argument not supplied" from an explicit None
class DataFlowClient(object):
"""
Use the Data Flow APIs to run any Apache Spark application at any scale without deploying or managing any infrastructure.
"""
def __init__(self, config, **kwargs):
    """
    Creates a new service client.

    :param dict config:
        Configuration keys and values as per SDK and Tool Configuration; can be
        loaded via :py:meth:`~oci.config.from_file` or supplied directly as a dict.
    :param str service_endpoint: (optional)
        Explicit endpoint of the service to call; derived from the config region
        when not supplied.
    :param timeout: (optional)
        Connection and read timeouts: a single float for both, or a
        (connection, read) tuple of floats.
    :type timeout: float or tuple(float, float)
    :param signer: (optional)
        Signer used to sign requests; defaults to a :py:class:`~oci.signer.Signer`
        built from *config* (e.g. pass an InstancePrincipalsSecurityTokenSigner
        for instance-principals auth).
    :type signer: :py:class:`~oci.signer.AbstractBaseSigner`
    :param obj retry_strategy: (optional)
        Client-level retry strategy applied to all calls; can be overridden
        per operation. No retries are performed by default.
    """
    validate_config(config, signer=kwargs.get('signer'))

    # Honor an explicitly supplied signer (even None); otherwise build one from config.
    signer = kwargs['signer'] if 'signer' in kwargs else Signer(
        tenancy=config["tenancy"],
        user=config["user"],
        fingerprint=config["fingerprint"],
        private_key_file_location=config.get("key_file"),
        pass_phrase=get_config_value_or_default(config, "pass_phrase"),
        private_key_content=config.get("key_content")
    )

    client_options = {
        'regional_client': True,
        'service_endpoint': kwargs.get('service_endpoint'),
        'timeout': kwargs.get('timeout'),
        'base_path': '/20200129',
        'service_endpoint_template': 'https://dataflow.{region}.oci.{secondLevelDomain}',
        'skip_deserialization': kwargs.get('skip_deserialization', False)
    }
    self.base_client = BaseClient("data_flow", config, signer, data_flow_type_mapping, **client_options)
    self.retry_strategy = kwargs.get('retry_strategy')
def change_application_compartment(self, application_id, change_application_compartment_details, **kwargs):
    """
    Moves an application into a different compartment. When provided, If-Match
    is checked against ETag values of the resource. Associated resources, like
    runs, will not be automatically moved.

    :param str application_id: (required)
        The unique ID for an application.
    :param ChangeApplicationCompartmentDetails change_application_compartment_details: (required)
        Details for changing an application's compartment.
    :param str opc_request_id: (optional)
        Unique identifier for the request; echoed back in the returned request ID.
    :param str if_match: (optional)
        Etag for optimistic concurrency control.
    :param str opc_retry_token: (optional)
        Token making a retried request idempotent for up to 24 hours.
    :param obj retry_strategy: (optional)
        Per-operation retry strategy; overrides any client-level strategy. Pass
        an instance of :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/applications/{applicationId}/actions/changeCompartment"
    http_method = "POST"

    # Fail fast on keyword arguments this operation does not support.
    allowed_kwargs = {
        "retry_strategy",
        "opc_request_id",
        "if_match",
        "opc_retry_token"
    }
    unknown = [key for key in six.iterkeys(kwargs) if key not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "change_application_compartment got unknown kwargs: {!r}".format(unknown))

    path_params = {"applicationId": application_id}
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    # Drop headers the caller did not actually supply.
    header_params = {name: value for (name, value) in six.iteritems(header_params)
                     if value is not missing and value is not None}

    # A per-call retry strategy takes precedence over the client-level one.
    chosen_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    call_kwargs = dict(
        resource_path=endpoint,
        method=http_method,
        path_params=path_params,
        header_params=header_params,
        body=change_application_compartment_details)

    if chosen_strategy:
        if not isinstance(chosen_strategy, retry.NoneRetryStrategy):
            # Inject an opc-retry-token so automatic retries stay idempotent.
            self.base_client.add_opc_retry_token_if_needed(header_params)
        return chosen_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def change_run_compartment(self, run_id, change_run_compartment_details, **kwargs):
    """
    Moves a run into a different compartment. When provided, If-Match is
    checked against ETag values of the resource. Associated resources, like
    historical metrics, will not be automatically moved. The run must be in a
    terminal state (CANCELED, FAILED, SUCCEEDED) in order for it to be moved
    to a different compartment.

    :param str run_id: (required)
        The unique ID for the run.
    :param ChangeRunCompartmentDetails change_run_compartment_details: (required)
        Details for changing a run's compartment.
    :param str opc_request_id: (optional)
        Unique identifier for the request; echoed back in the returned request ID.
    :param str if_match: (optional)
        Etag for optimistic concurrency control.
    :param str opc_retry_token: (optional)
        Token making a retried request idempotent for up to 24 hours.
    :param obj retry_strategy: (optional)
        Per-operation retry strategy; overrides any client-level strategy. Pass
        an instance of :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/runs/{runId}/actions/changeCompartment"
    http_method = "POST"

    # Fail fast on keyword arguments this operation does not support.
    allowed_kwargs = {
        "retry_strategy",
        "opc_request_id",
        "if_match",
        "opc_retry_token"
    }
    unknown = [key for key in six.iterkeys(kwargs) if key not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "change_run_compartment got unknown kwargs: {!r}".format(unknown))

    path_params = {"runId": run_id}
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    # Drop headers the caller did not actually supply.
    header_params = {name: value for (name, value) in six.iteritems(header_params)
                     if value is not missing and value is not None}

    # A per-call retry strategy takes precedence over the client-level one.
    chosen_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    call_kwargs = dict(
        resource_path=endpoint,
        method=http_method,
        path_params=path_params,
        header_params=header_params,
        body=change_run_compartment_details)

    if chosen_strategy:
        if not isinstance(chosen_strategy, retry.NoneRetryStrategy):
            # Inject an opc-retry-token so automatic retries stay idempotent.
            self.base_client.add_opc_retry_token_if_needed(header_params)
        return chosen_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def create_application(self, create_application_details, **kwargs):
    """
    Creates an application.

    :param CreateApplicationDetails create_application_details: (required)
        Details to create an application.
    :param str opc_retry_token: (optional)
        Token making a retried creation request idempotent for up to 24 hours.
    :param str opc_request_id: (optional)
        Unique identifier for the request; echoed back in the returned request ID.
    :param obj retry_strategy: (optional)
        Per-operation retry strategy; overrides any client-level strategy. Pass
        an instance of :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.data_flow.models.Application`
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/applications"
    http_method = "POST"

    # Fail fast on keyword arguments this operation does not support.
    allowed_kwargs = {"retry_strategy", "opc_retry_token", "opc_request_id"}
    unknown = [key for key in six.iterkeys(kwargs) if key not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "create_application got unknown kwargs: {!r}".format(unknown))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Drop headers the caller did not actually supply.
    header_params = {name: value for (name, value) in six.iteritems(header_params)
                     if value is not missing and value is not None}

    # A per-call retry strategy takes precedence over the client-level one.
    chosen_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    call_kwargs = dict(
        resource_path=endpoint,
        method=http_method,
        header_params=header_params,
        body=create_application_details,
        response_type="Application")

    if chosen_strategy:
        if not isinstance(chosen_strategy, retry.NoneRetryStrategy):
            # Inject an opc-retry-token so automatic retries stay idempotent.
            self.base_client.add_opc_retry_token_if_needed(header_params)
        return chosen_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def create_run(self, create_run_details, **kwargs):
    """
    Creates a run for an application.

    :param CreateRunDetails create_run_details: (required)
        Details for creating a run of an application.
    :param str opc_retry_token: (optional)
        Token making a retried creation request idempotent for up to 24 hours.
    :param str opc_request_id: (optional)
        Unique identifier for the request; echoed back in the returned request ID.
    :param obj retry_strategy: (optional)
        Per-operation retry strategy; overrides any client-level strategy. Pass
        an instance of :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.data_flow.models.Run`
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/runs"
    http_method = "POST"

    # Fail fast on keyword arguments this operation does not support.
    allowed_kwargs = {"retry_strategy", "opc_retry_token", "opc_request_id"}
    unknown = [key for key in six.iterkeys(kwargs) if key not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "create_run got unknown kwargs: {!r}".format(unknown))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Drop headers the caller did not actually supply.
    header_params = {name: value for (name, value) in six.iteritems(header_params)
                     if value is not missing and value is not None}

    # A per-call retry strategy takes precedence over the client-level one.
    chosen_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    call_kwargs = dict(
        resource_path=endpoint,
        method=http_method,
        header_params=header_params,
        body=create_run_details,
        response_type="Run")

    if chosen_strategy:
        if not isinstance(chosen_strategy, retry.NoneRetryStrategy):
            # Inject an opc-retry-token so automatic retries stay idempotent.
            self.base_client.add_opc_retry_token_if_needed(header_params)
        return chosen_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def delete_application(self, application_id, **kwargs):
    """
    Deletes an application using an `applicationId`.

    :param str application_id: (required)
        The unique ID for an application.
    :param str opc_request_id: (optional)
        Unique identifier for the request; echoed back in the returned request ID.
    :param str if_match: (optional)
        Etag for optimistic concurrency control; the delete happens only if it
        matches the resource's current etag.
    :param obj retry_strategy: (optional)
        Per-operation retry strategy; overrides any client-level strategy. Pass
        an instance of :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/applications/{applicationId}"
    http_method = "DELETE"

    # Fail fast on keyword arguments this operation does not support.
    allowed_kwargs = {"retry_strategy", "opc_request_id", "if_match"}
    unknown = [key for key in six.iterkeys(kwargs) if key not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "delete_application got unknown kwargs: {!r}".format(unknown))

    path_params = {"applicationId": application_id}
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    # Drop headers the caller did not actually supply.
    header_params = {name: value for (name, value) in six.iteritems(header_params)
                     if value is not missing and value is not None}

    # A per-call retry strategy takes precedence over the client-level one.
    chosen_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    call_kwargs = dict(
        resource_path=endpoint,
        method=http_method,
        path_params=path_params,
        header_params=header_params)

    if chosen_strategy:
        return chosen_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def delete_run(self, run_id, **kwargs):
    """
    Cancels the specified run if it has not already completed or was
    previously cancelled. If a run is in progress, the executing job will
    be killed.

    :param str run_id: (required)
        The unique ID for the run.
    :param str opc_request_id: (optional)
        Unique identifier for the request; echoed back in the returned request ID.
    :param str if_match: (optional)
        Etag for optimistic concurrency control; the delete happens only if it
        matches the resource's current etag.
    :param obj retry_strategy: (optional)
        Per-operation retry strategy; overrides any client-level strategy. Pass
        an instance of :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/runs/{runId}"
    http_method = "DELETE"

    # Fail fast on keyword arguments this operation does not support.
    allowed_kwargs = {"retry_strategy", "opc_request_id", "if_match"}
    unknown = [key for key in six.iterkeys(kwargs) if key not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "delete_run got unknown kwargs: {!r}".format(unknown))

    path_params = {"runId": run_id}
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    # Drop headers the caller did not actually supply.
    header_params = {name: value for (name, value) in six.iteritems(header_params)
                     if value is not missing and value is not None}

    # A per-call retry strategy takes precedence over the client-level one.
    chosen_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    call_kwargs = dict(
        resource_path=endpoint,
        method=http_method,
        path_params=path_params,
        header_params=header_params)

    if chosen_strategy:
        return chosen_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def get_application(self, application_id, **kwargs):
    """
    Retrieves an application using an `applicationId`.

    :param str application_id: (required)
        The unique ID for an application.
    :param str opc_request_id: (optional)
        Unique identifier for the request; echoed back in the returned request ID.
    :param obj retry_strategy: (optional)
        Per-operation retry strategy; overrides any client-level strategy. Pass
        an instance of :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.data_flow.models.Application`
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/applications/{applicationId}"
    http_method = "GET"

    # Fail fast on keyword arguments this operation does not support.
    allowed_kwargs = {"retry_strategy", "opc_request_id"}
    unknown = [key for key in six.iterkeys(kwargs) if key not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "get_application got unknown kwargs: {!r}".format(unknown))

    path_params = {"applicationId": application_id}
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Drop headers the caller did not actually supply.
    header_params = {name: value for (name, value) in six.iteritems(header_params)
                     if value is not missing and value is not None}

    # A per-call retry strategy takes precedence over the client-level one.
    chosen_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    call_kwargs = dict(
        resource_path=endpoint,
        method=http_method,
        path_params=path_params,
        header_params=header_params,
        response_type="Application")

    if chosen_strategy:
        return chosen_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def get_run(self, run_id, **kwargs):
    """
    Retrieves the run for the specified `runId`.

    :param str run_id: (required)
        The unique ID for the run.

    :param str opc_request_id: (optional)
        Unique identifier for the request. If provided, the returned request
        ID will include this value; otherwise the service generates one.

    :param obj retry_strategy: (optional)
        A retry strategy applied to this specific call, overriding any
        client-level strategy. Pass :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_flow.models.Run`
    :rtype: :class:`~oci.response.Response`
    """
    # Reject keyword arguments this operation does not understand.
    allowed = ("retry_strategy", "opc_request_id")
    unknown = [key for key in six.iterkeys(kwargs) if key not in allowed]
    if unknown:
        raise ValueError(
            "get_run got unknown kwargs: {!r}".format(unknown))

    path_params = {"runId": run_id}
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    # Every path parameter must carry a non-blank value.
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    call_kwargs = dict(
        resource_path="/runs/{runId}",
        method="GET",
        path_params=path_params,
        header_params=header_params,
        response_type="Run")

    # A per-call retry strategy wins over the client-level one.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy
    if retry_strategy:
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def get_run_log(self, run_id, name, **kwargs):
    """
    Retrieves the content of an run log.

    :param str run_id: (required)
        The unique ID for the run.

    :param str name: (required)
        The name of the log. Avoid entering confidential information.

    :param str opc_request_id: (optional)
        Unique identifier for the request. If provided, the returned request
        ID will include this value; otherwise the service generates one.

    :param obj retry_strategy: (optional)
        A retry strategy applied to this specific call, overriding any
        client-level strategy. Pass :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type stream
    :rtype: :class:`~oci.response.Response`
    """
    # Reject keyword arguments this operation does not understand.
    allowed = ("retry_strategy", "opc_request_id")
    unknown = [key for key in six.iterkeys(kwargs) if key not in allowed]
    if unknown:
        raise ValueError(
            "get_run_log got unknown kwargs: {!r}".format(unknown))

    path_params = {
        "runId": run_id,
        "name": name
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    # Every path parameter must carry a non-blank value.
    for param, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(param))

    # The log body is returned as raw bytes, hence the octet-stream accept.
    header_params = {
        "accept": "application/octet-stream",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    call_kwargs = dict(
        resource_path="/runs/{runId}/logs/{name}",
        method="GET",
        path_params=path_params,
        header_params=header_params,
        response_type="stream")

    # A per-call retry strategy wins over the client-level one.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy
    if retry_strategy:
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def list_applications(self, compartment_id, **kwargs):
    """
    Lists all applications in the specified compartment.

    :param str compartment_id: (required)
        The OCID of the compartment.

    :param str opc_request_id: (optional)
        Unique identifier for the request. If provided, the returned request
        ID will include this value; otherwise the service generates one.

    :param int limit: (optional)
        The maximum number of results to return in a paginated `List` call.

    :param str page: (optional)
        The value of the `opc-next-page` or `opc-prev-page` response header
        from the last `List` call, sent back to get the next page of results.

    :param str sort_by: (optional)
        The field used to sort the results. Multiple fields are not supported.
        Allowed values are: "timeCreated", "displayName", "language"

    :param str sort_order: (optional)
        The ordering of results in ascending or descending order.
        Allowed values are: "ASC", "DESC"

    :param str display_name: (optional)
        The query parameter for the Spark application name.

    :param str owner_principal_id: (optional)
        The OCID of the user who created the resource.

    :param str display_name_starts_with: (optional)
        The displayName prefix.

    :param obj retry_strategy: (optional)
        A retry strategy applied to this specific call, overriding any
        client-level strategy. Pass :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.data_flow.models.ApplicationSummary`
    :rtype: :class:`~oci.response.Response`
    """
    # Reject keyword arguments this operation does not understand.
    allowed = (
        "retry_strategy",
        "opc_request_id",
        "limit",
        "page",
        "sort_by",
        "sort_order",
        "display_name",
        "owner_principal_id",
        "display_name_starts_with"
    )
    unknown = [key for key in six.iterkeys(kwargs) if key not in allowed]
    if unknown:
        raise ValueError(
            "list_applications got unknown kwargs: {!r}".format(unknown))

    # Enumerated query parameters are validated client-side before the call.
    if 'sort_by' in kwargs:
        sort_by_allowed_values = ["timeCreated", "displayName", "language"]
        if kwargs['sort_by'] not in sort_by_allowed_values:
            raise ValueError(
                "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
            )
    if 'sort_order' in kwargs:
        sort_order_allowed_values = ["ASC", "DESC"]
        if kwargs['sort_order'] not in sort_order_allowed_values:
            raise ValueError(
                "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
            )

    query_params = {
        "compartmentId": compartment_id,
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortBy": kwargs.get("sort_by", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "displayName": kwargs.get("display_name", missing),
        "ownerPrincipalId": kwargs.get("owner_principal_id", missing),
        "displayNameStartsWith": kwargs.get("display_name_starts_with", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    call_kwargs = dict(
        resource_path="/applications",
        method="GET",
        query_params=query_params,
        header_params=header_params,
        response_type="list[ApplicationSummary]")

    # A per-call retry strategy wins over the client-level one.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy
    if retry_strategy:
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def list_run_logs(self, run_id, **kwargs):
    """
    Retrieves summaries of the run's logs.

    :param str run_id: (required)
        The unique ID for the run.

    :param str opc_request_id: (optional)
        Unique identifier for the request. If provided, the returned request
        ID will include this value; otherwise the service generates one.

    :param int limit: (optional)
        The maximum number of results to return in a paginated \"List\" call.

    :param str page: (optional)
        The value of the `opc-next-page` or `opc-prev-page` response header
        from the last `List` call, sent back to get the next page of results.

    :param obj retry_strategy: (optional)
        A retry strategy applied to this specific call, overriding any
        client-level strategy. Pass :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.data_flow.models.RunLogSummary`
    :rtype: :class:`~oci.response.Response`
    """
    # Reject keyword arguments this operation does not understand.
    allowed = ("retry_strategy", "opc_request_id", "limit", "page")
    unknown = [key for key in six.iterkeys(kwargs) if key not in allowed]
    if unknown:
        raise ValueError(
            "list_run_logs got unknown kwargs: {!r}".format(unknown))

    path_params = {"runId": run_id}
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    # Every path parameter must carry a non-blank value.
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    query_params = {
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    call_kwargs = dict(
        resource_path="/runs/{runId}/logs",
        method="GET",
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="list[RunLogSummary]")

    # A per-call retry strategy wins over the client-level one.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy
    if retry_strategy:
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def list_runs(self, compartment_id, **kwargs):
    """
    Lists all runs of an application in the specified compartment.

    :param str compartment_id: (required)
        The OCID of the compartment.

    :param str opc_request_id: (optional)
        Unique identifier for the request. If provided, the returned request
        ID will include this value; otherwise the service generates one.

    :param str application_id: (optional)
        The ID of the application.

    :param str owner_principal_id: (optional)
        The OCID of the user who created the resource.

    :param str display_name_starts_with: (optional)
        The displayName prefix.

    :param str lifecycle_state: (optional)
        The LifecycleState of the run.
        Allowed values are: "ACCEPTED", "IN_PROGRESS", "CANCELING", "CANCELED", "FAILED", "SUCCEEDED"

    :param datetime time_created_greater_than: (optional)
        The epoch time that the resource was created.

    :param int limit: (optional)
        The maximum number of results to return in a paginated `List` call.

    :param str page: (optional)
        The value of the `opc-next-page` or `opc-prev-page` response header
        from the last `List` call, sent back to get the next page of results.

    :param str sort_by: (optional)
        The field used to sort the results. Multiple fields are not supported.
        Allowed values are: "timeCreated", "displayName", "language", "runDurationInMilliseconds", "lifecycleState", "totalOCpu", "dataReadInBytes", "dataWrittenInBytes"

    :param str sort_order: (optional)
        The ordering of results in ascending or descending order.
        Allowed values are: "ASC", "DESC"

    :param str display_name: (optional)
        The query parameter for the Spark application name.

    :param obj retry_strategy: (optional)
        A retry strategy applied to this specific call, overriding any
        client-level strategy. Pass :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.data_flow.models.RunSummary`
    :rtype: :class:`~oci.response.Response`
    """
    # Reject keyword arguments this operation does not understand.
    allowed = (
        "retry_strategy",
        "opc_request_id",
        "application_id",
        "owner_principal_id",
        "display_name_starts_with",
        "lifecycle_state",
        "time_created_greater_than",
        "limit",
        "page",
        "sort_by",
        "sort_order",
        "display_name"
    )
    unknown = [key for key in six.iterkeys(kwargs) if key not in allowed]
    if unknown:
        raise ValueError(
            "list_runs got unknown kwargs: {!r}".format(unknown))

    # Enumerated query parameters are validated client-side before the call.
    if 'lifecycle_state' in kwargs:
        lifecycle_state_allowed_values = ["ACCEPTED", "IN_PROGRESS", "CANCELING", "CANCELED", "FAILED", "SUCCEEDED"]
        if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values:
            raise ValueError(
                "Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values)
            )
    if 'sort_by' in kwargs:
        sort_by_allowed_values = ["timeCreated", "displayName", "language", "runDurationInMilliseconds", "lifecycleState", "totalOCpu", "dataReadInBytes", "dataWrittenInBytes"]
        if kwargs['sort_by'] not in sort_by_allowed_values:
            raise ValueError(
                "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
            )
    if 'sort_order' in kwargs:
        sort_order_allowed_values = ["ASC", "DESC"]
        if kwargs['sort_order'] not in sort_order_allowed_values:
            raise ValueError(
                "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
            )

    query_params = {
        "compartmentId": compartment_id,
        "applicationId": kwargs.get("application_id", missing),
        "ownerPrincipalId": kwargs.get("owner_principal_id", missing),
        "displayNameStartsWith": kwargs.get("display_name_starts_with", missing),
        "lifecycleState": kwargs.get("lifecycle_state", missing),
        "timeCreatedGreaterThan": kwargs.get("time_created_greater_than", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortBy": kwargs.get("sort_by", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "displayName": kwargs.get("display_name", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    call_kwargs = dict(
        resource_path="/runs",
        method="GET",
        query_params=query_params,
        header_params=header_params,
        response_type="list[RunSummary]")

    # A per-call retry strategy wins over the client-level one.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy
    if retry_strategy:
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def update_application(self, update_application_details, application_id, **kwargs):
    """
    Updates an application using an `applicationId`.

    :param UpdateApplicationDetails update_application_details: (required)
        Details for updating an application.

    :param str application_id: (required)
        The unique ID for an application.

    :param str opc_request_id: (optional)
        Unique identifier for the request. If provided, the returned request
        ID will include this value; otherwise the service generates one.

    :param str if_match: (optional)
        For optimistic concurrency control. Set `if-match` to the etag from a
        previous GET or POST response; the resource is updated only if the
        etag matches its current value.

    :param obj retry_strategy: (optional)
        A retry strategy applied to this specific call, overriding any
        client-level strategy. Pass :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_flow.models.Application`
    :rtype: :class:`~oci.response.Response`
    """
    # Reject keyword arguments this operation does not understand.
    allowed = ("retry_strategy", "opc_request_id", "if_match")
    unknown = [key for key in six.iterkeys(kwargs) if key not in allowed]
    if unknown:
        raise ValueError(
            "update_application got unknown kwargs: {!r}".format(unknown))

    path_params = {"applicationId": application_id}
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    # Every path parameter must carry a non-blank value.
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    call_kwargs = dict(
        resource_path="/applications/{applicationId}",
        method="PUT",
        path_params=path_params,
        header_params=header_params,
        body=update_application_details,
        response_type="Application")

    # A per-call retry strategy wins over the client-level one.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy
    if retry_strategy:
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def update_run(self, update_run_details, run_id, **kwargs):
    """
    Updates a run using a `runId`.

    :param UpdateRunDetails update_run_details: (required)
        Details for updating a run.

    :param str run_id: (required)
        The unique ID for the run.

    :param str opc_request_id: (optional)
        Unique identifier for the request. If provided, the returned request
        ID will include this value; otherwise the service generates one.

    :param str if_match: (optional)
        For optimistic concurrency control. Set `if-match` to the etag from a
        previous GET or POST response; the resource is updated only if the
        etag matches its current value.

    :param obj retry_strategy: (optional)
        A retry strategy applied to this specific call, overriding any
        client-level strategy. Pass :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_flow.models.Run`
    :rtype: :class:`~oci.response.Response`
    """
    # Reject keyword arguments this operation does not understand.
    allowed = ("retry_strategy", "opc_request_id", "if_match")
    unknown = [key for key in six.iterkeys(kwargs) if key not in allowed]
    if unknown:
        raise ValueError(
            "update_run got unknown kwargs: {!r}".format(unknown))

    path_params = {"runId": run_id}
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    # Every path parameter must carry a non-blank value.
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    call_kwargs = dict(
        resource_path="/runs/{runId}",
        method="PUT",
        path_params=path_params,
        header_params=header_params,
        body=update_run_details,
        response_type="Run")

    # A per-call retry strategy wins over the client-level one.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy
    if retry_strategy:
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)