text
stringlengths
0
1.05M
meta
dict
"""A context manager to preserve global variables over IPython-parallel. See the PreserveVars class. """ from IPython.parallel import interactive _base_store_name = '_preservevars_store' @interactive def _enter (base_store_name, data): # get variable to store in (can't be in data) store_name = base_store_name while store_name in data: store_name += '_' # initialise store gl = globals() to_del = [] store = {} if store_name in gl: old_store = gl[store_name] else: to_del.append(store_name) old_store = None gl[store_name] = (to_del, store, old_store) # store data and set new values for name, val in data.iteritems(): if name in gl: store[name] = gl[name] else: to_del.append(name) gl[name] = val return store_name @interactive def _exit (store_name): # get store gl = globals() to_del, store, old_store = gl[store_name] # replace data that existed for name, val in store.iteritems(): gl[name] = val gl[store_name] = old_store # delete references for data that didn't exist for name in to_del: del gl[name] class PreserveVars: """A context manager to preserve global variables over IPython-parallel. PreserveVars(dv[, data], **kwargs) dv: the DirectView to use. data: a dict of variables to transfer to the engines dv covers, as taken by dv.push. kwargs: keyword arguments are also transferred. That is, PreserveVars({'x': 5, 'y': 10}) is equivalent to PreserveVars(x = 5, y = 10) or even PreserveVars({'x': 5}, y = 10) """ def __init__ (self, dv, data = {}, **kwargs): self.dv = dv data.update(kwargs) self.data = data def __enter__ (self): self.store_names = self.dv.apply(_enter, _base_store_name, self.data) def __exit__ (self, *args): dv = self.dv targets = dv.targets for target, store_name in zip(targets, self.store_names): dv.targets = [target] dv.apply(_exit, store_name) dv.targets = targets
{ "repo_name": "cedadev/ipython_project", "path": "preservevars.py", "copies": "1", "size": "2142", "license": "bsd-3-clause", "hash": -1399388692580263200, "line_mean": 23.9186046512, "line_max": 77, "alpha_frac": 0.5994397759, "autogenerated": false, "ratio": 3.558139534883721, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.46575793107837204, "avg_score": null, "num_lines": null }
""" A context where other parts share global state. """ import logging import struct from ...common import CompilerError from ...arch.arch_info import Endianness from ... import ir from .nodes.types import BasicType from .nodes import types, expressions from .utils import required_padding from .eval import ConstantExpressionEvaluator class CContext: """ A context as a substitute for global data """ logger = logging.getLogger("ccontext") def __init__(self, coptions, arch_info): self.coptions = coptions self.arch_info = arch_info self._expression_evaluator = ConstantExpressionEvaluator(self) self._field_offsets = {} self._enum_values = {} int_size = self.arch_info.get_size("int") int_alignment = self.arch_info.get_alignment("int") long_size = max(int_size, 4) long_alignment = max(int_alignment, 4) ptr_size = self.arch_info.get_size("ptr") double_size = self.arch_info.get_size(ir.f64) double_alignment = self.arch_info.get_alignment(ir.f64) self.type_size_map = { BasicType.CHAR: (1, 1), BasicType.UCHAR: (1, 1), BasicType.SHORT: (2, 2), BasicType.USHORT: (2, 2), BasicType.INT: (int_size, int_alignment), BasicType.UINT: (int_size, int_alignment), BasicType.LONG: (long_size, long_alignment), BasicType.ULONG: (long_size, long_alignment), BasicType.LONGLONG: (8, 8), BasicType.ULONGLONG: (8, 8), BasicType.FLOAT: (4, 4), BasicType.DOUBLE: (double_size, double_alignment), BasicType.LONGDOUBLE: (10, 10), } int_map = {2: "h", 4: "i", 8: "q"} if self.arch_info.endianness == Endianness.LITTLE: byte_order = "<" else: byte_order = ">" if double_size == 4: ftype = "f" else: ftype = "d" ctypes = { BasicType.CHAR: "b", BasicType.UCHAR: "B", BasicType.SHORT: "h", BasicType.USHORT: "H", BasicType.INT: int_map[int_size].lower(), BasicType.UINT: int_map[int_size].upper(), "ptr": int_map[ptr_size].upper(), BasicType.LONG: "l", BasicType.ULONG: "L", BasicType.LONGLONG: "q", BasicType.ULONGLONG: "Q", BasicType.FLOAT: "f", BasicType.DOUBLE: ftype, } self.ctypes_names = {t: byte_order + v for 
t, v in ctypes.items()} def sizeof(self, typ: types.CType): """ Given a type, determine its size in whole bytes """ if not isinstance(typ, types.CType): raise TypeError("typ should be CType: {}".format(typ)) if isinstance(typ, types.ArrayType): element_size = self.sizeof(typ.element_type) if typ.size is None: self.error( "Size of array could not be determined!", typ.location ) if isinstance(typ.size, int): array_size = typ.size else: array_size = self.eval_expr(typ.size) return element_size * array_size elif isinstance(typ, types.BasicType): return self.type_size_map[typ.type_id][0] elif isinstance(typ, types.StructType): if not typ.complete: self.error("Storage size unknown", typ.location) return self.get_field_offsets(typ)[0] elif isinstance(typ, types.UnionType): if not typ.complete: self.error("Type is incomplete, size unknown", typ) return max(self.sizeof(part.typ) for part in typ.fields) elif isinstance(typ, types.EnumType): if not typ.complete: self.error("Storage size unknown", typ) # For enums take int as the type return self.arch_info.get_size("int") elif isinstance(typ, (types.PointerType, types.FunctionType)): return self.arch_info.get_size("ptr") else: # pragma: no cover raise NotImplementedError(str(typ)) def alignment(self, typ: types.CType): """ Given a type, determine its alignment in bytes """ assert isinstance(typ, types.CType) if isinstance(typ, types.ArrayType): return self.alignment(typ.element_type) elif isinstance(typ, types.BasicType): return self.type_size_map[typ.type_id][1] elif isinstance(typ, types.StructType): if not typ.complete: self.error("Storage size unknown", typ.location) return max(self.alignment(part.typ) for part in typ.fields) elif isinstance(typ, types.UnionType): if not typ.complete: self.error("Type is incomplete, size unknown", typ) return max(self.alignment(part.typ) for part in typ.fields) elif isinstance(typ, types.EnumType): if not typ.complete: self.error("Storage size unknown", typ) # For enums take int as the 
type return self.arch_info.get_alignment("int") elif isinstance(typ, (types.PointerType, types.FunctionType)): return self.arch_info.get_alignment("ptr") else: # pragma: no cover raise NotImplementedError(str(typ)) def layout_struct(self, typ): """ Layout the fields in the struct. Things to take in account: - alignment - bit packing - anonynous types """ kind = "struct" if isinstance(typ, types.StructType) else "union" bit_offsets = {} bit_offset = 0 # Offset in bits for field in typ.fields: # Calculate bit size: if field.bitsize: bitsize = self.eval_expr(field.bitsize) alignment = 1 # Bitfields are 1 bit aligned else: bitsize = self.sizeof(field.typ) * 8 alignment = self.alignment(field.typ) * 8 # alignment handling: bit_offset += required_padding(bit_offset, alignment) # We are now at the position of this field bit_offsets[field] = bit_offset if field.name is None: # If the field is anonymous, fill the offsets of named subfields: assert field.typ.is_struct_or_union _, sub_field_bit_offsets = self.layout_struct(field.typ) for ( sub_field, sub_field_bit_offset, ) in sub_field_bit_offsets.items(): bit_offsets[sub_field] = bit_offset + sub_field_bit_offset if kind == "struct": bit_offset += bitsize # TODO: should we take care here of maximum alignment as well? # Finally align at 8 bits: bit_offset += required_padding(bit_offset, 8) assert bit_offset % 8 == 0 byte_size = bit_offset // 8 return byte_size, bit_offsets def get_field_offsets(self, typ): """ Get a dictionary with offset of fields """ if typ not in self._field_offsets: size, offsets = self.layout_struct(typ) self._field_offsets[typ] = size, offsets return self._field_offsets[typ] def offsetof(self, typ, field): """ Returns the offset of a field in a struct/union in bytes """ field_offset = self.get_field_offsets(typ)[1][field] # Note that below assert will not always hold. # It is also used to create debug types. 
# assert field_offset % 8 == 0 return field_offset // 8 def has_field(self, typ, field_name): """ Check if the given type has the given field. """ if not typ.is_struct_or_union: raise TypeError("typ must be union or struct type") return typ.has_field(field_name) def get_field(self, typ, field_name): """ Get the given field. """ if not typ.is_struct_or_union: raise TypeError("typ must be union or struct type") if typ.has_field(field_name): return typ.get_field(field_name) raise KeyError(field_name) def get_enum_value(self, enum_typ, enum_constant): if enum_constant not in self._enum_values: self._calculate_enum_values(enum_typ) return self._enum_values[enum_constant] def _calculate_enum_values(self, ctyp): """ Determine enum values """ value = 0 for constant in ctyp.constants: if constant.value: value = self.eval_expr(constant.value) self._enum_values[constant] = value # Increase for next enum value: value += 1 def pack(self, typ, value): """ Pack a type into proper memory format """ if isinstance(typ, types.PointerType): tid = "ptr" else: assert isinstance(typ, types.BasicType) tid = typ.type_id fmt = self.ctypes_names[tid] # Check format with arch options: assert self.sizeof(typ) == struct.calcsize(fmt) return struct.pack(fmt, value) def _make_ival(self, typ, ival): """ Try to make ival a proper initializer """ if isinstance(ival, list): if isinstance(typ, types.ArrayType): elements = [self._make_ival(typ.element_type, i) for i in ival] ival = expressions.ArrayInitializer(typ, elements, None) elif isinstance(typ, types.StructType): ival2 = expressions.StructInitializer(typ, None) for field, value in zip(typ.fields, ival): value = self._make_ival(field.typ, value) ival2.values[field] = value ival = ival2 else: raise NotImplementedError(str(typ)) elif isinstance(ival, int): int_type = types.BasicType(types.BasicType.INT) ival = expressions.NumericLiteral(ival, int_type, None) return ival @staticmethod def error(message, location, hints=None): """ Trigger an error at 
the given location """ raise CompilerError(message, loc=location, hints=hints) def warning(self, message, location, hints=None): """ Trigger a warning at the given location """ # TODO: figure a nice way to gather warnings. self.logger.warning(message) self.logger.info("At: %s, hints: %s", location, hints) def eval_expr(self, expr): """ Evaluate an expression right now! (=at compile time) """ return self._expression_evaluator.eval_expr(expr)
{ "repo_name": "windelbouwman/ppci-mirror", "path": "ppci/lang/c/context.py", "copies": "1", "size": "10785", "license": "bsd-2-clause", "hash": 5998108085792696000, "line_mean": 37.3807829181, "line_max": 81, "alpha_frac": 0.5712563746, "autogenerated": false, "ratio": 4.063677467972871, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.00004339901050256054, "num_lines": 281 }
# Karger's minimum-cut algorithm for a given graph.
# This implementation was developed by Raul Bernardo Rodas Herrera,
# 20 September 2013.

# Imports.
# NOTE(review): a graph library import (the original comment mentions
# one, presumably networkx 1.x given the edges_iter / list-returning
# edges() API used below) appears to be missing from this chunk —
# confirm against the full file.
# For random choices:
import random


#################################################
# This function contracts the graph over a given pair of nodes.
def contraer(grafo, nodo_1, nodo_2):
    # Merged node name: build a comma-separated string of the merged
    # node ids, sorted numerically so the name is canonical.
    nuevo_nodo = "%s,%s" % (nodo_1, nodo_2)
    nuevo_nodo = sorted([int(i) for i in nuevo_nodo.split(',')])
    nuevo_nodo = ','.join(str(i) for i in nuevo_nodo)

    # Nodes that are kept as-is (everything except the two merged ones).
    nodos = grafo.nodes()
    nodos_mantienen = set(nodos) - set([nodo_1]) - set([nodo_2])
    nodos_mantienen = [i for i in nodos_mantienen]
    #nodos_mantienen = [i for i in nodos if (i != nodo_1 and i != nodo_2)]

    # Create the contracted graph (subgraph without the merged pair).
    contraccion = grafo.subgraph(nodos_mantienen)

    # Add the merged node.
    # contraccion.add_node(nuevo_nodo)

    # Add the weighted edges of the new node.  This iterator yields the
    # edges incident to either of the two merged nodes.
    iterador = grafo.edges_iter(nbunch=[nodo_1, nodo_2], data="weight")

    # Keep every incident edge except the edge between the merged pair
    # itself (in either direction).
    lista_agregar = [(i[0], i[1], i[2]['weight']) for i in iterador
                     if ([i[0], i[1]] != [nodo_1, nodo_2] and
                         [i[1], i[0]] != [nodo_1, nodo_2])]

    # Rename the endpoint that was one of the merged nodes to the new
    # merged-node name.
    lista_agregar = [(nuevo_nodo, tupla[1], tupla[2])
                     if (tupla[0] == nodo_1 or tupla[0] == nodo_2)
                     else (tupla[0], nuevo_nodo, tupla[2])
                     for tupla in lista_agregar]

    # Finally add the rebuilt edge list.
    contraccion.add_weighted_edges_from(lista_agregar)

    # Return the contracted graph.
    return contraccion


################################################
def transformar_a_conj(elemento):
    """Quickly turn a node (an int, or a 'a,b,c' merged-node string)
    into a set of ints."""
    if type(elemento) == str:
        conjunto = set([int(i) for i in elemento.split(',')])
    else:
        conjunto = set([elemento])
    return conjunto


# Membership-check helper.
def chequeadora_pertenencia(arco_inicial, arco_chequear):
    # Starting with condicion False means the edge under test should
    # NOT be added to the non-selectable list (yet).
    condicion = False

    # First turn the endpoints of "arco_chequear" into sets of ints.
    conj_1 = transformar_a_conj(arco_chequear[0])
    conj_2 = transformar_a_conj(arco_chequear[1])

    # Same for "arco_inicial".
    subconj_1 = transformar_a_conj(arco_inicial[0])
    subconj_2 = transformar_a_conj(arco_inicial[1])

    # Check whether the endpoints of "arco_inicial" are contained in the
    # endpoints of "arco_chequear" (in either orientation).
    if subconj_1.issubset(conj_1) and subconj_2.issubset(conj_2):
        condicion = True
    elif subconj_2.issubset(conj_1) and subconj_1.issubset(conj_2):
        condicion = True

    return condicion


#################################################
# Contract the graph repeatedly until only two (super)nodes remain.
def contraer_hasta_2_nodos(grafo, arco_inicial=None):
    # Node count: the stopping condition of the loop below.
    num_vertices_grafo = len(grafo.nodes())

    # Flag: are we on the first iteration or not?
    primera_iteracion = True

    # While more than two vertices remain, keep contracting:
    while num_vertices_grafo > 2:
        # The following is the pool from which a random edge is chosen;
        # note that the edge selected as arco_inicial is excluded.
        if primera_iteracion:
            # At the end of this branch the flag is set to False.
            conjunto_eleccion_arco = set(grafo.edges()).difference(
                [arco_inicial])
            listado_seleccion = list(conjunto_eleccion_arco)
            elem_rand = random.choice(listado_seleccion)
            indice_arco_azar = grafo.edges().index(elem_rand)
            primera_iteracion = False

        # Second iteration onwards:
        else:
            # Create/clear the non-selectable list.
            listado_no_elegible = []

            # Here the respective exclusions are computed.
            for arco in grafo.edges():
                # "se_elige_arco" may look confusing: it really means
                # "chosen so as NOT to be selectable" — when the helper
                # returns True the edge joins the excluded list.
                se_elige_arco = chequeadora_pertenencia(arco_inicial, arco)
                if se_elige_arco:
                    listado_no_elegible.append(arco)

            conjunto_eleccion_arco = set(grafo.edges()).difference(
                listado_no_elegible)
            listado_seleccion = list(conjunto_eleccion_arco)
            elem_rand = random.choice(listado_seleccion)
            indice_arco_azar = grafo.edges().index(elem_rand)
            primera_iteracion = False

        nodo_1 = grafo.edges()[indice_arco_azar][0]
        nodo_2 = grafo.edges()[indice_arco_azar][1]

        # Contract the chosen edge's endpoints into ONE node.
        # Remember this rebinds/updates the graph.
        grafo = contraer(grafo, nodo_1, nodo_2)

        # Recompute the stopping-condition parameter.
        num_vertices_grafo = len(grafo.nodes())

    # Return the final (two-node) graph.
    return grafo


#################################################
# Main entry point; remember that grafo is a networkX graph instance.
def min_cut(grafo, num_iteraciones=1000, arco_inicial=None):
    # Best cut weight so far; initialised to "infinity".
    suma_corte_minimo = 9999999999999999

    # The set of cut edges starts empty.
    conj_arcos_cortados = set()

    ### THIS FOR LOOP IS PARALLELIZABLE ###
    # Repeat the randomized contraction, keeping the minimum.
    for iteracion in range(num_iteraciones):
        # Contract the graph down to two nodes.
        resultado = contraer_hasta_2_nodos(grafo, arco_inicial)

        # Total weight of the resulting multigraph's edges = cut weight.
        iterador = resultado.edges_iter(data="weight")
        suma_corte = sum([i[2]["weight"] for i in iterador])

        # If this beats the previous best:
        if suma_corte < suma_corte_minimo:
            # Record the cut weight.
            suma_corte_minimo = suma_corte
            # Keep the best graph found so far.
            grafo_final = resultado

    ##################################################
    # Now fill in the set of cut edges.
    # If the first of the two remaining nodes is an integer:
    nodo_1 = grafo_final.nodes()[0]
    nodo_2 = grafo_final.nodes()[1]
    if type(nodo_1) == int:
        # The second one must then be a merged-node string.
        conj_arcos_cortados = set(
            [tuple(sorted((nodo_1, int(i)))) for i in nodo_2.split(',')])

    # Otherwise the first is a string:
    else:
        # In that case the second may be an integer,
        if type(nodo_2) == int:
            # in which case the same idea as the first "if" applies.
            conj_arcos_cortados = set(
                [tuple(sorted((int(i), nodo_2))) for i in nodo_1.split(',')])

        # Or the second is also a string:
        else:
            conj_arcos_cortados = set(
                [tuple(sorted((int(i), int(j)))) for i in nodo_1.split(',')
                 for j in nodo_2.split(',')])

    # Attach the results as attributes of the returned graph.
    grafo_final.Suma_corte_minimo = suma_corte_minimo
    grafo_final.conj_arcos_cortados = conj_arcos_cortados

    return grafo_final
{ "repo_name": "BRodas/k-cut", "path": "lib_python/Min_Cut_Kargers.py", "copies": "1", "size": "8073", "license": "mit", "hash": -6986822691029362000, "line_mean": 38, "line_max": 79, "alpha_frac": 0.6055989099, "autogenerated": false, "ratio": 3.0464150943396224, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.41520140042396225, "avg_score": null, "num_lines": null }
# Below is the code used to obtain the HSV and HSL maps.  Code to
# obtain the Histogram of Oriented Gradients, a Local Binary Patterns
# histogram, and a grayscale-image histogram is also shown.  However,
# although not documented here, tests showed their precision did not
# exceed 90%; for that reason the models were focused on Neural
# Networks instead.

# Import the libraries to be used.
import os
import cv2
import numpy as np

####################################################
# Code to obtain the Histogram of Oriented Gradients.
# Gradient magnitudes are accumulated into 8 orientation bins (45
# degrees apart), with linear interpolation between adjacent bins,
# then L2-normalized.
def hog(img):
    # assumes img is a single-channel (grayscale) image — TODO confirm
    rows,cols=img.shape
    gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
    gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
    mag, ang = cv2.cartToPolar(gx, gy)
    # Convert angles from radians to degrees.
    ang=ang*180/np.pi
    hog=[0,0,0,0,0,0,0,0]
    for j in range (0,rows):
        for k in range (0,cols):
            # Each branch splits the magnitude between the two bins
            # adjacent to the pixel's gradient angle.
            if (ang[j,k]==0):
                hog[0]=hog[0]+mag[j,k]
            elif ((ang[j,k]>0) and (ang[j,k]<45)):
                hog[0]=hog[0]+((45-ang[j,k])/45)*mag[j,k]
                hog[1]=hog[1]+((ang[j,k]-0)/45)*mag[j,k]
            elif (ang[j,k]==45):
                hog[1]=hog[1]+mag[j,k]
            elif ((ang[j,k]>45) and (ang[j,k]<90)):
                hog[1]=hog[1]+((90-ang[j,k])/45)*mag[j,k]
                hog[2]=hog[2]+((ang[j,k]-45)/45)*mag[j,k]
            elif (ang[j,k]==90):
                hog[2]=hog[2]+mag[j,k]
            elif ((ang[j,k]>90) and (ang[j,k]<135)):
                hog[2]=hog[2]+((135-ang[j,k])/45)*mag[j,k]
                hog[3]=hog[3]+((ang[j,k]-90)/45)*mag[j,k]
            elif (ang[j,k]==135):
                hog[3]=hog[3]+mag[j,k]
            elif ((ang[j,k]>135) and (ang[j,k]<180)):
                hog[3]=hog[3]+((180-ang[j,k])/45)*mag[j,k]
                hog[4]=hog[4]+((ang[j,k]-135)/45)*mag[j,k]
            elif (ang[j,k]==180):
                hog[4]=hog[4]+mag[j,k]
            elif ((ang[j,k]>180) and (ang[j,k]<225)):
                hog[4]=hog[4]+((225-ang[j,k])/45)*mag[j,k]
                hog[5]=hog[5]+((ang[j,k]-180)/45)*mag[j,k]
            elif (ang[j,k]==225):
                hog[5]=hog[5]+mag[j,k]
            elif ((ang[j,k]>225) and (ang[j,k]<270)):
                hog[5]=hog[5]+((270-ang[j,k])/45)*mag[j,k]
                hog[6]=hog[6]+((ang[j,k]-225)/45)*mag[j,k]
            elif (ang[j,k]==270):
                hog[6]=hog[6]+mag[j,k]
            elif ((ang[j,k]>270) and (ang[j,k]<315)):
                hog[6]=hog[6]+((315-ang[j,k])/45)*mag[j,k]
                hog[7]=hog[7]+((ang[j,k]-270)/45)*mag[j,k]
            elif (ang[j,k]==315):
                hog[7]=hog[7]+mag[j,k]
            elif ((ang[j,k]>315) and (ang[j,k]<361)):
                # NOTE(review): the upper-interpolation bin here wraps
                # back to bin 0 and uses 361 as the upper bound —
                # presumably to cover cartToPolar's <=360-degree range.
                hog[7]=hog[7]+((361-ang[j,k])/45)*mag[j,k]
                hog[0]=hog[0]+((ang[j,k]-315)/45)*mag[j,k]
            else:
                print('error de angulo y magnitud')
    # L2-normalize the descriptor.
    hog=hog/np.linalg.norm(hog)
    return hog

######################################
# Code to obtain a Local Binary Patterns histogram.
def getlbp(img):
    # assumes img is a single-channel (grayscale) image — TODO confirm
    rows,cols=img.shape
    lbp=np.zeros((rows,cols),dtype=np.uint8)
    # For each interior pixel, build the 8-bit LBP code by comparing it
    # against its 8 neighbours (each neighbour contributes one bit).
    for i in range (1,rows-1):
        for j in range (1,cols-1):
            a=img[i,j]
            if a<img[i-1,j-1]:
                lbp[i,j]=lbp[i,j]+127
            if a<img[i,j-1]:
                lbp[i,j]=lbp[i,j]+0
            if a<img[i+1,j-1]:
                lbp[i,j]=lbp[i,j]+2
            if a<img[i-1,j]:
                lbp[i,j]=lbp[i,j]+64
            if a<img[i+1,j]:
                lbp[i,j]=lbp[i,j]+4
            if a<img[i-1,j+1]:
                lbp[i,j]=lbp[i,j]+32
            if a<img[i,j+1]:
                lbp[i,j]=lbp[i,j]+16
            if a<img[i+1,j+1]:
                lbp[i,j]=lbp[i,j]+8
    # NOTE(review): the weights 127 and 0 above look suspicious for a
    # standard LBP (expected powers of two 128 and 1) — confirm whether
    # this deviation is intentional.
    hist=cv2.calcHist([lbp],[0],None,[256],[0,256])
    # L2-normalize the histogram.
    hist=hist/np.linalg.norm(hist)
    return hist

#############################################
# Code to obtain the HSV color map (2-D hue/saturation histogram).
def getmapahsv(img):
    hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
    hist = cv2.calcHist( [hsv], [0, 1], None, [45, 64], [0, 180, 0, 256] )
    hist = hist/np.linalg.norm(hist)
    return hist

#################################################
# Code to obtain the HSL color map (2-D hue/saturation histogram;
# note channel 2 of OpenCV's HLS ordering is saturation).
def getmapahsl(img):
    hsl = cv2.cvtColor(img,cv2.COLOR_BGR2HLS)
    hist = cv2.calcHist( [hsl], [0, 2], None, [45, 64], [0, 180, 0, 256] )
    hist = hist/np.linalg.norm(hist)
    return hist

#####################################################
# Code to obtain the HSL hue histogram.
def gethslhuehist(img):
    hsl = cv2.cvtColor(img,cv2.COLOR_BGR2HLS)
    hist = cv2.calcHist( [hsl], [0], None, [180], [0, 180] )
    hist=hist/np.linalg.norm(hist)
    return hist

####################################################
# Code to obtain the histogram of the grayscale image.
def getgrayhist(img):
    gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    hist=cv2.calcHist( [gray], [0], None, [256], [0, 256] )
    hist = hist/np.linalg.norm(hist)
    return hist

####################################################
raw_data_dir='Data/Raw/'
destiny_data_dir='Data/'

# Driver: use the functions above to generate the data files that Keras
# will consume (one .npy per split/class for each color map).
for dir1 in ['train/','validation/']:
    for dir2 in['negativo/','positivo/']:
        db1=[]
        db2=[]
        for filename in os.listdir(raw_data_dir+dir1+dir2):
            path_x=raw_data_dir+dir1+dir2+str(filename)
            # Read as color (BGR).
            img=cv2.imread(path_x,1)
            mapahsl=getmapahsl(img)
            mapahsv=getmapahsv(img)
            db1.append(mapahsl)
            db2.append(mapahsv)
        np.save(destiny_data_dir+'mapahsv/'+dir1+dir2+'db.npy',db2)
        np.save(destiny_data_dir+'mapahsl/'+dir1+dir2+'db.npy',db1)
{ "repo_name": "a-bacilio/Codigo-de-tesis-descarte", "path": "obtener_descriptores.py", "copies": "1", "size": "5970", "license": "mit", "hash": -3389036735799320000, "line_mean": 39.6783216783, "line_max": 80, "alpha_frac": 0.4796911715, "autogenerated": false, "ratio": 2.5836947094535994, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.35633858809535995, "avg_score": null, "num_lines": null }
"""A contour component. This component wraps around the tvtk.ContourFilter and provides convenient options to either automatically generate a specified number of contours between a given minimum and maximum value or explicitly specify the contours. This component may be used for any input data. The component also provides a convenient option to create "filled contours". """ # Author: Prabhu Ramachandran <prabhu_r@users.sf.net> # Copyright (c) 2005, Enthought, Inc. # License: BSD Style. # Standard library imports. import numpy # Enthought library imports. from traits.api import Instance, List, Tuple, Bool, Range, \ Float, Property from tvtk.api import tvtk # Local imports. from mayavi.core.component import Component from mayavi.core.common import error from mayavi.components.common \ import get_module_source, convert_to_poly_data ###################################################################### # `Contour` class. ###################################################################### class Contour(Component): # The version of this class. Used for persistence. __version__ = 0 # The contour filter being currently used. contour_filter = Property # Specify if filled contours are generated. filled_contours = Bool(False, desc='if filled contours are '\ 'to be generated') # Specify if contours are generated explicitly or automatically. auto_contours = Bool(False, desc='if contours are '\ 'given explicitly or automatically computed') # Number of contours, used when `auto_contours` are chosen. number_of_contours = Range(1, 100000, enter_set=True, auto_set=False, desc='number of contours to generate') # Minimum contour, this is the starting value when `auto_contours` # is turned on. minimum_contour = Range(value=0.0, low='_data_min', high='_data_max', enter_set=True, auto_set=False, desc='the starting contour value') # Maximum contour, this is the last contour when `auto_contours` # is turned on. 
maximum_contour = Range(value=0.0, low='_data_min', high='_data_max', enter_set=True, auto_set=False, desc='the ending contour value') # The explicit contours to create. These specify the contours # explicitly and are used when `auto_contours` is turned off. The # traits of the items in the list are dynamically generated based # on input data. contours = List(Range(value='_default_contour', low='_data_min', high='_data_max', enter_set=True, auto_set=False, ), rows=3, desc='explicitly the contours to be generated') # Specify if the filled contour option should be shown in the view # or not. This is useful in situations like the iso_surface # module where it does not make sense to use filled contours at # all. show_filled_contours = Bool(True) # Specify if the lower and upper bound for the data is to be # automatically reset or not. auto_update_range = Bool(True, desc='if the contour range is updated automatically') ######################################## # The component's view #view = View(Group(Item(name='filled_contours', # defined_when='show_filled_contours'), # Item(name='auto_contours'), '_', # Item(name='contours', # style='custom', # visible_when='not auto_contours'), # Item(name='number_of_contours', # visible_when='auto_contours'), # Item(name='minimum_contour', # visible_when='auto_contours'), # Item(name='maximum_contour', # visible_when='auto_contours'), # Item(name='auto_update_range'), # Item(name='_data_min', # label='Data minimum', # visible_when='not auto_update_range'), # Item(name='_data_max', # label='Data maximum', # visible_when='not auto_update_range'), # ) # ) ######################################## # Private traits. _current_range = Tuple # The minimum value of the input data. Set to a very large negative value # to avoid errors prior to the object being added to the mayavi # tree. _data_min = Float(-1e20, enter_set=True, auto_set=False) # The maximum value of the input data. 
Set to a very large value # to avoid errors prior to the object being added to the mayavi # tree. _data_max = Float(1e20, enter_set=True, auto_set=False) # The default value of the contour to add, this property is computed # from the _data_min and _data_max traits and used when the user # adds a contour manually from the UI when auto_contours are turned # off. _default_contour = Property(Float) # The contour filter. _cont_filt = Instance(tvtk.ContourFilter, args=()) # The filled contour filter. This filter generates the filled contours. _fill_cont_filt = Instance(tvtk.BandedPolyDataContourFilter, args=(), kw={'clipping': 1, 'scalar_mode':'value'}) ###################################################################### # `object` interface ###################################################################### def __get_pure_state__(self): d = super(Contour, self).__get_pure_state__() # These traits are dynamically created. for name in ('_data_min', '_data_max', '_default_contour'): d.pop(name, None) return d ###################################################################### # `Component` interface ###################################################################### def update_pipeline(self): """Override this method so that it *updates* the tvtk pipeline when data upstream is known to have changed. This method is invoked (automatically) when the input fires a `pipeline_changed` event. """ if not self._has_input(): return cf = self._set_contour_input() first = False if len(self._current_range) == 0: first = True self._update_ranges() # If this is the first time, create a default contour if first: cr = self._current_range self.contours = [(cr[0] + cr[1])/2] self.minimum_contour = cr[0] self.maximum_contour = cr[1] self.outputs = [cf.output] def update_data(self): """Override this method to do what is necessary when upstream data changes. This method is invoked (automatically) when any of the inputs sends a `data_changed` event. 
""" self._update_ranges() # Propagage the data changed event. self.data_changed = True def has_output_port(self): """ The contour filter has an output port.""" return True def get_output_object(self): """ Returns the output port.""" return self.contour_filter.output_port ###################################################################### # Non-public methods. ###################################################################### def _contours_items_changed(self, list_event): if self.auto_contours or not self._has_input(): return cf = self.contour_filter added, removed, index = (list_event.added, list_event.removed, list_event.index) if len(added) == len(removed): cf.set_value(index, added[0]) cf.update() self.data_changed = True else: self._contours_changed(self.contours) def _contours_changed(self, values): if self.auto_contours or not self._has_input(): return cf = self.contour_filter cf.number_of_contours = len(values) for i, x in enumerate(values): cf.set_value(i, x) cf.update() self.data_changed = True def _update_ranges(self): # Here we get the module's source since the input of this # component may not in general represent the entire object. 
if not self.auto_update_range: return src = get_module_source(self.inputs[0]) sc = src.outputs[0].point_data.scalars if sc is not None: sc_array = sc.to_array() has_nan = numpy.isnan(sc_array).any() if has_nan: rng = (float(numpy.nanmin(sc_array)), float(numpy.nanmax(sc_array))) else: rng = sc.range else: error('Cannot contour: No scalars in input data!') rng = (0.0, 1.0) if rng != self._current_range: self.set(_data_min=rng[0], _data_max=rng[1], trait_change_notify=False) self._clip_contours(rng) self._current_range = rng def _minimum_contour_changed(self, value): self._do_auto_contours() def _maximum_contour_changed(self, value): self._do_auto_contours() def _number_of_contours_changed(self, value): self._do_auto_contours() def _auto_contours_changed(self, value): if value: self._do_auto_contours() else: self._contours_changed(self.contours) def _auto_update_range_changed(self, value): if value: rng = self._data_min, self._data_max self._current_range = rng self._update_ranges() self.trait_property_changed('_data_min', rng[0], self._data_min) self.trait_property_changed('_data_max', rng[1], self._data_max) def _do_auto_contours(self): if not self._has_input(): return if self.auto_contours: minc, maxc = self.minimum_contour, self.maximum_contour self.contour_filter.generate_values(self.number_of_contours, min(minc, maxc), max(minc, maxc)) self.data_changed = True def _filled_contours_changed(self, val): if not self._has_input(): return cf = self._set_contour_input() # This will trigger a change. self._auto_contours_changed(self.auto_contours) self.outputs = [cf.output] def _get_contour_filter(self): if self.filled_contours: return self._fill_cont_filt else: return self._cont_filt def _set_contour_input(self): """Sets the input to the appropriate contour filter and returns the currently used contour filter. 
""" inp = self.inputs[0].outputs[0] cf = self.contour_filter if self.filled_contours: inp = convert_to_poly_data(inp) self.configure_input_data(cf, inp) else: self.configure_connection(cf, self.inputs[0]) cf.update() return cf def _has_input(self): """Returns if this component has a valid input.""" if (len(self.inputs) > 0) and \ (len(self.inputs[0].outputs) > 0): return True else: return False def _clip_contours(self, rng): """Clips the contour related values when the data range has changed. The new range is given as the argument. """ ctr = [] dmin, dmax = rng ctr = [min(max(x, dmin), dmax) for x in self.contours] if self.auto_contours or ctr != self.contours: self.contours = ctr self.set(minimum_contour=self._data_min, maximum_contour=self._data_max, trait_change_notify=False) self._do_auto_contours() def _get__default_contour(self): return (self._data_min + self._data_max)*0.5
{ "repo_name": "liulion/mayavi", "path": "mayavi/components/contour.py", "copies": "3", "size": "12760", "license": "bsd-3-clause", "hash": -7955994203910183000, "line_mean": 36.7514792899, "line_max": 82, "alpha_frac": 0.5257053292, "autogenerated": false, "ratio": 4.3833734111989004, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.64090787403989, "avg_score": null, "num_lines": null }
"""A contour grid plane module. This module lets one take a slice of input grid data and view contours of the data. The module only works for structured points, rectilinear grid and structured grid input. """ # Author: Prabhu Ramachandran <prabhu_r@users.sf.net> # Copyright (c) 2005-2006, Enthought, Inc. # License: BSD Style. # Enthought library imports. from traits.api import Instance, Bool from traitsui.api import View, Group, Item # Local imports from mayavi.core.module import Module from mayavi.components.grid_plane import GridPlane from mayavi.components.contour import Contour from mayavi.components.actor import Actor from mayavi.core.pipeline_info import PipelineInfo ###################################################################### # `ContourGridPlane` class. ###################################################################### class ContourGridPlane(Module): # The version of this class. Used for persistence. __version__ = 0 # The grid plane component. grid_plane = Instance(GridPlane, allow_none=False, record=True) # Specifies if contouring is to be done or not. enable_contours = Bool(True, desc='if contours are generated') # The contour component that contours the data. contour = Instance(Contour, allow_none=False, record=True) # The actor component that represents the visualization. 
actor = Instance(Actor, allow_none=False, record=True) input_info = PipelineInfo(datasets=['image_data', 'structured_grid', 'rectilinear_grid'], attribute_types=['any'], attributes=['any']) view = View([Group(Item(name='grid_plane', style='custom'), show_labels=False), Group(Item(name='enable_contours')), Group(Item(name='contour', style='custom', enabled_when='object.enable_contours'), Item(name='actor', style='custom'), show_labels=False) ] ) ###################################################################### # `Module` interface ###################################################################### def setup_pipeline(self): """Override this method so that it *creates* the tvtk pipeline. This method is invoked when the object is initialized via `__init__`. Note that at the time this method is called, the tvtk data pipeline will *not* yet be setup. So upstream data will not be available. The idea is that you simply create the basic objects and setup those parts of the pipeline not dependent on upstream sources and filters. You should also set the `actors` attribute up at this point. """ # Create the components self.grid_plane = GridPlane() self.contour = Contour(auto_contours=True, number_of_contours=10) self.actor = Actor() def update_pipeline(self): """Override this method so that it *updates* the tvtk pipeline when data upstream is known to have changed. This method is invoked (automatically) when any of the inputs sends a `pipeline_changed` event. """ mm = self.module_manager if mm is None: return # Data is available, so set the input for the grid plane. self.grid_plane.inputs = [mm.source] # This makes sure that any changes made to enable_contours # when the module is not running are updated when it is # started. self._enable_contours_changed(self.enable_contours) # Set the LUT for the mapper. 
self.actor.set_lut(mm.scalar_lut_manager.lut) self.pipeline_changed = True def update_data(self): """Override this method so that it flushes the vtk pipeline if that is necessary. This method is invoked (automatically) when any of the inputs sends a `data_changed` event. """ # Just set data_changed, the components should do the rest if # they are connected. self.data_changed = True ###################################################################### # Non-public methods. ###################################################################### def _filled_contours_changed(self, value): """When filled contours are enabled, the mapper should use the the cell data, otherwise it should use the default scalar mode. """ if value: self.actor.mapper.scalar_mode = 'use_cell_data' else: self.actor.mapper.scalar_mode = 'default' self.render() def _enable_contours_changed(self, value): """Turns on and off the contours.""" if self.module_manager is None: return if value: self.actor.inputs = [self.contour] if self.contour.filled_contours: self.actor.mapper.scalar_mode = 'use_cell_data' else: self.actor.inputs = [self.grid_plane] self.actor.mapper.scalar_mode = 'default' self.render() def _grid_plane_changed(self, old, new): cont = self.contour if cont is not None: cont.inputs = [new] self._change_components(old, new) def _contour_changed(self, old, new): if old is not None: old.on_trait_change(self._filled_contours_changed, 'filled_contours', remove=True) new.on_trait_change(self._filled_contours_changed, 'filled_contours') # Setup the contours input. gp = self.grid_plane if gp is not None: new.inputs = [gp] # Setup the actor. actor = self.actor if actor is not None: actor.inputs = [new] self._change_components(old, new) def _actor_changed(self, old, new): if old is None: # First time this is set. new.property.set(line_width=2.0) # Set the actors scene and input. new.scene = self.scene cont = self.contour if cont is not None: new.inputs = [cont] self._change_components(old, new)
{ "repo_name": "alexandreleroux/mayavi", "path": "mayavi/modules/contour_grid_plane.py", "copies": "3", "size": "6426", "license": "bsd-3-clause", "hash": -4767929185870938000, "line_mean": 36.1445086705, "line_max": 74, "alpha_frac": 0.5577342048, "autogenerated": false, "ratio": 4.4937062937062935, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6551440498506294, "avg_score": null, "num_lines": null }
"""A controller for watering a plant. This acts a both a source (current output state) and a sink (receives sensor data to act on). """ import atexit import logging from threading import Lock, Timer from RPi import GPIO from sensor_feed.sensor import SleepingSensor from sensor_feed.sink import Sink LOGGER = logging.getLogger(__name__) GPIO.setmode(GPIO.BCM) class PlantControl(SleepingSensor, Sink): """A controller to water a plant.""" param_name = 'water_input' param_id = 'water_input' param_unit = 'seconds' trigger_param = 'soil' def __init__(self, *args, **kwargs): super(SleepingSensor, self).__init__(*args, **kwargs) super(Sink, self).__init__(*args, **kwargs) self._water_input = 0 self.threshold = 1300 self.water_period = 20 self.min_period = self.water_period + 2 self._watering = Lock() self.gpio_pin = 17 GPIO.setup(self.gpio_pin, GPIO.OUT) atexit.register(GPIO.cleanup) def get_value(self): last_water = self._water_input self._water_input = 0 return last_water def process_value(self, param_name, timestamp, value): # only interested in one parameter if param_name != self.trigger_param: return if value > self.threshold: self.apply_water() def apply_water(self): LOGGER.critical('Applying water.') self._water_input += self.water_period if self._watering.locked(): LOGGER.critical('Already watering.') return self._watering.acquire() # turn on water supply. GPIO.output(self.gpio_pin, GPIO.HIGH) LOGGER.critical('Tap on.') timer = Timer(self.water_period, self._stop) timer.start() def _stop(self): LOGGER.critical('Tap off.') GPIO.output(self.gpio_pin, GPIO.LOW) self._watering.release() def __del__(self): GPIO.output(self.gpio_pin, GPIO.LOW) LOGGER.critical('Ensure tap off.')
{ "repo_name": "dmkent/sensor-feed", "path": "sensor_feed/plant_control.py", "copies": "1", "size": "2050", "license": "mit", "hash": -6450536561402441000, "line_mean": 25.9736842105, "line_max": 76, "alpha_frac": 0.6117073171, "autogenerated": false, "ratio": 3.7545787545787546, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9864719655638654, "avg_score": 0.0003132832080200501, "num_lines": 76 }
"""A convenience class for a GTK 3 system tray indicator.""" from pkg_resources import resource_filename import gi gi.require_version('Gtk', '3.0') # noqa from gi.repository import Gtk from gi.repository.GdkPixbuf import Pixbuf class Indicator: """This class defines a standard GTK3 system tray indicator. Class Indicator can be easily reused in any other project. """ def __init__(self, icon, title=None): """Create indicator icon and add menu. Args: icon (str): path to initial icon that will be shown on system panel """ self._icon_cache = {} self.icon = Gtk.StatusIcon.new_from_pixbuf(self._get_icon(icon)) self.menu = Gtk.Menu() self.icon.connect('activate', self.on_popup_menu_open) self.icon.connect('popup-menu', self.on_popup_menu_open) if title: self.icon.set_title(title) def _get_icon(self, icon): """Return icon from package as GdkPixbuf.Pixbuf. Extracts the image from package to a file, stores it in the icon cache if it's not in there yet and returns it. Otherwise just returns the image stored in the cache. """ if icon not in self._icon_cache: filename = resource_filename(__name__, "images/%s" % icon) self._icon_cache[icon] = Pixbuf.new_from_file(filename) return self._icon_cache[icon] def set_icon(self, icon): """Set new icon in system tray. Args: icon (str): path to file with new icon """ self.icon.set_from_pixbuf(self._get_icon(icon)) def set_tooltip(self, callback): self.icon.set_has_tooltip(True) self.icon.connect("query-tooltip", callback) def clear_menu(self): """Clear all entries from the main menu.""" self.menu = Gtk.Menu() def add_menu_item(self, command=None, title=None, icon=None, enabled=True, is_check=False, active=False, menu=None, data=None): """Add mouse right click menu item. 
Args: command (callable): function that will be called after left mouse click on title title (str): label that will be shown in menu icon (str): name of icon stored in application package active (bool): whether the menu entry can be activated (default: True) data (obj): arbitrary data to associate with the menu entry """ if icon: m_item = Gtk.ImageMenuItem(title) image = Gtk.Image.new_from_pixbuf(self._get_icon(icon)) m_item.set_image(image) elif is_check: m_item = Gtk.CheckMenuItem(title) m_item.set_active(active) else: m_item = Gtk.MenuItem(title) if command: m_item.connect('toggled' if is_check else 'activate', command) m_item.set_sensitive(enabled) m_item.data = data if menu: menu.append(m_item) else: self.menu.append(m_item) return m_item def add_submenu(self, title): """Add a sub menu popup menu.""" submenu = Gtk.Menu() m_item = Gtk.MenuItem(title) m_item.set_submenu(submenu) self.menu.append(m_item) return submenu def add_separator(self): """Add separator between labels in the popup menu.""" m_item = Gtk.SeparatorMenuItem() self.menu.append(m_item) def on_popup_menu_open(self, widget=None, button=None, *args): """Some action requested opening the popup menu.""" self.menu.popup(None, None, Gtk.StatusIcon.position_menu, widget or self.icon, button or 1, Gtk.get_current_event_time()) def on_popup_menu_close(self, widget=None, button=None, *args): """Some action requested closing the popup menu.""" self.menu.popdown()
{ "repo_name": "SpotlightKid/jack-select", "path": "jackselect/indicator.py", "copies": "1", "size": "3938", "license": "mit", "hash": 3091213283380261400, "line_mean": 31.5454545455, "line_max": 94, "alpha_frac": 0.5977653631, "autogenerated": false, "ratio": 3.910625620655412, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5008390983755412, "avg_score": null, "num_lines": null }
""" a convenience layer for creating data in the core and debugging it """ import collections from decorator import decorator from gatesym import core class Node(object): """ a point in the network of gates """ def __init__(self, name): self.name = name self.outputs = [] self.inputs = [] self.block = None def attach_output(self, output): """ connect an output at the logical level, output can be any node """ self.outputs.append(output) output.inputs.append(self) def connect_output(self, output, negate): """ connect an output at the phycical level, output must be a gate """ raise NotImplementedError @property def all_outputs(self): return self.outputs def find(self, path, location=""): """ look up a related node by path """ if location: location = location + "." location = location + self.name parts = path.split(".", 1) head = parts[0] tail = parts[1] if len(parts) > 1 else "" if head: for l in self.all_outputs: if l.name == head: return l.find(tail, location) else: raise ValueError( "at " + location + " expected one of " + repr([o.name for o in self.outputs]) ) else: return self def list(self, path): """ look up a related node by path and list it's outputs """ return [o.name for o in self.find(path).all_outputs] def watch(self, name): """ set a watch on this node """ self.network.watch(self.index, name, False) def full_name(self): """ trace the first inputs back until we find a node with no inputs and return the path from there to here this could be better, jumping across blocks for example """ possible_inputs = [i for i in self.inputs if ")" not in i.name] if possible_inputs: print([i.name for i in self.inputs]) return possible_inputs[0].full_name() + "." 
+ self.name else: print([i.name for i in self.inputs]) return self.name class Gate(Node): """ handles to gates in the core """ def __init__(self, network, index, name, inputs=[]): super().__init__(name) self.network = network self.index = index for input_ in inputs: input_.attach_output(self) input_.connect_output(self, False) def __repr__(self): return "{self.__class__.__name__}<{self.index}>({value})".format( self=self, value=self.read() ) def read(self): return self.network.read(self.index) def connect_output(self, output, negate): self.network.add_link(self.index, output.index, negate) class Tie(Gate): def __init__(self, network, value): value = bool(value) index = network.add_gate(core.TIE, self) super().__init__(network, index, "tie") self.network.write(self.index, value) class Switch(Gate): def __init__(self, network, value=False): value = bool(value) index = network.add_gate(core.SWITCH, self) super().__init__(network, index, "switch") self.write(value) def write(self, value): self.network.write(self.index, value) class And(Gate): def __init__(self, *inputs): assert inputs network = inputs[0].network index = network.add_gate(core.AND, self) super().__init__(network, index, "and", inputs) class Or(Gate): def __init__(self, *inputs): assert inputs network = inputs[0].network index = network.add_gate(core.OR, self) super().__init__(network, index, "or", inputs) def nand(*inputs): return Not(And(*inputs)) class Link(Node): """ interesting steps along the path between two gates """ def __init__(self, node, name, block, is_output): super().__init__(name) self.block = block self.is_output = is_output self.node = node node.attach_output(self) @property def network(self): return self.node.network def read(self): return self.node.read() def connect_output(self, output, negate): return self.node.connect_output(output, negate) @property def index(self): return self.node.index @property def all_outputs(self): if self.block and not self.is_output: return 
self.block.outputs + self.outputs else: return self.outputs class Not(Link): def __init__(self, node): super().__init__(node, "not", None, False) def read(self): return not self.node.read() def connect_output(self, output, negate): return self.node.connect_output(output, not negate) def watch(self, name): """ set a watch on this node """ self.network.watch(self.index, name, True) class Placeholder(Node): """ a placeholder we will replace with a real node later """ def __init__(self, network): super().__init__("placeholder") self.network = network self.connected = [] self.attached = [] self.actual = None def attach_output(self, output): """ connect an output at the logical level, output can be any node """ if self.actual: self.actual.attach_output(output) else: self.attached.append(output) def connect_output(self, output, negate): if self.actual: self.actual.connect_output(output, negate) else: self.connected.append((output, negate)) def replace(self, input): assert not self.actual self.actual = input for o in self.attached: input.attach_output(o) for o, n in self.connected: input.connect_output(o, n) def __getattr__(self, name): assert self.actual return getattr(self.actual, name) def link_factory(obj, name1, name2, block, is_output): """ wrap links around a bunch of nodes in an arbitrarily nested structure """ if isinstance(obj, collections.Iterable): if name1 and not name1.endswith("("): name1 = name1 + "," return [ link_factory(o, name1 + str(i), name2, block, is_output) for i, o in enumerate(obj) ] elif isinstance(obj, Node): link = Link(obj, name1 + name2, block, is_output) if is_output: block.outputs.append(link) else: block.inputs.append(link) return link else: return obj class Block(object): """ wrapper around a functional block, intended to be used via the decorator below """ def __init__(self, name): self.name = name self.outputs = [] self.inputs = [] self.size = None def _find_network(thing): """ given a bunch of nested stuff find one that has a network 
property and return it the existance of this speaks to issues with how I'm handling the relations between blocks and nodes and the network """ if hasattr(thing, "network"): return thing.network if isinstance(thing, collections.Iterable): for item in thing: tmp = _find_network(item) if tmp: return tmp return None def _block(func, *args): network = _find_network(args) old_size = network.get_size() block = Block(func.__name__) args = link_factory(args, func.__name__ + "(", "", block, False) res = func(*args) res = link_factory(res, "", ")", block, True) block.size = network.get_size() - old_size return res def block(func): return decorator(_block, func)
{ "repo_name": "tolomea/gatesym", "path": "gatesym/gates.py", "copies": "1", "size": "7919", "license": "mit", "hash": -425313775395992770, "line_mean": 26.9823321555, "line_max": 119, "alpha_frac": 0.5724207602, "autogenerated": false, "ratio": 4.046499744506899, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0002753788226358707, "num_lines": 283 }
"""A convenience which constructs expression trees from an easy-to-read syntax Use this unless you have a compelling reason not to; it performs some optimizations that would be tedious to do when constructing an expression tree by hand. """ from collections import Mapping from inspect import isfunction, ismethod from parsimonious.exceptions import BadGrammar, UndefinedLabel from parsimonious.expressions import (Literal, Regex, Sequence, OneOf, Lookahead, Optional, ZeroOrMore, OneOrMore, Not, TokenMatcher, expression) from parsimonious.nodes import NodeVisitor from parsimonious.utils import StrAndRepr, evaluate_string from six import text_type, iterkeys, itervalues, iteritems, python_2_unicode_compatible @python_2_unicode_compatible class Grammar(StrAndRepr, Mapping): """A collection of rules that describe a language You can start parsing from the default rule by calling ``parse()`` directly on the ``Grammar`` object:: g = Grammar(''' polite_greeting = greeting ", my good " title greeting = "Hi" / "Hello" title = "madam" / "sir" ''') g.parse('Hello, my good sir') Or start parsing from any of the other rules; you can pull them out of the grammar as if it were a dictionary:: g['title'].parse('sir') You could also just construct a bunch of ``Expression`` objects yourself and stitch them together into a language, but using a ``Grammar`` has some important advantages: * Languages are much easier to define in the nice syntax it provides. * Circular references aren't a pain. * It does all kinds of whizzy space- and time-saving optimizations, like factoring up repeated subexpressions into a single object, which should increase cache hit ratio. [Is this implemented yet?] """ def __init__(self, rules='', **more_rules): """Construct a grammar. :arg rules: A string of production rules, one per line. :arg default_rule: The name of the rule invoked when you call :meth:`parse()` or :meth:`match()` on the grammar. Defaults to the first rule. 
Falls back to None if there are no string-based rules in this grammar. :arg more_rules: Additional kwargs whose names are rule names and values are Expressions or custom-coded callables which accomplish things the built-in rule syntax cannot. These take precedence over ``rules`` in case of naming conflicts. """ decorated_custom_rules = dict( (k, expression(v, k, self) if isfunction(v) or ismethod(v) else v) for k, v in iteritems(more_rules)) self._expressions, first = self._expressions_from_rules(rules, decorated_custom_rules) self.default_rule = first # may be None def __getitem__(self, rule_name): return self._expressions[rule_name] def __iter__(self): return iterkeys(self._expressions) def __len__(self): return len(self._expressions) def default(self, rule_name): """Return a new Grammar whose :term:`default rule` is ``rule_name``.""" new = self._copy() new.default_rule = new[rule_name] return new def _copy(self): """Return a shallow copy of myself. Deep is unnecessary, since Expression trees are immutable. Subgrammars recreate all the Expressions from scratch, and AbstractGrammars have no Expressions. """ new = Grammar(**self._expressions) new.default_rule = self.default_rule return new def _expressions_from_rules(self, rules, custom_rules): """Return a 2-tuple: a dict of rule names pointing to their expressions, and then the first rule. It's a web of expressions, all referencing each other. Typically, there's a single root to the web of references, and that root is the starting symbol for parsing, but there's nothing saying you can't have multiple roots. :arg custom_rules: A map of rule names to custom-coded rules: Expressions """ tree = rule_grammar.parse(rules) return RuleVisitor(custom_rules).visit(tree) def parse(self, text, pos=0): """Parse some text with the :term:`default rule`. 
:arg pos: The index at which to start parsing """ self._check_default_rule() return self.default_rule.parse(text, pos=pos) def match(self, text, pos=0): """Parse some text with the :term:`default rule` but not necessarily all the way to the end. :arg pos: The index at which to start parsing """ self._check_default_rule() return self.default_rule.match(text, pos=pos) def _check_default_rule(self): """Raise RuntimeError if there is no default rule defined.""" if not self.default_rule: raise RuntimeError("Can't call parse() on a Grammar that has no " "default rule. Choose a specific rule instead, " "like some_grammar['some_rule'].parse(...).") def __str__(self): """Return a rule string that, when passed to the constructor, would reconstitute the grammar.""" exprs = [self.default_rule] if self.default_rule else [] exprs.extend(expr for expr in itervalues(self) if expr is not self.default_rule) return '\n'.join(expr.as_rule() for expr in exprs) def __repr__(self): """Return an expression that will reconstitute the grammar.""" return "Grammar('%s')" % str(self).encode('string_escape') class TokenGrammar(Grammar): """A Grammar which takes a list of pre-lexed tokens instead of text This is useful if you want to do the lexing yourself, as a separate pass: for example, to implement indentation-based languages. """ def _expressions_from_rules(self, rules, custom_rules): tree = rule_grammar.parse(rules) return TokenRuleVisitor(custom_rules).visit(tree) class BootstrappingGrammar(Grammar): """The grammar used to recognize the textual rules that describe other grammars This grammar gets its start from some hard-coded Expressions and claws its way from there to an expression tree that describes how to parse the grammar description syntax. """ def _expressions_from_rules(self, rule_syntax, custom_rules): """Return the rules for parsing the grammar definition syntax. 
Return a 2-tuple: a dict of rule names pointing to their expressions, and then the top-level expression for the first rule. """ # Hard-code enough of the rules to parse the grammar that describes the # grammar description language, to bootstrap: comment = Regex(r'#[^\r\n]*', name='comment') meaninglessness = OneOf(Regex(r'\s+'), comment, name='meaninglessness') _ = ZeroOrMore(meaninglessness, name='_') equals = Sequence(Literal('='), _, name='equals') label = Sequence(Regex(r'[a-zA-Z_][a-zA-Z_0-9]*'), _, name='label') reference = Sequence(label, Not(equals), name='reference') quantifier = Sequence(Regex(r'[*+?]'), _, name='quantifier') # This pattern supports empty literals. TODO: A problem? spaceless_literal = Regex(r'u?r?"[^"\\]*(?:\\.[^"\\]*)*"', ignore_case=True, dot_all=True, name='spaceless_literal') literal = Sequence(spaceless_literal, _, name='literal') regex = Sequence(Literal('~'), literal, Regex('[ilmsux]*', ignore_case=True), _, name='regex') atom = OneOf(reference, literal, regex, name='atom') quantified = Sequence(atom, quantifier, name='quantified') term = OneOf(quantified, atom, name='term') not_term = Sequence(Literal('!'), term, _, name='not_term') term.members = (not_term,) + term.members sequence = Sequence(term, OneOrMore(term), name='sequence') or_term = Sequence(Literal('/'), _, term, name='or_term') ored = Sequence(term, OneOrMore(or_term), name='ored') expression = OneOf(ored, sequence, term, name='expression') rule = Sequence(label, equals, expression, name='rule') rules = Sequence(_, OneOrMore(rule), name='rules') # Use those hard-coded rules to parse the (more extensive) rule syntax. # (For example, unless I start using parentheses in the rule language # definition itself, I should never have to hard-code expressions for # those above.) 
rule_tree = rules.parse(rule_syntax) # Turn the parse tree into a map of expressions: return RuleVisitor().visit(rule_tree) # The grammar for parsing PEG grammar definitions: # This is a nice, simple grammar. We may someday add to it, but it's a safe bet # that the future will always be a superset of this. rule_syntax = (r''' # Ignored things (represented by _) are typically hung off the end of the # leafmost kinds of nodes. Literals like "/" count as leaves. rules = _ rule* rule = label equals expression equals = "=" _ literal = spaceless_literal _ # So you can't spell a regex like `~"..." ilm`: spaceless_literal = ~"u?r?\"[^\"\\\\]*(?:\\\\.[^\"\\\\]*)*\""is / ~"u?r?'[^'\\\\]*(?:\\\\.[^'\\\\]*)*'"is expression = ored / sequence / term or_term = "/" _ term ored = term or_term+ sequence = term term+ not_term = "!" term _ lookahead_term = "&" term _ term = not_term / lookahead_term / quantified / atom quantified = atom quantifier atom = reference / literal / regex / parenthesized regex = "~" spaceless_literal ~"[ilmsux]*"i _ parenthesized = "(" _ expression ")" _ quantifier = ~"[*+?]" _ reference = label !equals # A subsequent equal sign is the only thing that distinguishes a label # (which begins a new rule) from a reference (which is just a pointer to a # rule defined somewhere else): label = ~"[a-zA-Z_][a-zA-Z_0-9]*" _ # _ = ~r"\s*(?:#[^\r\n]*)?\s*" _ = meaninglessness* meaninglessness = ~r"\s+" / comment comment = ~r"#[^\r\n]*" ''') class LazyReference(text_type): """A lazy reference to a rule, which we resolve after grokking all the rules""" name = u'' # Just for debugging: def _as_rhs(self): return u'<LazyReference to %s>' % self class RuleVisitor(NodeVisitor): """Turns a parse tree of a grammar definition into a map of ``Expression`` objects This is the magic piece that breathes life into a parsed bunch of parse rules, allowing them to go forth and parse other things. 
""" quantifier_classes = {'?': Optional, '*': ZeroOrMore, '+': OneOrMore} visit_expression = visit_term = visit_atom = NodeVisitor.lift_child def __init__(self, custom_rules=None): """Construct. :arg custom_rules: A dict of {rule name: expression} holding custom rules which will take precedence over the others """ self.custom_rules = custom_rules or {} def visit_parenthesized(self, parenthesized, _a): """Treat a parenthesized subexpression as just its contents. Its position in the tree suffices to maintain its grouping semantics. """ (left_paren, _1, expression, right_paren, _2) = _a return expression def visit_quantifier(self, quantifier, _a): """Turn a quantifier into just its symbol-matching node.""" (symbol, _1) = _a return symbol def visit_quantified(self, quantified, _a): (atom, quantifier) = _a return self.quantifier_classes[quantifier.text](atom) def visit_lookahead_term(self, lookahead_term, _a): (ampersand, term, _) = _a return Lookahead(term) def visit_not_term(self, not_term, _a): (exclamation, term, _) = _a return Not(term) def visit_rule(self, rule, _a): """Assign a name to the Expression and return it.""" (label, equals, expression) = _a expression.name = label # Assign a name to the expr. return expression def visit_sequence(self, sequence, _a): """A parsed Sequence looks like [term node, OneOrMore node of ``another_term``s]. Flatten it out.""" (term, other_terms) = _a return Sequence(term, *other_terms) def visit_ored(self, ored, _a): (first_term, other_terms) = _a return OneOf(first_term, *other_terms) def visit_or_term(self, or_term, _a): """Return just the term from an ``or_term``. We already know it's going to be ored, from the containing ``ored``. """ (slash, _, term) = _a return term def visit_label(self, label, _a): """Turn a label into a unicode string.""" (name, _) = _a return name.text def visit_reference(self, reference, _a): """Stick a :class:`LazyReference` in the tree as a placeholder. We resolve them all later. 
""" (label, not_equals) = _a return LazyReference(label) def visit_regex(self, regex, _a): """Return a ``Regex`` expression.""" (tilde, literal, flags, _) = _a flags = flags.text.upper() pattern = literal.literal # Pull the string back out of the Literal # object. return Regex(pattern, ignore_case='I' in flags, locale='L' in flags, multiline='M' in flags, dot_all='S' in flags, unicode='U' in flags, verbose='X' in flags) def visit_spaceless_literal(self, spaceless_literal, visited_children): """Turn a string literal into a ``Literal`` that recognizes it.""" return Literal(evaluate_string(spaceless_literal.text)) def visit_literal(self, literal, _a): """Pick just the literal out of a literal-and-junk combo.""" (spaceless_literal, _) = _a return spaceless_literal def generic_visit(self, node, visited_children): """Replace childbearing nodes with a list of their children; keep others untouched. For our case, if a node has children, only the children are important. Otherwise, keep the node around for (for example) the flags of the regex rule. Most of these kept-around nodes are subsequently thrown away by the other visitor methods. We can't simply hang the visited children off the original node; that would be disastrous if the node occurred in more than one place in the tree. """ return visited_children or node # should semantically be a tuple def _resolve_refs(self, rule_map, expr, done): """Return an expression with all its lazy references recursively resolved. Resolve any lazy references in the expression ``expr``, recursing into all subexpressions. 
:arg done: The set of Expressions that have already been or are currently being resolved, to ward off redundant work and prevent infinite recursion for circular refs """ if isinstance(expr, LazyReference): label = text_type(expr) try: reffed_expr = rule_map[label] except KeyError: raise UndefinedLabel(expr) return self._resolve_refs(rule_map, reffed_expr, done) else: if getattr(expr, 'members', ()) and expr not in done: # Prevents infinite recursion for circular refs. At worst, one # of `expr.members` can refer back to `expr`, but it can't go # any farther. done.add(expr) expr.members = [self._resolve_refs(rule_map, member, done) for member in expr.members] return expr def visit_rules(self, node, _a): """Collate all the rules into a map. Return (map, default rule). The default rule is the first one. Or, if you have more than one rule of that name, it's the last-occurring rule of that name. (This lets you override the default rule when you extend a grammar.) If there are no string-based rules, the default rule is None, because the custom rules, due to being kwarg-based, are unordered. """ (_, rules) = _a # Map each rule's name to its Expression. Later rules of the same name # override earlier ones. This lets us define rules multiple times and # have the last declaration win, so you can extend grammars by # concatenation. rule_map = dict((expr.name, expr) for expr in rules) # And custom rules override string-based rules. This is the least # surprising choice when you compare the dict constructor: # dict({'x': 5}, x=6). rule_map.update(self.custom_rules) # Resolve references. This tolerates forward references. done = set() rule_map = dict((expr.name, self._resolve_refs(rule_map, expr, done)) for expr in itervalues(rule_map)) # isinstance() is a temporary hack around the fact that * rules don't # always get transformed into lists by NodeVisitor. We should fix that; # it's surprising and requires writing lame branches like this. 
return rule_map, (rule_map[rules[0].name] if isinstance(rules, list) and rules else None) class TokenRuleVisitor(RuleVisitor): """A visitor which builds expression trees meant to work on sequences of pre-lexed tokens rather than strings""" def visit_spaceless_literal(self, spaceless_literal, visited_children): """Turn a string literal into a ``TokenMatcher`` that matches ``Token`` objects by their ``type`` attributes.""" return TokenMatcher(evaluate_string(spaceless_literal.text)) def visit_regex(self, regex, _a): (tilde, literal, flags, _) = _a raise BadGrammar('Regexes do not make sense in TokenGrammars, since ' 'TokenGrammars operate on pre-lexed tokens rather ' 'than characters.') # Bootstrap to level 1... rule_grammar = BootstrappingGrammar(rule_syntax) # ...and then to level 2. This establishes that the node tree of our rule # syntax is built by the same machinery that will build trees of our users' # grammars. And the correctness of that tree is tested, indirectly, in # test_grammar. rule_grammar = Grammar(rule_syntax) # TODO: Teach Expression trees how to spit out Python representations of # themselves. Then we can just paste that in above, and we won't have to # bootstrap on import. Though it'll be a little less DRY. [Ah, but this is not # so clean, because it would have to output multiple statements to get multiple # refs to a single expression hooked up.]
{ "repo_name": "smurfix/parsimonious", "path": "parsimonious/grammar.py", "copies": "1", "size": "19455", "license": "mit", "hash": 6409841884774660000, "line_mean": 38.1448692153, "line_max": 94, "alpha_frac": 0.6156772038, "autogenerated": false, "ratio": 4.197411003236246, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0006255467080068212, "num_lines": 497 }
"""A convenient API to access the GPIO pins of the Raspberry Pi. """ import subprocess from quick2wire.board_revision import revision from quick2wire.selector import EDGE def gpio_admin(subcommand, pin, pull=None): if pull: subprocess.check_call(["gpio-admin", subcommand, str(pin), pull]) else: subprocess.check_call(["gpio-admin", subcommand, str(pin)]) Out = "out" In = "in" Rising = "rising" Falling = "falling" Both = "both" PullDown = "pulldown" PullUp = "pullup" class PinAPI(object): def __init__(self, bank, index): self._bank = bank self._index = index @property def index(self): return self._index @property def bank(self): return self._bank def __enter__(self): self.open() return self def __exit__(self, exc_type, exc_value, traceback): self.close() value = property(lambda p: p.get(), lambda p, v: p.set(v), doc="""The value of the pin: 1 if the pin is high, 0 if the pin is low.""") class PinBankAPI(object): def __getitem__(self, n): if 0 < n < len(self): raise ValueError("no pin index {n} out of range", n=n) return self.pin(n) def write(self): pass def read(self): pass class Pin(PinAPI): """Controls a GPIO pin.""" __trigger__ = EDGE def __init__(self, bank, index, soc_pin_number, direction=In, interrupt=None, pull=None): """Creates a pin Parameters: user_pin_number -- the identity of the pin used to create the derived class. soc_pin_number -- the pin on the header to control, identified by the SoC pin number. direction -- (optional) the direction of the pin, either In or Out. 
interrupt -- (optional) pull -- (optional) Raises: IOError -- could not export the pin (if direction is given) """ super(Pin, self).__init__(None, index) self._soc_pin_number = soc_pin_number self._file = None self._direction = direction self._interrupt = interrupt self._pull = pull @property def soc_pin_number(self): return self._soc_pin_number def open(self): gpio_admin("export", self.soc_pin_number, self._pull) self._file = open(self._pin_path("value"), "r+") self._write("direction", self._direction) if self._direction == In: self._write("edge", self._interrupt if self._interrupt is not None else "none") def close(self): if not self.closed: if self.direction == Out: self.value = 0 self._file.close() self._file = None self._write("direction", In) self._write("edge", "none") gpio_admin("unexport", self.soc_pin_number) def get(self): """The current value of the pin: 1 if the pin is high or 0 if the pin is low. The value can only be set if the pin's direction is Out. Raises: IOError -- could not read or write the pin's value. """ self._check_open() self._file.seek(0) v = self._file.read() return int(v) if v else 0 def set(self, new_value): self._check_open() if self._direction != Out: raise ValueError("not an output pin") self._file.seek(0) self._file.write(str(int(new_value))) self._file.flush() @property def direction(self): """The direction of the pin: either In or Out. The value of the pin can only be set if its direction is Out. Raises: IOError -- could not set the pin's direction. """ return self._direction @direction.setter def direction(self, new_value): self._write("direction", new_value) self._direction = new_value @property def interrupt(self): """The interrupt property specifies what event (if any) will raise an interrupt. 
One of: Rising -- voltage changing from low to high Falling -- voltage changing from high to low Both -- voltage changing in either direction None -- interrupts are not raised Raises: IOError -- could not read or set the pin's interrupt trigger """ return self._interrupt @interrupt.setter def interrupt(self, new_value): self._write("edge", new_value) self._interrupt = new_value @property def pull(self): return self._pull def fileno(self): """Return the underlying file descriptor. Useful for select, epoll, etc.""" return self._file.fileno() @property def closed(self): """Returns if this pin is closed""" return self._file is None or self._file.closed def _check_open(self): if self.closed: raise IOError(str(self) + " is closed") def _write(self, filename, value): with open(self._pin_path(filename), "w+") as f: f.write(value) def _pin_path(self, filename=""): return "/sys/devices/virtual/gpio/gpio%i/%s" % (self.soc_pin_number, filename) def __repr__(self): return self.__module__ + "." + str(self) def __str__(self): return "{type}({index})".format( type=self.__class__.__name__, index=self.index) class PinBank(PinBankAPI): def __init__(self, index_to_soc_fn, count=None): super(PinBank, self).__init__() self._index_to_soc = index_to_soc_fn self._count = count def pin(self, index, *args, **kwargs): return Pin(self, index, self._index_to_soc(index), *args, **kwargs) @property def has_len(self): return self._count is not None def __len__(self): if self._count is not None: return self._count else: raise TypeError(self.__class__.__name__ + " has no len") BUTTON = 0 LED = 1 SPI_INTERRUPT = 6 I2C_INTERRUPT = 7 _pi_revision = revision() if _pi_revision == 0: # Not running on the Raspberry Pi, so define no-op pin banks pins = PinBank(lambda p: p) pi_broadcom_soc = pins pi_header_1 = pins else: def by_revision(d): return d[_pi_revision] # Maps header pin numbers to SoC GPIO numbers # See http://elinux.org/RPi_Low-level_peripherals # # Note: - header pins are numbered from 1, SoC 
GPIO from zero # - the Pi documentation identifies some header pins as GPIO0, # GPIO1, etc., but these are not the same as the SoC GPIO # numbers. _pi_header_1_pins = { 3: by_revision({1: 0, 2: 2}), 5: by_revision({1: 1, 2: 3}), 7: 4, 8: 14, 10: 15, 11: 17, 12: 18, 13: by_revision({1: 21, 2: 27}), 15: 22, 16: 23, 18: 24, 19: 10, 21: 9, 22: 25, 23: 11, 24: 8, 26: 7 } _pi_gpio_pins = [_pi_header_1_pins[i] for i in [11, 12, 13, 15, 16, 18, 22, 7]] def lookup(pin_mapping, i): try: if i >= 0: return pin_mapping[i] except LookupError: pass raise IndexError(str(i) + " is not a valid pin index") def map_with(pin_mapping): return lambda i: lookup(pin_mapping, i) pi_broadcom_soc = PinBank(lambda p: p) pi_header_1 = PinBank(map_with(_pi_header_1_pins)) pins = PinBank(map_with(_pi_gpio_pins), len(_pi_gpio_pins))
{ "repo_name": "FuelCellUAV/FC_datalogger", "path": "quick2wire/quick2wire/gpio.py", "copies": "1", "size": "7683", "license": "cc0-1.0", "hash": 6397126853528005000, "line_mean": 25.4931034483, "line_max": 96, "alpha_frac": 0.5540804373, "autogenerated": false, "ratio": 3.7441520467836256, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.47982324840836255, "avg_score": null, "num_lines": null }
'''A convenient class for parsing HTML pages.''' from __future__ import unicode_literals from HTMLParser import HTMLParser import logging import re from RSSvk.core import Error LOG = logging.getLogger(__name__) LOG.setLevel(logging.INFO) class HTMLPageParser(HTMLParser): '''A convenient class for parsing HTML pages.''' tag_name_regex = '[a-zA-Z][-.a-zA-Z0-9:_]*' '''A regular expression for tag name.''' attribute_name_regex = tag_name_regex '''A regular expression for attribute name.''' tag_attrs_regex = re.sub(r'\s*', '', r''' (?:\s+ ''' + attribute_name_regex + r''' (?:\s*=\s* (?: '[^']*' |"[^"]*" |[^'"/>\s]+ ) )? )* ''') '''A regular expression for tag attributes.''' script_regex = re.compile('<script' + tag_attrs_regex + '>.*?</script>', re.DOTALL | re.IGNORECASE) '''A regular expression for matching scripts.''' __invalid_tag_attr_spacing_regex = re.compile(r''' ( # Tag name <''' + tag_name_regex + r''' # Zero or several attributes ''' + tag_attrs_regex + r''' # Two attributes without a space between them \s+ # whitespace before attribute name ''' + attribute_name_regex + r''' # attribute name \s*=\s* # value indicator (?: '[^']*' # LITA-enclosed value |"[^"]*" # LIT-enclosed value ) ) ([^\s>]) # Do not include / to make the preparation replacement for __invalid_tag_attr_regex ''', re.VERBOSE) ''' A regular expression for matching a common error in specifying tag attributes. 
''' __invalid_tag_attr_regex = re.compile(r''' ( # Tag name <''' + tag_name_regex + r''' # Zero or several attributes ''' + tag_attrs_regex + r''' ) \s+(?: # Invalid characters instead of an attribute [^\sa-zA-Z/>]\S* | # Sole slash /\s | # Invalid characters starting from slash instead of an attribute /[^>\s]+ ) ''', re.VERBOSE) ''' A regular expression for matching HTML errors like: <a class="app photo"/app2322149_58238998?from_id=2381857&loc=addneighbour onclick="return cur.needLoginBox()"> ''' __empty_tags = 'area|base|basefont|br|col|frame|hr|img|input|link|meta|param' '''A list of all HTML empty tags.''' __misopened_tag_regex = re.compile(r'<(' + __empty_tags + tag_attrs_regex + r')\s*>', re.IGNORECASE) '''A regular expression for matching opened tags that should be closed.''' __tag_stack = None '''A stack of currently opened HTML tags.''' __cur_data = None ''' Accumulates data between handle_charref(), handle_entityref() and handle_data() calls. ''' def __init__(self): HTMLParser.__init__(self) def handle_charref(self, name): '''Handles a character reference of the form &#ref;.''' self.__accumulate_data('&#' + name + ';') def handle_data(self, data): '''Handles data.''' self.__accumulate_data(data) def handle_endtag(self, tag_name): '''Handles end of a tag.''' self.__handle_data_if_exists() if self.__get_cur_tag()['name'] == tag_name: self.__close_tag(self.__tag_stack.pop()) else: for tag_id in xrange(len(self.__tag_stack) - 1, -1, -1): if self.__tag_stack[tag_id]['name'] == tag_name: for tag in reversed(self.__tag_stack[tag_id + 1:]): self.__close_tag(tag, forced = True) self.__tag_stack.pop() self.__close_tag(self.__tag_stack.pop()) break else: LOG.debug('Dropping excess end tag "%s"...', tag_name) def handle_entityref(self, name): '''Handles a general entity reference of the form &name;.''' self.__accumulate_data('&' + name + ';') def handle_root_data(self, tag, data): '''Handles data inside of the root of the document.''' LOG.debug('%s', data) def 
handle_root(self, tag, attrs, empty): '''Handles a tag inside of the root of the document.''' LOG.debug('<%s %s%s>', tag['name'], attrs, '/' if empty else '') tag['new_tag_handler'] = self.handle_root tag['data_handler'] = self.handle_root_data tag['end_tag_handler'] = self.handle_root_end def handle_root_end(self, tag): '''Handles end of the root of the document.''' LOG.debug('</%s>', tag['name']) def handle_startendtag(self, tag, attrs): '''Handles start of an XHTML-style empty tag.''' self.__handle_data_if_exists() self.__handle_start_tag(tag, attrs, True) def handle_starttag(self, tag, attrs): '''Handles start of a tag.''' self.__handle_data_if_exists() self.__handle_start_tag(tag, attrs, False) def reset(self): '''Resets the parser.''' HTMLParser.reset(self) self.__tag_stack = [{ # Add fake root tag 'name': None, 'new_tag_handler': self.handle_root, 'data_handler': self.handle_root_data, 'end_tag_handler': self.handle_root_end, }] def parse(self, html): '''Parses the specified HTML page.''' html = self.__fix_html(html) self.reset() try: # Run the parser self.feed(html) self.close() finally: # Close all unclosed tags for tag in self.__tag_stack[1:]: self.__close_tag(tag, True) def __accumulate_data(self, data): ''' Accumulates data between handle_charref(), handle_entityref() and handle_data() calls. 
''' if self.__cur_data is None: self.__cur_data = data else: self.__cur_data += data def __close_tag(self, tag, forced = False): '''Forces closing of an unclosed tag.''' if forced: LOG.debug('Force closing of unclosed tag "%s".', tag['name']) else: LOG.debug('Tag %s closed.', tag) if 'end_tag_handler' in tag: tag['end_tag_handler'](tag) LOG.debug('Current tag: %s.', self.__get_cur_tag()) def __fix_html(self, html): '''Fixes various things that may confuse the Python's HTML parser.''' html = self.script_regex.sub('', html) loop_replacements = ( lambda html: self.__invalid_tag_attr_spacing_regex.subn(r'\1 \2', html), lambda html: self.__invalid_tag_attr_regex.subn(r'\1 ', html), ) for loop_replacement in loop_replacements: for i in xrange(0, 1000): html, changed = loop_replacement(html) if not changed: break else: raise Error('Too many errors in the HTML or infinite loop.') html = self.__misopened_tag_regex.sub(r'<\1 />', html) return html def __get_cur_tag(self): '''Returns currently opened tag.''' return self.__tag_stack[-1] def __handle_data_if_exists(self): '''Handles accumulated data (if exists).''' data = self.__cur_data if data is None: return self.__cur_data = None tag = self.__get_cur_tag() handler = tag.get('data_handler') if handler is not None: LOG.debug('Data "%s" in "%s" with handler %s.', data, tag['name'], handler.func_name) handler(tag, data) def __handle_start_tag(self, tag_name, attrs, empty): '''Handles start of any tag.''' tag = { 'name': tag_name } handler = self.__get_cur_tag().get('new_tag_handler') if handler is not None: attrs = self.__parse_attrs(attrs) LOG.debug('Start tag: %s %s with handler %s.', tag, attrs, handler.func_name) handler(tag, attrs, empty) if not empty: self.__tag_stack.append(tag) def __parse_attrs(self, attrs_tuple): '''Converts tag attributes from a tuple to a dictionary.''' attrs = {} for attr, value in attrs_tuple: attrs[attr.lower()] = value return attrs
{ "repo_name": "Densvin/RSSVK", "path": "vkfeed/tools/html_parser.py", "copies": "1", "size": "8733", "license": "bsd-2-clause", "hash": -295135026531479500, "line_mean": 26.5488958991, "line_max": 128, "alpha_frac": 0.5151723348, "autogenerated": false, "ratio": 4.0262793914246195, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0026429666368483766, "num_lines": 317 }
'''A convenient class for parsing HTML pages.''' from __future__ import unicode_literals from HTMLParser import HTMLParser import logging import re from vkfeed.core import Error LOG = logging.getLogger(__name__) LOG.setLevel(logging.INFO) class HTMLPageParser(HTMLParser): '''A convenient class for parsing HTML pages.''' tag_name_regex = '[a-zA-Z][-.a-zA-Z0-9:_]*' '''A regular expression for tag name.''' attribute_name_regex = tag_name_regex '''A regular expression for attribute name.''' tag_attrs_regex = re.sub(r'\s*', '', r''' (?:\s+ ''' + attribute_name_regex + r''' (?:\s*=\s* (?: '[^']*' |"[^"]*" |[^'"/>\s]+ ) )? )* ''') '''A regular expression for tag attributes.''' script_regex = re.compile('<script' + tag_attrs_regex + '>.*?</script>', re.DOTALL | re.IGNORECASE) '''A regular expression for matching scripts.''' __invalid_tag_attr_spacing_regex = re.compile(r''' ( # Tag name <''' + tag_name_regex + r''' # Zero or several attributes ''' + tag_attrs_regex + r''' # Two attributes without a space between them \s+ # whitespace before attribute name ''' + attribute_name_regex + r''' # attribute name \s*=\s* # value indicator (?: '[^']*' # LITA-enclosed value |"[^"]*" # LIT-enclosed value ) ) ([^\s>]) # Do not include / to make the preparation replacement for __invalid_tag_attr_regex ''', re.VERBOSE) ''' A regular expression for matching a common error in specifying tag attributes. 
''' __invalid_tag_attr_regex = re.compile(r''' ( # Tag name <''' + tag_name_regex + r''' # Zero or several attributes ''' + tag_attrs_regex + r''' ) \s+(?: # Invalid characters instead of an attribute [^\sa-zA-Z/>]\S* | # Sole slash /\s | # Invalid characters starting from slash instead of an attribute /[^>\s]+ ) ''', re.VERBOSE) ''' A regular expression for matching HTML errors like: <a class="app photo"/app2322149_58238998?from_id=2381857&loc=addneighbour onclick="return cur.needLoginBox()"> ''' __empty_tags = 'area|base|basefont|br|col|frame|hr|img|input|link|meta|param' '''A list of all HTML empty tags.''' __misopened_tag_regex = re.compile(r'<(' + __empty_tags + tag_attrs_regex + r')\s*>', re.IGNORECASE) '''A regular expression for matching opened tags that should be closed.''' __tag_stack = None '''A stack of currently opened HTML tags.''' __cur_data = None ''' Accumulates data between handle_charref(), handle_entityref() and handle_data() calls. ''' def __init__(self): HTMLParser.__init__(self) def handle_charref(self, name): '''Handles a character reference of the form &#ref;.''' self.__accumulate_data('&#' + name + ';') def handle_data(self, data): '''Handles data.''' self.__accumulate_data(data) def handle_endtag(self, tag_name): '''Handles end of a tag.''' self.__handle_data_if_exists() if self.__get_cur_tag()['name'] == tag_name: self.__close_tag(self.__tag_stack.pop()) else: for tag_id in xrange(len(self.__tag_stack) - 1, -1, -1): if self.__tag_stack[tag_id]['name'] == tag_name: for tag in reversed(self.__tag_stack[tag_id + 1:]): self.__close_tag(tag, forced = True) self.__tag_stack.pop() self.__close_tag(self.__tag_stack.pop()) break else: LOG.debug('Dropping excess end tag "%s"...', tag_name) def handle_entityref(self, name): '''Handles a general entity reference of the form &name;.''' self.__accumulate_data('&' + name + ';') def handle_root_data(self, tag, data): '''Handles data inside of the root of the document.''' LOG.debug('%s', data) def 
handle_root(self, tag, attrs, empty): '''Handles a tag inside of the root of the document.''' LOG.debug('<%s %s%s>', tag['name'], attrs, '/' if empty else '') tag['new_tag_handler'] = self.handle_root tag['data_handler'] = self.handle_root_data tag['end_tag_handler'] = self.handle_root_end def handle_root_end(self, tag): '''Handles end of the root of the document.''' LOG.debug('</%s>', tag['name']) def handle_startendtag(self, tag, attrs): '''Handles start of an XHTML-style empty tag.''' self.__handle_data_if_exists() self.__handle_start_tag(tag, attrs, True) def handle_starttag(self, tag, attrs): '''Handles start of a tag.''' self.__handle_data_if_exists() self.__handle_start_tag(tag, attrs, False) def reset(self): '''Resets the parser.''' HTMLParser.reset(self) self.__tag_stack = [{ # Add fake root tag 'name': None, 'new_tag_handler': self.handle_root, 'data_handler': self.handle_root_data, 'end_tag_handler': self.handle_root_end, }] def parse(self, html): '''Parses the specified HTML page.''' html = self.__fix_html(html) self.reset() try: # Run the parser self.feed(html) self.close() finally: # Close all unclosed tags for tag in self.__tag_stack[1:]: self.__close_tag(tag, True) def __accumulate_data(self, data): ''' Accumulates data between handle_charref(), handle_entityref() and handle_data() calls. 
''' if self.__cur_data is None: self.__cur_data = data else: self.__cur_data += data def __close_tag(self, tag, forced = False): '''Forces closing of an unclosed tag.''' if forced: LOG.debug('Force closing of unclosed tag "%s".', tag['name']) else: LOG.debug('Tag %s closed.', tag) if 'end_tag_handler' in tag: tag['end_tag_handler'](tag) LOG.debug('Current tag: %s.', self.__get_cur_tag()) def __fix_html(self, html): '''Fixes various things that may confuse the Python's HTML parser.''' html = self.script_regex.sub('', html) loop_replacements = ( lambda html: self.__invalid_tag_attr_spacing_regex.subn(r'\1 \2', html), lambda html: self.__invalid_tag_attr_regex.subn(r'\1 ', html), ) for loop_replacement in loop_replacements: for i in xrange(0, 1000): html, changed = loop_replacement(html) if not changed: break else: raise Error('Too many errors in the HTML or infinite loop.') html = self.__misopened_tag_regex.sub(r'<\1 />', html) return html def __get_cur_tag(self): '''Returns currently opened tag.''' return self.__tag_stack[-1] def __handle_data_if_exists(self): '''Handles accumulated data (if exists).''' data = self.__cur_data if data is None: return self.__cur_data = None tag = self.__get_cur_tag() handler = tag.get('data_handler') if handler is not None: LOG.debug('Data "%s" in "%s" with handler %s.', data, tag['name'], handler.func_name) handler(tag, data) def __handle_start_tag(self, tag_name, attrs, empty): '''Handles start of any tag.''' tag = { 'name': tag_name } handler = self.__get_cur_tag().get('new_tag_handler') if handler is not None: attrs = self.__parse_attrs(attrs) LOG.debug('Start tag: %s %s with handler %s.', tag, attrs, handler.func_name) handler(tag, attrs, empty) if not empty: self.__tag_stack.append(tag) def __parse_attrs(self, attrs_tuple): '''Converts tag attributes from a tuple to a dictionary.''' attrs = {} for attr, value in attrs_tuple: attrs[attr.lower()] = value return attrs
{ "repo_name": "SapronovMaxim/vkfeed_JIT", "path": "vkfeed/tools/html_parser.py", "copies": "16", "size": "8734", "license": "bsd-2-clause", "hash": 7842032908226018000, "line_mean": 26.5520504732, "line_max": 128, "alpha_frac": 0.5152278452, "autogenerated": false, "ratio": 4.024884792626728, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": null, "num_lines": null }
""" A convenient plotting container In this package implements :class:`Plotter`, which is a simple container to dictionary like structure (e.g. :class:`dict`, :class:`np.recarray`, :class:`pandas.DataFrame`). It allows the user to plot directly using keys of the data and also allows rapid group plotting routines (groupy and facets). I was basically tired of all the packages doing fancy things and not allowing basics or requiring a lot of dependencies. Examples -------- .. code-block::python >> d = {...} >> p = plotter.Plotter(d) >> g = p.groupby('BRK', markers='<^>v.oxs', colors='parula_r') >> g.plot('CRA', 'CDEC') >> g.colorbar().set_label('BRK') Multiple groups can be done as well. (Caution, the `facet` option is not robust) .. code-block::python >> g = p.groupby('BRK', facet=True, sharex=True, sharey=True)\ .groupby('FLD') >> g.plot('CRA', 'CDEC', 'o') .. note:: * tested with python 2.7, & 3.4 * tested compatible with pandas (not required) * requirements: numpy, matplotlib :author: Morgan Fouesneau """ from __future__ import (absolute_import, division, print_function) import sys import pylab as plt import matplotlib as mpl import numpy as np import itertools from matplotlib.ticker import MaxNLocator from . 
import astro PY3 = sys.version_info[0] > 2 if PY3: basestring = (str, bytes) else: basestring = (str, unicode) __all__ = ['Group', 'Plotter', 'create_common_cbar', 'colorify', 'evalexpr', 'create_common_legend'] def get_doc_from(name, obj=plt): """ decorator to add documentation from a module (default: matplotlib) Parameters ---------- name: str name of the function to get the documentation from obj: object module from which the function is an attribute Returns ------- decorator: callable decorator """ def deco(func): fn = getattr(obj, name, None) if fn is not None: if func.__doc__ is None: func.__doc__ = fn.__doc__ else: func.__doc__ += fn.__doc__ return func return deco def _arg_groupby(data, key): """ create an iterator which returns (key, index) grouped by each value of key(value) """ val = data[key] def parse_missing_data(x, dtype=str): """ Make sure null/missing values are still sorted """ cond = x in [None, '', 'None', 'none', float('nan'), 'nan', 'NaN', 'null', 'Null', float('inf'), 'inf'] return cond, x ind = sorted(zip(val, range(len(val))), key=lambda x: parse_missing_data(x[0])) for k, grp in itertools.groupby(ind, lambda x: x[0]): index = [k[1] for k in grp] yield k, index def _groupby(data, key): """ create an iterator which returns (key, DataFrame) grouped by each value of key(value) """ for k, index in _arg_groupby(data, key): d = {a: b[index] for a, b in data.items()} yield k, data.__class__(d) def _safe_compute(val): """ Return the computed value if necessary Dask DataFrame may sometimes need to call compute to interact properly with some calls. This method if a shortcut to make sure we work with values when necessary. """ try: return val.compute() except AttributeError: return val class Group(object): """ Group multiple plotter instances into one container. This offers any function of :class:`Plotter` through an implicit loop of any method It allows for instance to generate multiple plots on the same axes or even facet plot (one per group). .. 
code-block:: python >> g = Plotter(df).groupby('class') >> g.set_options(facet=True, ncols=2, projection='aitoff') # which is equivalent to >> g = Plotter(df)\ .groupby('class', facet=True, ncols=2, projection='aitoff') >> g.plot('RA', 'Dec', 'o', alpha=0.5, mec='None') Attributes ---------- seq: sequence Sequence of Plotter instances title: str name of the group (used as label is nested groups) facet: bool set to use facets, i.e., one subplot per element of the group markers: iterable sequence of markers one per group linestyles: iterable sequence of linestyles one per group colors: seq or Colormap sequence of colors or Colormap instance from which deriving a sequence of colors to encode each group if Colormap instance, a cmap attribute will be generated after a plot and will refer to the updated instance sharex: bool set to share x-axis with all subplots sharey: bool set to share y-axis with all subplots kwargs: dict any other option will be forwarded to :func:`plt.subplot` .. see also:: :func:`set_options` """ def __init__(self, seq, title='', **kwargs): self.seq = seq self.title = title self.facet = False self.markers = None self.linestyles = None self.colors = None self.ncols = 3 self.sharex = False self.sharey = False self.axes = None self.kwargs = {} self._all_against = False self._show_facet_titles = True self.create_common_cbar = create_common_cbar self.set_options(**kwargs) self.show = plt.show def make_facets(self, show_titles=True): """ generates multiple subplots uses self.ncols as number of columns and subplots are also using self.kwargs. Returns ------- axes: sequence sequence of the axes instance from the subplots show_titles: bool set to add the title of the subplot to the group's name .. 
see also:: :func:`set_options` """ axes = [] n = len(self) ncols = min(self.ncols, n) nlines = n // ncols if ncols * nlines < n: nlines += 1 if nlines == 0: nlines = 1 ncols = n axes = [] ax = sharex = sharey = None for k in range(n): if self.sharex: sharex = ax if self.sharey: sharey = ax ax = plt.subplot(nlines, ncols, k + 1, sharex=sharex, sharey=sharey, **self.kwargs) axes.append(ax) if (self.seq[k].label is not None) and self._show_facet_title: ax.set_title(self.seq[k].label) if (self.sharex): if k < (n - ncols): plt.setp(ax.get_xticklabels(), visible=False) if (self.sharey): if (k % ncols) > 0: plt.setp(ax.get_yticklabels(), visible=False) self.axes = axes return axes def set_options(self, **kwargs): """ Set some options Parameters ---------- title: str rename the group facet: bool set the group to display facets or one plot ncols: int when facet is True, this gives how many columns should be used markers: seq sequence of markers (will cycle through) linestyles: seq sequence of linestyles (will cycle through) colors: seq or Colormap sequence of colors or Colormap instance from which deriving a sequence of colors to encode each group if Colormap instance, a cmap attribute will be generated after a plot and will refer to the updated instance labels: seq Labels used for each group instead of values sharex: bool set to share x-axis with all subplots sharey: bool set to share y-axis with all subplots all_against: bool set if plotting variables against a common one kwargs: dict any other option will be forwarded to :func:`plt.subplot` Returns ------- self: Group instance returns itself for conveniance when writting one liners. 
""" title = kwargs.pop('title', None) facet = kwargs.pop('facet', None) ncols = kwargs.pop('ncols', None) markers = kwargs.pop('markers', None) colors = kwargs.pop('colors', None) linestyles = kwargs.pop('linestyles', None) labels = kwargs.pop('labels', None) sharex = kwargs.pop('sharex', None) sharey = kwargs.pop('sharey', None) allow_expressions = kwargs.pop('allow_expressions', None) self._all_against = kwargs.pop('all_against', self._all_against) self._show_facet_title = kwargs.pop('show_facet_title', self._show_facet_title) self.ncols = kwargs.pop('ncols', self.ncols) if sharex is not None: self.sharex = sharex if sharey is not None: self.sharey = sharey if title is not None: self.title = title if facet is not None: self.facet = facet if ncols is not None: self.ncols = ncols if markers is not None: self.markers = markers if colors is not None: self.colors = colors if isinstance(self.colors, basestring): self.colors = plt.cm.get_cmap(self.colors) if linestyles is not None: self.linestyles = linestyles if labels is not None: for k, v in zip(self.seq, itertools.cycle(labels)): k.label = v if allow_expressions is not None: for k in self.seq: k.allow_expressions = allow_expressions self.kwargs.update(kwargs) return self def groupby(self, key, select=None, labels=None, **kwargs): """ Make individual plots per group Parameters ---------- key: str key on which building groups select: sequence explicit selection on the groups if a group does not exist, it will be returned empty labels: dict set to replace the group names by a specific label string during the plot kwargs: dict optional keywords forwarded to :func:`set_options` method Returns ------- g: Group instance group of plotters .. 
see also:: :func:`set_options` """ gg = [] for sk in self.seq: lst = sk.groupby(key, select=select, labels=labels) for k, v in sk.__dict__.items(): if k not in ['seq', 'title']: setattr(lst, k, v) if getattr(sk, 'title', None) is not None: lst.label = sk.title lst.set_options(**kwargs) gg.append(lst) return self.__class__(gg, title=self.title) def subplot(self, *args, **kwargs): """ A convenient shortcut for one liner use Generates a subplot with given arguments and returns `self`. """ self.axes = plt.subplot(*args, **kwargs) return self def apply(self, fn, *args, **kwargs): """apply function on each element of the group Parameters ---------- func : callable plotting function Must take x, y arrays as positional arguments and draw onto the "currently active" matplotlib Axes. """ if self.facet: axes = self.make_facets() ret = [] for ax, element in zip(axes, self.seq): plt.sca(ax) ret.append(element.apply(fn, *args, **kwargs)) return ret def __len__(self): return len(self.seq) def __repr__(self): txt = """Object Group {0:s} (length={2:d}): {1:s}""" return txt.format(self.title, object.__repr__(self), len(self)) def __dir__(self): """ show the content of Plotter """ return self.seq[0].__dir__() def __getattr__(self, k): """ Returns a looper function on each plotter of the group """ cyclenames = 'linestyles', 'colors', 'markers' cyclekw = {k: getattr(self, k) for k in cyclenames} if isinstance(self.colors, mpl.colors.Colormap): if not self._all_against: s = set() for sk in self.seq: s = s.union(set(sk.data[self.title])) else: s = np.arange(len(self.seq)) colors, cmap = colorify(s) cyclekw['colors'] = colors self.cmap = cmap elif self.colors is None: cyclekw['colors'] = plt.rcParams['axes.prop_cycle']\ .by_key()['color'] cc_ = mpl.colors.ColorConverter() cyclekw['colors'] = [cc_.to_rgba(val) for val in cyclekw['colors']] if self.facet: axes = self.make_facets() return self.looper_facet_method(self.seq, k, axes, cyclekw=cyclekw) else: return self.looper_method(self.seq, 
k, cyclekw=cyclekw) def __iter__(self): """ Iterator over the individual plotter of the group """ for k in self.seq: yield k def __getitem__(self, k): """ Returns one plotter of the group """ return self.seq[k] @staticmethod def looper_method(lst, methodname, cyclekw={}, **kw): """ calls a method on many instance of sequence of objects Parameters ---------- lst: sequence sequence of objects to call the method from methodname: str name of the method to call from each object cyclekw: dict keyword arguments that calls need to cycle over per object. Each element in this dictionary is expected to be a sequence and one element of each will be used per call. It will use :func:`itertools.cycle`. (None elements are filtered) cyclenames = 'linestyles', 'colors', 'markers' kw: dict other keywords (have priority on `cyclekw`) Returns ------- deco: callable mapper function """ cyclenames = 'linestyles', 'colors', 'markers' _cyclekw = {k: itertools.cycle(cyclekw[k]) for k in cyclenames if cyclekw[k] is not None} def next_cyclekw(): a = {k[:-1]: next(v) for k, v in _cyclekw.items()} return a def deco(*args, **kwargs): r = [] for l in lst: k0 = next_cyclekw() kw.update(k0) kw.update(kwargs) if (l.data is None) or _safe_compute(np.size(l.data) == 0): a = None else: a = getattr(l, methodname)(*args, **kw) r.append(a) return r return deco @staticmethod def looper_facet_method(lst, methodname, axes, cyclekw={}, **kw): """ calls a method on many instance of sequence of objects but also imposes ax as keyword argument. This method will also test if there is no data to plot. Parameters ---------- lst: sequence sequence of objects to call the method from methodname: str name of the method to call from each object axes: sequence list of axes, one per call cyclekw: dict keyword arguments that calls need to cycle over per object. Each element in this dictionary is expected to be a sequence and one element of each will be used per call. It will use :func:`itertools.cycle`. 
(None elements are filtered) cyclenames = 'linestyles', 'colors', 'markers' kw: dict other keywords (have priority on `cyclekw`) Returns ------- deco: callable mapper function """ cyclenames = 'linestyles', 'colors', 'markers' _cyclekw = {k: itertools.cycle(cyclekw[k]) for k in cyclenames if cyclekw[k] is not None} def next_cyclekw(): a = {k[:-1]: next(v) for k, v in _cyclekw.items()} return a def deco(*args, **kwargs): r = [] for l, ax in zip(lst, axes): k0 = next_cyclekw() kw.update(k0) kw.update(kwargs) if (l.data is None) or (np.size(l.data) == 0): _intercept_empty_plot(ax=ax) else: kw['ax'] = ax a = getattr(l, methodname)(*args, **kw) r.append(a) return r return deco @get_doc_from('colorbar') def colorbar(self, *args, **kwargs): if not hasattr(self, 'cmap'): print('No registered colormap with the group') return return plt.colorbar(self.cmap, *args, **kwargs) def __add__(self, other): if isinstance(other, self.__class__): return Group([self.seq, other]) elif isinstance(other, self[0].__class__): # copy the list and append the new element g = self.__class__([k for k in self.seq]) g.seq.append(other) for k, v in self.__dict__.items(): if k not in ['seq', 'title']: setattr(g, k, v) return g else: raise RuntimeError('Cannot add {0} type objects to {1} instance' .format(other.__class__.__name__, self.__class__.__name__)) def pairplot(self, keys=None, **kwargs): """ This is a high-level interface for PairGrid intended to make it easy to draw a few common styles. Parameters ---------- keys: sequence work with only a set of keys if provided. Returns ------- p: PairGrid instance :class`PairGrid` which gives you all possible flexibility. """ if keys is None: keys = self.data.keys() return PairGrid(self, keys, allow_expressions=self.allow_expressions, **kwargs) class Plotter(object): """ A wrapper around plotting functions and DataFrame This should also work with pure dictionary objects. 
all plotting functions are basically proxies to matplotlib in which arguments can be named columns from the data (not necessary) and each method handles a `ax` keyword to specify a Axes instance to use (default using :func:`plt.gca`) .. code-block:: python Plotter(df).groupby('class')\ .plot('RA', 'Dec', 'o', alpha=0.5, mec='None') Attributes ---------- data: dict-like structure data with column named format label: str, optional label to use on the data as default label allow_expressions: bool, optional set to use math expressions with the keys see :func:`evalexpr` ax: plt.Axes instance contains the last axes reference(s) after a plot (do not exists if no plotting function was called) """ def __init__(self, data, label=None, allow_expressions=False, update_axis_label=True): self.data = data self.label = label self.allow_expressions = allow_expressions self.show = plt.show self.update_axis_label = update_axis_label self.label = label def set_options(self, **kwargs): self.label = kwargs.get('label', self.label) self.allow_expressions = kwargs.get('allow_expressions', self.allow_expressions) self.show = kwargs.get('show', self.show) self.update_axis_label = kwargs.get('update_axis_label', self.label) return self def _ensure_data_type(self, data): """ Make sure data is compatible with a dictionary like interface """ if isinstance(data, dict) or hasattr(data, '__getitem__'): return data # assuming array data = {e: k for e, k in enumerate(np.asarray(data).T)} return data @property def keys(self): try: return self.data.keys() except AttributeError: # Dataframes from pandas and dask return self.data.columns def _value_from_data(self, key): """ Parse a key for existing data in the dataframe. 
If not found, returns the key directly """ if not isinstance(key, basestring): value = key elif key not in self.data: if self.allow_expressions: try: value = evalexpr(self.data, key) except Exception: pass value = key else: value = self.data[key] return _safe_compute(value) def _select_data(self, selection): """ Parse indices or expression and return selected data""" try: return self.data.selectWhere('*', selection) except ValueError: return self.data.select('*', indices=np.where(selection)[0]) except AttributeError or KeyError: if selection is None: return self.data elif isinstance(selection, basestring): indexes = self.data.eval(selection) return self.data.where(indexes) else: return self.data[selection] def select(self, selection, labels=None, **kwargs): """ Returns a Group from selected data Parameters ---------- selection: str or expression or sequence of these the selection could be a sequence of selections A given selection on the data can be an string or the evaluation of it (boolean array or dask array) labels: sequence(str) the labels of the selections used in the Group Returns ------- group: Group instance group of plotters (one per selection) """ if isinstance(selection, (basestring, type(None))): selection = [selection] if labels is None: labels = [] for num, select in enumerate(selection, 1): if isinstance(select, (basestring, type(None))): labels.append(str(select)) else: labels.append('subset {0:d}'.format(num)) elements = [] for select, label in zip(selection, labels): subdata = self._select_data(select) elements.append(self.__class__(subdata, label=label)) return Group(elements).set_options(**kwargs) def evalexpr(self, expr, exprvars=None, dtype=float): """ evaluate expression based on the data and external variables all np function can be used (log, exp, pi...) 
Parameters ---------- data: dict or dict-like structure data frame / dict-like structure containing named columns expr: str expression to evaluate on the table includes mathematical operations and attribute names exprvars: dictionary, optional A dictionary that replaces the local operands in current frame. dtype: dtype definition dtype of the output array Returns ------- out : np.array array of the result """ return evalexpr(self.data, expr, exprvars=exprvars, dtype=dtype) def subplot(self, *args, **kwargs): """ A convenient shortcut for one liner use Generates a subplot with given arguments and returns self. """ plt.subplot(*args, **kwargs) return self def colorify(self, key, vmin=None, vmax=None, cmap=None): """ Associate a color map to a quantity vector Parameters ---------- data: sequence values to encode vmin: float minimum value vmax: float maximum value cmap: Colormap instance colormap to use Returns ------- colors: sequence or array one color per input data cmap: Colormap data normalized colormap instance """ return colorify(self.data.evalexpr(key), vmin, vmax, cmap) def apply(self, fn, *args, **kwargs): """ Apply an arbitrary function on the data to plot The first argument of that function must be the dataset By default uses the active axes. Parameters ---------- fn: callable plotting function to apply with args and kwargs arguments Returns ------- r: tuple anything that fn returns. 
""" if isinstance(fn, basestring): _fn = getattr(self, fn) if _fn is None: raise AttributeError('No function named {0:s}'.format(fn)) return _fn(*args, **kwargs) else: return fn(self.data, *args, **kwargs) @get_doc_from('xlabel') def xlabel(self, *args, **kwargs): """ Set the xlabel of the current plot """ ax = kwargs.pop('ax', None) if ax is None: ax = plt.gca() ax.set_xlabel(*args, **kwargs) return self @get_doc_from('ylabel') def ylabel(self, *args, **kwargs): """ Set the ylabel of the current plot """ ax = kwargs.pop('ax', None) if ax is None: plt.gca() ax.set_ylabel(*args, **kwargs) return self def _set_auto_axis_labels(self, xname, yname, ax=None): """ convinient shortcut for labelling axis """ if not self.update_axis_label: return if ax is None: ax = plt.gca() if xname is not None: ax.set_xlabel(xname) if yname is not None: ax.set_ylabel(yname) @get_doc_from('scatter') def scatter(self, x, y, c='k', s=20, *args, **kwargs): _x = self._value_from_data(x) _y = self._value_from_data(y) c = kwargs.pop('color', c) _c = np.atleast_2d(self._value_from_data(c)) s = kwargs.pop('size', s) _s = np.atleast_2d(self._value_from_data(s)) ax = kwargs.pop('ax', None) if ax is None: ax = plt.gca() if 'label' not in kwargs: kwargs['label'] = self.label self._set_auto_axis_labels(x, y, ax=ax) return ax.scatter(_x, _y, c=_c, s=_s, *args, **kwargs) @get_doc_from('plot') def plot(self, x, y, *args, **kwargs): _x = self._value_from_data(x) _y = self._value_from_data(y) ax = kwargs.pop('ax', None) if ax is None: ax = plt.gca() self.axes = ax if 'label' not in kwargs: kwargs['label'] = self.label self._set_auto_axis_labels(x, y, ax=ax) return ax.plot(_x, _y, *args, **kwargs) @get_doc_from('bar') def bar(self, x, y, *args, **kwargs): _x = self._value_from_data(x) _y = self._value_from_data(y) ax = kwargs.pop('ax', None) if ax is None: ax = plt.gca() self.axes = ax if 'label' not in kwargs: kwargs['label'] = self.label self._set_auto_axis_labels(x, y, ax=ax) return ax.bar(_x, _y, *args, 
**kwargs) @get_doc_from('step') def step(self, x, y, *args, **kwargs): _x = self._value_from_data(x) _y = self._value_from_data(y) ax = kwargs.pop('ax', None) if ax is None: ax = plt.gca() self.axes = ax if 'label' not in kwargs: kwargs['label'] = self.label self._set_auto_axis_labels(x, y, ax=ax) return ax.step(_x, _y, *args, **kwargs) @get_doc_from('hist') def hist(self, x, *args, **kwargs): _x = self._value_from_data(x) ind = np.isfinite(_x) ax = kwargs.pop('ax', None) if ax is None: ax = plt.gca() self.axes = ax if 'label' not in kwargs: kwargs['label'] = str(self.label) self._set_auto_axis_labels(x, None, ax=ax) ind = np.isfinite(_x) _w = kwargs.pop('weights', None) if _w is not None: return ax.hist(_x[ind], weights=_w[ind], *args, **kwargs) else: return ax.hist(_x[ind], *args, **kwargs) @get_doc_from('hist2d') def hist2d(self, x, y, *args, **kwargs): _x = self._value_from_data(x) _y = self._value_from_data(y) ax = kwargs.pop('ax', None) if ax is None: ax = plt.gca() self.axes = ax if 'label' not in kwargs: kwargs['label'] = self.label ind = np.isfinite(_x) & np.isfinite(_y) _w = kwargs.pop('weights', None) self._set_auto_axis_labels(x, y, ax=ax) if _w is not None: return ax.hist2d(_x[ind], _y[ind], weights=_w[ind], *args, **kwargs) else: return ax.hist2d(_x[ind], _y[ind], *args, **kwargs) @get_doc_from('hexbin') def hexbin(self, x, y, C=None, *args, **kwargs): _x = self._value_from_data(x) _y = self._value_from_data(y) _C = self._value_from_data(C) ax = kwargs.pop('ax', None) if ax is None: ax = plt.gca() self.axes = ax if 'label' not in kwargs: kwargs['label'] = self.label self._set_auto_axis_labels(x, y, ax=ax) if _C is not None: ind = np.isfinite(_x) & np.isfinite(_y) & np.isfinite(_C) return ax.hexbin(_x[ind], _y[ind], C=_C[ind], *args, **kwargs) else: ind = np.isfinite(_x) & np.isfinite(_y) return ax.hexbin(_x[ind], _y[ind], *args, **kwargs) @get_doc_from('violinplot') def violinplot(self, dataset, **kwargs): d = (self._value_from_data(k) for k in 
dataset) d = [dk[np.isfinite(dk)] for dk in d] ax = kwargs.pop('ax', None) if ax is None: ax = plt.gca() self.axes = ax if 'label' not in kwargs: kwargs['labels'] = dataset return ax.violinplot(d, **kwargs) @get_doc_from('boxplot') def boxplot(self, dataset, **kwargs): d = [self._value_from_data(k) for k in dataset] d = [dk[np.isfinite(dk)] for dk in d] ax = kwargs.pop('ax', None) if ax is None: ax = plt.gca() self.axes = ax if 'labels' not in kwargs: kwargs['labels'] = dataset return ax.boxplot(d, **kwargs) @get_doc_from('healpix_plot', astro) def healpix_plot(self, *args, **kwargs): return astro.healpix_plot(self.data, *args, **kwargs) @get_doc_from('project_aitoff', astro) def plot_aitoff(self, alpha, delta, radians=False, **kwargs): """plot aitoff projection (https://en.wikipedia.org/wiki/Aitoff_projection) projection Parameters ---------- alpha: array azimuth angle delta: array polar angle radians: boolean input and output in radians (True), or degrees (False) """ x, y = astro.project_aitoff(self.data[alpha], self.data[delta], radians=radians) return self.__class__(dict(aitoff_x=x, aitoff_y=y))\ .plot('x', 'y', **kwargs) def groupby(self, key, select=None, labels=None, **kwargs): """ Make individual plots per group Parameters ---------- key: str key on which building groups select: sequence explicit selection on the groups if a group does not exist, it will be returned empty labels: dict set to replace the group names by a specific label string during the plot Returns ------- g: Group instance group of plotters """ r = _groupby(self.data, key) if select is not None: grp = dict((k, v) for k, v in r if k in select) r = [(k, grp.get(k, [])) for k in select] if labels is None: labels = {} lst = [self.__class__(g, label=labels.get(k, k)) for k, g in r] return Group(lst, title=key).set_options(**kwargs) def all_against(self, key, select=None, labels=None, **kwargs): """ Make individual plots per of all variables against one Parameters ---------- key: str key on 
which plotting everything select: sequence explicit selection on the groups if a group does not exist, it will be returned empty labels: dict set to replace the names by a specific label string during the plot Returns ------- g: Group instance group of plotters """ r = ((other, {key: self.data[key], 'value': self.data[other]}) for other in self.data.keys() if other != key) if select is not None: grp = dict((k, v) for k, v in r if k in select) r = [(k, grp.get(k, [])) for k in select] if labels is None: labels = {} lst = [self.__class__(g, label=labels.get(k, k)) for k, g in r] return Group(lst, title=key, all_against=True).set_options(**kwargs) def lagplot(self, x, t=1, **kwargs): """ A lag plot checks whether a data set or time series is random or not. Random data should not exhibit any identifiable structure in the lag plot. Non-random structure in the lag plot indicates that the underlying data are not random. Parameters ---------- x: str the data column to plot t: int lag to apply, default 1 see also: :func:`scatter` """ _x = self._value_from_data(x) _y = np.hstack([_x[t:], _x[:t]]) ax = kwargs.pop('ax', None) if ax is None: ax = plt.gca() self.axes = ax if 'label' not in kwargs: kwargs['label'] = self.label defaults = dict(marker='o', linestyle='None') defaults.update(kwargs) return ax.plot(_x, _y, **defaults) def __add__(self, other): if isinstance(other, self.__class__): return Group([self, other]) else: raise RuntimeError('Cannot add {0} type objects to {1} instance' .format(other.__class__.__name__, self.__class__.__name__)) def pivot_plot(self, key1, key2, plotfn, plotkw={}, **kwargs): """ generate a multiple plots ordered according to 2 keys Parameters ---------- key1: str key along the x-axis key2: str key along the y-axis plotfn: callable the plotting function This function signature must take a dataset and manage an `ax` keyword > plotfn(data, ax=ax, **plotkw) plotkw: dict optional keywords to pass to the plotting function kwargs: dict forwarded to 
:func:`plt.subplots` Returns ------- axes: sequence list of all axes used in the plot """ grp = self.aggregate(lambda x: x, (key1, key2)) sx = {k[0] for k in grp} sx = {k: e for e, k in enumerate(sx)} sy = {k[1] for k in grp} sy = {k: e for e, k in enumerate(sy)} defaults = dict(sharex=True, sharey=True) defaults.update(**kwargs) fig, axes = plt.subplots(len(sy), len(sx), **defaults) _axes = np.rot90(axes, 3) for (idx1, idx2, data) in grp: e1, e2 = sx[idx1], sy[idx2] plotfn(data, ax=_axes[e1, e2], **plotkw) _axes[e1, e2].set_xlabel('') _axes[e1, e2].set_ylabel('') for ax in axes.ravel(): plt.setp(ax.get_yticklines() + ax.get_xticklines(), visible=False) plt.setp(ax.get_yticklabels() + ax.get_xticklabels(), visible=False) return axes def aggregate(self, func, keys, args=(), kwargs={}): """ Apply func on groups within the data Parameters ---------- func: callable function to apply keys: sequence(str) sequence of keys defining the groups args: tuple optional arguments to func (will be given at the end) kwargs: dict optional keywords to func Returns ------- seq: sequence flattened sequence of keys and value (key1, key2, ... 
keyn, {}) """ pv = [(k, list(v)) for k, v in self.multigroupby(self.data, *keys)] def _aggregate(a, b=()): data = [] for k, v in a: if type(v) in (list, tuple,): data.append(_aggregate(v, b=(k,))) else: data.append(b + (k, func(v))) return data return list(itertools.chain(*_aggregate(pv))) def multigroupby(self, *args): """ Generate nested df based on multiple grouping keys Parameters ---------- args: str or sequence column(s) to index the DF """ if len(args) <= 0: yield self.data elif len(args) > 1: nested = True else: nested = False val = self.data[args[0]] ind = sorted(zip(val, range(len(val))), key=lambda x: x[0]) for k, grp in itertools.groupby(ind, lambda x: x[0]): index = [v[1] for v in grp] d = self.data.ary.__class__({a: np.array([b[i] for i in index]) for a, b in self.data.items()}) if nested: yield k, self.multigroupby(d, *args[1:]) else: yield k, d def pairplot(self, keys=None, **kwargs): """ This is a high-level interface for PairGrid intended to make it easy to draw a few common styles. Parameters ---------- keys: sequence work with only a set of keys if provided. Returns ------- p: PairGrid instance :class`PairGrid` which gives you all possible flexibility. """ if keys is None: keys = self.data.keys() return PairGrid(self, keys, allow_expressions=self.allow_expressions, **kwargs) def cornerplot(self, varnames=None, labels=None, figsize=None, **kwargs): """ This is a high-level interface for PairGrid making quickly a CornerPlot Parameters ---------- plotter: Plotter instance plotter to use. If a dataframe is provided, the default will be to use Plotter(plotter). varnames: seq(str) limit the plot to a subset of variables labels: seq(str) replace the variable names by provided labels figsize: tuple(height, width) Size of the figure. 
Default is a square of length 3 * len(varnames) kwargs: dict Forwarded to `Plotter.pairplot` method """ return CornerPlot(self, varnames=varnames, labels=labels, figsize=figsize, **kwargs) class PairGrid(object): """ Container to Plot pairwise relationships in a dataset. By default, this function will create a grid of Axes such that each variable in data will by shared in the y-axis across a single row and in the x-axis across a single column. The diagonal Axes could be treated differently: for instance drawing a plot to show the univariate distribution of the data for the variable in that column. It is also possible to show a subset of variables or plot different variables on the rows and columns. This class works also with Group instances. """ def __init__(self, data, keys, allow_expressions=False, **kwargs): self.data = data self.keys = list(keys) self.allow_expressions = allow_expressions self.show = plt.show nlines = ncols = len(self.keys) self.shape = (nlines, ncols) self.axes = np.empty((nlines, ncols), dtype=object) self.axes_dims = [] self.set_options(**kwargs) self._generate_grid() def set_options(self, **kwargs): self.lbls = kwargs.pop('labels', self.keys) self.ticksrotation = kwargs.pop('ticksrotation', 0) self.weights = kwargs.pop('weights', None) self.max_n_ticks = kwargs.pop('max_n_ticks', 5) return self def adjust(self, left=None, bottom=None, right=None, top=None, wspace=None, hspace=None): """ adjust spacing between subplots using mpl.subplot_adjusts. 
""" plt.subplots_adjust(left, bottom, right, top, wspace, hspace) return self def _check_label_visibility(self): upper_visible = self.axes[0][-1]._visible lower_visible = self.axes[-1][0]._visible diago_visible = self.axes[0][0]._visible n_axes = len(self.axes) if diago_visible: # check the diagonal labels for k in range(n_axes): ax = self.axes[k][k] plt.setp(ax.get_xticklabels(), rotation=self.ticksrotation, visible=not lower_visible or (k == n_axes-1)) plt.setp(ax.get_yticklabels(), visible=not (upper_visible and lower_visible)) ax.tick_params(top=False, left=not lower_visible, right=(not upper_visible) and lower_visible, labelleft=not lower_visible, labelright=(not upper_visible and lower_visible)) ax.spines['right'].set_visible(lower_visible and not upper_visible) ax.spines['left'].set_visible(not lower_visible) ax.spines['top'].set_visible(False) if lower_visible: ax.set_xlabel('') ax = self.axes[-1][-1] ax.set_xlabel(self.lbls[-1]) plt.setp(ax.get_xticklabels(), visible=True) if upper_visible: for i in range(n_axes): for j in range(i + 1, n_axes): ax = self.axes[i][j] ax.spines['right'].set_visible(True) ax.spines['left'].set_visible(False) ax.spines['top'].set_visible(True) ax.spines['bottom'].set_visible(False) ax.tick_params(top=True, bottom=False, left=False, right=True, labelleft=False, labelright=True, labelbottom=False, labeltop=True) plt.setp(ax.get_xticklabels(), rotation=self.ticksrotation, visible=False or (i == 0)) plt.setp(ax.get_yticklabels(), rotation=self.ticksrotation, visible=False or (j == n_axes -1)) ax.xaxis.set_label_position('top') ax.yaxis.set_label_position('right') ax.set_xlabel('') ax.set_ylabel('') for k in range(1, n_axes): ax = self.axes[0][k] ax.set_xlabel(self.lbls[k]) plt.setp(ax.get_xticklabels(), visible=True) for k in range(0, n_axes - 1): ax = self.axes[k][-1] ax.set_ylabel(self.lbls[k]) plt.setp(ax.get_yticklabels(), visible=True) if lower_visible: for i in range(n_axes): for j in range(0, i): ax = self.axes[i][j] 
ax.spines['right'].set_visible(False) ax.spines['left'].set_visible(True) ax.spines['top'].set_visible(False) ax.spines['bottom'].set_visible(True) ax.tick_params(top=False, bottom=True, right=False, left=True) plt.setp(ax.get_xticklabels(), rotation=self.ticksrotation, visible=False or (i == n_axes -1)) plt.setp(ax.get_yticklabels(), rotation=self.ticksrotation, visible=False or (j == 0)) ax.set_xlabel('') ax.set_ylabel('') for k in range(0, n_axes - 1): ax = self.axes[-1][k] ax.set_xlabel(self.lbls[k]) plt.setp(ax.get_xticklabels(), visible=True) for k in range(1, n_axes): ax = self.axes[k][0] ax.set_ylabel(self.lbls[k]) plt.setp(ax.get_yticklabels(), visible=True) def _generate_grid(self): nlines, ncols = self.shape keys = self.keys for k in range(nlines * ncols): yk, xk = np.unravel_index(k, self.shape) self.axes_dims.append((keys[xk], keys[yk])) sharey = None sharex = None if (xk >= 0): sharex = self.axes[0, xk] if (yk >= 0): sharey = self.axes[yk, 0] ax = plt.subplot(nlines, ncols, k + 1, sharey=sharey, sharex=sharex) ax.xaxis.set_major_locator(MaxNLocator(self.max_n_ticks, prune="both")) ax.yaxis.set_major_locator(MaxNLocator(self.max_n_ticks, prune="both")) if (xk > 0): plt.setp(ax.get_yticklabels(), visible=False) else: ax.set_ylabel(self.lbls[yk]) if (yk < nlines - 1): plt.setp(ax.get_xticklabels(), visible=False) else: ax.set_xlabel(self.lbls[xk]) ax.set_visible(False) self.axes[yk, xk] = ax def _value_from_data(self, key): """ Parse a key for existing data in the dataframe. If not found, returns the key directly """ if not isinstance(key, basestring): return key elif key not in self.data: if self.allow_expressions: try: return evalexpr(self.data, key) except Exception: pass return key else: return self.data[key] def evalexpr(self, expr, exprvars=None, dtype=float): """ evaluate expression based on the data and external variables all np function can be used (log, exp, pi...) 
Parameters ---------- data: dict or dict-like structure data frame / dict-like structure containing named columns expr: str expression to evaluate on the table includes mathematical operations and attribute names exprvars: dictionary, optional A dictionary that replaces the local operands in current frame. dtype: dtype definition dtype of the output array Returns ------- out : np.array array of the result """ return evalexpr(self.data, expr, exprvars=exprvars, dtype=dtype) def _apply(self, fn, *args, **kwargs): """ Apply a function fn to the data Parameters ---------- fn: callable or str function to apply or use a Plotter function is exists Returns ------- r: tuple whatever fn returns """ if isinstance(self.data, Group): r = [] for d in self.data: if isinstance(fn, basestring): _fn = getattr(d, fn) if _fn is None: raise AttributeError('No function named {0:s}' .format(fn)) r.append(_fn(*args, **kwargs)) else: r.append(fn(d, *args, **kwargs)) return r else: if isinstance(fn, basestring): _fn = getattr(self.data, fn) if _fn is None: raise AttributeError('No function named {0:s}'.format(fn)) return _fn(*args, **kwargs) else: return fn(self.data, *args, **kwargs) def map_diag(self, fn, *args, **kwargs): """Plot with a univariate function on each diagonal subplot. Parameters ---------- func: callable plotting function Must take an x array as a positional arguments and draw onto the "currently active" matplotlib Axes. There is a special case when using a ``hue`` variable and ``plt.hist``; the histogram will be plotted with stacked bars. only1d: bool set to make the function only use the x-axis instead of both. 
""" n, _ = self.shape r = [] only1d = kwargs.pop('only1d', False) nlines, ncols = self.shape if only1d: for ek, xk in enumerate(self.keys): ax = plt.subplot(nlines, ncols, (ncols + 1) * ek + 1, sharex=self.axes[0][ek]) self.axes[ek, ek] = ax plt.sca(ax) ax.set_visible(True) ax.set_xlabel(xk) r.append(self._apply(fn, xk, *args, **kwargs)) else: for ek, xk in enumerate(self.keys): ax = self.axes[ek, ek] plt.sca(ax) ax.set_visible(True) r.append(self._apply(fn, xk, xk, *args, **kwargs)) self._check_label_visibility() return r def map_offdiag(self, fn, *args, **kwargs): """Plot with a bivariate function on off-diagonal subplots. Parameters ---------- func : callable plotting function Must take x, y arrays as positional arguments and draw onto the "currently active" matplotlib Axes. """ r = [] for ax, (xk, yk) in zip(np.ravel(self.axes), self.axes_dims): if xk != yk: plt.sca(ax) ax.set_visible(True) r.append(self._apply(fn, xk, yk, *args, **kwargs)) self._check_label_visibility() return r def map_lower(self, fn, *args, **kwargs): """Plot with a bivariate function on the lower diagonal subplots. Parameters ---------- func : callable plotting function Must take x, y arrays as positional arguments and draw onto the "currently active" matplotlib Axes. """ lbl_k = -1 nlines, ncols = self.shape r = [] for yi in range(nlines): for xi in range(ncols): lbl_k += 1 if xi < yi: ax = self.axes[yi, xi] plt.sca(ax) ax.set_visible(True) xk, yk = self.axes_dims[lbl_k] r.append(self._apply(fn, xk, yk, *args, **kwargs)) self._check_label_visibility() return r def map_upper(self, fn, *args, **kwargs): """Plot with a bivariate function on the upper diagonal subplots. Parameters ---------- func : callable plotting function Must take x, y arrays as positional arguments and draw onto the "currently active" matplotlib Axes. 
""" lbl_k = -1 nlines, ncols = self.shape r = [] for yi in range(nlines): for xi in range(ncols): lbl_k += 1 if xi > yi: ax = self.axes[yi, xi] plt.sca(ax) ax.set_visible(True) xk, yk = self.axes_dims[lbl_k] r.append(self._apply(fn, xk, yk, *args, **kwargs)) self._check_label_visibility() return r def map(self, fn, *args, **kwargs): """Plot with a bivariate function on all subplots. Parameters ---------- func : callable plotting function Must take x, y arrays as positional arguments and draw onto the "currently active" matplotlib Axes. """ r = self.map_offdiag(fn, *args, **kwargs) r.extend(self.map_diag(fn, *args, **kwargs)) return r class CornerPlot(): """ Generates a corner plot rapidly Attributes ---------- plotter: Plotter instance plotter used by the class. pp: PairGrid instance Pair grid that will be used to make the plots """ def __init__(self, plotter, varnames=None, labels=None, figsize=None, **kwargs): """ Constructor Parameters ---------- plotter: Plotter instance plotter to use. If a dataframe is provided, the default will be to use Plotter(plotter). varnames: seq(str) limit the plot to a subset of variables labels: seq(str) replace the variable names by provided labels figsize: tuple(height, width) Size of the figure. 
Default is a square of length 3 * len(varnames) kwargs: dict Forwarded to `Plotter.pairplot` method """ if isinstance(plotter, Plotter): self.plotter = plotter else: self.plotter = Plotter(plotter) if varnames is None: varnames = list(self.plotter.keys) if labels is None: labels = varnames if (figsize is None): figsize = (3 * len(varnames), 3 * len(varnames)) plt.figure(figsize=figsize) self.pp = plotter.pairplot(varnames, labels=labels, **kwargs) def diag(self, fn=None, **kwargs): """ Make the diagonal plot using fn """ defaults = dict(only1d=True, bins=32, edgecolor='k', facecolor='None', histtype='step') if fn is None: defaults = dict(only1d=True, bins=32, edgecolor='k', facecolor='None', histtype='step') defaults.update(kwargs) self.pp.map_diag('hist', **defaults) else: defaults = dict(only1d=True) defaults.update(kwargs) self.pp.map_diag(fn, **defaults) return self @property def data(self): """ Get the dataframe directly """ return self.pp.data.data def add_quantiles(self, quantiles=[0.16, 0.5, 0.84]): """ Adds quantile indications on the diagonal plots """ for num, (kx, labelx) in enumerate(zip(self.pp.keys, self.pp.lbls)): ax = self.pp.axes[num][num] q_16, q_50, q_84 = np.quantile(self.data[kx], quantiles) q_m, q_p = q_50 - q_16, q_84 - q_50 # Format the quantile display. 
fmt = "{{0:{0}}}".format(".2f").format title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$" title = title.format(fmt(q_50), fmt(q_m), fmt(q_p)) title = "{0} = {1}".format(labelx, title) ax.set_title(title, fontsize='medium') ylim = ax.get_ylim() ax.vlines([q_16, q_50, q_84], ylim[0], ylim[1], color='k', linestyle='--') return self def lower(self, fn='plot', **kwargs): """ Makes the lower diagonal plots """ self.pp.map_lower('plot', **kwargs) return self def upper(self, fn='plot', **kwargs): """ Makes the upper diagonal plots """ self.pp.map_upper('plot', **kwargs) return self def _intercept_empty_plot(*args, **kwargs): """ fall back to empty plot when data is empty Mostly designed to produce plots when forced group selections are made """ ax = kwargs.pop('ax', None) if ax is None: ax = plt.gca() # ax.cla() ax.text(0.5, 0.5, 'No Data', ha='center', va='center', transform=ax.transAxes) # Adjust x, y ticks spacing. # plt.setp(ax.get_xticklabels() + ax.get_yticklabels(), visible=False) # plt.setp(ax.get_xticklines() + ax.get_yticklines(), visible=False) def evalexpr(data, expr, exprvars=None, dtype=float): """ evaluate expression based on the data and external variables all np function can be used (log, exp, pi...) Parameters ---------- data: dict or dict-like structure data frame / dict-like structure containing named columns expr: str expression to evaluate on the table includes mathematical operations and attribute names exprvars: dictionary, optional A dictionary that replaces the local operands in current frame. 
dtype: dtype definition dtype of the output array Returns ------- out : np.array array of the result """ _globals = {} keys = [] if hasattr(data, 'keys'): keys += list(data.keys()) if hasattr(getattr(data, 'dtype', None), 'names'): keys += list(data.dtype.names) if hasattr(data, '_aliases'): # SimpleTable specials keys += list(data._aliases.keys()) keys = set(keys) for k in keys: if k in expr: _globals[k] = data[k] if exprvars is not None: if (not (hasattr(exprvars, 'items'))): msg = "Expecting a dict-like as condvars with an `items` method" raise AttributeError(msg) for k, v in (exprvars.items()): _globals[k] = v # evaluate expression, to obtain the final filter # r = np.empty( self.nrows, dtype=dtype) r = eval(expr, _globals, np.__dict__) return np.array(r, dtype=dtype) def create_common_cbar(vmin=0, vmax=1, box=None, **kwargs): """ Create a common colorbar to a complex figure Parameters ---------- vmin: float minimum value on the colorscale vmax: float maximum value on the colorscale box: tuple axis definition box Returns ------- cb: ColorBar instance the colorbar object """ if box is None: box = [0.3, 0.1, 0.6, 0.02] kw = dict(spacing='proportional', orientation='horizontal', cmap=plt.cm.jet) kw.update(**kwargs) norm = kw.pop('norm', None) if norm is None: norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax) ax = plt.gcf().add_axes(box) cb = mpl.colorbar.ColorbarBase(ax, norm=norm, **kw) return cb def create_common_legend(labels, colors, markers='s', mec=None, linestyles='None', linewidths=None, fig=None, **kwargs): """ Create a legend from the symbols without the actual plot Parameters ---------- labels: seq sequence of label strings colors: seq or Colormap sequence of colors or Colormap instance from which deriving a sequence of colors to encode each group if Colormap instance, a cmap attribute will be generated after a plot and will refer to the updated instance markers: seq sequence of markers (will cycle through) default is `s`, i.e., a square mec: seq marker 
edge colors linestyles: seq sequence of linestyles (will cycle through) linewidths: seq sequence of linewidths (will cycle through) fig: plt.Figure figure to add a legend (default: `plt.gcf()`) kwargs: dict any other keyword will go to :func:`plt.legend` Returns ------- lgd: plt.Legend instance the newly created legend """ from matplotlib.lines import Line2D from itertools import cycle if fig is None: fig = plt.gcf() defaults = dict(numpoints=1, frameon=False) defaults.update(kwargs) if not hasattr(mec, '__iter__'): mec = [mec] if not hasattr(linewidths, '__iter__'): linewidths = [linewidths] lines = [] for lbl, color, m, ls, me, lw in zip(labels, colors, cycle(markers), cycle(linestyles), cycle(mec), cycle(linewidths)): line_ = Line2D(range(2), range(2), marker=m, mec=me, linestyle=ls, color=color, lw=lw) lines.append(line_) lgd = fig.legend(lines, labels, **defaults) plt.draw_if_interactive() return lgd def colorify(data, vmin=None, vmax=None, cmap=plt.cm.Spectral): """ Associate a color map to a quantity vector Parameters ---------- data: sequence values to encode vmin: float minimum value vmax: float maximum value cmap: Colormap instance colormap to use Returns ------- colors: sequence or array one color per input data cmap: Colormap data normalized colormap instance """ try: from matplotlib.colors import Normalize except ImportError: # old mpl from matplotlib.colors import normalize as Normalize _vmin = vmin or min(data) _vmax = vmax or max(data) cNorm = Normalize(vmin=_vmin, vmax=_vmax) scalarMap = plt.cm.ScalarMappable(norm=cNorm, cmap=cmap) try: colors = scalarMap.to_rgba(data) except Exception: colors = list(map(scalarMap.to_rgba, data)) scalarMap.set_array(data) return colors, scalarMap
{ "repo_name": "mfouesneau/ezdata", "path": "ezdata/plotter.py", "copies": "1", "size": "62725", "license": "mit", "hash": 588978340670407300, "line_mean": 31.8059623431, "line_max": 83, "alpha_frac": 0.5305540056, "autogenerated": false, "ratio": 4.126101828706749, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5156655834306748, "avg_score": null, "num_lines": null }
"""A conversion module for googletrans""" import json import re def build_params(query, src, dest, token, override): params = { 'client': 'webapp', 'sl': src, 'tl': dest, 'hl': dest, 'dt': ['at', 'bd', 'ex', 'ld', 'md', 'qca', 'rw', 'rm', 'ss', 't'], 'ie': 'UTF-8', 'oe': 'UTF-8', 'otf': 1, 'ssel': 0, 'tsel': 0, 'tk': token, 'q': query, } if override is not None: for key, value in get_items(override): params[key] = value return params def legacy_format_json(original): # save state states = [] text = original # save position for double-quoted texts for i, pos in enumerate(re.finditer('"', text)): # pos.start() is a double-quote p = pos.start() + 1 if i % 2 == 0: nxt = text.find('"', p) states.append((p, text[p:nxt])) # replace all wiered characters in text while text.find(',,') > -1: text = text.replace(',,', ',null,') while text.find('[,') > -1: text = text.replace('[,', '[null,') # recover state for i, pos in enumerate(re.finditer('"', text)): p = pos.start() + 1 if i % 2 == 0: j = int(i / 2) nxt = text.find('"', p) # replacing a portion of a string # use slicing to extract those parts of the original string to be kept text = text[:p] + states[j][1] + text[nxt:] converted = json.loads(text) return converted def get_items(dict_object): for key in dict_object: yield key, dict_object[key] def format_json(original): try: converted = json.loads(original) except ValueError: converted = legacy_format_json(original) return converted def rshift(val, n): """python port for '>>>'(right shift with padding) """ return (val % 0x100000000) >> n
{ "repo_name": "ssut/py-googletrans", "path": "googletrans/utils.py", "copies": "1", "size": "1945", "license": "mit", "hash": -610993380705895800, "line_mean": 23.6202531646, "line_max": 82, "alpha_frac": 0.5172236504, "autogenerated": false, "ratio": 3.52994555353902, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.454716920393902, "avg_score": null, "num_lines": null }
'''A conversion of Jon Skeet's LINQ Mandelbrot from LINQ to asq.

The original can be found at
http://msmvps.com/blogs/jon_skeet/archive/2008/02/26/visualising-the-mandelbrot-set-with-linq-yet-again.aspx
'''

import colorsys

from asq.initiators import integers, query


def generate(start, func):
    """Yield the infinite orbit start, func(start), func(func(start)), ..."""
    value = start
    while True:
        yield value
        value = func(value)


def colnorm(r, g, b):
    """Convert an (r, g, b) triple of floats in [0, 1] to 8-bit components.

    BUGFIX: the previous version subtracted 1 from every channel, which
    produced -1 (an invalid colour component) whenever a channel was 0.
    """
    return (int(255 * r), int(255 * g), int(255 * b))


def col(n, max):
    """Map an escape-iteration count `n` (out of `max`) to an RGB colour;
    points that never escape (`n == max`) are painted black."""
    if n == max:
        return (0, 0, 0)
    # BUGFIX: hsv_to_rgb returns a 3-tuple, which must be unpacked into
    # colnorm's three positional parameters.
    return colnorm(*colorsys.hsv_to_rgb(0.0, 1.0, float(n) / max))


def mandelbrot():
    """Render the Mandelbrot set with asq queries and display it via PIL."""
    # Imported lazily so the module stays importable without Pillow/PIL
    # installed (the original file had `import Image` commented out, which
    # made Image.new below a NameError at runtime).
    try:
        from PIL import Image
    except ImportError:  # legacy standalone-PIL package layout
        import Image

    MaxIterations = 200
    SampleWidth = 3.2
    SampleHeight = 2.5
    OffsetX = -2.1
    OffsetY = -1.25
    ImageWidth = 480
    ImageHeight = int(SampleHeight * ImageWidth / SampleWidth)

    # BUGFIX: the chained query used to be bound to a local named `query`,
    # shadowing the imported asq `query` initiator that the escape-time
    # lambda calls, while the result was read from an undefined name `q`.
    # Binding the pipeline to `q` fixes both problems.
    q = integers(0, ImageHeight) \
        .select(lambda y: (y * SampleHeight) / ImageHeight + OffsetY) \
        .select_many_with_correspondence(
            lambda y: integers(0, ImageWidth)
                      .select(lambda x: (x * SampleWidth) / ImageWidth + OffsetX),
            lambda y, x: (x, y)) \
        .select(lambda real_imag: complex(*real_imag)) \
        .select(lambda c: query(generate(c, lambda x: x * x + c))
                          .take_while(lambda x: x.real ** 2 + x.imag ** 2 < 4)
                          .take(MaxIterations)
                          .count()) \
        .select(lambda c: ((c * 7) % 255, (c * 5) % 255, (c * 11) % 255)
                if c != MaxIterations else (0, 0, 0))

    data = q.to_list()
    image = Image.new("RGB", (ImageWidth, ImageHeight))
    image.putdata(data)
    image.show()


if __name__ == '__main__':
    mandelbrot()
{ "repo_name": "rob-smallshire/asq", "path": "asq/examples/mandelbrot.py", "copies": "6", "size": "1711", "license": "mit", "hash": -6501810235767690000, "line_mean": 27.5166666667, "line_max": 114, "alpha_frac": 0.5680888369, "autogenerated": false, "ratio": 3.1394495412844035, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.002417510811172414, "num_lines": 60 }
"""A convolutional neural network for MNIST classification. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import sys # Import data from tensorflow.examples.tutorials.mnist import input_data import tensorflow as tf FLAGS = None def weight_variable(shape): initial = tf.truncated_normal(shape, stddev=0.1) return tf.Variable(initial) def bias_variable(shape): initial = tf.constant(0.1, shape=shape) return tf.Variable(initial) def conv2d(x, W): return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') def max_pool_2x2(x): return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') def main(_): mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True) # Create the model x = tf.placeholder(tf.float32, [None, 784]) # Define loss and optimizer y_ = tf.placeholder(tf.float32, [None, 10]) x_image = tf.reshape(x, [-1, 28, 28, 1]) W_conv1 = weight_variable([5, 5, 1, 32]) b_conv1 = bias_variable([32]) h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) h_pool1 = max_pool_2x2(h_conv1) W_conv2 = weight_variable([5, 5, 32, 64]) b_conv2 = bias_variable([64]) h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) h_pool2 = max_pool_2x2(h_conv2) W_fc1 = weight_variable([7 * 7 * 64, 1024]) b_fc1 = bias_variable([1024]) h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64]) h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) keep_prob = tf.placeholder(tf.float32) h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) W_fc2 = weight_variable([1024, 10]) b_fc2 = bias_variable([10]) y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2 cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_conv, y_)) train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) sess = tf.InteractiveSession() 
sess.run(tf.global_variables_initializer()) for i in range(20000): batch = mnist.train.next_batch(50) if i % 100 == 0: train_accuracy = accuracy.eval(feed_dict={ x: mnist.validation.images, y_: mnist.validation.labels, keep_prob: 1.0}) print("step %d, training accuracy %g"%(i, train_accuracy)) train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5}) print("test accuracy %g"%accuracy.eval(feed_dict={ x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--data_dir', type=str, default='/tmp/tensorflow/mnist/input_data', help='Directory for storing input data') FLAGS, unparsed = parser.parse_known_args() tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
{ "repo_name": "mengli/MachineLearning", "path": "mnist/mnist_conv.py", "copies": "2", "size": "2937", "license": "apache-2.0", "hash": -8654154573835021000, "line_mean": 28.9693877551, "line_max": 89, "alpha_frac": 0.6540687777, "autogenerated": false, "ratio": 2.8105263157894735, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9355480414047419, "avg_score": 0.021822935888410694, "num_lines": 98 }
# a convolution layer with a following pooling layer
import numpy as np

from sigmoid import sigmoid
from sigmoid import sigmoid_d
from flatten_poolingLayer_output import flatten_poolingLayer_output
import conv2d
from FullyConnectedLayer import FullyConnectedLayer


class ConvPoolingLayer(object):
    """A convolution layer immediately followed by a pooling layer, with a
    hand-written forward pass (`calculate`) and backward pass (`BP`).
    Python 2 code (xrange, print statements, integer division)."""

    def __init__(self, input_shape, filter_shape, pool_size=(2, 2),
                 activation_fn=sigmoid, activation_fn_d=sigmoid_d,
                 use_max_pooling=True):
        # store input values
        """`filter_shape` is a tuple of length 4, whose entries are the
        number of filters, the number of input feature maps, the filter
        height, and the filter width.

        `input_shape` is a tuple of length 4, whose entries are the
        mini-batch size, the number of input feature maps, the image
        height, and the image width.

        `pool_size` is a tuple of length 2, whose entries are the y and x
        pooling sizes.

        `activation_fn` / `activation_fn_d` are the activation (applied
        after pooling) and its derivative; `use_max_pooling` selects max
        (True) or mean (False) pooling.
        """
        self.input_shape = input_shape
        self.filter_shape = filter_shape
        self.pool_size = pool_size
        self.use_max_pooling = use_max_pooling
        self.activation_fn = activation_fn
        self.activation_fn_d = activation_fn_d
        # number of outputs after pooling
        # NOTE(review): `/` is integer division under Python 2 when both
        # operands are ints -- presumably intentional here; confirm.
        self.num_output = filter_shape[0] * np.prod(filter_shape[2:]) / np.prod(pool_size)
        # weights ~ N(0, 1/num_output) per filter element; one bias per filter
        self.weights = np.random.normal(loc=0.0,
                                        scale=np.sqrt(1.0 / self.num_output),
                                        size=filter_shape)
        self.biases = np.random.normal(loc=0.0, scale=1.0,
                                       size=(filter_shape[0],))

    # accept input, and return result of forward propagation
    def calculate(self, input_images):
        """Forward pass: convolve, add biases, pool, then apply the
        activation.  Returns the activated pooled maps, one list of feature
        maps per mini-batch image."""
        self.input_images = input_images
        conv_out = conv2d.conv2d(self.input_images, self.weights, self.input_shape)
        # add bias to conv_out
        # for each image in a mini batch
        for mb in xrange(self.input_shape[0]):
            # for each generated feature map
            # NOTE(review): k ranges over filter_shape[0] * filter_shape[1]
            # maps, but self.biases has only filter_shape[0] entries -- this
            # indexes past the bias vector whenever filter_shape[1] > 1.
            # Looks like it assumes a single input channel; confirm.
            for k in xrange(self.filter_shape[0] * self.filter_shape[1]):
                conv_out[mb][k] = np.asarray(conv_out[mb][k]) + self.biases[k]
        self.conv_out = conv_out
        # pooling (pre-activation values are kept in self.zs for BP)
        if self.use_max_pooling:
            self.zs = [[np.asarray(self.max_pooling(conv_out[mb][k]))
                        for k in xrange(self.filter_shape[0] * self.filter_shape[1])]
                       for mb in xrange(self.input_shape[0])]
        else:
            self.zs = [[np.asarray(self.mean_pooling(conv_out[mb][k]))
                        for k in xrange(self.filter_shape[0] * self.filter_shape[1])]
                       for mb in xrange(self.input_shape[0])]
        self.out = [[np.asarray(self.activation_fn(self.zs[mb][k]))
                     for k in xrange(self.filter_shape[0] * self.filter_shape[1])]
                    for mb in xrange(self.input_shape[0])]
        return self.out

    def max_pooling(self, in_array):
        """Non-overlapping max pooling of a 2-D array.

        NOTE(review): the outer comprehension iterates j (columns) and the
        inner iterates i (rows), so the pooled map is transposed relative
        to `in_array`; `mean_pooling` does the same, so the two are at
        least mutually consistent -- verify consumers expect this layout.
        """
        in_array_h, in_array_w = in_array.shape
        return [[np.amax(in_array[i:i + self.pool_size[0], j:j + self.pool_size[1]])
                 for i in xrange(0, in_array_h, self.pool_size[0])]
                for j in xrange(0, in_array_w, self.pool_size[1])]

    def mean_pooling(self, in_array):
        """Non-overlapping mean pooling of a 2-D array (same transposed
        layout caveat as `max_pooling`)."""
        in_array_h, in_array_w = in_array.shape
        return [[np.mean(in_array[i:i + self.pool_size[0], j:j + self.pool_size[1]])
                 for i in xrange(0, in_array_h, self.pool_size[0])]
                for j in xrange(0, in_array_w, self.pool_size[1])]

    def BP(self, eta, BP_delta, next_layer, previous_layer_out):
        """Backward pass: compute this layer's delta from the next layer's
        delta (`BP_delta`) and update self.weights / self.biases in place
        with learning rate `eta`.  Only a FullyConnectedLayer successor and
        max pooling are implemented."""
        # return delta of this layer
        # and update weights / biases
        delta = []
        # if the next layer is a fully connected layer, backpropagate its
        # delta through its weights onto the (flattened) pooled outputs
        if (next_layer.__class__ == FullyConnectedLayer):
            out_shape = np.asarray(self.zs[0]).shape
            flat_zs = flatten_poolingLayer_output(self.zs)
            delta = [np.dot(w.transpose(), d) * self.activation_fn_d(z)
                     for w, d, z in zip(next_layer.weights, BP_delta, flat_zs)]
            # restore the shape of delta, this is the delta on pooling layer
            delta = [im.reshape(out_shape) for im in delta]
        else:
            print 'Error! Not implemented yet!'
        # pooling layer doesn't need an update of its own;
        # calculate delta on the convolution layer
        if (self.use_max_pooling):
            delta = self.delta_BP_max_pooling(delta)
        else:
            print 'Error! Not implemented yet!'
        # update biases: average the delta over the batch and both spatial
        # dimensions, leaving one value per feature map
        nabla_b = np.asarray(delta)
        nabla_b = nabla_b.mean(axis=0)
        nabla_b = nabla_b.mean(axis=1)
        nabla_b = nabla_b.mean(axis=1)
        self.biases = self.biases - eta * nabla_b
        # update weights
        conv_out_shape = self.conv_out[0][0].shape
        nabla_w_batch = []
        # for each image in a mini batch
        for mb in xrange(self.input_shape[0]):
            nabla_w_im = []
            # for each kernel
            for k in xrange(self.filter_shape[0]):
                nabla_w_f_in = []
                # for each input feature map
                for f_in in xrange(self.input_shape[1]):
                    target_output_feature_index = f_in * self.filter_shape[0] + k
                    nabla_w_k = []
                    # for each element in conv_out: weight gradient is the
                    # input patch scaled by that output element
                    for i in xrange(conv_out_shape[0]):
                        for j in xrange(conv_out_shape[1]):
                            nabla_w_conv_pnt = self.input_images[mb][f_in][
                                i:(i + self.filter_shape[2]),
                                j:(j + self.filter_shape[3])] * \
                                self.conv_out[mb][target_output_feature_index][i, j]
                            nabla_w_k.append(nabla_w_conv_pnt)
                    nabla_w_k = np.asarray(nabla_w_k).mean(axis=0)
                    nabla_w_f_in.append(nabla_w_k)
                nabla_w_im.append(nabla_w_f_in)
            nabla_w_batch.append(nabla_w_im)
        nabla_w = np.asarray(nabla_w_batch).mean(axis=0)
        # update network parameter
        self.weights = self.weights - eta * nabla_w

    def delta_BP_max_pooling(self, delta_in):
        """Route each pooled delta back to the argmax position of its pool
        window; all other positions in the window receive zero."""
        # calculate delta on convolution when using max_pooling
        delta = np.zeros(np.asarray(self.conv_out).shape)
        conv_out_shape = self.conv_out[0][0].shape
        # for each image in a mini batch
        for mb in xrange(self.input_shape[0]):
            # for each feature map
            for f in xrange(self.filter_shape[0] * self.filter_shape[1]):
                # for each pool window (i, j are Python-2 integer divisions)
                for i in xrange(0, conv_out_shape[0], self.pool_size[0]):
                    for j in xrange(0, conv_out_shape[1], self.pool_size[1]):
                        d = delta_in[mb][f][i / self.pool_size[0], j / self.pool_size[1]]
                        target_conv_mat = self.conv_out[mb][f][i:(i + self.pool_size[0]),
                                                               j:(j + self.pool_size[1])]
                        # basic slicing returns a view, so this assignment
                        # writes through to `delta` itself
                        delta[mb, f, i:(i + self.pool_size[0]), j:(j + self.pool_size[1])] \
                            [np.unravel_index(target_conv_mat.argmax(), target_conv_mat.shape)] = d
        return delta
{ "repo_name": "metorm/DeepLearningTutorial", "path": "ConvPoolingLayer.py", "copies": "1", "size": "7214", "license": "mit", "hash": -884299500220966300, "line_mean": 45.5419354839, "line_max": 117, "alpha_frac": 0.5547546437, "autogenerated": false, "ratio": 3.5155945419103314, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.956380603818654, "avg_score": 0.0013086294847583794, "num_lines": 155 }
"""A copy of Django 1.3.0's stock loaddata.py, adapted so that, instead of loading any data, it returns the tables referenced by a set of fixtures so we can truncate them (and no others) quickly after we're finished with them.""" import gzip import os import zipfile from django.conf import settings from django.core import serializers from django.db import DEFAULT_DB_ALIAS, router from django.db.models import get_apps # Remove this try/except block if the minimum Python version suported is 2.6 # as `product` was added in Python 2.6. try: from itertools import product except ImportError: # Deprecated in 1.5, removed in 1.7. from django.utils.itercompat import product try: import bz2 has_bz2 = True except ImportError: has_bz2 = False def tables_used_by_fixtures(fixture_labels, using=DEFAULT_DB_ALIAS): """Act like Django's stock loaddata command, but, instead of loading data, return an iterable of the names of the tables into which data would be loaded.""" # Keep a count of the installed objects and fixtures fixture_count = 0 loaded_object_count = 0 fixture_object_count = 0 tables = set() class SingleZipReader(zipfile.ZipFile): def __init__(self, *args, **kwargs): zipfile.ZipFile.__init__(self, *args, **kwargs) if settings.DEBUG: assert len(self.namelist()) == 1, "Zip-compressed fixtures must contain only one file." 
def read(self): return zipfile.ZipFile.read(self, self.namelist()[0]) compression_types = { None: file, 'gz': gzip.GzipFile, 'zip': SingleZipReader } if has_bz2: compression_types['bz2'] = bz2.BZ2File app_module_paths = [] for app in get_apps(): if hasattr(app, '__path__'): # It's a 'models/' subpackage for path in app.__path__: app_module_paths.append(path) else: # It's a models.py module app_module_paths.append(app.__file__) app_fixtures = [os.path.join(os.path.dirname(path), 'fixtures') for path in app_module_paths] for fixture_label in fixture_labels: parts = fixture_label.split('.') if len(parts) > 1 and parts[-1] in compression_types: compression_formats = [parts[-1]] parts = parts[:-1] else: compression_formats = compression_types.keys() if len(parts) == 1: fixture_name = parts[0] formats = serializers.get_public_serializer_formats() else: fixture_name, format = '.'.join(parts[:-1]), parts[-1] if format in serializers.get_public_serializer_formats(): formats = [format] else: formats = [] if not formats: # stderr.write(style.ERROR("Problem installing fixture '%s': %s is # not a known serialization format.\n" % (fixture_name, format))) return set() if os.path.isabs(fixture_name): fixture_dirs = [fixture_name] else: fixture_dirs = app_fixtures + list(settings.FIXTURE_DIRS) + [''] for fixture_dir in fixture_dirs: # stdout.write("Checking %s for fixtures...\n" % # humanize(fixture_dir)) label_found = False for combo in product([using, None], formats, compression_formats): database, format, compression_format = combo file_name = '.'.join( p for p in [ fixture_name, database, format, compression_format ] if p ) # stdout.write("Trying %s for %s fixture '%s'...\n" % \ # (humanize(fixture_dir), file_name, fixture_name)) full_path = os.path.join(fixture_dir, file_name) open_method = compression_types[compression_format] try: fixture = open_method(full_path, 'r') if label_found: fixture.close() # stderr.write(style.ERROR("Multiple fixtures named # '%s' in %s. 
Aborting.\n" % (fixture_name, # humanize(fixture_dir)))) return set() else: fixture_count += 1 objects_in_fixture = 0 loaded_objects_in_fixture = 0 # stdout.write("Installing %s fixture '%s' from %s.\n" # % (format, fixture_name, humanize(fixture_dir))) try: objects = serializers.deserialize(format, fixture, using=using) for obj in objects: objects_in_fixture += 1 if router.allow_syncdb(using, obj.object.__class__): loaded_objects_in_fixture += 1 tables.add( obj.object.__class__._meta.db_table) loaded_object_count += loaded_objects_in_fixture fixture_object_count += objects_in_fixture label_found = True except (SystemExit, KeyboardInterrupt): raise except Exception: fixture.close() # stderr.write( style.ERROR("Problem installing # fixture '%s': %s\n" % (full_path, ''.join(tra # ceback.format_exception(sys.exc_type, # sys.exc_value, sys.exc_traceback))))) return set() fixture.close() # If the fixture we loaded contains 0 objects, assume that an # error was encountered during fixture loading. if objects_in_fixture == 0: # stderr.write( style.ERROR("No fixture data found # for '%s'. (File format may be invalid.)\n" % # (fixture_name))) return set() except Exception: # stdout.write("No %s fixture '%s' in %s.\n" % \ (format, # fixture_name, humanize(fixture_dir))) pass return tables
{ "repo_name": "jbalogh/test-utils", "path": "test_utils/fixture_tables.py", "copies": "1", "size": "6601", "license": "bsd-3-clause", "hash": 6228623059351823000, "line_mean": 40, "line_max": 103, "alpha_frac": 0.5106802, "autogenerated": false, "ratio": 4.658433309809457, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5669113509809457, "avg_score": null, "num_lines": null }
'''A Coral wrapper for the MAFFT command line multiple sequence aligner.'''
import coral
import os
import shutil
import subprocess
import tempfile


def MAFFT(sequences, gap_open=1.53, gap_extension=0.0, retree=2):
    '''A Coral wrapper for the MAFFT command line multiple sequence aligner.

    :param sequences: A list of sequences to align.
    :type sequences: List of homogeneous sequences (all DNA, or all RNA,
                     etc.)
    :param gap_open: --op (gap open) penalty in MAFFT cli.
    :type gap_open: float
    :param gap_extension: --ep (gap extension) penalty in MAFFT cli.
    :type gap_extension: float
    :param retree: Number of times to build the guide tree.
    :type retree: int
    :returns: A list of aligned sequences as coral.DNA objects, in the same
              order MAFFT emits them.

    Requires the ``mafft`` executable on PATH; raises OSError if it is
    missing.
    '''
    arguments = ['mafft',
                 '--op', str(gap_open),
                 '--ep', str(gap_extension),
                 '--retree', str(retree),
                 'input.fasta']

    tempdir = tempfile.mkdtemp()
    try:
        # Write the input FASTA file MAFFT will read from the temp dir.
        with open(os.path.join(tempdir, 'input.fasta'), 'w') as f:
            for i, sequence in enumerate(sequences):
                if hasattr(sequence, 'name'):
                    name = sequence.name
                else:
                    name = 'sequence{}'.format(i)
                f.write('>{}\n'.format(name))
                f.write(str(sequence) + '\n')
        # FIX: the devnull handle used for stderr was previously opened and
        # never closed (file-descriptor leak on every call). A context
        # manager closes it once the subprocess has finished.
        with open(os.devnull, 'w') as devnull:
            process = subprocess.Popen(arguments,
                                       stdout=subprocess.PIPE,
                                       stderr=devnull,
                                       cwd=tempdir)
            stdout = process.communicate()[0]
    finally:
        # Always remove the scratch directory, even if MAFFT fails.
        shutil.rmtree(tempdir)

    # Process stdout into something downstream process can use.
    # MAFFT emits FASTA; each record starts with '>'.
    records = stdout.split('>')
    # First line is now blank
    records.pop(0)
    aligned_list = []
    for record in records:
        lines = record.split('\n')
        # First line of each record is the sequence name; the rest is the
        # aligned sequence, possibly wrapped over several lines.
        name = lines.pop(0)
        aligned_list.append(coral.DNA(''.join(lines)))
    return aligned_list
{ "repo_name": "klavinslab/coral", "path": "coral/analysis/_sequencing/mafft.py", "copies": "1", "size": "1898", "license": "mit", "hash": 2487104441286565400, "line_mean": 33.5090909091, "line_max": 77, "alpha_frac": 0.5990516333, "autogenerated": false, "ratio": 3.9707112970711296, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 55 }
# Acorn 2.0: Cocoa Butter
# Booleans are treated as integers
# Allow hex
#
# AST node ("meta") classes for the Acorn interpreter.  Each node stores its
# operands in attributes e1..e5 and exposes them via expr1()..expr5(); the
# parser dispatches on the concrete class with isinstance.
# FIX in this revision: TB.__repr__ previously returned 'Bool' % (self.e1),
# which raises TypeError ("not all arguments converted during string
# formatting") whenever a TB node is repr'd.  It now returns 'Bool'.


#Number meta class
class N():
    """Numeric literal. Parses via float(); integral values are narrowed to
    int, and inputs float() rejects are parsed as base-16 ints (hex)."""
    def __init__(self, n):
        try:
            self.n = float(n)
            if self.n.is_integer():
                # NOTE(review): int(n) re-parses the original argument, so
                # N('2.0') raises — preserved from the original behavior.
                self.n = int(n)
        except:
            self.n = int(n, 16)

    def N(self):
        return self.n

    def __repr__(self):
        return 'N(%s)' % self.n


#Boolean meta class
class B():
    """Boolean literal; B() evaluates to the integer 0 or 1.
    Accepts the strings "true"/"false", wrapped B/N nodes, or any Python
    value (checked in that order)."""
    def __init__(self, b):
        self.boolean = b

    def B(self):
        if self.boolean == "true":
            return 1
        elif self.boolean == "false":
            return 0
        elif isinstance(self.boolean, B):
            return int(self.boolean.B())
        elif isinstance(self.boolean, N):
            return int(bool(self.boolean.N()))
        elif self.boolean == True:
            return 1
        else:
            return 0

    def __repr__(self):
        return "B('%s')" % self.boolean


#String meta class
class S():
    """String literal node."""
    def __init__(self, s):
        self.s = str(s)

    def S(self):
        return self.s

    def __repr__(self):
        return "S('%s')" % self.s


#Var meta class
class Var():
    """Variable reference; X() returns the variable name."""
    def __init__(self, x):
        self.x = str(x)

    def X(self):
        return self.x

    def __repr__(self):
        return "Var('%s')" % self.x


#Null meta class
class Null():
    """The null value; null() returns the sentinel string "Null"."""
    def __init__(self):
        self.n = "Null"

    def null(self):
        return self.n

    def __repr__(self):
        return 'Null()'


#Unary meta operations
class Unary():
    """Unary operation node: uop() is the operator name, expr1() the operand."""
    def __init__(self, uop, e1):
        self.op = str(uop)
        self.e1 = e1

    def expr1(self):
        return self.e1

    def uop(self):
        return self.op

    def __repr__(self):
        return 'Unary(%s,%s)' % (self.op, self.e1)


#Binary meta operations
class Binary():
    """Binary arithmetic node: bop() is the operator name."""
    def __init__(self, bop, e1, e2):
        self.op = str(bop)
        self.e1 = e1
        self.e2 = e2

    def expr1(self):
        return self.e1

    def expr2(self):
        return self.e2

    def bop(self):
        return self.op

    def __repr__(self):
        return 'Binary(%s,%s,%s)' % (self.op, self.e1, self.e2)


#Trinary meta operator
class If():
    """Conditional: expr1() is the test, expr2() the then-branch, expr3()
    the else-branch."""
    def __init__(self, e1, e2, e3):
        self.e1 = e1
        self.e2 = e2
        self.e3 = e3

    def expr1(self):
        return self.e1

    def expr2(self):
        return self.e2

    def expr3(self):
        return self.e3

    def __repr__(self):
        return 'If(%s,%s,%s)' % (self.e1, self.e2, self.e3)


class Function():
    """Function definition: expr1() is the argument list, expr2() the body."""
    def __init__(self, arguments, body):
        self.e1 = arguments
        self.e2 = body

    def expr1(self):
        return self.e1

    def expr2(self):
        return self.e2

    def __repr__(self):
        return 'Function(%s,%s)' % (self.e1, self.e2)


class Call():
    """Function call: expr1() is the argument list, expr2() the callee."""
    def __init__(self, arguments, body):
        self.e1 = arguments
        self.e2 = body

    def expr1(self):
        return self.e1

    def expr2(self):
        return self.e2

    def __repr__(self):
        return 'Call(%s,%s)' % (self.e1, self.e2)


class Return():
    """Return statement wrapping the returned expression."""
    def __init__(self, returns):
        self.e1 = returns

    def expr1(self):
        return self.e1

    def __repr__(self):
        return 'Return(%s)' % (self.e1)


class Seq():
    """Sequencing: evaluate expr1() then expr2()."""
    def __init__(self, e1, e2):
        self.e1 = e1
        self.e2 = e2

    def expr1(self):
        return self.e1

    def expr2(self):
        return self.e2

    def __repr__(self):
        return 'Seq(%s,%s)' % (self.e1, self.e2)


class Eq():
    """Equality comparison node."""
    def __init__(self, e1, e2):
        self.e1 = e1
        self.e2 = e2

    def expr1(self):
        return self.e1

    def expr2(self):
        return self.e2

    def __repr__(self):
        return 'Eq(%s,%s)' % (self.e1, self.e2)


class Ne():
    """Inequality comparison node."""
    def __init__(self, e1, e2):
        self.e1 = e1
        self.e2 = e2

    def expr1(self):
        return self.e1

    def expr2(self):
        return self.e2

    def __repr__(self):
        return 'Ne(%s,%s)' % (self.e1, self.e2)


class Lt():
    """Less-than comparison node."""
    def __init__(self, e1, e2):
        self.e1 = e1
        self.e2 = e2

    def expr1(self):
        return self.e1

    def expr2(self):
        return self.e2

    def __repr__(self):
        return 'Lt(%s,%s)' % (self.e1, self.e2)


class Le():
    """Less-than-or-equal comparison node."""
    def __init__(self, e1, e2):
        self.e1 = e1
        self.e2 = e2

    def expr1(self):
        return self.e1

    def expr2(self):
        return self.e2

    def __repr__(self):
        return 'Le(%s,%s)' % (self.e1, self.e2)


class Gt():
    """Greater-than comparison node."""
    def __init__(self, e1, e2):
        self.e1 = e1
        self.e2 = e2

    def expr1(self):
        return self.e1

    def expr2(self):
        return self.e2

    def __repr__(self):
        return 'Gt(%s,%s)' % (self.e1, self.e2)


class Ge():
    """Greater-than-or-equal comparison node."""
    def __init__(self, e1, e2):
        self.e1 = e1
        self.e2 = e2

    def expr1(self):
        return self.e1

    def expr2(self):
        return self.e2

    def __repr__(self):
        return 'Ge(%s,%s)' % (self.e1, self.e2)


class And():
    """Logical-and node (short-circuited by the parser)."""
    def __init__(self, e1, e2):
        self.e1 = e1
        self.e2 = e2

    def expr1(self):
        return self.e1

    def expr2(self):
        return self.e2

    def __repr__(self):
        return 'And(%s,%s)' % (self.e1, self.e2)


class Or():
    """Logical-or node (short-circuited by the parser)."""
    def __init__(self, e1, e2):
        self.e1 = e1
        self.e2 = e2

    def expr1(self):
        return self.e1

    def expr2(self):
        return self.e2

    def __repr__(self):
        return 'Or(%s,%s)' % (self.e1, self.e2)


class BitwiseAnd():
    """Bitwise-and node.  repr uses the historical name 'Intersect'."""
    def __init__(self, e1, e2):
        self.e1 = e1
        self.e2 = e2

    def expr1(self):
        return self.e1

    def expr2(self):
        return self.e2

    def __repr__(self):
        return 'Intersect(%s,%s)' % (self.e1, self.e2)


class BitwiseOr():
    """Bitwise-or node.  repr uses the historical name 'Union'."""
    def __init__(self, e1, e2):
        self.e1 = e1
        self.e2 = e2

    def expr1(self):
        return self.e1

    def expr2(self):
        return self.e2

    def __repr__(self):
        return 'Union(%s,%s)' % (self.e1, self.e2)


class LeftShift():
    """Bitwise left-shift node."""
    def __init__(self, e1, e2):
        self.e1 = e1
        self.e2 = e2

    def expr1(self):
        return self.e1

    def expr2(self):
        return self.e2

    def __repr__(self):
        return 'LeftShift(%s,%s)' % (self.e1, self.e2)


class RightShift():
    """Bitwise right-shift node."""
    def __init__(self, e1, e2):
        self.e1 = e1
        self.e2 = e2

    def expr1(self):
        return self.e1

    def expr2(self):
        return self.e2

    def __repr__(self):
        return 'RightShift(%s,%s)' % (self.e1, self.e2)


class Malloc():
    """Variable declaration: expr1() is the kind (e.g. "Var"), expr2() the
    Var node being declared, expr3() the initial value."""
    def __init__(self, m, x, v):
        self.e1 = m
        self.e2 = x
        self.e3 = v

    def expr1(self):
        return self.e1

    def expr2(self):
        return self.e2

    def expr3(self):
        return self.e3

    def __repr__(self):
        return 'Malloc(%s,%s,%s)' % (self.e1, self.e2, self.e3)


class Array():
    """Array literal; expr1() is the backing element list."""
    def __init__(self, e1):
        self.e1 = e1

    def expr1(self):
        return self.e1

    def __repr__(self):
        return 'Array(%s)' % self.e1


class Index():
    """Array indexing: expr1() is the array expression, expr2() the index."""
    def __init__(self, array, index):
        self.e1 = array
        self.e2 = index

    def expr1(self):
        return self.e1

    def expr2(self):
        return self.e2

    def __repr__(self):
        return 'Index(%s,%s)' % (self.e1, self.e2)


class Assign():
    """Assignment: expr1() is the target (Var or Index), expr2() the value."""
    def __init__(self, var, val):
        self.e1 = var
        self.e2 = val

    def expr1(self):
        return self.e1

    def expr2(self):
        return self.e2

    def __repr__(self):
        return 'Assign(%s,%s)' % (self.e1, self.e2)


class ForEach():
    """Range loop: expr1() index name, expr2()/expr3() start/end, expr4()
    body, expr5() the range closure ("<" or "<=")."""
    def __init__(self, i, start, end, scope, closure):
        self.e1 = i
        self.e2 = start
        self.e3 = end
        self.e4 = scope
        self.e5 = closure

    def expr1(self):
        return self.e1

    def expr2(self):
        return self.e2

    def expr3(self):
        return self.e3

    def expr4(self):
        return self.e4

    def expr5(self):
        return self.e5

    def __repr__(self):
        return 'ForEach(%s,%s,%s,%s,%s)' % (self.e1, self.e2, self.e3,
                                            self.e4, self.e5)


class For():
    """C-style for loop: expr1() initializer, expr2() condition, expr3()
    counter update, expr4() body."""
    def __init__(self, index, condition, count, scope):
        self.e1 = index
        self.e2 = condition
        self.e3 = count
        self.e4 = scope

    def expr1(self):
        return self.e1

    def expr2(self):
        return self.e2

    def expr3(self):
        return self.e3

    def expr4(self):
        return self.e4

    def __repr__(self):
        return 'For(%s,%s,%s,%s)' % (self.e1, self.e2, self.e3, self.e4)


class While():
    """While loop: expr1() condition, expr2() body."""
    def __init__(self, condition, scope):
        self.e1 = condition
        self.e2 = scope

    def expr1(self):
        return self.e1

    def expr2(self):
        return self.e2

    def __repr__(self):
        return 'While(%s,%s)' % (self.e1, self.e2)


#Side effects
class Print():
    """Print statement; E() returns the expression to print."""
    def __init__(self, expr):
        self.expr1 = expr

    def E(self):
        return self.expr1

    def __repr__(self):
        return 'Print(%s)' % self.expr1


#Side effects
class Println():
    """Print-without-newline statement; E() returns the expression.
    NOTE(review): intentionally left without a custom __repr__, matching the
    original class."""
    def __init__(self, expr):
        self.expr1 = expr

    def E(self):
        return self.expr1


class Input():
    """Reads a token and coerces it: numeric -> N, true/false -> B,
    null -> Null, anything else -> S."""
    def __init__(self):
        self.expr1 = None

    def cast(self, n):
        if isfloat(n):
            return N(n)
        if n == "true" or n == "false":
            return B(n)
        if n == "null":
            return Null()
        return S(n)

    def __repr__(self):
        return 'Input(%s)' % (self.expr1)


class Cast():
    """Explicit cast node: expr1() is the value, expr2() the target type
    node (TInt/TFloat/TB/TS).  cast() returns the converted node, or False
    if the conversion fails or the type is unknown."""
    def __init__(self, value, type):
        self.e1 = value
        self.e2 = type

    def cast(self, value, type):
        if isinstance(type, TInt):
            try:
                return N(int(value))
            except:
                return False
        elif isinstance(type, TS):
            try:
                return S(str(value))
            except:
                return False
        elif isinstance(type, TFloat):
            try:
                return N(float(value))
            except:
                return False
        elif isinstance(type, TB):
            try:
                return B(bool(value))
            except:
                return False
        return False

    def expr1(self):
        return self.e1

    def expr2(self):
        return self.e2

    def __repr__(self):
        return 'Cast(%s,%s)' % (self.e1, self.e2)


class TInt():
    """Type tag for integer casts."""
    def __init__(self):
        self.e1 = None

    def __repr__(self):
        return 'Int'


class TFloat():
    """Type tag for float casts."""
    def __init__(self):
        self.e1 = None

    def __repr__(self):
        return 'Float'


class TB():
    """Type tag for boolean casts."""
    def __init__(self):
        self.e1 = None

    def __repr__(self):
        # FIX: was `return 'Bool' % (self.e1)`, which raised TypeError on
        # every repr() of a TB instance.
        return 'Bool'


class TS():
    """Type tag for string casts."""
    def __init__(self):
        self.e1 = None

    def __repr__(self):
        return 'String'


#Helper functions
def isfloat(n):
    """Return True if float(n) succeeds."""
    try:
        float(n)
        return True
    except:
        return False
{ "repo_name": "mita4829/Acorn", "path": "src/Foundation.py", "copies": "1", "size": "10840", "license": "mit", "hash": -1489787082479614700, "line_mean": 21.8691983122, "line_max": 88, "alpha_frac": 0.4964944649, "autogenerated": false, "ratio": 3.1311380704794916, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9001398464070108, "avg_score": 0.025246814261876697, "num_lines": 474 }
#Acorn 2.0: Cocoa Butter
#No direct access to stack and heap. Stack data structure with dictionaries for lexical scoping. More efficent usage of dense dictionaries in python3.6+
#String concats
#No direct stepping access to raw values
#Logical operators with short-cir effect
from sys import exit
import Foundation
import Memory


def case(expr, typep):
    # Thin alias for isinstance, used throughout the dispatch chain.
    return isinstance(expr, typep)


def isValue(expr):
    # A "value" is a fully-evaluated Foundation node: no further stepping
    # needed before it can be consumed.
    return isinstance(expr, Foundation.N) or isinstance(expr, Foundation.B) or isinstance(expr, Foundation.S) or isinstance(expr, Foundation.Null) or isinstance(expr, Foundation.Function) or isinstance(expr, Foundation.Array)


def isfloat(n):
    # True when float(n) succeeds.
    try:
        float(n)
        return True
    except:
        return False


def step(expr, Env):
    """Small-step evaluator for the Acorn AST.

    Dispatches on the concrete Foundation node class.  Clause ORDER is
    significant: value-ready clauses (all operands satisfy isValue) come
    first; the trailing '... Expr' clauses step unevaluated operands and
    recurse.  Env supplies variable storage (requestVar/assignVar/malloc)
    and lexical scoping (pushLocalStack/popLocalStack).
    """
    #N
    if(case(expr, Foundation.N)):
        return expr.N()
    #B
    elif(case(expr, Foundation.B)):
        return expr.B()
    #S
    elif(case(expr, Foundation.S)):
        return expr.S()
    #Function
    elif(case(expr, Foundation.Function)):
        return expr
    #Var
    elif(case(expr, Foundation.Var)):
        varName = expr.X()
        rtn = Env.requestVar(varName)
        if(rtn == None):
            exit("Dynamic run time error. Undefined variable " + str(varName))
        return rtn
    #Null
    elif(case(expr, Foundation.Null)):
        return expr.null()
    #Print
    elif(case(expr, Foundation.Print) or case(expr, Foundation.Println)):
        value = expr.E()
        while(not isValue(value)):
            value = step(value, Env)
        value = step(value, Env)
        newline = ''
        if(case(expr, Foundation.Print)):
            newline = '\n'
        # Integral floats print without the trailing '.0'.
        if(isfloat(value)):
            if((value % 1) == 0):
                print(str(int(value)), end=newline)
            else:
                print(str(value), end=newline)
        else:
            print(str(value), end=newline)
    #Input
    elif(case(expr, Foundation.Input)):
        castToken = input()
        return expr.cast(castToken)
    #Unary
    elif(case(expr, Foundation.Unary) and isValue(expr.expr1())):
        if(expr.uop() == "Neg"):
            return Foundation.N(-1 * step(expr.expr1(), Env))
        elif(expr.uop() == "Not"):
            return Foundation.B(not step(expr.expr1(), Env))
        elif(expr.uop() == "Inv"):
            return Foundation.N(~int(step(expr.expr1(), Env)))
    #Binary
    elif(case(expr, Foundation.Binary) and isValue(expr.expr1()) and isValue(expr.expr2())):
        if(expr.bop() == "Plus"):
            e1 = expr.expr1()
            e2 = expr.expr2()
            # String + string is concatenation; anything else is numeric.
            if(case(e1, Foundation.S) and case(e2, Foundation.S)):
                return Foundation.S(step(e1, Env) + step(e2, Env))
            return Foundation.N(step(expr.expr1(), Env) + step(expr.expr2(), Env))
        elif(expr.bop() == "Minus"):
            return Foundation.N(step(expr.expr1(), Env) - step(expr.expr2(), Env))
        elif(expr.bop() == "Times"):
            return Foundation.N(step(expr.expr1(), Env) * step(expr.expr2(), Env))
        elif(expr.bop() == "Div"):
            return Foundation.N(step(expr.expr1(), Env) / step(expr.expr2(), Env))
        elif(expr.bop() == "Mod"):
            return Foundation.N(step(expr.expr1(), Env) % step(expr.expr2(), Env))
    #Malloc
    elif(case(expr, Foundation.Malloc) and isValue(expr.expr3())):
        Env.malloc(expr.expr2().X(), expr.expr3())
    #Assign
    elif(case(expr, Foundation.Assign) and isValue(expr.expr2())):
        #Work on index
        if(case(expr.expr1(), Foundation.Index)):
            array = Env.requestVar(expr.expr1().expr1().X()).expr1()
            index = step(expr.expr1().expr2(), Env)
            while(not isValue(index)):
                index = step(index, Env)
            index = int(step(index, Env))
            array[index] = expr.expr2()
            return
        varName = expr.expr1().X()
        Env.assignVar(varName, expr.expr2())
        return
    #Index
    elif(case(expr, Foundation.Index) and isValue(expr.expr2())):
        array = Env.requestVar(expr.expr1().X()).expr1()
        index = step(expr.expr2(), Env)
        if(int(index) >= len(array) or int(index) < 0):
            exit("Dynamic run time error, attempt to access out of bound memory for array")
        return array[int(index)]
    #If
    elif(case(expr, Foundation.If) and isValue(expr.expr1())):
        # Each branch runs in its own lexical scope.
        if(step(expr.expr1(), Env)):
            Env.pushLocalStack()
            rtn = step(expr.expr2(), Env)
            Env.popLocalStack()
            return rtn
        else:
            Env.pushLocalStack()
            rtn = step(expr.expr3(), Env)
            Env.popLocalStack()
            return rtn
    #Seq
    elif(case(expr, Foundation.Seq)):
        # A Return node propagates straight out of the sequence.
        e1 = step(expr.expr1(), Env)
        if(case(e1, Foundation.Return)):
            return e1
        e2 = step(expr.expr2(), Env)
        if(case(e2, Foundation.Return)):
            return e2
        return
    #Eq
    elif(case(expr, Foundation.Eq) and isValue(expr.expr1()) and isValue(expr.expr2())):
        return Foundation.B(step(expr.expr1(), Env) == step(expr.expr2(), Env))
    #Ne
    elif(case(expr, Foundation.Ne) and isValue(expr.expr1()) and isValue(expr.expr2())):
        return Foundation.B(step(expr.expr1(), Env) != step(expr.expr2(), Env))
    #Lt
    elif(case(expr, Foundation.Lt) and isValue(expr.expr1()) and isValue(expr.expr2())):
        return Foundation.B(step(expr.expr1(), Env) < step(expr.expr2(), Env))
    #Gt
    elif(case(expr, Foundation.Gt) and isValue(expr.expr1()) and isValue(expr.expr2())):
        return Foundation.B(step(expr.expr1(), Env) > step(expr.expr2(), Env))
    #Le
    elif(case(expr, Foundation.Le) and isValue(expr.expr1()) and isValue(expr.expr2())):
        return Foundation.B(step(expr.expr1(), Env) <= step(expr.expr2(), Env))
    #Ge
    elif(case(expr, Foundation.Ge) and isValue(expr.expr1()) and isValue(expr.expr2())):
        return Foundation.B(step(expr.expr1(), Env) >= step(expr.expr2(), Env))
    #And
    elif(case(expr, Foundation.And) and isValue(expr.expr1()) and isValue(expr.expr2())):
        # Short-circuit: the right operand is not evaluated when the left
        # is already false.
        a = step(expr.expr1(), Env)
        if(not a):
            return Foundation.B(False)
        return Foundation.B(step(expr.expr2(), Env))
    #Or
    elif(case(expr, Foundation.Or) and isValue(expr.expr1()) and isValue(expr.expr2())):
        # Short-circuit: true left operand skips the right.
        a = step(expr.expr1(), Env)
        if(a):
            return Foundation.B(True)
        return Foundation.B(step(expr.expr2(), Env))
    #Left
    elif(case(expr, Foundation.LeftShift) and isValue(expr.expr1()) and isValue(expr.expr2())):
        p = step(expr.expr1(), Env)
        q = step(expr.expr2(), Env)
        return Foundation.N(int(p) << int(q))
    #Right
    elif(case(expr, Foundation.RightShift) and isValue(expr.expr1()) and isValue(expr.expr2())):
        p = step(expr.expr1(), Env)
        q = step(expr.expr2(), Env)
        return Foundation.N(int(p) >> int(q))
    #cast
    elif(case(expr, Foundation.Cast) and isValue(expr.expr1())):
        value = step(expr.expr1(), Env)
        t = expr.cast(value, expr.expr2())
        if(not t):
            exit("Dynamic run time error, cannot cast " + str(value) + " to type " + str(expr.expr2()))
        return t
    #Call
    elif(case(expr, Foundation.Call)):
        #For each argument, step until they are values
        argVal = []
        for i in range(0, len(expr.expr1())):
            value = expr.expr1()[i]
            while(not isValue(value)):
                value = step(value, Env)
            argVal.append(value)
        functionName = expr.expr2().X()
        #Function object is an instant of the function
        functionObject = Env.requestVar(functionName)
        #Function arg names is a list of the defined function argument names
        functionArgNames = functionObject.expr1()
        functionBody = functionObject.expr2()
        #Begin subsituting values in the function
        # for i in range(0,len(argVal)):
        #functionBody = subsitute(functionBody,argVal[i],functionArgNames[i].X())
        # Arguments are bound as locals in a fresh scope instead of textual
        # substitution (the commented lines above are the old scheme).
        Env.pushLocalStack()
        for i in range(0, len(argVal)):
            step(Foundation.Malloc("Var", functionArgNames[i], argVal[i]), Env)
        rtn = step(functionBody, Env)
        if(case(rtn, Foundation.Return)):
            if(not case(rtn.expr1(), Foundation.Null)):
                rtn = rtn.expr1()
                if(not isValue(rtn)):
                    rtn = step(rtn, Env)
                    Env.popLocalStack()
                    return rtn
                Env.popLocalStack()
                return rtn
            Env.popLocalStack()
            return rtn.expr1()
        Env.popLocalStack()
        return Foundation.Null()
    #Return
    elif(case(expr, Foundation.Return)):
        return expr
    #ForEach
    elif(case(expr, Foundation.ForEach) and isValue(expr.expr2()) and isValue(expr.expr3())):
        index = expr.expr1()
        start = int(step(expr.expr2(), Env))
        end = int(step(expr.expr3(), Env))
        scope = expr.expr4()
        closure = expr.expr5()
        Env.pushLocalStack()
        # closure selects an exclusive ("<") or inclusive ("<=") upper bound.
        if(closure == "<"):
            for i in range(start, end):
                step(Foundation.Malloc("Var", Foundation.Var(index), Foundation.N(i)), Env)
                step(scope, Env)
        elif(closure == "<="):
            for i in range(start, end + 1):
                step(Foundation.Malloc("Var", Foundation.Var(index), Foundation.N(i)), Env)
                step(scope, Env)
        else:
            exit("Dynamic run time error, cannot sequence range of index " + str(closure))
        Env.popLocalStack()
        return expr
    #For loop
    elif(case(expr, Foundation.For)):
        index = expr.expr1()
        indexVar = index.expr2()
        condition = expr.expr2()
        count = expr.expr3()
        scope = expr.expr4()
        Env.pushLocalStack()
        #Initiate starting variable
        step(index, Env)
        while(step(condition, Env).B()):
            #Run body with subsitute of index variable
            #step(subsitute(scope, Foundation.N(step(Env.requestVar(indexVar.X()),Env)), indexVar),Env)
            # NOTE(review): scope is a list of statements here, unlike
            # While/ForEach where it is a single node — confirm in the
            # front-end that builds For nodes.
            for i in range(0, len(scope)):
                step(scope[i], Env)
            #Update the counter
            step(count, Env)
        Env.popLocalStack()
        return expr
    #While
    elif(case(expr, Foundation.While)):
        condition = expr.expr1()
        scope = expr.expr2()
        Env.pushLocalStack()
        while(step(condition, Env).B()):
            step(scope, Env)
        Env.popLocalStack()
        return expr
    #BitAnd
    elif(case(expr, Foundation.BitwiseAnd) and isValue(expr.expr1()) and isValue(expr.expr2())):
        p = step(expr.expr1(), Env)
        q = step(expr.expr2(), Env)
        return Foundation.N(int(p) & int(q))
    #BitOr
    elif(case(expr, Foundation.BitwiseOr) and isValue(expr.expr1()) and isValue(expr.expr2())):
        p = step(expr.expr1(), Env)
        q = step(expr.expr2(), Env)
        return Foundation.N(int(p) | int(q))
    # The remaining clauses handle nodes whose operands are NOT yet values:
    # evaluate the unevaluated side(s) and recurse on a rebuilt node.
    #Binary Expr
    elif(case(expr, Foundation.Binary)):
        a = isValue(expr.expr1())
        b = isValue(expr.expr2())
        e1 = expr.expr1()
        e2 = expr.expr2()
        if((not a) and (not b)):
            return step(Foundation.Binary(expr.bop(), step(e1, Env), step(e2, Env)), Env)
        if(not a):
            return step(Foundation.Binary(expr.bop(), step(e1, Env), e2), Env)
        return step(Foundation.Binary(expr.bop(), e1, step(e2, Env)), Env)
    #Malloc Expr
    elif(case(expr, Foundation.Malloc)):
        return step(Foundation.Malloc("Var", expr.expr2(), step(expr.expr3(), Env)), Env)
    #If Expr
    elif(case(expr, Foundation.If)):
        value = expr.expr1()
        while(not isValue(value)):
            value = step(value, Env)
        return step(Foundation.If(value, expr.expr2(), expr.expr3()), Env)
    #Eq Expr
    elif(case(expr, Foundation.Eq)):
        e1 = isValue(expr.expr1())
        e2 = isValue(expr.expr2())
        if(e1):
            expr2 = step(expr.expr2(), Env)
            return step(Foundation.Eq(expr.expr1(), expr2), Env)
        elif(e2):
            expr1 = step(expr.expr1(), Env)
            return step(Foundation.Eq(expr1, expr.expr2()), Env)
        expr1 = step(expr.expr1(), Env)
        expr2 = step(expr.expr2(), Env)
        return step(Foundation.Eq(expr1, expr2), Env)
    #Ne Expr
    elif(case(expr, Foundation.Ne)):
        e1 = isValue(expr.expr1())
        e2 = isValue(expr.expr2())
        if(e1):
            expr2 = step(expr.expr2(), Env)
            return step(Foundation.Ne(expr.expr1(), expr2), Env)
        elif(e2):
            expr1 = step(expr.expr1(), Env)
            return step(Foundation.Ne(expr1, expr.expr2()), Env)
        expr1 = step(expr.expr1(), Env)
        expr2 = step(expr.expr2(), Env)
        return step(Foundation.Ne(expr1, expr2), Env)
    #Gt Expr
    elif(case(expr, Foundation.Gt)):
        e1 = isValue(expr.expr1())
        e2 = isValue(expr.expr2())
        if(e1):
            expr2 = step(expr.expr2(), Env)
            return step(Foundation.Gt(expr.expr1(), expr2), Env)
        elif(e2):
            expr1 = step(expr.expr1(), Env)
            return step(Foundation.Gt(expr1, expr.expr2()), Env)
        expr1 = step(expr.expr1(), Env)
        expr2 = step(expr.expr2(), Env)
        return step(Foundation.Gt(expr1, expr2), Env)
    #Lt Expr
    elif(case(expr, Foundation.Lt)):
        e1 = isValue(expr.expr1())
        e2 = isValue(expr.expr2())
        if(e1):
            expr2 = step(expr.expr2(), Env)
            return step(Foundation.Lt(expr.expr1(), expr2), Env)
        elif(e2):
            expr1 = step(expr.expr1(), Env)
            return step(Foundation.Lt(expr1, expr.expr2()), Env)
        expr1 = step(expr.expr1(), Env)
        expr2 = step(expr.expr2(), Env)
        return step(Foundation.Lt(expr1, expr2), Env)
    #Ge Expr
    elif(case(expr, Foundation.Ge)):
        e1 = isValue(expr.expr1())
        e2 = isValue(expr.expr2())
        if(e1):
            expr2 = step(expr.expr2(), Env)
            return step(Foundation.Ge(expr.expr1(), expr2), Env)
        elif(e2):
            expr1 = step(expr.expr1(), Env)
            return step(Foundation.Ge(expr1, expr.expr2()), Env)
        expr1 = step(expr.expr1(), Env)
        expr2 = step(expr.expr2(), Env)
        return step(Foundation.Ge(expr1, expr2), Env)
    #Le Expr
    elif(case(expr, Foundation.Le)):
        e1 = isValue(expr.expr1())
        e2 = isValue(expr.expr2())
        if(e1):
            expr2 = step(expr.expr2(), Env)
            return step(Foundation.Le(expr.expr1(), expr2), Env)
        elif(e2):
            expr1 = step(expr.expr1(), Env)
            return step(Foundation.Le(expr1, expr.expr2()), Env)
        expr1 = step(expr.expr1(), Env)
        expr2 = step(expr.expr2(), Env)
        return step(Foundation.Le(expr1, expr2), Env)
    #And Expr
    elif(case(expr, Foundation.And)):
        e1 = isValue(expr.expr1())
        e2 = isValue(expr.expr2())
        if(e1):
            expr2 = step(expr.expr2(), Env)
            return step(Foundation.And(expr.expr1(), expr2), Env)
        elif(e2):
            expr1 = step(expr.expr1(), Env)
            return step(Foundation.And(expr1, expr.expr2()), Env)
        expr1 = step(expr.expr1(), Env)
        expr2 = step(expr.expr2(), Env)
        return step(Foundation.And(expr1, expr2), Env)
    #Or Expr
    elif(case(expr, Foundation.Or)):
        e1 = isValue(expr.expr1())
        e2 = isValue(expr.expr2())
        if(e1):
            expr2 = step(expr.expr2(), Env)
            return step(Foundation.Or(expr.expr1(), expr2), Env)
        elif(e2):
            expr1 = step(expr.expr1(), Env)
            return step(Foundation.Or(expr1, expr.expr2()), Env)
        expr1 = step(expr.expr1(), Env)
        expr2 = step(expr.expr2(), Env)
        return step(Foundation.Or(expr1, expr2), Env)
    elif(case(expr, Foundation.BitwiseOr)):
        e1 = isValue(expr.expr1())
        e2 = isValue(expr.expr2())
        if(e1):
            expr2 = step(expr.expr2(), Env)
            return step(Foundation.BitwiseOr(expr.expr1(), expr2), Env)
        elif(e2):
            expr1 = step(expr.expr1(), Env)
            return step(Foundation.BitwiseOr(expr1, expr.expr2()), Env)
        expr1 = step(expr.expr1(), Env)
        expr2 = step(expr.expr2(), Env)
        return step(Foundation.BitwiseOr(expr1, expr2), Env)
    elif(case(expr, Foundation.BitwiseAnd)):
        e1 = isValue(expr.expr1())
        e2 = isValue(expr.expr2())
        if(e1):
            expr2 = step(expr.expr2(), Env)
            return step(Foundation.BitwiseAnd(expr.expr1(), expr2), Env)
        elif(e2):
            expr1 = step(expr.expr1(), Env)
            return step(Foundation.BitwiseAnd(expr1, expr.expr2()), Env)
        expr1 = step(expr.expr1(), Env)
        expr2 = step(expr.expr2(), Env)
        return step(Foundation.BitwiseAnd(expr1, expr2), Env)
    elif(case(expr, Foundation.LeftShift)):
        e1 = isValue(expr.expr1())
        e2 = isValue(expr.expr2())
        if(e1):
            expr2 = step(expr.expr2(), Env)
            return step(Foundation.LeftShift(expr.expr1(), expr2), Env)
        elif(e2):
            expr1 = step(expr.expr1(), Env)
            return step(Foundation.LeftShift(expr1, expr.expr2()), Env)
        expr1 = step(expr.expr1(), Env)
        expr2 = step(expr.expr2(), Env)
        return step(Foundation.LeftShift(expr1, expr2), Env)
    elif(case(expr, Foundation.RightShift)):
        e1 = isValue(expr.expr1())
        e2 = isValue(expr.expr2())
        if(e1):
            expr2 = step(expr.expr2(), Env)
            return step(Foundation.RightShift(expr.expr1(), expr2), Env)
        elif(e2):
            expr1 = step(expr.expr1(), Env)
            return step(Foundation.RightShift(expr1, expr.expr2()), Env)
        expr1 = step(expr.expr1(), Env)
        expr2 = step(expr.expr2(), Env)
        return step(Foundation.RightShift(expr1, expr2), Env)
    #Assign Expr
    elif(case(expr, Foundation.Assign)):
        return step(Foundation.Assign(expr.expr1(), step(expr.expr2(), Env)), Env)
    elif(case(expr, Foundation.Unary)):
        expr1 = step(expr.expr1(), Env)
        return step(Foundation.Unary(expr.uop(), expr1), Env)
    #Index Expr
    elif(case(expr, Foundation.Index)):
        index = step(expr.expr2(), Env)
        return step(Foundation.Index(expr.expr1(), index), Env);
    elif(case(expr, Foundation.Cast)):
        e1 = step(expr.expr1(), Env)
        return step(Foundation.Cast(e1, expr.expr2()), Env)
    else:
        print("Uncaught parse step:" + str(expr))
        return Foundation.Null()
{ "repo_name": "mita4829/Acorn", "path": "src/Parser.py", "copies": "1", "size": "18526", "license": "mit", "hash": 6776410100933722000, "line_mean": 33.1808118081, "line_max": 220, "alpha_frac": 0.570819389, "autogenerated": false, "ratio": 3.4351937697014647, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.45060131587014646, "avg_score": null, "num_lines": null }
#Acorn v1.0
from sys import exit
import Foundation
import Memory


def case(expr, typep):
    # Thin alias for isinstance, used throughout the dispatch chains below.
    return isinstance(expr, typep)


def isValue(expr):
    # A "value" is a fully-evaluated Foundation node.
    return isinstance(expr, Foundation.N) or isinstance(expr, Foundation.B) or isinstance(expr, Foundation.S) or isinstance(expr, Foundation.Null) or isinstance(expr, Foundation.Function) or isinstance(expr, Foundation.Array)


def isfloat(n):
    # True when float(n) succeeds.
    try:
        float(n)
        return True
    except:
        return False


def isRaw(val):
    # True for bare Python primitives (as opposed to Foundation nodes).
    if((type(val) == str) or (type(val) == float) or (type(val) == bool) or (type(val) == int)):
        return True
    else:
        return False


def subsitute(expr, value, x):
    """Capture-unaware substitution: replace every Var named x in expr with
    value, rebuilding the tree node by node.  (Name is a historical typo for
    "substitute" and is referenced elsewhere — do not rename.)"""
    #N
    if(case(expr, Foundation.N)):
        return expr
    #B
    elif(case(expr, Foundation.B)):
        return expr
    #S
    elif(case(expr, Foundation.S)):
        return expr
    #Null
    elif(case(expr, Foundation.Null)):
        return expr
    #Var
    elif(case(expr, Foundation.Var)):
        if(x == expr.X()):
            return value
        else:
            return expr
    #Binary
    elif(case(expr, Foundation.Binary)):
        return Foundation.Binary(expr.bop(), subsitute(expr.expr1(), value, x), subsitute(expr.expr2(), value, x))
    #Eq
    elif(case(expr, Foundation.Eq)):
        return Foundation.Eq(subsitute(expr.expr1(), value, x), subsitute(expr.expr2(), value, x))
    #Ne
    elif(case(expr, Foundation.Ne)):
        return Foundation.Ne(subsitute(expr.expr1(), value, x), subsitute(expr.expr2(), value, x))
    #Lt
    elif(case(expr, Foundation.Lt)):
        return Foundation.Lt(subsitute(expr.expr1(), value, x), subsitute(expr.expr2(), value, x))
    #Le
    elif(case(expr, Foundation.Le)):
        return Foundation.Le(subsitute(expr.expr1(), value, x), subsitute(expr.expr2(), value, x))
    #Ge
    elif(case(expr, Foundation.Ge)):
        return Foundation.Ge(subsitute(expr.expr1(), value, x), subsitute(expr.expr2(), value, x))
    #Gt
    elif(case(expr, Foundation.Gt)):
        return Foundation.Gt(subsitute(expr.expr1(), value, x), subsitute(expr.expr2(), value, x))
    #If
    elif(case(expr, Foundation.If)):
        return Foundation.If(subsitute(expr.expr1(), value, x), subsitute(expr.expr2(), value, x), subsitute(expr.expr3(), value, x))
    #Seq
    elif(case(expr, Foundation.Seq)):
        e1 = subsitute(expr.expr1(), value, x)
        e2 = subsitute(expr.expr2(), value, x)
        return Foundation.Seq(e1, e2)
    elif(case(expr, Foundation.Call)):
        return Foundation.Call(subsitute(expr.expr1(), value, x), subsitute(expr.expr2(), value, x))
    #Return
    elif(case(expr, Foundation.Return)):
        return Foundation.Return(subsitute(expr.expr1(), value, x))
    #print
    elif(case(expr, Foundation.Print)):
        return Foundation.Print(subsitute(expr.E(), value, x))
    #Malloc
    elif(case(expr, Foundation.Malloc)):
        # Only the initializer is substituted; the declared name is kept.
        return Foundation.Malloc(expr.expr1(), expr.expr2(), subsitute(expr.expr3(), value, x))
    #Index
    elif(case(expr, Foundation.Index)):
        return Foundation.Index(expr.expr1(), subsitute(expr.expr2(), value, x))
    elif(case(expr, Foundation.Assign)):
        if(case(expr.expr1(), Foundation.Index)):
            return Foundation.Assign(subsitute(expr.expr1(), value, x), subsitute(expr.expr2(), value, x))
        return Foundation.Assign(expr.expr1(), subsitute(expr.expr2(), value, x))
    else:
        # NOTE(review): unhandled node kinds fall through and return None —
        # callers receive None silently after these prints.
        print(expr)
        print("Uncaught subsitute")


def step(expr, stack, heap):
    """Small-step evaluator, v1.0 variant: variables resolve against an
    explicit stack (consts) and heap (vars) rather than a scoped Env.
    (Definition continues beyond this chunk.)"""
    #print(heap.heap)
    #Base cases
    #N
    if(case(expr, Foundation.N)):
        return expr.N()
    #B
    elif(case(expr, Foundation.B)):
        return expr.B()
    #S
    elif(case(expr, Foundation.S)):
        return expr.S()
    #Function
    elif(case(expr, Foundation.Function)):
        return expr
    #Var
    elif(case(expr, Foundation.Var)):
        x = expr.X()
        # "DNE" is the sentinel both containers return for a missing name;
        # the stack (consts) shadows the heap.
        callStack = stack.stackCall(x)
        callHeap = heap.heapCall(x)
        if((callStack == "DNE") and (callHeap == "DNE")):
            exit("Acorn: Use of variable " + str(x) + " before declaration.")
        if(callStack != "DNE"):
            return callStack
        else:
            return callHeap
    #Null
    elif(case(expr, Foundation.Null)):
        return expr.null()
    #Print
    elif(case(expr, Foundation.Print)):
        if(isValue(expr.E())):
            #print("isValue")
            val = expr.E()
            if(case(val, Foundation.Var)):
                val = step(val, stack, heap)
            valFinal = step(val, stack, heap)
            # Integral floats print without the trailing '.0'.
            if(isfloat(valFinal)):
                if((valFinal % 1) == 0):
                    print(str(int(valFinal)))
                    return
            print(str(valFinal))
        elif(isRaw(expr.E())):
            #print("isRaw")
            valFinal = expr.E()
            if(isfloat(valFinal)):
                if((valFinal % 1) == 0):
                    print(str(int(valFinal)))
                    return
            print(str(valFinal))
        else:
            val = expr.E()
#print("isExpr") while(not isValue(val) and (type(val) != str) and (type(val) != float) and(type(val) != bool)): val = step(val,stack,heap) valFinal = val if(not isRaw(valFinal)): valFinal = step(valFinal,stack,heap) if(isfloat(valFinal)): if((valFinal % 1) == 0): print(str(int(valFinal))) return print(str(valFinal)) return #Unary Needs to refactor laters also with Binary elif(case(expr,Foundation.Unary) and isValue(expr.expr1())): if(expr.uop() == "Neg"): return Foundation.N(-1*step(expr.expr1(),stack,heap)) elif(expr.uop() == "Not"): return Foundation.B(not step(expr.expr1(),stack,heap)) #Binary elif(case(expr,Foundation.Binary) and isValue(expr.expr1()) and isValue(expr.expr2())): if(expr.bop() == "Plus"): return Foundation.N(step(expr.expr1(),stack,heap)+step(expr.expr2(),stack,heap)) elif(expr.bop() == "Minus"): return Foundation.N(step(expr.expr1(),stack,heap)-step(expr.expr2(),stack,heap)) elif(expr.bop() == "Times"): return Foundation.N(step(expr.expr1(),stack,heap)*step(expr.expr2(),stack,heap)) elif(expr.bop() == "Div"): return Foundation.N(step(expr.expr1(),stack,heap)/step(expr.expr2(),stack,heap)) elif(expr.bop() == "Mod"): return Foundation.N(step(expr.expr1(),stack,heap)%step(expr.expr2(),stack,heap)) #If elif(case(expr,Foundation.If) and isValue(expr.expr1())): if( step(expr.expr1(),stack,heap) ): return step(expr.expr2(),stack,heap) else: return step(expr.expr3(),stack,heap) #Seq elif(case(expr,Foundation.Seq)): e1 = step(expr.expr1(),stack,heap) if(case(e1,Foundation.Return)): return e1 e2 = step(expr.expr2(),stack,heap) if(case(e2,Foundation.Return)): return e2 return #Return elif(case(expr,Foundation.Return)): return expr #ForEach elif(case(expr,Foundation.ForEach)): indexName = expr.expr1() start = int(expr.expr2().N()) end = int(expr.expr3().N()) scope = expr.expr4() closure = expr.expr5() if(closure == "<"): for i in range(start,end): step(subsitute(scope,Foundation.N(i),indexName),stack,heap) elif(closure == "<="): for i in 
range(start,end+1): step(subsitute(scope,Foundation.N(i),indexName),stack,heap) else: print("Acorn: Cannot sequence range of index "+str(closure)) exit() return expr #Recursive functions #Eq elif(case(expr,Foundation.Eq)): e1 = expr.expr1() e2 = expr.expr2() while(not isValue(e1)): e1 = step(e1,stack,heap) while(not isValue(e2)): e2 = step(e1,stack,heap) return Foundation.B(step(e1,stack,heap) == step(e2,stack,heap)) #Ne elif(case(expr,Foundation.Ne)): e1 = expr.expr1() e2 = expr.expr2() while(not isValue(e1)): e1 = step(e1,stack,heap) while(not isValue(e2)): e2 = step(e1,stack,heap) return Foundation.B(step(e1,stack,heap) != step(e2,stack,heap)) #Lt elif(case(expr,Foundation.Lt)): e1 = expr.expr1() e2 = expr.expr2() while(not isValue(e1)): e1 = step(e1,stack,heap) while(not isValue(e2)): e2 = step(e1,stack,heap) return Foundation.B(step(e1,stack,heap) < step(e2,stack,heap)) #Le elif(case(expr,Foundation.Le)): e1 = expr.expr1() e2 = expr.expr2() while(not isValue(e1)): e1 = step(e1,stack,heap) while(not isValue(e2)): e2 = step(e1,stack,heap) return Foundation.B(step(e1,stack,heap) <= step(e2,stack,heap)) #Ge elif(case(expr,Foundation.Ge)): e1 = expr.expr1() e2 = expr.expr2() while(not isValue(e1)): e1 = step(e1,stack,heap) while(not isValue(e2)): e2 = step(e1,stack,heap) return Foundation.B(step(e1,stack,heap) >= step(e2,stack,heap)) #Gt elif(case(expr,Foundation.Gt)): e1 = expr.expr1() e2 = expr.expr2() while(not isValue(e1)): e1 = step(e1,stack,heap) while(not isValue(e2)): e2 = step(e1,stack,heap) return Foundation.B(step(e1,stack,heap) > step(e2,stack,heap)) #Var Const Malloc elif(case(expr,Foundation.Malloc)): val = expr.expr3() while((not isValue(val)) and (type(val) != str) and (type(val) != float) and (type(val) != bool)): val = step(val,stack,heap) if(expr.expr1() == "Var"): heap.heap[expr.expr2().X()] = val elif(expr.expr1() == "Const"): stack.stack[expr.expr2().X()] = val return #Array elif(case(expr,Foundation.Array)): for i in 
range(0,len(expr.expr1())): step(expr.expr1()[i],stack,heap) return expr #Assign elif(case(expr,Foundation.Assign)): if(case(expr.expr1(),Foundation.Index)): arrayRaw = heap.heapCall(expr.expr1().expr1().X()) index = int(step(expr.expr1(),stack,heap).N()) valToAssign = expr.expr2() arrayRaw.expr1()[index] = valToAssign return expr name = expr.expr1().X() val = step(expr.expr2(),stack,heap) while((not isValue(val)) and (type(val) != str) and (type(val) != float) and (type(val) != bool)): val = step(val,stack,heap) heap.heap[name] = val return expr #Call elif(case(expr,Foundation.Call)): functionName = expr.expr2().X() functionObject = heap.heapCall(functionName) #Acorn functionArgName = functionObject.expr1().X() #Cocoa functionArgName = functionObject.expr1()[0].X() argument = expr.expr1() while(not isValue(argument)): argument = step(argument,stack,heap) functionBody = functionObject.expr2() sbtBody = subsitute(functionBody,argument,functionArgName) rtn = step(sbtBody,stack,heap) if(case(rtn,Foundation.Return)): if(not case(rtn.expr1(),Foundation.Null)): return rtn.expr1() return Foundation.Null() #stdin elif(case(expr,Foundation.Input)): castToken = input() return expr.cast(castToken) #Inductive cases #Induct Unary elif(case(expr,Foundation.Unary)): if(expr.uop() == "Neg"): return Foundation.Unary("Neg",step(expr.expr1(),stack,heap)) elif(expr.uop() == "Not"): return Foundation.Unary("Not",step(expr.expr1(),stack,heap)) #Induct Binary elif(case(expr,Foundation.Binary)): if(isValue(expr.expr1())): return Foundation.Binary(expr.bop(),expr.expr1(),step(expr.expr2(),stack,heap)) else: return Foundation.Binary(expr.bop(),step(expr.expr1(),stack,heap),expr.expr2()) #Induct Array Index elif(case(expr, Foundation.Index)): arrayRaw = step(expr.expr1(),stack,heap).expr1() index = step(expr.expr2(),stack,heap) while(not isRaw(index)): index = step(index,stack,heap) if(int(index) >= len(arrayRaw) and int(index) > -1): exit("Acorn: Array out-of-bound error. 
Attempted at accessing index outside of Array's memory") return arrayRaw[int(index)] #Induct If elif(case(expr,Foundation.If)): return step(Foundation.If(step(expr.expr1(),stack,heap),expr.expr2(),expr.expr3()),stack,heap) else: #Code should never hit this print("Acorn: Uncaught exception with Parser. Please report this case: "+str(expr)) if(isRaw(expr)): print("Error: 0x000000001") #return expr #Uncomment if you're feeling risk-K else: print("Error: 0xdeadbeef ekk :(") exit()
{ "repo_name": "mita4829/Acorn", "path": "Acorn 1.1.1 Deprecated/Parser_1.1.1_Deprecated.py", "copies": "1", "size": "13076", "license": "mit", "hash": -2060690735918574800, "line_mean": 34.5326086957, "line_max": 220, "alpha_frac": 0.5744876109, "autogenerated": false, "ratio": 3.4052083333333334, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9331376094094392, "avg_score": 0.029663970027788186, "num_lines": 368 }
"""A Couchbase CLI subcommand""" import getpass import inspect import ipaddress import json import os import platform import random import re import string import subprocess import sys import urllib.parse import tempfile import time from typing import Optional, List, Any, Dict from argparse import ArgumentError, ArgumentParser, HelpFormatter, Action, SUPPRESS from operator import itemgetter from cluster_manager import ClusterManager from pbar import TopologyProgressBar try: from cb_version import VERSION # pylint: disable=import-error except ImportError: VERSION = "0.0.0-0000-community" print(f'WARNING: Could not import cb_version, setting VERSION to {VERSION}') COUCHBASE_DEFAULT_PORT = 8091 BUCKET_PRIORITY_HIGH_INT = 8 BUCKET_PRIORITY_HIGH_STR = "high" BUCKET_PRIORITY_LOW_INT = 3 BUCKET_PRIORITY_LOW_STR = "low" BUCKET_TYPE_COUCHBASE = "membase" BUCKET_TYPE_MEMCACHED = "memcached" CB_BIN_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "bin")) CB_ETC_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "etc", "couchbase")) CB_LIB_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "lib")) # On MacOS the config is store in the users home directory if platform.system() == "Darwin": CB_CFG_PATH = os.path.expanduser("~/Library/Application Support/Couchbase/var/lib/couchbase") else: CB_CFG_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "var", "lib", "couchbase")) CB_MAN_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "share")) if os.name == "nt": CB_MAN_PATH = os.path.join(CB_MAN_PATH, "doc", "couchbase-cli") else: CB_MAN_PATH = os.path.join(CB_MAN_PATH, "man", "man1") def get_doc_page_name(command: str) -> str: return f'{command}.{"1" if os.name != "nt" else "html"}' def remove_prefix(val: str, prefix: str) -> str: """This function removes a prefix from a string. 
Note this is a built-in function in Python 3.9 once we upgrade to it we should use it instead. """ return val[len(prefix):] if val.startswith(prefix) else val def rest_initialiser(cluster_init_check=False, version_check=False, enterprise_check=None): """rest_initialiser is a decorator that does common subcommand tasks. The decorator will always creates a cluster manager and assign it to the subcommand variable rest :param cluster_init_check: if true it will check if the cluster is initialized before executing the subcommand :param version_check: if true it will check if the cluster and CLI version match if they do not it prints a warning :param enterprise_check: if true it will check if the cluster is enterprise and fail if not. If it is false it does the check but it does not fail if not enterprise. If none it does not perform the check. The result of the check is stored on the instance parameter enterprise """ def inner(fn): def decorator(self, opts): self.rest = ClusterManager(opts.cluster, opts.username, opts.password, opts.ssl, opts.ssl_verify, opts.cacert, opts.debug) if cluster_init_check: check_cluster_initialized(self.rest) if version_check: check_versions(self.rest) if enterprise_check is not None: enterprise, errors = self.rest.is_enterprise() _exit_if_errors(errors) if enterprise_check and not enterprise: _exit_if_errors(['Command only available in enterprise edition']) self.enterprise = enterprise return fn(self, opts) return decorator return inner def check_cluster_initialized(rest): initialized, errors = rest.is_cluster_initialized() if errors: _exit_if_errors(errors) if not initialized: _exit_if_errors(["Cluster is not initialized, use cluster-init to initialize the cluster"]) def check_versions(rest): result, errors = rest.pools() if errors: return server_version = result['implementationVersion'] if server_version is None or VERSION is None: return major_couch = server_version[: server_version.index('.')] minor_couch = 
server_version[server_version.index('.') + 1: server_version.index('.', len(major_couch) + 1)] major_cli = VERSION[: VERSION.index('.')] minor_cli = VERSION[VERSION.index('.') + 1: VERSION.index('.', len(major_cli) + 1)] if major_cli != major_couch or minor_cli != minor_couch: _warning(f'couchbase-cli version {VERSION} does not match couchbase server version {server_version}') def index_storage_mode_to_param(value, default="plasma"): """Converts the index storage mode to what Couchbase understands""" if value == "default": return default if value == "memopt": return "memory_optimized" return value def process_services(services, enterprise): """Converts services to a format Couchbase understands""" sep = "," if services.find(sep) < 0: # backward compatible when using ";" as separator sep = ";" svc_set = set([w.strip() for w in services.split(sep)]) svc_candidate = ["data", "index", "query", "fts", "eventing", "analytics", "backup"] for svc in svc_set: if svc not in svc_candidate: return None, [f'`{svc}` is not a valid service'] if not enterprise and svc in ["eventing", "analytics", "backup"]: return None, [f'{svc} service is only available on Enterprise Edition'] if not enterprise: # Valid CE node service configuration ce_svc_30 = set(["data"]) ce_svc_40 = set(["data", "index", "query"]) ce_svc_45 = set(["data", "index", "query", "fts"]) if svc_set not in [ce_svc_30, ce_svc_40, ce_svc_45]: return None, [f"Invalid service configuration. 
Community Edition only supports nodes with the following" f" combinations of services: '{''.join(ce_svc_30)}', '{','.join(ce_svc_40)}' or " f"'{','.join(ce_svc_45)}'"] services = ",".join(svc_set) for old, new in [[";", ","], ["data", "kv"], ["query", "n1ql"], ["analytics", "cbas"]]: services = services.replace(old, new) return services, None def find_subcommands(): """Finds all subcommand classes""" clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass) subclasses = [cls for cls in clsmembers if issubclass(cls[1], (Subcommand, LocalSubcommand)) and cls[1] not in [Subcommand, LocalSubcommand]] subcommands = [] for subclass in subclasses: name = '-'.join([part.lower() for part in re.findall('[A-Z][a-z]*', subclass[0])]) subcommands.append((name, subclass[1])) return subcommands def _success(msg): print(f'SUCCESS: {msg}') def _deprecated(msg): print(f'DEPRECATED: {msg}') def _warning(msg): print(f'WARNING: {msg}') def _exit_if_errors(errors): if errors: for error in errors: # Some endpoint return errors prefixed with '_ -' this has to be stripped out. For more information see # MB-42801 print(f'ERROR: {remove_prefix(error, "_ -").lstrip(" ")}') sys.exit(1) def _exit_on_file_write_failure(fname, to_write): try: wfile = open(fname, 'w') wfile.write(to_write) wfile.close() except IOError as error: _exit_if_errors([error]) def _exit_on_file_read_failure(fname, to_report=None): try: rfile = open(fname, 'r') read_bytes = rfile.read() rfile.close() return read_bytes except IOError as error: if to_report is None: _exit_if_errors([f'{error.strerror} `{fname}`']) else: _exit_if_errors([to_report]) def apply_default_port(nodes): """ Adds the default port if the port is missing. 
class CLIHelpFormatter(HelpFormatter):
    """Format help with indented section bodies"""

    def __init__(self, prog, indent_increment=2, max_help_position=30, width=None):
        HelpFormatter.__init__(self, prog, indent_increment, max_help_position, width)

    def add_argument(self, action):
        # Overridden to account for sub-actions when computing column widths.
        if action.help is not SUPPRESS:

            # find all invocations
            get_invocation = self._format_action_invocation
            invocations = [get_invocation(action)]
            for subaction in self._iter_indented_subactions(action):
                invocations.append(get_invocation(subaction))

            # update the maximum item length
            invocation_length = max([len(s) for s in invocations])
            action_length = invocation_length + self._current_indent + 2
            self._action_max_length = max(self._action_max_length, action_length)

            # add the item to the list
            self._add_item(self._format_action, [action])

    def _format_action_invocation(self, action):
        # Render "-s,--long <metavar>" (comma-joined) instead of argparse's
        # default "-s <metavar>, --long <metavar>".
        if not action.option_strings:
            metavar, = self._metavar_formatter(action, action.dest)(1)
            return metavar
        else:
            parts = []
            if action.nargs == 0:
                parts.extend(action.option_strings)
                return ','.join(parts)
            else:
                default = action.dest
                args_string = self._format_args(action, default)
                for option_string in action.option_strings:
                    parts.append(option_string)
                return ','.join(parts) + ' ' + args_string


class CBDeprecatedAction(Action):
    """Indicates that a specific option is deprecated"""

    def __call__(self, parser, namespace, values, option_string=None):
        # Warn, then store the value exactly like argparse's default actions.
        _deprecated('Specifying ' + '/'.join(self.option_strings) + ' is deprecated')
        if self.nargs == 0:
            setattr(namespace, self.dest, self.const)
        else:
            setattr(namespace, self.dest, values)


class CBHostAction(Action):
    """Allows the handling of hostnames on the command line"""

    def __call__(self, parser, namespace, values, option_string=None):
        # Normalises the cluster address to "<scheme>://<host>:<port>".
        parsed = urllib.parse.urlparse(values)

        # If the netloc is empty then it means that there was no scheme added
        # to the URI and we are parsing it as a path. In this case no scheme
        # means HTTP so we can add that scheme to the hostname provided.
        if parsed.netloc == "":
            parsed = urllib.parse.urlparse("http://" + values)

        if parsed.scheme == "":
            parsed = urllib.parse.urlparse("http://" + values)

        # Only a bare host (optionally with scheme/port) is accepted.
        if parsed.path != "" or parsed.params != "" or parsed.query != "" or parsed.fragment != "":
            raise ArgumentError(self, f"{values} is not an accepted hostname")

        if not parsed.hostname:
            raise ArgumentError(self, f"{values} is not an accepted hostname")

        # RFC-952/1123 style hostname; anything else must be a valid IP.
        hostname_regex = re.compile(r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*'
                                    + r'([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$')
        if not hostname_regex.match(parsed.hostname):
            try:
                ipaddress.ip_address(parsed.hostname)
            except ValueError as val_error:
                raise ArgumentError(self, f"{values} is not an accepted hostname") from val_error

        # Map couchbase:// -> http:// (default 8091) and
        # couchbases:// -> https:// (default 18091).
        scheme = parsed.scheme
        port = None
        if scheme in ["http", "couchbase"]:
            if not parsed.port:
                port = 8091
            if scheme == "couchbase":
                scheme = "http"
        elif scheme in ["https", "couchbases"]:
            if not parsed.port:
                port = 18091
            if scheme == "couchbases":
                scheme = "https"
        else:
            raise ArgumentError(self, "%s is not an accepted scheme" % scheme)

        if parsed.port:
            setattr(namespace, self.dest, (scheme + "://" + parsed.netloc))
        else:
            setattr(namespace, self.dest, (scheme + "://" + parsed.netloc + ":" + str(port)))


class CBEnvAction(Action):
    """Allows the custom handling of environment variables for command line options"""

    def __init__(self, envvar, required=True, default=None, **kwargs):
        # Fall back to the environment variable for the default value; a
        # defaulted option is no longer required on the command line.
        if not default and envvar and envvar in os.environ:
            default = os.environ[envvar]
        if required and default:
            required = False
        super(CBEnvAction, self).__init__(default=default, required=required, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, values)


class CBNonEchoedAction(CBEnvAction):
    """Allows an argument to be specified by use of a non-echoed value passed through
    stdin, through an environment variable, or as a value to the argument"""

    def __init__(self, envvar, prompt_text="Enter password:", confirm_text=None, required=True, default=None,
                 nargs='?', **kwargs):
        self.prompt_text = prompt_text
        self.confirm_text = confirm_text
        super(CBNonEchoedAction, self).__init__(envvar, required=required, default=default, nargs=nargs,
                                                **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        # nargs='?' means the flag may appear with no value: prompt instead.
        if values is None:
            values = getpass.getpass(self.prompt_text)
            if self.confirm_text is not None:
                confirm = getpass.getpass(self.prompt_text)
                if values != confirm:
                    raise ArgumentError(self, "Passwords entered do not match, please retry")
        super(CBNonEchoedAction, self).__call__(parser, namespace, values, option_string=None)


class CBHelpAction(Action):
    """Allows the custom handling of the help command line argument"""

    # pylint: disable=redefined-builtin
    def __init__(self, option_strings, klass, dest=SUPPRESS, default=SUPPRESS, help=None):
        super(CBHelpAction, self).__init__(option_strings=option_strings, dest=dest, default=default, nargs=0,
                                           help=help)
        # klass provides the man page name for the long (--help) form.
        self.klass = klass

    def __call__(self, parser, namespace, values, option_string=None):
        # -h prints the short usage text; --help opens the full man page.
        if option_string == "-h":
            parser.print_help()
        else:
            CBHelpAction._show_man_page(self.klass.get_man_page_name())
        parser.exit()

    @staticmethod
    def _show_man_page(page):
        if os.name == "nt":
            try:
                # On Windows the docs are HTML; open them with the browser.
                subprocess.call(["rundll32.exe", "url.dll,FileProtocolHandler", os.path.join(CB_MAN_PATH, page)])
            except OSError as e:
                _exit_if_errors(["Unable to open man page using your browser, %s" % e])
        else:
            try:
                subprocess.call(["man", os.path.join(CB_MAN_PATH, page)])
            except OSError:
                _exit_if_errors(["Unable to open man page using the 'man' command, ensure it is on your path or" +
                                 "install a manual reader"])
class CliParser(ArgumentParser):
    # ArgumentParser variant whose errors use the CLI's "ERROR: ..." format.

    def __init__(self, *args, **kwargs):
        super(CliParser, self).__init__(*args, **kwargs)

    def error(self, message):
        self.exit(2, f'ERROR: {message}\n')


class Command(object):
    """A Couchbase CLI Command"""

    def __init__(self):
        self.parser = CliParser(formatter_class=CLIHelpFormatter, add_help=False, allow_abbrev=False)

    def parse(self, args):
        """Parses the subcommand"""
        if len(args) == 0:
            self.short_help()
        return self.parser.parse_args(args)

    def short_help(self, code=0):
        """Prints the short help message and exits"""
        self.parser.print_help()
        self.parser.exit(code)

    def execute(self, opts):
        """Executes the subcommand"""
        raise NotImplementedError

    @staticmethod
    def get_man_page_name():
        """Returns the man page name"""
        raise NotImplementedError

    @staticmethod
    def get_description():
        """Returns the command description"""
        raise NotImplementedError


class CouchbaseCLI(Command):
    """A Couchbase CLI command"""

    def __init__(self):
        super(CouchbaseCLI, self).__init__()
        self.parser.prog = "couchbase-cli"
        subparser = self.parser.add_subparsers(title="Commands", metavar="")

        # Register every Subcommand/LocalSubcommand subclass found in this module.
        for (name, klass) in find_subcommands():
            if klass.is_hidden():
                subcommand = subparser.add_parser(name)
            else:
                subcommand = subparser.add_parser(name, help=klass.get_description())
            subcommand.set_defaults(klass=klass)

        group = self.parser.add_argument_group("Options")
        group.add_argument("-h", "--help", action=CBHelpAction, klass=self,
                           help="Prints the short or long help message")
        group.add_argument("--version", help="Get couchbase-cli version")

    def parse(self, args):
        # Two-level parse: first resolve the subcommand class, then hand the
        # remaining arguments to that subcommand's own parser.
        if len(sys.argv) == 1:
            self.parser.print_help()
            self.parser.exit(1)

        if args[1] == "--version":
            print(VERSION)
            sys.exit(0)

        if not args[1] in ["-h", "--help", "--version"] and args[1].startswith("-"):
            _exit_if_errors([f"Unknown subcommand: '{args[1]}'. The first argument has to be a subcommand like"
                             f" 'bucket-list' or 'rebalance', please see couchbase-cli -h for the full list of commands"
                             f" and options"])

        l1_args = self.parser.parse_args(args[1:2])
        l2_args = l1_args.klass().parse(args[2:])
        setattr(l2_args, 'klass', l1_args.klass)
        return l2_args

    def execute(self, opts):
        opts.klass().execute(opts)

    @staticmethod
    def get_man_page_name():
        """Returns the man page name"""
        return get_doc_page_name("couchbase-cli")

    @staticmethod
    def get_description():
        return "A Couchbase cluster administration utility"


class Subcommand(Command):
    """
    A Couchbase CLI Subcommand: This is for subcommand that interact with a remote Couchbase Server over the REST API.
    """

    def __init__(self, deprecate_username=False, deprecate_password=False, cluster_default=None):
        super(Subcommand, self).__init__()
        # Filled by the decorators
        self.rest = None
        self.enterprise = None

        self.parser = CliParser(formatter_class=CLIHelpFormatter, add_help=False, allow_abbrev=False)
        group = self.parser.add_argument_group("Cluster options")
        group.add_argument("-c", "--cluster", dest="cluster", required=(cluster_default is None),
                           metavar="<cluster>", action=CBHostAction, default=cluster_default,
                           help="The hostname of the Couchbase cluster")

        # Credentials may come from the command line or the CB_REST_* environment
        # variables; some subcommands deprecate them in favour of their own flags.
        if deprecate_username:
            group.add_argument("-u", "--username", dest="username",
                               action=CBDeprecatedAction, help=SUPPRESS)
        else:
            group.add_argument("-u", "--username", dest="username", required=True,
                               action=CBEnvAction, envvar='CB_REST_USERNAME',
                               metavar="<username>", help="The username for the Couchbase cluster")

        if deprecate_password:
            group.add_argument("-p", "--password", dest="password",
                               action=CBDeprecatedAction, help=SUPPRESS)
        else:
            group.add_argument("-p", "--password", dest="password", required=True,
                               action=CBNonEchoedAction, envvar='CB_REST_PASSWORD',
                               metavar="<password>", help="The password for the Couchbase cluster")

        group.add_argument("-o", "--output", dest="output", default="standard", metavar="<output>",
                           choices=["json", "standard"], help="The output type (json or standard)")
        group.add_argument("-d", "--debug", dest="debug", action="store_true",
                           help="Run the command with extra logging")
        group.add_argument("-s", "--ssl", dest="ssl", const=True, default=False,
                           nargs=0, action=CBDeprecatedAction,
                           help="Use ssl when connecting to Couchbase (Deprecated)")
        group.add_argument("--no-ssl-verify", dest="ssl_verify", action="store_false", default=True,
                           help="Skips SSL verification of certificates against the CA")
        group.add_argument("--cacert", dest="cacert", default=True,
                           help="Verifies the cluster identity with this certificate")
        group.add_argument("-h", "--help", action=CBHelpAction, klass=self,
                           help="Prints the short or long help message")

    def execute(self, opts):  # pylint: disable=useless-super-delegation
        super(Subcommand, self).execute(opts)

    @staticmethod
    def get_man_page_name():
        return Command.get_man_page_name()

    @staticmethod
    def get_description():
        return Command.get_description()

    @staticmethod
    def is_hidden():
        """Whether or not the subcommand should be hidden from the help message"""
        return False
""" def __init__(self): super(LocalSubcommand, self).__init__() self.parser = CliParser(formatter_class=CLIHelpFormatter, add_help=False, allow_abbrev=False) group = self.parser.add_argument_group(title="Local command options", description="This command has to be execute on the locally running" + " Couchbase Server.") group.add_argument("-h", "--help", action=CBHelpAction, klass=self, help="Prints the short or long help message") group.add_argument("--config-path", dest="config_path", metavar="<path>", default=CB_CFG_PATH, help=SUPPRESS) def execute(self, opts): # pylint: disable=useless-super-delegation super(LocalSubcommand, self).execute(opts) @staticmethod def get_man_page_name(): return Command.get_man_page_name() @staticmethod def get_description(): return Command.get_description() @staticmethod def is_hidden(): """Whether or not the subcommand should be hidden from the help message""" return False class ClusterInit(Subcommand): """The cluster initialization subcommand""" def __init__(self): super(ClusterInit, self).__init__(True, True, "http://127.0.0.1:8091") self.parser.prog = "couchbase-cli cluster-init" group = self.parser.add_argument_group("Cluster initialization options") group.add_argument("--cluster-username", dest="username", required=True, metavar="<username>", help="The cluster administrator username") group.add_argument("--cluster-password", dest="password", required=True, metavar="<password>", help="The cluster administrator password") group.add_argument("--cluster-port", dest="port", type=(int), metavar="<port>", help="The cluster administration console port") group.add_argument("--cluster-ramsize", dest="data_mem_quota", type=(int), metavar="<quota>", help="The data service memory quota in mebibytes") group.add_argument("--cluster-index-ramsize", dest="index_mem_quota", type=(int), metavar="<quota>", help="The index service memory quota in mebibytes") group.add_argument("--cluster-fts-ramsize", dest="fts_mem_quota", type=(int), 
metavar="<quota>", help="The full-text service memory quota in mebibytes") group.add_argument("--cluster-eventing-ramsize", dest="eventing_mem_quota", type=(int), metavar="<quota>", help="The Eventing service memory quota in mebibytes") group.add_argument("--cluster-analytics-ramsize", dest="cbas_mem_quota", type=(int), metavar="<quota>", help="The analytics service memory quota in mebibytes") group.add_argument("--cluster-name", dest="name", metavar="<name>", help="The cluster name") group.add_argument("--index-storage-setting", dest="index_storage_mode", choices=["default", "memopt"], metavar="<mode>", help="The index storage backend (Defaults to \"default)\"") group.add_argument("--services", dest="services", default="data", metavar="<service_list>", help="The services to run on this server") group.add_argument("--update-notifications", dest="notifications", metavar="<1|0>", choices=["0", "1"], default="1", help="Enables/disable software update notifications") @rest_initialiser(enterprise_check=False) def execute(self, opts): # We need to ensure that creating the REST username/password is the # last REST API that is called because once that API succeeds the # cluster is initialized and cluster-init cannot be run again. 
initialized, errors = self.rest.is_cluster_initialized() _exit_if_errors(errors) if initialized: _exit_if_errors(["Cluster is already initialized, use setting-cluster to change settings"]) if not self.enterprise and opts.index_storage_mode == 'memopt': _exit_if_errors(["memopt option for --index-storage-setting can only be configured on enterprise edition"]) services, errors = process_services(opts.services, self.enterprise) _exit_if_errors(errors) if 'kv' not in services.split(','): _exit_if_errors(["Cannot set up first cluster node without the data service"]) if opts.data_mem_quota or opts.index_mem_quota or opts.fts_mem_quota or opts.cbas_mem_quota \ or opts.eventing_mem_quota or opts.name is not None: _, errors = self.rest.set_pools_default(opts.data_mem_quota, opts.index_mem_quota, opts.fts_mem_quota, opts.cbas_mem_quota, opts.eventing_mem_quota, opts.name) _exit_if_errors(errors) # Set the index storage mode if not opts.index_storage_mode and 'index' in services.split(','): opts.index_storage_mode = "default" default = "plasma" if not self.enterprise: default = "forestdb" if opts.index_storage_mode: param = index_storage_mode_to_param(opts.index_storage_mode, default) _, errors = self.rest.set_index_settings(param, None, None, None, None, None, None, None) _exit_if_errors(errors) # Setup services _, errors = self.rest.setup_services(services) _exit_if_errors(errors) # Enable notifications if opts.notifications == "1": _, errors = self.rest.enable_notifications(True) else: _, errors = self.rest.enable_notifications(False) _exit_if_errors(errors) # Setup Administrator credentials and Admin Console port _, errors = self.rest.set_admin_credentials(opts.username, opts.password, opts.port) _exit_if_errors(errors) _success("Cluster initialized") @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-cluster-init") @staticmethod def get_description(): return "Initialize a Couchbase cluster" class BucketCompact(Subcommand): """The bucket 
compact subcommand""" def __init__(self): super(BucketCompact, self).__init__() self.parser.prog = "couchbase-cli bucket-compact" group = self.parser.add_argument_group("Bucket compaction options") group.add_argument("--bucket", dest="bucket_name", metavar="<name>", help="The name of bucket to compact") group.add_argument("--data-only", dest="data_only", action="store_true", help="Only compact the data files") group.add_argument("--view-only", dest="view_only", action="store_true", help="Only compact the view files") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): bucket, errors = self.rest.get_bucket(opts.bucket_name) _exit_if_errors(errors) if bucket["bucketType"] != BUCKET_TYPE_COUCHBASE: _exit_if_errors(["Cannot compact memcached buckets"]) _, errors = self.rest.compact_bucket(opts.bucket_name, opts.data_only, opts.view_only) _exit_if_errors(errors) _success("Bucket compaction started") @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-bucket-compact") @staticmethod def get_description(): return "Compact database and view data" class BucketCreate(Subcommand): """The bucket create subcommand""" def __init__(self): super(BucketCreate, self).__init__() self.parser.prog = "couchbase-cli bucket-create" group = self.parser.add_argument_group("Bucket create options") group.add_argument("--bucket", dest="bucket_name", metavar="<name>", required=True, help="The name of bucket to create") group.add_argument("--bucket-type", dest="type", metavar="<type>", required=True, choices=["couchbase", "ephemeral", "memcached"], help="The bucket type (couchbase, ephemeral, or memcached)") group.add_argument("--storage-backend", dest="storage", metavar="<storage>", choices=["couchstore", "magma"], help="Type of storage backend (only for couchbase buckets)") group.add_argument("--bucket-ramsize", dest="memory_quota", metavar="<quota>", type=(int), required=True, help="The amount of memory to allocate the bucket") 
        # (continuation of BucketCreate.__init__ argument definitions)
        group.add_argument("--bucket-replica", dest="replica_count", metavar="<num>", choices=["0", "1", "2", "3"],
                           help="The replica count for the bucket")
        group.add_argument("--bucket-priority", dest="priority", metavar="<priority>",
                           choices=[BUCKET_PRIORITY_LOW_STR, BUCKET_PRIORITY_HIGH_STR],
                           help="The bucket disk io priority (low or high)")
        group.add_argument("--durability-min-level", dest="durability_min_level", metavar="<level>",
                           choices=["none", "majority", "majorityAndPersistActive", "persistToMajority"],
                           help="The bucket durability minimum level")
        group.add_argument("--bucket-eviction-policy", dest="eviction_policy", metavar="<policy>",
                           choices=["valueOnly", "fullEviction", "noEviction", "nruEviction"],
                           help="The bucket eviction policy")
        group.add_argument("--conflict-resolution", dest="conflict_resolution", default=None,
                           choices=["sequence", "timestamp"], metavar="<type>",
                           help="The XDCR conflict resolution type (timestamp or sequence)")
        group.add_argument("--max-ttl", dest="max_ttl", default=None, type=(int), metavar="<seconds>",
                           help="Set the maximum TTL the bucket will accept. Couchbase server Enterprise Edition "
                                "only.")
        group.add_argument("--compression-mode", dest="compression_mode", choices=["off", "passive", "active"],
                           metavar="<mode>", help="Set the compression mode of the bucket")
        group.add_argument("--enable-flush", dest="enable_flush", metavar="<0|1>", choices=["0", "1"],
                           help="Enable bucket flush on this bucket (0 or 1)")
        group.add_argument("--enable-index-replica", dest="replica_indexes", metavar="<0|1>", choices=["0", "1"],
                           help="Enable replica indexes (0 or 1)")
        group.add_argument("--wait", dest="wait", action="store_true",
                           help="Wait for bucket creation to complete")
        # Auto-compaction settings (only honoured for couchbase buckets; see execute()).
        group.add_argument("--database-fragmentation-threshold-percentage", dest="db_frag_perc", metavar="<perc>",
                           type=(int), help="Set Database Fragmentation level percent")
        group.add_argument("--database-fragmentation-threshold-size", dest="db_frag_size", metavar="<mebibytes>",
                           type=(int), help="Set Database Fragmentation level")
        group.add_argument("--view-fragmentation-threshold-percentage", dest="view_frag_perc", metavar="<perc>",
                           type=(int), help="Set View Fragmentation level percent")
        group.add_argument("--view-fragmentation-threshold-size", dest="view_frag_size", metavar="<mebibytes>",
                           type=(int), help="Set View Fragmentation level size")
        group.add_argument("--from-hour", dest="from_hour", metavar="<quota>", type=(int),
                           help="Set start time hour")
        group.add_argument("--from-minute", dest="from_min", metavar="<quota>", type=(int),
                           help="Set start time minutes")
        group.add_argument("--to-hour", dest="to_hour", metavar="<quota>", type=(int),
                           help="Set end time hour")
        group.add_argument("--to-minute", dest="to_min", metavar="<quota>", type=(int),
                           help="Set end time minutes")
        group.add_argument("--abort-outside", dest="abort_outside", metavar="<0|1>", choices=["0", "1"],
                           help="Allow Time period")
        group.add_argument("--parallel-db-view-compaction", dest="paralleldb_and_view_compact", metavar="<0|1>",
                           choices=["0", "1"], help="Set parallel DB and View Compaction")
        group.add_argument("--purge-interval", dest="purge_interval", type=(float), metavar="<float>",
                           help="Sets the frequency of the tombstone purge interval")

    @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False)
    def execute(self, opts):
        # Enterprise-only features are rejected up-front on Community Edition.
        if opts.max_ttl and not self.enterprise:
            _exit_if_errors(["Maximum TTL can only be configured on enterprise edition"])
        if opts.compression_mode and not self.enterprise:
            _exit_if_errors(["Compression mode can only be configured on enterprise edition"])

        if opts.type == "memcached":
            _deprecated("Memcached buckets are deprecated, please use ephemeral buckets instead")
            # None of the couchbase/ephemeral-specific options apply to memcached buckets.
            if opts.replica_count is not None:
                _exit_if_errors(["--bucket-replica cannot be specified for a memcached bucket"])
            if opts.conflict_resolution is not None:
                _exit_if_errors(["--conflict-resolution cannot be specified for a memcached bucket"])
            if opts.replica_indexes is not None:
                _exit_if_errors(["--enable-index-replica cannot be specified for a memcached bucket"])
            if opts.priority is not None:
                _exit_if_errors(["--bucket-priority cannot be specified for a memcached bucket"])
            if opts.eviction_policy is not None:
                _exit_if_errors(["--bucket-eviction-policy cannot be specified for a memcached bucket"])
            if opts.max_ttl is not None:
                _exit_if_errors(["--max-ttl cannot be specified for a memcached bucket"])
            if opts.compression_mode is not None:
                _exit_if_errors(["--compression-mode cannot be specified for a memcached bucket"])
            if opts.durability_min_level is not None:
                _exit_if_errors(["--durability-min-level cannot be specified for a memcached bucket"])
        elif opts.type == "ephemeral" and opts.eviction_policy in ["valueOnly", "fullEviction"]:
            _exit_if_errors(["--bucket-eviction-policy must either be noEviction or nruEviction"])
        elif opts.type == "couchbase" and opts.eviction_policy in ["noEviction", "nruEviction"]:
            _exit_if_errors(["--bucket-eviction-policy must either be valueOnly or fullEviction"])

        # Compaction settings only make sense for disk-backed (couchbase) buckets; warn and ignore otherwise.
        if ((opts.type == "memcached" or opts.type == "ephemeral")
                and (opts.db_frag_perc is not None or opts.db_frag_size is not None
                     or opts.view_frag_perc is not None or opts.view_frag_size is not None
                     or opts.from_hour is not None or opts.from_min is not None
                     or opts.to_hour is not None or opts.to_min is not None
                     or opts.abort_outside is not None or opts.paralleldb_and_view_compact is not None)):
            _warning(f'ignoring compaction settings as bucket type {opts.type} does not accept it')

        storage_type = "couchstore"
        if opts.storage is not None:
            if opts.type != "couchbase":
                _exit_if_errors(["--storage-backend is only valid for couchbase buckets"])
            if opts.storage == "magma":
                storage_type = "magma"

        # Map the user-facing priority strings to the integers the REST API expects.
        priority = None
        if opts.priority is not None:
            if opts.priority == BUCKET_PRIORITY_HIGH_STR:
                priority = BUCKET_PRIORITY_HIGH_INT
            elif opts.priority == BUCKET_PRIORITY_LOW_STR:
                priority = BUCKET_PRIORITY_LOW_INT

        # Map CLI names to the REST names: sequence -> seqno, timestamp -> lww (last write wins).
        conflict_resolution_type = None
        if opts.conflict_resolution is not None:
            if opts.conflict_resolution == "sequence":
                conflict_resolution_type = "seqno"
            elif opts.conflict_resolution == "timestamp":
                conflict_resolution_type = "lww"

        _, errors = self.rest.create_bucket(opts.bucket_name, opts.type, storage_type, opts.memory_quota,
                                            opts.durability_min_level, opts.eviction_policy, opts.replica_count,
                                            opts.replica_indexes, priority, conflict_resolution_type,
                                            opts.enable_flush, opts.max_ttl, opts.compression_mode, opts.wait,
                                            opts.db_frag_perc, opts.db_frag_size, opts.view_frag_perc,
                                            opts.view_frag_size, opts.from_hour, opts.from_min, opts.to_hour,
                                            opts.to_min, opts.abort_outside, opts.paralleldb_and_view_compact,
                                            opts.purge_interval)
        _exit_if_errors(errors)
        _success("Bucket created")

    @staticmethod
    def get_man_page_name():
        return get_doc_page_name("couchbase-cli-bucket-create")

    @staticmethod
    def get_description():
        return "Add a new bucket to the cluster"


class BucketDelete(Subcommand):
    """The bucket delete subcommand"""

    def __init__(self):
        super(BucketDelete, self).__init__()
        self.parser.prog = "couchbase-cli bucket-delete"
        group = 
self.parser.add_argument_group("Bucket delete options") group.add_argument("--bucket", dest="bucket_name", metavar="<name>", required=True, help="The name of bucket to delete") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): _, errors = self.rest.get_bucket(opts.bucket_name) _exit_if_errors(errors) _, errors = self.rest.delete_bucket(opts.bucket_name) _exit_if_errors(errors) _success("Bucket deleted") @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-bucket-delete") @staticmethod def get_description(): return "Delete an existing bucket" class BucketEdit(Subcommand): """The bucket edit subcommand""" def __init__(self): super(BucketEdit, self).__init__() self.parser.prog = "couchbase-cli bucket-edit" group = self.parser.add_argument_group("Bucket edit options") group.add_argument("--bucket", dest="bucket_name", metavar="<name>", required=True, help="The name of bucket to create") group.add_argument("--bucket-ramsize", dest="memory_quota", metavar="<quota>", type=(int), help="The amount of memory to allocate the bucket") group.add_argument("--bucket-replica", dest="replica_count", metavar="<num>", choices=["0", "1", "2", "3"], help="The replica count for the bucket") group.add_argument("--bucket-priority", dest="priority", metavar="<priority>", choices=["low", "high"], help="The bucket disk io priority (low or high)") group.add_argument("--durability-min-level", dest="durability_min_level", metavar="<level>", choices=["none", "majority", "majorityAndPersistActive", "persistToMajority"], help="The bucket durability minimum level") group.add_argument("--bucket-eviction-policy", dest="eviction_policy", metavar="<policy>", type=(str), help="The bucket eviction policy (valueOnly or fullEviction)") group.add_argument("--max-ttl", dest="max_ttl", default=None, type=(int), metavar="<seconds>", help="Set the maximum TTL the bucket will accept") group.add_argument("--compression-mode", dest="compression_mode", 
                           choices=["off", "passive", "active"], metavar="<mode>",
                           help="Set the compression mode of the bucket")
        group.add_argument("--enable-flush", dest="enable_flush", metavar="<0|1>", choices=["0", "1"],
                           help="Enable bucket flush on this bucket (0 or 1)")
        group.add_argument("--remove-bucket-port", dest="remove_port", metavar="<0|1>", choices=["0", "1"],
                           help="Removes the bucket-port setting")
        # Auto-compaction settings (rejected later for memcached/ephemeral buckets).
        group.add_argument("--database-fragmentation-threshold-percentage", dest="db_frag_perc", metavar="<perc>",
                           type=(int), help="Set Database Fragmentation level percent")
        group.add_argument("--database-fragmentation-threshold-size", dest="db_frag_size", metavar="<mebibytes>",
                           type=(int), help="Set Database Fragmentation level")
        group.add_argument("--view-fragmentation-threshold-percentage", dest="view_frag_perc", metavar="<perc>",
                           type=(int), help="Set View Fragmentation level percent")
        group.add_argument("--view-fragmentation-threshold-size", dest="view_frag_size", metavar="<mebibytes>",
                           type=(int), help="Set View Fragmentation level size")
        group.add_argument("--from-hour", dest="from_hour", metavar="<hour>", type=(int),
                           help="Set start time hour")
        group.add_argument("--from-minute", dest="from_min", metavar="<min>", type=(int),
                           help="Set start time minutes")
        group.add_argument("--to-hour", dest="to_hour", metavar="<hour>", type=(int),
                           help="Set end time hour")
        group.add_argument("--to-minute", dest="to_min", metavar="<min>", type=(int),
                           help="Set end time minutes")
        group.add_argument("--abort-outside", dest="abort_outside", metavar="<0|1>", choices=["0", "1"],
                           help="Allow Time period")
        group.add_argument("--parallel-db-view-compaction", dest="paralleldb_and_view_compact", metavar="<0|1>",
                           choices=["0", "1"], help="Set parallel DB and View Compaction")
        group.add_argument("--purge-interval", dest="purge_interval", type=(float), metavar="<num>",
                           help="Set the bucket metadata purge interval")

    @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False)
    def execute(self, opts):
        # Enterprise-only features are rejected up-front on Community Edition.
        if opts.max_ttl and not self.enterprise:
            _exit_if_errors(["Maximum TTL can only be configured on enterprise edition"])
        if opts.compression_mode and not self.enterprise:
            _exit_if_errors(["Compression mode can only be configured on enterprise edition"])

        # Note that we accept 'noEviction' and 'nruEviction' as valid values even though they are undocumented; this
        # is so that users attempting to modify the eviction policy of an ephemeral bucket will receive a meaningful
        # message from 'ns_server'. See MB-39036 for more information.
        if (opts.eviction_policy is not None
                and opts.eviction_policy not in ["valueOnly", "fullEviction", "noEviction", "nruEviction"]):
            _exit_if_errors([f"argument --bucket-eviction-policy: invalid choice: '{opts.eviction_policy}'" +
                             " (choose from 'valueOnly', 'fullEviction')"])

        bucket, errors = self.rest.get_bucket(opts.bucket_name)
        _exit_if_errors(errors)

        if "bucketType" in bucket and bucket["bucketType"] == "memcached":
            _deprecated("Memcached buckets are deprecated, please use ephemeral buckets instead")
            # None of these options apply to memcached buckets.
            if opts.memory_quota is not None:
                _exit_if_errors(["--bucket-ramsize cannot be specified for a memcached bucket"])
            if opts.replica_count is not None:
                _exit_if_errors(["--bucket-replica cannot be specified for a memcached bucket"])
            if opts.priority is not None:
                _exit_if_errors(["--bucket-priority cannot be specified for a memcached bucket"])
            if opts.eviction_policy is not None:
                _exit_if_errors(["--bucket-eviction-policy cannot be specified for a memcached bucket"])
            if opts.max_ttl is not None:
                _exit_if_errors(["--max-ttl cannot be specified for a memcached bucket"])
            if opts.compression_mode is not None:
                _exit_if_errors(["--compression-mode cannot be specified for a memcached bucket"])
            if opts.durability_min_level is not None:
                _exit_if_errors(["--durability-min-level cannot be specified for a memcached bucket"])

        if (("bucketType" in bucket
                and (bucket["bucketType"] == "memcached" or bucket["bucketType"] == "ephemeral")) and 
(opts.db_frag_perc is not None or opts.db_frag_size is not None or opts.view_frag_perc is not None or opts.view_frag_size is not None or opts.from_hour is not None or opts.from_min is not None or opts.to_hour is not None or opts.to_min is not None or opts.abort_outside is not None or opts.paralleldb_and_view_compact is not None)): _exit_if_errors([f'compaction settings can not be specified for a {bucket["bucketType"]} bucket']) priority = None if opts.priority is not None: if opts.priority == BUCKET_PRIORITY_HIGH_STR: priority = BUCKET_PRIORITY_HIGH_INT elif opts.priority == BUCKET_PRIORITY_LOW_STR: priority = BUCKET_PRIORITY_LOW_INT if opts.remove_port: if opts.remove_port == '1': opts.remove_port = True else: opts.remove_port = False _, errors = self.rest.edit_bucket(opts.bucket_name, opts.memory_quota, opts.durability_min_level, opts.eviction_policy, opts.replica_count, priority, opts.enable_flush, opts.max_ttl, opts.compression_mode, opts.remove_port, opts.db_frag_perc, opts.db_frag_size, opts.view_frag_perc, opts.view_frag_size, opts.from_hour, opts.from_min, opts.to_hour, opts.to_min, opts.abort_outside, opts.paralleldb_and_view_compact, opts.purge_interval, 'bucketType' in bucket and bucket['bucketType'] == 'membase') _exit_if_errors(errors) _success("Bucket edited") @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-bucket-edit") @staticmethod def get_description(): return "Modify settings for an existing bucket" class BucketFlush(Subcommand): """The bucket edit subcommand""" def __init__(self): super(BucketFlush, self).__init__() self.parser.prog = "couchbase-cli bucket-flush" group = self.parser.add_argument_group("Bucket flush options") group.add_argument("--bucket", dest="bucket_name", metavar="<name>", required=True, help="The name of bucket to delete") group.add_argument("--force", dest="force", action="store_true", help="Execute the command without asking to confirm") @rest_initialiser(cluster_init_check=True, 
version_check=True) def execute(self, opts): _, errors = self.rest.get_bucket(opts.bucket_name) _exit_if_errors(errors) if not opts.force: question = "Running this command will totally PURGE database data from disk. " + \ "Do you really want to do it? (Yes/No)" confirm = input(question) if confirm not in ('y', 'Y', 'yes', 'Yes'): return _, errors = self.rest.flush_bucket(opts.bucket_name) _exit_if_errors(errors) _success("Bucket flushed") @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-bucket-flush") @staticmethod def get_description(): return "Flush all data from disk for a given bucket" class BucketList(Subcommand): """The bucket list subcommand""" def __init__(self): super(BucketList, self).__init__() self.parser.prog = "couchbase-cli bucket-list" @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): result, errors = self.rest.list_buckets(extended=True) _exit_if_errors(errors) if opts.output == 'json': print(json.dumps(result)) else: for bucket in result: print(f'{bucket["name"]}') print(f' bucketType: {bucket["bucketType"]}') print(f' numReplicas: {bucket["replicaNumber"]}') print(f' ramQuota: {bucket["quota"]["ram"]}') print(f' ramUsed: {bucket["basicStats"]["memUsed"]}') @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-bucket-list") @staticmethod def get_description(): return "List all buckets in a cluster" class CollectLogsStart(Subcommand): """The collect-logs-start subcommand""" def __init__(self): super(CollectLogsStart, self).__init__() self.parser.prog = "couchbase-cli collect-logs-start" group = self.parser.add_argument_group("Collect logs start options") group.add_argument("--all-nodes", dest="all_nodes", action="store_true", default=False, help="Collect logs for all nodes") group.add_argument("--nodes", dest="nodes", metavar="<node_list>", help="A comma separated list of nodes to collect logs from") group.add_argument("--redaction-level", 
dest="redaction_level", metavar="<none|partial>", choices=["none", "partial"], help="Level of log redaction to apply") group.add_argument("--salt", dest="salt", metavar="<string>", help="The salt to use to redact the log") group.add_argument("--output-directory", dest="output_dir", metavar="<directory>", help="Output directory to place the generated logs file") group.add_argument("--temporary-directory", dest="tmp_dir", metavar="<directory>", help="Temporary directory to use when generating the logs") group.add_argument("--upload", dest="upload", action="store_true", default=False, help="Logs should be uploaded for Couchbase support") group.add_argument("--upload-host", dest="upload_host", metavar="<host>", help="The host to upload logs to") group.add_argument("--upload-proxy", dest="upload_proxy", metavar="<proxy>", help="The proxy to used to upload the logs via") group.add_argument("--customer", dest="upload_customer", metavar="<name>", help="The name of the customer uploading logs") group.add_argument("--ticket", dest="upload_ticket", metavar="<num>", help="The ticket number the logs correspond to") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): if not opts.nodes and not opts.all_nodes: _exit_if_errors(["Must specify either --all-nodes or --nodes"]) if opts.nodes and opts.all_nodes: _exit_if_errors(["Cannot specify both --all-nodes and --nodes"]) if opts.salt and opts.redaction_level != "partial": _exit_if_errors(["--redaction-level has to be set to 'partial' when --salt is specified"]) servers = opts.nodes if opts.all_nodes: servers = "*" if opts.upload: if not opts.upload_host: _exit_if_errors(["--upload-host is required when --upload is specified"]) if not opts.upload_customer: _exit_if_errors(["--upload-customer is required when --upload is specified"]) else: if opts.upload_host: _warning("--upload-host has no effect with specifying --upload") if opts.upload_customer: _warning("--upload-customer has no effect with 
specifying --upload") if opts.upload_ticket: _warning("--upload_ticket has no effect with specifying --upload") if opts.upload_proxy: _warning("--upload_proxy has no effect with specifying --upload") _, errors = self.rest.collect_logs_start(servers, opts.redaction_level, opts.salt, opts.output_dir, opts.tmp_dir, opts.upload, opts.upload_host, opts.upload_proxy, opts.upload_customer, opts.upload_ticket) _exit_if_errors(errors) _success("Log collection started") @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-collect-logs-start") @staticmethod def get_description(): return "Start cluster log collection" class CollectLogsStatus(Subcommand): """The collect-logs-status subcommand""" def __init__(self): super(CollectLogsStatus, self).__init__() self.parser.prog = "couchbase-cli collect-logs-status" @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): tasks, errors = self.rest.get_tasks() _exit_if_errors(errors) found = False for task in tasks: if isinstance(task, dict) and 'type' in task and task['type'] == 'clusterLogsCollection': found = True self._print_task(task) if not found: print("No log collection tasks were found") def _print_task(self, task): print(f'Status: {task["status"]}') if 'perNode' in task: print("Details:") for node, node_status in task["perNode"].items(): print('\tNode:', node) print('\tStatus:', node_status['status']) for field in ["path", "statusCode", "url", "uploadStatusCode", "uploadOutput"]: if field in node_status: print('\t', field, ":", node_status[field]) print() @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-collect-logs-status") @staticmethod def get_description(): return "View the status of cluster log collection" class CollectLogsStop(Subcommand): """The collect-logs-stop subcommand""" def __init__(self): super(CollectLogsStop, self).__init__() self.parser.prog = "couchbase-cli collect-logs-stop" @rest_initialiser(cluster_init_check=True, 
version_check=True) def execute(self, opts): _, errors = self.rest.collect_logs_stop() _exit_if_errors(errors) _success("Log collection stopped") @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-collect-logs-stop") @staticmethod def get_description(): return "Stop cluster log collection" class Failover(Subcommand): """The failover subcommand""" def __init__(self): super(Failover, self).__init__() self.parser.prog = "couchbase-cli failover" group = self.parser.add_argument_group("Failover options") group.add_argument("--server-failover", dest="servers_to_failover", metavar="<server_list>", required=True, help="A list of servers to fail over") group.add_argument("--hard", dest="hard", action="store_true", help="Hard failover the server") group.add_argument("--force", dest="force", action="store_true", help="Force a hard failover") group.add_argument("--no-progress-bar", dest="no_bar", action="store_true", default=False, help="Disables the progress bar") group.add_argument("--no-wait", dest="wait", action="store_false", default=True, help="Don't wait for rebalance completion") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): if opts.force and not opts.hard: _exit_if_errors(["--hard is required with --force flag"]) opts.servers_to_failover = apply_default_port(opts.servers_to_failover) _, errors = self.rest.failover(opts.servers_to_failover, opts.hard, opts.force) _exit_if_errors(errors) if not opts.hard: time.sleep(1) if opts.wait: bar = TopologyProgressBar(self.rest, 'Gracefully failing over', opts.no_bar) errors = bar.show() _exit_if_errors(errors) _success("Server failed over") else: _success("Server failed over started") else: _success("Server failed over") @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-failover") @staticmethod def get_description(): return "Failover one or more servers" class GroupManage(Subcommand): """The group manage subcommand""" def 
__init__(self):
        super(GroupManage, self).__init__()
        self.parser.prog = "couchbase-cli group-manage"
        group = self.parser.add_argument_group("Group manage options")
        # default=None (not False) so execute() can count how many sub-operations were requested.
        group.add_argument("--create", dest="create", action="store_true", default=None,
                           help="Create a new server group")
        group.add_argument("--delete", dest="delete", action="store_true", default=None,
                           help="Delete a server group")
        group.add_argument("--list", dest="list", action="store_true", default=None,
                           help="List all server groups")
        group.add_argument("--rename", dest="rename",
                           help="Rename a server group. It takes the new name of the group.")
        group.add_argument("--group-name", dest="name", metavar="<name>",
                           help="The name of the server group")
        group.add_argument("--move-servers", dest="move_servers", metavar="<server_list>",
                           help="A list of servers to move between groups")
        group.add_argument("--from-group", dest="from_group", metavar="<group>",
                           help="The group to move servers from")
        group.add_argument("--to-group", dest="to_group", metavar="<group>",
                           help="The group to move servers to")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        # Exactly one of the sub-operations must be requested.
        cmds = [opts.create, opts.delete, opts.list, opts.rename, opts.move_servers]
        if sum(cmd is not None for cmd in cmds) == 0:
            _exit_if_errors(["Must specify one of the following: --create, " +
                             "--delete, --list, --move-servers, or --rename"])
        elif sum(cmd is not None for cmd in cmds) != 1:
            _exit_if_errors(["Only one of the following may be specified: --create" +
                             ", --delete, --list, --move-servers, or --rename"])

        if opts.create:
            self._create(opts)
        elif opts.delete:
            self._delete(opts)
        elif opts.list:
            self._list(opts)
        elif opts.rename:
            self._rename(opts)
        elif opts.move_servers is not None:
            self._move(opts)

    def _create(self, opts):
        if opts.name is None:
            _exit_if_errors(["--group-name is required with --create flag"])
        _, errors = self.rest.create_server_group(opts.name)
        _exit_if_errors(errors)
        _success("Server group created")

    def _delete(self, opts):
        if opts.name is None:
            _exit_if_errors(["--group-name is required with --delete flag"])
        _, errors = self.rest.delete_server_group(opts.name)
        _exit_if_errors(errors)
        _success("Server group deleted")

    def _list(self, opts):
        # With --group-name, only that group is shown; otherwise all groups are listed.
        groups, errors = self.rest.get_server_groups()
        _exit_if_errors(errors)

        found = False
        for group in groups["groups"]:
            if opts.name is None or opts.name == group['name']:
                found = True
                print(group['name'])
                for node in group['nodes']:
                    print(f' server: {node["hostname"]}')
        if not found and opts.name:
            _exit_if_errors([f'Invalid group name: {opts.name}'])

    def _move(self, opts):
        if opts.from_group is None:
            _exit_if_errors(["--from-group is required with --move-servers"])
        if opts.to_group is None:
            _exit_if_errors(["--to-group is required with --move-servers"])

        servers = apply_default_port(opts.move_servers)
        _, errors = self.rest.move_servers_between_groups(servers, opts.from_group, opts.to_group)
        _exit_if_errors(errors)
        _success("Servers moved between groups")

    def _rename(self, opts):
        if opts.name is None:
            _exit_if_errors(["--group-name is required with --rename option"])
        _, errors = self.rest.rename_server_group(opts.name, opts.rename)
        _exit_if_errors(errors)
        _success("Server group renamed")

    @staticmethod
    def get_man_page_name():
        return get_doc_page_name("couchbase-cli-group-manage")

    @staticmethod
    def get_description():
        return "Manage server groups"


class HostList(Subcommand):
    """The host list subcommand"""

    def __init__(self):
        super(HostList, self).__init__()
        self.parser.prog = "couchbase-cli host-list"

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        result, errors = self.rest.pools('default')
        _exit_if_errors(errors)

        if opts.output == 'json':
            nodes_out = {'nodes': []}
            for node in result['nodes']:
                nodes_out['nodes'].append(node['configuredHostname'])
            print(json.dumps(nodes_out))
        else:
            for node in result['nodes']:
                print(node['configuredHostname'])

    @staticmethod
    def get_man_page_name():
        return get_doc_page_name("couchbase-cli-host-list")
@staticmethod def get_description(): return "List all hosts in a cluster" class ResetCipherSuites(LocalSubcommand): """The reset cipher suites subcommand """ def __init__(self): super(ResetCipherSuites, self).__init__() self.parser.prog = "couchbase-cli reset-cipher-suites" group = self.parser.add_argument_group("Reset Cipher Suites") group.add_argument("--force", action='store_true', default=False, help="Force resetting of the cipher suites") group.add_argument("-P", "--port", metavar="<port>", default="8091", help="The REST API port, defaults to 8091") def execute(self, opts): token = _exit_on_file_read_failure(os.path.join(opts.config_path, "localtoken")).rstrip() rest = ClusterManager("http://127.0.0.1:" + opts.port, "@localtoken", token) check_cluster_initialized(rest) check_versions(rest) if not opts.force: confirm = str(input("Are you sure that the cipher should be reset?: Y/[N]")) if confirm != "Y": _success("Cipher suites have not been reset to default") _, errors = rest.reset_cipher_suites() _exit_if_errors(errors) _success("Cipher suites have been reset to the default") @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-reset-cipher-suites") @staticmethod def get_description(): return "Rests cipher suites to the default" class MasterPassword(LocalSubcommand): """The master password subcommand""" def __init__(self): super(MasterPassword, self).__init__() self.parser.prog = "couchbase-cli master-password" group = self.parser.add_argument_group("Master password options") group.add_argument("--send-password", dest="send_password", metavar="<password>", required=False, action=CBNonEchoedAction, envvar=None, prompt_text="Enter master password:", help="Sends the master password to start the server") def execute(self, opts): if opts.send_password is not None: path = [CB_BIN_PATH, os.environ['PATH']] if os.name == 'posix': os.environ['PATH'] = ':'.join(path) else: os.environ['PATH'] = ';'.join(path) cookiefile = 
os.path.join(opts.config_path, "couchbase-server.babysitter.cookie") if not os.path.isfile(cookiefile): _exit_if_errors(["The node is down"]) cookie = _exit_on_file_read_failure(cookiefile, "Insufficient privileges to send master password - Please" " execute this command as a operating system user who has" " file system read permission on the Couchbase Server " " configuration").rstrip() nodefile = os.path.join(opts.config_path, "couchbase-server.babysitter.node") node = _exit_on_file_read_failure(nodefile).rstrip() self.prompt_for_master_pwd(node, cookie, opts.send_password, opts.config_path) else: _exit_if_errors(["No parameters set"]) def prompt_for_master_pwd(self, node, cookie, password, cb_cfg_path): ns_server_ebin_path = os.path.join(CB_LIB_PATH, "ns_server", "erlang", "lib", "ns_server", "ebin") babystr_ebin_path = os.path.join(CB_LIB_PATH, "ns_server", "erlang", "lib", "ns_babysitter", "ebin") inetrc_file = os.path.join(CB_ETC_PATH, "hosts.cfg") dist_cfg_file = os.path.join(cb_cfg_path, "config", "dist_cfg") if password == '': password = getpass.getpass("\nEnter master password:") name = 'executioner@cb.local' args = ['-pa', ns_server_ebin_path, babystr_ebin_path, '-noinput', '-name', name, '-proto_dist', 'cb', '-epmd_module', 'cb_epmd', '-kernel', 'inetrc', f'"{inetrc_file}"', 'dist_config_file', f'"{dist_cfg_file}"', '-setcookie', cookie, '-run', 'encryption_service', 'remote_set_password', node, password] rc, out, err = self.run_process("erl", args) if rc == 0: print("SUCCESS: Password accepted. Node started booting.") elif rc == 101: print("Incorrect password.") self.prompt_for_master_pwd(node, cookie, '', cb_cfg_path) elif rc == 102: _exit_if_errors(["Password was already supplied"]) elif rc == 103: _exit_if_errors(["The node is down"]) elif rc == 104: _exit_if_errors(["Incorrect password. 
Node shuts down."]) else: _exit_if_errors([f'Unknown error: {rc} {out}, {err}']) def run_process(self, name, args): try: if os.name == "nt": name = name + ".exe" args.insert(0, name) p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) output = p.stdout.read() error = p.stderr.read() p.wait() rc = p.returncode return rc, output, error except OSError: _exit_if_errors([f'Could not locate the {name} executable']) @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-master-password") @staticmethod def get_description(): return "Unlocking the master password" class NodeInit(Subcommand): """The node initialization subcommand""" def __init__(self): super(NodeInit, self).__init__() self.parser.prog = "couchbase-cli node-init" group = self.parser.add_argument_group("Node initialization options") group.add_argument("--node-init-data-path", dest="data_path", metavar="<path>", help="The path to store database files") group.add_argument("--node-init-index-path", dest="index_path", metavar="<path>", help="The path to store index files") group.add_argument("--node-init-analytics-path", dest="analytics_path", metavar="<path>", action="append", help="The path to store analytics files (supply one parameter for each path desired)") group.add_argument("--node-init-eventing-path", dest="eventing_path", metavar="<path>", help="The path to store eventing files") group.add_argument("--node-init-java-home", dest="java_home", metavar="<path>", help="The path of the Java Runtime Environment (JRE) to use on this server") group.add_argument("--node-init-hostname", dest="hostname", metavar="<hostname>", help="Sets the hostname for this server") group.add_argument("--ipv6", dest="ipv6", action="store_true", default=False, help="Configure the node to communicate via ipv6") group.add_argument("--ipv4", dest="ipv4", action="store_true", default=False, help="Configure the node to communicate via ipv4") @rest_initialiser() def execute(self, opts): # 
Cluster does not need to be initialized for this command if (opts.data_path is None and opts.index_path is None and opts.analytics_path is None and opts.eventing_path is None and opts.java_home is None and opts.hostname is None and opts.ipv6 is None and opts.ipv4 is None): _exit_if_errors(["No node initialization parameters specified"]) if opts.ipv4 and opts.ipv6: _exit_if_errors(["Use either --ipv4 or --ipv6"]) if opts.ipv4: afamily = 'ipv4' elif opts.ipv6: afamily = 'ipv6' else: afamily = None _, errors = self.rest.node_init(hostname=opts.hostname, afamily=afamily, data_path=opts.data_path, index_path=opts.index_path, cbas_path=opts.analytics_path, eventing_path=opts.eventing_path, java_home=opts.java_home) _exit_if_errors(errors) _success("Node initialized") @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-node-init") @staticmethod def get_description(): return "Set node specific settings" class Rebalance(Subcommand): """The rebalance subcommand""" def __init__(self): super(Rebalance, self).__init__() self.parser.prog = "couchbase-cli rebalance" group = self.parser.add_argument_group("Rebalance options") group.add_argument("--server-remove", dest="server_remove", metavar="<server_list>", help="A list of servers to remove from the cluster") group.add_argument("--no-progress-bar", dest="no_bar", action="store_true", default=False, help="Disables the progress bar") group.add_argument("--no-wait", dest="wait", action="store_false", default=True, help="Don't wait for rebalance completion") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): eject_nodes = [] if opts.server_remove: eject_nodes = apply_default_port(opts.server_remove) _, errors = self.rest.rebalance(eject_nodes) _exit_if_errors(errors) time.sleep(1) if opts.wait: bar = TopologyProgressBar(self.rest, 'Rebalancing', opts.no_bar) errors = bar.show() _exit_if_errors(errors) _success("Rebalance complete") else: _success("Rebalance started") 
@staticmethod
    def get_man_page_name():
        return get_doc_page_name("couchbase-cli-rebalance")

    @staticmethod
    def get_description():
        return "Start a cluster rebalancing"


class RebalanceStatus(Subcommand):
    """The rebalance status subcommand"""

    def __init__(self):
        super(RebalanceStatus, self).__init__()
        self.parser.prog = "couchbase-cli rebalance-status"

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Print the rebalance status document returned by the REST API as JSON."""
        status, errors = self.rest.rebalance_status()
        _exit_if_errors(errors)

        print(json.dumps(status, indent=2))

    @staticmethod
    def get_man_page_name():
        return get_doc_page_name("couchbase-cli-rebalance-status")

    @staticmethod
    def get_description():
        return "Show rebalance status"


class RebalanceStop(Subcommand):
    """The rebalance stop subcommand"""

    def __init__(self):
        super(RebalanceStop, self).__init__()
        self.parser.prog = "couchbase-cli rebalance-stop"

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Ask the cluster manager to stop an in-flight rebalance."""
        _, errors = self.rest.stop_rebalance()
        _exit_if_errors(errors)

        _success("Rebalance stopped")

    @staticmethod
    def get_man_page_name():
        return get_doc_page_name("couchbase-cli-rebalance-stop")

    @staticmethod
    def get_description():
        return "Stop a rebalance"


class Recovery(Subcommand):
    """The recovery command"""

    def __init__(self):
        super(Recovery, self).__init__()
        self.parser.prog = "couchbase-cli recovery"
        group = self.parser.add_argument_group("Recovery options")
        group.add_argument("--server-recovery", dest="servers", metavar="<server_list>", required=True,
                           help="The list of servers to recover")
        group.add_argument("--recovery-type", dest="recovery_type", metavar="type", choices=["delta", "full"],
                           default="delta", help="The recovery type (delta or full)")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Mark each listed failed-over server for delta or full recovery."""
        servers = apply_default_port(opts.servers)
        for server in servers:
            # one REST call per server; abort on the first error
            _, errors = self.rest.recovery(server, opts.recovery_type)
            _exit_if_errors(errors)

        _success("Servers recovered")

    @staticmethod
    def get_man_page_name():
        return get_doc_page_name("couchbase-cli-recovery")

    @staticmethod
    def get_description():
        return "Recover one or more servers"


class ResetAdminPassword(LocalSubcommand):
    """The reset admin password command

    Runs locally on the node: authenticates with the node's localtoken file
    rather than normal cluster credentials.
    """

    def __init__(self):
        super(ResetAdminPassword, self).__init__()
        self.parser.prog = "couchbase-cli reset-admin-password"
        group = self.parser.add_argument_group("Reset password options")
        group.add_argument("--new-password", dest="new_password", metavar="<password>", required=False,
                           action=CBNonEchoedAction, envvar=None,
                           prompt_text="Enter new administrator password:",
                           confirm_text="Confirm new administrator password:",
                           help="The new administrator password")
        group.add_argument("--regenerate", dest="regenerate", action="store_true",
                           help="Generates a random administrator password")
        group.add_argument("-P", "--port", metavar="<port>", default="8091",
                           help="The REST API port, defaults to 8091")

    def execute(self, opts):
        """Set a given admin password or have the server generate a random one."""
        # The localtoken file grants local administrative access without credentials
        token = _exit_on_file_read_failure(os.path.join(opts.config_path, "localtoken")).rstrip()
        rest = ClusterManager("http://127.0.0.1:" + opts.port, "@localtoken", token)
        check_cluster_initialized(rest)
        check_versions(rest)

        if opts.new_password is not None and opts.regenerate:
            _exit_if_errors(["Cannot specify both --new-password and --regenerate at the same time"])
        elif opts.new_password is not None:
            _, errors = rest.set_admin_password(opts.new_password)
            _exit_if_errors(errors)
            _success("Administrator password changed")
        elif opts.regenerate:
            result, errors = rest.regenerate_admin_password()
            _exit_if_errors(errors)
            # print the generated password so the operator can record it
            print(result["password"])
        else:
            _exit_if_errors(["No parameters specified"])

    @staticmethod
    def get_man_page_name():
        return get_doc_page_name("couchbase-cli-reset-admin-password")

    @staticmethod
    def get_description():
        return "Resets the administrator password"


class ServerAdd(Subcommand):
    """The server add command"""

    def __init__(self):
        super(ServerAdd, self).__init__()
        self.parser.prog = "couchbase-cli server-add"
        group = self.parser.add_argument_group("Server add options")
        group.add_argument("--server-add", dest="servers", metavar="<server_list>", required=True,
                           help="The list of servers to add")
        group.add_argument("--server-add-username", dest="server_username", metavar="<username>",
                           required=True, help="The username for the server to add")
        group.add_argument("--server-add-password", dest="server_password", metavar="<password>",
                           required=True, help="The password for the server to add")
        group.add_argument("--group-name", dest="group_name", metavar="<name>",
                           help="The server group to add this server into")
        group.add_argument("--services", dest="services", default="data", metavar="<services>",
                           help="The services this server will run")
        group.add_argument("--index-storage-setting", dest="index_storage_mode", metavar="<mode>",
                           choices=["default", "memopt"], help="The index storage mode")

    @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False)
    def execute(self, opts):
        """Add one or more servers to the cluster, configuring index storage if needed."""
        if not self.enterprise and opts.index_storage_mode == 'memopt':
            _exit_if_errors(["memopt option for --index-storage-setting can only be configured on enterprise edition"])

        opts.services, errors = process_services(opts.services, self.enterprise)
        _exit_if_errors(errors)

        settings, errors = self.rest.index_settings()
        _exit_if_errors(errors)

        # if the cluster has no index storage mode yet and this node runs the index
        # service, fall back to the version-appropriate default
        if opts.index_storage_mode is None and settings['storageMode'] == "" and "index" in opts.services:
            opts.index_storage_mode = "default"

        # For supporting the default index backend changing from forestdb to plasma in Couchbase 5.0
        default = "plasma"
        if opts.index_storage_mode == "default" and settings['storageMode'] == "forestdb" or not self.enterprise:
            default = "forestdb"

        if opts.index_storage_mode:
            param = index_storage_mode_to_param(opts.index_storage_mode, default)
            _, errors = self.rest.set_index_settings(param, None, None, None, None, None, None, None)
            _exit_if_errors(errors)

        servers = opts.servers.split(',')
        for server in servers:
            _, errors = self.rest.add_server(server, opts.group_name, opts.server_username, opts.server_password,
                                             opts.services)
            _exit_if_errors(errors)

        _success("Server added")

    @staticmethod
    def get_man_page_name():
        return get_doc_page_name("couchbase-cli-server-add")

    @staticmethod
    def get_description():
        return "Add servers to the cluster"


class ServerEshell(Subcommand):
    """The server eshell subcommand"""

    def __init__(self):
        super(ServerEshell, self).__init__()
        self.parser.prog = "couchbase-cli server-eshell"
        group = self.parser.add_argument_group("Server eshell options")
        group.add_argument("--vm", dest="vm", default="ns_server", metavar="<name>",
                           help="The vm to connect to")
        group.add_argument("--erl-path", dest="erl_path", metavar="<path>", default=CB_BIN_PATH,
                           help="Override the path to the erl executable")

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Spawn a hidden Erlang remote shell attached to the selected server VM."""
        # Cluster does not need to be initialized for this command
        result, errors = self.rest.node_info()
        _exit_if_errors(errors)

        node = result['otpNode']
        cookie = result['otpCookie']

        # non-ns_server VMs use the babysitter's cookie and a derived node name
        if opts.vm != 'ns_server':
            cookie, errors = self.rest.get_babysitter_cookie()
            _exit_if_errors(errors)

            [short, _] = node.split('@')
            if opts.vm == 'babysitter':
                node = f'babysitter_of_{short}@cb.local'
            elif opts.vm == 'couchdb':
                node = f'couchdb_{short}@cb.local'
            else:
                _exit_if_errors([f'Unknown vm type `{opts.vm}`'])

        # random node name for this shell to avoid clashing with other sessions
        rand_chars = ''.join(random.choice(string.ascii_letters) for _ in range(20))
        name = f'ctl-{rand_chars}@127.0.0.1'

        cb_erl = os.path.join(opts.erl_path, 'erl')
        if os.path.isfile(cb_erl):
            path = cb_erl
        else:
            _warning("Cannot locate Couchbase erlang. Attempting to use non-Couchbase erlang")
            path = 'erl'

        inetrc_file = os.path.join(CB_ETC_PATH, 'hosts.cfg')
        if os.path.isfile(inetrc_file):
            inetrc_opt = ['-kernel', 'inetrc', f'"{inetrc_file}"']
        else:
            inetrc_opt = []

        ns_server_ebin_path = os.path.join(CB_LIB_PATH, "ns_server", "erlang", "lib", "ns_server", "ebin")

        # temporary config file telling the distribution layer which address
        # family the cluster uses; must stay alive for the duration of the shell
        with tempfile.NamedTemporaryFile() as temp:
            temp.write(f'[{{preferred_local_proto,{result["addressFamily"]}_tcp_dist}}].'.encode())
            temp.flush()
            temp_name = temp.name

            args = [path, '-name', name, '-setcookie', cookie, '-hidden', '-remsh', node, '-proto_dist', 'cb',
                    '-epmd_module', 'cb_epmd', '-pa', ns_server_ebin_path, '-kernel', 'dist_config_file',
                    f'"{temp_name}"'] + inetrc_opt

            if opts.debug:
                print(f'Running {" ".join(args)}')

            try:
                subprocess.call(args)
            except OSError:
                _exit_if_errors(["Unable to find the erl executable"])

    @staticmethod
    def get_man_page_name():
        return get_doc_page_name("couchbase-cli-server-eshell")

    @staticmethod
    def get_description():
        return "Opens a shell to the Couchbase cluster manager"

    @staticmethod
    def is_hidden():
        # Internal command not recommended for production use
        return True


class ServerInfo(Subcommand):
    """The server info subcommand"""

    def __init__(self):
        super(ServerInfo, self).__init__()
        self.parser.prog = "couchbase-cli server-info"

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Print this node's info document as JSON."""
        # Cluster does not need to be initialized for this command
        result, errors = self.rest.node_info()
        _exit_if_errors(errors)

        print(json.dumps(result, sort_keys=True, indent=2))

    @staticmethod
    def get_man_page_name():
        return get_doc_page_name("couchbase-cli-server-info")

    @staticmethod
    def get_description():
        return "Show details of a node in the cluster"


class ServerList(Subcommand):
    """The server list subcommand"""

    def __init__(self):
        super(ServerList, self).__init__()
        self.parser.prog = "couchbase-cli server-list"

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Print one line per cluster node: otpNode, hostname, status, membership."""
        result, errors = self.rest.pools('default')
_exit_if_errors(errors)

        for node in result['nodes']:
            # NOTE(review): a node document without 'otpNode' aborts with a generic
            # Exception rather than the usual _exit_if_errors path — confirm intended
            if node.get('otpNode') is None:
                raise Exception("could not access node")

            print(node['otpNode'], node['hostname'], node['status'], node['clusterMembership'])

    @staticmethod
    def get_man_page_name():
        return get_doc_page_name("couchbase-cli-server-list")

    @staticmethod
    def get_description():
        return "List all nodes in a cluster"


class ServerReadd(Subcommand):
    """The server readd subcommand (Deprecated)"""

    def __init__(self):
        super(ServerReadd, self).__init__()
        self.parser.prog = "couchbase-cli server-readd"
        group = self.parser.add_argument_group("Server re-add options")
        group.add_argument("--server-add", dest="servers", metavar="<server_list>", required=True,
                           help="The list of servers to recover")
        # The parameters are unused, but kept for backwards compatibility
        group.add_argument("--server-username", dest="server_username", metavar="<username>",
                           help="The admin username for the server")
        group.add_argument("--server-password", dest="server_password", metavar="<password>",
                           help="The admin password for the server")
        group.add_argument("--group-name", dest="name", metavar="<name>",
                           help="The name of the server group")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Re-add previously failed-over servers (deprecated alias for recovery)."""
        _deprecated("Please use the recovery command instead")
        servers = apply_default_port(opts.servers)
        for server in servers:
            _, errors = self.rest.readd_server(server)
            _exit_if_errors(errors)

        _success("Servers recovered")

    @staticmethod
    def get_man_page_name():
        return get_doc_page_name("couchbase-cli-server-readd")

    @staticmethod
    def get_description():
        return "Add failed server back to the cluster"

    @staticmethod
    def is_hidden():
        # Deprecated command in 4.6, hidden in 5.0, pending removal
        return True


class SettingAlert(Subcommand):
    """The setting alert subcommand"""

    def __init__(self):
        super(SettingAlert, self).__init__()
        self.parser.prog = "couchbase-cli setting-alert"
        group = self.parser.add_argument_group("Alert settings")
        group.add_argument("--enable-email-alert", dest="enabled", metavar="<1|0>", required=True,
                           choices=["0", "1"], help="Enable/disable email alerts")
        group.add_argument("--email-recipients", dest="email_recipients", metavar="<email_list>",
                           help="A comma separated list of email addresses")
        group.add_argument("--email-sender", dest="email_sender", metavar="<email_addr>",
                           help="The sender email address")
        group.add_argument("--email-user", dest="email_username", metavar="<username>",
                           default="", help="The email server username")
        group.add_argument("--email-password", dest="email_password", metavar="<password>",
                           default="", help="The email server password")
        group.add_argument("--email-host", dest="email_host", metavar="<host>",
                           help="The email server host")
        group.add_argument("--email-port", dest="email_port", metavar="<port>",
                           help="The email server port")
        group.add_argument("--enable-email-encrypt", dest="email_encrypt", metavar="<1|0>",
                           choices=["0", "1"], help="Enable SSL encryption for emails")
        group.add_argument("--alert-auto-failover-node", dest="alert_af_node", action="store_true",
                           help="Alert when a node is auto-failed over")
        group.add_argument("--alert-auto-failover-max-reached", dest="alert_af_max_reached",
                           action="store_true",
                           help="Alert when the max number of auto-failover nodes was reached")
        group.add_argument("--alert-auto-failover-node-down", dest="alert_af_node_down",
                           action="store_true",
                           help="Alert when a node wasn't auto-failed over because other nodes " +
                           "were down")
        group.add_argument("--alert-auto-failover-cluster-small", dest="alert_af_small",
                           action="store_true",
                           help="Alert when a node wasn't auto-failed over because cluster was" +
                           " too small")
        group.add_argument("--alert-auto-failover-disable", dest="alert_af_disable",
                           action="store_true",
                           help="Alert when a node wasn't auto-failed over because auto-failover" +
                           " is disabled")
        group.add_argument("--alert-ip-changed", dest="alert_ip_changed", action="store_true",
                           help="Alert when a nodes IP address changed")
        group.add_argument("--alert-disk-space", dest="alert_disk_space", action="store_true",
                           help="Alert when disk usage on a node reaches 90%%")
        group.add_argument("--alert-meta-overhead", dest="alert_meta_overhead", action="store_true",
                           help="Alert when metadata overhead is more than 50%%")
        group.add_argument("--alert-meta-oom", dest="alert_meta_oom", action="store_true",
                           help="Alert when all bucket memory is used for metadata")
        group.add_argument("--alert-write-failed", dest="alert_write_failed", action="store_true",
                           help="Alert when writing data to disk has failed")
        group.add_argument("--alert-audit-msg-dropped", dest="alert_audit_dropped", action="store_true",
                           help="Alert when writing event to audit log failed")
        group.add_argument("--alert-indexer-max-ram", dest="alert_indexer_max_ram", action="store_true",
                           help="Alert when indexer is using all of its allocated memory")
        group.add_argument("--alert-timestamp-drift-exceeded", dest="alert_cas_drift",
                           action="store_true",
                           help="Alert when clocks on two servers are more than five seconds" +
                           "apart")
        group.add_argument("--alert-communication-issue", dest="alert_communication_issue",
                           action="store_true",
                           help="Alert when nodes are experiencing communication issues")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Validate email settings and build the list of enabled alert types."""
        # when alerts are enabled the email transport parameters become mandatory
        if opts.enabled == "1":
            if opts.email_recipients is None:
                _exit_if_errors(["--email-recipient must be set when email alerts are enabled"])
            if opts.email_sender is None:
                _exit_if_errors(["--email-sender must be set when email alerts are enabled"])
            if opts.email_host is None:
                _exit_if_errors(["--email-host must be set when email alerts are enabled"])
            if opts.email_port is None:
                _exit_if_errors(["--email-port must be set when email alerts are enabled"])

        # map each --alert-* flag onto its internal alert identifier
        alerts = list()
        if opts.alert_af_node:
            alerts.append('auto_failover_node')
        if opts.alert_af_max_reached:
            alerts.append('auto_failover_maximum_reached')
        if opts.alert_af_node_down:
alerts.append('auto_failover_other_nodes_down')
        if opts.alert_af_small:
            alerts.append('auto_failover_cluster_too_small')
        if opts.alert_af_disable:
            alerts.append('auto_failover_disabled')
        if opts.alert_ip_changed:
            alerts.append('ip')
        if opts.alert_disk_space:
            alerts.append('disk')
        if opts.alert_meta_overhead:
            alerts.append('overhead')
        if opts.alert_meta_oom:
            alerts.append('ep_oom_errors')
        if opts.alert_write_failed:
            alerts.append('ep_item_commit_failed')
        if opts.alert_audit_dropped:
            alerts.append('audit_dropped_events')
        if opts.alert_indexer_max_ram:
            alerts.append('indexer_ram_max_usage')
        if opts.alert_cas_drift:
            alerts.append('ep_clock_cas_drift_threshold_exceeded')
        if opts.alert_communication_issue:
            alerts.append('communication_issue')

        # REST API takes string booleans
        enabled = "true"
        if opts.enabled == "0":
            enabled = "false"

        email_encrypt = "false"
        if opts.email_encrypt == "1":
            email_encrypt = "true"

        _, errors = self.rest.set_alert_settings(enabled, opts.email_recipients, opts.email_sender,
                                                 opts.email_username, opts.email_password, opts.email_host,
                                                 opts.email_port, email_encrypt, ",".join(alerts))
        _exit_if_errors(errors)
        _success("Email alert settings modified")

    @staticmethod
    def get_man_page_name():
        return get_doc_page_name("couchbase-cli-setting-alert")

    @staticmethod
    def get_description():
        return "Modify email alert settings"


class SettingAudit(Subcommand):
    """The settings audit subcommand"""

    def __init__(self):
        super(SettingAudit, self).__init__()
        self.parser.prog = "couchbase-cli setting-audit"
        self.parser.description = "Available only in Couchbase Server Enterprise Edition"
        group = self.parser.add_argument_group("Audit settings")
        group.add_argument("--list-filterable-events", dest="list_events", action="store_true",
                           help="Retrieve a list of filterable event IDs and the descriptions")
        group.add_argument("--get-settings", dest="get_settings", action="store_true",
                           help="Retrieve current audit settings")
        group.add_argument("--set", dest="set_settings", action="store_true",
                           help="Set current audit settings")
        group.add_argument("--audit-enabled", dest="enabled", metavar="<1|0>", choices=["0", "1"],
                           help="Enable/disable auditing")
        group.add_argument("--audit-log-path", dest="log_path", metavar="<path>",
                           help="The audit log path")
        group.add_argument("--audit-log-rotate-interval", dest="rotate_interval", type=(int),
                           metavar="<seconds>", help="The audit log rotate interval")
        group.add_argument("--audit-log-rotate-size", dest="rotate_size", type=(int),
                           metavar="<bytes>", help="The audit log rotate size")
        group.add_argument("--disabled-users", dest="disabled_users", default=None,
                           help="A comma-separated list of users to ignore events from")
        group.add_argument("--disable-events", dest="disable_events", default=None,
                           help="A comma-separated list of audit-event IDs to not audit")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Dispatch to exactly one of: list events, get settings, or set settings."""
        # the three modes are mutually exclusive; exactly one must be chosen
        flags = sum([opts.list_events, opts.get_settings, opts.set_settings])
        if flags != 1:
            _exit_if_errors(["One of the following is required: --list-filterable-events, --get-settings or --set"])

        if opts.list_events:
            descriptors, errors = self.rest.get_id_descriptors()
            _exit_if_errors(errors)

            if opts.output == 'json':
                print(json.dumps(descriptors, indent=4))
                return

            self.format_descriptors_in_table(descriptors)
        elif opts.get_settings:
            audit_settings, errors = self.rest.get_audit_settings()
            _exit_if_errors(errors)

            if opts.output == 'json':
                print(json.dumps(audit_settings, indent=4))
                return

            # the descriptors are needed to render human-readable event names
            descriptors, errors = self.rest.get_id_descriptors()
            _exit_if_errors(errors)
            self.format_audit_settings(audit_settings, descriptors)
        elif opts.set_settings:
            if not (opts.enabled or opts.log_path or opts.rotate_interval or opts.rotate_size
                    or opts.disable_events is not None or opts.disabled_users is not None):
                _exit_if_errors(["At least one of [--audit-enabled, --audit-log-path, --audit-log-rotate-interval,"
                                 " --audit-log-rotate-size, --disabled-users, --disable-events] is required with"
                                 " --set"])

            # Match the behavior in the WebUI, which is to internally translate the '/couchbase' postfix into
            # '/local' see MB-46970 for more information.
            if opts.disabled_users is not None:
                opts.disabled_users = re.sub(r'\/couchbase', '/local', opts.disabled_users)

            _, errors = self.rest.set_audit_settings(opts.enabled, opts.log_path, opts.rotate_interval,
                                                     opts.rotate_size, opts.disable_events, opts.disabled_users)
            _exit_if_errors(errors)
            _success("Audit settings modified")

    @staticmethod
    def format_audit_settings(audit_settings, json_descriptors):
        """Print the audit settings, followed by a per-event enabled/disabled table."""
        print(f'Audit enabled: {audit_settings["auditdEnabled"]}')
        print(f'UUID: {audit_settings["uid"]}')
        print(f'Log path: {audit_settings["logPath"] if "logPath" in audit_settings else "N/A"}')
        print(f'Rotate interval: {audit_settings["rotateInterval"]}')
        print(f'Rotate size: {audit_settings["rotateSize"]}')
        print(f'Disabled users: {audit_settings["disabledUsers"]}')

        if not audit_settings["auditdEnabled"]:
            return

        # change id lists to maps to make lookup o(1)
        disable_map = {eventID for eventID in audit_settings['disabled']}
        json_descriptors.sort(key=itemgetter('module', 'id'))
        all_descriptors_sets = {events["id"] for events in json_descriptors}

        # pad the name column to fit the longest event name
        padding_name = 12
        for descriptor in json_descriptors:
            if len(descriptor['name']) > padding_name:
                padding_name = len(descriptor['name'])

        padding_name += 2
        header = f'{"ID":<6}| {"Module":<15}| {"Name":<{padding_name}}| Enabled'
        print(header)
        print('-' * len(header))
        for descriptor in json_descriptors:
            print(f'{descriptor["id"]:<6}| {descriptor["module"]:<15}| {descriptor["name"]:<{padding_name}}| '
                  f'{"False" if descriptor["id"] in disable_map else "True"}')

        # disabled IDs that have no matching descriptor are shown as unknown
        not_recognized = disable_map - all_descriptors_sets
        for unrecognized in not_recognized:
            print(f'{unrecognized:<6}| {"unknown":<15}| {"unknown":<{padding_name}}| False')

    @staticmethod
    def format_descriptors_in_table(json_descriptors):
        """Print all filterable audit events as an ID/module/name/description table."""
        sorted_descriptors = sorted(json_descriptors, key=itemgetter('module', 'id'))
        padding_name = 15
        for descriptor in sorted_descriptors:
            if len(descriptor['name']) > padding_name:
                padding_name = len(descriptor['name'])

        padding_name += 2
        header = f'{"ID":<6}| {"Module":<15}| {"Name":<{padding_name}}| Description'
        print(header)
        print('-' * len(header))
        for descriptor in sorted_descriptors:
            print(f'{descriptor["id"]:<6}| {descriptor["module"]:<15}| {descriptor["name"]:<{padding_name}}| '
                  f'{descriptor["description"]}')

    @staticmethod
    def get_man_page_name():
        return get_doc_page_name("couchbase-cli-setting-audit")

    @staticmethod
    def get_description():
        return "Modify audit settings"


class SettingAutofailover(Subcommand):
    """The settings auto-failover subcommand"""

    def __init__(self):
        super(SettingAutofailover, self).__init__()
        self.parser.prog = "couchbase-cli setting-autofailover"
        group = self.parser.add_argument_group("Auto-failover settings")
        group.add_argument("--enable-auto-failover", dest="enabled", metavar="<1|0>", choices=["0", "1"],
                           help="Enable/disable auto-failover")
        group.add_argument("--auto-failover-timeout", dest="timeout", metavar="<seconds>", type=(int),
                           help="The auto-failover timeout")
        group.add_argument("--enable-failover-of-server-groups", dest="enable_failover_of_server_groups",
                           metavar="<1|0>", choices=["0", "1"],
                           help="Enable/disable auto-failover of server Groups")
        group.add_argument("--max-failovers", dest="max_failovers", metavar="<1|2|3>", choices=["1", "2", "3"],
                           help="Maximum number of times an auto-failover event can happen")
        group.add_argument("--enable-failover-on-data-disk-issues", dest="enable_failover_on_data_disk_issues",
                           metavar="<1|0>", choices=["0", "1"],
                           help="Enable/disable auto-failover when the Data Service reports disk issues. " +
                           "Couchbase Server Enterprise Edition only.")
        group.add_argument("--failover-data-disk-period", dest="failover_on_data_disk_period",
                           metavar="<seconds>", type=(int),
                           help="The amount of time the Data Serivce disk failures has to be happening for to trigger"
                           " an auto-failover")
        group.add_argument("--can-abort-rebalance", metavar="<1|0>", choices=["1", "0"], dest="can_abort_rebalance",
                           help="Enables auto-failover to abort rebalance and perform the failover. (EE only)")

    @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False)
    def execute(self, opts):
        """Validate flag combinations and apply the auto-failover settings."""
        # the REST API expects string booleans; "1"/"0" CLI values are translated
        if opts.enabled == "1":
            opts.enabled = "true"
        elif opts.enabled == "0":
            opts.enabled = "false"

        if opts.enable_failover_on_data_disk_issues == "1":
            opts.enable_failover_on_data_disk_issues = "true"
        elif opts.enable_failover_on_data_disk_issues == "0":
            opts.enable_failover_on_data_disk_issues = "false"

        if opts.enable_failover_of_server_groups == "1":
            opts.enable_failover_of_server_groups = "true"
        elif opts.enable_failover_of_server_groups == "0":
            opts.enable_failover_of_server_groups = "false"

        # several options are enterprise-only
        if not self.enterprise:
            if opts.enable_failover_of_server_groups:
                _exit_if_errors(["--enable-failover-of-server-groups can only be configured on enterprise edition"])
            if opts.enable_failover_on_data_disk_issues or opts.failover_on_data_disk_period:
                _exit_if_errors(["Auto failover on Data Service disk issues can only be configured on enterprise" +
                                 " edition"])
            if opts.max_failovers:
                _exit_if_errors(["--max-count can only be configured on enterprise edition"])
            if opts.can_abort_rebalance:
                _exit_if_errors(["--can-abort-rebalance can only be configured on enterprise edition"])

        if not any([opts.enabled, opts.timeout, opts.enable_failover_on_data_disk_issues,
                    opts.failover_on_data_disk_period, opts.enable_failover_of_server_groups,
                    opts.max_failovers]):
            _exit_if_errors(["No settings specified to be changed"])

        if ((opts.enable_failover_on_data_disk_issues is None
             or opts.enable_failover_on_data_disk_issues == "false")
                and opts.failover_on_data_disk_period):
            _exit_if_errors(["--enable-failover-on-data-disk-issues must be set to 1 when auto-failover Data"
                             " Service disk period has been set"])

        if opts.enable_failover_on_data_disk_issues and opts.failover_on_data_disk_period is None:
            _exit_if_errors(["--failover-data-disk-period must be set when auto-failover on Data Service disk"
                             " is enabled"])

        if opts.enabled == "false" or opts.enabled is None:
            if opts.enable_failover_on_data_disk_issues or opts.failover_on_data_disk_period:
                _exit_if_errors(["--enable-auto-failover must be set to 1 when auto-failover on Data Service disk issues"
                                 " settings are being configured"])
            if opts.enable_failover_of_server_groups:
                _exit_if_errors(["--enable-auto-failover must be set to 1 when enabling auto-failover of Server Groups"])
            if opts.timeout:
                _warning("Timeout specified will not take affect because auto-failover is being disabled")

        if opts.can_abort_rebalance == '1':
            opts.can_abort_rebalance = 'true'
        elif opts.can_abort_rebalance == '0':
            opts.can_abort_rebalance = 'false'

        _, errors = self.rest.set_autofailover_settings(opts.enabled, opts.timeout,
                                                        opts.enable_failover_of_server_groups, opts.max_failovers,
                                                        opts.enable_failover_on_data_disk_issues,
                                                        opts.failover_on_data_disk_period, opts.can_abort_rebalance)
        _exit_if_errors(errors)
        _success("Auto-failover settings modified")

    @staticmethod
    def get_man_page_name():
        return get_doc_page_name("couchbase-cli-setting-autofailover")

    @staticmethod
    def get_description():
        return "Modify auto failover settings"


class SettingAutoreprovision(Subcommand):
    """The settings auto-reprovision subcommand"""

    def __init__(self):
        super(SettingAutoreprovision, self).__init__()
        self.parser.prog = "couchbase-cli setting-autoreprovision"
        group = self.parser.add_argument_group("Auto-reprovision settings")
        group.add_argument("--enabled", dest="enabled", metavar="<1|0>", required=True, choices=["0", "1"],
                           help="Enable/disable auto-reprovision")
group.add_argument("--max-nodes", dest="max_nodes", metavar="<num>", type=(int), help="The numbers of server that can be auto-reprovisioned before a rebalance") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): if opts.enabled == "1": opts.enabled = "true" elif opts.enabled == "0": opts.enabled = "false" if opts.enabled == "true" and opts.max_nodes is None: _exit_if_errors(["--max-nodes must be specified if auto-reprovision is enabled"]) if not (opts.enabled or opts.max_nodes): _exit_if_errors(["No settings specified to be changed"]) if (opts.enabled is None or opts.enabled == "false") and opts.max_nodes: _warning("--max-servers will not take affect because auto-reprovision is being disabled") _, errors = self.rest.set_autoreprovision_settings(opts.enabled, opts.max_nodes) _exit_if_errors(errors) _success("Auto-reprovision settings modified") @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-setting-autoreprovision") @staticmethod def get_description(): return "Modify auto-reprovision settings" class SettingCluster(Subcommand): """The settings cluster subcommand""" def __init__(self): super(SettingCluster, self).__init__() self.parser.prog = "couchbase-cli setting-cluster" group = self.parser.add_argument_group("Cluster settings") group.add_argument("--cluster-username", dest="new_username", metavar="<username>", help="The cluster administrator username") group.add_argument("--cluster-password", dest="new_password", metavar="<password>", help="Only compact the data files") group.add_argument("--cluster-port", dest="port", type=(int), metavar="<port>", help="The cluster administration console port") group.add_argument("--cluster-ramsize", dest="data_mem_quota", metavar="<quota>", type=(int), help="The data service memory quota in mebibytes") group.add_argument("--cluster-index-ramsize", dest="index_mem_quota", metavar="<quota>", type=(int), help="The index service memory quota in mebibytes") 
group.add_argument("--cluster-fts-ramsize", dest="fts_mem_quota", metavar="<quota>", type=(int), help="The full-text service memory quota in mebibytes") group.add_argument("--cluster-eventing-ramsize", dest="eventing_mem_quota", metavar="<quota>", type=(int), help="The Eventing service memory quota in mebibytes") group.add_argument("--cluster-analytics-ramsize", dest="cbas_mem_quota", metavar="<quota>", type=(int), help="The analytics service memory quota in mebibytes") group.add_argument("--cluster-name", dest="name", metavar="<name>", help="The cluster name") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): if (opts.data_mem_quota or opts.index_mem_quota or opts.fts_mem_quota or opts.cbas_mem_quota or opts.eventing_mem_quota or opts.name): _, errors = self.rest.set_pools_default(opts.data_mem_quota, opts.index_mem_quota, opts.fts_mem_quota, opts.cbas_mem_quota, opts.eventing_mem_quota, opts.name) _exit_if_errors(errors) if opts.new_username or opts.new_password or opts.port: username = opts.username if opts.new_username: username = opts.new_username password = opts.password if opts.new_password: password = opts.new_password _, errors = self.rest.set_admin_credentials(username, password, opts.port) _exit_if_errors(errors) _success("Cluster settings modified") @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-setting-cluster") @staticmethod def get_description(): return "Modify cluster settings" class ClusterEdit(SettingCluster): """The cluster edit subcommand (Deprecated)""" def __init__(self): super(ClusterEdit, self).__init__() self.parser.prog = "couchbase-cli cluster-edit" def execute(self, opts): _deprecated("Please use the setting-cluster command instead") super(ClusterEdit, self).execute(opts) @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-cluster-edit") @staticmethod def is_hidden(): # Deprecated command in 4.6, hidden in 5.0, pending removal return True 
class SettingCompaction(Subcommand): """The setting compaction subcommand""" def __init__(self): super(SettingCompaction, self).__init__() self.parser.prog = "couchbase-cli setting-compaction" group = self.parser.add_argument_group("Compaction settings") group.add_argument("--compaction-db-percentage", dest="db_perc", metavar="<perc>", type=(int), help="Compacts the db once the fragmentation reaches this percentage") group.add_argument("--compaction-db-size", dest="db_size", metavar="<mebibytes>", type=(int), help="Compacts db once the fragmentation reaches this size (MiB)") group.add_argument("--compaction-view-percentage", dest="view_perc", metavar="<perc>", type=(int), help="Compacts the view once the fragmentation reaches this percentage") group.add_argument("--compaction-view-size", dest="view_size", metavar="<mebibytes>", type=(int), help="Compacts view once the fragmentation reaches this size (MiB)") group.add_argument("--compaction-period-from", dest="from_period", metavar="<HH:MM>", help="Only run compaction after this time") group.add_argument("--compaction-period-to", dest="to_period", metavar="<HH:MM>", help="Only run compaction before this time") group.add_argument("--enable-compaction-abort", dest="enable_abort", metavar="<1|0>", choices=["0", "1"], help="Allow compactions to be aborted") group.add_argument("--enable-compaction-parallel", dest="enable_parallel", metavar="<1|0>", choices=["0", "1"], help="Allow parallel compactions") group.add_argument("--metadata-purge-interval", dest="purge_interval", metavar="<float>", type=(float), help="The metadata purge interval") group.add_argument("--gsi-compaction-mode", dest="gsi_mode", choices=["append", "circular"], help="Sets the gsi compaction mode (append or circular)") group.add_argument("--compaction-gsi-percentage", dest="gsi_perc", type=(int), metavar="<perc>", help="Starts compaction once gsi file fragmentation has reached this percentage" + "(Append mode only)") 
group.add_argument("--compaction-gsi-interval", dest="gsi_interval", metavar="<days>", help="A comma separated list of days compaction can run (Circular mode only)") group.add_argument("--compaction-gsi-period-from", dest="gsi_from_period", metavar="<HH:MM>", help="Allow gsi compaction to run after this time (Circular mode only)") group.add_argument("--compaction-gsi-period-to", dest="gsi_to_period", metavar="<HH:MM>", help="Allow gsi compaction to run before this time (Circular mode only)") group.add_argument("--enable-gsi-compaction-abort", dest="enable_gsi_abort", metavar="<1|0>", choices=["0", "1"], help="Abort gsi compaction if when run outside of the accepted interaval" + "(Circular mode only)") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): if opts.db_perc is not None and (opts.db_perc < 2 or opts.db_perc > 100): _exit_if_errors(["--compaction-db-percentage must be between 2 and 100"]) if opts.view_perc is not None and (opts.view_perc < 2 or opts.view_perc > 100): _exit_if_errors(["--compaction-view-percentage must be between 2 and 100"]) if opts.db_size is not None: if int(opts.db_size) < 1: _exit_if_errors(["--compaction-db-size must be between greater than 1 or infinity"]) opts.db_size = int(opts.db_size) * 1024**2 if opts.view_size is not None: if int(opts.view_size) < 1: _exit_if_errors(["--compaction-view-size must be between greater than 1 or infinity"]) opts.view_size = int(opts.view_size) * 1024**2 if opts.from_period and not (opts.to_period and opts.enable_abort): errors = [] if opts.to_period is None: errors.append("--compaction-period-to is required when using --compaction-period-from") if opts.enable_abort is None: errors.append("--enable-compaction-abort is required when using --compaction-period-from") _exit_if_errors(errors) if opts.to_period and not (opts.from_period and opts.enable_abort): errors = [] if opts.from_period is None: errors.append("--compaction-period-from is required when using 
--compaction-period-to") if opts.enable_abort is None: errors.append("--enable-compaction-abort is required when using --compaction-period-to") _exit_if_errors(errors) if opts.enable_abort and not (opts.from_period and opts.to_period): errors = [] if opts.from_period is None: errors.append("--compaction-period-from is required when using --enable-compaction-abort") if opts.to_period is None: errors.append("--compaction-period-to is required when using --enable-compaction-abort") _exit_if_errors(errors) from_hour, from_min = self._handle_timevalue(opts.from_period, "--compaction-period-from") to_hour, to_min = self._handle_timevalue(opts.to_period, "--compaction-period-to") if opts.enable_abort == "1": opts.enable_abort = "true" elif opts.enable_abort == "0": opts.enable_abort = "false" if opts.enable_parallel == "1": opts.enable_parallel = "true" else: opts.enable_parallel = "false" if opts.purge_interval is not None and (opts.purge_interval < 0.04 or opts.purge_interval > 60.0): _exit_if_errors(["--metadata-purge-interval must be between 0.04 and 60.0"]) g_from_hour = None g_from_min = None g_to_hour = None g_to_min = None if opts.gsi_mode == "append": opts.gsi_mode = "full" if opts.gsi_perc is None: _exit_if_errors(['--compaction-gsi-percentage must be specified when --gsi-compaction-mode is set ' 'to append']) elif opts.gsi_mode == "circular": if opts.gsi_from_period is not None and opts.gsi_to_period is None: _exit_if_errors(["--compaction-gsi-period-to is required with --compaction-gsi-period-from"]) if opts.gsi_to_period is not None and opts.gsi_from_period is None: _exit_if_errors(["--compaction-gsi-period-from is required with --compaction-gsi-period-to"]) g_from_hour, g_from_min = self._handle_timevalue(opts.gsi_from_period, "--compaction-gsi-period-from") g_to_hour, g_to_min = self._handle_timevalue(opts.gsi_to_period, "--compaction-gsi-period-to") if opts.enable_gsi_abort == "1": opts.enable_gsi_abort = "true" else: opts.enable_gsi_abort = "false" _, 
errors = self.rest.set_compaction_settings(opts.db_perc, opts.db_size, opts.view_perc, opts.view_size, from_hour, from_min, to_hour, to_min, opts.enable_abort, opts.enable_parallel, opts.purge_interval, opts.gsi_mode, opts.gsi_perc, opts.gsi_interval, g_from_hour, g_from_min, g_to_hour, g_to_min, opts.enable_gsi_abort) _exit_if_errors(errors) _success("Compaction settings modified") def _handle_timevalue(self, opt_value, opt_name): hour = None minute = None if opt_value: if opt_value.find(':') == -1: _exit_if_errors([f'Invalid value for {opt_name}, must be in form XX:XX']) hour, minute = opt_value.split(':', 1) try: hour = int(hour) except ValueError: _exit_if_errors([f'Invalid hour value for {opt_name}, must be an integer']) if hour not in range(24): _exit_if_errors([f'Invalid hour value for {opt_name}, must be 0-23']) try: minute = int(minute) except ValueError: _exit_if_errors([f'Invalid minute value for {opt_name}, must be an integer']) if minute not in range(60): _exit_if_errors([f'Invalid minute value for {opt_name}, must be 0-59']) return hour, minute @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-setting-compaction") @staticmethod def get_description(): return "Modify auto-compaction settings" class SettingIndex(Subcommand): """The setting index subcommand""" def __init__(self): super(SettingIndex, self).__init__() self.parser.prog = "couchbase-cli setting-index" group = self.parser.add_argument_group("Index settings") group.add_argument("--index-max-rollback-points", dest="max_rollback", metavar="<num>", type=(int), help="Max rollback points") group.add_argument("--index-stable-snapshot-interval", dest="stable_snap", type=(int), metavar="<seconds>", help="Stable snapshot interval in seconds") group.add_argument("--index-memory-snapshot-interval", dest="mem_snap", metavar="<ms>", type=(int), help="Stable snapshot interval in milliseconds") group.add_argument("--index-storage-setting", dest="storage_mode", metavar="<mode>", 
choices=["default", "memopt"], help="The index storage backend") group.add_argument("--index-threads", dest="threads", metavar="<num>", type=(int), help="The number of indexer threads") group.add_argument("--index-log-level", dest="log_level", metavar="<level>", choices=["debug", "silent", "fatal", "error", "warn", "info", "verbose", "timing", "trace"], help="The indexer log level") group.add_argument('--replicas', metavar='<num>', type=int, help='Number of index replicas') group.add_argument('--optimize-placement', metavar='<1|0>', type=str, help='Optimize index placement on a rebalance.') @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False) def execute(self, opts): if (opts.max_rollback is None and opts.stable_snap is None and opts.mem_snap is None and opts.storage_mode is None and opts.threads is None and opts.log_level is None and opts.replicas is None and opts.optimize_placement is None): _exit_if_errors(["No settings specified to be changed"]) settings, errors = self.rest.index_settings() _exit_if_errors(errors) # For supporting the default index backend changing from forestdb to plasma in Couchbase 5.0 default = "plasma" if opts.storage_mode == "default" and settings['storageMode'] == "forestdb" or not self.enterprise: default = "forestdb" opts.storage_mode = index_storage_mode_to_param(opts.storage_mode, default) _, errors = self.rest.set_index_settings(opts.storage_mode, opts.max_rollback, opts.stable_snap, opts.mem_snap, opts.threads, opts.log_level, opts.replicas, opts.optimize_placement) _exit_if_errors(errors) _success("Indexer settings modified") @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-setting-index") @staticmethod def get_description(): return "Modify index settings" class SettingSaslauthd(Subcommand): """The setting sasl subcommand""" def __init__(self): super(SettingSaslauthd, self).__init__() self.parser.prog = "couchbase-cli setting-saslauthd" group = 
self.parser.add_argument_group("saslauthd settings") group.add_argument("--enabled", dest="enabled", metavar="<1|0>", required=True, choices=["0", "1"], help="Enable/disable saslauthd") group.add_argument("--admins", dest="admins", metavar="<user_list>", help="A comma separated list of full admins") group.add_argument("--roadmins", dest="roadmins", metavar="<user_list>", help="A comma separated list of read only admins") group.add_argument("--default", dest="default", default="none", choices=["admins", "roadmins", "none"], metavar="<default>", help="Default roles for saslauthd users") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): admins = "" if opts.admins: admins = opts.admins.replace(",", "\n") ro_admins = "" if opts.roadmins: ro_admins = opts.roadmins.replace(",", "\n") errors = None if opts.enabled == '1': if opts.default == 'admins': if ro_admins: _warning("--ro-admins option ignored since default is read only admins") _, errors = self.rest.sasl_settings('true', ro_admins, None) elif opts.default == 'roadmins': if admins: _warning("--admins option ignored since default is admins") _, errors = self.rest.sasl_settings('true', None, admins) else: _, errors = self.rest.sasl_settings('true', ro_admins, admins) else: if admins: _warning("--admins option ignored since saslauthd is being disabled") if ro_admins: _warning("--roadmins option ignored since saslauthd is being disabled") _, errors = self.rest.sasl_settings('false', "", "") _exit_if_errors(errors) _success("saslauthd settings modified") @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-setting-saslauthd") @staticmethod def get_description(): return "Modify saslauthd settings" class SettingLdap(Subcommand): """The setting Ldap subcommand""" def __init__(self): super(SettingLdap, self).__init__() self.parser.prog = "couchbase-cli setting-ldap" group = self.parser.add_argument_group("LDAP settings") group.add_argument("--get", dest="get", 
default=False, action="store_true", help='When the get flag is provided it will retrieve the current ldap settings') group.add_argument("--authentication-enabled", dest="authentication_enabled", metavar="<1|0>", choices=["1", "0"], help="Enable LDAP authentication, otherwise it defaults to disable") group.add_argument("--authorization-enabled", dest="authorization_enabled", metavar="<1|0>", choices=["1", "0"], help="Enable LDAP authorization, otherwise defaults to false") group.add_argument("--hosts", dest="hosts", metavar="<host_list>", help="Coma separated list of LDAP servers") group.add_argument("--port", dest="port", metavar="<port>", help="LDAP port", type=int) group.add_argument("--encryption", dest="encryption", metavar="<tls|startTLS|none>", choices=["tls", "startTLS", "none"], help="Encryption used") group.add_argument("--server-cert-validation", dest="server_cert_val", metavar="<1|0>", choices=["0", "1"], help="Enable or disable certificate validation when connecting to LDAP server") group.add_argument("--ldap-cacert", dest="cacert_ldap", metavar="<path>", help="CA certificate to be used for LDAP server certificate validation, required if" + " certificate validation is not disabled") group.add_argument("--user-dn-query", metavar="<query>", dest="user_dn_query", help="LDAP query to get user's DN. Must contains at least one instance of %%u") group.add_argument("--user-dn-template", metavar="<template>", dest="user_dn_template", help="Template to construct user's DN. 
Must contain at least one instance of %%u") group.add_argument("--client-cert", metavar="<path>", dest="client_cert", help="The client TLS certificate for authentication") group.add_argument("--client-key", metavar="<path>", dest="client_key", help="The client TLS key for authentication") group.add_argument("--request-timeout", metavar="<ms>", dest="timeout", help="Request time out in milliseconds") group.add_argument("--max-parallel", dest="max_parallel", metavar="<max>", type=int, help="Maximum number of parallel connections that can be established") group.add_argument("--max-cache-size", dest="max_cache_size", metavar="<size>", help="Maximum number of cached LDAP requests") group.add_argument("--cache-value-lifetime", dest="cache_value_lifetime", metavar="<ms>", help="Cache value lifetime in milliseconds") group.add_argument("--bind-dn", dest="bind_dn", metavar="<DN>", help="The DN of a user to bind as to performance lookups") group.add_argument("--bind-password", dest="bind_password", metavar="<password>", help="The password of the bind user") group.add_argument("--group-query", dest="group_query", metavar="<query>", help="LDAP query to get user's groups by username") group.add_argument("--enable-nested-groups", dest="nested_groups", metavar="<1|0>", choices=["0", "1"]) group.add_argument("--nested-group-max-depth", dest="nested_max_depth", metavar="<max>", type=int, help="Maximum number of recursive group requests allowed. 
[1 - 100]") @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=True) def execute(self, opts): if opts.get: data, rv = self.rest.get_ldap() _exit_if_errors(rv) print(json.dumps(data)) else: self._set(opts) def _set(self, opts): if opts.authentication_enabled == '1': opts.authentication_enabled = 'true' elif opts.authentication_enabled == '0': opts.authentication_enabled = 'false' if opts.authorization_enabled == '1': opts.authorization_enabled = 'true' elif opts.authorization_enabled == '0': opts.authorization_enabled = 'false' if opts.server_cert_val == '1': opts.server_cert_val = 'true' elif opts.server_cert_val == '0': opts.server_cert_val = 'false' if opts.server_cert_val == 'false' and opts.cacert_ldap is not None: _exit_if_errors(['--server-cert-validation 0 and --ldap-cert can not be used together']) if opts.cacert_ldap is not None: opts.cacert_ldap = _exit_on_file_read_failure(opts.cacert_ldap) if opts.encryption == "tls": opts.encryption = "TLS" elif opts.encryption == "startTLS": opts.encryption = "StartTLSExtension" elif opts.encryption == "none": opts.encryption = "None" if opts.nested_groups == '1': opts.nested_groups = 'true' elif opts.nested_groups == '0': opts.nested_groups = 'false' if opts.user_dn_query is not None and opts.user_dn_template is not None: _exit_if_errors(['--user-dn-query and --user-dn-template can not be used together']) mapping = None if opts.user_dn_query is not None: mapping = f'{{"query": "{opts.user_dn_query}"}}' if opts.user_dn_template is not None: mapping = f'{{"template": "{opts.user_dn_template}"}}' if (opts.client_cert and not opts.client_key) or (not opts.client_cert and opts.client_key): _exit_if_errors(['--client-cert and --client--key have to be used together']) if opts.client_cert is not None: opts.client_cert = _exit_on_file_read_failure(opts.client_cert) if opts.client_key is not None: opts.client_key = _exit_on_file_read_failure(opts.client_key) _, errors = 
self.rest.ldap_settings(opts.authentication_enabled, opts.authorization_enabled, opts.hosts, opts.port, opts.encryption, mapping, opts.timeout, opts.max_parallel, opts.max_cache_size, opts.cache_value_lifetime, opts.bind_dn, opts.bind_password, opts.client_cert, opts.client_key, opts.group_query, opts.nested_groups, opts.nested_max_depth, opts.server_cert_val, opts.cacert_ldap) _exit_if_errors(errors) _success("LDAP settings modified") @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-setting-ldap") @staticmethod def get_description(): return "Modify LDAP settings" class SettingNotification(Subcommand): """The settings notification subcommand""" def __init__(self): super(SettingNotification, self).__init__() self.parser.prog = "couchbase-cli setting-notification" group = self.parser.add_argument_group("Notification Settings") group.add_argument("--enable-notifications", dest="enabled", metavar="<1|0>", required=True, choices=["0", "1"], help="Enables/disable software notifications") @rest_initialiser(version_check=True) def execute(self, opts): enabled = None if opts.enabled == "1": enabled = True elif opts.enabled == "0": enabled = False _, errors = self.rest.enable_notifications(enabled) _exit_if_errors(errors) _success("Software notification settings updated") @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-setting-notification") @staticmethod def get_description(): return "Modify software notification settings" class SettingPasswordPolicy(Subcommand): """The settings password policy subcommand""" def __init__(self): super(SettingPasswordPolicy, self).__init__() self.parser.prog = "couchbase-cli setting-password-policy" group = self.parser.add_argument_group("Password Policy Settings") group.add_argument("--get", dest="get", action="store_true", default=False, help="Get the current password policy") group.add_argument("--set", dest="set", action="store_true", default=False, help="Set a new password 
policy") group.add_argument("--min-length", dest="min_length", type=int, default=None, metavar="<num>", help="Specifies the minimum password length for new passwords") group.add_argument("--uppercase", dest="upper_case", metavar="<0|1>", choices=["0", "1"], help="Specifies new passwords must contain an upper case character") group.add_argument("--lowercase", dest="lower_case", metavar="<0|1>", choices=["0", "1"], help="Specifies new passwords must contain a lower case character") group.add_argument("--digit", dest="digit", metavar="<0|1>", choices=["0", "1"], help="Specifies new passwords must at least one digit") group.add_argument("--special-char", dest="special_char", metavar="<0|1>", choices=["0", "1"], help="Specifies new passwords must at least one special character") @rest_initialiser(version_check=True) def execute(self, opts): actions = sum([opts.get, opts.set]) if actions == 0: _exit_if_errors(["Must specify either --get or --set"]) elif actions > 1: _exit_if_errors(["The --get and --set flags may not be specified at the same time"]) elif opts.get: if opts.min_length is not None or any([opts.upper_case, opts.lower_case, opts.digit, opts.special_char]): _exit_if_errors(["The --get flag must be used without any other arguments"]) self._get() elif opts.set: if opts.min_length is None: _exit_if_errors(["--min-length is required when using --set flag"]) if opts.min_length <= 0: _exit_if_errors(["--min-length has to be greater than 0"]) self._set(opts) def _get(self): policy, errors = self.rest.get_password_policy() _exit_if_errors(errors) print(json.dumps(policy, sort_keys=True, indent=2)) def _set(self, opts): _, errors = self.rest.set_password_policy(opts.min_length, opts.upper_case, opts.lower_case, opts.digit, opts.special_char) _exit_if_errors(errors) _success("Password policy updated") @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-setting-password-policy") @staticmethod def get_description(): return "Modify the password 
policy" class SettingSecurity(Subcommand): """The settings security subcommand""" def __init__(self): super(SettingSecurity, self).__init__() self.parser.prog = "couchbase-cli setting-security" group = self.parser.add_argument_group("Cluster Security Settings") group.add_argument('--get', default=False, action='store_true', help='Get security settings.') group.add_argument('--set', default=False, action='store_true', help='Set security settings.') group.add_argument("--disable-http-ui", dest="disable_http_ui", metavar="<0|1>", choices=['0', '1'], default=None, help="Disables access to the UI over HTTP (0 or 1)") group.add_argument("--disable-www-authenticate", dest="disable_www_authenticate", metavar="<0|1>", choices=['0', '1'], default=None, help="Disables use of WWW-Authenticate (0 or 1") group.add_argument("--cluster-encryption-level", dest="cluster_encryption_level", metavar="<all|control|strict>", choices=['all', 'control', 'strict'], default=None, help="Set cluster encryption level, only used when cluster encryption enabled.") group.add_argument('--tls-min-version', dest='tls_min_version', metavar='<tlsv1|tlsv1.1|tlsv1.2>', choices=['tlsv1', 'tlsv1.1', 'tlsv1.2'], default=None, help='Set the minimum TLS version') group.add_argument('--tls-honor-cipher-order', dest='tls_honor_cipher_order', metavar='<1|0>', choices=['1', '0'], help='Specify or not the cipher order has to be followed.', default=None) group.add_argument('--cipher-suites', metavar='<ciphers>', default=None, help='Comma separated list of ciphers to use.If an empty string (e.g "") given it will' ' reset ciphers to default.') @rest_initialiser(version_check=True) def execute(self, opts): if sum([opts.get, opts.set]) != 1: _exit_if_errors(['Provided either --set or --get.']) if opts.get: val, err = self.rest.get_security_settings() _exit_if_errors(err) print(json.dumps(val)) elif opts.set: self._set(self.rest, opts.disable_http_ui, opts.cluster_encryption_level, opts.tls_min_version, 
opts.tls_honor_cipher_order, opts.cipher_suites, opts.disable_www_authenticate) @staticmethod def _set(rest, disable_http_ui, encryption_level, tls_min_version, honor_order, cipher_suites, disable_www_authenticate): if not any([True if x is not None else False for x in [disable_http_ui, encryption_level, tls_min_version, honor_order, cipher_suites, disable_www_authenticate]]): _exit_if_errors(['please provide at least one of --cluster-encryption-level, --disable-http-ui,' ' --tls-min-version, --tls-honor-cipher-order or --cipher-suites together with --set']) if disable_http_ui == '1': disable_http_ui = 'true' elif disable_http_ui == '0': disable_http_ui = 'false' if disable_www_authenticate == '1': disable_www_authenticate = 'true' elif disable_www_authenticate == '0': disable_www_authenticate = 'false' if honor_order == '1': honor_order = 'true' elif honor_order == '0': honor_order = 'false' if cipher_suites == '': cipher_suites = json.dumps([]) elif cipher_suites is not None: cipher_suites = json.dumps(cipher_suites.split(',')) _, errors = rest.set_security_settings(disable_http_ui, encryption_level, tls_min_version, honor_order, cipher_suites, disable_www_authenticate) _exit_if_errors(errors) _success("Security settings updated") @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-setting-security") @staticmethod def get_description(): return "Modify security settings" class SettingXdcr(Subcommand): """The setting xdcr subcommand""" def __init__(self): super(SettingXdcr, self).__init__() self.parser.prog = "couchbase-cli setting-xdcr" group = self.parser.add_argument_group("XDCR Settings") group.add_argument("--checkpoint-interval", dest="chk_int", type=(int), metavar="<num>", help="Intervals between checkpoints in seconds (60 to 14400)") group.add_argument("--worker-batch-size", dest="worker_batch_size", metavar="<num>", type=(int), help="Doc batch size (500 to 10000)") group.add_argument("--doc-batch-size", dest="doc_batch_size", 
type=(int), metavar="<KB>", help="Document batching size in KB (10 to 100000)") group.add_argument("--failure-restart-interval", dest="fail_interval", metavar="<seconds>", type=(int), help="Interval for restarting failed xdcr in seconds (1 to 300)") group.add_argument("--optimistic-replication-threshold", dest="rep_thresh", type=(int), metavar="<bytes>", help="Document body size threshold (bytes) to trigger optimistic " + "replication") group.add_argument("--source-nozzle-per-node", dest="src_nozzles", metavar="<num>", type=(int), help="The number of source nozzles per source node (1 to 10)") group.add_argument("--target-nozzle-per-node", dest="dst_nozzles", metavar="<num>", type=(int), help="The number of outgoing nozzles per target node (1 to 10)") group.add_argument("--bandwidth-usage-limit", dest="usage_limit", type=(int), metavar="<num>", help="The bandwidth usage limit in MiB/Sec") group.add_argument("--enable-compression", dest="compression", metavar="<1|0>", choices=["1", "0"], help="Enable/disable compression") group.add_argument("--log-level", dest="log_level", metavar="<level>", choices=["Error", "Info", "Debug", "Trace"], help="The XDCR log level") group.add_argument("--stats-interval", dest="stats_interval", metavar="<ms>", help="The interval for statistics updates (in milliseconds)") group.add_argument('--max-processes', dest='max_proc', metavar="<num>", type=int, help='Number of processes to be allocated to XDCR. 
The default is 4.') @rest_initialiser(version_check=True, cluster_init_check=True, enterprise_check=False) def execute(self, opts): if not self.enterprise and opts.compression: _exit_if_errors(["--enable-compression can only be configured on enterprise edition"]) if opts.compression == "0": opts.compression = "None" elif opts.compression == "1": opts.compression = "Auto" _, errors = self.rest.xdcr_global_settings(opts.chk_int, opts.worker_batch_size, opts.doc_batch_size, opts.fail_interval, opts.rep_thresh, opts.src_nozzles, opts.dst_nozzles, opts.usage_limit, opts.compression, opts.log_level, opts.stats_interval, opts.max_proc) _exit_if_errors(errors) _success("Global XDCR settings updated") @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-setting-xdcr") @staticmethod def get_description(): return "Modify XDCR related settings" class SettingMasterPassword(Subcommand): """The setting master password subcommand""" def __init__(self): super(SettingMasterPassword, self).__init__() self.parser.prog = "couchbase-cli setting-master-password" group = self.parser.add_argument_group("Master password options") group.add_argument("--new-password", dest="new_password", metavar="<password>", required=False, action=CBNonEchoedAction, envvar=None, prompt_text="Enter new master password:", confirm_text="Confirm new master password:", help="Sets a new master password") group.add_argument("--rotate-data-key", dest="rotate_data_key", action="store_true", help="Rotates the master password data key") @rest_initialiser(version_check=True) def execute(self, opts): if opts.new_password is not None: _, errors = self.rest.set_master_pwd(opts.new_password) _exit_if_errors(errors) _success("New master password set") elif opts.rotate_data_key: _, errors = self.rest.rotate_master_pwd() _exit_if_errors(errors) _success("Data key rotated") else: _exit_if_errors(["No parameters set"]) @staticmethod def get_man_page_name(): return 
get_doc_page_name("couchbase-cli-setting-master-password") @staticmethod def get_description(): return "Changing the settings of the master password" class SslManage(Subcommand): """The user manage subcommand""" def __init__(self): super(SslManage, self).__init__() self.parser.prog = "couchbase-cli ssl-manage" group = self.parser.add_argument_group("SSL manage options") group.add_argument("--cluster-cert-info", dest="cluster_cert", action="store_true", default=False, help="Gets the cluster certificate") group.add_argument("--node-cert-info", dest="node_cert", action="store_true", default=False, help="Gets the node certificate") group.add_argument("--regenerate-cert", dest="regenerate", metavar="<path>", help="Regenerate the cluster certificate and save it to a file") group.add_argument("--set-node-certificate", dest="set_cert", action="store_true", default=False, help="Sets the node certificate") group.add_argument("--upload-cluster-ca", dest="upload_cert", metavar="<path>", help="Upload a new cluster certificate") group.add_argument("--set-client-auth", dest="client_auth_path", metavar="<path>", help="A path to a file containing the client auth configuration") group.add_argument("--client-auth", dest="show_client_auth", action="store_true", help="Show ssl client certificate authentication value") group.add_argument("--extended", dest="extended", action="store_true", default=False, help="Print extended certificate information") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): if opts.regenerate is not None: try: open(opts.regenerate, 'a').close() except IOError: _exit_if_errors([f'Unable to create file at `{opts.regenerate}`']) certificate, errors = self.rest.regenerate_cluster_certificate() _exit_if_errors(errors) _exit_on_file_write_failure(opts.regenerate, certificate) _success(f'Certificate regenerate and copied to `{opts.regenerate}`') elif opts.cluster_cert: certificate, errors = 
self.rest.retrieve_cluster_certificate(opts.extended) _exit_if_errors(errors) if isinstance(certificate, dict): print(json.dumps(certificate, sort_keys=True, indent=2)) else: print(certificate) elif opts.node_cert: host = urllib.parse.urlparse(opts.cluster).netloc certificate, errors = self.rest.retrieve_node_certificate(host) _exit_if_errors(errors) print(json.dumps(certificate, sort_keys=True, indent=2)) elif opts.upload_cert: certificate = _exit_on_file_read_failure(opts.upload_cert) _, errors = self.rest.upload_cluster_certificate(certificate) _exit_if_errors(errors) _success(f'Uploaded cluster certificate to {opts.cluster}') elif opts.set_cert: _, errors = self.rest.set_node_certificate() _exit_if_errors(errors) _success("Node certificate set") elif opts.client_auth_path: data = _exit_on_file_read_failure(opts.client_auth_path) try: config = json.loads(data) except ValueError as e: _exit_if_errors([f'Client auth config does not contain valid json: {e}']) _, errors = self.rest.set_client_cert_auth(config) _exit_if_errors(errors) _success("SSL client auth updated") elif opts.show_client_auth: result, errors = self.rest.retrieve_client_cert_auth() _exit_if_errors(errors) print(json.dumps(result, sort_keys=True, indent=2)) else: _exit_if_errors(["No options specified"]) @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-ssl-manage") @staticmethod def get_description(): return "Manage cluster certificates" class UserManage(Subcommand): """The user manage subcommand""" def __init__(self): super(UserManage, self).__init__() self.parser.prog = "couchbase-cli user-manage" group = self.parser.add_argument_group("User manage options") group.add_argument("--delete", dest="delete", action="store_true", default=False, help="Delete an existing RBAC user") group.add_argument("--get", dest="get", action="store_true", default=False, help="Display RBAC user details") group.add_argument("--list", dest="list", action="store_true", default=False, 
help="List all RBAC users and their roles") group.add_argument("--my-roles", dest="my_roles", action="store_true", default=False, help="List my roles") group.add_argument("--set", dest="set", action="store_true", default=False, help="Create or edit an RBAC user") group.add_argument("--set-group", dest="set_group", action="store_true", default=False, help="Create or edit a user group") group.add_argument("--delete-group", dest="delete_group", action="store_true", default=False, help="Delete a user group") group.add_argument("--list-groups", dest="list_group", action="store_true", default=False, help="List all groups") group.add_argument("--get-group", dest="get_group", action="store_true", default=False, help="Get group") group.add_argument("--rbac-username", dest="rbac_user", metavar="<username>", help="The RBAC username") group.add_argument("--rbac-password", dest="rbac_pass", metavar="<password>", help="The RBAC password") group.add_argument("--rbac-name", dest="rbac_name", metavar="<name>", help="The full name of the user") group.add_argument("--roles", dest="roles", metavar="<roles_list>", help="The roles for the specified user") group.add_argument("--auth-domain", dest="auth_domain", metavar="<domain>", choices=["external", "local"], help="The authentication type for the specified user") group.add_argument("--user-groups", dest="groups", metavar="<groups>", help="List of groups for the user to be added to") group.add_argument("--group-name", dest="group", metavar="<group>", help="Group name") group.add_argument("--group-description", dest="description", metavar="<text>", help="Group description") group.add_argument("--ldap-ref", dest="ldap_ref", metavar="<ref>", help="LDAP group's distinguished name") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): num_selectors = sum([opts.delete, opts.list, opts.my_roles, opts.set, opts.get, opts.get_group, opts.list_group, opts.delete_group, opts.set_group]) if num_selectors == 0: 
_exit_if_errors(['Must specify --delete, --list, --my_roles, --set, --get, --get-group, --set-group, ' '--list-groups or --delete-group']) elif num_selectors != 1: _exit_if_errors(['Only one of the following can be specified:--delete, --list, --my_roles, --set, --get,' ' --get-group, --set-group, --list-groups or --delete-group']) if opts.delete: self._delete(opts) elif opts.list: self._list(opts) elif opts.my_roles: self._my_roles(opts) elif opts.set: self._set(opts) elif opts.get: self._get(opts) elif opts.get_group: self._get_group(opts) elif opts.set_group: self._set_group(opts) elif opts.list_group: self._list_groups() elif opts.delete_group: self._delete_group(opts) def _delete_group(self, opts): if opts.group is None: _exit_if_errors(['--group-name is required with the --delete-group option']) _, errors = self.rest.delete_user_group(opts.group) _exit_if_errors(errors) _success(f"Group '{opts.group}' was deleted") def _get_group(self, opts): if opts.group is None: _exit_if_errors(['--group-name is required with the --get-group option']) group, errors = self.rest.get_user_group(opts.group) _exit_if_errors(errors) print(json.dumps(group, indent=2)) def _set_group(self, opts): if opts.group is None: _exit_if_errors(['--group-name is required with --set-group']) _, errors = self.rest.set_user_group(opts.group, opts.roles, opts.description, opts.ldap_ref) _exit_if_errors(errors) _success(f"Group '{opts.group}' set") def _list_groups(self): groups, errors = self.rest.list_user_groups() _exit_if_errors(errors) print(json.dumps(groups, indent=2)) def _delete(self, opts): if opts.rbac_user is None: _exit_if_errors(["--rbac-username is required with the --delete option"]) if opts.rbac_pass is not None: _warning("--rbac-password is not used with the --delete option") if opts.rbac_name is not None: _warning("--rbac-name is not used with the --delete option") if opts.roles is not None: _warning("--roles is not used with the --delete option") if opts.auth_domain is None: 
_exit_if_errors(["--auth-domain is required with the --delete option"]) _, errors = self.rest.delete_rbac_user(opts.rbac_user, opts.auth_domain) _exit_if_errors(errors) _success(f"User '{opts.rbac_user}' was removed") def _list(self, opts): if opts.rbac_user is not None: _warning(["--rbac-username is not used with the --list option"]) if opts.rbac_pass is not None: _warning(["--rbac-password is not used with the --list option"]) if opts.rbac_name is not None: _warning("--rbac-name is not used with the --list option") if opts.roles is not None: _warning("--roles is not used with the --list option") if opts.auth_domain is not None: _warning("--auth-domain is not used with the --list option") result, errors = self.rest.list_rbac_users() _exit_if_errors(errors) print(json.dumps(result, indent=2)) def _get(self, opts): if opts.rbac_user is None: _exit_if_errors(["--rbac-username is required with the --get option"]) if opts.rbac_pass is not None: _warning("--rbac-password is not used with the --get option") if opts.rbac_name is not None: _warning("--rbac-name is not used with the --get option") if opts.roles is not None: _warning("--roles is not used with the --get option") if opts.auth_domain is not None: _warning("--auth-domain is not used with the --get option") result, errors = self.rest.list_rbac_users() _exit_if_errors(errors) user = [u for u in result if u['id'] == opts.rbac_user] if len(user) != 0: print(json.dumps(user, indent=2)) else: _exit_if_errors([f'no user {opts.rbac_user}']) def _my_roles(self, opts): if opts.rbac_user is not None: _warning("--rbac-username is not used with the --my-roles option") if opts.rbac_pass is not None: _warning("--rbac-password is not used with the --my-roles option") if opts.rbac_name is not None: _warning("--rbac-name is not used with the --my-roles option") if opts.roles is not None: _warning("--roles is not used with the --my-roles option") if opts.auth_domain is not None: _warning("--auth-domain is not used with the 
--my-roles option") result, errors = self.rest.my_roles() _exit_if_errors(errors) print(json.dumps(result, indent=2)) def _set(self, opts): if opts.rbac_user is None: _exit_if_errors(["--rbac-username is required with the --set option"]) if opts.rbac_pass is not None and opts.auth_domain == "external": _warning("--rbac-password cannot be used with the external auth domain") opts.rbac_pass = None if opts.auth_domain is None: _exit_if_errors(["--auth-domain is required with the --set option"]) _, errors = self.rest.set_rbac_user(opts.rbac_user, opts.rbac_pass, opts.rbac_name, opts.roles, opts.auth_domain, opts.groups) _exit_if_errors(errors) if opts.roles is not None and "query_external_access" in opts.roles: _warning('Granting the query_external_access role permits execution of the N1QL ' 'function CURL() and may allow access to other network endpoints in the local network and' 'the Internet.') _success(f"User {opts.rbac_user} set") @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-user-manage") @staticmethod def get_description(): return "Manage RBAC users" class XdcrReplicate(Subcommand): """The xdcr replicate subcommand""" def __init__(self): super(XdcrReplicate, self).__init__() self.parser.prog = "couchbase-cli xdcr-replicate" group = self.parser.add_argument_group("XDCR replicate options") group.add_argument("--get", action="store_true", help="Retrieve the settings of a XDCR replication.") group.add_argument("--create", dest="create", action="store_true", default=False, help="Create an XDCR replication") group.add_argument("--delete", dest="delete", action="store_true", default=False, help="Delete an XDCR replication") group.add_argument("--pause", dest="pause", action="store_true", default=False, help="Pause an XDCR replication") group.add_argument("--list", dest="list", action="store_true", default=False, help="List all XDCR replications") group.add_argument("--resume", dest="resume", action="store_true", default=False, 
help="Resume an XDCR replication") group.add_argument("--settings", dest="settings", action="store_true", default=False, help="Set advanced settings for an XDCR replication") group.add_argument("--xdcr-from-bucket", dest="from_bucket", metavar="<bucket>", help="The name bucket to replicate data from") group.add_argument("--xdcr-to-bucket", dest="to_bucket", metavar="<bucket>", help="The name bucket to replicate data to") group.add_argument("--xdcr-cluster-name", dest="cluster_name", metavar="<name>", help="The name of the cluster reference to replicate to") group.add_argument("--xdcr-replication-mode", dest="rep_mode", metavar="<mode>", choices=["xmem", "capi"], action=CBDeprecatedAction, help=SUPPRESS) group.add_argument("--filter-expression", dest="filter", metavar="<regex>", help="Regular expression to filter replication streams") group.add_argument("--filter-skip-restream", dest="filter_skip", action="store_true", default=False, help="Restart the replication. It must be specified together with --filter-expression") group.add_argument("--xdcr-replicator", dest="replicator_id", metavar="<id>", help="Replication ID") group.add_argument("--checkpoint-interval", dest="chk_int", type=(int), metavar="<seconds>", help="Intervals between checkpoints in seconds (60 to 14400)") group.add_argument("--worker-batch-size", dest="worker_batch_size", type=(int), metavar="<num>", help="Doc batch size (500 to 10000)") group.add_argument("--doc-batch-size", dest="doc_batch_size", type=(int), metavar="<KB>", help="Document batching size in KB (10 to 100000)") group.add_argument("--failure-restart-interval", dest="fail_interval", type=(int), metavar="<seconds>", help="Interval for restarting failed xdcr in seconds (1 to 300)") group.add_argument("--optimistic-replication-threshold", dest="rep_thresh", type=(int), metavar="<bytes>", help="Document body size threshold to trigger optimistic replication" + " (bytes)") group.add_argument("--source-nozzle-per-node", dest="src_nozzles", 
type=(int), metavar="<num>", help="The number of source nozzles per source node (1 to 10)") group.add_argument("--target-nozzle-per-node", dest="dst_nozzles", type=(int), metavar="<num>", help="The number of outgoing nozzles per target node (1 to 10)") group.add_argument("--bandwidth-usage-limit", dest="usage_limit", type=(int), metavar="<num>", help="The bandwidth usage limit in MiB/Sec") group.add_argument("--enable-compression", dest="compression", metavar="<1|0>", choices=["1", "0"], help="Enable/disable compression") group.add_argument("--log-level", dest="log_level", metavar="<level>", choices=["Error", "Warn", "Info", "Debug", "Trace"], help="The XDCR log level") group.add_argument("--stats-interval", dest="stats_interval", metavar="<ms>", help="The interval for statistics updates (in milliseconds)") group.add_argument("--priority", dest="priority", choices=['High', 'Medium', 'Low'], metavar="<High|Medium|Low>", help='XDCR priority, by default set to High') group.add_argument('--reset-expiry', choices=['1', '0'], metavar='<1|0>', dest='reset_expiry', default=None, help='When set to true the expiry of mutations will be set to zero') group.add_argument('--filter-deletion', choices=['1', '0'], metavar='<1|0>', default=None, dest='filter_del', help='When set to true delete mutations will be filter out and not sent to the target ' 'cluster') group.add_argument('--filter-expiration', choices=['1', '0'], metavar='<1|0>', default=None, dest='filter_exp', help='When set to true expiry mutations will be filter out and not sent to the target ' 'cluster') collection_group = self.parser.add_argument_group("Collection options") collection_group.add_argument('--collection-explicit-mappings', choices=['1', '0'], metavar='<1|0>', default=None, help='If explicit collection mappings is to be used. 
' '(Enterprise Edition Only)') collection_group.add_argument('--collection-migration', choices=['1', '0'], metavar='<1|0>', default=None, help='If XDCR is to run in collection migration mode. ' '(Enterprise Edition only)') collection_group.add_argument('--collection-mapping-rules', type=str, default=None, metavar='<mappings>', help='The mapping rules specified as a JSON formatted string. ' '(Enterprise Edition Only)') @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False) def execute(self, opts): if not self.enterprise and opts.compression: _exit_if_errors(["--enable-compression can only be configured on enterprise edition"]) if not self.enterprise and (opts.collection_migration or opts.collection_explicit_mappings is not None or opts.collection_mapping_rules is not None): _exit_if_errors(["[--collection-migration, --collection-explicit-mappings, --collection-mapping-rules] can" " only be configured on enterprise edition"]) if opts.compression == "0": opts.compression = "None" elif opts.compression == "1": opts.compression = "Auto" actions = sum([opts.create, opts.delete, opts.pause, opts.list, opts.resume, opts.settings, opts.get]) if actions == 0: _exit_if_errors(['Must specify one of --create, --delete, --pause, --list, --resume, --settings, --get']) elif actions > 1: _exit_if_errors(['The --create, --delete, --pause, --list, --resume, --settings, --get flags may not be ' 'specified at the same time']) elif opts.create: self._create(opts) elif opts.delete: self._delete(opts) elif opts.pause or opts.resume: self._pause_resume(opts) elif opts.list: self._list() elif opts.settings: self._settings(opts) elif opts.get: self._get(opts) def _get(self, opts): if opts.replicator_id is None: _exit_if_errors(["--xdcr-replicator is need to get the replicator settings"]) settings, errors = self.rest.get_xdcr_replicator_settings(opts.replicator_id) _exit_if_errors(errors) print(json.dumps(settings, indent=4, sort_keys=True)) def _create(self, 
opts): if opts.collection_migration == '1' and opts.collection_explicit_mappings == '1': _exit_if_errors(['cannot enable both collection migration and explicit mappings']) if opts.filter_skip and opts.filter is None: _exit_if_errors(["--filter-expersion is needed with the --filter-skip-restream option"]) _, errors = self.rest.create_xdcr_replication(opts.cluster_name, opts.to_bucket, opts.from_bucket, opts.chk_int, opts.worker_batch_size, opts.doc_batch_size, opts.fail_interval, opts.rep_thresh, opts.src_nozzles, opts.dst_nozzles, opts.usage_limit, opts.compression, opts.log_level, opts.stats_interval, opts.filter, opts.priority, opts.reset_expiry, opts.filter_del, opts.filter_exp, opts.collection_explicit_mappings, opts.collection_migration, opts.collection_mapping_rules) _exit_if_errors(errors) _success("XDCR replication created") def _delete(self, opts): if opts.replicator_id is None: _exit_if_errors(["--xdcr-replicator is needed to delete a replication"]) _, errors = self.rest.delete_xdcr_replicator(opts.replicator_id) _exit_if_errors(errors) _success("XDCR replication deleted") def _pause_resume(self, opts): if opts.replicator_id is None: _exit_if_errors(["--xdcr-replicator is needed to pause or resume a replication"]) tasks, errors = self.rest.get_tasks() _exit_if_errors(errors) for task in tasks: if task["type"] == "xdcr" and task["id"] == opts.replicator_id: if opts.pause and task["status"] == "notRunning": _exit_if_errors(["The replication is not running yet. Pause is not needed"]) if opts.resume and task["status"] == "running": _exit_if_errors(["The replication is running already. 
Resume is not needed"]) break if opts.pause: _, errors = self.rest.pause_xdcr_replication(opts.replicator_id) _exit_if_errors(errors) _success("XDCR replication paused") elif opts.resume: _, errors = self.rest.resume_xdcr_replication(opts.replicator_id) _exit_if_errors(errors) _success("XDCR replication resume") def _list(self): tasks, errors = self.rest.get_tasks() _exit_if_errors(errors) for task in tasks: if task["type"] == "xdcr": print(f'stream id: {task["id"]}') print(f' status: {task["status"]}') print(f' source: {task["source"]}') print(f' target: {task["target"]}') if "filterExpression" in task and task["filterExpression"] != "": print(f' filter: {task["filterExpression"]}') def _settings(self, opts): if opts.replicator_id is None: _exit_if_errors(["--xdcr-replicator is needed to change a replicators settings"]) if opts.filter_skip and opts.filter is None: _exit_if_errors(["--filter-expersion is needed with the --filter-skip-restream option"]) if opts.collection_migration == '1' and opts.collection_explicit_mappings == '1': _exit_if_errors(['cannot enable both collection migration and explicit mappings']) _, errors = self.rest.xdcr_replicator_settings(opts.chk_int, opts.worker_batch_size, opts.doc_batch_size, opts.fail_interval, opts.rep_thresh, opts.src_nozzles, opts.dst_nozzles, opts.usage_limit, opts.compression, opts.log_level, opts.stats_interval, opts.replicator_id, opts.filter, opts.filter_skip, opts.priority, opts.reset_expiry, opts.filter_del, opts.filter_exp, opts.collection_explicit_mappings, opts.collection_migration, opts.collection_mapping_rules) _exit_if_errors(errors) _success("XDCR replicator settings updated") @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-xdcr-replicate") @staticmethod def get_description(): return "Manage XDCR cluster references" class XdcrSetup(Subcommand): """The xdcr setup subcommand""" def __init__(self): super(XdcrSetup, self).__init__() self.parser.prog = "couchbase-cli xdcr-setup" 
group = self.parser.add_argument_group("XDCR setup options") group.add_argument("--create", dest="create", action="store_true", default=False, help="Create an XDCR remote reference") group.add_argument("--delete", dest="delete", action="store_true", default=False, help="Delete an XDCR remote reference") group.add_argument("--edit", dest="edit", action="store_true", default=False, help="Set the local read-only user") group.add_argument("--list", dest="list", action="store_true", default=False, help="List all XDCR remote references") group.add_argument("--xdcr-cluster-name", dest="name", metavar="<name>", help="The name for the remote cluster reference") group.add_argument("--xdcr-hostname", dest="hostname", metavar="<hostname>", help="The hostname of the remote cluster reference") group.add_argument("--xdcr-username", dest="r_username", metavar="<username>", help="The username of the remote cluster reference") group.add_argument("--xdcr-password", dest="r_password", metavar="<password>", help="The password of the remote cluster reference") group.add_argument("--xdcr-user-certificate", dest="r_certificate", metavar="<path>", help="The user certificate for authentication") group.add_argument("--xdcr-user-key", dest="r_key", metavar="<path>", help="The user key for authentication") group.add_argument("--xdcr-demand-encryption", dest="encrypt", choices=["0", "1"], action=CBDeprecatedAction, help=SUPPRESS) group.add_argument("--xdcr-encryption-type", dest="encryption_type", choices=["full", "half"], metavar="<type>", action=CBDeprecatedAction, help=SUPPRESS) group.add_argument("--xdcr-certificate", dest="certificate", metavar="<path>", help="The certificate used for encryption") group.add_argument("--xdcr-secure-connection", dest="secure_connection", choices=["none", "full", "half"], metavar="<type>", help="The XDCR secure connection type") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): actions = sum([opts.create, opts.delete, 
opts.edit, opts.list]) if actions == 0: _exit_if_errors(["Must specify one of --create, --delete, --edit, --list"]) elif actions > 1: _exit_if_errors(["The --create, --delete, --edit, --list flags may not be specified at the same time"]) elif opts.create or opts.edit: self._set(opts) elif opts.delete: self._delete(opts) elif opts.list: self._list() def _set(self, opts): cmd = "create" if opts.edit: cmd = "edit" if opts.name is None: _exit_if_errors([f'--xdcr-cluster-name is required to {cmd} a cluster connection']) if opts.hostname is None: _exit_if_errors([f'--xdcr-hostname is required to {cmd} a cluster connections']) if opts.username is None: _exit_if_errors([f'--xdcr-username is required to {cmd} a cluster connections']) if opts.password is None: _exit_if_errors([f'--xdcr-password is required to {cmd} a cluster connections']) if (opts.encrypt is not None or opts.encryption_type is not None) and opts.secure_connection is not None: _exit_if_errors(["Cannot use deprecated flags --xdcr-demand-encryption or --xdcr-encryption-type with" " --xdcr-secure-connection"]) if opts.secure_connection == "none": opts.encrypt = "0" opts.encryption_type = None elif opts.secure_connection == "half": opts.encrypt = "1" opts.encryption_type = "half" elif opts.secure_connection == "full": opts.encrypt = "1" opts.encryption_type = "full" elif opts.encrypt is None and opts.encryption_type is None: opts.encrypt = "0" opts.encryption_type = None raw_cert = None if opts.encrypt == "1": if opts.encryption_type is None: opts.encryption_type = "full" if opts.encryption_type == "full": if opts.certificate is None: _exit_if_errors(["certificate required if encryption is demanded"]) raw_cert = _exit_on_file_read_failure(opts.certificate) raw_user_key = None if opts.r_key: raw_user_key = _exit_on_file_read_failure(opts.r_key) raw_user_cert = None if opts.r_certificate: raw_user_cert = _exit_on_file_read_failure(opts.r_certificate) if opts.create: _, errors = 
self.rest.create_xdcr_reference(opts.name, opts.hostname, opts.r_username, opts.r_password, opts.encrypt, opts.encryption_type, raw_cert, raw_user_cert, raw_user_key) _exit_if_errors(errors) _success("Cluster reference created") else: _, errors = self.rest.edit_xdcr_reference(opts.name, opts.hostname, opts.r_username, opts.r_password, opts.encrypt, opts.encryption_type, raw_cert, raw_user_cert, raw_user_key) _exit_if_errors(errors) _success("Cluster reference edited") def _delete(self, opts): if opts.name is None: _exit_if_errors(["--xdcr-cluster-name is required to deleta a cluster connection"]) _, errors = self.rest.delete_xdcr_reference(opts.name) _exit_if_errors(errors) _success("Cluster reference deleted") def _list(self): clusters, errors = self.rest.list_xdcr_references() _exit_if_errors(errors) for cluster in clusters: if not cluster["deleted"]: print(f'cluster name: {cluster["name"]}') print(f' uuid: {cluster["uuid"]}') print(f' host name: {cluster["hostname"]}') print(f' user name: {cluster["username"]}') print(f' uri: {cluster["uri"]}') @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-xdcr-setup") @staticmethod def get_description(): return "Manage XDCR replications" class EventingFunctionSetup(Subcommand): """The Eventing Service Function setup subcommand""" def __init__(self): super(EventingFunctionSetup, self).__init__() self.parser.prog = "couchbase-cli eventing-function-setup" group = self.parser.add_argument_group("Eventing Service Function setup options") group.add_argument("--import", dest="_import", action="store_true", default=False, help="Import functions") group.add_argument("--export", dest="export", action="store_true", default=False, help="Export a function") group.add_argument("--export-all", dest="export_all", action="store_true", default=False, help="Export all functions") group.add_argument("--delete", dest="delete", action="store_true", default=False, help="Delete a function") group.add_argument("--list", 
dest="list", action="store_true", default=False, help="List all functions") group.add_argument("--deploy", dest="deploy", action="store_true", default=False, help="Deploy a function") group.add_argument("--undeploy", dest="undeploy", action="store_true", default=False, help="Undeploy a function") group.add_argument("--boundary", dest="boundary", metavar="<from-everything|from-now>", choices=["from-everything", "from-now"], default=False, help="Set the function deployment boundary") group.add_argument("--name", dest="name", metavar="<name>", default=False, help="The name of the function to take an action on") group.add_argument("--file", dest="filename", metavar="<file>", default=False, help="The file to export and import function(s) to and from") group.add_argument("--pause", dest="pause", action="store_true", help="Pause a function") group.add_argument("--resume", dest="resume", action="store_true", help="Resume a function") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): # pylint: disable=protected-access actions = sum([opts._import, opts.export, opts.export_all, opts.delete, opts.list, opts.deploy, opts.undeploy, opts.pause, opts.resume]) if actions == 0: _exit_if_errors(["Must specify one of --import, --export, --export-all, --delete, --list, --deploy," " --undeploy, --pause, --resume"]) elif actions > 1: _exit_if_errors(['The --import, --export, --export-all, --delete, --list, --deploy, --undeploy, --pause, ' '--resume flags may not be specified at the same time']) elif opts._import: # pylint: disable=protected-access self._import(opts) elif opts.export: self._export(opts) elif opts.export_all: self._export_all(opts) elif opts.delete: self._delete(opts) elif opts.list: self._list() elif opts.deploy: self._deploy_undeploy(opts, True) elif opts.undeploy: self._deploy_undeploy(opts, False) elif opts.pause: self._pause_resume(opts, True) elif opts.resume: self._pause_resume(opts, False) def _pause_resume(self, opts, pause): 
if not opts.name: _exit_if_errors([f"Flag --name is required with the {'--pause' if pause else '--resume'} flag"]) _, err = self.rest.pause_resume_function(opts.name, pause) _exit_if_errors(err) _success(f"Function was {'paused' if pause else 'resumed'}") def _import(self, opts): if not opts.filename: _exit_if_errors(["--file is needed to import functions"]) import_functions = _exit_on_file_read_failure(opts.filename) import_functions = json.loads(import_functions) _, errors = self.rest.import_functions(import_functions) _exit_if_errors(errors) _success("Events imported") def _export(self, opts): if not opts.filename: _exit_if_errors(["--file is needed to export a function"]) if not opts.name: _exit_if_errors(["--name is needed to export a function"]) functions, errors = self.rest.export_functions() _exit_if_errors(errors) exported_function = None for function in functions: if function["appname"] == opts.name: exported_function = [function] if not exported_function: _exit_if_errors([f'Function {opts.name} does not exist']) _exit_on_file_write_failure(opts.filename, json.dumps(exported_function, separators=(',', ':'))) _success("Function exported to: " + opts.filename) def _export_all(self, opts): if not opts.filename: _exit_if_errors(["--file is needed to export all functions"]) exported_functions, errors = self.rest.export_functions() _exit_if_errors(errors) _exit_on_file_write_failure(opts.filename, json.dumps(exported_functions, separators=(',', ':'))) _success(f'All functions exported to: {opts.filename}') def _delete(self, opts): if not opts.name: _exit_if_errors(["--name is needed to delete a function"]) _, errors = self.rest.delete_function(opts.name) _exit_if_errors(errors) _success("Request to delete the function was accepted") def _deploy_undeploy(self, opts, deploy): if not opts.name: _exit_if_errors([f"--name is needed to {'deploy' if deploy else 'undeploy'} a function"]) if deploy and not opts.boundary: _exit_if_errors(["--boundary is needed to deploy 
a function"]) _, errors = self.rest.deploy_undeploy_function(opts.name, deploy, opts.boundary) _exit_if_errors(errors) _success(f"Request to {'deploy' if deploy else 'undeploy'} the function was accepted") def _list(self): functions, errors = self.rest.list_functions() _exit_if_errors(errors) for function in functions: print(function['appname']) status = '' if function['settings']['deployment_status']: status = 'Deployed' else: status = 'Undeployed' print(f' Status: {status}') print(f' Source Bucket: {function["depcfg"]["source_bucket"]}') print(f' Metadata Bucket: {function["depcfg"]["metadata_bucket"]}') @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-eventing-function-setup") @staticmethod def get_description(): return "Manage Eventing Service Functions" class AnalyticsLinkSetup(Subcommand): """The analytics link setup subcommand""" def __init__(self): super(AnalyticsLinkSetup, self).__init__() self.parser.prog = "couchbase-cli analytics-link-setup" group = self.parser.add_argument_group("Analytics Service link setup options") group.add_argument("--create", dest="create", action="store_true", default=False, help="Create a link") group.add_argument("--delete", dest="delete", action="store_true", default=False, help="Delete a link") group.add_argument("--edit", dest="edit", action="store_true", default=False, help="Modify a link") group.add_argument("--list", dest="list", action="store_true", default=False, help="List all links") group.add_argument("--dataverse", dest="dataverse", metavar="<name>", help="The dataverse of the link (Deprecated)") group.add_argument("--scope", dest="scope", metavar="<name>", help="The analytics scope of the link in its canonical form") group.add_argument("--name", dest="name", metavar="<name>", help="The name of the link") group.add_argument("--type", dest="type", metavar="<type>", choices=["couchbase", "s3", "azureblob"], help="The type of the link ('couchbase', 's3' or 'azureblob')") group = 
self.parser.add_argument_group("Analytics Service Couchbase link setup options") group.add_argument("--hostname", dest="hostname", metavar="<hostname>", help="The hostname of the link") group.add_argument("--link-username", dest="link_username", metavar="<username>", help="The username of the link") group.add_argument("--link-password", dest="link_password", metavar="<password>", help="The password of the link") group.add_argument("--user-certificate", dest="user_certificate", metavar="<path>", help="The user certificate for authentication") group.add_argument("--user-key", dest="user_key", metavar="<path>", help="The user key for authentication") group.add_argument("--certificate", dest="certificate", metavar="<path>", help="The certificate used for encryption") group.add_argument("--encryption", dest="encryption", choices=["none", "full", "half"], metavar="<type>", help="The link encryption type ('none', 'full' or 'half')") group = self.parser.add_argument_group("Analytics Service S3 link setup options") group.add_argument("--access-key-id", dest="access_key_id", metavar="<id>", help="The access key ID of the link") group.add_argument("--secret-access-key", dest="secret_access_key", metavar="<key>", help="The secret access key of the link") group.add_argument("--session-token", dest="session_token", metavar="<token>", help="Temporary credentials session token") group.add_argument("--region", dest="region", metavar="<region>", help="The region of the link") group.add_argument("--service-endpoint", dest="service_endpoint", metavar="<url>", help="The service endpoint of the link (optional)") group = self.parser.add_argument_group("Analytics Service Azure Blob link setup options") group.add_argument("--connection-string", dest="connection_string", metavar="<key>", help="The connection string of the link") group.add_argument("--account-name", dest="account_name", metavar="<id>", help="The account name of the link") group.add_argument("--account-key", 
dest="account_key", metavar="<key>", help="The account key of the link") group.add_argument("--shared-access-signature", dest="shared_access_signature", metavar="<token>", help="The shared access signature of the link") group.add_argument("--blob-endpoint", dest="blob_endpoint", metavar="<url>", help="The blob endpoint of the link (optional)") group.add_argument("--endpoint-suffix", dest="endpoint_suffix", metavar="<url>", help="The endpoint suffix of the link (optional)") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): actions = sum([opts.create, opts.delete, opts.edit, opts.list]) if actions == 0: _exit_if_errors(["Must specify one of --create, --delete, --edit, --list"]) elif actions > 1: _exit_if_errors(["The --create, --delete, --edit, --list flags may not be specified at the same time"]) if opts.dataverse: _deprecated("--dataverse is deprecated, please use --scope instead") if opts.dataverse and opts.scope: _exit_if_errors(['Only one of --dataverse and --scope is allowed']) if opts.create or opts.edit: self._set(opts) elif opts.delete: self._delete(opts) elif opts.list: self._list(opts) def _set(self, opts): cmd = "create" if opts.edit: cmd = "edit" if opts.dataverse is None and opts.scope is None: _exit_if_errors([f'--dataverse or --scope is required to {cmd} a link']) if opts.name is None: _exit_if_errors([f'--name is required to {cmd} a link']) if opts.create and opts.type is None: _exit_if_errors([f'--type is required to {cmd} a link']) if opts.type == 'azureblob': if opts.connection_string is None and opts.account_key is None and opts.shared_access_signature is None: _exit_if_errors(['No authentication parameters provided']) if opts.connection_string and (opts.account_key or opts.shared_access_signature): _exit_if_errors(['Only a single authentication method is allowed']) if opts.account_key and opts.shared_access_signature: _exit_if_errors(['Only a single authentication method is allowed']) if opts.dataverse: 
class UserChangePassword(Subcommand):
    """Subcommand that changes the password of the authenticated user."""

    def __init__(self):
        super().__init__()
        self.parser.prog = "couchbase-cli user-change-password"
        opt_group = self.parser.add_argument_group("User password change option")
        opt_group.add_argument("--new-password", dest="new_pass", metavar="<password>", required=True,
                               help="The new password")

    @rest_initialiser(cluster_init_check=True, version_check=True)
    def execute(self, opts):
        """Change the password of the user given by the connection credentials."""
        # argparse already enforces required=True; kept as a defensive guard.
        if opts.new_pass is None:
            _exit_if_errors(["--new-password is required"])
        # NOTE(review): 'user_change_passsword' (sic) is the REST client's actual
        # method name — defined elsewhere in this file, so it is not renamed here.
        _, rv = self.rest.user_change_passsword(opts.new_pass)
        _exit_if_errors(rv)
        _success(f'Changed password for {opts.username}')

    @staticmethod
    def get_man_page_name():
        return get_doc_page_name("couchbase-cli-user-change-password")

    @staticmethod
    def get_description():
        return "Change user password"
@rest_initialiser(cluster_init_check=True, version_check=True)
def execute(self, opts):
    """Check that exactly one collection action was requested and run it."""
    actions = (opts.create_scope, opts.drop_scope, opts.list_scopes, opts.create_collection,
               opts.drop_collection, opts.list_collections)
    chosen = len([action for action in actions if action is not None])
    args = "--create-scope, --drop-scope, --list-scopes, --create-collection, --drop-collection, or " \
           "--list-collections"
    if chosen == 0:
        _exit_if_errors([f'Must specify one of the following: {args}'])
    elif chosen != 1:
        _exit_if_errors([f'Only one of the following may be specified: {args}'])

    # --max-ttl is only meaningful when creating a collection.
    if opts.max_ttl is not None and opts.create_collection is None:
        _exit_if_errors(["--max-ttl can only be set with --create-collection"])

    if opts.create_scope:
        self._create_scope(opts)
    if opts.drop_scope:
        self._drop_scope(opts)
    if opts.list_scopes:
        self._list_scopes(opts)
    if opts.create_collection:
        self._create_collection(opts)
    if opts.drop_collection:
        self._drop_collection(opts)
    # An empty string means "list every scope", so compare against None explicitly.
    if opts.list_collections is not None:
        self._list_collections(opts)
self._get_scope_collection(opts.drop_collection) _, errors = self.rest.drop_collection(opts.bucket, scope, collection) _exit_if_errors(errors) _success("Collection dropped") def _list_collections(self, opts): manifest, errors = self.rest.get_manifest(opts.bucket) _exit_if_errors(errors) if opts.list_collections == "": scope_dict = {} else: scope_dict = {scope: False for scope in opts.list_collections.split(',')} if opts.output == 'json': self._json_list_collections(manifest, scope_dict) return for scope in manifest['scopes']: if len(scope_dict) == 0 or scope['name'] in scope_dict: if len(scope_dict) > 0: scope_dict[scope['name']] = True print(f'Scope {scope["name"]}:') for collection in scope['collections']: print(f' - {collection["name"]}') if len(scope_dict) > 0: for scope, found in scope_dict.items(): if not found: _warning(f'Scope "{scope}" does not exist') @staticmethod def _json_list_collections(manifest: Dict[str, Any], scope_dict: Dict[str, bool]): out = {} for scope in manifest['scopes']: if len(scope_dict) == 0 or scope['name'] in scope_dict: out[scope['name']] = [collection["name"] for collection in scope['collections']] print(json.dumps(out, indent=4)) def _get_scope_collection(self, path): scope, collection, err = self.expand_collection_shortcut(path) if err is not None: _exit_if_errors([err]) return scope, collection @staticmethod def expand_collection_shortcut(path): parts = path.split('.') if len(parts) != 2: return None, None, f'invalid collection path {path}' parts = ['_default' if x == '' else x for x in parts] return parts[0], parts[1], None @staticmethod def get_man_page_name(): return get_doc_page_name("couchbase-cli-collection-manage") @staticmethod def get_description(): return "Manage collections in a bucket" class EnableDeveloperPreview(Subcommand): """"The enable developer preview command""" def __init__(self): super(EnableDeveloperPreview, self).__init__() self.parser.prog = "couchbase-cli enable-developer-preview" group = 
@rest_initialiser(version_check=True)
def execute(self, opts):
    """Enable developer preview mode, or report whether it is enabled."""
    if not (opts.enable or opts.list):
        _exit_if_errors(['--enable or --list must be provided'])
    if opts.enable and opts.list:
        _exit_if_errors(['cannot provide both --enable and --list'])

    if opts.enable:
        # Enabling is irreversible, so require interactive confirmation.
        answer = input('Developer preview cannot be disabled once it is enabled. '
                       'If you enter developer preview mode you will not be able to '
                       'upgrade. DO NOT USE IN PRODUCTION.\nAre you sure [y/n]: ')
        if answer == 'y':
            _, errors = self.rest.set_dp_mode()
            _exit_if_errors(errors)
            _success("Cluster is in developer preview mode")
        elif answer == 'n':
            _success("Developer preview mode has NOT been enabled")
        else:
            _exit_if_errors(["Unknown option provided"])

    if opts.list:
        pools, rv = self.rest.pools()
        _exit_if_errors(rv)
        # Missing key is treated the same as False.
        if pools.get('isDeveloperPreview'):
            print('Cluster is in developer preview mode')
        else:
            print('Cluster is NOT in developer preview mode')
@rest_initialiser(version_check=True)
def execute(self, opts):
    """Set, remove or list the alternate-address configuration.

    Exactly one of --set, --list or --remove must be given; --set and
    --remove additionally need --node, because alternate addresses can
    only be changed on the node itself.
    """
    flags_used = sum([opts.set, opts.list, opts.remove])
    if flags_used != 1:
        _exit_if_errors(['Use exactly one of --set, --list or --remove'])
    if opts.set or opts.remove:
        if not opts.node:
            _exit_if_errors(['--node has to be set when using --set or --remove'])
        # Alternate address can only be set on the node itself. The opts.cluster
        # is updated with the opts.node instead to allow ease of use.
        # The node name can have a port number (./cluster_run)
        hostname, port = self._get_host_port(opts.node)
        url = urllib.parse.urlparse(opts.cluster)
        # FIX: scheme/cluster must be initialised up front — previously they were
        # only assigned inside the branches below, raising UnboundLocalError when
        # opts.cluster carried no URL scheme.
        scheme = None
        if url.scheme:
            scheme = url.scheme
            if url.port and not port:
                port = url.port
        elif not port:
            _, old_port = self._get_host_port(opts.cluster)
            if old_port:
                port = old_port
        cluster = ''
        if scheme:
            cluster = f'{scheme}://'
        cluster += hostname
        if port:
            cluster += f':{port}'
        opts.cluster = cluster
        # override rest client so it uses the node to be altered
        self.rest = ClusterManager(opts.cluster, opts.username, opts.password, opts.ssl, opts.ssl_verify,
                                   opts.cacert, opts.debug)
    if opts.set:
        ports, error = self._parse_ports(opts.ports)
        _exit_if_errors(error)
        _, error = self.rest.set_alternate_address(opts.alternate_hostname, ports)
        _exit_if_errors(error)
    if opts.remove:
        _, error = self.rest.delete_alternate_address()
        _exit_if_errors(error)
        _success('Alternate address configuration deleted')
    if opts.list:
        add, error = self.rest.get_alternate_address()
        _exit_if_errors(error)
        if opts.output == 'standard':
            # Collect every alternate port name so the table has one column each.
            port_names = set()
            for node in add:
                if 'alternateAddresses' in node and 'ports' in node['alternateAddresses']['external']:
                    for port in node['alternateAddresses']['external']['ports'].keys():
                        port_names.add(port)
            print('{:20}{:20}{}'.format('Hostname', 'Alternate Address', 'Ports (Primary/Alternate)'))
            print('{:40}'.format(' '), end='')
            port_names = sorted(port_names)
            for port in port_names:
                # Columns are at least 11 characters wide.
                column_size = len(port) + 1
                if column_size < 11:
                    column_size = 11
                print(f'{port:{column_size}}', end='')
            print()
            for node in add:
                if 'alternateAddresses' in node:
                    # For cluster_run and single node clusters there is no hostname
                    try:
                        print(f'{node["hostname"]:20}{node["alternateAddresses"]["external"]["hostname"]:20}',
                              end='')
                    except KeyError:
                        host = 'UNKNOWN'
                        print(f'{host:20}{node["alternateAddresses"]["external"]["hostname"]:20}', end='')
                    for port in port_names:
                        column_size = len(port) + 1
                        if column_size < 11:
                            column_size = 11
                        ports = ' '
                        if port in node['alternateAddresses']['external']['ports']:
                            ports = f'{str(node["services"][port])}' \
                                    f'/{str(node["alternateAddresses"]["external"]["ports"][port])}'
                        print(f'{ports:{column_size}}', end='')
                    print()
                else:
                    # For cluster_run and single node clusters there is no hostname
                    try:
                        print(f'{node["hostname"]}')
                    except KeyError:
                        print('UNKNOWN')
        else:
            print(json.dumps(add))

@staticmethod
def _parse_ports(ports):
    """Parse 'name=port,name=port' into a list of (name, port) string tuples.

    Returns (list, None) on success or (None, [error message]) on bad input.
    A None argument passes through as (None, None).
    """
    if ports is None:
        return None, None
    port_mappings = ports.split(',')
    port_tuple_list = []
    for port_value_pair in port_mappings:
        p_v = port_value_pair.split('=')
        if len(p_v) != 2:
            return None, [f'invalid port mapping: {port_value_pair}']
        try:
            int(p_v[1])  # validate the port is numeric; keep it as a string
        except ValueError:
            return None, [f'invalid port mapping: {port_value_pair}']
        port_tuple_list.append((p_v[0], p_v[1]))
    return port_tuple_list, None

@staticmethod
def _get_host_port(host):
    """Split 'host[:port]' into (host, port-or-None); handles bracketed IPv6."""
    if ']' in host:
        host_port = host.split(']:')
        if len(host_port) == 2:
            return host_port[0] + ']', host_port[1]
        return host_port[0], None
    host_port = host.split(':')
    if len(host_port) == 2:
        return host_port[0], host_port[1]
    return host_port[0], None

@staticmethod
def get_man_page_name():
    return get_doc_page_name("couchbase-cli-setting-alternate-address")

@staticmethod
def get_description():
    return "Configure alternate addresses"
@rest_initialiser(version_check=True)
def execute(self, opts):
    """Get or set the query service settings."""
    if sum([opts.get, opts.set]) != 1:
        _exit_if_errors(['Please provide --set or --get, both can not be provided at the same time'])

    if opts.get:
        settings, err = self.rest.get_query_settings()
        _exit_if_errors(err)
        print(json.dumps(settings))
    if opts.set:
        # The curl access list is posted first; it also counts as "something set".
        access_list = self._post_query_access_list(opts)
        self._post_query_settings(opts, access_list)
        _success('Updated the query settings')

def _post_query_access_list(self, opts) -> bool:
    """Post the curl access settings if given; return whether anything was posted."""
    if opts.curl_access != 'restricted' and (opts.allowed_urls is not None or opts.disallowed_urls is not None):
        _exit_if_errors(['Can only provide --allowed-urls or --disallowed-urls with --curl-access restricted'])
    if not opts.curl_access:
        return False
    allowed = opts.allowed_urls.strip().split(',') if opts.allowed_urls is not None else None
    disallowed = opts.disallowed_urls.strip().split(',') if opts.disallowed_urls is not None else None
    _, err = self.rest.post_query_curl_access_settings(opts.curl_access == 'restricted', allowed, disallowed)
    _exit_if_errors(err)
    return True

def _post_query_settings(self, opts, access_list):
    """Post the remaining query settings; at least one must be present unless
    the access list was already updated."""
    query_opts = (opts.pipeline_batch, opts.pipeline_cap, opts.scan_cap, opts.timeout, opts.prepared_limit,
                  opts.completed_limit, opts.completed_threshold, opts.log_level, opts.max_parallelism,
                  opts.n1ql_feature_control, opts.temp_dir, opts.temp_dir_size, opts.cost_based_optimizer,
                  opts.memory_quota, opts.transaction_timeout)
    if all(value is None for value in query_opts):
        if access_list:
            return
        _exit_if_errors(['Please provide at least one other option with --set'])
    # Same positional order as the tuple above.
    _, err = self.rest.post_query_settings(*query_opts)
    _exit_if_errors(err)
@rest_initialiser(version_check=True)
def execute(self, opts):
    """Get or change the IP family used for node-to-node communication."""
    flags_used = sum([opts.set, opts.get])
    if flags_used == 0:
        _exit_if_errors(['Please provide one of --set, or --get'])
    elif flags_used > 1:
        _exit_if_errors(['Please provide only one of --set, or --get'])

    if opts.get:
        self._get(self.rest)
    if opts.set:
        if sum([opts.ipv6, opts.ipv4]) != 1:
            # FIX: message previously read "Provided exactly one of ..." (typo).
            _exit_if_errors(['Provide exactly one of --ipv4 or --ipv6 together with the --set option'])
        self._set(self.rest, opts.ipv6, opts.ssl)

@staticmethod
def _set(rest, ipv6, ssl):
    """Switch every node's address family, enabling the new listeners first and
    disabling the old ones only after all nodes have been reconfigured."""
    ip_fam, ip_fam_disable = ('ipv6', 'ipv4') if ipv6 else ('ipv4', 'ipv6')
    node_data, err = rest.pools('nodes')
    if err and err[0] == '"unknown pool"':
        # Uninitialised single node: configure it directly.
        _, err = rest.enable_external_listener(ipfamily=ip_fam)
        _exit_if_errors(err)
        _, err = rest.setup_net_config(ipfamily=ip_fam)
        _exit_if_errors(err)
        _, err = rest.disable_unused_external_listeners(ipfamily=ip_fam_disable)
        _exit_if_errors(err)
        _success('Switched IP family of the cluster')
        return
    _exit_if_errors(err)

    hosts = []
    for n in node_data['nodes']:
        host = f'http://{n["hostname"]}'
        if ssl:
            addr = host.rsplit(":", 1)[0]
            host = f'https://{addr}:{n["ports"]["httpsMgmt"]}'
        _, err = rest.enable_external_listener(host=host, ipfamily=ip_fam)
        _exit_if_errors(err)
        hosts.append(host)
    for h in hosts:
        _, err = rest.setup_net_config(host=h, ipfamily=ip_fam)
        _exit_if_errors(err)
        print(f'Switched IP family for node: {h}')
    for h in hosts:
        _, err = rest.disable_unused_external_listeners(host=h, ipfamily=ip_fam_disable)
        _exit_if_errors(err)
    _success('Switched IP family of the cluster')
class NodeToNodeEncryption(Subcommand):
    """Subcommand to enable/disable or report cluster (node-to-node) encryption."""

    def __init__(self):
        super().__init__()
        self.parser.prog = "couchbase-cli node-to-node-encryption"
        opt_group = self.parser.add_argument_group("Node-to-node encryption options")
        opt_group.add_argument('--enable', action="store_true", default=False,
                               help='Enable node-to-node encryption')
        opt_group.add_argument('--disable', action="store_true", default=False,
                               help='Disable node-to-node encryption')
        opt_group.add_argument('--get', action="store_true", default=False,
                               help='Retrieve current status of node-to-node encryption (on or off)')

    @rest_initialiser(version_check=True)
    def execute(self, opts):
        """Dispatch to the status/enable/disable handler."""
        chosen = sum([opts.enable, opts.disable, opts.get])
        if chosen == 0:
            _exit_if_errors(['Please provide one of --enable, --disable or --get'])
        elif chosen > 1:
            _exit_if_errors(['Please provide only one of --enable, --disable or --get'])

        if opts.get:
            self._get(self.rest)
        elif opts.enable:
            self._change_encryption(self.rest, 'on', opts.ssl)
        elif opts.disable:
            self._change_encryption(self.rest, 'off', opts.ssl)

    @staticmethod
    def _change_encryption(rest, encryption, ssl):
        """Toggle encryption cluster-wide: enable the new listeners everywhere,
        reconfigure each node, then drop the now-unused listeners."""
        node_data, err = rest.pools('nodes')
        encryption_disable = 'on' if encryption == 'off' else 'off'
        if err and err[0] == '"unknown pool"':
            # Uninitialised single node: configure it directly.
            _, err = rest.enable_external_listener(encryption=encryption)
            _exit_if_errors(err)
            _, err = rest.setup_net_config(encryption=encryption)
            _exit_if_errors(err)
            _, err = rest.disable_unused_external_listeners(encryption=encryption_disable)
            _exit_if_errors(err)
            _success(f'Switched node-to-node encryption {encryption}')
            return
        _exit_if_errors(err)

        targets = []
        for node in node_data['nodes']:
            target = f'http://{node["hostname"]}'
            if ssl:
                addr = target.rsplit(":", 1)[0]
                target = f'https://{addr}:{node["ports"]["httpsMgmt"]}'
            _, err = rest.enable_external_listener(host=target, encryption=encryption)
            _exit_if_errors(err)
            targets.append(target)
        for target in targets:
            _, err = rest.setup_net_config(host=target, encryption=encryption)
            _exit_if_errors(err)
            print(f'Turned {encryption} encryption for node: {target}')
        for target in targets:
            _, err = rest.disable_unused_external_listeners(host=target, encryption=encryption_disable)
            _exit_if_errors(err)
        _success(f'Switched node-to-node encryption {encryption}')

    @staticmethod
    def _get(rest):
        """Print whether encryption is on, off or mixed across the cluster."""
        # this will start the correct listeners in all the nodes
        nodes, err = rest.nodes_info()
        _exit_if_errors(err)
        encrypted_nodes = [n['hostname'] for n in nodes if n['nodeEncryption']]
        unencrypted_nodes = [n['hostname'] for n in nodes if not n['nodeEncryption']]
        if len(encrypted_nodes) == len(nodes):
            print('Node-to-node encryption is enabled')
        elif len(unencrypted_nodes) == len(nodes):
            print('Node-to-node encryption is disabled')
        else:
            print('Cluster is in mixed mode')
        print(f'Nodes with encryption enabled: {encrypted_nodes}')
        print(f'Nodes with encryption disabled: {unencrypted_nodes}')

    @staticmethod
    def get_man_page_name():
        return get_doc_page_name("couchbase-cli-node-to-node-encryption")

    @staticmethod
    def get_description():
        return "Change or get the cluster encryption configuration"
@rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False)
def execute(self, opts):
    """Get, set or cancel automatic rebalance retry settings.

    Retry configuration is Enterprise Edition only; --moves-per-node is
    available on both editions.
    """
    if sum([opts.set, opts.get, opts.cancel, opts.pending_info]) != 1:
        _exit_if_errors(['Provide either --set, --get, --cancel or --pending-info'])

    if opts.get:
        settings, err = self.rest.get_settings_rebalance()
        _exit_if_errors(err)
        if self.enterprise:
            retry_settings, err = self.rest.get_settings_rebalance_retry()
            _exit_if_errors(err)
            settings.update(retry_settings)
        if opts.output == 'json':
            print(json.dumps(settings))
        else:
            if self.enterprise:
                print(f'Automatic rebalance retry {"enabled" if settings["enabled"] else "disabled"}')
                print(f'Retry wait time: {settings["afterTimePeriod"]}')
                print(f'Maximum number of retries: {settings["maxAttempts"]}')
            print(f'Maximum number of vBucket move per node: {settings["rebalanceMovesPerNode"]}')
    elif opts.set:
        if (not self.enterprise
                and (opts.enable is not None or opts.wait_for is not None or opts.max_attempts is not None)):
            _exit_if_errors(["Automatic rebalance retry configuration is an Enterprise Edition only feature"])
        if opts.enable == '1':
            opts.enable = 'true'
        else:
            # NOTE(review): an unset --enable is also sent as 'false' here,
            # preserved from the original behaviour.
            opts.enable = 'false'
        if opts.wait_for is not None and (opts.wait_for < 5 or opts.wait_for > 3600):
            _exit_if_errors(['--wait-for must be a value between 5 and 3600'])
        if opts.max_attempts is not None and (opts.max_attempts < 1 or opts.max_attempts > 3):
            _exit_if_errors(['--max-attempts must be a value between 1 and 3'])
        if self.enterprise:
            _, err = self.rest.set_settings_rebalance_retry(opts.enable, opts.wait_for, opts.max_attempts)
            _exit_if_errors(err)
        if opts.moves_per_node is not None:
            if not 1 <= opts.moves_per_node <= 64:
                _exit_if_errors(['--moves-per-node must be a value between 1 and 64'])
            _, err = self.rest.set_settings_rebalance(opts.moves_per_node)
            _exit_if_errors(err)
        _success('Rebalance settings updated')
    elif opts.cancel:
        # FIX: the branch condition was `opts.cancel and not self.enterprise`,
        # which sent Enterprise --cancel requests into the pending-info branch
        # below. The EE check now lives inside the branch, matching the
        # pending-info branch's own structure.
        if not self.enterprise:
            _exit_if_errors(["Automatic rebalance retry configuration is an Enterprise Edition only feature"])
        if opts.rebalance_id is None:
            _exit_if_errors(['Provide the failed rebalance id using --rebalance-id <id>'])
        _, err = self.rest.cancel_rebalance_retry(opts.rebalance_id)
        _exit_if_errors(err)
        _success('Rebalance retry canceled')
    else:
        # --pending-info
        if not self.enterprise:
            _exit_if_errors(["Automatic rebalance retry configuration is an Enterprise Edition only feature"])
        rebalance_info, err = self.rest.get_rebalance_info()
        _exit_if_errors(err)
        print(json.dumps(rebalance_info))
This approach attempts to make the interface more intuitive by keeping a hierarchical structure where the service can have all its options under one command instead of having multiple completely separate commands (e.g settings-backups, manage-backups and repository-setup-backup.) The idea is that the interface will look like: couchbase-cli backup-service [settings | plans | repositories | cloud-credentials] where each element in [] is a subcommand to manage those options for that part of the backup service. As such if the user is not sure of what they want to do they can always do couchbase-cli backup-service -h to get a top level details and then move down the hierarchy to a more concrete option. """ def __init__(self): super(BackupService, self).__init__() self.parser.prog = "couchbase-cli backup-service" self.subparser = self.parser.add_subparsers(help='Sub command help', dest='sub_cmd', metavar='<subcommand>') self.settings_cmd = BackupServiceSettings(self.subparser) self.repository_cmd = BackupServiceRepository(self.subparser) self.plan_cmd = BackupServicePlan(self.subparser) def execute(self, opts): if opts.sub_cmd is None or opts.sub_cmd not in ['settings', 'repository', 'plan']: _exit_if_errors(['<subcommand> must be one off [settings, repository, plan]']) if opts.sub_cmd == 'settings': self.settings_cmd.execute(opts) elif opts.sub_cmd == 'repository': self.repository_cmd.execute(opts) elif opts.sub_cmd == 'plan': self.plan_cmd.execute(opts) @staticmethod def get_man_page_name(): return 'couchbase-cli-backup-service' + '.1' if os.name != 'nt' else '.html' @staticmethod def get_description(): return "Manage the backup service" class BackupServiceSettings: """Backup service settings is a nested command and manages the backup service settings""" def __init__(self, subparser): self.rest = None setting_parser = subparser.add_parser('settings', help='Manage backup service settings', add_help=False, allow_abbrev=False) group = 
@rest_initialiser(version_check=True, enterprise_check=True, cluster_init_check=True)
def execute(self, opts):
    """Get or patch the backup service history-rotation configuration."""
    if sum([opts.get, opts.set]) != 1:
        _exit_if_errors(['Must use one and only one of [--get, --set]'])
    if opts.get:
        self._get(opts)
    if opts.set:
        self._set(opts)

def _get(self, opts):
    """Print the backup service settings as JSON or in a human-readable form."""
    config, err = self.rest.get_backup_service_settings()
    _exit_if_errors(err)
    if opts.output == 'json':
        print(json.dumps(config, indent=4))
    else:
        print('-- Backup service configuration --')
        # Either setting may be absent from the response.
        size = config['history_rotation_size'] if 'history_rotation_size' in config else 'N/A'
        period = config['history_rotation_period'] if 'history_rotation_period' in config else 'N/A'
        print(f'History rotation size: {size} MiB')
        print(f'History rotation period: {period} days')

def _set(self, opts):
    """Patch the rotation settings; at least one option must be supplied."""
    if opts.rotation_period is None and opts.rotation_size is None:
        _exit_if_errors(['At least one of --history-rotation-period or --history-rotation-size is required'])
    _, err = self.rest.patch_backup_service_settings(opts.rotation_period, opts.rotation_size)
    _exit_if_errors(err)
    _success('Backup service settings patched')

@staticmethod
def get_man_page_name():
    # FIX: parenthesise the conditional — the ternary previously bound only to
    # '.1', so the whole expression evaluated to just '.html' on Windows.
    return 'couchbase-cli-backup-service-settings' + ('.1' if os.name != 'nt' else '.html')
settings' class BackupServiceRepository: """This command manages backup services repositories. Things this command can do is: - List repositories - Get repository - Add repository - Archive repository - Import repository - Delete repository """ def __init__(self, subparser): """setup the parser""" self.rest = None repository_parser = subparser.add_parser('repository', help='Manage backup repositories', add_help=False, allow_abbrev=False) # action flags are mutually exclusive action_group = repository_parser.add_mutually_exclusive_group(required=True) action_group.add_argument('--list', action='store_true', help='Get all repositories') action_group.add_argument('--get', action='store_true', help='Get repository by id') action_group.add_argument('--archive', action='store_true', help='Archive a repository') action_group.add_argument('--add', action='store_true', help='Add a new active repository') action_group.add_argument('--remove', action='store_true', help='Remove an archived/imported repository') action_group.add_argument('-h', '--help', action=CBHelpAction, klass=self, help="Prints the short or long help message") # other arguments group = repository_parser.add_argument_group('Backup service repository configuration') group.add_argument('--id', metavar='<id>', help='The repository id') group.add_argument('--new-id', metavar='<id>', help='The new repository id') group.add_argument('--state', metavar='<state>', choices=['active', 'archived', 'imported'], help='The repository state.') group.add_argument('--plan', metavar='<plan_name>', help='The plan to use as base for the repository') group.add_argument('--backup-archive', metavar='<archive>', help='The location to store the backups in') group.add_argument('--bucket-name', metavar='<name>', help='The bucket to backup') group.add_argument('--remove-data', action='store_true', help='Used to delete the repository data') # the cloud arguments are given the own group so that the short help is a bit more readable 
        cloud_group = repository_parser.add_argument_group('Backup repository cloud arguments')
        cloud_group.add_argument('--cloud-credentials-name', metavar='<name>',
                                 help='The stored clouds credential name to use for the new repository')
        cloud_group.add_argument('--cloud-staging-dir', metavar='<path>', help='The path to the staging directory')
        cloud_group.add_argument('--cloud-credentials-id', metavar='<id>',
                                 help='The ID to use to communicate with the object store')
        cloud_group.add_argument('--cloud-credentials-key', metavar='<key>',
                                 help='The key to use to communicate with the object store')
        cloud_group.add_argument('--cloud-credentials-region', metavar='<region>',
                                 help='The region for the object store')
        cloud_group.add_argument('--cloud-endpoint', metavar='<endpoint>',
                                 help='Overrides the default endpoint used to communicate with the cloud provider. '
                                      'Use for object store compatible third party solutions')
        cloud_group.add_argument('--s3-force-path-style', action='store_true',
                                 help='When using S3 or S3 compatible storage it will use the old path style.')

    @rest_initialiser(version_check=True, enterprise_check=True, cluster_init_check=True)
    def execute(self, opts):
        """Run the backup-service repository subcommand

        Dispatches to the handler matching whichever mutually exclusive action flag was given.
        """
        if opts.list:
            self.list_repositories(opts.state, opts.output == 'json')
        elif opts.get:
            self.get_repository(opts.id, opts.state, opts.output == 'json')
        elif opts.archive:
            self.archive_repository(opts.id, opts.new_id)
        elif opts.remove:
            self.remove_repository(opts.id, opts.state, opts.remove_data)
        elif opts.add:
            self.add_active_repository(opts.id, opts.plan, opts.backup_archive, bucket_name=opts.bucket_name,
                                       credentials_name=opts.cloud_credentials_name,
                                       credentials_id=opts.cloud_credentials_id,
                                       credentials_key=opts.cloud_credentials_key,
                                       cloud_region=opts.cloud_credentials_region,
                                       staging_dir=opts.cloud_staging_dir, cloud_endpoint=opts.cloud_endpoint,
                                       s3_path_style=opts.s3_force_path_style)

    def remove_repository(self, repository_id: str, state: str, delete_repo: bool = False):
        """Removes the repository in state 'state' and with id 'repository_id'

        Args:
            repository_id (str): The repository id
            state (str): It must be either archived or imported otherwise it will return an error
            delete_repo (bool): Whether or not the backup repository should be deleted
        """
        if not repository_id:
            _exit_if_errors(['--id is required'])
        # the following is divided in two options to give better error messages depending if state is missing or if
        # it is invalid
        if not state:
            _exit_if_errors(['--state is required'])
        if state not in ['archived', 'imported']:
            _exit_if_errors(['can only delete archived or imported repositories to delete an active repository it needs to '
                             'be archived first'])
        # can only delete repo of archived repositories
        if delete_repo and state == 'imported':
            _exit_if_errors(['cannot delete the repository for an imported repository'])

        _, errors = self.rest.delete_backup_repository(repository_id, state, delete_repo)
        _exit_if_errors(errors)
        _success('Repository was deleted')

    def add_active_repository(self, repository_id: str, plan: str, archive: str, **kwargs):
        """Adds a new active repository identified by 'repository_id' and that uses 'plan' as base.

        Args:
            repository_id (str): The ID to give to the repository. This must be unique, if it is not an error will
                be returned.
            plan (str): The name of the plan to use as base for the repository. If it does not exist the service
                will return an error.
            archive (str): The location to store the data in. It must be accessible by all nodes. To use S3 instead
                of providing a path to a filesystem directory use the syntax.
                s3://<bucket-name>/<optional_prefix>/<archive>
            **kwargs: Optional parameters [bucket_name, credentials_name, credentials_id, credentials_key,
                cloud_region, staging_dir, cloud_endpoint, s3_path_style]
        """
        if not repository_id:
            _exit_if_errors(['--id is required'])
        if not plan:
            _exit_if_errors(['--plan is required'])
        if not archive:
            _exit_if_errors(['--backup-archive is required'])

        # Cloud parameter validation only applies to s3:// archives; it is a no-op otherwise.
        _exit_if_errors(self.check_cloud_params(archive, **kwargs))

        add_request_body = {
            'plan': plan,
            'archive': archive,
        }

        # Only include the optional fields that were actually supplied; the service rejects unknown/empty ones.
        if kwargs.get('bucket_name', False):
            add_request_body['bucket_name'] = kwargs.get('bucket_name')
        if kwargs.get('credentials_name', False):
            add_request_body['cloud_credential_name'] = kwargs.get('credentials_name')
        if kwargs.get('credentials_id', False):
            add_request_body['cloud_credentials_id'] = kwargs.get('credentials_id')
        if kwargs.get('credentials_key', False):
            add_request_body['cloud_credentials_key'] = kwargs.get('credentials_key')
        if kwargs.get('cloud_region', False):
            add_request_body['cloud_credentials_region'] = kwargs.get('cloud_region')
        if kwargs.get('cloud_endpoint', False):
            add_request_body['cloud_endpoint'] = kwargs.get('cloud_endpoint')
        if kwargs.get('s3_path_style', False):
            add_request_body['cloud_force_path_style'] = kwargs.get('s3_path_style')

        _, errors = self.rest.add_backup_active_repository(repository_id, add_request_body)
        _exit_if_errors(errors)
        _success('Added repository')

    @staticmethod
    def check_cloud_params(archive: str, **kwargs) -> Optional[List[str]]:
        """Checks that inside kwargs there is a valid set of parameters to add a cloud repository

        Args:
            archive (str): The archive to use for the repository.

        Returns:
            A list of error strings, or None when the parameters are valid (or the archive is not cloud-based).
        """
        # If not an s3 archive skip this
        if not archive.startswith('s3://'):
            return None

        creds_name = kwargs.get('credentials_name')
        region = kwargs.get('cloud_region')
        creds_id = kwargs.get('credentials_id')
        creds_key = kwargs.get('credentials_key')
        staging_dir = kwargs.get('staging_dir')

        # Either a stored credentials name OR an explicit id/key pair must be given, never both and never neither.
        if (creds_name and (creds_id or creds_key)) or (not creds_name and not (creds_id or creds_key)):
            return ['must provide either --cloud-credentials-name or --cloud-credentials-key and '
                    '--cloud-credentials-id']

        if not staging_dir:
            return ['--cloud-staging-dir is required']

        # Stored credentials carry their own region; explicit credentials need one supplied here.
        if not creds_name and not region:
            return ['--cloud-credentials-region is required']

        return None

    def archive_repository(self, repository_id: str, new_id: str):
        """Archive a repository. The archived repository will have the id `new_id`

        Args:
            repository_id (str): The active repository ID to be archived
            new_id (str): The id that will be given to the archived repository
        """
        if not repository_id:
            _exit_if_errors(['--id is required'])
        if not new_id:
            _exit_if_errors(['--new-id is required'])

        _, errors = self.rest.archive_backup_repository(repository_id, new_id)
        _exit_if_errors(errors)
        _success('Archived repository')

    def list_repositories(self, state: Optional[str] = None, json_out: bool = False):
        """List the backup repositories. If a repository state is given only repositories in that state will be
        listed.

        This command supports listing both in json and human friendly format.

        Args:
            state (str, optional): One of ['active', 'imported', 'archived']. The repository on this state will be
                retrieved.
            json_out (bool): If True the output will be JSON otherwise it will be a human friendly format.
        """
        # No state given means all states are queried, one REST call per state.
        states = ['active', 'archived', 'imported'] if state is None else [state]
        results = {}
        for get_state in states:
            repositories, errors = self.rest.get_backup_service_repositories(state=get_state)
            _exit_if_errors(errors)
            results[get_state] = repositories

        if json_out:
            print(json.dumps(results, indent=2))
        else:
            self.human_friendly_print_repositories(results)

    def get_repository(self, repository_id: str, state: str, json_out: bool = False):
        """Retrieves one repository from the backup service

        If the repository does not exist an error will be returned

        Args:
            repository_id (str): The repository id to be retrieved
            state (str): The state of the repository to retrieve
            json_out (bool): If True the output will be JSON otherwise it will be a human friendly format.
        """
        if not repository_id:
            _exit_if_errors(['--id is required'])
        if not state:
            _exit_if_errors(['--state is required'])

        repository, errors = self.rest.get_backup_service_repository(repository_id, state)
        _exit_if_errors(errors)

        if json_out:
            print(json.dumps(repository, indent=2))
        else:
            self.human_firendly_print_repository(repository)

    @staticmethod
    def human_firendly_print_repository(repository):
        """Print the repository in a human friendly format

        Args:
            repository (obj): The backup repository information
        """
        print(f'ID: {repository["id"]}')
        print(f'State: {repository["state"]}')
        # Healthy unless a health object is present and explicitly reports unhealthy.
        print(f'Healthy: {(not ("health" in repository and not repository["health"]["healthy"]))!s}')
        print(f'Archive: {repository["archive"]}')
        print(f'Repository: {repository["repo"]}')

        if 'bucket' in repository:
            print(f'Bucket: {repository["bucket"]["name"]}')
        if 'plan_name' in repository and repository['plan_name'] != "":
            print(f'plan: {repository["plan_name"]}')
        print(f'Creation time: {repository["creation_time"]}')

        if 'scheduled' in repository and repository['scheduled']:
            print()
            BackupServiceRepository.human_firendly_print_repository_scheduled_tasks(repository['scheduled'])

        one_off = repository['running_one_off'] if 'running_one_off' in repository else None
running_scheduled = repository['running_tasks'] if 'running_tasks' in repository else None if one_off or running_scheduled: print() BackupServiceRepository.human_friendly_print_running_tasks(one_off, running_scheduled) @staticmethod def human_friendly_print_running_tasks(one_off, scheduled): """Prints the running task summary in a human friendly way Args: one_off (map<str, task object>): Running one off tasks scheduled (map<str, task object>): Running scheduled tasks """ all_vals = [] name_pad = 5 if one_off: for name in one_off: if len(name) > name_pad: name_pad = len(name) all_vals += one_off.values() if scheduled: for name in scheduled: if len(name) > name_pad: name_pad = len(name) all_vals += scheduled.values() name_pad += 1 header = f'{"Name":<{name_pad}}| Task type | Status | Start' print(header) print('-' * (len(header) + 5)) for task in all_vals: print(f'{task["name"]:<{name_pad}}| {task["type"].title():<10}| {task["status"]:<8} | {task["start"]}') @staticmethod def human_firendly_print_repository_scheduled_tasks(scheduled): """Print the scheduled task in a tabular format""" name_pad = 5 for name in scheduled: if len(name) > name_pad: name_pad = len(name) name_pad += 1 header = f'{"Name":<{name_pad}}| Task type | Next run' print('Scheduled tasks:') print(header) print('-' * (len(header) + 5)) for task in scheduled.values(): print(f'{task["name"]:<{name_pad}}| {task["task_type"].title():<10}| {task["next_run"]}') @staticmethod def human_friendly_print_repositories(repositories_map): """This will print the repositories in a tabular format Args: repository_map (map<state (str), repository (list of objects)>) """ repository_count = 0 id_pad = 5 plan_pad = 7 for repositories in repositories_map.values(): for repository in repositories: repository_count += 1 if id_pad < len(repository['id']): id_pad = len(repository['id']) if 'plan_name' in repository and plan_pad < len(repository['plan_name']): plan_pad = len(repository['plan_name']) if repository_count == 0: 
print('No repositories found') return # Get an extra space between the the information and the column separator plan_pad += 1 id_pad += 1 # build header header = f'{"ID":<{id_pad}}| {"State":<9}| {"plan":<{plan_pad}}| Healthy | Repository' print(header) print('-' * len(header)) # print repository summary for _, repositories in sorted(repositories_map.items()): for repository in repositories: healthy = not ('health' in repository and not repository['health']['healthy']) # archived and imported repositories may not have plans so we have to replace the empty string with N/A plan_name = 'N/A' if 'plan_name' in repository and len(repository['plan_name']) != 0: plan_name = repository['plan_name'] print(f"{repository['id']:<{id_pad}}| {repository['state']:<9}| {plan_name:<{plan_pad}}| " f" {healthy!s:<7}| {repository['repo']}") @staticmethod def get_man_page_name(): return 'couchbase-cli-backup-service-repository' + '.1' if os.name != 'nt' else '.html' @staticmethod def get_description(): return 'Manage backup service repositories' class BackupServicePlan: """This command manages backup services plans. 
Things this command can do is: - List plans - Add delete - Delete plans """ def __init__(self, subparser): """setup the parser""" self.rest = None plan_parser = subparser.add_parser('plan', help='Manage backup plans', add_help=False, allow_abbrev=False) # action flags are mutually exclusive action_group = plan_parser.add_mutually_exclusive_group(required=True) action_group.add_argument('--list', action='store_true', help='List all available backup plans') action_group.add_argument('--get', action='store_true', help='Get a plan by name') action_group.add_argument('--remove', action='store_true', help='Remove a plan by name') action_group.add_argument('--add', action='store_true', help='Add a new plan') action_group.add_argument('-h', '--help', action=CBHelpAction, klass=self, help="Prints the short or long help message") options = plan_parser.add_argument_group('Plan options') options.add_argument('--name', metavar='<name>', help='Plan name') options.add_argument('--description', metavar='<description>', help='Optional description') options.add_argument('--services', metavar='<services>', help='A comma separated list of services to backup') options.add_argument('--task', metavar='<tasks>', nargs='+', help='JSON task definition') @rest_initialiser(version_check=True, enterprise_check=True, cluster_init_check=True) def execute(self, opts): """Run the backup plan managment command""" if opts.list: self.list_plans(opts.output == 'json') elif opts.get: self.get_plan(opts.name, opts.output == 'json') elif opts.remove: self.remove_plan(opts.name) elif opts.add: self.add_plan(opts.name, opts.services, opts.task, opts.description) def add_plan(self, name: str, services: Optional[str], tasks: Optional[List[str]], description: Optional[str]): """Add a new backup plan The validation of the inputs in the CLI is intentionally lacking as this is offloaded to the backup service. Args: name (str): The name to give the new plan. It must be unique. 
services (optional list): A list of services to backup if empty all services are backed up. tasks (optional list): A list of JSON strings representing the tasks to be run. description (optional str): A optional description string. """ if not name: _exit_if_errors(['--name is required']) service_list = [] if services: service_list = [service.strip() for service in services.split(',')] tasks_objects = [] if tasks: for task_str in tasks: try: task = json.loads(task_str) tasks_objects.append(task) except json.decoder.JSONDecodeError as json_error: _exit_if_errors([f'invalid task {json_error!s}']) plan = {} if service_list: plan['services'] = service_list if tasks_objects: plan['tasks'] = tasks_objects if description: plan['description'] = description _, errors = self.rest.add_backup_plan(name, plan) _exit_if_errors(errors) _success('Added plan') def remove_plan(self, name: str): """Removes a plan by name""" if not name: _exit_if_errors(['--name is required']) _, errors = self.rest.delete_backup_plan(name) _exit_if_errors(errors) _success('Plan removed') def get_plan(self, name: str, json_output: bool = False): """Gets a backup plan by name Args: name (str): The name of the plan to retrieve json_output (bool): Whether to print in JSON or a more human friendly way """ if not name: _exit_if_errors(['--name is required']) plan, errors = self.rest.get_backup_plan(name) _exit_if_errors(errors) if json_output: print(json.dumps(plan, indent=2)) else: self.human_print_plan(plan) def list_plans(self, json_output: bool = False): """Prints all the plans stored in the backup service Args: json_output (bool): Whether to print in JSON or a more human friendly way """ plans, errors = self.rest.list_backup_plans() _exit_if_errors(errors) if json_output: print(json.dumps(plans, indent=2)) else: self.human_print_plans(plans) @staticmethod def human_print_plan(plan: object): """Prints the plan in a human friendly way""" print(f'Name: {plan["name"]}') print(f'Description: 
{plan["description"] if "description" in plan else "N/A"}') print(f'Services: {BackupServicePlan.service_list_to_str(plan["services"])}') print(f'Default: {(plan["default"] if "deafult" in plan else False)!s}') # If the are no tasks return if not plan["tasks"]: return print() print('Tasks:') task_name_pad = 5 schedule_pad = 10 for task in plan['tasks']: if len(task['name']) > task_name_pad: task_name_pad = len(task['name']) task['schedule_str'] = BackupServicePlan.format_schedule(task['schedule']) if len(task['schedule_str']) > schedule_pad: schedule_pad = len(task['schedule_str']) task_name_pad += 1 schedule_pad += 1 header = f'{"Name":<{task_name_pad}} | {"Schedule":<{schedule_pad}} | Options' print(header) print('-' * (len(header) + 5)) for task in plan['tasks']: options = BackupServicePlan.format_options(task) print(f'{task["name"]:<{task_name_pad}} | {task["schedule_str"]:<{schedule_pad}} | {options}') @staticmethod def format_options(task: object) -> str: """Format the full backup or merge options""" options = 'N/A' if task['task_type'] == 'BACKUP' and task['full_backup']: options = 'Full backup' elif task['task_type'] == 'MERGE': if 'merge_options' in task: options = (f'Merge from {task["merge_options"]["offset_start"]} to ' f'{task["merge_options"]["offset_end"]}') else: options = 'Merge everything' return options @staticmethod def format_schedule(schedule: object) -> str: """Format the schedule object in a string of the format <task> every <frequency>? 
<period> (at <time>)?""" task_start = f'{schedule["job_type"].lower()}' frequency_part = 'every' if schedule['frequency'] == 1: period = schedule["period"].lower() period = period if period[-1] != 's' else period[:-1] frequency_part += f' {period}' else: frequency_part += f' {schedule["frequency"]} {schedule["period"].lower()}' time_part = f' at {schedule["time"]}' if 'time' in schedule else '' return f'{task_start} {frequency_part}{time_part}' @staticmethod def human_print_plans(plans: List[Any]): """Prints a table with an overview of each plan""" # if plans is empty or none print no plans message if not plans: print('No plans') return name_pad = 5 service_pad = 8 for plan in plans: if len(plan['name']) > name_pad: name_pad = len(plan['name']) services_str = BackupServicePlan.service_list_to_str(plan['services']) if len(services_str) > service_pad: service_pad = len(services_str) name_pad += 1 service_pad += 1 header = f'{"Name":<{name_pad}} | # Tasks | {"Services":<{service_pad}} | Default' print(header) print('-' * (len(header) + 5)) for plan in plans: task_len = len(plan['tasks']) if 'tasks' in plan and plan['tasks'] else 0 print(f'{plan["name"]:<{name_pad}} | {task_len:<7} | ' f'{BackupServicePlan.service_list_to_str(plan["services"]):<{service_pad}} | ' f'{(plan["default"] if "default" in plan else False)!s}') @staticmethod def service_list_to_str(services: Optional[List[Any]]) -> str: """convert the list of services to a concise list of services""" if not services: return 'all' # a way to convert codenames to visible name convert = {'gsi': 'Indexing', 'cbas': 'Analytics', 'ft': 'Full Text Search'} return ', '.join([convert[service] if service in convert else service.title() for service in services]) @staticmethod def get_man_page_name(): return 'couchbase-cli-backup-service-plan' + '.1' if os.name != 'nt' else '.html' @staticmethod def get_description(): return 'Manage backup service plans'
{ "repo_name": "couchbase/couchbase-cli", "path": "cbmgr.py", "copies": "1", "size": "266393", "license": "apache-2.0", "hash": 2712867302306999300, "line_mean": 46.9469042477, "line_max": 124, "alpha_frac": 0.5824026908, "autogenerated": false, "ratio": 4.188832630983081, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.527123532178308, "avg_score": null, "num_lines": null }
""" A countdown in a 16x2 LCD. It is possible to pause or reset the countdown using buttons. When the countdown ends buzzes continuously. Requires: - Adafruit_CharLCD (https://github.com/adafruit/Adafruit_Python_CharLCD) Hardware: - 16x2 LCD (8 digital pins) - Buzzer (1 digital pin) - 2x buttons (1 digital pin each) """ import time import pingo import Adafruit_CharLCD.Adafruit_CharLCD as LCD lcd, buzzer, pause, pause_status, reset, countdown, COUNTDOWN = (None,) * 7 def startup(): global lcd, buzzer, pause, pause_status, reset, countdown, COUNTDOWN # LCD pins lcd_rs = 18 lcd_en = 27 lcd_d4, lcd_d5, lcd_d6, lcd_d7 = 22, 23, 24, 25 # LCD config lcd_backlight = 4 lcd_columns = 16 lcd_rows = 2 lcd = LCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7, lcd_columns, lcd_rows, lcd_backlight) board = pingo.detect.get_board() # buzzer pin (BOARD) buzzer = board.pins[33] buzzer.mode = pingo.OUT # pause and reset buttons (BOARD) pause = board.pins[29] pause.mode = pingo.IN reset = board.pins[31] reset.mode = pingo.IN COUNTDOWN = 300 pause_status = False countdown = COUNTDOWN def beep(interval=0.06, times=4): for _ in range(times): buzzer.hi() time.sleep(interval) buzzer.lo() time.sleep(interval) def seconds_to_minutes(seconds): minutes = str(countdown / 60).zfill(2) seconds = str(countdown % 60).zfill(2) return '{} min {} seg'.format(minutes, seconds) def reset_countdown(delay_seconds=3): beep(interval=0.03, times=3) for seconds in reversed(range(1, delay_seconds + 1)): lcd.clear() lcd.message(u'RESETANDO...\n{} sec'.format(seconds)) time.sleep(1) global countdown countdown = COUNTDOWN lcd.clear() if __name__ == "__main__": startup() while True: lcd.clear() if pause.state == pingo.HIGH: pause_status = not pause_status lcd.clear() beep(interval=0.03, times=2) if pause_status: lcd.message(u'PAUSADO em\n{}'.format(seconds_to_minutes(countdown))) time.sleep(2.5) continue if reset.state == pingo.HIGH: reset_countdown(3) if countdown == 0: lcd.message(u'Acabou! 
Acabou!\nO turno acabou!') beep() time.sleep(0.999) continue lcd.message(u'Turno acaba em:\n{}'.format(seconds_to_minutes(countdown))) time.sleep(0.999) countdown -= 1
{ "repo_name": "lamenezes/pingo-py", "path": "pingo/examples/lcd16x2_countdown.py", "copies": "1", "size": "2575", "license": "mit", "hash": -5359123334487941000, "line_mean": 22.4090909091, "line_max": 81, "alpha_frac": 0.6027184466, "autogenerated": false, "ratio": 3.040141676505313, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4142860123105313, "avg_score": null, "num_lines": null }
#a count with metadata writing func
#count_saxs( 'Silica 1 fr x .1 exp, mbs 0.1x0.6, bds 15x15, prekin .1x.08', 1, .1, .1, new_pos=True)
#RE(count_saxs( 'Silica 1 fr x .1 exp, mbs 0.1x0.6, bds 15x15, prekin .1x.08', 1, .1, .1, new_pos=True))
#RE(go_to_sleep())
#RE(count_saxs( 'AuNP_2DSheet',1,.5,None,new_pos=True))

##def multi_scan( num=10):
##    i=0
##    for i in range(num):
##        RE(count_saxs( 'Au1D_Ladder',1,1,None,new_pos=True, bpm_on=False))
##        print (i)
##        i=i+1

#RE = gs.RE  # convenience alias


def shutter_test():
    """Repeatedly take fast-shutter data series while the vacuum is good.

    Waits 15 minutes whenever the chamber pressure rises above 1.1e-7 and
    aborts with vac_Exception if it does not recover after one wait.
    """
    for i in range(1, 56):
        if caget('XF:11IDB-VA{Att:1-CCG:1}P-I') <= 1.1E-7:
            print('taking data series with fast shutter...')
            series(shutter_mode='multi', expt=.00134, acqp=.02, imnum=2000,
                   comment='shutter test')
        else:
            # BUG FIX: time.ctime is a function -- it must be *called*;
            # the original `time.ctime + '...'` raised TypeError here.
            print(time.ctime() + ': vacuum > 1.1E-7 -> waiting for 15 min...')
            sleep(900)
            if caget('XF:11IDB-VA{Att:1-CCG:1}P-I') <= 1.1E-7:
                print('taking data series with fast shutter...')
                series(shutter_mode='multi', expt=.00134, acqp=.02, imnum=2000,
                       comment='shutter test')
            else:
                raise vac_Exception('something is wrong: vacuum does not recover...')
        # BUG FIX: str(time.ctime) printed the function repr, not the time.
        print(time.ctime() + ': waiting for 15 min...')
        sleep(900)


class vac_Exception(Exception):
    """Raised by shutter_test when the chamber vacuum does not recover."""
    pass


def capillary_bottom_in():
    ''' This function is wrote for Sandro's three samples, Nov 2, 2016'''
    mov(diff.xh, 0.45)
    RE.md['sample'] = 'S2_reference_sample'
    RE.md['sample_description'] = 'Au_30nm_in_0.6mole_glycerol aqueous solution'
    mov(diff.yh, 0.37)


def capillary_middle_in():
    '''Move the middle capillary into the beam and set sample metadata.'''
    mov(diff.xh, 0.45)
    RE.md['sample'] = 'g_0.15'
    RE.md['sample_description'] = 'Au_30nm_in_0.15mole_glycerol aqueous solution'
    mov(diff.yh, 5.5)


def capillary_top_in():
    '''Move the top capillary into the beam and set sample metadata.'''
    mov(diff.xh, 0.45)
    RE.md['sample'] = 'g_0.1'
    RE.md['sample_description'] = 'Au_30nm_in_0.1mole_glycerol aqueous solution'
    mov(diff.yh, 10.5)


def xpcs_measure(Measurement):
    ''' XPCS routine, Nov 3, 2016

    Records the current temperature in the run metadata and takes an XPCS
    series tagged with it.
    '''
    T = caget('XF:11IDB-ES{Env:01-Chan:C}T-I')
    RE.md['Temperature'] = T
    print(RE.md)
    take_xpcs_series(Measurement + '--T=%s' % T)


def xpcs_measure_not_used(pos, Measurement):
    ''' This function is wrote for Sandro's three samples, Nov 2, 2016'''
    T = caget('XF:11IDB-ES{Env:01-Chan:C}T-I')
    if pos == 'bottom':
        capillary_bottom_in()
        print('It will measure the bottom cappilary')
    elif pos == 'middle':
        capillary_middle_in()
        print('It will measure the middle cappilary')
    elif pos == 'top':
        # BUG FIX: the original moved the *bottom* capillary in for pos=='top'.
        capillary_top_in()
        print('It will measure the top cappilary')
    RE.md['Temperature'] = T
    take_xpcs_series('S2' + '--' + Measurement + '--T=%s' % T)


def measure_bottom(T, Measurement):
    ''' This function is wrote for Sandro's three samples, Nov 2, 2016'''
    capillary_bottom_in()
    RE.md['sample'] = 'S2_reference_sample'
    RE.md['sample_description'] = 'Au_30nm_in_0.6mole_glycerol aqueous solution'
    RE.md['Temperature'] = T
    take_xpcs_series('S2' + '--' + Measurement + '--T=%s' % T)


def measure_middle(T, Measurement):
    ''' This function is wrote for Sandro's three samples, Nov 2, 2016'''
    capillary_middle_in()
    RE.md['sample'] = 'g_0.15'
    RE.md['sample_description'] = 'Au_30nm_in_0.15mole_glycerol aqueous solution'
    RE.md['Temperature'] = T
    take_xpcs_series(RE.md['sample'] + '--' + Measurement + '--T=%s' % T)


def measure_top(T, Measurement):
    ''' This function is wrote for Sandro's three samples, Nov 2, 2016'''
    capillary_top_in()
    RE.md['sample'] = 'g_0.1'
    RE.md['sample_description'] = 'Au_30nm_in_0.1mole_glycerol aqueous solution'
    RE.md['Temperature'] = T
    take_xpcs_series(RE.md['sample'] + '--' + Measurement + '--T=%s' % T)


def take_xpcs_series(Measurement):
    ''' Oct 24, 2016, for Pierce measurements
        Open fast shutter, stay one second,
        Switch X/Y BPM feedback
        Take count
    '''
    RE(bp.abs_set(fast_sh, 1))
    sleep(1)
    xbpm_y_pos = 'XF:11IDB-BI{XBPM:02}Fdbk:BEn-SP'
    xbpm_x_pos = 'XF:11IDB-BI{XBPM:02}Fdbk:AEn-SP'
    caput(xbpm_y_pos, 1)
    caput(xbpm_x_pos, 1)
    RE(count([eiger4m_single]), Measurement=Measurement)


def count_saxs(type, fnum=1, expt=0.1, acqt=None, att_t=1, save=True,
               new_pos=False, bpm_on=False):
    """Bluesky plan: configure the Eiger 4M and take a SAXS count.

    Sets measurement metadata, the attenuator, exposure/acquisition timing,
    opens the fast shutter with the YAG out, optionally nudges the sample
    in y (new_pos) and restores 1-frame defaults afterwards.
    """
    #RE( count_('alignment', 1, 0.1, 0.1, att_t = 1, save=True, new_pos = False) )
    #RE( count_( 'XPCS_200C_1000 frames_1s', 1000, 1, 1 , att_t = 1, save=True, new_pos = False) )
    if acqt is None:
        acqt = expt
    type = type + ' %d fr X %s exp' % (fnum, expt)
    if att_t != 1:
        # NOTE(review): this appends the 'N fr X t exp' suffix a second time
        # when an attenuator is used -- looks unintended, but the string only
        # feeds metadata, so the original naming is preserved here.
        type = type + ' %d fr X %s exp' % (fnum, expt) + 'att_%s' % att_t
    att.set_T(att_t)  # put atten
    RE.md['Measurement'] = type
    ##Did not find how to set save
    eiger4m_save = 'XF:11IDB-ES{Det:Eig4M}cam1:SaveFiles'
    caput(eiger4m_save, save)
    yield from bp.abs_set(eiger4m.cam.num_images, fnum)
    yield from bp.abs_set(eiger4m.cam.acquire_time, expt)
    yield from bp.abs_set(eiger4m.cam.acquire_period, acqt)
    yield from bp.abs_set(eiger4m.cam.array_counter, 0)
    yield from YAG_FastSh(yag='off', fs='on')  # put fast shutter on, yag at empty position
    BPMFeed(xbpm_y='on')
    sleep(3)
    BPMFeed(xbpm_y='on')
    if new_pos:
        yield from bp.abs_set(diff.yh, diff.yh.user_readback.value + 0.05)
    yield from count([eiger4m_single])
    #caput( eiger4m_save, False)
    # restore single-frame, short-exposure defaults
    yield from bp.abs_set(eiger4m.cam.num_images, 1)
    yield from bp.abs_set(eiger4m.cam.acquire_period, .01)
    yield from bp.abs_set(eiger4m.cam.acquire_time, .01)
    if bpm_on:
        yield from go_to_sleep()
    #YAG_FastSh_BPMFeed( 'off', 'on','on')


def go_to_sleep():
    """Put the YAG back in with the shutter open and re-enable BPM feedback."""
    yield from YAG_FastSh(yag='on', fs='on')
    BPMFeed(xbpm_y='on')
    sleep(3)
    BPMFeed(xbpm_y='on')


#a count with metadata writing func
def count_gisaxs(type, fnum=1, acqt=0.1, expt=0.1, phh=-0.16, att_t=1,
                 saxs_bstx=111.6, save=True, new_pos=False):
    """Bluesky plan: configure the Eiger 4M and take a GISAXS count.

    Moves the beamstop in, sets the incidence angle (phh), optionally
    nudges the sample in x (new_pos), then counts.
    """
    #RE( count_('alignment', 1, 0.1, 0.1, att_t = 1, save=True, new_pos = False) )
    #RE( count_( 'XPCS_200C_1000 frames_1s', 1000, 1, 1 , att_t = 1, save=True, new_pos = False) )
    RE.md['Measurement'] = type
    ##Did not find how to set save
    eiger4m_save = 'XF:11IDB-ES{Det:Eig4M}cam1:SaveFiles'
    caput(eiger4m_save, save)
    #caput('XF:11IDB-ES{Det:Eig4M}cam1:NumImages', fnum )
    #caput('XF:11IDB-ES{Det:Eig4M}cam1:AcquireTime', acqt )
    #caput('XF:11IDB-ES{Det:Eig4M}cam1:AcquirePeriod', expt)
    yield from bp.abs_set(eiger4m.cam.num_images, fnum)
    yield from bp.abs_set(eiger4m.cam.acquire_period, acqt)
    yield from bp.abs_set(eiger4m.cam.acquire_time, expt)
    yield from bp.abs_set(saxs_bst.x, saxs_bstx)  # move beamstop in
    yield from bp.abs_set(diff.phh, phh)
    yield from bp.abs_set(eiger4m.cam.array_counter, 0)
    if new_pos:
        yield from bp.abs_set(diff.xh, diff.xh.user_readback.value + 0.05)
        #yield from bp.abs_set(diff.yh, diff.yh.user_readback.value + 0.05)
        # return None
    yield from YAG_FastSh(yag='off', fs='on')
    BPMFeed(xbpm_y='on')
    att.set_T(att_t)  # put atten
    BPMFeed(xbpm_y='on')
    yield from count([eiger4m_single])
    caput(eiger4m_save, False)


#set Eiger4M image counter as 0
def imn():
    yield from bp.abs_set(eiger4m.cam.array_counter, 0)


def gisaxs_yh_align(yh=None, phh=0, att_t=1e-4, saxs_bstx=111.6):
    """Scan diff.yh through the beam (beamstop retracted) for alignment."""
    #RE(gisaxs_yh_align( yh = None, phh = 0, att_t = 1e-4, saxs_bstx = 111.6))
    att.set_T(att_t)  # put atten
    yield from bp.abs_set(diff.phh, phh)
    yield from bp.abs_set(saxs_bst.x, saxs_bstx + 5)  # move beamstop away from beam +5 mm
    if yh is not None:
        yield from bp.abs_set(diff.yh, yh)
    eiger4m_save = 'XF:11IDB-ES{Det:Eig4M}cam1:SaveFiles'
    caput(eiger4m_save, False)
    caput('XF:11IDB-ES{Det:Eig4M}cam1:NumImages', 1)
    caput('XF:11IDB-ES{Det:Eig4M}cam1:AcquireTime', 0.1)
    caput('XF:11IDB-ES{Det:Eig4M}cam1:AcquirePeriod', 0.1)
    yield from dscan(diff.yh, -.05, .05, 25)


#Fast YAG in/out, shutter on/off, BPM_Feedback on/off
def YAG_FastSh(yag='on', fs='on'):
    """Plan: move the YAG screen in ('on') or out, and open/close the fast shutter.

    BUG FIX: the string-mode checks used `is 'on'`, which compares object
    identity and only works by accident of string interning; they now use `==`.
    """
    yag_pos = 'XF:11IDB-OP{Mon:Foil-Ax:X}Mtr.VAL'  # 26 for empty,
    xbpm_y_pos = 'XF:11IDB-BI{XBPM:02}Fdbk:BEn-SP'
    fs_pos = 'XF:11IDB-ES{Zebra}:SOFT_IN:B0'
    if yag == 'on':
        #caput( yag_pos, 30)
        if abs(foil_x.user_readback.value - (30)) >= .3:
            yield from bp.abs_set(foil_x.user_setpoint, 30)
            sleep(20)  # allow the foil stage to finish moving
        print('YAG is in the beam')
    else:
        if abs(foil_x.user_readback.value - (-26)) >= .3:
            yield from bp.abs_set(foil_x.user_setpoint, -26.0)
            sleep(20)
        print('Empty is in the beam')
    if fs == 'on':
        yield from bp.abs_set(fast_sh, 1)
    else:
        yield from bp.abs_set(fast_sh, 0)


def BPMFeed(xbpm_y='on'):
    """Enable ('on') or disable the XBPM vertical feedback loop."""
    xbpm_y_pos = 'XF:11IDB-BI{XBPM:02}Fdbk:BEn-SP'
    # BUG FIX: `is 'on'` -> `== 'on'` (identity vs equality).
    if xbpm_y == 'on':
        caput(xbpm_y_pos, 1)
    else:
        caput(xbpm_y_pos, 0)


#Fast YAG in/out, shutter on/off, BPM_Feedback on/off
def YAG_FastSh_BPMFeed(yag='on', fs='on', xbpm_y='on'):
    """Combined control of fast shutter, XBPM feedback and YAG position.

    BUG FIX: `is 'on'` -> `== 'on'` throughout (identity vs equality).
    """
    yag_pos = 'XF:11IDB-OP{Mon:Foil-Ax:X}Mtr.VAL'  # 26 for empty,
    xbpm_y_pos = 'XF:11IDB-BI{XBPM:02}Fdbk:BEn-SP'
    fs_pos = 'XF:11IDB-ES{Zebra}:SOFT_IN:B0'
    if fs == 'on':
        caput(fs_pos, 1)
    else:
        caput(fs_pos, 0)
        # shutter closed -> force the feedback off as well
        xbpm_y = 'off'
    if xbpm_y == 'on':
        sleep(5)
        caput(xbpm_y_pos, 1)
    else:
        caput(xbpm_y_pos, 0)
    if yag == 'on':
        caput(yag_pos, 30)
    else:
        caput(yag_pos, -26)
{ "repo_name": "NSLS-II-CHX/ipython_ophyd", "path": "startup/36-commisionning.py", "copies": "2", "size": "9808", "license": "bsd-2-clause", "hash": -2947831119223505000, "line_mean": 30.3354632588, "line_max": 104, "alpha_frac": 0.5935970636, "autogenerated": false, "ratio": 2.5084398976982096, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.410203696129821, "avg_score": null, "num_lines": null }
# A couple models for Haystack to test with. import datetime from django.db import models class MockTag(models.Model): name = models.CharField(max_length=32) class MockModel(models.Model): author = models.CharField(max_length=255) foo = models.CharField(max_length=255, blank=True) pub_date = models.DateTimeField(default=datetime.datetime.now) tag = models.ForeignKey(MockTag) def __unicode__(self): return self.author def hello(self): return 'World!' class AnotherMockModel(models.Model): author = models.CharField(max_length=255) pub_date = models.DateTimeField(default=datetime.datetime.now) def __unicode__(self): return self.author class AThirdMockModel(AnotherMockModel): average_delay = models.FloatField(default=0.0) view_count = models.PositiveIntegerField(default=0) class CharPKMockModel(models.Model): key = models.CharField(primary_key=True, max_length=10) class AFourthMockModel(models.Model): author = models.CharField(max_length=255) editor = models.CharField(max_length=255) pub_date = models.DateTimeField(default=datetime.datetime.now) def __unicode__(self): return self.author class SoftDeleteManager(models.Manager): def get_query_set(self): return super(SoftDeleteManager, self).get_query_set().filter(deleted=False) def complete_set(self): return super(SoftDeleteManager, self).get_query_set() class AFifthMockModel(models.Model): author = models.CharField(max_length=255) deleted = models.BooleanField(default=False) objects = SoftDeleteManager() def __unicode__(self): return self.author class ASixthMockModel(models.Model): name = models.CharField(max_length=255) lat = models.FloatField() lon = models.FloatField() def __unicode__(self): return self.name class ScoreMockModel(models.Model): score = models.CharField(max_length=10) def __unicode__(self): return self.score
{ "repo_name": "Architizer/django-haystack", "path": "tests/core/models.py", "copies": "11", "size": "2013", "license": "bsd-3-clause", "hash": 1865665442344864300, "line_mean": 25.4868421053, "line_max": 83, "alpha_frac": 0.7044212618, "autogenerated": false, "ratio": 3.741635687732342, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0015543004952504312, "num_lines": 76 }
# A couple of classes to assist with resolution calculations - these # are for calculating resolution (d, s) for either distance / beam / # wavelength / position or h, k, l, / unit cell. import logging import math import os import tempfile from xia2.Wrappers.CCP4.Pointless import Pointless from xia2.Modules.Scaler.rebatch import rebatch logger = logging.getLogger("xia2.Experts.ResolutionExperts") def meansd(values): if not values: return 0.0, 0.0 if len(values) == 1: return values[0], 0.0 mean = sum(values) / len(values) sd = 0.0 for v in values: sd += (v - mean) * (v - mean) sd /= len(values) return mean, math.sqrt(sd) def find_blank(hklin): try: # first dump to temp. file with tempfile.NamedTemporaryFile( suffix=".hkl", dir=os.environ["CCP4_SCR"], delete=False ) as fh: hklout = fh.name p = Pointless() p.set_hklin(hklin) _ = p.sum_mtz(hklout) if os.path.getsize(hklout) == 0: logger.debug("Pointless failed:") logger.debug("".join(p.get_all_output())) raise RuntimeError("Pointless failed: no output file written") isig = {} with open(hklout) as fh: for record in fh: lst = record.split() if not lst: continue batch = int(lst[3]) i, sig = float(lst[4]), float(lst[5]) if not sig: continue if batch not in isig: isig[batch] = [] isig[batch].append(i / sig) finally: os.remove(hklout) # look at the mean and sd blank = [] good = [] for batch in sorted(isig): m, s = meansd(isig[batch]) if m < 1: blank.append(batch) else: good.append(batch) return blank, good def remove_blank(hklin, hklout): """Find and remove blank batches from the file. Returns hklin if no blanks.""" blanks, goods = find_blank(hklin) if not blanks: return hklin # if mostly blank return hklin too... if len(blanks) > len(goods): logger.debug("%d blank vs. %d good: ignore", len(blanks), len(goods)) return hklin rebatch(hklin, hklout, exclude_batches=blanks) return hklout
{ "repo_name": "xia2/xia2", "path": "src/xia2/Experts/ResolutionExperts.py", "copies": "1", "size": "2396", "license": "bsd-3-clause", "hash": -3575424192972167700, "line_mean": 22.0384615385, "line_max": 77, "alpha_frac": 0.5550918197, "autogenerated": false, "ratio": 3.608433734939759, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9663525554639758, "avg_score": 0, "num_lines": 104 }
"""A couple of point pens to filter contours in various ways.""" from fontTools.pens.basePen import AbstractPen, BasePen from robofab.pens.pointPen import AbstractPointPen from robofab.objects.objectsRF import RGlyph as _RGlyph from robofab.objects.objectsBase import _interpolatePt import math # # threshold filtering # def distance(pt1, pt2): return math.hypot(pt1[0]-pt2[0], pt1[1]-pt2[1]) class ThresholdPointPen(AbstractPointPen): """ Rewrite of the ThresholdPen as a PointPen so that we can preserve named points and other arguments. This pen will add components from the original glyph, but but it won't filter those components. "move", "line", "curve" or "qcurve" """ def __init__(self, otherPointPen, threshold=10): self.threshold = threshold self._lastPt = None self._offCurveBuffer = [] self.otherPointPen = otherPointPen def beginPath(self): """Start a new sub path.""" self.otherPointPen.beginPath() self._lastPt = None def endPath(self): """End the current sub path.""" self.otherPointPen.endPath() def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs): """Add a point to the current sub path.""" if segmentType in ['curve', 'qcurve']: # it's an offcurve, let's buffer them until we get another oncurve # and we know what to do with them self._offCurveBuffer.append((pt, segmentType, smooth, name, kwargs)) return elif segmentType == "move": # start of an open contour self.otherPointPen.addPoint(pt, segmentType, smooth, name) # how to add kwargs? self._lastPt = pt self._offCurveBuffer = [] elif segmentType == "line": if self._lastPt is None: self.otherPointPen.addPoint(pt, segmentType, smooth, name) # how to add kwargs? 
self._lastPt = pt elif distance(pt, self._lastPt) >= self.threshold: # we're oncurve and far enough from the last oncurve if self._offCurveBuffer: # empty any buffered offcurves for buf_pt, buf_segmentType, buf_smooth, buf_name, buf_kwargs in self._offCurveBuffer: self.otherPointPen.addPoint(buf_pt, buf_segmentType, buf_smooth, buf_name) # how to add kwargs? self._offCurveBuffer = [] # finally add the oncurve. self.otherPointPen.addPoint(pt, segmentType, smooth, name) # how to add kwargs? self._lastPt = pt else: # we're too short, so we're not going to make it. # we need to clear out the offcurve buffer. self._offCurveBuffer = [] def addComponent(self, baseGlyphName, transformation): """Add a sub glyph. Note: this way components are not filtered.""" self.otherPointPen.addComponent(baseGlyphName, transformation) class ThresholdPen(AbstractPen): """Removes segments shorter in length than the threshold value.""" def __init__(self, otherPen, threshold=10): self.threshold = threshold self._lastPt = None self.otherPen = otherPen def moveTo(self, pt): self._lastPt = pt self.otherPen.moveTo(pt) def lineTo(self, pt, smooth=False): if self.threshold <= distance(pt, self._lastPt): self.otherPen.lineTo(pt) self._lastPt = pt def curveTo(self, pt1, pt2, pt3): if self.threshold <= distance(pt3, self._lastPt): self.otherPen.curveTo(pt1, pt2, pt3) self._lastPt = pt3 def qCurveTo(self, *points): if self.threshold <= distance(points[-1], self._lastPt): self.otherPen.qCurveTo(*points) self._lastPt = points[-1] def closePath(self): self.otherPen.closePath() def endPath(self): self.otherPen.endPath() def addComponent(self, glyphName, transformation): self.otherPen.addComponent(glyphName, transformation) def thresholdGlyph(aGlyph, threshold=10): """ Convenience function that handles the filtering. 
""" from robofab.pens.adapterPens import PointToSegmentPen new = _RGlyph() filterpen = ThresholdPen(new.getPen(), threshold) wrappedPen = PointToSegmentPen(filterpen) aGlyph.drawPoints(wrappedPen) aGlyph.clear() aGlyph.appendGlyph(new) aGlyph.update() return aGlyph def thresholdGlyphPointPen(aGlyph, threshold=10): """ Same a thresholdGlyph, but using the ThresholdPointPen, which should respect anchors.""" from robofab.pens.adapterPens import PointToSegmentPen new = _RGlyph() wrappedPen = new.getPointPen() filterpen = ThresholdPointPen(wrappedPen, threshold) aGlyph.drawPoints(filterpen) aGlyph.clear() new.drawPoints(aGlyph.getPointPen()) aGlyph.update() return aGlyph # # Curve flattening # def _estimateCubicCurveLength(pt0, pt1, pt2, pt3, precision=10): """Estimate the length of this curve by iterating through it and averaging the length of the flat bits. """ points = [] length = 0 step = 1.0/precision factors = range(0, precision+1) for i in factors: points.append(_getCubicPoint(i*step, pt0, pt1, pt2, pt3)) for i in range(len(points)-1): pta = points[i] ptb = points[i+1] length += distance(pta, ptb) return length def _mid((x0, y0), (x1, y1)): """(Point, Point) -> Point\nReturn the point that lies in between the two input points.""" return 0.5 * (x0 + x1), 0.5 * (y0 + y1) def _getCubicPoint(t, pt0, pt1, pt2, pt3): if t == 0: return pt0 if t == 1: return pt3 if t == 0.5: a = _mid(pt0, pt1) b = _mid(pt1, pt2) c = _mid(pt2, pt3) d = _mid(a, b) e = _mid(b, c) return _mid(d, e) else: cx = (pt1[0] - pt0[0]) * 3 cy = (pt1[1] - pt0[1]) * 3 bx = (pt2[0] - pt1[0]) * 3 - cx by = (pt2[1] - pt1[1]) * 3 - cy ax = pt3[0] - pt0[0] - cx - bx ay = pt3[1] - pt0[1] - cy - by t3 = t ** 3 t2 = t * t x = ax * t3 + bx * t2 + cx * t + pt0[0] y = ay * t3 + by * t2 + cy * t + pt0[1] return x, y class FlattenPen(BasePen): """Process the contours into a series of straight lines by flattening the curves. 
""" def __init__(self, otherPen, approximateSegmentLength=5, segmentLines=False, filterDoubles=True): self.approximateSegmentLength = approximateSegmentLength BasePen.__init__(self, {}) self.otherPen = otherPen self.currentPt = None self.firstPt = None self.segmentLines = segmentLines self.filterDoubles = filterDoubles def _moveTo(self, pt): self.otherPen.moveTo(pt) self.currentPt = pt self.firstPt = pt def _lineTo(self, pt): if self.filterDoubles: if pt == self.currentPt: return if not self.segmentLines: self.otherPen.lineTo(pt) self.currentPt = pt return d = distance(self.currentPt, pt) maxSteps = int(round(d / self.approximateSegmentLength)) if maxSteps < 1: self.otherPen.lineTo(pt) self.currentPt = pt return step = 1.0/maxSteps factors = range(0, maxSteps+1) for i in factors[1:]: self.otherPen.lineTo(_interpolatePt(self.currentPt, pt, i*step)) self.currentPt = pt def _curveToOne(self, pt1, pt2, pt3): est = _estimateCubicCurveLength(self.currentPt, pt1, pt2, pt3)/self.approximateSegmentLength maxSteps = int(round(est)) falseCurve = (pt1==self.currentPt) and (pt2==pt3) if maxSteps < 1 or falseCurve: self.otherPen.lineTo(pt3) self.currentPt = pt3 return step = 1.0/maxSteps factors = range(0, maxSteps+1) for i in factors[1:]: pt = _getCubicPoint(i*step, self.currentPt, pt1, pt2, pt3) self.otherPen.lineTo(pt) self.currentPt = pt3 def _closePath(self): self.lineTo(self.firstPt) self.otherPen.closePath() self.currentPt = None def _endPath(self): self.otherPen.endPath() self.currentPt = None def addComponent(self, glyphName, transformation): self.otherPen.addComponent(glyphName, transformation) def flattenGlyph(aGlyph, threshold=10, segmentLines=True): """Replace curves with series of straight l ines.""" from robofab.pens.adapterPens import PointToSegmentPen if len(aGlyph.contours) == 0: return new = _RGlyph() writerPen = new.getPen() filterpen = FlattenPen(writerPen, threshold, segmentLines) wrappedPen = PointToSegmentPen(filterpen) aGlyph.drawPoints(wrappedPen) 
aGlyph.clear() aGlyph.appendGlyph(new) aGlyph.update() return aGlyph def spikeGlyph(aGlyph, segmentLength=20, spikeLength=40, patternFunc=None): """Add narly spikes or dents to the glyph. patternFunc is an optional function which recalculates the offset.""" from math import atan2, sin, cos, pi new = _RGlyph() new.appendGlyph(aGlyph) new.width = aGlyph.width if len(new.contours) == 0: return flattenGlyph(new, segmentLength, segmentLines=True) for contour in new: l = len(contour.points) lastAngle = None for i in range(0, len(contour.points), 2): prev = contour.points[i-1] cur = contour.points[i] next = contour.points[(i+1)%l] angle = atan2(prev.x - next.x, prev.y - next.y) lastAngle = angle if patternFunc is not None: thisSpikeLength = patternFunc(spikeLength) else: thisSpikeLength = spikeLength cur.x -= sin(angle+.5*pi)*thisSpikeLength cur.y -= cos(angle+.5*pi)*thisSpikeLength new.update() aGlyph.clear() aGlyph.appendGlyph(new) aGlyph.update() return aGlyph def halftoneGlyph(aGlyph, invert=False): """Convert the glyph into some sort of halftoning pattern. Measure a bunch of inside/outside points to simulate grayscale levels. Slow. """ print 'halftoneGlyph is running...' grid = {} drawing = {} dataDistance = 10 scan = 2 preload = 0 cellDistance = dataDistance * 5 overshoot = dataDistance * 2 (xMin, yMin, xMax, yMax) = aGlyph.box for x in range(xMin-overshoot, xMax+overshoot, dataDistance): print 'scanning..', x for y in range(yMin-overshoot, yMax+overshoot, dataDistance): if aGlyph.pointInside((x, y)): grid[(x, y)] = True else: grid[(x, y)] = False #print 'gathering data', x, y, grid[(x, y)] print 'analyzing..' 
for x in range(xMin-overshoot, xMax+overshoot, cellDistance): for y in range(yMin-overshoot, yMax+overshoot, cellDistance): total = preload for scanx in range(-scan, scan): for scany in range(-scan, scan): if grid.get((x+scanx*dataDistance, y+scany*dataDistance)): total += 1 if invert: drawing[(x, y)] = 2*scan**2 - float(total) else: drawing[(x, y)] = float(total) aGlyph.clear() print drawing for (x,y) in drawing.keys(): size = drawing[(x,y)] / float(2*scan**2) * 5 pen = aGlyph.getPen() pen.moveTo((x-size, y-size)) pen.lineTo((x+size, y-size)) pen.lineTo((x+size, y+size)) pen.lineTo((x-size, y+size)) pen.lineTo((x-size, y-size)) pen.closePath() aGlyph.update() if __name__ == "__main__": from robofab.pens.pointPen import PrintingPointPen pp = PrintingPointPen() #pp.beginPath() #pp.addPoint((100, 100)) #pp.endPath() tpp = ThresholdPointPen(pp, threshold=20) tpp.beginPath() #segmentType=None, smooth=False, name=None tpp.addPoint((100, 100), segmentType="line", smooth=True) # section that should be too small tpp.addPoint((100, 102), segmentType="line", smooth=True) tpp.addPoint((200, 200), segmentType="line", smooth=True) # curve section with final point that's far enough, but with offcurves that are under the threshold tpp.addPoint((200, 205), segmentType="curve", smooth=True) tpp.addPoint((300, 295), segmentType="curve", smooth=True) tpp.addPoint((300, 300), segmentType="line", smooth=True) # curve section with final point that is not far enough tpp.addPoint((550, 350), segmentType="curve", smooth=True) tpp.addPoint((360, 760), segmentType="curve", smooth=True) tpp.addPoint((310, 310), segmentType="line", smooth=True) tpp.addPoint((400, 400), segmentType="line", smooth=True) tpp.addPoint((100, 100), segmentType="line", smooth=True) tpp.endPath() # couple of single points with names tpp.beginPath() tpp.addPoint((500, 500), segmentType="move", smooth=True, name="named point") tpp.addPoint((600, 500), segmentType="move", smooth=True, name="named point") 
tpp.addPoint((601, 501), segmentType="move", smooth=True, name="named point") tpp.endPath() # open path tpp.beginPath() tpp.addPoint((500, 500), segmentType="move", smooth=True) tpp.addPoint((501, 500), segmentType="line", smooth=True) tpp.addPoint((101, 500), segmentType="line", smooth=True) tpp.addPoint((101, 100), segmentType="line", smooth=True) tpp.addPoint((498, 498), segmentType="line", smooth=True) tpp.endPath()
{ "repo_name": "moyogo/robofab", "path": "Lib/robofab/pens/filterPen.py", "copies": "9", "size": "12103", "license": "bsd-3-clause", "hash": -7695697370394359000, "line_mean": 28.7395577396, "line_max": 101, "alpha_frac": 0.6945385442, "autogenerated": false, "ratio": 2.841079812206573, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.8035618356406572, "avg_score": null, "num_lines": null }
"""A couple of point pens which return the glyph as a list of basic values.""" from robofab.pens.pointPen import AbstractPointPen class DigestPointPen(AbstractPointPen): """Calculate a digest of all points AND coordinates AND components in a glyph. """ def __init__(self, ignoreSmoothAndName=False): self._data = [] self.ignoreSmoothAndName = ignoreSmoothAndName def beginPath(self): self._data.append('beginPath') def endPath(self): self._data.append('endPath') def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs): if self.ignoreSmoothAndName: self._data.append((pt, segmentType)) else: self._data.append((pt, segmentType, smooth, name)) def addComponent(self, baseGlyphName, transformation): t = [] for v in transformation: if int(v) == v: t.append(int(v)) else: t.append(v) self._data.append((baseGlyphName, tuple(t))) def getDigest(self): return tuple(self._data) def getDigestPointsOnly(self, needSort=True): """ Return a tuple with all coordinates of all points, but without smooth info or drawing instructions. For instance if you want to compare 2 glyphs in shape, but not interpolatability. """ points = [] from types import TupleType for item in self._data: if type(item) == TupleType: points.append(item[0]) if needSort: points.sort() return tuple(points) class DigestPointStructurePen(DigestPointPen): """Calculate a digest of the structure of the glyph NOT coordinates NOT values. 
""" def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs): self._data.append(segmentType) def addComponent(self, baseGlyphName, transformation): self._data.append(baseGlyphName) if __name__ == "__main__": """ beginPath ((112, 651), 'line', False, None) ((112, 55), 'line', False, None) ((218, 55), 'line', False, None) ((218, 651), 'line', False, None) endPath """ # a test from robofab.objects.objectsRF import RGlyph g = RGlyph() p = g.getPen() p.moveTo((112, 651)) p.lineTo((112, 55)) p.lineTo((218, 55)) p.lineTo((218, 651)) p.closePath() print g, len(g) digestPen = DigestPointPen() g.drawPoints(digestPen) print print "getDigest", digestPen.getDigest() print print "getDigestPointsOnly", digestPen.getDigestPointsOnly()
{ "repo_name": "moyogo/robofab", "path": "Lib/robofab/pens/digestPen.py", "copies": "9", "size": "2328", "license": "bsd-3-clause", "hash": -7576792175359058000, "line_mean": 20.9716981132, "line_max": 78, "alpha_frac": 0.6894329897, "autogenerated": false, "ratio": 3.0273081924577374, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.8216741182157737, "avg_score": null, "num_lines": null }
# A couple of samples using SHBrowseForFolder
import sys, os
from win32com.shell import shell, shellcon
import win32gui


# A callback procedure - called by SHBrowseForFolder
def BrowseCallbackProc(hwnd, msg, lp, data):
    """Handle dialog events: preset the selection, mirror it in the status line."""
    if msg == shellcon.BFFM_INITIALIZED:
        # Pre-select the folder passed in via 'data'.
        win32gui.SendMessage(hwnd, shellcon.BFFM_SETSELECTION, 1, data)
    elif msg == shellcon.BFFM_SELCHANGED:
        # For this message, 'lp' is the address of the selected item's PIDL;
        # show its filesystem path in the dialog's status area.
        pidl = shell.AddressAsPIDL(lp)
        try:
            folder_path = shell.SHGetPathFromIDList(pidl)
            win32gui.SendMessage(hwnd, shellcon.BFFM_SETSTATUSTEXT, 0, folder_path)
        except shell.error:
            # No filesystem path for this PIDL (virtual item) - leave it be.
            pass


if __name__ == '__main__':
    # Sample 1: a dialog with the cwd selected as the default - this must
    # be done via the callback function above.
    dialog_flags = shellcon.BIF_STATUSTEXT
    shell.SHBrowseForFolder(
        0,                                 # parent HWND
        None,                              # root PIDL
        "Default of %s" % os.getcwd(),     # title
        dialog_flags,                      # flags
        BrowseCallbackProc,                # callback function
        os.getcwd())                       # 'data' param for the callback

    # Sample 2: restrict browsing to the cwd and below by rooting the
    # dialog at the cwd's own PIDL.
    desktop = shell.SHGetDesktopFolder()
    cb, pidl, extra = desktop.ParseDisplayName(0, None, os.getcwd())
    shell.SHBrowseForFolder(
        0,                                 # parent HWND
        pidl,                              # root PIDL
        "From %s down only" % os.getcwd()) # title
{ "repo_name": "PopCap/GameIdea", "path": "Engine/Source/ThirdParty/HTML5/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/win32comext/shell/demos/browse_for_folder.py", "copies": "47", "size": "1661", "license": "bsd-2-clause", "hash": 221064668351675100, "line_mean": 40.525, "line_max": 76, "alpha_frac": 0.5725466586, "autogenerated": false, "ratio": 3.9453681710213777, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": null, "num_lines": null }
# A couple of samples using SHBrowseForFolder
import sys, os
from win32com.shell import shell, shellcon
import win32gui


def BrowseCallbackProc(hwnd, msg, lp, data):
    """SHBrowseForFolder callback: preset the selection and track changes."""
    if msg == shellcon.BFFM_INITIALIZED:
        # Select the folder handed over in 'data' when the dialog opens.
        win32gui.SendMessage(hwnd, shellcon.BFFM_SETSELECTION, 1, data)
        return
    if msg != shellcon.BFFM_SELCHANGED:
        return
    # For BFFM_SELCHANGED, 'lp' holds the address of the new selection's PIDL.
    selected = shell.AddressAsPIDL(lp)
    try:
        win32gui.SendMessage(hwnd, shellcon.BFFM_SETSTATUSTEXT, 0,
                             shell.SHGetPathFromIDList(selected))
    except shell.error:
        pass  # no filesystem path behind this PIDL


if __name__ == '__main__':
    cwd = os.getcwd()

    # Dialog rooted at the desktop with the cwd pre-selected via the
    # callback; BIF_STATUSTEXT enables the status line the callback updates.
    shell.SHBrowseForFolder(0, None, "Default of %s" % cwd,
                            shellcon.BIF_STATUSTEXT, BrowseCallbackProc, cwd)

    # Dialog that can only browse from the cwd downwards: root it at the
    # PIDL obtained for the cwd.
    cb, cwd_pidl, extra = shell.SHGetDesktopFolder().ParseDisplayName(0, None, cwd)
    shell.SHBrowseForFolder(0, cwd_pidl, "From %s down only" % cwd)
{ "repo_name": "ntuecon/server", "path": "pyenv/Lib/site-packages/win32comext/shell/demos/browse_for_folder.py", "copies": "4", "size": "1701", "license": "bsd-3-clause", "hash": 5427967116793838000, "line_mean": 40.525, "line_max": 76, "alpha_frac": 0.5590828924, "autogenerated": false, "ratio": 4.011792452830188, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.008755270163145198, "num_lines": 40 }
""" A couple of simple tools I'm using everywhere. """ import os import inspect import sys def isNumber(n): try: dummy = float(n) return True except ValueError: return False def good_path(filename, remove=os.path.sep+"core"): """This will return the path to related filename""" return os.path.join(os.path.dirname(inspect.getfile(sys._getframe(1))).replace(remove, ""), filename) def good_node_filename(filename): """This should be probably moved to node class... We'll see""" str = apply(os.path.join, tuple(filename.split('/'))) parts = str.split(os.path.sep) if parts.index("modes") == -1: return filename else: return os.path.sep.join(parts[parts.index("modes"):]) def uniq(alist): """ Fastest order preserving doublicates removal from arrays. """ set = {} return [set.setdefault(e,e) for e in alist if e not in set] def opj(path): """Convert paths to the platform-specific separator""" str = apply(os.path.join, tuple(path.split('/'))) if path.startswith('/'): # HACK: on Linux, a leading / gets lost... 
str = '/' + str return str def SafelyDelete(array, topic): """ Safe removal of item from array """ try: array.remove(topic) except: pass def RGBtoHSV(rgb): hsv = [0,0,0] trgb = list(rgb) trgb.sort() min = trgb[0] max = trgb[2] delta = float(max - min) hsv[2] = max if delta == 0: # r = g = b = 0 # s = 0, v is undefined hsv[1] = 0 hsv[0] = -1 else: hsv[1]=delta / max if rgb[0] == max: hsv[0] = (rgb[1] - rgb[2]) / delta # between yellow & magenta elif rgb[1] == max: hsv[0] = 2 + (rgb[2] - rgb[0] ) / delta # between cyan & yellow else: hsv[0] = 4 + (rgb[0] - rgb[1] ) / delta # between magenta & cyan hsv[0] *= 60 # degrees if hsv[0] < 0: hsv[0] += 360 return hsv def HSVtoRGB(hsv): rgb=[0,0,0] # pass through alpha channel hsv[0]/=60 if hsv[1] == 0: return tuple([hsv[2],hsv[2],hsv[2]]) i = int(hsv[0]) f = hsv[0] - i #Decimal bit of hue p = hsv[2] * (1 - hsv[1]) q = hsv[2] * (1 - hsv[1] * f) t = hsv[2] * (1 - hsv[1] * (1 - f)) if i == 0: rgb[0] = hsv[2] rgb[1] = t rgb[2] = p elif i == 1: rgb[0] = q rgb[1] = hsv[2] rgb[2] = p elif i == 2: rgb[0] = p rgb[1] = hsv[2] rgb[2] = t elif i == 3: rgb[0] = p rgb[1] = q rgb[2] = hsv[2] elif i == 4: rgb[0] = t rgb[1] = p rgb[2] = hsv[2] elif i == 5: rgb[0] = hsv[2] rgb[1] = p rgb[2] = q return tuple(rgb)
{ "repo_name": "gdubost1/shaderman", "path": "core/utils.py", "copies": "4", "size": "2780", "license": "bsd-3-clause", "hash": -3956236393747229000, "line_mean": 22.974137931, "line_max": 102, "alpha_frac": 0.5071942446, "autogenerated": false, "ratio": 2.857142857142857, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.028076309858599113, "num_lines": 116 }
# a couple of support functions which
# help with generating Python source.

# NOTE: this is Python 2 code (``exec ... in``, ``func_code``, ``file()``
# via the ``py`` library) from the RPython toolchain - do not port blindly.

# XXX This module provides a similar, but subtly different, functionality
# XXX several times over, which used to be scattered over four modules.
# XXX We should try to generalize and single out one approach to dynamic
# XXX code compilation.

import sys, os, inspect, types
import py

def render_docstr(func, indent_str='', closing_str=''):
    """ Render a docstring as a string of lines.
        The argument is either a docstring or an object.
        Note that we don't use a sequence, since we want
        the docstring to line up left, regardless of
        indentation. The shorter triple quotes are
        choosen automatically.
        The result is returned as a 1-tuple."""
    if not isinstance(func, str):
        doc = func.__doc__
    else:
        doc = func
    if doc is None:
        return None
    # escape backslashes so the rendered literal round-trips
    doc = doc.replace('\\', r'\\')
    # render with both quote styles and keep whichever is shorter
    # (fewer embedded quotes needed escaping)
    compare = []
    for q in '"""', "'''":
        txt = indent_str + q + doc.replace(q[0], "\\"+q[0]) + q + closing_str
        compare.append(txt)
    doc, doc2 = compare
    doc = (doc, doc2)[len(doc2) < len(doc)]
    return doc

class NiceCompile(object):
    """ Compiling parameterized strings in a way that debuggers are happy.
        We provide correct line numbers and a real __file__ attribute.
    """
    def __init__(self, namespace_or_filename):
        # accept either a filename or a module namespace dict with __file__
        if type(namespace_or_filename) is str:
            srcname = namespace_or_filename
        else:
            srcname = namespace_or_filename.get('__file__')
            if not srcname:
                # assume the module was executed from the
                # command line.
                srcname = os.path.abspath(sys.argv[-1])
        self.srcname = srcname
        # map compiled files back to their .py source
        if srcname.endswith('.pyc') or srcname.endswith('.pyo'):
            srcname = srcname[:-1]
        if os.path.exists(srcname):
            self.srcname = srcname
            self.srctext = file(srcname).read()
        else:
            # missing source, what to do?
            self.srctext = None

    def __call__(self, src, args=None):
        """ instance NiceCompile (src, args) -- formats src with args
            and returns a code object ready for exec.
            Instead of <string>, the code object has correct co_filename
            and line numbers. Indentation is automatically corrected.
        """
        if self.srctext:
            # locate the template inside the real source file so we can
            # pad with newlines and keep tracebacks pointing at it
            try:
                p = self.srctext.index(src)
            except ValueError:
                msg = "Source text not found in %s - use a raw string" % self.srcname
                raise ValueError(msg)
            prelines = self.srctext[:p].count("\n") + 1
        else:
            prelines = 0
        # adjust indented def
        for line in src.split('\n'):
            content = line.strip()
            if content and not content.startswith('#'):
                break
        # see if first line is indented
        if line and line[0].isspace():
            # fake a block
            prelines -= 1
            src = 'if 1:\n' + src
        if args is not None:
            src = '\n' * prelines + src % args
        else:
            src = '\n' * prelines + src
        c = compile(src, self.srcname, "exec")
        # preserve the arguments of the code in an attribute
        # of the code's co_filename
        if self.srcname:
            srcname = MyStr(self.srcname)
            if args is not None:
                srcname.__sourceargs__ = args
            c = newcode_withfilename(c, srcname)
        return c

def getsource(object):
    """ similar to inspect.getsource, but trying to
        find the parameters of formatting generated methods and
        functions.
    """
    # co_filename may be a MyStr carrying the generated source/args
    name = inspect.getfile(object)
    if hasattr(name, '__source__'):
        src = str(name.__source__)
    else:
        try:
            src = inspect.getsource(object)
        except Exception:   # catch IOError, IndentationError, and also rarely
            return None     # some other exceptions like IndexError
    if hasattr(name, "__sourceargs__"):
        return src % name.__sourceargs__
    return src

## the following is stolen from py.code.source.py for now.
## XXX discuss whether and how to put this functionality
## into py.code.source.
#
# various helper functions
#
class MyStr(str):
    """ custom string which allows adding attributes.
        Used as a co_filename that can also carry __source__ and
        __sourceargs__ for generated code. """

def newcode(fromcode, **kwargs):
    """ Rebuild a code object from *fromcode*, overriding any co_* fields
        given as keyword arguments. """
    names = [x for x in dir(fromcode) if x[:3] == 'co_']
    for name in names:
        if name not in kwargs:
            kwargs[name] = getattr(fromcode, name)
    # NOTE(review): this positional order matches the CPython 2 CodeType
    # constructor; it is not valid for CPython 3.
    return types.CodeType(
             kwargs['co_argcount'],
             kwargs['co_nlocals'],
             kwargs['co_stacksize'],
             kwargs['co_flags'],
             kwargs['co_code'],
             kwargs['co_consts'],
             kwargs['co_names'],
             kwargs['co_varnames'],
             kwargs['co_filename'],
             kwargs['co_name'],
             kwargs['co_firstlineno'],
             kwargs['co_lnotab'],
             kwargs['co_freevars'],
             kwargs['co_cellvars'],
    )

def newcode_withfilename(co, co_filename):
    """ Return a copy of code object *co* (recursing into nested code
        constants) with co_filename replaced everywhere. """
    newconstlist = []
    cotype = type(co)
    for c in co.co_consts:
        if isinstance(c, cotype):
            c = newcode_withfilename(c, co_filename)
        newconstlist.append(c)
    return newcode(co, co_consts = tuple(newconstlist),
                       co_filename = co_filename)

# ____________________________________________________________

import __future__

def compile2(source, filename='', mode='exec', flags=
             __future__.generators.compiler_flag, dont_inherit=0):
    """
    A version of compile() that caches the code objects it returns.
    It uses py.code.compile() to allow the source to be displayed
    in tracebacks.
    """
    key = (source, filename, mode, flags)
    try:
        co = compile2_cache[key]
        #print "***** duplicate code ******* "
        #print source
    except KeyError:
        #if DEBUG:
        co = py.code.compile(source, filename, mode, flags)
        #else:
        #    co = compile(source, filename, mode, flags)
        compile2_cache[key] = co
    return co

# global cache: (source, filename, mode, flags) -> code object
compile2_cache = {}

# ____________________________________________________________

def compile_template(source, resultname):
    """Compiles the source code (a string or a list/generator of lines)
    which should be a definition for a function named 'resultname'.
    The caller's global dict and local variable bindings are captured.
    """
    if not isinstance(source, py.code.Source):
        if isinstance(source, str):
            lines = [source]
        else:
            lines = list(source)
        lines.append('')
        source = py.code.Source('\n'.join(lines))

    # capture the caller's locals so the template can close over them
    caller = sys._getframe(1)
    locals = caller.f_locals
    if locals is caller.f_globals:
        # module level: nothing local to capture
        localnames = []
    else:
        localnames = locals.keys()
        localnames.sort()
    values = [locals[key] for key in localnames]

    # wrap the template in a function taking the captured locals as args
    source = source.putaround(
        before = "def container(%s):" % (', '.join(localnames),),
        after  = "# no unindent\n    return %s" % resultname)

    d = {}
    exec source.compile() in caller.f_globals, d
    container = d['container']
    return container(*values)

# ____________________________________________________________

def func_with_new_name(func, newname, globals=None):
    """Make a renamed copy of a function."""
    if globals is None:
        globals = func.func_globals
    f = types.FunctionType(func.func_code, globals,
                            newname, func.func_defaults,
                            func.func_closure)
    if func.func_dict:
        # shallow-copy func_dict so the copy does not share attributes
        f.func_dict = {}
        f.func_dict.update(func.func_dict)
    f.func_doc = func.func_doc
    return f

def func_renamer(newname):
    """A function decorator which changes the name of a function."""
    def decorate(func):
        return func_with_new_name(func, newname)
    return decorate

# 256-entry translation table mapping every non-alphanumeric byte to '_'
PY_IDENTIFIER = ''.join([(('0' <= chr(i) <= '9' or
                           'a' <= chr(i) <= 'z' or
                           'A' <= chr(i) <= 'Z') and chr(i) or '_')
                         for i in range(256)])
PY_IDENTIFIER_MAX = 120

def valid_identifier(stuff):
    """ Turn an arbitrary string into a valid (truncated) Python
        identifier by replacing illegal characters with '_'. """
    stuff = str(stuff).translate(PY_IDENTIFIER)
    if not stuff or ('0' <= stuff[0] <= '9'):
        stuff = '_' + stuff
    return stuff[:PY_IDENTIFIER_MAX]

# code-object flag bits (see CPython's code.h)
CO_VARARGS      = 0x0004
CO_VARKEYWORDS  = 0x0008

def has_varargs(func):
    # accepts either a function or a code object
    func = getattr(func, 'func_code', func)
    return (func.co_flags & CO_VARARGS) != 0

def has_varkeywords(func):
    # accepts either a function or a code object
    func = getattr(func, 'func_code', func)
    return (func.co_flags & CO_VARKEYWORDS) != 0

def nice_repr_for_func(fn, name=None):
    """ Return a '(module:lineno)qualified_name' string for *fn*,
        tolerating objects that are not plain functions. """
    mod = getattr(fn, '__module__', None)
    if name is None:
        name = getattr(fn, '__name__', None)
        cls = getattr(fn, 'class_', None)
        if name is not None and cls is not None:
            name = "%s.%s" % (cls.__name__, name)
    try:
        firstlineno = fn.func_code.co_firstlineno
    except AttributeError:
        firstlineno = -1
    return "(%s:%d)%s" % (mod or '?', firstlineno,
                          name or 'UNKNOWN')

def rpython_wrapper(f, template, templateargs=None, **globaldict):
    """
    We cannot simply wrap the function using *args, **kwds, because it's not
    RPython. Instead, we generate a function from ``template`` with exactly
    the same argument list.
    """
    if templateargs is None:
        templateargs = {}
    srcargs, srcvarargs, srckeywords, defaults = inspect.getargspec(f)
    assert not srcvarargs, '*args not supported by rpython_wrapper'
    assert not srckeywords, '**kwargs not supported by rpython_wrapper'
    #
    arglist = ', '.join(srcargs)
    templateargs.update(name=f.func_name,
                        arglist=arglist,
                        original=f.func_name+'_original')

    src = template.format(**templateargs)
    src = py.code.Source(src)
    #
    # make the original callable from the generated body as <name>_original
    globaldict[f.func_name + '_original'] = f
    exec src.compile() in globaldict
    result = globaldict[f.func_name]
    # preserve defaults and attributes of the wrapped function
    result.func_defaults = f.func_defaults
    result.func_dict.update(f.func_dict)
    return result
{ "repo_name": "oblique-labs/pyVM", "path": "rpython/tool/sourcetools.py", "copies": "2", "size": "10132", "license": "mit", "hash": -2238182091290716200, "line_mean": 33.2297297297, "line_max": 85, "alpha_frac": 0.5724437426, "autogenerated": false, "ratio": 3.942412451361868, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5514856193961868, "avg_score": null, "num_lines": null }
"""Acoustic Brainz Genre dataset .. admonition:: Dataset Info :class: dropdown The AcousticBrainz Genre Dataset consists of four datasets of genre annotations and music features extracted from audio suited for evaluation of hierarchical multi-label genre classification systems. Description about the music features can be found here: https://essentia.upf.edu/streaming_extractor_music.html The datasets are used within the MediaEval AcousticBrainz Genre Task. The task is focused on content-based music genre recognition using genre annotations from multiple sources and large-scale music features data available in the AcousticBrainz database. The goal of our task is to explore how the same music pieces can be annotated differently by different communities following different genre taxonomies, and how this should be addressed by content-based genre r ecognition systems. We provide four datasets containing genre and subgenre annotations extracted from four different online metadata sources: - AllMusic and Discogs are based on editorial metadata databases maintained by music experts and enthusiasts. These sources contain explicit genre/subgenre annotations of music releases (albums) following a predefined genre namespace and taxonomy. We propagated release-level annotations to recordings (tracks) in AcousticBrainz to build the datasets. - Lastfm and Tagtraum are based on collaborative music tagging platforms with large amounts of genre labels provided by their users for music recordings (tracks). We have automatically inferred a genre/subgenre taxonomy and annotations from these labels. For details on format and contents, please refer to the data webpage. Note, that the AllMusic ground-truth annotations are distributed separately at https://zenodo.org/record/2554044. If you use the MediaEval AcousticBrainz Genre dataset or part of it, please cite our ISMIR 2019 overview paper: .. code-block:: latex Bogdanov, D., Porter A., Schreiber H., Urbano J., & Oramas S. (2019). 
The AcousticBrainz Genre Dataset: Multi-Source, Multi-Level, Multi-Label, and Large-Scale. 20th International Society for Music Information Retrieval Conference (ISMIR 2019). This work is partially supported by the European Union’s Horizon 2020 research and innovation programme under grant agreement No 688382 AudioCommons. """ import json from mirdata import download_utils, core, io from mirdata import jams_utils NAME = "acousticbrainz_genre" BIBTEX = """ @inproceedings{bogdanov2019acousticbrainz, title={The AcousticBrainz genre dataset: Multi-source, multi-level, multi-label, and large-scale}, author={Bogdanov, Dmitry and Porter, Alastair and Schreiber, Hendrik and Urbano, Juli{\'a}n and Oramas, Sergio}, booktitle={Proceedings of the 20th Conference of the International Society for Music Information Retrieval (ISMIR 2019): 2019 Nov 4-8; Delft, The Netherlands.[Canada]: ISMIR; 2019.}, year={2019}, organization={International Society for Music Information Retrieval (ISMIR)} } """ INDEXES = { "default": "1.0", "test": "sample", "1.0": core.Index( filename="acousticbrainz_genre_index_1.0.json", url="https://zenodo.org/record/4698408/files/acousticbrainz_genre_index_1.0.json.zip?download=1", checksum="ee2837b04d8dd6ab0507f5b975314b7e", ), "sample": core.Index(filename="acousticbrainz_genre_index_sample.json"), } REMOTES = { "index": download_utils.RemoteFileMetadata( filename="acousticbrainz_genre_index.json.zip", url="https://zenodo.org/record/4298580/files/acousticbrainz_genre_index.json.zip?download=1", checksum="810f1c003f53cbe58002ba96e6d4d138", ), "validation-01": download_utils.RemoteFileMetadata( filename="acousticbrainz-mediaeval-features-validation-01234567.tar.bz2", url="https://zenodo.org/record/2553414/files/acousticbrainz-mediaeval-features-validation-01234567.tar.bz2?download=1", checksum="f21f9c5e398713139cca9790b656faf9", destination_dir="acousticbrainz-mediaeval-validation", unpack_directories=["acousticbrainz-mediaeval-validation"], ), 
"validation-89": download_utils.RemoteFileMetadata( filename="acousticbrainz-mediaeval-features-validation-89abcdef.tar.bz2", url="https://zenodo.org/record/2553414/files/acousticbrainz-mediaeval-features-validation-89abcdef.tar.bz2?download=1", checksum="34f47394ac6d8face4399f48e2b98ebe", destination_dir="acousticbrainz-mediaeval-validation", unpack_directories=["acousticbrainz-mediaeval-validation"], ), "train-01": download_utils.RemoteFileMetadata( filename="acousticbrainz-mediaeval-features--train-01.tar.bz2", url="https://zenodo.org/record/2553414/files/acousticbrainz-mediaeval-features--train-01.tar.bz2?download=1", checksum="db7157b5112022d609652dd21c632090", destination_dir="acousticbrainz-mediaeval-train", unpack_directories=["acousticbrainz-mediaeval-train"], ), "train-23": download_utils.RemoteFileMetadata( filename="acousticbrainz-mediaeval-features-train-23.tar.bz2", url="https://zenodo.org/record/2553414/files/acousticbrainz-mediaeval-features-train-23.tar.bz2?download=1", checksum="79581967a1be5c52e83be21261d1ef6c", destination_dir="acousticbrainz-mediaeval-train", unpack_directories=["acousticbrainz-mediaeval-train"], ), "train-45": download_utils.RemoteFileMetadata( filename="acousticbrainz-mediaeval-features-train-45.tar.bz2", url="https://zenodo.org/record/2553414/files/acousticbrainz-mediaeval-features-train-45.tar.bz2?download=1", checksum="0e48fa319fa48e5cf95eea8118d2e882", destination_dir="acousticbrainz-mediaeval-train", unpack_directories=["acousticbrainz-mediaeval-train"], ), "train-67": download_utils.RemoteFileMetadata( filename="acousticbrainz-mediaeval-features-train-67.tar.bz2", url="https://zenodo.org/record/2553414/files/acousticbrainz-mediaeval-features-train-67.tar.bz2?download=1", checksum="22ca7f1fea8a86459b7fda4530f00070", destination_dir="acousticbrainz-mediaeval-train", unpack_directories=["acousticbrainz-mediaeval-train"], ), "train-89": download_utils.RemoteFileMetadata( 
filename="acousticbrainz-mediaeval-features-train-89.tar.bz2", url="https://zenodo.org/record/2553414/files/acousticbrainz-mediaeval-features-train-89.tar.bz2?download=1", checksum="c6e4a2ef1b0e8ed535197b868f8c7302", destination_dir="acousticbrainz-mediaeval-train", unpack_directories=["acousticbrainz-mediaeval-train"], ), "train-ab": download_utils.RemoteFileMetadata( filename="acousticbrainz-mediaeval-features-train-ab.tar.bz2", url="https://zenodo.org/record/2553414/files/acousticbrainz-mediaeval-features-train-ab.tar.bz2?download=1", checksum="513d5f306dd4f3799c137423ee444051", destination_dir="acousticbrainz-mediaeval-train", unpack_directories=["acousticbrainz-mediaeval-train"], ), "train-cd": download_utils.RemoteFileMetadata( filename="acousticbrainz-mediaeval-features-train-cd.tar.bz2", url="https://zenodo.org/record/2553414/files/acousticbrainz-mediaeval-features-train-cd.tar.bz2?download=1", checksum="422d75d70d583decec0b2761865092a7", destination_dir="acousticbrainz-mediaeval-train", unpack_directories=["acousticbrainz-mediaeval-train"], ), "train-ef": download_utils.RemoteFileMetadata( filename="acousticbrainz-mediaeval-features-train-ef.tar.bz2", url="https://zenodo.org/record/2553414/files/acousticbrainz-mediaeval-features-train-ef.tar.bz2?download=1", checksum="021ab25a5fd1b020521824e7fce9c775", destination_dir="acousticbrainz-mediaeval-train", unpack_directories=["acousticbrainz-mediaeval-train"], ), } LICENSE_INFO = """ This dataset is composed of 4 subdatasets. Three of them are Creative Commons Attribution Non Commercial Share Alike 4.0 International and the other one is non-comercial. 
Details about which license correspond to each subdataset can be found in the following websites: * https://zenodo.org/record/2553414#.X_nxnOn7RUI * https://zenodo.org/record/2554044#.X_nw2en7RUI """ class Track(core.Track): """AcousticBrainz Genre Dataset track class Args: track_id (str): track id of the track data_home (str): Local path where the dataset is stored. If `None`, looks for the data in the default directory, `~/mir_datasets` Attributes: track_id (str): track id genre (list): human-labeled genre and subgenres list mbid (str): musicbrainz id mbid_group (str): musicbrainz id group artist (list): the track's artist/s title (list): the track's title date (list): the track's release date/s filename (str): the track's filename album (list): the track's album/s track_number (list): the track number/s tonal (dict): dictionary of acousticbrainz tonal features low_level (dict): dictionary of acousticbrainz low-level features rhythm (dict): dictionary of acousticbrainz rhythm features Cached Properties: acousticbrainz_metadata (dict): dictionary of metadata provided by AcousticBrainz """ def __init__( self, track_id, data_home, dataset_name, index, metadata, ): super().__init__( track_id, data_home, dataset_name, index, metadata, ) self.path = self.get_path("data") self.genre = [genre for genre in self.track_id.split("#")[4:] if genre != ""] self.mbid = self.track_id.split("#")[2] self.mbid_group = self.track_id.split("#")[3] self.split = self.track_id.split("#")[1] # Metadata @property def artist(self): """metadata artist annotation Returns: list: artist """ return self.acousticbrainz_metadata["metadata"]["tags"]["artist"] @property def title(self): """metadata title annotation Returns: list: title """ return self.acousticbrainz_metadata["metadata"]["tags"]["title"] @property def date(self): """metadata date annotation Returns: list: date """ return self.acousticbrainz_metadata["metadata"]["tags"]["date"] @property def file_name(self): """metadata file_name 
annotation Returns: str: file name """ return self.acousticbrainz_metadata["metadata"]["tags"]["file_name"] @property def album(self): """metadata album annotation Returns: list: album """ return self.acousticbrainz_metadata["metadata"]["tags"]["album"] @property def tracknumber(self): """metadata tracknumber annotation Returns: list: tracknumber """ return self.acousticbrainz_metadata["metadata"]["tags"]["tracknumber"] @property def tonal(self): """tonal features Returns: dict: .. toggle:: - 'tuning_frequency': estimated tuning frequency [Hz]. Algorithms: TuningFrequency - 'tuning_nontempered_energy_ratio' and 'tuning_equal_tempered_deviation' - 'hpcp', 'thpcp': 32-dimensional harmonic pitch class profile (HPCP) and its transposed version. Algorithms: HPCP - 'hpcp_entropy': Shannon entropy of a HPCP vector. Algorithms: Entropy - 'key_key', 'key_scale': Global key feature. Algorithms: Key - 'chords_key', 'chords_scale': Global key extracted from chords detection. - 'chords_strength', 'chords_histogram': : strength of estimated chords and normalized histogram of their progression; Algorithms: ChordsDetection, ChordsDescriptors - 'chords_changes_rate', 'chords_number_rate': chords change rate in the progression; ratio of different chords from the total number of chords in the progression; Algorithms: ChordsDetection, ChordsDescriptors """ return self.acousticbrainz_metadata["tonal"] @property def low_level(self): """low_level track descriptors. Returns: dict: .. toggle:: - 'average_loudness': dynamic range descriptor. It rescales average loudness, computed on 2sec windows with 1 sec overlap, into the [0,1] interval. The value of 0 corresponds to signals with large dynamic range, 1 corresponds to signal with little dynamic range. Algorithms: Loudness - 'dynamic_complexity': dynamic complexity computed on 2sec windows with 1sec overlap. 
Algorithms: DynamicComplexity - 'silence_rate_20dB', 'silence_rate_30dB', 'silence_rate_60dB': rate of silent frames in a signal for thresholds of 20, 30, and 60 dBs. Algorithms: SilenceRate - 'spectral_rms': spectral RMS. Algorithms: RMS - 'spectral_flux': spectral flux of a signal computed using L2-norm. Algorithms: Flux - 'spectral_centroid', 'spectral_kurtosis', 'spectral_spread', 'spectral_skewness': centroid and central moments statistics describing the spectral shape. Algorithms: Centroid, CentralMoments - 'spectral_rolloff': the roll-off frequency of a spectrum. Algorithms: RollOff - 'spectral_decrease': spectral decrease. Algorithms: Decrease - 'hfc': high frequency content descriptor as proposed by Masri. Algorithms: HFC - 'zerocrossingrate' zero-crossing rate. Algorithms: ZeroCrossingRate - 'spectral_energy': spectral energy. Algorithms: Energy - 'spectral_energyband_low', 'spectral_energyband_middle_low', 'spectral_energyband_middle_high', - 'spectral_energyband_high': spectral energy in frequency bands [20Hz, 150Hz], [150Hz, 800Hz], [800Hz, 4kHz], and [4kHz, 20kHz]. Algorithms EnergyBand - 'barkbands': spectral energy in 27 Bark bands. Algorithms: BarkBands - 'melbands': spectral energy in 40 mel bands. Algorithms: MFCC - 'erbbands': spectral energy in 40 ERB bands. Algorithms: ERBBands - 'mfcc': the first 13 mel frequency cepstrum coefficients. See algorithm: MFCC - 'gfcc': the first 13 gammatone feature cepstrum coefficients. Algorithms: GFCC - 'barkbands_crest', 'barkbands_flatness_db': crest and flatness computed over energies in Bark bands. Algorithms: Crest, FlatnessDB - 'barkbands_kurtosis', 'barkbands_skewness', 'barkbands_spread': central moments statistics over energies in Bark bands. Algorithms: CentralMoments - 'melbands_crest', 'melbands_flatness_db': crest and flatness computed over energies in mel bands. 
Algorithms: Crest, FlatnessDB - 'melbands_kurtosis', 'melbands_skewness', 'melbands_spread': central moments statistics over energies in mel bands. Algorithms: CentralMoments - 'erbbands_crest', 'erbbands_flatness_db': crest and flatness computed over energies in ERB bands. Algorithms: Crest, FlatnessDB - 'erbbands_kurtosis', 'erbbands_skewness', 'erbbands_spread': central moments statistics over energies in ERB bands. Algorithms: CentralMoments - 'dissonance': sensory dissonance of a spectrum. Algorithms: Dissonance - 'spectral_entropy': Shannon entropy of a spectrum. Algorithms: Entropy - 'pitch_salience': pitch salience of a spectrum. Algorithms: PitchSalience - 'spectral_complexity': spectral complexity. Algorithms: SpectralComplexity - 'spectral_contrast_coeffs', 'spectral_contrast_valleys': spectral contrast features. Algorithms: SpectralContrast """ return self.acousticbrainz_metadata["lowlevel"] @property def rhythm(self): """rhythm essentia extractor descriptors Returns: dict: .. toggle:: - 'beats_position': time positions [sec] of detected beats using beat tracking algorithm by Degara et al., 2012. Algorithms: RhythmExtractor2013, BeatTrackerDegara - 'beats_count': number of detected beats - 'bpm': BPM value according to detected beats - 'bpm_histogram_first_peak_bpm', 'bpm_histogram_first_peak_spread', 'bpm_histogram_first_peak_weight', - 'bpm_histogram_second_peak_bpm', 'bpm_histogram_second_peak_spread', 'bpm_histogram_second_peak_weight': descriptors characterizing highest and second highest peak of the BPM histogram. Algorithms: BpmHistogramDescriptors - 'beats_loudness', 'beats_loudness_band_ratio': spectral energy computed on beats segments of audio across the whole spectrum, and ratios of energy in 6 frequency bands. Algorithms: BeatsLoudness, SingleBeatLoudness - 'onset_rate': number of detected onsets per second. Algorithms: OnsetRate - 'danceability': danceability estimate. 
Algorithms: Danceability """ return self.acousticbrainz_metadata["rhythm"] @core.cached_property def acousticbrainz_metadata(self): return load_extractor(self.path) def to_jams(self): """the track's data in jams format Returns: jams.JAMS: return track data in jam format """ return jams_utils.jams_converter( metadata={ "features": load_extractor(self.path), "duration": self.acousticbrainz_metadata["metadata"][ "audio_properties" ]["length"], } ) @io.coerce_to_string_io def load_extractor(fhandle): """Load a AcousticBrainz Dataset json file with all the features and metadata. Args: fhandle (str or file-like): path or file-like object pointing to a json file Returns: * np.ndarray - the mono audio signal * float - The sample rate of the audio file """ return json.load(fhandle) @core.docstring_inherit(core.Dataset) class Dataset(core.Dataset): """ The acousticbrainz genre dataset """ def __init__(self, data_home=None, version="default"): super().__init__( data_home, version, name=NAME, track_class=Track, bibtex=BIBTEX, indexes=INDEXES, remotes=REMOTES, license_info=LICENSE_INFO, ) @core.copy_docs(load_extractor) def load_extractor(self, *args, **kwargs): return load_extractor(*args, **kwargs) def filter_index(self, search_key): """Load from AcousticBrainz genre dataset the indexes that match with search_key. Args: search_key (str): regex to match with folds, mbid or genres Returns: dict: {`track_id`: track data} """ acousticbrainz_genre_data = { k: v for k, v in self._index["tracks"].items() if search_key in k } return acousticbrainz_genre_data def load_all_train(self): """Load from AcousticBrainz genre dataset the tracks that are used for training across the four different datasets. Returns: dict: {`track_id`: track data} """ return self.filter_index("#train#") def load_all_validation(self): """Load from AcousticBrainz genre dataset the tracks that are used for validating across the four different datasets. 
Returns: dict: {`track_id`: track data} """ return self.filter_index("#validation#") def load_tagtraum_validation(self): """Load from AcousticBrainz genre dataset the tracks that are used for validating in tagtraum dataset. Returns: dict: {`track_id`: track data} """ return self.filter_index("tagtraum#validation#") def load_tagtraum_train(self): """Load from AcousticBrainz genre dataset the tracks that are used for training in tagtraum dataset. Returns: dict: {`track_id`: track data} """ return self.filter_index("tagtraum#train#") def load_allmusic_train(self): """Load from AcousticBrainz genre dataset the tracks that are used for validation in allmusic dataset. Returns: dict: {`track_id`: track data} """ return self.filter_index("allmusic#train#") def load_allmusic_validation(self): """Load from AcousticBrainz genre dataset the tracks that are used for validation in allmusic dataset. Returns: dict: {`track_id`: track data} """ return self.filter_index("allmusic#validation#") def load_lastfm_train(self): """Load from AcousticBrainz genre dataset the tracks that are used for training in lastfm dataset. Returns: dict: {`track_id`: track data} """ return self.filter_index("lastfm#train#") def load_lastfm_validation(self): """Load from AcousticBrainz genre dataset the tracks that are used for validation in lastfm dataset. Returns: dict: {`track_id`: track data} """ return self.filter_index("lastfm#validation#") def load_discogs_train(self): """Load from AcousticBrainz genre dataset the tracks that are used for training in discogs dataset. Returns: dict: {`track_id`: track data} """ return self.filter_index("allmusic#train#") def load_discogs_validation(self): """Load from AcousticBrainz genre dataset the tracks that are used for validation in tagtraum dataset. Returns: dict: {`track_id`: track data} """ return self.filter_index("allmusic#validation#")
{ "repo_name": "mir-dataset-loaders/mirdata", "path": "mirdata/datasets/acousticbrainz_genre.py", "copies": "1", "size": "22548", "license": "bsd-3-clause", "hash": 1166161674245937200, "line_mean": 42.1089866157, "line_max": 184, "alpha_frac": 0.6630000887, "autogenerated": false, "ratio": 3.7702341137123745, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.99190909159879, "avg_score": 0.002828657284894632, "num_lines": 523 }
"""ACP Inheritance Settings Class""" from fmcapi.api_objects.apiclasstemplate import APIClassTemplate from .accesspolicies import AccessPolicies import logging class InheritanceSettings(APIClassTemplate): """The InheritanceSettings Object in the FMC.""" VALID_JSON_DATA = [] VALID_FOR_KWARGS = VALID_JSON_DATA + [ "id", "acp_id", "acp_name", "device_id", "device_name", "base_policy_id", ] PREFIX_URL = "/policy/accesspolicies" REQUIRED_FOR_PUT = ["acp_id", "id", "base_policy_id"] REQUIRED_FOR_GET = ["acp_id"] FIRST_SUPPORTED_FMC_VERSION = "6.5" def __init__(self, fmc, **kwargs): """ Initialize InheritanceSettings object. Set self.type to "AccessPolicyInheritanceSettings", parse the kwargs, and set up the self.URL. Note: The InheritanceSettings API is a bit of a weird API. The construction of the API URI needs two IDs. According to the API documentation you need the ContainerID and the objectId where the later is defined as "Unique identifier of the Access Policy Inheritance Setting". These two values are actually the same value, specifically the ID of the ACP for which you are changing the inheritance. The third ID that is required for this API, which we have called the base_policy_id to align with the API documentation, is the ID of ACP that you want to be the parent. An example of use would be: fmc_integrate = InheritanceSettings( fmc=fmc_session, acp_id=child_policy["id"], id=child_policy["id"], base_policy_id=parent_policy["id"], ) fmc_integrate.put() :param fmc (object): FMC object :param **kwargs: Any other values passed during instantiation. :return: None """ logging.debug("In __init__() for InheritanceSettings class.") super().__init__(fmc, **kwargs) self.parse_kwargs(**kwargs) self.type = "AccessPolicyInheritanceSettings" self.URL = f"{self.URL}{self.URL_SUFFIX}" def parse_kwargs(self, **kwargs): """ Parse the kwargs and set self variables to match. 
:return: None """ super().parse_kwargs(**kwargs) logging.debug("In parse_kwargs() for InheritanceSettings class.") if "acp_id" in kwargs: self.acp(acp_id=kwargs["acp_id"]) if "acp_name" in kwargs: self.acp(name=kwargs["acp_name"]) if "device_id" in kwargs: self.device(id=kwargs["device_id"]) if "device_name" in kwargs: self.device(name=kwargs["device_name"]) def acp(self, name="", acp_id=""): """ Associate an AccessPolicies object with this InheritanceSettings object. :param name: (str) Name of ACP. :param id: (str) ID of ACP. :return: None """ # either name or id of the ACP should be given logging.debug("In acp() for InheritanceSettings class.") if acp_id != "": self.acp_id = acp_id self.URL = f"{self.fmc.configuration_url}{self.PREFIX_URL}/{self.acp_id}/inheritancesettings" self.acp_added_to_url = True elif name != "": acp1 = AccessPolicies(fmc=self.fmc) acp1.get(name=name) if "id" in acp1.__dict__: self.acp_id = acp1.id self.URL = f"{self.fmc.configuration_url}{self.PREFIX_URL}/{self.acp_id}/inheritancesettings" self.acp_added_to_url = True else: logging.warning( f'Access Control Policy "{name}" not found. Cannot configure acp for InheritanceSettings.' ) else: logging.error("No accessPolicy name or id was provided.") def format_data(self, filter_query=""): """ Gather all the data in preparation for sending to API in JSON format. :param filter_query: (str) 'all' or 'kwargs' :return: (dict) json_data """ logging.debug("In format_data() for InheritanceSettings class.") return { "type": "AccessPolicyInheritanceSetting", "id": self.acp_id, "basePolicy": {"type": "AccessPolicy", "id": self.base_policy_id}, } def post(self): """POST method for InheritanceSettings not supported.""" logging.info("API POST method for InheritanceSettings not supported.") pass def delete(self, **kwargs): """DELETE method for InheritanceSettings not supported.""" logging.info("API DELETE method for InheritanceSettings not supported.") pass
{ "repo_name": "daxm/fmcapi", "path": "fmcapi/api_objects/policy_services/inheritancesettings.py", "copies": "1", "size": "4743", "license": "bsd-3-clause", "hash": 492906868235054850, "line_mean": 36.944, "line_max": 115, "alpha_frac": 0.5989879823, "autogenerated": false, "ratio": 3.9857142857142858, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0008700954633735447, "num_lines": 125 }
#Acq_IncClk.py
# C:\Program Files\National Instruments\NI-DAQ\Examples\DAQmx ANSI C\Analog In\Measure Voltage\Acq-Int Clk\Acq-IntClk.c
# Voltage logger: samples one NI-DAQ analog input channel in a loop and
# appends (timestamp, voltage) rows to a log file until Ctrl-C.
# NOTE(review): Python 2 script (print statement, file() builtin) that
# requires the Windows nicaiu.dll -- it cannot run on Python 3 as-is.
import ctypes
import numpy
from time import time, strftime

nidaq = ctypes.windll.nicaiu # load the DLL

#########################
### Setting parameters

# Voltage limit: record between -voltlimit...+voltlimit
voltlimit = 2.0
##############################

# Setup some typedefs and constants
# to correspond with values in
# C:\Program Files\National Instruments\NI-DAQ\DAQmx ANSI C Dev\include\NIDAQmx.h
# the typedefs
int32 = ctypes.c_long
uInt32 = ctypes.c_ulong
uInt64 = ctypes.c_ulonglong
float64 = ctypes.c_double
TaskHandle = uInt32
# the constants
DAQmx_Val_Cfg_Default = int32(-1)
DAQmx_Val_Volts = 10348
DAQmx_Val_Rising = 10280
DAQmx_Val_FiniteSamps = 10178
DAQmx_Val_GroupByChannel = 0
##############################

def CHK(err):
    """a simple error checking routine

    Raises RuntimeError with the driver's error string when a DAQmx call
    returns a negative status code.
    """
    if err < 0:
        buf_size = 100
        buf = ctypes.create_string_buffer('\000' * buf_size)
        nidaq.DAQmxGetErrorString(err,ctypes.byref(buf),buf_size)
        raise RuntimeError('nidaq call failed with error %d: %s'%(err,repr(buf.value)))

# initialize variables
taskHandle = TaskHandle(0)

class AIChannel:
    # Wraps a single DAQmx analog-input task reading into a preallocated
    # numpy buffer.
    def __init__(self, nidaq, device, channel, voltlimit, max_num_samples):
        self.taskHandle = TaskHandle(0)
        self.nidaq = nidaq
        self.max_num_samples = max_num_samples
        self.voltlimit = voltlimit
        self.data = numpy.zeros((self.max_num_samples,),dtype=numpy.float64)
        # now, on with the program
        # NOTE(review): the device/channel parameters are ignored -- the
        # physical channel is hard-coded to "Dev2/ai0" below. Confirm and
        # parameterise if other channels are needed.
        CHK(self.nidaq.DAQmxCreateTask("",ctypes.byref(self.taskHandle)))
        CHK(self.nidaq.DAQmxCreateAIVoltageChan(self.taskHandle,"Dev2/ai0","",
                                                DAQmx_Val_Cfg_Default,
                                                float64(-self.voltlimit),float64(self.voltlimit),
                                                DAQmx_Val_Volts,None))

    def startTask(self):
        # Starts acquisition; self.read receives the sample count from reads.
        CHK(self.nidaq.DAQmxStartTask(self.taskHandle))
        self.read = int32()

    def readValue(self):
        # Blocking read (10 s timeout) of up to max_num_samples into the
        # preallocated buffer; returns the buffer itself (not a copy).
        CHK(self.nidaq.DAQmxReadAnalogF64(self.taskHandle,self.max_num_samples,float64(10.0),
                                          DAQmx_Val_GroupByChannel,self.data.ctypes.data,
                                          self.max_num_samples,ctypes.byref(self.read),None))
        return self.data

# Setting up input
chan = AIChannel(nidaq, "Dev2", "ai0", voltlimit, 1)
chan.startTask()

# Setting up output file
datafile = "errorsig_%s.log" %(strftime("%y%m%d_%H%M%S"))
out = file(datafile, 'a')
out.write("#Time(UnixTime) Voltage(V)\n")

# Do logging until stopped by Ctrl-C
while True:
    try:
        now = time()
        # NOTE(review): readValue() returns a length-1 numpy array here
        # (max_num_samples=1); formatting it with %f relies on implicit
        # size-1 array-to-scalar conversion -- confirm with the numpy
        # version in use.
        errorsignal = chan.readValue()
        result = numpy.array([[now, errorsignal]])
        numpy.savetxt(out, result)
        print "%f / %f V " %(now, errorsignal)
    except (KeyboardInterrupt):
        break
out.close()
{ "repo_name": "UltracoldAtomsLab/labhardware", "path": "projects/voltrecord/voltrecord.py", "copies": "2", "size": "2911", "license": "mit", "hash": -6959669874879218000, "line_mean": 32.4712643678, "line_max": 119, "alpha_frac": 0.6310546204, "autogenerated": false, "ratio": 3.1710239651416123, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.48020785855416126, "avg_score": null, "num_lines": null }
"""Acquia API task queue resource."""

import logging
import re
import requests_cache
import time

from datetime import datetime
from datetime import timedelta

from acapi import exceptions
from acapi.resources.acquiaresource import AcquiaResource

LOGGER = logging.getLogger("acapi.resources.task")


class Task(AcquiaResource):
    """Task queue resource."""

    #: Task polling interval in seconds.
    POLL_INTERVAL = 3

    #: Valid task properties
    valid_keys = [
        "body",
        "completed",
        "cookie",
        "created",
        "description",
        "hidden",
        "id",
        "percentage",
        "queue",
        "received",
        "recipient",
        "result",
        "sender",
        "started",
        "state",
    ]

    def __init__(self, uri, auth, data=None, hack_uri=True):
        """Constructor.

        Parameters
        ----------
        uri : str
            The base URI for the resource.
        auth : tuple
            The authentication credentials to use for the request.
        data : dict
            Raw data from ACAPI.
        hack_uri : bool
            Hack the URI so it is valid?
        """
        if hack_uri:
            uri = self.mangle_uri(uri, data)

        # NOTE(review): self.loops is never read within this class;
        # presumably kept for subclasses/callers -- confirm before removing.
        self.loops = 0

        super(Task, self).__init__(uri, auth, data)

    def mangle_uri(self, uri, task_data):
        """Generate a URI for a task based on JSON task object.

        Rewrites ``.../sites/<site>...`` into ``.../sites/<site>/tasks/<id>``.

        Parameters
        ----------
        uri : str
            The base URI to rewrite.
        task_data : dict
            Raw task data from ACAPI; only the ``id`` key is used.

        Returns
        -------
        str
            The task URI.
        """
        task_id = int(task_data["id"])

        pattern = re.compile(r"/sites/([a-z\:0-9]+)(/.*)?")
        task_uri = pattern.sub(r"/sites/\g<1>/tasks/{}".format(task_id), uri)

        return task_uri

    def pending(self):
        """Check if a task is still pending.

        A task is pending until the API sets its ``completed`` timestamp.
        Also refreshes ``self.data`` with the latest task payload.

        Returns
        -------
        bool
            Is the task still pending completion?
        """
        # Ensure we don't have stale data
        # Disable caching so we get the real response
        with requests_cache.disabled():
            task = self.request()
            self.data = task

        return task["completed"] is None

    def wait(self, timeout=1800):
        """Wait for a task to finish executing.

        Polls the API every POLL_INTERVAL seconds until the task completes,
        raising if the timeout elapses or the final state is not 'done'.

        Parameters
        ----------
        timeout : int
            The maximum number of seconds to wait for the task to complete.

        Returns
        -------
        Task
            Task object.
        """
        start = datetime.now()
        max_time = start + timedelta(seconds=timeout)

        while self.pending():
            # Ensure the timeout hasn't been exceeded.
            if datetime.now() >= max_time:
                msg = "Time out exceeded while waiting for {tid}".format(
                    tid=self.data["id"]
                )
                raise exceptions.AcquiaCloudTimeoutError(msg, self.data)

            time.sleep(self.POLL_INTERVAL)

        # Grab the cached response
        task = self.get()
        if "done" != task["state"]:
            raise exceptions.AcquiaCloudTaskFailedException(
                "Task {task_id} failed".format(task_id=task["id"]), task
            )

        end = datetime.now()
        delta = end - start
        LOGGER.info("Waited %.2f seconds for task to complete", delta.total_seconds())

        return self
{ "repo_name": "skwashd/python-acquia-cloud", "path": "acapi/resources/task.py", "copies": "1", "size": "3398", "license": "mit", "hash": -4467947417074981000, "line_mean": 23.6231884058, "line_max": 86, "alpha_frac": 0.5288404944, "autogenerated": false, "ratio": 4.453473132372215, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.00008329168748958853, "num_lines": 138 }
"""
Acquia Cloud API client.
"""

import os

import requests_cache

import acapi.exceptions
from acapi.resources.site import Site
from acapi.resources.sitelist import SiteList
from acapi.resources.user import User


class Client(object):
    """A Client for accessing the Acquia Cloud API."""

    def __init__(
        self,
        user=None,
        token=None,
        realm="prod",
        endpoint="https://cloudapi.acquia.com/v1",
        cache=600,
    ):
        """Create an Acquia Cloud API REST client.

        Parameters
        ----------
        user : str
            Acquia Cloud API username.
        token : str
            Acquia Cloud API user token.
        realm : str
            Acquia Cloud API realm (defaults to 'prod').
        endpoint : str
            Base Acquia Cloud API endpoint URL.
        cache : int
            How long API responses should be cached for, in seconds.
            Pass None to skip installing the response cache.
        """
        # Fall back to environment variables when credentials are missing.
        if not user or not token:
            user, token = self.__find_credentials()

        if not user or not token:
            raise acapi.exceptions.AcquiaCloudException("Credentials not provided")

        self.auth = (user, token)
        self.realm = realm
        self.endpoint = endpoint

        # Install a process-wide, in-memory response cache unless disabled.
        if cache is not None:
            requests_cache.install_cache(
                cache_name="acapi", backend="memory", expire_after=cache
            )

    def generate_uri(self, path):
        """Build a full ACAPI request URI from a relative path.

        Parameters
        ----------
        path : str
            The path component of the URI.

        Returns
        -------
        str
            The generated URI.
        """
        return "%s/%s" % (self.endpoint, path)

    def site(self, name):
        """Retrieve a site object.

        Parameters
        ----------
        name : str
            The Acquia site/subscription name to look up.

        Returns
        -------
        Site
            The site object.
        """
        qualified = "sites/{realm}:{name}".format(realm=self.realm, name=name)
        return Site(self.generate_uri(qualified), self.auth)

    def sites(self):
        """Retrieve the available sites, keyed by site name.

        Returns
        -------
        SiteList
            dict of sites keyed by site name.
        """
        return SiteList(self.endpoint, self.auth)

    def user(self):
        """Retrieve the currently authenticated Cloud API user."""
        return User(self.generate_uri("me"), self.auth)

    def __find_credentials(self):
        """Read API credentials from the process environment."""
        env = os.environ
        return env.get("ACQUIA_CLOUD_API_USER"), env.get("ACQUIA_CLOUD_API_TOKEN")
{ "repo_name": "skwashd/python-acquia-cloud", "path": "acapi/client.py", "copies": "1", "size": "2839", "license": "mit", "hash": -8803530759376947000, "line_mean": 24.5765765766, "line_max": 75, "alpha_frac": 0.5435012328, "autogenerated": false, "ratio": 4.499207606973059, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 111 }
"""
Acquia Cloud API database list resource.
"""

import re

from acapi.resources.acquialist import AcquiaList
from acapi.resources.database import Database


class DatabaseList(AcquiaList):
    """Dictionary of Acquia Cloud API database resources keyed by name."""

    def __init__(self, base_uri, auth, *args, **kwargs):
        """Initialise the list and eagerly populate it from the API."""
        super(DatabaseList, self).__init__(base_uri, auth, *args, **kwargs)
        self.fetch()

    def create(self, name):
        """Create a new database.

        Parameters
        ----------
        name : str
            The name of the new database.

        Returns
        -------
        Database
            The new database object.
        """
        # Database creation happens at the site level, so strip the
        # environment segment from the URI before posting.
        site_uri = re.sub(r"/envs/(.+)/dbs", "", self.uri)
        create_uri = "{base_uri}/dbs".format(base_uri=site_uri)

        response = self.request(method="POST", uri=create_uri, data={"db": name})
        self.create_task(create_uri, response).wait()

        database = Database(self.get_resource_uri(name), self.auth)
        self[name] = database
        return database

    def fetch(self):
        """Fetch and store database objects."""
        for record in self.request(uri=self.uri):
            name = str(record["name"])
            self[name] = Database(self.get_resource_uri(name), self.auth, data=record)

    def get_resource_uri(self, name):
        """Generate the database URI.

        Parameters
        ----------
        name : str
            The name of the database.

        Returns
        -------
        str
            The database URI.
        """
        return "/".join([self.uri, name])

    def set_base_uri(self, base_uri):
        """Set the base URI for database resources.

        Parameters
        ----------
        base_uri : str
            The base URI to use for generating the new URI.
        """
        self.uri = "{}/dbs".format(base_uri)
{ "repo_name": "skwashd/python-acquia-cloud", "path": "acapi/resources/databaselist.py", "copies": "1", "size": "2067", "license": "mit", "hash": 2929313313696006000, "line_mean": 26.1973684211, "line_max": 76, "alpha_frac": 0.5374939526, "autogenerated": false, "ratio": 3.9371428571428573, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4974636809742857, "avg_score": null, "num_lines": null }
"""
Acquia Cloud API data resource.
"""

import json
import logging

import backoff
import requests
import requests_cache

import time

from acapi.version import __version__
from platform import python_version
from pprint import pformat

LOGGER = logging.getLogger("acapi.resources.acquiadata")


class AcquiaData(object):
    """Acquia Cloud API abstract network resource.

    Wraps a single REST resource URI and issues authenticated
    GET/POST/DELETE requests against it via a shared requests session.
    """

    #: User Agent string sent with every request.
    RAW_AGENT = "Acquia Cloud API Client/{mver} (Python {pver})"
    USER_AGENT = RAW_AGENT.format(mver=__version__, pver=python_version())

    #: Class-level session shared by all AcquiaData instances.
    SESSION = None

    def __init__(self, uri, auth, data=None):
        """Constructor.

        Parameters
        ----------
        uri : str
            The base URI for the resource.
        auth : tuple
            The authentication credentials to use for the request.
        data : dict
            Raw data from ACAPI.
        """
        self.uri = uri
        self.auth = auth
        self.data = data
        self.last_response = None
        self.session = self._get_session()

    def _get_session(self):
        """Return the shared session object, creating it on first use.

        :return: requests.Session
        """
        if not AcquiaData.SESSION:
            AcquiaData.SESSION = requests.Session()

        return AcquiaData.SESSION

    def create_task(self, uri, data):
        """Create a new task object from a responses response object.

        Parameters
        ----------
        uri : str
            The URI for the action that triggered the task.
        data : dict
            The task data returned by the triggering request.

        Returns
        -------
        Task
            The Task object.
        """
        # We have to do this here to avoid circular dependencies
        from acapi.resources.task import Task

        task = Task(uri, self.auth, data=data)
        return task

    def get_last_response(self):
        """Fetch the last response object."""
        return self.last_response

    @backoff.on_exception(
        backoff.expo, requests.exceptions.RequestException, max_time=10
    )
    def request(
        self,
        uri=None,
        method="GET",
        data=None,
        params=None,
        decode_json=True,
        headers=None,
        stream=False,
    ):
        """Perform a HTTP request.

        Parameters
        ----------
        uri : str
            The URI to use for the request; defaults to the resource URI.
        method : str
            The HTTP method to use for the request (GET, POST or DELETE).
        data : dict
            Any data to send as part of a post request body.
        params : dict
            Query string parameters (optional).
        decode_json : bool
            Decode response or not.
        headers : dict
            Extra HTTP request headers (optional).
        stream : bool
            If response is streamed.

        Returns
        -------
        dict
            Decoded JSON response data as a dict object (or the raw
            response/response content when stream/decode_json say so).

        Raises
        ------
        ValueError
            If an unsupported HTTP method is requested.
        requests.exceptions.HTTPError
            If the API responds with an error status.
        """
        self.last_response = None

        if uri is None:
            uri = self.uri

        # BUG FIX: the previous signature used mutable default arguments
        # (params={}, headers={}) which this method mutates, leaking the
        # User-Agent header and retry bookkeeping across calls. Copy any
        # caller-supplied dicts so caller state is never modified either.
        params = dict(params) if params else {}
        headers = dict(headers) if headers else {}
        headers["User-Agent"] = self.USER_AGENT

        uri = "{}.json".format(uri)
        resp = None

        if "GET" == method:
            # Retry server errors up to 5 times with a crude quadratic
            # back-off; acapi_retry doubles as a cache buster.
            attempt = 0
            while attempt <= 5:
                resp = self.session.get(
                    uri, auth=self.auth, headers=headers, params=params, stream=stream
                )

                if resp.status_code not in range(500, 505):
                    # No need to retry for if not a server error type.
                    break

                attempt += 1
                params["acapi_retry"] = attempt
                time.sleep((attempt ** 2.0) / 10)

            # We need to unset the property or it sticks around.
            if "acapi_retry" in params:
                del params["acapi_retry"]

        elif "POST" == method:
            jdata = json.dumps(data)
            resp = self.session.post(
                uri, auth=self.auth, headers=headers, params=params, data=jdata
            )
            # This is a sledgehammer but fine grained invalidation is messy.
            if self.is_cache_enabled():
                requests_cache.clear()

        elif "DELETE" == method:
            resp = self.session.delete(uri, auth=self.auth, headers=headers, params=params)
            # Quickest and easiest way to do this.
            if self.is_cache_enabled():
                requests_cache.clear()

        else:
            # BUG FIX: an unknown method previously fell through and died
            # with an AttributeError on resp=None; fail fast instead.
            raise ValueError("Unsupported HTTP method: {}".format(method))

        if hasattr(resp, "from_cache") and resp.from_cache:
            LOGGER.info("%s %s returned from cache", method, uri)

        self.last_response = resp

        try:
            resp.raise_for_status()
        except requests.exceptions.HTTPError as exp:
            LOGGER.info(
                "Failed request response headers: \n%s",
                pformat(exp.response.headers, indent=2),
            )
            raise

        if stream:
            return resp

        if decode_json:
            return resp.json()

        return resp.content

    def is_cache_enabled(self):
        """Checks if requests cache is enabled.

        :return: Cache status.
        :rtype: bool.
        """
        return hasattr(requests.Session(), "cache")
{ "repo_name": "skwashd/python-acquia-cloud", "path": "acapi/resources/acquiadata.py", "copies": "1", "size": "5136", "license": "mit", "hash": 7489766803480367000, "line_mean": 26.0315789474, "line_max": 91, "alpha_frac": 0.5401090343, "autogenerated": false, "ratio": 4.509218612818262, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5549327647118262, "avg_score": null, "num_lines": null }
"""Acquia Cloud API domain resource. """

import re

from acapi.resources.acquiaresource import AcquiaResource


class Domain(AcquiaResource):
    """Domain record associated with an environment."""

    valid_keys = ["name"]

    def cache_purge(self):
        """Purge the varnish cache for the domain.

        Returns
        -------
        bool
            Was the cache cleared?
        """
        purge_uri = "%s/cache" % self.uri
        response = self.request(uri=purge_uri, method="DELETE")
        self.create_task(purge_uri, response).wait()
        return True

    def delete(self):
        """Delete the domain record.

        Returns
        -------
        bool
            Was the domain record deleted?
        """
        response = self.request(method="DELETE")
        self.create_task(self.uri, response).wait()
        return True

    def move(self, target):
        """Move a domain from one environment to another.

        Parameters
        ----------
        target : str
            The name of the environment the domain is being moved to.

        Returns
        -------
        Domain
            The new domain object or None is the move fails.
        """
        # These regex hacks are needed because Acquia doesn't keep this
        # function with domains, which sucks.
        pattern = re.compile("/envs/(.*)/domains/(.*)")
        matches = pattern.search(self.uri)
        current_env = matches.group(1)
        domain_name = matches.group(2)

        move_uri = "{}/{}".format(pattern.sub(r"/domain-move/\g<1>", self.uri), target)
        response = self.request(
            uri=move_uri, method="POST", data={"domains": [domain_name]}
        )
        self.create_task(move_uri, response).wait()

        # Another hack, this time to get the URI for the domain.
        new_uri = self.uri.replace(
            "/{}/".format(current_env), "/{}/".format(target)
        )
        return Domain(new_uri, self.auth)
{ "repo_name": "skwashd/python-acquia-cloud", "path": "acapi/resources/domain.py", "copies": "1", "size": "2027", "license": "mit", "hash": 4152364273745519000, "line_mean": 27.1527777778, "line_max": 83, "alpha_frac": 0.5589541194, "autogenerated": false, "ratio": 4.062124248496994, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.00016534391534391533, "num_lines": 72 }
"""
Acquia Cloud API Environment resource.
"""

import re

from acapi.resources.acquiaresource import AcquiaResource
from acapi.resources.database import Database
from acapi.resources.databaselist import DatabaseList
from acapi.resources.domain import Domain
from acapi.resources.domainlist import DomainList
from acapi.resources.server import Server
from acapi.resources.serverlist import ServerList


class Environment(AcquiaResource):
    """Environment associated with a site."""

    #: Valid keys for environment object.
    valid_keys = [
        "name",
        "vcs_path",
        "ssh_host",
        "db_clusters",
        "default_domain",
        "livedev",
    ]

    def copy_files(self, target):
        """Copy files to another environment.

        Parameters
        ----------
        target : str
            The name of the environment to copy the files to.

        Returns
        -------
        bool
            Were the files successfully copied?
        """
        # The files-copy endpoint lives beside (not beneath) the envs
        # collection, so rewrite the URI rather than appending to it.
        pattern = re.compile("/envs/(.*)")
        base_uri = pattern.sub(r"/files-copy/\g<1>", self.uri)
        uri = "{uri}/{target}".format(uri=base_uri, target=target)

        task_data = self.request(uri=uri, method="POST")
        task = self.create_task(uri, task_data)
        task.wait()

        return True

    def db(self, name):
        """Fetch a database associated with the environment.

        Parameters
        ----------
        name : str
            The name of the database to retrieve.

        Returns
        -------
        Database
            The requested database resource object.
        """
        uri = "%s/dbs/%s" % (self.uri, name)
        return Database(uri, self.auth)

    def dbs(self):
        """Fetch all databases associated with the environment.

        Returns
        -------
        DatabaseList
            Dictionary of the databases keyed by name.
        """
        dbs = DatabaseList(self.uri, self.auth)
        return dbs

    def deploy_code(self, git_ref):
        """Deploy code to the environment.

        Parameters
        ----------
        git_ref : string
            The git reference to deploy. Must be a branch/tag name.

        Returns
        -------
        bool
            Was the code successfully deployed?
        """
        uri = "{}/code-deploy".format(self.uri)
        params = {"path": git_ref}

        task_data = self.request(uri=uri, method="POST", params=params)
        task = self.create_task(uri, task_data)
        task.wait()

        return True

    def domain(self, name):
        """Fetch a domain resource object.

        Parameters
        ----------
        name : string
            The FQDN of the domain to lookup.

        Returns
        -------
        Domain
            The domain resource object.
        """
        uri = "{uri}/domains/{name}".format(uri=self.uri, name=name)
        return Domain(uri, self.auth)

    def domains(self):
        """Fetch a list of domains associated with the environment.

        Returns
        -------
        DomainList
            Dictionary of domains keyed by FQDN.
        """
        domains = DomainList(self.uri, self.auth)
        return domains

    def livedev(self, enable, discard=True):
        """Enable or disable live dev for the domain.

        Parameters
        ----------
        enable : bool
            Enable live development for this environment?
        discard : bool
            Discard all non committed changes? Only applied when disabling.

        Returns
        -------
        Environment
            This environment object.
        """
        action = "enable"
        params = {}

        if not enable:
            action = "disable"

        uri = "{uri}/livedev/{action}".format(uri=self.uri, action=action)

        if not enable and discard:
            params["discard"] = 1

        task_data = self.request(uri=uri, method="POST", params=params)
        task = self.create_task(uri, task_data)
        task.wait()

        return self

    def server(self, hostname):
        """Fetch a server associated with the environment.

        Parameters
        ----------
        hostname : str
            The hostname of the server to lookup.

        Returns
        -------
        Server
            The server resource object.
        """
        # BUG FIX: this previously built a tuple
        # ("%s/servers/%s", (self.uri, hostname)) -- the % operator was
        # missing -- so every call produced an invalid URI.
        uri = "%s/servers/%s" % (self.uri, hostname)
        return Server(uri, self.auth)

    def servers(self):
        """Fetch a list of servers associated with the environment.

        Returns
        -------
        ServerList
            Dictionary of servers keyed by hostname.
        """
        servers = ServerList(self.uri, self.auth)
        return servers
{ "repo_name": "skwashd/python-acquia-cloud", "path": "acapi/resources/environment.py", "copies": "1", "size": "4455", "license": "mit", "hash": 5134587949477459000, "line_mean": 24.7514450867, "line_max": 74, "alpha_frac": 0.5611672278, "autogenerated": false, "ratio": 4.5552147239263805, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 173 }
"""Acquia Cloud API Exceptions."""

from pprint import pformat


class AcquiaCloudException(Exception):
    """Generic Acquia Cloud API Exception.

    All ACAPI exceptions should extend this class.
    """


class AcquiaCloudNoDataException(AcquiaCloudException):
    """No data found exception."""


class AcquiaCloudTaskFailedException(AcquiaCloudException):
    """An Acquia task failure exception.

    Carries the failed Task object alongside the error message so callers
    can inspect the task state.
    """

    def __init__(self, message, task):
        """Store the message and the failed task.

        Parameters
        ----------
        message : str
            The error message.
        task : Task
            The Task object for the task that failed.
        """
        super(AcquiaCloudTaskFailedException, self).__init__(message)
        self.message = message
        self.task = task

    def __str__(self):
        """Render the error message plus pretty-printed task properties."""
        return "\n".join([self.message, pformat(self.task, indent=4)])


class AcquiaCloudTimeoutError(AcquiaCloudTaskFailedException):
    """Timeout exceeded error."""
{ "repo_name": "skwashd/python-acquia-cloud", "path": "acapi/exceptions.py", "copies": "1", "size": "1228", "license": "mit", "hash": 2601470133261799000, "line_mean": 22.1698113208, "line_max": 72, "alpha_frac": 0.6229641694, "autogenerated": false, "ratio": 4.531365313653136, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 53 }
"""Acquia Cloud API environment list resource."""

from acapi.resources.acquialist import AcquiaList
from acapi.resources.environment import Environment


class EnvironmentList(AcquiaList):
    """Dict of Acquia Cloud API Environment resources keyed by short name."""

    def __init__(self, base_uri, auth, *args, **kwargs):
        """Initialise the list and eagerly populate it from the API."""
        super(EnvironmentList, self).__init__(base_uri, auth, *args, **kwargs)
        self.fetch()

    def fetch(self):
        """Fetch and store environment objects."""
        for record in self.request(uri=self.uri):
            name = str(record["name"])
            self[name] = Environment(
                self.get_resource_uri(name), self.auth, data=record
            )

    def get_resource_uri(self, name):
        """Generate the resource URI.

        Parameters
        ----------
        name : str
            The name of the environment resource.

        Returns
        -------
        str
            The resource URI.
        """
        return "/".join([self.uri, name])

    def set_base_uri(self, base_uri):
        """Set the base URI for environment resources.

        Parameters
        ----------
        base_uri : str
            The base URI to use for generating the new URI.
        """
        self.uri = "{}/envs".format(base_uri)
{ "repo_name": "skwashd/python-acquia-cloud", "path": "acapi/resources/environmentlist.py", "copies": "1", "size": "1383", "license": "mit", "hash": 5596327021361080000, "line_mean": 28.4255319149, "line_max": 78, "alpha_frac": 0.5668835864, "autogenerated": false, "ratio": 4.1656626506024095, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 47 }
"""Acquia Cloud API server list resource. """

from acapi.resources.acquialist import AcquiaList
from acapi.resources.server import Server


class ServerList(AcquiaList):
    """Dict of Acquia Cloud API Server resources keyed by hostname."""

    def __init__(self, base_uri, auth, *args, **kwargs):
        """Initialise the list and eagerly populate it from the API."""
        super(ServerList, self).__init__(base_uri, auth, *args, **kwargs)
        self.fetch()

    def fetch(self):
        """Fetch and store server objects."""
        records = self.request(uri=self.uri)
        for record in records:
            hostname = str(record["name"])
            self[hostname] = Server(
                self.get_resource_uri(hostname), self.auth, data=record
            )

    def get_resource_uri(self, name):
        """Generate the server URI.

        Parameters
        ----------
        name : str
            The hostname of the server.

        Returns
        -------
        str
            The server URI.
        """
        return "/".join([self.uri, name])

    def set_base_uri(self, base_uri):
        """Set the base URI for server resources.

        Parameters
        ----------
        base_uri : str
            The base URI to use for generating the new URI.
        """
        self.uri = "{}/servers".format(base_uri)
{ "repo_name": "skwashd/python-acquia-cloud", "path": "acapi/resources/serverlist.py", "copies": "1", "size": "1358", "license": "mit", "hash": -1563295469358689500, "line_mean": 27.8936170213, "line_max": 78, "alpha_frac": 0.558910162, "autogenerated": false, "ratio": 4.115151515151515, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5174061677151515, "avg_score": null, "num_lines": null }
"""Acquia Cloud API site resource. """

from acapi.resources.acquiaresource import AcquiaResource
from acapi.resources.environment import Environment
from acapi.resources.environmentlist import EnvironmentList
from acapi.resources.task import Task
from acapi.resources.tasklist import TaskList


class Site(AcquiaResource):
    """Site (or subscription) resource."""

    #: Valid keys for site object.
    valid_keys = [
        "title",
        "name",
        "production_mode",
        "unix_username",
        "vcs_type",
        "vcs_url",
    ]

    def copy_code(self, source, target):
        """Copy code from one environment to another.

        Parameters
        ----------
        source : str
            The name of the source environment.
        target : str
            The name of the target environment.

        Returns
        -------
        bool
            Was the code successfully copied?
        """
        deploy_uri = "%s/code-deploy/%s/%s" % (self.uri, source, target)
        response = self.request(uri=deploy_uri, method="POST")
        self.create_task(deploy_uri, response).wait()
        return True

    def environment(self, name):
        """Retrieve an environment resource.

        Parameters
        ----------
        name : str
            The name of the environment.

        Returns
        -------
        Environment
            The environment resource object.
        """
        return Environment("%s/envs/%s" % (self.uri, name), self.auth)

    def environments(self):
        """Retrieve a list of environments.

        Returns
        -------
        EnvironmentList
            Dictionary of environments keyed by name.
        """
        return EnvironmentList(self.uri, self.auth)

    def task(self, task_id):
        """Retrieve a task.

        Parameters
        ----------
        task_id : int
            The task identifier.

        Returns
        -------
        Task
            The task resource object.
        """
        task_uri = "{base}/tasks/{tid:d}".format(base=self.uri, tid=task_id)
        return Task(task_uri, self.auth, hack_uri=False)

    def tasks(self):
        """Retrieve all tasks for the site.

        Returns
        -------
        TaskList
            Dictionary of task resources keyed by id.
        """
        return TaskList(self.uri, self.auth)
{ "repo_name": "skwashd/python-acquia-cloud", "path": "acapi/resources/site.py", "copies": "1", "size": "2467", "license": "mit", "hash": 2889276708821521400, "line_mean": 23.67, "line_max": 65, "alpha_frac": 0.5460072963, "autogenerated": false, "ratio": 4.619850187265918, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5665857483565918, "avg_score": null, "num_lines": null }
"""Acquires and associates a public IP to an account."""
from baseCmd import *
from baseResponse import *


# NOTE(review): this module follows an auto-generated API-binding layout
# (bare-string field descriptions followed by attribute assignments).
# Prefer regenerating from the API definition over hand-editing -- confirm
# with the project tooling before changing field lists.
class associateIpAddressCmd (baseCmd):
    # Request command: parameters for the associateIpAddress API call.
    typeInfo = {}

    def __init__(self):
        # Async flag is a string, not a bool, per the surrounding API style.
        self.isAsync = "true"
        """the account to associate with this IP address"""
        self.account = None
        self.typeInfo['account'] = 'string'
        """the ID of the domain to associate with this IP address"""
        self.domainid = None
        self.typeInfo['domainid'] = 'uuid'
        """an optional field, whether to the display the IP to the end user or not"""
        self.fordisplay = None
        self.typeInfo['fordisplay'] = 'boolean'
        """should be set to true if public IP is required to be transferable across zones, if not specified defaults to false"""
        self.isportable = None
        self.typeInfo['isportable'] = 'boolean'
        """The network this IP address should be associated to."""
        self.networkid = None
        self.typeInfo['networkid'] = 'uuid'
        """Deploy VM for the project"""
        self.projectid = None
        self.typeInfo['projectid'] = 'uuid'
        """region ID from where portable IP is to be associated."""
        self.regionid = None
        self.typeInfo['regionid'] = 'integer'
        """the VPC you want the IP address to be associated with"""
        self.vpcid = None
        self.typeInfo['vpcid'] = 'uuid'
        """the ID of the availability zone you want to acquire an public IP address from"""
        self.zoneid = None
        self.typeInfo['zoneid'] = 'uuid'
        # No parameters are mandatory for this command.
        self.required = []


class associateIpAddressResponse (baseResponse):
    # Response object: attributes are populated from the API response.
    typeInfo = {}

    def __init__(self):
        """public IP address id"""
        self.id = None
        self.typeInfo['id'] = 'string'
        """the account the public IP address is associated with"""
        self.account = None
        self.typeInfo['account'] = 'string'
        """the ID of the ACL applied to this IP"""
        self.aclid = None
        self.typeInfo['aclid'] = 'string'
        """date the public IP address was acquired"""
        self.allocated = None
        self.typeInfo['allocated'] = 'date'
        """the ID of the Network associated with the IP address"""
        self.associatednetworkid = None
        self.typeInfo['associatednetworkid'] = 'string'
        """the name of the Network associated with the IP address"""
        self.associatednetworkname = None
        self.typeInfo['associatednetworkname'] = 'string'
        """the domain the public IP address is associated with"""
        self.domain = None
        self.typeInfo['domain'] = 'string'
        """the domain ID the public IP address is associated with"""
        self.domainid = None
        self.typeInfo['domainid'] = 'string'
        """is public ip for display to the regular user"""
        self.fordisplay = None
        self.typeInfo['fordisplay'] = 'boolean'
        """the virtual network for the IP address"""
        self.forvirtualnetwork = None
        self.typeInfo['forvirtualnetwork'] = 'boolean'
        """public IP address"""
        self.ipaddress = None
        self.typeInfo['ipaddress'] = 'string'
        """is public IP portable across the zones"""
        self.isportable = None
        self.typeInfo['isportable'] = 'boolean'
        """true if the IP address is a source nat address, false otherwise"""
        self.issourcenat = None
        self.typeInfo['issourcenat'] = 'boolean'
        """true if this ip is for static nat, false otherwise"""
        self.isstaticnat = None
        self.typeInfo['isstaticnat'] = 'boolean'
        """true if this ip is system ip (was allocated as a part of deployVm or createLbRule)"""
        self.issystem = None
        self.typeInfo['issystem'] = 'boolean'
        """the ID of the Network where ip belongs to"""
        self.networkid = None
        self.typeInfo['networkid'] = 'string'
        """the physical network this belongs to"""
        self.physicalnetworkid = None
        self.typeInfo['physicalnetworkid'] = 'string'
        """the project name of the address"""
        self.project = None
        self.typeInfo['project'] = 'string'
        """the project id of the ipaddress"""
        self.projectid = None
        self.typeInfo['projectid'] = 'string'
        """purpose of the IP address. In Acton this value is not null for Ips with isSystem=true, and can have either StaticNat or LB value"""
        self.purpose = None
        self.typeInfo['purpose'] = 'string'
        """State of the ip address. Can be: Allocatin, Allocated and Releasing"""
        self.state = None
        self.typeInfo['state'] = 'string'
        """virutal machine display name the ip address is assigned to (not null only for static nat Ip)"""
        self.virtualmachinedisplayname = None
        self.typeInfo['virtualmachinedisplayname'] = 'string'
        """virutal machine id the ip address is assigned to (not null only for static nat Ip)"""
        self.virtualmachineid = None
        self.typeInfo['virtualmachineid'] = 'string'
        """virutal machine name the ip address is assigned to (not null only for static nat Ip)"""
        self.virtualmachinename = None
        self.typeInfo['virtualmachinename'] = 'string'
        """the ID of the VLAN associated with the IP address. This parameter is visible to ROOT admins only"""
        self.vlanid = None
        self.typeInfo['vlanid'] = 'string'
        """the VLAN associated with the IP address"""
        self.vlanname = None
        self.typeInfo['vlanname'] = 'string'
        """virutal machine (dnat) ip address (not null only for static nat Ip)"""
        self.vmipaddress = None
        self.typeInfo['vmipaddress'] = 'string'
        """VPC the ip belongs to"""
        self.vpcid = None
        self.typeInfo['vpcid'] = 'string'
        """the ID of the zone the public IP address belongs to"""
        self.zoneid = None
        self.typeInfo['zoneid'] = 'string'
        """the name of the zone the public IP address belongs to"""
        self.zonename = None
        self.typeInfo['zonename'] = 'string'
        """the list of resource tags associated with ip address"""
        self.tags = []
        """the ID of the latest async job acting on this object"""
        self.jobid = None
        self.typeInfo['jobid'] = ''
        """the current status of the latest async job acting on this object"""
        self.jobstatus = None
        self.typeInfo['jobstatus'] = ''


# Tag record nested inside the response's tags list.
# NOTE: each description below opens with four quote characters (""""),
# i.e. a docstring whose text begins with a literal quote -- preserved
# as-is from the original source.
class tags:
    def __init__(self):
        """"the account associated with the tag"""
        self.account = None
        """"customer associated with the tag"""
        self.customer = None
        """"the domain associated with the tag"""
        self.domain = None
        """"the ID of the domain associated with the tag"""
        self.domainid = None
        """"tag key name"""
        self.key = None
        """"the project name where tag belongs to"""
        self.project = None
        """"the project id the tag belongs to"""
        self.projectid = None
        """"id of the resource"""
        self.resourceid = None
        """"resource type"""
        self.resourcetype = None
        """"tag value"""
        self.value = None
{ "repo_name": "MissionCriticalCloud/marvin", "path": "marvin/cloudstackAPI/associateIpAddress.py", "copies": "1", "size": "7220", "license": "apache-2.0", "hash": -5275564047006220000, "line_mean": 42.4939759036, "line_max": 142, "alpha_frac": 0.6048476454, "autogenerated": false, "ratio": 4.239577216676453, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5344424862076453, "avg_score": null, "num_lines": null }
## Acquried from: https://gist.github.com/jasdumas/53b0cbfbb8af3e435dafb833357fd67f try: from html import escape except ImportError: from cgi import escape import os from string import Template import sys import platform import socket from datetime import datetime output_template = Template('''<!DOCTYPE html> <html> <head> <title>pyinfo()</title> <meta name="robots" content="noindex,nofollow,noarchive,nosnippet"> <style> body{width: 700px; margin: 10px auto; font:12px sans-serif;} table{border-collapse:collapse; width:100%;} td,th{border:1px solid #999999; padding: 3px} .e{width:35%;background-color:#ffffcc;font-weight:bold;color:#000} .h{background:url('http://python.org/images/header-bg2.png') repeat-x;} .v{background:#f2f2f2;} img{float:right;border:0;} </style> </head> <body> <table><tr class="h"><td> <img src="http://python.org/images/python-logo.gif"> <h1 class="p">Python $py_version</h1> </td></tr></table> $output </body> </html> ''') def make_table(title, data): return '<h2>{}</h2><table>{}</table><br>'.format(title, ''.join( '<tr><td class="e">{}</td><td class="v">{}</td></tr>'.format(*row) for row in data )) def pyinfo(data=None, allow_import=True, template=output_template, make_table_fn=make_table): """ @param data: None or a list of additional (key, values) to include in report, like [('Application version', '2.1')] @param allow_import: allow importing (many) new modules or not, defaults to True @return: complete rendered html """ _process_out = lambda out: '' if not out else make_table_fn(*out) output = '' output += _process_out(section_server_info(data)) output += _process_out(section_system()) output += _process_out(section_py_internals()) output += _process_out(section_os_internals()) output += _process_out(section_environ()) if allow_import: output += _process_out(section_compression()) output += _process_out(section_ldap(allow_import)) output += _process_out(section_socket()) if allow_import: output += _process_out(section_multimedia()) output += 
_process_out(section_packages()) if template is None: return output return template.substitute(output=output, py_version=platform.python_version()) def imported(module): """ returns 'enabled' if a module is imported, '-' if it isn't""" try: if module not in sys.modules: __import__(module) return 'enabled' except: return '-' def section_system(): data = [] if hasattr(sys, 'subversion'): data.append(('Python Subversion', ', '.join(x for x in sys.subversion if x))) if platform.dist()[0] != '' and platform.dist()[1] != '': data.append(('OS Version', '%s %s (%s %s)' % ( platform.system(), platform.release(), platform.dist()[0].capitalize(), platform.dist()[1]))) else: data.append(('OS Version', '%s %s' % (platform.system(), platform.release()))) if hasattr(sys, 'executable'): data.append(('Executable', sys.executable)) data.append(('Build Date', platform.python_build()[1])) data.append(('Compiler', platform.python_compiler())) if hasattr(sys, 'api_version'): data.append(('Python API', sys.api_version)) return 'System', data def section_server_info(data): data = list(data) if data else [] data.append(('Hostname', socket.gethostname())) try: data.append(('IP Address', socket.gethostbyname(socket.gethostname()))) except: pass data.append(('Local time', str(datetime.now()))) data.append(('UTC time', str(datetime.utcnow()))) return 'Server Info', data def section_py_internals(): data = [] if hasattr(sys, 'builtin_module_names'): data.append(('Built-in Modules', ', '.join(sys.builtin_module_names))) data.append(('Byte Order', sys.byteorder + ' endian')) if hasattr(sys, 'getcheckinterval'): data.append(('Check Interval', sys.getcheckinterval())) if hasattr(sys, 'getfilesystemencoding'): data.append(('File System Encoding', sys.getfilesystemencoding())) data.append(('Maximum Integer Size', str(sys.maxsize) + ' (%s)' % str(hex(sys.maxsize)).upper().replace("X", "x"))) if hasattr(sys, 'getrecursionlimit'): data.append(('Maximum Recursion Depth', sys.getrecursionlimit())) if 
hasattr(sys, 'tracebacklimit'): data.append(('Maximum Traceback Limit', sys.tracebacklimit)) else: data.append(('Maximum Traceback Limit', '1000')) data.append(('Maximum Unicode Code Point', sys.maxunicode)) return 'Python Internals', data def section_os_internals(): data = [] if hasattr(os, 'getcwd'): data.append(('Current Working Directory', os.getcwd())) if hasattr(os, 'getegid'): data.append(('Effective Group ID', os.getegid())) if hasattr(os, 'geteuid'): data.append(('Effective User ID', os.geteuid())) if hasattr(os, 'getgid'): data.append(('Group ID', os.getgid())) if hasattr(os, 'getgroups'): data.append(('Group Membership', ', '.join(map(str, os.getgroups())))) if hasattr(os, 'linesep'): data.append(('Line Seperator', repr(os.linesep)[1:-1])) if hasattr(os, 'getloadavg'): data.append(('Load Average', ', '.join(str(round(x, 2)) for x in os.getloadavg()))) if hasattr(os, 'pathsep'): data.append(('Path Seperator', os.pathsep)) try: if hasattr(os, 'getpid') and hasattr(os, 'getppid'): data.append(('Process ID', ('%s (parent: %s)' % (os.getpid(), os.getppid())))) except: pass if hasattr(os, 'getuid'): data.append(('User ID', os.getuid())) return 'OS Internals', data def section_environ(): envvars = list(os.environ.keys()) envvars.sort() data = [] for envvar in envvars: data.append((envvar, escape(str(os.environ[envvar])))) return 'Environment', data def section_compression(): return ('Compression and archiving', [ ('SQLite3', imported('sqlite3')), ('Bzip2 Support', imported('bz2')), ('Gzip Support', imported('gzip')), ('Tar Support', imported('tarfile')), ('Zip Support', imported('zipfile')), ('Zlib Support', imported('zlib')) ]) def section_ldap(allow_import): try: if allow_import: import ldap else: ldap = sys.modules['ldap'] except (KeyError, ImportError): return '' return ('LDAP support', [ ('Python-LDAP Version', ldap.__version__), ('API Version', ldap.API_VERSION), ('Default Protocol Version', ldap.VERSION), ('Minimum Protocol Version', ldap.VERSION_MIN), 
('Maximum Protocol Version', ldap.VERSION_MAX), ('SASL Support (Cyrus-SASL)', ldap.SASL_AVAIL), ('TLS Support (OpenSSL)', ldap.TLS_AVAIL), ('Vendor Version', ldap.VENDOR_VERSION) ]) def section_socket(): return ('Socket', [ ('Hostname (fqdn)', socket.gethostbyaddr(socket.gethostname())[0]), ('IPv6 Support', getattr(socket, 'has_ipv6', False)), ('SSL Support', hasattr(socket, 'ssl')), ]) def section_multimedia(): return ('Multimedia support', [ ('AIFF Support', imported('aifc')), ('Color System Conversion', imported('colorsys')), ('curses Support', imported('curses')), ('IFF Chunk Support', imported('chunk')), ('Image Header Support', imported('imghdr')), ('OSS Audio Device Support', imported('ossaudiodev')), ('Raw Audio Support', imported('audioop')), ('SGI RGB Support', imported('rgbimg')), ('Sound Header Support', imported('sndhdr')), ('Sun Audio Device Support', imported('sunaudiodev')), ('Sun AU Support', imported('sunau')), ('Wave Support', imported('wave'))]) def section_packages(): data = [] try: import pkg_resources except: return '' for pkg in pkg_resources.working_set: assert isinstance(pkg, pkg_resources.Distribution) data.append((pkg.project_name, pkg.version if pkg.has_version() else '[uknown]')) return 'Installed Modules (Site Packages)', sorted(data, key=lambda a: a[0].lower()) if __name__ == '__main__': def _text_table(title, data): return '===== {}\n{}\n\n'.format(title, '\n'.join( '{:30} {}'.format(*row) for row in data )) print(pyinfo(template=None, make_table_fn=_text_table))
{ "repo_name": "jasdumas/jasdumas.github.io", "path": "tech-short-papers/system_info.py", "copies": "1", "size": "8636", "license": "mit", "hash": -3477928878366998000, "line_mean": 34.6859504132, "line_max": 119, "alpha_frac": 0.6088466883, "autogenerated": false, "ratio": 3.7032590051457976, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9801253249286399, "avg_score": 0.002170488831879812, "num_lines": 242 }
"""" A crawler for the data-science-jobs.com website. """ from logging import getLogger from scrapy.spiders import Rule, CrawlSpider from scrapy.linkextractors import LinkExtractor from joby.items import JobLoader, Job from joby.utilities import Parser log = getLogger(__name__) class DataScienceJobsSpider(CrawlSpider): name = 'data-science-jobs' allowed_domains = ['www.data-science-jobs.com'] start_urls = ['http://www.data-science-jobs.com'] job_links = Rule(LinkExtractor(allow='detail\/'), callback='parse_job') pagination_links = Rule(LinkExtractor(allow='page=\d+')) rules = [job_links, pagination_links] # noinspection PyUnresolvedReferences def parse_job(self, response): """ Returns a Job item. @url http://www.data-science-jobs.com/detail/20 @returns items 1 @returns requests 0 parse_overview_table -------------------- @scrapes reference_id @scrapes apply_url @pipeline job_category Data Scientist @scrapes contract_type @scrapes allows_remote @scrapes workload duration @scrapes publication_date @scrapes days_since_posted parse_webpage_info ------------------ @scrapes website_url @scrapes website_job_id @scrapes job_url parse_job_details ----------------- @scrapes job_title @scrapes keywords @scrapes description @scrapes abstract parse_company_info ------------------ @scrapes company_name @scrapes company_description @scrapes company_url parse_company_address --------------------- @scrapes company_address @scrapes company_zipcode @scrapes company_country @scrapes company_city """ loader = JobLoader(item=Job(), response=response) parser = DataScienceJobsParser(self, response, job=loader) parser.parse_job_overview() parser.parse_job_details() parser.parse_company_info() parser.parse_company_address() parser.parse_webpage_info() parser.job.load_item() log.info('%s spider scraped %s', self.name, response.url) return parser.job.item # noinspection PyUnresolvedReferences class DataScienceJobsParser(Parser): X_BASE = '//div[@id="detailView"]/' X_TTILE = X_BASE + 
'/h1/text()' X_KEYWORDS = X_BASE + 'div[4]/div[2]/text()' X_ABSTRACT = X_BASE + 'div[2]/div[2]/text()' X_DESCRIPTION = X_BASE + 'div[3]/div[2]/text()' CSS_COMPANY_ADDRESS = 'tr>td>address::text' def parse_job_overview(self): table = self.soup.find('table', class_='detailViewTable') overview_fields = ( ('Category', 'job_category'), ('Type', 'contract_type'), ('Home Office', 'allows_remote'), ('Min. Budget', 'salary'), ('Age', 'days_since_posted'), ('Age', 'publication_date'), ('Reference ID', 'reference_id'), ('Apply URL', 'apply_url'), ('Duration', 'duration'), ('Workload', 'workload'), ('Contact Person', 'contact_person'), ) self._parse_table(table, overview_fields) log.info('Parsed job overview from %s', self.response.url) def parse_job_details(self): self.job.add_xpath('keywords', self.X_KEYWORDS) self.job.add_xpath('description', self.X_DESCRIPTION) self.job.add_xpath('abstract', self.X_ABSTRACT) log.info('Parsed job details from %s', self.response.url) def parse_company_info(self): table = self.soup.find_all(class_='detailViewTable')[1] company_fields = { ('Name', 'company_name'), ('Description', 'company_description'), ('Website', 'company_url'), } self._parse_table(table, company_fields) log.info('Parsed company details from %s', self.response.url) def parse_company_address(self): self.job.add_css('company_address', self.CSS_COMPANY_ADDRESS) self.job.add_css('company_city', self.CSS_COMPANY_ADDRESS) self.job.add_css('company_zipcode', self.CSS_COMPANY_ADDRESS) self.job.add_css('company_country', self.CSS_COMPANY_ADDRESS) log.info('Parsed company address from %s', self.response.url) def parse_webpage_info(self): self.job.add_xpath('job_title', self.X_TTILE) self.job.add_value('website_url', self.response.url) self.job.add_value('reference_id', self.response.url) self.job.add_value('job_url', self.response.url) self.job.add_value('website_job_id', self.response.url) log.info('Parsed webpage info from %s', self.response.url) def _parse_table(self, table, 
expected_pairs): log.info('Parsing table from %s', self.response.url) key_tags = table.find_all('td', class_='detailViewTableKey') value_tags = table.find_all('td', class_='detailViewTableValue') def extract(tag): if tag.next_element.name == 'a': return tag.next_element.attrs['href'] else: return tag.text keys = map(extract, key_tags) values = map(extract, value_tags) rows = dict(zip(keys, values)) expected_keys = [v for _, v in expected_pairs] unscraped = set(keys) - set(expected_keys) if unscraped: log.warning('Not scraping %s', list(unscraped)) for label, key in expected_pairs: if label in keys: self.job.add_value(key, rows[label]) log.debug('Scraped %s = %s', key, rows[label]) else: log.debug('%s is missing', key)
{ "repo_name": "cyberbikepunk/job-spiders", "path": "joby/spiders/data_science_jobs.py", "copies": "1", "size": "5815", "license": "mit", "hash": -245468572191009100, "line_mean": 33.6130952381, "line_max": 75, "alpha_frac": 0.588650043, "autogenerated": false, "ratio": 3.7686325340246274, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.48572825770246275, "avg_score": null, "num_lines": null }
# acrobot
# import trajectory class and necessary dependencies
from pytrajectory import ControlSystem
import numpy as np
from sympy import cos, sin

# define the function that returns the vectorfield
def f(x,u):
    """Acrobot vectorfield: return xdot for state x and input u.

    x = (x1, x2, x3, x4): two angles and their rates -- presumably x1/x2 are
    the actuated joint and x3/x4 the passive one, given that u drives x2
    directly below; TODO confirm against the pytrajectory examples.
    u = (u1,): the single control input.
    Returns a length-4 numpy array of state derivatives.
    """
    x1, x2, x3, x4 = x
    u1, = u

    m = 1.0             # masses of the rods [m1 = m2 = m]
    l = 0.5             # lengths of the rods [l1 = l2 = l]

    I = 1/3.0*m*l**2    # moments of inertia [I1 = I2 = I]
    g = 9.81            # gravitational acceleration

    lc = l/2.0          # distance from each joint to the rod's centre of mass

    # inertia-matrix entries and Coriolis/gravity terms of the second joint
    d11 = m*lc**2+m*(l**2+lc**2+2*l*lc*cos(x1))+2*I
    h1 = -m*l*lc*sin(x1)*(x2*(x2+2*x4))
    d12 = m*(lc**2+l*lc*cos(x1))+I
    phi1 = (m*lc+m*l)*g*cos(x3)+m*lc*g*cos(x1+x3)

    # x2 is driven directly by u1; the last row follows from the dynamics.
    ff = np.array([ x2, u1, x4, -1/d11*(h1+phi1+d12*u1) ])

    return ff

# system state boundary values for a = 0.0 [s] and b = 2.0 [s]
# (swing from hanging, 3/2*pi, to upright, 1/2*pi, at rest at both ends)
xa = [ 0.0, 0.0, 3/2.0*np.pi, 0.0]
xb = [ 0.0, 0.0, 1/2.0*np.pi, 0.0]

# boundary values for the inputs (zero torque at both ends)
ua = [0.0]
ub = [0.0]

# create trajectory object
S = ControlSystem(f, a=0.0, b=2.0, xa=xa, xb=xb, ua=ua, ub=ub)

# alter some method parameters to increase performance
S.set_param('su', 10)

# run iteration (this is the expensive step)
S.solve()

# the following code provides an animation of the system above
# for a more detailed explanation have a look at the 'Visualisation' section in the documentation
import sys
import matplotlib as mpl
from pytrajectory.visualisation import Animation

def draw(xti, image):
    """Draw one animation frame for state vector xti onto *image*.

    xti[0]/xti[2] are the two joint angles; two rods and two joint markers
    are appended to the matplotlib image, which is returned.
    """
    phi1, phi2 = xti[0], xti[2]

    L=0.5  # rod length, must match l in f()

    # joint positions: first rod from the origin, second rod from its tip
    x1 = L*cos(phi2)
    y1 = L*sin(phi2)

    x2 = x1+L*cos(phi2+phi1)
    y2 = y1+L*sin(phi2+phi1)

    # rods
    rod1 = mpl.lines.Line2D([0,x1],[0,y1],color='k',zorder=0,linewidth=2.0)
    rod2 = mpl.lines.Line2D([x1,x2],[y1,y2],color='0.3',zorder=0,linewidth=2.0)

    # pendulums (joint markers; sphere2 marks the fixed pivot at the origin)
    sphere1 = mpl.patches.Circle((x1,y1),0.01,color='k')
    sphere2 = mpl.patches.Circle((0,0),0.01,color='k')

    image.lines.append(rod1)
    image.lines.append(rod2)
    image.patches.append(sphere1)
    image.patches.append(sphere2)

    return image

if not 'no-pickle' in sys.argv:
    # here we save the simulation results so we don't have to run
    # the iteration again in case the following fails
    S.save(fname='ex5_Acrobot.pcl')

if 'plot' in sys.argv or 'animate' in sys.argv:
    # plot the two angles and the input alongside the animation frames
    A = Animation(drawfnc=draw, simdata=S.sim_data,
                  plotsys=[(0,'phi1'),(2,'phi2')], plotinputs=[(0,'u')])
    A.set_limits(xlim=(-1.1,1.1), ylim=(-1.1,1.1))

    if 'plot' in sys.argv:
        A.show(t=S.b)

    if 'animate' in sys.argv:
        A.animate()
        A.save('ex5_Acrobot.gif')
{ "repo_name": "akunze3/pytrajectory", "path": "examples/ex5_Acrobot.py", "copies": "1", "size": "2711", "license": "bsd-3-clause", "hash": -122370026243211490, "line_mean": 24.3364485981, "line_max": 97, "alpha_frac": 0.5713758761, "autogenerated": false, "ratio": 2.63715953307393, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.8531335175791619, "avg_score": 0.035440046676462056, "num_lines": 107 }
# Acronym generator:
# Random Access Memory : RAM
# ask for a string
# user_input = input("Enter words to convert to an acronym: ")
#
# # convert the string to uppercase and a list
# user_input = user_input.upper().split()
#
# # cycle through the list
# for word in user_input:
#     # print the 1st letter of the word and eliminate the newline
#     print(word[0], end="")
#
# # add a newline
# print()


# Caesar Cipher
# A - Z 65-90
# a - z 97-122
# convert chr to unicode: ord()
# convert unicode to chr: chr()
# only change letters


def caesar(text, key):
    """Return *text* with every letter shifted *key* positions in the alphabet.

    Case is preserved and non-letters pass through unchanged.  The single
    +/- 26 wrap-around assumes -26 <= key <= 26, which matches the prompt
    below.  Decryption is simply caesar(ciphertext, -key).
    """
    result = ""
    for char in text:
        if char.isalpha():
            char_code = ord(char) + key
            # Wrap within the uppercase or lowercase alphabet respectively.
            if char.isupper():
                if char_code > ord('Z'):
                    char_code -= 26
                elif char_code < ord('A'):
                    char_code += 26
            else:
                if char_code > ord('z'):
                    char_code -= 26
                elif char_code < ord('a'):
                    char_code += 26
            result += chr(char_code)
        else:
            # Leave spaces, digits and punctuation untouched.
            result += char
    return result


def main():
    """Prompt for a message and key, then round-trip it through the cipher."""
    # Receive the message to encrypt and the number of characters to shift
    message = input("Enter your message : ")
    key = int(input("How many characters should we shift (1 - 26)"))

    secret_message = caesar(message, key)
    print("Encrypted :", secret_message)

    # To decrypt the only thing that changes is the sign of the key
    orig_message = caesar(secret_message, -key)
    print("Decrypted :", orig_message)


if __name__ == "__main__":
    main()
{ "repo_name": "J-kaizen/kaizen", "path": "python/LTP/string_functions.py", "copies": "1", "size": "2440", "license": "mit", "hash": -1651561455093729000, "line_mean": 20.0344827586, "line_max": 70, "alpha_frac": 0.5668032787, "autogenerated": false, "ratio": 3.873015873015873, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4939819151715873, "avg_score": null, "num_lines": null }
""" A crude alarm clock for the raspberry pi """ import datetime import time class AlarmManager: def __init__(self): self.alarms = [] # TODO import alarms from persistence storage def check_alarms(self): for alarm in self.alarms: alarm.check() class Alarm: def __init__(self, hour=0, minute=0): self.hour = hour self.minute = minute self.on = True def check(self): time = datetime.datetime.now().time() if alarm.on == True and self.hour == time.hour and self.minute == time.minute: print("Beep Beep Beep!") def toggle_alarm(self): if self.on == True: self.on == False else: self.on == True class AlarmApp: pass if __name__ == "__main__": alarm_manager = AlarmManager() alarm_time = datetime.datetime.now() + datetime.timedelta(minutes=1) alarm_time = alarm_time.time().replace(second=0, microsecond=0) alarm = Alarm(alarm_time.hour, alarm_time.minute) alarm_manager.alarms.append(alarm) while True: alarm_manager.check_alarms() time.sleep(1)
{ "repo_name": "khominhvi/PiAlarmClock", "path": "alarm/alarm.py", "copies": "1", "size": "1146", "license": "mit", "hash": 8682174935578678000, "line_mean": 23.9130434783, "line_max": 86, "alpha_frac": 0.592495637, "autogenerated": false, "ratio": 3.638095238095238, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9563785481403508, "avg_score": 0.03336107873834609, "num_lines": 46 }
"""A crude script for clearing out cached profiles. It has mutated a lot, so treat it as a scratchpad and tweak it as needed. Perhaps we'll ultimately figure out some patterns and we can make it more robust. You must set environment variables for AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY to be able to make changes to the S3 bucket. This script doesn't assume database access; instead: Do something like this to get a seed file of geoids from a database with our ACS 1 year schema: \copy (select geoid from acs2016_1yr.geoheader where component = '00' order by geoid) to '/tmp/2016_1yr_geoids.txt'; . """ from boto.s3.connection import S3Connection, OrdinaryCallingFormat from boto.s3.key import Key from gzip import GzipFile from cStringIO import StringIO import json import re import requests import time import os, sys CACHE_KEY_YEAR = '2016' # this specifies the S3 prefix we're checking for profile JSON which needs to be cleared OBSOLETE_YEAR = '2015' # change this when we bump the 5-year release so we can recognize what is no longer welcome. 
GEOID_LIST = '/tmp/2016_1yr_geoids.txt' # make a file of 1 year geoids to massively reduce search space AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID') AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY') if AWS_ACCESS_KEY_ID is None or AWS_SECRET_ACCESS_KEY is None: print("You must define AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY as environment variables") sys.exit() def decode_key(k): sio = StringIO(k.get_contents_as_string()) return GzipFile(fileobj=sio).read() def releases(j_string): pat = re.compile('ACS (.+?)-year') return set(pat.findall(j_string)) def get_key(b,geoid): key_path = '1.0/data/profiles/{}/{}.json'.format(CACHE_KEY_YEAR,geoid) return b.get_key(key_path) s3 = S3Connection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, calling_format=OrdinaryCallingFormat()) bucket = s3.get_bucket('embed.censusreporter.org') deleted = [] def prime_cache(geoid): url = "https://censusreporter.org/profiles/{}".format(geoid) resp = requests.get(url) if resp.status_code == 200: return True else: print("Fetch error {} for geoid {}".format(resp.status_code,geoid)) return False # with open(GEOID_LIST) as f: # for i,geoid in enumerate(f): # if i % 100 == 0: print i # geoid = geoid[:-1] # trim newline # k = get_key(bucket, geoid) # if k: # j = decode_key(k) # r = releases(j) # for release in r: # if OBSOLETE_YEAR in release: # deleted.append(geoid) # k.delete() # print "deleted", geoid # break # # errors = 0 # to_restore = list(deleted) # for i,geoid in enumerate(to_restore): # if i % 100 == 0: print "{} of {} - {}".format(i,len(to_restore),geoid) # url = "https://censusreporter.org/profiles/{}".format(geoid) # resp = requests.get(url) # if resp.status_code == 200: # deleted.remove(geoid) # else: # errors += 1 # if errors > 5: # print "5 errors so I quit" # break # time.sleep(5) def delete_embed(release_slug, embed_slug,delete=False): """Given a 'release_slug' (such as 'ACS_2017_5-year') and an 'embed_slug' (such as 'social-place_of_birth-distribution'), 
delete any existing JSON embeds for that combination, or report that you would have, depending on the value of 'delete' Note that this JSON is not automatically generated on 404 from outside callers, so things only exist here because someone is using an embed, and if you delete the JSON without fixing it, you break their embed. So be nice and go to the profile pages which are impacted and find the right section and click 'embed' -- or, better, be a mensch and fix https://github.com/censusreporter/censusreporter/issues/249 so that the missing cached embeds are autogenerated when needed! """ to_fix = [] for key in bucket.list('1.0/data/charts/{}'.format(release_slug)): if key.name.endswith('{}.json'.format(embed_slug)): if delete: print("deleting {}".format(key.name)) key.delete() else: print("would delete {}".format(key.name)) geoid = key.name.split('/')[-1].split('-')[0] to_fix.append("https://censusreporter.org/profiles/{}".format(geoid)) if len(to_fix) > 0: if delete: word = '' else: word = 'would ' print("{}need to fix these embeds for {}:".format(word, embed_slug)) for x in to_fix: print(x) else: print("nothing to fix") def delete_all_profiles(year_str): deleted = [] for key in bucket.list('1.0/data/profiles/{}'.format(year_str)): key.delete() deleted.append(key) print(key.name) # if len(deleted) > 10: # print("stopping at 10 for now") # break if len(deleted) % 100 == 0: print(len(deleted)) # most recently, I wanted to # delete_embed('ACS_2017_5-year', 'social-place_of_birth-distribution',delete=False) def delete_by_pattern(key_prefix,patterns,do_it=False): """Given a prefix, look at every key matching that prefix, and, if the non-prefix part of the key matches any pattern, delete it (or, if do_it is False, print that it would be done) """ regexes = [] if isinstance(patterns,basestring): print("patterns should be a sequence not a string") return for pat in patterns: if type(pat) == '_sre.SRE_Pattern': regexes.append(pat) else: regexes.append(re.compile(pat)) 
from collections import defaultdict delete_dict = defaultdict(list) for key in bucket.list(key_prefix): fn = key.name.replace(key_prefix,'') if fn[0] == '/': fn = fn[1:] for pat in regexes: if pat.match(fn): delete_dict[pat.pattern].append(fn) if do_it: key.delete() else: print("would delete ", key.name) continue for k,v in delete_dict.items(): print("Pattern", k, " - ", len(v)) if __name__ == '__main__': delete_by_pattern('1.0/data/profiles/2018/', [ '05000US.*', '31000US.*', '33000US.*', ], do_it=True ) delete_by_pattern('tiger2018/show/', [ '05000US.*parents.json', '31000US.*parents.json', ], do_it=True )
{ "repo_name": "censusreporter/censusreporter", "path": "cache_clearer.py", "copies": "1", "size": "6345", "license": "mit", "hash": 3915384947824329000, "line_mean": 34.4469273743, "line_max": 157, "alpha_frac": 0.6427107959, "autogenerated": false, "ratio": 3.3342091434576986, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.44769199393576986, "avg_score": null, "num_lines": null }
"""ACS5-based analysis """ import argparse import json import matplotlib.pyplot as plt import pandas as pd parser = argparse.ArgumentParser(__doc__) parser.add_argument("join", help="Business/county join file") parser.add_argument("reviews", help="Yelp review file") parser.add_argument("census", help="ACS 5-year data") parser.add_argument("name", help="Output name prefix") args = parser.parse_args() oname = args.name # Load reviews reviews = pd.DataFrame(json.loads(l) for l in open(args.reviews)) # Only use 2009 to 2013 reviews reviews = reviews[reviews.date.apply(lambda d: d >= '2009' and d < '2014')] # Reduce reviews to business review counts reviews = (reviews[['stars']] .groupby(reviews.business_id) .count() .reset_index() ) # Fix column names reviews.columns = 'business_id reviews'.split() # Load the geo join data and join with the reviews join = pd.DataFrame(json.loads(l) for l in open(args.join)) reviews = reviews.merge(join) # Get review counts by GISJOIN reviews = (reviews[['reviews']] .groupby(reviews.GISJOIN) .sum() .reset_index() ) # Load the 5-year census data census = pd.read_csv(args.census) # We want the columns that start with UEE. There should be 49. 
uee = [c for c in census.columns if c.startswith('UEE')] assert len(uee) == 49 census = census[['GISJOIN'] + uee] # Assign more useful column names: census.columns = ''' GISJOIN TOTAL M M_4 M5_9 M10_14 M15_17 M18_19 M20 M21 M22_24 M25_29 M30_34 M35_39 M40_44 M45_49 M50_54 M55_59 M60_61 M62_64 M65_66 M67_69 M70_74 M75_79 M80_84 M85_ F F_4 F5_9 F10_14 F15_17 F18_19 F20 F21 F22_24 F25_29 F30_34 F35_39 F40_44 F45_49 F50_54 F55_59 F60_61 F62_64 F65_66 F67_69 F70_74 F75_79 F80_84 F85_ '''.strip().split() # Compute young and old columns: age_groups = {} for n in ''' M18_19 M20 M21 M22_24 M25_29 M30_34F18_19 F20 F21 F22_24 F25_29 F30_34 '''.strip().split(): age_groups[n] = 'young' for n in ''' M35_39 M40_44 M45_49 M50_54 M55_59 M60_61 M62_64 M65_66 M67_69 M70_74 M75_79 M80_84 M85_ F35_39 F40_44 F45_49 F50_54 F55_59 F60_61 F62_64 F65_66 F67_69 F70_74 F75_79 F80_84 F85_ '''.strip().split(): age_groups[n] = 'old' yo = census.groupby(age_groups, axis=1).sum() census = pd.concat((census, yo), axis=1) # Join with reviews census = census.merge(reviews) # Normalize by total population norm = census[census.columns[3:]].div(census.TOTAL, axis=0) census = pd.concat((census[census.columns[:3]], norm), axis=1) # Whew, now we're ready to explore relationships. Plot response # rate vs age-group fraction for young and old. 
fig, ax = plt.subplots(2, 1) ax[0].set_yscale('log') ax[1].set_yscale('log') ax[0].scatter(census.young, census.reviews, c='r', label='young') ax[1].scatter(census.old, census.reviews, c='b', label='old') ax[0].set_title("ACS5 %s Yelp review rate by fraction young" % oname) ax[1].set_title("ACS5 %s Yelp review rate by fraction old" % oname) plt.savefig(oname+'_acs5_reviews_fraction_young_and_old.svg') # I wonder what it would look like wo Vegas census = census[census.GISJOIN.apply(lambda g: g[:3] != 'G32')] fig, ax = plt.subplots(2, 1) ax[0].set_yscale('log') ax[1].set_yscale('log') ax[0].scatter(census.young, census.reviews, c='r', label='young') ax[1].scatter(census.old, census.reviews, c='b', label='old') ax[0].set_title("ACS5 %s Yelp review rate by fraction young no Vegas" % oname) ax[1].set_title("ACS5 %s Yelp review rate by fraction old no Vegas" % oname) plt.savefig(oname+'_acs5_reviews_fraction_young_and_old_no_vegas.png')
{ "repo_name": "DistrictDataLabs/03-censusables", "path": "censusables/fivey.py", "copies": "1", "size": "3631", "license": "apache-2.0", "hash": -6740168412251856000, "line_mean": 32.6203703704, "line_max": 78, "alpha_frac": 0.6736436243, "autogenerated": false, "ratio": 2.610352264557872, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.8778819121181104, "avg_score": 0.0010353535353535353, "num_lines": 108 }
"""acserver URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.8/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import include, url from django.contrib import admin from session.views import launch_preset, stop_preset, upgrade from library.views import process_assetcollection from session.views import PresetWizard admin.site.site_header = 'Assetto Corsa Server Manager' admin.site.site_title = 'Assetto Corsa Server Manager' urlpatterns = [ url(r'^admin/session/preset/(?P<preset_id>[0-9]+)/launch/$', launch_preset, name='launch_preset'), url(r'^admin/session/preset/(?P<preset_id>[0-9]+)/stop/$', stop_preset, name='stop_preset'), url(r'^admin/session/presetwizard/', PresetWizard.as_view()), url(r'^admin/upgrade/', upgrade, name='upgrade'), url(r'^admin/library/assetcollection/(?P<assetcollection_id>[0-9]+)/process/$', process_assetcollection, name='process_assetcollection'), url(r'^admin/', include(admin.site.urls)), url(r'^chaining/', include('smart_selects.urls')), ]
{ "repo_name": "PeteTheAutomator/ACServerManager", "path": "acserver/urls.py", "copies": "1", "size": "1483", "license": "mit", "hash": -5065840832467387000, "line_mean": 43.9393939394, "line_max": 141, "alpha_frac": 0.7113958193, "autogenerated": false, "ratio": 3.4488372093023254, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4660233028602325, "avg_score": null, "num_lines": null }
"""ACSE service provider""" import logging from typing import TYPE_CHECKING, Optional, Dict, List, cast, Tuple from pydicom.uid import UID from pynetdicom import evt, _config from pynetdicom._globals import APPLICATION_CONTEXT_NAME from pynetdicom.pdu_primitives import ( A_ASSOCIATE, A_RELEASE, A_ABORT, A_P_ABORT, AsynchronousOperationsWindowNegotiation, SOPClassCommonExtendedNegotiation, SOPClassExtendedNegotiation, UserIdentityNegotiation, ) from pynetdicom.presentation import ( negotiate_as_requestor, negotiate_as_acceptor, negotiate_unrestricted, ) if TYPE_CHECKING: # pragma: no cover from pynetdicom.association import Association, ServiceUser from pynetdicom.dul import DULServiceProvider from pynetdicom.transport import AssociationSocket LOGGER = logging.getLogger("pynetdicom.acse") class ACSE: """The Association Control Service Element (ACSE) service provider. The ACSE protocol handles association negotiation and establishment, and normal and abnormal release of an association. """ def __init__(self, assoc: "Association") -> None: """Create the ACSE service provider. Parameters ---------- assoc : association.Association The Association to provide ACSE services for. """ self._assoc = assoc @property def acceptor(self) -> "ServiceUser": """Return the *acceptor* :class:`~pynetdicom.association.ServiceUser`. """ return self.assoc.acceptor @property def acse_timeout(self) -> Optional[float]: """Return the ACSE timeout (in seconds).""" return self.assoc.acse_timeout @property def assoc(self) -> "Association": """Return the parent :class:`~pynetdicom.association.Association`. .. versionadded:: 1.3 """ return self._assoc def _check_async_ops( self ) -> Optional[AsynchronousOperationsWindowNegotiation]: """Check the user's response to an Asynchronous Operations request. .. 
currentmodule:: pynetdicom.pdu_primitives Returns ------- pdu_primitives.AsynchronousOperationsWindowNegotiation or None If the ``evt.EVT_ASYNC_OPS`` handler hasn't been implemented then returns ``None``, otherwise returns an :class:`AsynchronousOperationsWindowNegotiation` item with the default values for the number of operations invoked/performed (1, 1). """ # pylint: disable=broad-except try: # Response is always ignored as async ops is not supported inv, perf = self.requestor.asynchronous_operations _ = evt.trigger( self.assoc, evt.EVT_ASYNC_OPS, {"nr_invoked": inv, "nr_performed": perf} ) except NotImplementedError: return None except Exception as exc: LOGGER.error( "Exception raised in handler bound to 'evt.EVT_ASYNC_OPS'" ) LOGGER.exception(exc) item = AsynchronousOperationsWindowNegotiation() item.maximum_number_operations_invoked = 1 item.maximum_number_operations_performed = 1 return item def _check_sop_class_common_extended( self ) -> Dict[UID, SOPClassCommonExtendedNegotiation]: """Check the user's response to a SOP Class Common Extended request. Returns ------- dict The {SOP Class UID : SOPClassCommonExtendedNegotiation} items for the accepted SOP Class Common Extended negotiation items. """ # pylint: disable=broad-except try: rsp = evt.trigger( self.assoc, evt.EVT_SOP_COMMON, {"items": self.requestor.sop_class_common_extended} ) except Exception as exc: LOGGER.error( "Exception raised in handler bound to 'evt.EVT_SOP_COMMON'" ) LOGGER.exception(exc) return {} rsp = cast(Dict[UID, SOPClassCommonExtendedNegotiation], rsp) try: rsp = { uid: ii for uid, ii in rsp.items() if isinstance(ii, SOPClassCommonExtendedNegotiation) } except Exception as exc: LOGGER.error( "Invalid type returned by handler bound to " "'evt.EVT_SOP_COMMON'" ) LOGGER.exception(exc) return {} return rsp def _check_sop_class_extended(self) -> List[SOPClassExtendedNegotiation]: """Check the user's response to a SOP Class Extended request. 
Returns ------- list of pdu_primitives.SOPClassExtendedNegotiation The SOP Class Extended Negotiation items to be sent in response """ # pylint: disable=broad-except try: user_response = evt.trigger( self.assoc, evt.EVT_SOP_EXTENDED, {"app_info": self.requestor.sop_class_extended} ) except Exception as exc: user_response = {} LOGGER.error( "Exception raised in handler bound to 'evt.EVT_SOP_EXTENDED'" ) LOGGER.exception(exc) if not isinstance(user_response, (type(None), dict)): LOGGER.error( "Invalid type returned by handler bount to " "'evt.EVT_SOP_EXTENDED'" ) user_response = {} if not user_response: return [] items = [] for sop_class, app_info in user_response.items(): try: item = SOPClassExtendedNegotiation() item.sop_class_uid = sop_class item.service_class_application_information = app_info items.append(item) except Exception as exc: LOGGER.error( f"Unable to set the SOP Class Extended Negotiation " f"response values for the SOP Class UID {sop_class}" ) LOGGER.exception(exc) return items def _check_user_identity( self ) -> Tuple[bool, Optional[UserIdentityNegotiation]]: """Check the user's response to a User Identity request. Returns ------- bool True if the user identity has been confirmed, False otherwise. pdu_primitives.UserIdentityNegotiation or None The negotiation response, if a positive response is requested, otherwise None. 
""" # pylint: disable=broad-except # The UserIdentityNegotiation (request) item req = self.requestor.user_identity if req is None: return True, None try: rsp = evt.trigger( self.assoc, evt.EVT_USER_ID, { "user_id_type": req.user_identity_type, "primary_field": req.primary_field, "secondary_field": req.secondary_field, } ) except NotImplementedError: # If the user hasn't implemented identity negotiation then # default to accepting the association return True, None except Exception as exc: # If the user has implemented identity negotiation but an exception # occurred then reject the association LOGGER.error("Exception in handler bound to 'evt.EVT_USER_ID'") LOGGER.exception(exc) return False, None identity_verified, response = cast(Tuple[bool, Optional[bytes]], rsp) if not identity_verified: # Reject association as the user isn't authorised return False, None if req.user_identity_type in [3, 4, 5]: if req.positive_response_requested and response is not None: try: rsp = UserIdentityNegotiation() rsp.server_response = response return True, rsp except Exception as exc: # > If the acceptor doesn't support user identification it # > will accept the association without making a positive # > response LOGGER.error( "Unable to set the User Identity Negotiation's " "'server_response'" ) LOGGER.exception(exc) return True, None return True, None @property def dul(self) -> "DULServiceProvider": """Return the :class:`~pynetdicom.dul.DULServiceProvider`.""" return self.assoc.dul @property def socket(self) -> Optional["AssociationSocket"]: """Return the :class:`~pynetdicom.transport.AssociationSocket`.""" return self.assoc.dul.socket def is_aborted(self, abort_type: str = "both") -> bool: """Return ``True`` if an A-ABORT and/or A-P-ABORT request has been received. .. versionchanged:: 1.5 Added `abort_type` keyword parameter. Parameters ---------- abort_type : str, optional The type of abort to check for. 
If ``'both'`` then will return ``True`` if an A-ABORT or A-P-ABORT is received (default). If ``'a-abort'`` then will return ``True`` if an A-ABORT is received, if ``'a-p-abort'`` then will return ``True`` if an A-P-ABORT is received. Returns ------- bool ``True`` if an abort is received, ``False`` otherwise. """ # A-P-ABORT: # Connection closed, FSM received invalid event or DUL sent A-ABORT abort_classes = { "both": (A_ABORT, A_P_ABORT), "a-abort": (A_ABORT,), "a-p-abort": (A_P_ABORT,), } primitive = self.dul.peek_next_pdu() if isinstance(primitive, abort_classes[abort_type]): return True return False def is_release_requested(self) -> bool: """Return ``True`` if an A-RELEASE request has been received. .. versionadded:: 1.1 """ primitive = self.dul.peek_next_pdu() if isinstance(primitive, A_RELEASE) and primitive.result is None: _ = self.dul.receive_pdu(wait=False) return True return False def negotiate_association(self) -> None: """Perform an association negotiation as either the *requestor* or *acceptor*. """ if self.assoc.is_requestor: self._negotiate_as_requestor() elif self.assoc.is_acceptor: self._negotiate_as_acceptor() def _negotiate_as_acceptor(self) -> None: """Perform an association negotiation as the association *acceptor*. """ # For convenience assoc_rq = cast(A_ASSOCIATE, self.requestor.primitive) # Set the Requestor's AE Title self.requestor.ae_title = assoc_rq.calling_ae_title # If we reject association -> [result, source, diagnostic] reject_assoc_rsd: Tuple[int, ...] 
= tuple() # Calling AE Title not recognised authorised_aet = [s.strip() for s in self.assoc.ae.require_calling_aet] if ( self.assoc.ae.require_calling_aet and assoc_rq.calling_ae_title not in authorised_aet ): reject_assoc_rsd = (0x01, 0x01, 0x03) # Called AE Title not recognised if ( self.assoc.ae.require_called_aet and assoc_rq.called_ae_title != self.acceptor.ae_title.strip() ): reject_assoc_rsd = (0x01, 0x01, 0x07) # Extended Negotiation items # User Identity Negotiation items if self.requestor.user_identity: is_valid, id_response = self._check_user_identity() if not is_valid: # Transient, ACSE related, no reason given LOGGER.info("User identity failed verification") reject_assoc_rsd = (0x02, 0x02, 0x01) if id_response: # Add the User Identity Negotiation (response) item self.acceptor.add_negotiation_item(id_response) # SOP Class Extended Negotiation items for item in self._check_sop_class_extended(): self.acceptor.add_negotiation_item(item) # SOP Class Common Extended Negotiation items # Note: No response items are allowed # pylint: disable=protected-access self.acceptor._common_ext = self._check_sop_class_common_extended() # pylint: enable=protected-access # Asynchronous Operations Window Negotiation items if self.requestor.asynchronous_operations != (1, 1): async_rsp = self._check_async_ops() # Add any Async Ops (response) item if async_rsp: self.acceptor.add_negotiation_item(async_rsp) # DUL Presentation Related Rejections # Maximum number of associations reached (local-limit-exceeded) active_acceptors = [ tt for tt in self.assoc.ae.active_associations if tt.is_acceptor ] if len(active_acceptors) > self.assoc.ae.maximum_associations: reject_assoc_rsd = (0x02, 0x03, 0x02) if reject_assoc_rsd: # pylint: disable=no-value-for-parameter LOGGER.info("Rejecting Association") self.send_reject(*reject_assoc_rsd) evt.trigger(self.assoc, evt.EVT_REJECTED, {}) self.assoc.kill() return # Negotiate Presentation Contexts # SCP/SCU Role Selection Negotiation request items 
# {SOP Class UID : (SCU role, SCP role)} rq_roles = { uid: (item.scu_role, item.scp_role) for uid, item in self.requestor.role_selection.items() } if _config.UNRESTRICTED_STORAGE_SERVICE: result, ac_roles = negotiate_unrestricted( assoc_rq.presentation_context_definition_list, self.acceptor.supported_contexts, rq_roles ) else: result, ac_roles = negotiate_as_acceptor( assoc_rq.presentation_context_definition_list, self.acceptor.supported_contexts, rq_roles ) # pylint: disable=protected-access # Accepted contexts are stored as {context ID : context} self.assoc._accepted_cx = { cast(int, cx.context_id): cx for cx in result if cx.result == 0x00 } self.assoc._rejected_cx = [cx for cx in result if cx.result != 0x00] # pylint: enable=protected-access # Add any SCP/SCU Role Selection Negotiation response items for role_item in ac_roles: self.acceptor.add_negotiation_item(role_item) # Send the A-ASSOCIATE (accept) primitive LOGGER.info("Accepting Association") self.send_accept() # Callbacks/Logging evt.trigger(self.assoc, evt.EVT_ACCEPTED, {}) # Assocation established OK self.assoc.is_established = True evt.trigger(self.assoc, evt.EVT_ESTABLISHED, {}) def _negotiate_as_requestor(self) -> None: """Perform an association negotiation as the association *requestor*. 
""" if not self.requestor.requested_contexts: LOGGER.error( "One or more requested presentation contexts must be set " "prior to association negotiation" ) self.assoc.kill() return # Build and send an A-ASSOCIATE (request) PDU to the peer self.send_request() evt.trigger(self.assoc, evt.EVT_REQUESTED, {}) # Wait for the transport to be ready socket = cast("AssociationSocket", self.socket) socket._ready.wait() if not socket._is_connected: # Failed to connect self.assoc.abort() return # Wait for response rsp = self.dul.receive_pdu(wait=True, timeout=self.acse_timeout) # Association accepted or rejected if isinstance(rsp, A_ASSOCIATE): self.acceptor.primitive = rsp # Accepted if rsp.result == 0x00: # Handle SCP/SCU Role Selection response # Apply requestor's proposed SCP/SCU role selection (if any) # to the requested contexts rq_roles = { uid: (ii.scu_role, ii.scp_role) for uid, ii in self.requestor.role_selection.items() } if rq_roles: for cx in self.requestor.requested_contexts: try: (cx.scu_role, cx.scp_role) = rq_roles[ cast(UID, cx.abstract_syntax) ] # If no role was specified then use False # see SCP_SCU_RoleSelectionSubItem.from_primitive cx.scu_role = cx.scu_role or False cx.scp_role = cx.scp_role or False except KeyError: pass # Collate the acceptor's SCP/SCU role selection responses ac_roles = { uid: (ii.scu_role, ii.scp_role) for uid, ii in self.acceptor.role_selection.items() } # Check the negotiated presentation contexts results and # determine their agreed upon SCP/SCU roles negotiated_contexts = negotiate_as_requestor( self.requestor.requested_contexts, rsp.presentation_context_definition_results_list, ac_roles ) # pylint: disable=protected-access # Accepted contexts are stored as {context ID : context} self.assoc._accepted_cx = { cast(int, cx.context_id): cx for cx in negotiated_contexts if cx.result == 0x00 } self.assoc._rejected_cx = [ cx for cx in negotiated_contexts if cx.result != 0x00 ] # pylint: enable=protected-access evt.trigger(self.assoc, 
evt.EVT_ACCEPTED, {}) # No acceptable presentation contexts if not self.assoc.accepted_contexts: LOGGER.error("No accepted presentation contexts") self.send_abort(0x02) self.assoc.is_aborted = True self.assoc.is_established = False evt.trigger(self.assoc, evt.EVT_ABORTED, {}) self.assoc.kill() else: LOGGER.info("Association Accepted") self.assoc.is_established = True evt.trigger(self.assoc, evt.EVT_ESTABLISHED, {}) elif hasattr(rsp, "result") and rsp.result in [0x01, 0x02]: # 0x01 is rejected (permanent) # 0x02 is rejected (transient) LOGGER.error("Association Rejected") LOGGER.error( f"Result: {rsp.result_str}, Source: {rsp.source_str}" ) LOGGER.error(f"Reason: {rsp.reason_str}") self.assoc.is_rejected = True self.assoc.is_established = False evt.trigger(self.assoc, evt.EVT_REJECTED, {}) self.dul.kill_dul() else: LOGGER.error( "Received an invalid A-ASSOCIATE response from the peer" ) LOGGER.error("Aborting Association") self.send_abort(0x02) self.assoc.is_aborted = True self.assoc.is_established = False # Event handler - association aborted evt.trigger(self.assoc, evt.EVT_ABORTED, {}) self.assoc.kill() # Association aborted elif isinstance(rsp, (A_ABORT, A_P_ABORT)): LOGGER.error("Association Aborted") self.assoc.is_established = False self.assoc.is_aborted = True evt.trigger(self.assoc, evt.EVT_ABORTED, {}) self.dul.kill_dul() elif rsp is None: # ACSE timeout LOGGER.error( "ACSE timeout reached while waiting for response to " "association request" ) self.assoc.abort() else: # Received A-RELEASE or some weird object self.assoc.is_established = False self.dul.kill_dul() def negotiate_release(self) -> None: """Negotiate association release. .. versionadded:: 1.1 Once an A-RELEASE request has been sent any received P-DATA PDUs will be ignored. 
""" # Send A-RELEASE request # Only an A-ABORT request primitive is allowed after A-RELEASE starts # (Part 8, Section 7.2.2) self.send_release(is_response=False) # We need to wait for a reply and need to handle: # P-DATA primitives # A-ABORT request primitives # A-RELEASE collisions is_collision = False while True: primitive = self.dul.receive_pdu( wait=True, timeout=self.acse_timeout ) if primitive is None: # No response received within timeout window LOGGER.info("Aborting Association") self.send_abort(0x02) self.assoc.is_aborted = True self.assoc.is_established = False evt.trigger(self.assoc, evt.EVT_ABORTED, {}) self.assoc.kill() return if isinstance(primitive, (A_ABORT, A_P_ABORT)): # Received A-ABORT/A-P-ABORT during association release LOGGER.info("Association Aborted") self.assoc.is_aborted = True self.assoc.is_established = False evt.trigger(self.assoc, evt.EVT_ABORTED, {}) self.assoc.kill() return # Any other primitive besides A_RELEASE gets trashed elif not isinstance(primitive, A_RELEASE): # Should only be P-DATA LOGGER.warning( "P-DATA received after Association release, data has " "been lost" ) continue # Must be A-RELEASE, but may be either request or release if primitive.result is None: # A-RELEASE (request) received, therefore an # A-RELEASE collision has occurred (Part 8, Section 7.2.2.7) LOGGER.debug("An A-RELEASE collision has occurred") is_collision = True if self.assoc.is_requestor: # Send A-RELEASE response self.send_release(is_response=True) # Wait for A-RELEASE response continue # Acceptor waits for A-RELEASE response before # sending their own response else: # A-RELEASE (response) received # If collision and we are the acceptor then we need to send # the A-RELEASE (response) to the requestor if self.assoc.is_acceptor and is_collision: self.send_release(is_response=True) self.assoc.is_released = True self.assoc.is_established = False evt.trigger(self.assoc, evt.EVT_RELEASED, {}) self.assoc.kill() return @property def requestor(self) -> 
"ServiceUser": """Return the *requestor* :class:`~pynetdicom.association.ServiceUser`. """ return self.assoc.requestor def send_abort(self, source: int) -> None: """Send an A-ABORT request to the peer. Parameters ---------- source : int The source of the abort request - ``0x00`` - the DUL service user - ``0x02`` - the DUL service provider Raises ------ ValueError If the `source` value is invalid. """ if source not in [0x00, 0x02]: raise ValueError("Invalid 'source' parameter value") # The following parameters must be set for an A-ABORT primitive # (* sent in A-ABORT PDU): # Abort Source* # Provider Reason* (not significant with source 0x00) primitive = A_ABORT() primitive.abort_source = source self.dul.send_pdu(primitive) self.assoc.is_aborted = True self.assoc.is_established = False def send_accept(self) -> None: """Send an A-ASSOCIATE (accept) to the peer.""" # The following parameters must be set for an A-ASSOCIATE (accept) # primitive (* sent in A-ASSOCIATE-AC PDU): # Application Context Name* # Calling AE Title* (but not to be tested) # Called AE Title* (but not to be tested) # User Information # Maximum PDV Length* # Implementation Class UID* # Result # Result Source # Presentation Context Definition List Result* req = cast(A_ASSOCIATE, self.requestor.primitive) primitive = A_ASSOCIATE() primitive.application_context_name = UID(APPLICATION_CONTEXT_NAME) primitive.calling_ae_title = req.calling_ae_title primitive.called_ae_title = req.called_ae_title primitive.result = 0x00 primitive.result_source = 0x01 primitive.presentation_context_definition_results_list = ( self.assoc.accepted_contexts + self.assoc.rejected_contexts ) # User Information - PS3.7 Annex D.3.3 primitive.user_information = self.acceptor.user_information self.acceptor.primitive = primitive self.dul.send_pdu(primitive) def send_ap_abort(self, reason: int) -> None: """Send an A-P-ABORT to the peer. 
Parameters ---------- reason : int The reason for aborting the association, one of the following: - ``0x00`` - reason not specified - ``0x01`` - unrecognised PDU - ``0x02`` - unexpected PDU - ``0x04`` - unrecognised PDU parameter - ``0x05`` - unexpected PDU parameter - ``0x06`` - invalid PDU parameter value Raises ------ ValueError If the `reason` value is invalid. """ if reason not in [0x00, 0x01, 0x02, 0x04, 0x05, 0x06]: raise ValueError("Invalid 'reason' parameter value") # The following parameters must be set for an A-P-ABORT primitive # (* sent in A-ABORT PDU): # Abort Source* (always 0x02) # Provider Reason* primitive = A_P_ABORT() primitive.provider_reason = reason self.dul.send_pdu(primitive) self.assoc.is_aborted = True self.assoc.is_established = False def send_reject(self, result: int, source: int, diagnostic: int) -> None: """Send an A-ASSOCIATE (reject) to the peer. Parameters ---------- result : int The association rejection: - ``0x01`` - rejected permanent - ``0x02`` - rejected transient source : int The source of the rejection: - ``0x01`` - DUL service user - ``0x02`` - DUL service provider (ACSE related) - ``0x03`` - DUL service provider (presentation related) diagnostic : int The reason for the rejection, if the `source` is ``0x01``: - ``0x01`` - no reason given - ``0x02`` - application context name not supported - ``0x03`` - calling AE title not recognised - ``0x07`` - called AE title not recognised If the `source` is ``0x02``: - ``0x01`` - no reason given - ``0x02`` - protocol version not supported If the `source` is ``0x03``: - ``0x01`` - temporary congestion - ``0x02`` - local limit exceeded """ if result not in [0x01, 0x02]: raise ValueError("Invalid 'result' parameter value") _valid_reason_diagnostic = { 0x01: [0x01, 0x02, 0x03, 0x07], 0x02: [0x01, 0x02], 0x03: [0x01, 0x02], } try: if diagnostic not in _valid_reason_diagnostic[source]: raise ValueError( "Invalid 'diagnostic' parameter value" ) except KeyError: raise ValueError("Invalid 
'source' parameter value") # The following parameters must be set for an A-ASSOCIATE (reject) # primitive (* sent in A-ASSOCIATE-RJ PDU): # Result* # Result Source* # Diagnostic* primitive = A_ASSOCIATE() primitive.result = result primitive.result_source = source primitive.diagnostic = diagnostic self.acceptor.primitive = primitive self.dul.send_pdu(primitive) self.assoc.is_rejected = True self.assoc.is_established = False def send_release(self, is_response: bool = False) -> None: """Send an A-RELEASE (request or response) to the peer. Parameters ---------- is_response : bool, optional ``True`` to send an A-RELEASE (response) to the peer, ``False`` to send an A-RELEASE (request) to the peer (default). """ primitive = A_RELEASE() if is_response: primitive.result = "affirmative" self.dul.send_pdu(primitive) def send_request(self) -> None: """Send an A-ASSOCIATE (request) to the peer.""" # The following parameters must be set for a request primitive # (* sent in A-ASSOCIATE-RQ PDU) # Application Context Name* # Calling AE Title* # Called AE Title* # UserInformation* # Maximum PDV Length* # Implementation Class UID* # Calling Presentation Address # Called Presentation Address # Presentation Context Definition List* primitive = A_ASSOCIATE() # DICOM Application Context Name, see PS3.7 Annex A.2.1 primitive.application_context_name = UID(APPLICATION_CONTEXT_NAME) # Calling AE Title is the source DICOM AE title primitive.calling_ae_title = self.requestor.ae_title # Called AE Title is the destination DICOM AE title primitive.called_ae_title = self.acceptor.ae_title # The TCP/IP address of the source, pynetdicom includes port too primitive.calling_presentation_address = ( cast(str, self.requestor.address), cast(int, self.requestor.port) ) # The TCP/IP address of the destination, pynetdicom includes port too primitive.called_presentation_address = ( cast(str, self.acceptor.address), cast(int, self.acceptor.port) ) # Proposed presentation contexts 
primitive.presentation_context_definition_list = ( self.requestor.requested_contexts ) # User Information - PS3.7 Annex D.3.3 # Mandatory items: # Maximum Length Notification (1) # Implementation Class UID Notification (1) # Optional notification items: # Implementation Version Name Notification (0 or 1) # Optional negotiation items: # SCP/SCU Role Selection Negotiation (0 or N) # Asynchronous Operations Window Negotiation (0 or 1) # SOP Class Extended Negotiation (0 or N) # SOP Class Common Extended Negotiation (0 or N) # User Identity Negotiation (0 or 1) primitive.user_information = self.requestor.user_information # Save the request primitive self.requestor.primitive = primitive # Send the A-ASSOCIATE request primitive to the peer self.dul.send_pdu(primitive)
{ "repo_name": "scaramallion/pynetdicom", "path": "pynetdicom/acse.py", "copies": "1", "size": "32783", "license": "mit", "hash": -2744636837796871700, "line_mean": 35.6700223714, "line_max": 79, "alpha_frac": 0.5588262209, "autogenerated": false, "ratio": 4.3158241179568195, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5374650338856819, "avg_score": null, "num_lines": null }
"""ACSE service provider""" import logging from pynetdicom import evt from pynetdicom._globals import APPLICATION_CONTEXT_NAME from pynetdicom.pdu_primitives import ( A_ASSOCIATE, A_RELEASE, A_ABORT, A_P_ABORT, AsynchronousOperationsWindowNegotiation, SOPClassCommonExtendedNegotiation, SOPClassExtendedNegotiation, UserIdentityNegotiation, ) from pynetdicom.presentation import ( negotiate_as_requestor, negotiate_as_acceptor ) LOGGER = logging.getLogger('pynetdicom.acse') class ACSE(object): """The Association Control Service Element (ACSE) service provider. The ACSE protocol handles association negotiation and establishment, and normal and abnormal release of an association. """ def __init__(self, assoc): """Create the ACSE service provider. Parameters ---------- assoc : association.Association The Association to provide ACSE services for. """ self._assoc = assoc @property def acceptor(self): """Return the *acceptor* :class:`~pynetdicom.association.ServiceUser`. """ return self.assoc.acceptor @property def acse_timeout(self): """Return the ACSE timeout (in seconds).""" return self.assoc.acse_timeout @property def assoc(self): """Return the parent :class:`~pynetdicom.association.Association`. .. versionadded:: 1.3 """ return self._assoc def _check_async_ops(self): """Check the user's response to an Asynchronous Operations request. .. currentmodule:: pynetdicom.pdu_primitives Returns ------- pdu_primitives.AsynchronousOperationsWindowNegotiation or None If the ``evt.EVT_ASYNC_OPS`` handler hasn't been implemented then returns ``None``, otherwise returns an :class:`AsynchronousOperationsWindowNegotiation` item with the default values for the number of operations invoked/performed (1, 1). 
""" # pylint: disable=broad-except try: # Response is always ignored as async ops is not supported inv, perf = self.requestor.asynchronous_operations _ = evt.trigger( self.assoc, evt.EVT_ASYNC_OPS, {'nr_invoked' : inv, 'nr_performed' : perf} ) except NotImplementedError: return None except Exception as exc: LOGGER.error( "Exception raised in handler bound to 'evt.EVT_ASYNC_OPS'" ) LOGGER.exception(exc) item = AsynchronousOperationsWindowNegotiation() item.maximum_number_operations_invoked = 1 item.maximum_number_operations_performed = 1 return item def _check_sop_class_common_extended(self): """Check the user's response to a SOP Class Common Extended request. Returns ------- dict The {SOP Class UID : SOPClassCommonExtendedNegotiation} items for the accepted SOP Class Common Extended negotiation items. """ # pylint: disable=broad-except try: rsp = evt.trigger( self.assoc, evt.EVT_SOP_COMMON, {'items' : self.requestor.sop_class_common_extended} ) except Exception as exc: LOGGER.error( "Exception raised in handler bound to 'evt.EVT_SOP_COMMON'" ) LOGGER.exception(exc) return {} try: rsp = { uid:ii for uid, ii in rsp.items() if isinstance(ii, SOPClassCommonExtendedNegotiation) } except Exception as exc: LOGGER.error( "Invalid type returned by handler bound to " "'evt.EVT_SOP_COMMON'" ) LOGGER.exception(exc) return {} return rsp def _check_sop_class_extended(self): """Check the user's response to a SOP Class Extended request. 
Returns ------- list of pdu_primitives.SOPClassExtendedNegotiation The SOP Class Extended Negotiation items to be sent in response """ # pylint: disable=broad-except try: user_response = evt.trigger( self.assoc, evt.EVT_SOP_EXTENDED, {'app_info' : self.requestor.sop_class_extended} ) except Exception as exc: user_response = {} LOGGER.error( "Exception raised in handler bound to 'evt.EVT_SOP_EXTENDED'" ) LOGGER.exception(exc) if not isinstance(user_response, (type(None), dict)): LOGGER.error( "Invalid type returned by handler bount to " "'evt.EVT_SOP_EXTENDED'" ) user_response = {} if not user_response: return [] items = [] for sop_class, app_info in user_response.items(): try: item = SOPClassExtendedNegotiation() item.sop_class_uid = sop_class item.service_class_application_information = app_info items.append(item) except Exception as exc: LOGGER.error( f"Unable to set the SOP Class Extended Negotiation " f"response values for the SOP Class UID {sop_class}" ) LOGGER.exception(exc) return items def _check_user_identity(self): """Check the user's response to a User Identity request. Returns ------- bool True if the user identity has been confirmed, False otherwise. pdu_primitives.UserIdentityNegotiation or None The negotiation response, if a positive response is requested, otherwise None. 
""" # pylint: disable=broad-except # The UserIdentityNegotiation (request) item req = self.requestor.user_identity try: identity_verified, response = evt.trigger( self.assoc, evt.EVT_USER_ID, { 'user_id_type' : req.user_identity_type, 'primary_field' : req.primary_field, 'secondary_field' : req.secondary_field, } ) except NotImplementedError: # If the user hasn't implemented identity negotiation then # default to accepting the association return True, None except Exception as exc: # If the user has implemented identity negotiation but an exception # occurred then reject the association LOGGER.error("Exception in handler bound to 'evt.EVT_USER_ID'") LOGGER.exception(exc) return False, None if not identity_verified: # Reject association as the user isn't authorised return False, None if req.user_identity_type in [3, 4, 5]: if req.positive_response_requested and response is not None: try: rsp = UserIdentityNegotiation() rsp.server_response = response return True, rsp except Exception as exc: # > If the acceptor doesn't support user identification it # > will accept the association without making a positive # > response LOGGER.error( "Unable to set the User Identity Negotiation's " "'server_response'" ) LOGGER.exception(exc) return True, None return True, None @property def dul(self): """Return the :class:`~pynetdicom.dul.DULServiceProvider`.""" return self.assoc.dul def is_aborted(self, abort_type='both'): """Return ``True`` if an A-ABORT and/or A-P-ABORT request has been received. .. versionchanged:: 1.5 Added `abort_type` keyword parameter. Parameters ---------- abort_type : str, optional The type of abort to check for. If ``'both'`` then will return ``True`` if an A-ABORT or A-P-ABORT is received (default). If ``'a-abort'`` then will return ``True`` if an A-ABORT is received, if ``'a-p-abort'`` then will return ``True`` if an A-P-ABORT is received. Returns ------- bool ``True`` if an abort is received, ``False`` otherwise. 
""" # A-P-ABORT: # Connection closed, FSM received invalid event or DUL sent A-ABORT abort_classes = { 'both': (A_ABORT, A_P_ABORT), 'a-abort': (A_ABORT, ), 'a-p-abort': (A_P_ABORT, ), } primitive = self.dul.peek_next_pdu() if isinstance(primitive, abort_classes[abort_type]): return True return False def is_release_requested(self): """Return ``True`` if an A-RELEASE request has been received. .. versionadded:: 1.1 """ primitive = self.dul.peek_next_pdu() if isinstance(primitive, A_RELEASE) and primitive.result is None: _ = self.dul.receive_pdu(wait=False) return True return False def negotiate_association(self): """Perform an association negotiation as either the *requestor* or *acceptor*. """ if self.assoc.is_requestor: self._negotiate_as_requestor() elif self.assoc.is_acceptor: self._negotiate_as_acceptor() def _negotiate_as_acceptor(self): """Perform an association negotiation as the association *acceptor*. """ # For convenience assoc_rq = self.requestor.primitive # Set the Requestor's AE Title self.requestor.ae_title = assoc_rq.calling_ae_title # If we reject association -> [result, source, diagnostic] reject_assoc_rsd = [] # Calling AE Title not recognised if (self.assoc.ae.require_calling_aet and assoc_rq.calling_ae_title not in self.assoc.ae.require_calling_aet): reject_assoc_rsd = [0x01, 0x01, 0x03] # Called AE Title not recognised if (self.assoc.ae.require_called_aet and assoc_rq.called_ae_title != self.acceptor.ae_title): reject_assoc_rsd = [0x01, 0x01, 0x07] ## Extended Negotiation items # User Identity Negotiation items if self.requestor.user_identity: is_valid, id_response = self._check_user_identity() if not is_valid: # Transient, ACSE related, no reason given LOGGER.info("User identity failed verification") reject_assoc_rsd = [0x02, 0x02, 0x01] if id_response: # Add the User Identity Negotiation (response) item self.acceptor.add_negotiation_item(id_response) # SOP Class Extended Negotiation items for item in self._check_sop_class_extended(): 
self.acceptor.add_negotiation_item(item) # SOP Class Common Extended Negotiation items # Note: No response items are allowed # pylint: disable=protected-access self.acceptor._common_ext = self._check_sop_class_common_extended() # pylint: enable=protected-access # Asynchronous Operations Window Negotiation items if self.requestor.asynchronous_operations != (1, 1): async_rsp = self._check_async_ops() # Add any Async Ops (response) item if async_rsp: self.acceptor.add_negotiation_item(async_rsp) ## DUL Presentation Related Rejections # Maximum number of associations reached (local-limit-exceeded) active_acceptors = [ tt for tt in self.assoc.ae.active_associations if tt.is_acceptor ] if len(active_acceptors) > self.assoc.ae.maximum_associations: reject_assoc_rsd = [0x02, 0x03, 0x02] if reject_assoc_rsd: # pylint: disable=no-value-for-parameter LOGGER.info("Rejecting Association") self.send_reject(*reject_assoc_rsd) evt.trigger(self.assoc, evt.EVT_REJECTED, {}) self.assoc.kill() return ## Negotiate Presentation Contexts # SCP/SCU Role Selection Negotiation request items # {SOP Class UID : (SCU role, SCP role)} rq_roles = { uid:(item.scu_role, item.scp_role) for uid, item in self.requestor.role_selection.items() } result, ac_roles = negotiate_as_acceptor( assoc_rq.presentation_context_definition_list, self.acceptor.supported_contexts, rq_roles ) # pylint: disable=protected-access # Accepted contexts are stored as {context ID : context} self.assoc._accepted_cx = { cx.context_id:cx for cx in result if cx.result == 0x00 } self.assoc._rejected_cx = [cx for cx in result if cx.result != 0x00] # pylint: enable=protected-access # Add any SCP/SCU Role Selection Negotiation response items for item in ac_roles: self.acceptor.add_negotiation_item(item) # Send the A-ASSOCIATE (accept) primitive LOGGER.info("Accepting Association") self.send_accept() # Callbacks/Logging evt.trigger(self.assoc, evt.EVT_ACCEPTED, {}) # Assocation established OK self.assoc.is_established = True 
evt.trigger(self.assoc, evt.EVT_ESTABLISHED, {}) def _negotiate_as_requestor(self): """Perform an association negotiation as the association *requestor*.""" if not self.requestor.requested_contexts: LOGGER.error( "One or more requested presentation contexts must be set " "prior to association negotiation" ) self.assoc.kill() return # Build and send an A-ASSOCIATE (request) PDU to the peer self.send_request() evt.trigger(self.assoc, evt.EVT_REQUESTED, {}) # Wait for response rsp = self.dul.receive_pdu(wait=True, timeout=self.acse_timeout) # Association accepted or rejected if isinstance(rsp, A_ASSOCIATE): self.acceptor.primitive = rsp # Accepted if rsp.result == 0x00: ## Handle SCP/SCU Role Selection response # Apply requestor's proposed SCP/SCU role selection (if any) # to the requested contexts rq_roles = { uid:(ii.scu_role, ii.scp_role) for uid, ii in self.requestor.role_selection.items() } if rq_roles: for cx in self.requestor.requested_contexts: try: (cx.scu_role, cx.scp_role) = rq_roles[ cx.abstract_syntax ] # If no role was specified then use False # see SCP_SCU_RoleSelectionSubItem.from_primitive cx.scu_role = cx.scu_role or False cx.scp_role = cx.scp_role or False except KeyError: pass # Collate the acceptor's SCP/SCU role selection responses ac_roles = { uid:(ii.scu_role, ii.scp_role) for uid, ii in self.acceptor.role_selection.items() } # Check the negotiated presentation contexts results and # determine their agreed upon SCP/SCU roles negotiated_contexts = negotiate_as_requestor( self.requestor.requested_contexts, rsp.presentation_context_definition_results_list, ac_roles ) # pylint: disable=protected-access # Accepted contexts are stored as {context ID : context} self.assoc._accepted_cx = { cx.context_id:cx for cx in negotiated_contexts if cx.result == 0x00 } self.assoc._rejected_cx = [ cx for cx in negotiated_contexts if cx.result != 0x00 ] # pylint: enable=protected-access evt.trigger(self.assoc, evt.EVT_ACCEPTED, {}) # No acceptable presentation 
contexts if not self.assoc.accepted_contexts: LOGGER.error("No accepted presentation contexts") self.send_abort(0x02) self.assoc.is_aborted = True self.assoc.is_established = False evt.trigger(self.assoc, evt.EVT_ABORTED, {}) self.assoc.kill() else: LOGGER.info('Association Accepted') self.assoc.is_established = True evt.trigger(self.assoc, evt.EVT_ESTABLISHED, {}) elif hasattr(rsp, 'result') and rsp.result in [0x01, 0x02]: # 0x01 is rejected (permanent) # 0x02 is rejected (transient) LOGGER.error('Association Rejected') LOGGER.error( f"Result: {rsp.result_str}, Source: {rsp.source_str}" ) LOGGER.error(f"Reason: {rsp.reason_str}") self.assoc.is_rejected = True self.assoc.is_established = False evt.trigger(self.assoc, evt.EVT_REJECTED, {}) self.dul.kill_dul() else: LOGGER.error( "Received an invalid A-ASSOCIATE response from the peer" ) LOGGER.error("Aborting Association") self.send_abort(0x02) self.assoc.is_aborted = True self.assoc.is_established = False # Event handler - association aborted evt.trigger(self.assoc, evt.EVT_ABORTED, {}) self.assoc.kill() # Association aborted elif isinstance(rsp, (A_ABORT, A_P_ABORT)): LOGGER.error("Association Aborted") self.assoc.is_established = False self.assoc.is_aborted = True evt.trigger(self.assoc, evt.EVT_ABORTED, {}) self.dul.kill_dul() elif rsp is None: # ACSE timeout LOGGER.error( "ACSE timeout reached while waiting for response to " "association request" ) self.assoc.abort() else: # Received A-RELEASE or some weird object self.assoc.is_established = False self.dul.kill_dul() def negotiate_release(self): """Negotiate association release. .. versionadded:: 1.1 Once an A-RELEASE request has been sent any received P-DATA PDUs will be ignored. 
""" # Send A-RELEASE request # Only an A-ABORT request primitive is allowed after A-RELEASE starts # (Part 8, Section 7.2.2) self.send_release(is_response=False) # We need to wait for a reply and need to handle: # P-DATA primitives # A-ABORT request primitives # A-RELEASE collisions is_collision = False while True: primitive = self.dul.receive_pdu( wait=True, timeout=self.acse_timeout ) if primitive is None: # No response received within timeout window LOGGER.info("Aborting Association") self.send_abort(0x02) self.assoc.is_aborted = True self.assoc.is_established = False evt.trigger(self.assoc, evt.EVT_ABORTED, {}) self.assoc.kill() return if isinstance(primitive, (A_ABORT, A_P_ABORT)): # Received A-ABORT/A-P-ABORT during association release LOGGER.info("Association Aborted") self.assoc.is_aborted = True self.assoc.is_established = False evt.trigger(self.assoc, evt.EVT_ABORTED, {}) self.assoc.kill() return # Any other primitive besides A_RELEASE gets trashed elif not isinstance(primitive, A_RELEASE): # Should only be P-DATA LOGGER.warning( "P-DATA received after Association release, data has " "been lost" ) continue # Must be A-RELEASE, but may be either request or release if primitive.result is None: # A-RELEASE (request) received, therefore an # A-RELEASE collision has occurred (Part 8, Section 7.2.2.7) LOGGER.debug("An A-RELEASE collision has occurred") is_collision = True if self.assoc.is_requestor: # Send A-RELEASE response self.send_release(is_response=True) # Wait for A-RELEASE response continue # Acceptor waits for A-RELEASE response before # sending their own response else: # A-RELEASE (response) received # If collision and we are the acceptor then we need to send # the A-RELEASE (response) to the requestor if self.assoc.is_acceptor and is_collision: self.send_release(is_response=True) self.assoc.is_released = True self.assoc.is_established = False evt.trigger(self.assoc, evt.EVT_RELEASED, {}) self.assoc.kill() return @property def requestor(self): 
"""Return the *requestor* :class:`~pynetdicom.association.ServiceUser`. """ return self.assoc.requestor def send_abort(self, source): """Send an A-ABORT request to the peer. Parameters ---------- source : int The source of the abort request - ``0x00`` - the DUL service user - ``0x02`` - the DUL service provider Raises ------ ValueError If the `source` value is invalid. """ if source not in [0x00, 0x02]: raise ValueError("Invalid 'source' parameter value") # The following parameters must be set for an A-ABORT primitive # (* sent in A-ABORT PDU): # Abort Source* # Provider Reason* (not significant with source 0x00) primitive = A_ABORT() primitive.abort_source = source self.dul.send_pdu(primitive) self.assoc.is_aborted = True self.assoc.is_established = False def send_accept(self): """Send an A-ASSOCIATE (accept) to the peer.""" # The following parameters must be set for an A-ASSOCIATE (accept) # primitive (* sent in A-ASSOCIATE-AC PDU): # Application Context Name* # Calling AE Title* (but not to be tested) # Called AE Title* (but not to be tested) # User Information # Maximum PDV Length* # Implementation Class UID* # Result # Result Source # Presentation Context Definition List Result* primitive = A_ASSOCIATE() primitive.application_context_name = APPLICATION_CONTEXT_NAME primitive.calling_ae_title = self.requestor.primitive.calling_ae_title primitive.called_ae_title = self.requestor.primitive.called_ae_title primitive.result = 0x00 primitive.result_source = 0x01 primitive.presentation_context_definition_results_list = ( self.assoc.accepted_contexts + self.assoc.rejected_contexts ) ## User Information - PS3.7 Annex D.3.3 primitive.user_information = self.acceptor.user_information self.acceptor.primitive = primitive self.dul.send_pdu(primitive) def send_ap_abort(self, reason): """Send an A-P-ABORT to the peer. 
Parameters ---------- reason : int The reason for aborting the association, one of the following: - ``0x00`` - reason not specified - ``0x01`` - unrecognised PDU - ``0x02`` - unexpected PDU - ``0x04`` - unrecognised PDU parameter - ``0x05`` - unexpected PDU parameter - ``0x06`` - invalid PDU parameter value Raises ------ ValueError If the `reason` value is invalid. """ if reason not in [0x00, 0x01, 0x02, 0x04, 0x05, 0x06]: raise ValueError("Invalid 'reason' parameter value") # The following parameters must be set for an A-P-ABORT primitive # (* sent in A-ABORT PDU): # Abort Source* (always 0x02) # Provider Reason* primitive = A_P_ABORT() primitive.provider_reason = reason self.dul.send_pdu(primitive) self.assoc.is_aborted = True self.assoc.is_established = False def send_reject(self, result, source, diagnostic): """Send an A-ASSOCIATE (reject) to the peer. Parameters ---------- result : int The association rejection: - ``0x01`` - rejected permanent - ``0x02`` - rejected transient source : int The source of the rejection: - ``0x01`` - DUL service user - ``0x02`` - DUL service provider (ACSE related) - ``0x03`` - DUL service provider (presentation related) diagnostic : int The reason for the rejection, if the `source` is ``0x01``: - ``0x01`` - no reason given - ``0x02`` - application context name not supported - ``0x03`` - calling AE title not recognised - ``0x07`` - called AE title not recognised If the `source` is ``0x02``: - ``0x01`` - no reason given - ``0x02`` - protocol version not supported If the `source` is ``0x03``: - ``0x01`` - temporary congestion - ``0x02`` - local limit exceeded """ if result not in [0x01, 0x02]: raise ValueError("Invalid 'result' parameter value") _valid_reason_diagnostic = { 0x01 : [0x01, 0x02, 0x03, 0x07], 0x02 : [0x01, 0x02], 0x03 : [0x01, 0x02], } try: if diagnostic not in _valid_reason_diagnostic[source]: raise ValueError( "Invalid 'diagnostic' parameter value" ) except KeyError: raise ValueError("Invalid 'source' parameter 
value") # The following parameters must be set for an A-ASSOCIATE (reject) # primitive (* sent in A-ASSOCIATE-RJ PDU): # Result* # Result Source* # Diagnostic* primitive = A_ASSOCIATE() primitive.result = result primitive.result_source = source primitive.diagnostic = diagnostic self.acceptor.primitive = primitive self.dul.send_pdu(primitive) self.assoc.is_rejected = True self.assoc.is_established = False def send_release(self, is_response=False): """Send an A-RELEASE (request or response) to the peer. Parameters ---------- is_response : bool, optional ``True`` to send an A-RELEASE (response) to the peer, ``False`` to send an A-RELEASE (request) to the peer (default). """ primitive = A_RELEASE() if is_response: primitive.result = "affirmative" self.dul.send_pdu(primitive) def send_request(self): """Send an A-ASSOCIATE (request) to the peer.""" # The following parameters must be set for a request primitive # (* sent in A-ASSOCIATE-RQ PDU) # Application Context Name* # Calling AE Title* # Called AE Title* # UserInformation* # Maximum PDV Length* # Implementation Class UID* # Calling Presentation Address # Called Presentation Address # Presentation Context Definition List* primitive = A_ASSOCIATE() # DICOM Application Context Name, see PS3.7 Annex A.2.1 primitive.application_context_name = APPLICATION_CONTEXT_NAME # Calling AE Title is the source DICOM AE title primitive.calling_ae_title = self.requestor.ae_title # Called AE Title is the destination DICOM AE title primitive.called_ae_title = self.acceptor.ae_title # The TCP/IP address of the source, pynetdicom includes port too primitive.calling_presentation_address = ( self.requestor.address, self.requestor.port ) # The TCP/IP address of the destination, pynetdicom includes port too primitive.called_presentation_address = ( self.acceptor.address, self.acceptor.port ) # Proposed presentation contexts primitive.presentation_context_definition_list = ( self.requestor.requested_contexts ) ## User Information - PS3.7 Annex 
D.3.3 # Mandatory items: # Maximum Length Notification (1) # Implementation Class UID Notification (1) # Optional notification items: # Implementation Version Name Notification (0 or 1) # Optional negotiation items: # SCP/SCU Role Selection Negotiation (0 or N) # Asynchronous Operations Window Negotiation (0 or 1) # SOP Class Extended Negotiation (0 or N) # SOP Class Common Extended Negotiation (0 or N) # User Identity Negotiation (0 or 1) primitive.user_information = self.requestor.user_information # Save the request primitive self.requestor.primitive = primitive # Send the A-ASSOCIATE request primitive to the peer self.dul.send_pdu(primitive)
{ "repo_name": "scaramallion/pynetdicom3", "path": "pynetdicom/acse.py", "copies": "1", "size": "30789", "license": "mit", "hash": -7199124405799586000, "line_mean": 35.6535714286, "line_max": 80, "alpha_frac": 0.5554905973, "autogenerated": false, "ratio": 4.351187111362352, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5406677708662352, "avg_score": null, "num_lines": null }
# acsn2hugo.py -- Convert ACSN entities network to HUGO genes network
#
# jean-daniel.granet@mines-paristech.fr

import sys
import argparse

def main():
    """Convert an ACSN entities network (.sif) to a HUGO genes network (.sif).

    Command-line arguments:
      * acsn_network_fname: ACSN network, one edge per line
        (entity A <tab> relationship <tab> entity B [<tab> PMIDS;...]).
      * correspondances_fname: .gmt reference mapping each ACSN entity to
        its HUGO symbols (entity <tab> na <tab> symbol <tab> symbol ...).
      * output_fname: output path; receives one "symbolA relationship symbolB"
        line for every pair of HUGO symbols of two related entities.
    """
    # get arguments :
    parser = argparse.ArgumentParser(description='Convert ACSN entities network to HUGO genes network')
    parser.add_argument('acsn_network_fname', help='ACSN entities network to convert, in .sif format')
    parser.add_argument('correspondances_fname', help='reference file : correspondance between ACSN entities and HUGO genes symbols, in .gmt format')
    parser.add_argument('output_fname', help='file to create : genes network using HUGO gene symbols, in sif format')
    args = parser.parse_args()

    # Map each ACSN entity name to the *set* of HUGO gene symbols it contains
    # (a set so duplicate symbols within an entity are kept only once).
    correspondances = {}
    with open(args.correspondances_fname, 'r') as fd_corr:
        # File structure : acsn entity name\tna\tgene\tlist\tin\tHUGO\tsymbols
        for line in fd_corr:
            line_split = line.split('\tna\t')
            if len(line_split) < 2:
                # Blank or malformed line: nothing to record.  The original
                # code raised IndexError here (e.g. on a trailing blank line).
                continue
            acsn_entity = line_split[0].strip()
            hugo_symbols = line_split[1].strip().split('\t')
            correspondances.setdefault(acsn_entity, set()).update(hugo_symbols)

    # Translate every ACSN edge into the corresponding HUGO edges.
    with open(args.acsn_network_fname, 'r') as fd_acsn, \
            open(args.output_fname, 'w') as fd_out:
        # File structure : entity A\trelationship\tentity B(\tPMIDS;PMIDS;...)
        # Header lines (fewer than 3 tab-separated fields) and edges whose
        # entities have no known HUGO mapping are silently skipped, matching
        # the documented intent of the original script.
        for line in fd_acsn:
            line_split = line.split('\t')
            if len(line_split) < 3:
                # Not a valid edge row (e.g. a header line without tabs).
                continue
            symbols_a = correspondances.get(line_split[0].strip())
            symbols_b = correspondances.get(line_split[2].strip())
            if symbols_a and symbols_b:
                for elem_a in symbols_a:
                    for elem_b in symbols_b:
                        # Space-separated triple, one HUGO edge per line
                        # (same output format as the original).
                        fd_out.write("%s %s %s\n" % (elem_a, line_split[1], elem_b))

if __name__ == "__main__":
    main()
{ "repo_name": "chagaz/sfan", "path": "code/acsn2hugo.py", "copies": "1", "size": "3033", "license": "mit", "hash": 5090370973253340000, "line_mean": 56.2264150943, "line_max": 149, "alpha_frac": 0.6458951533, "autogenerated": false, "ratio": 3.4505119453924915, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9514782737898755, "avg_score": 0.016324872158747242, "num_lines": 53 }
'''acs.py - azurerm functions for the Azure Container Service'''
import json
from .restfns import do_delete, do_get, do_put
from .settings import get_rm_endpoint, ACS_API


def _container_service_url(subscription_id, resource_group=None,
                           service_name=None):
    '''Build an ARM endpoint URL for Container Service operations.

    The resource-group and service-name path segments are appended only
    when provided, covering the per-service, per-group and
    per-subscription URL shapes used below.
    '''
    url = get_rm_endpoint() + '/subscriptions/' + subscription_id
    if resource_group is not None:
        url += '/resourcegroups/' + resource_group
    url += '/providers/Microsoft.ContainerService/ContainerServices'
    if service_name is not None:
        url += '/' + service_name
    return url + '?api-version=' + ACS_API


def create_container_service(access_token, subscription_id, resource_group, service_name, \
    agent_count, agent_vm_size, agent_dns, master_dns, admin_user, location, public_key=None,\
    master_count=3, orchestrator='DCOS', app_id=None, app_secret=None, admin_password=None, \
    ostype='Linux'):
    '''Create a new container service - include app_id and app_secret if using Kubernetes.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        service_name (str): Name of container service.
        agent_count (int): The number of agent VMs.
        agent_vm_size (str): VM size of agents, e.g. Standard_D1_v2.
        agent_dns (str): A unique DNS string for the agent DNS.
        master_dns (str): A unique string for the master DNS.
        admin_user (str): Admin user name.
        location (str): Azure data center location, e.g. westus.
        public_key (str): RSA public key (utf-8). Required for Linux.
        master_count (int): Number of master VMs.
        orchestrator (str): Container orchestrator. E.g. DCOS, Kubernetes.
        app_id (str): Application ID for Kubernetes.
        app_secret (str): Application secret for Kubernetes.
        admin_password (str): Admin user password. Required for Windows.
        ostype (str): Operating system. Windows or Linux.

    Returns:
        HTTP response. Container service JSON model.
    '''
    endpoint = _container_service_url(subscription_id, resource_group,
                                      service_name)

    # Assemble the request body.  Key insertion order matches the payload
    # the service has always received from this module.
    properties = {
        'orchestratorProfile': {'orchestratorType': orchestrator},
        'masterProfile': {'count': master_count, 'dnsPrefix': master_dns},
        'agentPoolProfiles': [{
            'name': 'AgentPool1',
            'count': agent_count,
            'vmSize': agent_vm_size,
            'dnsPrefix': agent_dns,
        }],
    }
    if ostype == 'Linux':
        properties['linuxProfile'] = {
            'adminUsername': admin_user,
            'ssh': {'publicKeys': [{'keyData': public_key}]},
        }
    else:  # Windows
        properties['windowsProfile'] = {
            'adminUsername': admin_user,
            'adminPassword': admin_password,
        }
    if orchestrator == 'Kubernetes':
        properties['servicePrincipalProfile'] = {
            'ClientID': app_id,
            'Secret': app_secret,
        }

    acs_body = {'location': location, 'properties': properties}
    return do_put(endpoint, json.dumps(acs_body), access_token)


def delete_container_service(access_token, subscription_id, resource_group, service_name):
    '''Delete a named container.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        service_name (str): Name of container service.

    Returns:
        HTTP response.
    '''
    endpoint = _container_service_url(subscription_id, resource_group,
                                      service_name)
    return do_delete(endpoint, access_token)


def get_container_service(access_token, subscription_id, resource_group, service_name):
    '''Get details about an Azure Container Server

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        service_name (str): Name of container service.

    Returns:
        HTTP response. JSON model.
    '''
    endpoint = _container_service_url(subscription_id, resource_group,
                                      service_name)
    return do_get(endpoint, access_token)


def list_acs_operations(access_token):
    '''List available Container Server operations.

    Args:
        access_token (str): A valid Azure authentication token.

    Returns:
        HTTP response. JSON model.
    '''
    # Provider-level operation listing does not follow the subscription
    # URL shape, so it is built directly.
    endpoint = (get_rm_endpoint() +
                '/providers/Microsoft.ContainerService/operations' +
                '?api-version=' + ACS_API)
    return do_get(endpoint, access_token)


def list_container_services(access_token, subscription_id, resource_group):
    '''List the container services in a resource group.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.

    Returns:
        HTTP response. JSON model.
    '''
    endpoint = _container_service_url(subscription_id, resource_group)
    return do_get(endpoint, access_token)


def list_container_services_sub(access_token, subscription_id):
    '''List the container services in a subscription.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.

    Returns:
        HTTP response. JSON model.
    '''
    endpoint = _container_service_url(subscription_id)
    return do_get(endpoint, access_token)
{ "repo_name": "gbowerman/azurerm", "path": "azurerm/acs.py", "copies": "1", "size": "6324", "license": "mit", "hash": 3764736169637026000, "line_mean": 40.6052631579, "line_max": 97, "alpha_frac": 0.6225490196, "autogenerated": false, "ratio": 4.313778990450205, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5436328010050204, "avg_score": null, "num_lines": null }
"""ACSTOOLS regression test helpers.""" import os import pytest from ci_watson.artifactory_helpers import get_bigdata from ci_watson.hst_helpers import raw_from_asn, ref_from_image, download_crds from astropy.io import fits from astropy.io.fits import FITSDiff __all__ = ['calref_from_image', 'BaseACSTOOLS'] def calref_from_image(input_image): """ Return a list of reference filenames, as defined in the primary header of the given input image, necessary for calibration. This is mostly needed for destriping tools. """ # NOTE: Add additional mapping as needed. # Map *CORR to associated CRDS reference file. corr_lookup = { 'DQICORR': ['BPIXTAB', 'SNKCFILE'], 'ATODCORR': ['ATODTAB'], 'BLEVCORR': ['OSCNTAB'], 'SINKCORR': ['SNKCFILE'], 'BIASCORR': ['BIASFILE'], 'PCTECORR': ['PCTETAB', 'DRKCFILE', 'BIACFILE'], 'FLSHCORR': ['FLSHFILE'], 'CRCORR': ['CRREJTAB'], 'SHADCORR': ['SHADFILE'], 'DARKCORR': ['DARKFILE', 'TDCTAB'], 'FLATCORR': ['PFLTFILE', 'DFLTFILE', 'LFLTFILE'], 'PHOTCORR': ['IMPHTTAB'], 'LFLGCORR': ['MLINTAB'], 'GLINCORR': ['MLINTAB'], 'NLINCORR': ['NLINFILE'], 'ZSIGCORR': ['DARKFILE', 'NLINFILE'], 'WAVECORR': ['LAMPTAB', 'WCPTAB', 'SDCTAB'], 'SGEOCORR': ['SDSTFILE'], 'X1DCORR': ['XTRACTAB', 'SDCTAB'], 'SC2DCORR': ['CDSTAB', 'ECHSCTAB', 'EXSTAB', 'RIPTAB', 'HALOTAB', 'TELTAB', 'SRWTAB'], 'BACKCORR': ['XTRACTAB'], 'FLUXCORR': ['APERTAB', 'PHOTTAB', 'PCTAB', 'TDSTAB']} hdr = fits.getheader(input_image, ext=0) # Mandatory CRDS reference file. # Destriping tries to ingest some *FILE regardless of *CORR. ref_files = ref_from_image(input_image, ['CCDTAB', 'DARKFILE', 'PFLTFILE']) for step in corr_lookup: # Not all images have the CORR step and it is not always on. # Destriping also does reverse-calib. if ((step not in hdr) or (hdr[step].strip().upper() not in ('PERFORM', 'COMPLETE'))): continue ref_files += ref_from_image(input_image, corr_lookup[step]) return list(set(ref_files)) # Remove duplicates # Base class for actual tests. 
# NOTE: Named in a way so pytest will not pick them up here.
# NOTE: bigdata marker requires TEST_BIGDATA environment variable to
#       point to a valid big data directory, whether locally or on Artifactory.
# NOTE: envopt would point tests to "dev" or "stable".
# NOTE: _jail fixture ensures each test runs in a clean tmpdir.
@pytest.mark.bigdata
@pytest.mark.usefixtures('_jail', 'envopt')
class BaseACSTOOLS:
    # Timeout in seconds for file downloads.
    timeout = 30

    # Instrument name used to identify this test family.
    instrument = 'acs'

    # Header keywords excluded from FITSDiff comparisons by default;
    # these vary between runs (timestamps, software versions, history).
    ignore_keywords = ['filename', 'date', 'iraf-tlm', 'fitsdate',
                       'opus_ver', 'cal_ver', 'proctime', 'history']

    # To be defined by test class in actual test modules.
    detector = ''

    @pytest.fixture(autouse=True)
    def setup_class(self, envopt):
        """
        Class-level setup that is done at the beginning of the test.

        Parameters
        ----------
        envopt : {'dev', 'stable'}
            This is a ``pytest`` fixture that defines the test
            environment in which input and truth files reside.

        """
        # Remember the environment so data-fetching methods below can
        # build the correct Artifactory path.
        self.env = envopt

    def get_input_file(self, filename):
        """
        Copy input file (ASN, RAW, etc) into the working directory.
        If ASN is given, RAW files in the ASN table is also copied.
        The associated CRDS reference files are also copied or
        downloaded, if necessary.

        Data directory layout for CALCOS::

            detector/
                input/
                truth/

        Parameters
        ----------
        filename : str
            Filename of the ASN/RAW/etc to copy over, along with its
            associated files.

        """
        # Copy over main input file.
        dest = get_bigdata('scsb-acstools', self.env, self.detector, 'input',
                           filename)

        # For historical reason, need to remove ".orig" suffix if it exists.
        # NOTE(review): str.rstrip strips a *character set*, not the literal
        # suffix. This works for typical "*.fits.orig" names but would
        # over-strip a stem ending in '.', 'o', 'r', 'i' or 'g' — confirm
        # against actual input naming before relying on it elsewhere.
        if filename.endswith('.orig'):
            newfilename = filename.rstrip('.orig')
            os.rename(filename, newfilename)
            filename = newfilename

        if filename.endswith('_asn.fits'):
            # An association table: also fetch every RAW file it lists.
            all_raws = raw_from_asn(filename)
            for raw in all_raws:  # Download RAWs in ASN.
                get_bigdata('scsb-acstools', self.env, self.detector, 'input',
                            raw)
        else:
            all_raws = [filename]

        # True only on the first ref file of the first RAW when running
        # under the ssbjenkins CI server; used to trigger the env-var
        # workaround below exactly once.
        first_pass = ('JENKINS_URL' in os.environ and
                      'ssbjenkins' in os.environ['JENKINS_URL'])

        for raw in all_raws:
            # Reference files named in each RAW's primary header.
            ref_files = calref_from_image(raw)

            for ref_file in ref_files:
                # Special reference files that live with inputs
                # (a bare filename with no IRAF-style "var$" prefix).
                if ('$' not in ref_file and
                        os.path.basename(ref_file) == ref_file):
                    get_bigdata('scsb-acstools', self.env, self.detector,
                                'input', ref_file)
                    continue

                # Jenkins cannot see Central Storage on push event,
                # and somehow setting, say, jref to "." does not work anymore.
                # So, we need this hack: point the reference-path env var
                # (e.g. "jref") at the local input directory instead.
                if '$' in ref_file and first_pass:
                    first_pass = False
                    if not os.path.isdir('/grp/hst/cdbs'):
                        ref_path = os.path.dirname(dest) + os.sep
                        var = ref_file.split('$')[0]
                        os.environ[var] = ref_path  # hacky hack hack

                # Download reference files, if needed only.
                download_crds(ref_file, timeout=self.timeout)

    def compare_outputs(self, outputs, atol=0, rtol=1e-7, raise_error=True,
                        ignore_keywords_overwrite=None):
        """
        Compare ACSTOOLS output with "truth" using ``fitsdiff``.

        Parameters
        ----------
        outputs : list of tuple
            A list of tuples, each containing filename (without path)
            of CALXXX output and truth, in that order. Example::

                [('output1.fits', 'truth1.fits'),
                 ('output2.fits', 'truth2.fits'),
                 ...]

        atol, rtol : float
            Absolute and relative tolerance for data comparison.

        raise_error : bool
            Raise ``AssertionError`` if difference is found.

        ignore_keywords_overwrite : list of str or `None`
            If not `None`, these will overwrite
            ``self.ignore_keywords`` for the calling test.

        Returns
        -------
        report : str
            Report from ``fitsdiff``.
            This is part of error message if ``raise_error=True``.

        """
        all_okay = True
        creature_report = ''

        if ignore_keywords_overwrite is None:
            ignore_keywords = self.ignore_keywords
        else:
            ignore_keywords = ignore_keywords_overwrite

        for actual, desired in outputs:
            # Truth files are fetched from the same data store as inputs,
            # but from the "truth" directory.
            desired = get_bigdata('scsb-acstools', self.env, self.detector,
                                  'truth', desired)
            fdiff = FITSDiff(actual, desired, rtol=rtol, atol=atol,
                             ignore_keywords=ignore_keywords)
            # Accumulate every report so a single failure message covers
            # all compared pairs.
            creature_report += fdiff.report()

            if not fdiff.identical and all_okay:
                all_okay = False

        if not all_okay and raise_error:
            raise AssertionError(os.linesep + creature_report)

        return creature_report
{ "repo_name": "jhunkeler/acstools", "path": "acstools/tests/helpers.py", "copies": "1", "size": "7731", "license": "bsd-3-clause", "hash": 7093138072615524000, "line_mean": 34.4633027523, "line_max": 79, "alpha_frac": 0.5600827836, "autogenerated": false, "ratio": 3.900605449041372, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4960688232641372, "avg_score": null, "num_lines": null }
"""Shared constants for the Krakrobot simulator: tick sizes, sensor
timings, map-cell encodings and GUI flags."""

# Per-tick motion increments (units as interpreted by the simulator —
# presumably map units / radians; confirm against the motion code).
TICK_MOVE = 0.01
TICK_ROTATE = 0.002

# Simulated time cost of one sensor query — TODO confirm units (seconds).
GPS_TIME = 1.0
SONAR_TIME = 0.1
FIELD_TIME = 0.1

### Map constants ###
# Integer codes stored in map cells.
MAP_GOAL = 4  # coding MAP_GOAL
MAP_START_POSITION = 3
SQUARE_SIDE = 1.0  # side length of one map square, in world units
MAP_WALL = 1  # coding MAP_WALL
MAP_WHITE = 0  # coding MAP_WHITE

# A direction hint cell is stored as the pair [MAP_SPECIAL_DIRECTION, DIRECTION].
MAP_SPECIAL_DIRECTION = 11  # coding [MAP_SPECIAL_DIRECTION, DIRECTION]
# The eight compass directions, numbered so that consecutive codes are
# 45 degrees apart (S=0, SE=1, E=2, ... SW=7).
DIRECTION_E = 2
DIRECTION_NE = 3
DIRECTION_N = 4
DIRECTION_NW = 5
DIRECTION_W = 6
DIRECTION_SW = 7
DIRECTION_S = 0
DIRECTION_SE = 1

# A distance hint cell is stored as the pair
# [MAP_SPECIAL_EUCLIDEAN_DISTANCE, DISTANCE IN MAP UNITS].
MAP_SPECIAL_EUCLIDEAN_DISTANCE = 9  # coding [MAP_SPECIAL_EUCLIDEAN_DISTANCE, DISTANCE IN MAP UNITS]
MAP_SPECIAL_OPTIMAL = 10

# Name -> cell-code lookup for the special hint cells and directions.
CONSTANT_MAP = {"direction":MAP_SPECIAL_DIRECTION, "east": DIRECTION_E, "northeast": DIRECTION_NE,
                "north": DIRECTION_N, "northwest":DIRECTION_NW, "west": DIRECTION_W,
                "southwest": DIRECTION_SW, "south": DIRECTION_S, "southeast": DIRECTION_SE,
                "distance":MAP_SPECIAL_EUCLIDEAN_DISTANCE, "optimal_path":MAP_SPECIAL_OPTIMAL
}

# Characters used in text map files -> cell codes.
# NOTE(review): '#' and '.' use literal 1 and 0 rather than
# MAP_WALL/MAP_WHITE — same values, but worth unifying.
MAP_CODING = {"#":1, ".":0, "s":MAP_START_POSITION, "x":MAP_GOAL}

# Inverse lookups (cell code -> map character / constant name).
REV_MAP_CODING = {v:k for k,v in MAP_CODING.items()}
REV_CONSTANT_MAP = {v:k for k,v in CONSTANT_MAP.items()}

# GUI
QT_NO_OPENGL = False


class KrakrobotException(Exception):
    # Project-specific exception type raised by simulator code.
    pass
{ "repo_name": "uj-robotics/Krakrobot2014Qualifications", "path": "defines.py", "copies": "1", "size": "1698", "license": "mit", "hash": -2153198698796782600, "line_mean": 28.7894736842, "line_max": 141, "alpha_frac": 0.6454652532, "autogenerated": false, "ratio": 2.769983686786297, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.8773020185584675, "avg_score": 0.028485750880324488, "num_lines": 57 }
"""Action Base Classes to do actions on to db.""" class ConnectionAction: """Base class for using the session to execute action.""" def __init__(self, connection): """Initialise connection.""" self.connection = connection @property def session(self): """Session of the connection.""" return self.connection.session @property def end_point_url(self): """End point url for connection.""" return self.connection.getEndpointURL() def get(self, url, **kwargs): """HTTP GET Method.""" action_url = '%s%s' % (self.end_point_url, url) return self.session.get(action_url, **kwargs) def post(self, url, data=None, json=None, **kwargs): """HTTP POST Method.""" action_url = '%s%s' % (self.end_point_url, url) return self.session.post( action_url, data, json, **kwargs ) def put(self, url, data=None, **kwargs): """HTTP PUT Method.""" action_url = '%s%s' % (self.end_point_url, url) return self.session.put(action_url, data, **kwargs) def head(self, url, **kwargs): """HTTP HEAD Method.""" action_url = '%s%s' % (self.end_point_url, url) return self.session.head(action_url, **kwargs) def options(self, url, **kwargs): """HTTP OPTIONS Method.""" action_url = '%s%s' % (self.end_point_url, url) return self.session.options(action_url, **kwargs) def patch(self, url, data=None, **kwargs): """HTTP PATCH Method.""" action_url = '%s%s' % (self.end_point_url, url) return self.session.patch(action_url, data, **kwargs) def delete(self, url, **kwargs): """HTTP DELETE Method.""" action_url = '%s%s' % (self.end_point_url, url) return self.session.delete(action_url, **kwargs) class DatabaseAction(ConnectionAction): """Base class for using the session to execute action.""" def __init__(self, database): """Initialise database.""" self.database = database @property def session(self): """Session of the connection.""" return self.database.connection.session @property def end_point_url(self): """End point url for database.""" return '%s/_db/%s' % ( self.database.connection.getEndpointURL(), self.database.name )
{ "repo_name": "tariqdaouda/pyArango", "path": "pyArango/action.py", "copies": "1", "size": "2392", "license": "apache-2.0", "hash": -8419510888283253000, "line_mean": 30.8933333333, "line_max": 73, "alpha_frac": 0.5823578595, "autogenerated": false, "ratio": 3.8149920255183414, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9894572107240563, "avg_score": 0.0005555555555555556, "num_lines": 75 }
# ACTION_CHECKBOX_NAME is unused, but should stay since its import from here # has been referenced in documentation. from django.contrib.admin.helpers import ACTION_CHECKBOX_NAME from main_application import Site import settings def autodiscover(): """ Auto-discover INSTALLED_APPS admin.py modules and fail silently when not present. This forces an import on them to register any admin bits they may want. """ import copy from django.conf import settings from django.utils.importlib import import_module from django.utils.module_loading import module_has_submodule for app in settings.INSTALLED_APPS: mod = import_module(app) # Attempt to import the app's admin module. try: before_import_registry = copy.copy(site._registry) import_module('%s.admin' % app) except: # Reset the model registry to the state before the last import as # this import will have to reoccur on the next request and this # could raise NotRegistered and AlreadyRegistered exceptions # (see #8245). site._registry = before_import_registry # Decide whether to bubble up this error. If the app just # doesn't have an admin module, we can ignore the error # attempting to import it, otherwise we want it to bubble up. if module_has_submodule(mod, 'admin'): raise def createmenus(): from django.conf import settings from django.utils.importlib import import_module from django.utils.module_loading import module_has_submodule for app in settings.INSTALLED_APPS: mod = import_module(app) if module_has_submodule(mod, 'menu'): import_module('%s.menu' % app) site = Site(name='', app_name='')
{ "repo_name": "jAlpedrinha/DeclRY", "path": "declry/__init__.py", "copies": "1", "size": "1632", "license": "bsd-3-clause", "hash": -1028576474429234600, "line_mean": 33.0208333333, "line_max": 76, "alpha_frac": 0.7475490196, "autogenerated": false, "ratio": 3.6839729119638824, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.49315219315638825, "avg_score": null, "num_lines": null }
"""Action class and helper functions.""" # Copyright (c) 2001-2009 ElevenCraft Inc. # See LICENSE for details. import sys from schevo.lib import optimize from schevo.base import Entity, Extent, View from schevo.introspect import commontype, isselectionmethod from schevo.label import label DEFAULT_T_METHODS = ['clone', 'create', 'delete', 'update'] LABELS_WITH_SHORTCUTS = { # 'Label': '_Label', 'Clone...': '_Clone...', 'Delete...': '_Delete...', 'Edit...': '_Edit...', 'New...': '_New...', 'View...': '_View...', } class Action(object): db = None instance = None label = '' method = None name = '' related = None selection = None type = '' @property def label_with_shortcut(self): if self.label in LABELS_WITH_SHORTCUTS: return LABELS_WITH_SHORTCUTS[self.label] else: return self.label def __cmp__(self, other): try: return cmp(self.label, other.label) except AttributeError: return cmp(hash(self), hash(other)) def get_method_action(db, instance, namespace_id, method_name, related=None): """Return action for method name.""" namespace = getattr(instance, namespace_id) method = namespace[method_name] method_label = label(method) action = Action() action.db = db action.instance = instance # Default label. action.label = u'%s...' % method_label if namespace_id == 't' and method_name in DEFAULT_T_METHODS: # Determine if there are any custom methods whose labels start # with the same string. t = action.instance.t other_found = False for other_name in t: if other_name not in DEFAULT_T_METHODS: other_label = label(t[other_name]) if other_label.startswith(method_label): other_found = True if other_found: # Custom labels, since there are custom methods that share # prefixes. if isinstance(instance, Entity): action.label = u'%s %s...' % ( method_label, label(instance.s.extent)) elif isinstance(instance, Extent): action.label = u'%s %s...' % (method_label, label(instance)) elif isinstance(instance, View): action.label = u'%s %s...' 
% ( method_label, label(instance.s.entity.s.extent)) action.method = method action.name = method_name action.related = related if namespace_id == 'q': action.type = 'query' elif namespace_id == 't': action.type = 'transaction' return action def get_relationship_actions(db, entity): """Return list of relationship actions for an entity instance.""" actions = [] if entity is not None: items = [] if entity.s.extent.relationships: items = [ 'Relationships...', ] for text in items: action = Action() action.db = db action.instance = entity action.label = text action.name = 'relationship' action.type = 'relationship' actions.append(action) return sorted(actions) def get_tx_actions(db, instance, related=None): """Return list of actions for an extent or entity instance.""" actions = [] if instance is not None: t_methods = set(instance.t) for method_name in sorted(t_methods): action = get_method_action(db, instance, 't', method_name, related) actions.append(action) return sorted(actions) def get_tx_selectionmethod_actions(db, selection): """Return list of selectionmethod transactions for an extent.""" cls = commontype(selection) if cls is None: return [] else: if getattr(cls, '_hidden_t_selectionmethods', None) is not None: hidden = cls._hidden_t_selectionmethods(selection) or [] else: hidden = [] actions = [] for method_name in sorted(cls.t): if method_name not in hidden: action = get_method_action(db, cls, 't', method_name) action.selection = selection actions.append(action) return sorted(actions) def get_view_actions(db, entity): """Return list of view actions for an entity instance.""" actions = [] if entity is not None: if (entity._hidden_views is not None and 'default' in entity._hidden_views ): return actions options = [False] for name, FieldClass in entity._field_spec.iteritems(): if FieldClass.expensive: # XXX: Remove this and add support for View objects. 
## options.append(True) break for include_expensive in options: action = get_view_action(db, entity, include_expensive) actions.append(action) return sorted(actions) def get_view_action(db, entity, include_expensive): if include_expensive: text = u'View (including expensive fields)...' else: text = u'View...' action = Action() action.db = db action.include_expensive = include_expensive action.instance = entity action.label = text action.name = 'view' action.type = 'view' return action optimize.bind_all(sys.modules[__name__]) # Last line of module.
{ "repo_name": "Schevo/schevogtk", "path": "schevogtk2/action.py", "copies": "1", "size": "5499", "license": "mit", "hash": 4450006716803458000, "line_mean": 30.0677966102, "line_max": 79, "alpha_frac": 0.5811965812, "autogenerated": false, "ratio": 4.119101123595506, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5200297704795506, "avg_score": null, "num_lines": null }
ACTION_CREATE = 0 ACTION_VIEW = 1 ACTION_UPDATE = 2 ACTION_DELETE = 3 ACTIONS = { ACTION_CREATE: 'Create', ACTION_VIEW: 'View', ACTION_UPDATE: 'Update', ACTION_DELETE: 'Delete', } STATIC = 'st' DYNAMIC = 'dy' LEVEL_GUEST = 0 LEVEL_USER = 1 LEVEL_ADMIN = 2 LEVELS = { LEVEL_GUEST: 'Guest', LEVEL_USER: 'User', LEVEL_ADMIN: 'Admin', } IP_TYPE_4 = '4' IP_TYPE_6 = '6' IP_TYPES = { IP_TYPE_4: 'IPv4', IP_TYPE_6: 'IPv6' } DHCP_OBJECTS = ("workgroup", "vrf", "vlan", "site", "range", "network", "static_interface", "dynamic_interface", "workgroup_av", "vrf_av", "vlan_av", "site_av", "range_av", "network_av") DNS_OBJECTS = ("address_record", "cname", "domain", "mx", "nameserver", "ptr", "soa", "srv", "sshfp", "txt", "view",) CORE_OBJECTS = ("ctnr_users", "ctnr", "user", "system") def get_klasses(obj_type): from cyder.cydns.address_record.forms import AddressRecordForm from cyder.cydns.cname.forms import CNAMEForm from cyder.core.ctnr.forms import CtnrForm from cyder.cydns.domain.forms import DomainForm from cyder.cydhcp.interface.dynamic_intr.forms import DynamicInterfaceForm from cyder.cydns.mx.forms import MXForm from cyder.cydns.nameserver.forms import NameserverForm from cyder.cydhcp.network.forms import NetworkForm, NetworkAVForm from cyder.cydns.ptr.forms import PTRForm from cyder.cydhcp.range.forms import RangeForm, RangeAVForm from cyder.cydhcp.site.forms import SiteForm, SiteAVForm from cyder.cydns.soa.forms import SOAForm, SOAAVForm from cyder.cydns.srv.forms import SRVForm from cyder.cydns.sshfp.forms import SSHFPForm from cyder.core.system.forms import SystemForm, SystemAVForm from cyder.cydhcp.interface.static_intr.forms import StaticInterfaceForm from cyder.cydns.txt.forms import TXTForm from cyder.cydhcp.vlan.forms import VlanForm, VlanAVForm from cyder.cydhcp.vrf.forms import VrfForm, VrfAVForm from cyder.cydhcp.workgroup.forms import WorkgroupForm, WorkgroupAVForm from cyder.models import ( AddressRecord, CNAME, Ctnr, Domain, DynamicInterface, MX, Nameserver, 
Network, NetworkAV, PTR, Range, RangeAV, Site, SiteAV, SOA, SOAAV, SRV, SSHFP, StaticInterface, System, SystemAV, TXT, Vlan, VlanAV, Vrf, VrfAV, Workgroup, WorkgroupAV ) klasses = { 'address_record': (AddressRecord, AddressRecordForm), 'cname': (CNAME, CNAMEForm), 'ctnr': (Ctnr, CtnrForm), 'domain': (Domain, DomainForm), 'dynamic_interface': (DynamicInterface, DynamicInterfaceForm), 'mx': (MX, MXForm), 'nameserver': (Nameserver, NameserverForm), 'network': (Network, NetworkForm), 'network_av': (NetworkAV, NetworkAVForm), 'ptr': (PTR, PTRForm), 'range': (Range, RangeForm), 'range_av': (RangeAV, RangeAVForm), 'site': (Site, SiteForm), 'site_av': (SiteAV, SiteAVForm), 'soa': (SOA, SOAForm), 'soa_av': (SOAAV, SOAAVForm), 'srv': (SRV, SRVForm), 'sshfp': (SSHFP, SSHFPForm), 'static_interface': (StaticInterface, StaticInterfaceForm), 'system': (System, SystemForm), 'system_av': (SystemAV, SystemAVForm), 'txt': (TXT, TXTForm), 'vlan': (Vlan, VlanForm), 'vlan_av': (VlanAV, VlanAVForm), 'vrf': (Vrf, VrfForm), 'vrf_av': (VrfAV, VrfAVForm), 'workgroup': (Workgroup, WorkgroupForm), 'workgroup_av': (WorkgroupAV, WorkgroupAVForm), } return klasses[obj_type]
{ "repo_name": "murrown/cyder", "path": "cyder/base/constants.py", "copies": "1", "size": "3613", "license": "bsd-3-clause", "hash": 1569970905253723100, "line_mean": 34.4215686275, "line_max": 79, "alpha_frac": 0.6460005536, "autogenerated": false, "ratio": 3.1335646140503037, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4279565167650304, "avg_score": null, "num_lines": null }
"""action_feedback model.""" from django.db import models from django.contrib.auth.models import User from django.contrib import admin from apps.widgets.smartgrid.models import Action from apps.managers.challenge_mgr import challenge_mgr from apps.admin.admin import challenge_designer_site, challenge_manager_site, developer_site from apps.widgets.AskQuestion.models import AskQuestion class ReturnAnswer(models.Model): """Defines the Action Feedback model.""" m_question = models.ForeignKey(AskQuestion, null=True, blank=True, help_text="The user asking the question.") m_answer = models.CharField(max_length=8000) m_user = models.ForeignKey(User, null=True, blank=True, help_text="The user answering the question.") m_rating = models.IntegerField(null=True,default=1) def __unicode__(self): return "%s rated %s %d and said %s" % \ (self.user.username, self.action.name, self.rating, self.comment) admin.site.register(ReturnAnswer) challenge_designer_site.register(ReturnAnswer) challenge_manager_site.register(ReturnAnswer) developer_site.register(ReturnAnswer) #challenge_mgr.register_developer_game_info_model("Smart Grid Game", ActionFeedback)
{ "repo_name": "vijayanandau/KnowledgeShare", "path": "makahiki/apps/widgets/ReturnAnswer/models.py", "copies": "1", "size": "1320", "license": "mit", "hash": -5163270401571447000, "line_mean": 40.25, "line_max": 92, "alpha_frac": 0.6946969697, "autogenerated": false, "ratio": 4.036697247706422, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5231394217406422, "avg_score": null, "num_lines": null }
"""Action functions to be taken in response to events.""" import re # pylint: disable-msg=W0613 def call_notification(modem, message): """Execute when someone is calling.""" print('Someone is calling') def null_action(modem, message): """Take no action.""" pass def rssi_update(modem, message): """Handle RSSI level change.""" modem.status.rssi = modem.get_rssi() def flow_report_update(modem, message): """Update connection report.""" hex2dec = lambda h: int(h, 16) flow_rpt = message[11:].rstrip() values = [hex2dec(item) for item in flow_rpt.split(',', 7)] sts = modem.status (sts.link_uptime, sts.uplink, sts.downlink, sts.byte_tx, sts.byte_rx) = values[0:5] def mode_update(modem, message): """Update connection mode.""" # Source info is no longer available, taken from: # https://forge.betavine.net/pipermail/vodafonemobilec-devel/ # 2007-November/000044.html mode_dict = {'0': 'No service', '1': 'AMPS', '2': 'CDMA', '3': 'GSM/GPRS', '4': 'HDR', '5': 'WCDMA', '6': 'GPS'} submode_dict = {'0': 'None', '1': 'GSM', '2': 'GPRS', '3': 'EDEG', '4': 'WCDMA', '5': 'HSDPA', '6': 'HSUPA', '7': 'HSDPA'} mode, submode = message[6:].strip().split(',', 1) modem.status.mode = '%s/%s' % (mode_dict[mode], submode_dict[submode]) def new_message(modem, message): """New message action.""" print('New message arrived.') PATTERN = {'incoming call': re.compile(r'^RING\r\n'), 'new sms': re.compile(r'^\+CMTI:.*'), 'rssi update': re.compile(r'^\^RSSI:.*'), 'flow report': re.compile(r'^\^DSFLOWRPT:'), 'mode update': re.compile(r'^\^MODE:.*'), 'boot update': re.compile(r'^\^BOOT:.*$'), 'new line': re.compile(r'^\r\n$'), 'empty line': re.compile(r'^$')} STANDARD_ACTIONS = [(PATTERN['incoming call'], call_notification), (PATTERN['new line'], null_action), (PATTERN['empty line'], null_action), (PATTERN['boot update'], null_action), (PATTERN['new sms'], new_message), (PATTERN['mode update'], mode_update), (PATTERN['rssi update'], rssi_update), (PATTERN['flow report'], flow_report_update)]
{ "repo_name": "oozie/pyhumod", "path": "humod/actions.py", "copies": "1", "size": "2285", "license": "bsd-3-clause", "hash": 4282779385101633500, "line_mean": 37.7288135593, "line_max": 78, "alpha_frac": 0.5693654267, "autogenerated": false, "ratio": 3.178025034770515, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.918484777644659, "avg_score": 0.012508537004784957, "num_lines": 59 }
"""Action Geneator API. From information about theorem prover's state, generate a set of possible actions to take in the prover. """ from __future__ import absolute_import from __future__ import division # Import Type Annotations from __future__ import print_function import collections import numpy as np import scipy import tensorflow as tf from typing import List, Tuple, Optional, Text from deepmath.deephol import deephol_pb2 from deepmath.deephol import embedding_store from deepmath.deephol import predictions from deepmath.deephol import process_sexp from deepmath.deephol import proof_search_tree from deepmath.deephol import theorem_fingerprint from deepmath.deephol.utilities import normalization_lib from deepmath.proof_assistant import proof_assistant_pb2 Suggestion = collections.namedtuple('Suggestion', ['string', 'score']) EPSILON = 1e-12 MAX_CLOSEST = 100 MIN_SCORED_PARAMETERS = 1 WORD_WEIGHTS_NOISE_SCALE = 1.0 def _theorem_string_for_similarity_scorer(thm: proof_assistant_pb2.Theorem ) -> Text: return process_sexp.process_sexp(str(thm.conclusion)) class SimilarityScorer(object): """SimilarityScorer.""" def __init__(self, theorem_database): # We assume theorem database is normalized (so can use tokens directly). 
self.theorem_database = theorem_database self.num_words = 0 self.word_to_index = {} self.vocab = [] self.freq = [] # build vocab, freq for theorem in self.theorem_database.theorems: if theorem.training_split != proof_assistant_pb2.Theorem.TRAINING: continue words = _theorem_string_for_similarity_scorer(theorem).split() for word in words: if word in self.word_to_index: index = self.word_to_index[word] else: index = self.num_words self.num_words += 1 self.word_to_index[word] = index self.vocab.append(word) self.freq.append(0) self.freq[index] += 1 freq_sum = sum(self.freq) self.inv_freq = np.array([1.0 / float(f) for f in self.freq]) tf.logging.info('Vocab size: %d', self.num_words) tf.logging.info('Frequency sum: %d', freq_sum) self.reset_word_weights() def reset_word_weights(self): """Reset word weights, and recompute premise_vectors.""" tf.logging.info('Resetting word weights') self.word_weights = np.multiply( self.inv_freq, np.absolute( np.random.normal( loc=1.0, scale=WORD_WEIGHTS_NOISE_SCALE, size=self.num_words))) self.premise_vectors = np.array([ self.vectorize(_theorem_string_for_similarity_scorer(theorem)) for theorem in self.theorem_database.theorems ]) def vectorize(self, sentence: Text): v = np.zeros(self.num_words) for word in sentence.split(): # TODO(kbk): Consider counting words not in index. if word in self.word_to_index: index = self.word_to_index[word] v[index] += self.word_weights[index] return v def _compute_parameter_string(types, pass_no_arguments: bool, thm_ranked: List[Tuple[float, Text]] ) -> List[Text]: """Computes appropriate parameters from a ranked list based on tactic type. Args: types: Expected type of computed parameters (e.g. thm, list of thms, etc), of type List[deephol_pb2.Tactic.ParameterType]. pass_no_arguments: Pass no parameters to the tactic. thm_ranked: ranked theorem parameters. Returns: A list of string-represented parameter candidates. Raises: ValueError: if appropriate parameter candidates cannot be generated. 
""" if not types: return [''] if not thm_ranked: raise ValueError('Theorem parameters are required.') if types == [deephol_pb2.Tactic.THEOREM]: return [' %s' % thm_ranked[0][1]] if types == [deephol_pb2.Tactic.THEOREM_LIST]: ret = [] if not thm_ranked: ret.append(' [ ]') return ret # If predictor also suggests passing no arguments to the tactic, then # additionally return an empty list as a parameter string. if pass_no_arguments: ret.append(' [ ]') best_thms = [t for _, t in thm_ranked] ret.append(' [ %s ]' % ' ; '.join(best_thms)) return ret raise ValueError('Unsupported tactic parameter types %s' % str(types)) class ActionGenerator(object): """Generates candidate actions given the theorem prover's current state.""" def __init__( self, theorem_database: proof_assistant_pb2.TheoremDatabase, tactics: List[deephol_pb2.Tactic], predictor: predictions.Predictions, options: deephol_pb2.ActionGeneratorOptions, model_architecture: deephol_pb2.ProverOptions.ModelArchitecture, emb_store: Optional[embedding_store.TheoremEmbeddingStore] = None): self.theorem_database = theorem_database self.tactics = tactics self.predictor = predictor self.options = options self.model_architecture = model_architecture self.embedding_store = emb_store self.thm_names = [ theorem_fingerprint.ToTacticArgument(thm) for thm in theorem_database.theorems ] self.thm_index_by_fingerprint = { theorem_fingerprint.Fingerprint(thm): i for (i, thm) in enumerate(theorem_database.theorems) } self.similarity_scorer = SimilarityScorer(self.theorem_database) def _get_theorem_scores(self, proof_state_enc, thm_number: int, tactic_id: int): """Get the scores of all the theorems before the given theorem index. This functions scores all preceding theorems in the list of theorems, by computing all pairwise scores with the given proof state encoding. Args: proof_state_enc: A numpy vector of the proof state encoding. thm_number: Index of the theorem in the theorem database. 
tactic_id: For tactic dependent prediction, provide tactic id. Returns: A numpy vector of theorem scores for all preceding theorems in the same order they are present in the theorem database. """ if self.embedding_store: return self.embedding_store.get_thm_scores_for_preceding_thms( proof_state_enc, thm_number, tactic_id) relevant_thms = self.theorem_database.theorems[:thm_number] if relevant_thms: # TODO(smloos): update predictions API to use proof_assistant_pb2.Theorem thms_emb = self.predictor.batch_thm_embedding([ normalization_lib.normalize(thm).conclusion for thm in relevant_thms ]) else: thms_emb = np.empty([0]) tf.logging.debug(thms_emb) if len(thms_emb): # pylint: disable=g-explicit-length-test thm_scores = self.predictor.batch_thm_scores(proof_state_enc, thms_emb, tactic_id) else: thm_scores = [] tf.logging.debug(thm_scores) return thm_scores def _compute_tactic_scores(self, proof_state_encoded): if self.options.random_tactic_probability > np.random.random(): return np.random.random([len(self.tactics)]) return self.predictor.batch_tactic_scores([proof_state_encoded])[0] def compute_closest(self, goal, thm_number): if not (self.options.HasField('num_similar_parameters') and self.options.num_similar_parameters.max_value > 0): return None if self.options.bag_of_words_similar: return self.compute_bag_of_words_closest(goal, thm_number) return self.compute_network_based_closest(goal, thm_number) def compute_bag_of_words_closest(self, goal, thm_number): self.similarity_scorer.reset_word_weights() goal_vector = self.similarity_scorer.vectorize( _theorem_string_for_similarity_scorer(goal)) distance_scores = scipy.spatial.distance.cdist( self.similarity_scorer.premise_vectors[:thm_number], goal_vector.reshape(1, -1), 'cosine').reshape(-1).tolist() ranked_closest = sorted(zip(distance_scores, self.thm_names)) return ranked_closest[:self.options.max_theorem_parameters] def compute_network_based_closest(self, goal, thm_number): """Compute closest based on premise 
embeddings.""" # TODO(kbk): Add unit tests for this section (similar_parameters). goal_embedding_as_thm = self.predictor.thm_embedding( normalization_lib.normalize(goal).conclusion) premise_embeddings = ( self.embedding_store.get_embeddings_for_preceding_thms(thm_number)) # distance_score each is in [0,2] distance_scores = scipy.spatial.distance.cdist( premise_embeddings, goal_embedding_as_thm.reshape(1, -1), 'cosine').reshape(-1).tolist() ranked_closest = sorted(zip(distance_scores, self.thm_names)) ranked_closest = ranked_closest[:MAX_CLOSEST] tf.logging.info( 'Cosine closest in premise embedding space:\n%s', '\n'.join( ['%s: %.6f' % (name, score) for score, name in ranked_closest])) # add some noise to top few and rerank noise = np.random.normal(scale=0.2, size=MAX_CLOSEST) ranked_closest = [(score + noise[i], name) for i, (score, name) in enumerate(ranked_closest)] ranked_closest = sorted(ranked_closest) return ranked_closest[:self.options.max_theorem_parameters] def add_similar(self, thm_ranked, ranked_closest): """Mix in provided ranked_closest theorems to thm_ranked.""" if not ranked_closest: return thm_ranked[:self.options.max_theorem_parameters] num_similar = np.random.random_integers( self.options.num_similar_parameters.min_value, self.options.num_similar_parameters.max_value) num_similar = min( num_similar, self.options.max_theorem_parameters - MIN_SCORED_PARAMETERS) ranked_closest = ranked_closest[:num_similar] # remove duplicates ranked_closest_names = [name for score, name in ranked_closest] thm_ranked = [(score, name) for score, name in thm_ranked if name not in ranked_closest_names] return (ranked_closest + thm_ranked)[:self.options.max_theorem_parameters] def step(self, node: proof_search_tree.ProofSearchNode, premises: proof_assistant_pb2.PremiseSet) -> List[Suggestion]: """Generates a list of possible ApplyTactic argument strings from a goal. Args: node: state of the proof search, starting at current goal. 
premises: Specification of the selection of premises that can be used for tactic parameters. Currently we are supporting only a single DatabaseSection. Returns: List of string arugments for HolLight.ApplyTactic function, along with scores (Suggestion). """ assert not premises.reference_sets, ('Premise reference sets are not ' 'supported.') assert len(premises.sections) == 1, ('Premise set must have exactly one ' 'section.') # TODO(szegedy): If the premise is not specified, we want the whole # database to be used. Not sure if -1 or len(database.theorems) would do # that or not. Assertion will certainly fail before that. # Also we don't have checks on this use case. assert premises.sections[0].HasField('before_premise'), ('Premise is ' 'required.') fp = premises.sections[0].before_premise thm_number = self.thm_index_by_fingerprint.get(fp) assert thm_number is not None assert theorem_fingerprint.Fingerprint( self.theorem_database.theorems[thm_number]) == fp thm_names = self.thm_names[:thm_number] tf.logging.debug(thm_names) # TODO(smloos): update predictor api to accept theorems directly proof_state = predictions.ProofState( goal=str(normalization_lib.normalize(node.goal).conclusion)) proof_state_emb = self.predictor.proof_state_embedding(proof_state) proof_state_enc = self.predictor.proof_state_encoding(proof_state_emb) tf.logging.debug(proof_state_enc) tactic_scores = self._compute_tactic_scores(proof_state_enc) empty_emb = self.predictor.thm_embedding('') empty_emb_batch = np.reshape(empty_emb, [1, empty_emb.shape[0]]) enumerated_tactics = enumerate(self.tactics) if self.options.asm_meson_only: enumerated_tactics = [ v for v in enumerated_tactics if str(v[1].name) == 'ASM_MESON_TAC' ] assert enumerated_tactics, ( 'action generator option asm_meson_only requires ASM_MESON_TAC.') ranked_closest = self.compute_closest(node.goal, thm_number) if ranked_closest: tf.logging.info( 'Cosine closest picked:\n%s', '\n'.join( ['%s: %.6f' % (name, score) for score, name in 
ranked_closest])) ret = [] thm_scores = None # TODO(smloos): This computes parameters for all tactics. It should cut off # based on the prover BFS options. for tactic_id, tactic in enumerated_tactics: if (thm_scores is None or self.model_architecture == deephol_pb2.ProverOptions.PARAMETERS_CONDITIONED_ON_TAC): thm_scores = self._get_theorem_scores(proof_state_enc, thm_number, tactic_id) tf.logging.debug(thm_scores) no_params_score = self.predictor.batch_thm_scores( proof_state_enc, empty_emb_batch, tactic_id)[0] tf.logging.info('Theorem score for empty theorem: %f0.2', no_params_score) thm_ranked = sorted( zip(thm_scores, self.thm_names), reverse=True)[:self.options.max_theorem_parameters] pass_no_arguments = thm_ranked[-1][0] < no_params_score thm_ranked = self.add_similar(thm_ranked, ranked_closest) tf.logging.info('thm_ranked: %s', str(thm_ranked)) tactic_str = str(tactic.name) try: tactic_params = _compute_parameter_string( list(tactic.parameter_types), pass_no_arguments, thm_ranked) for params_str in tactic_params: ret.append( Suggestion( string=tactic_str + params_str, score=tactic_scores[tactic_id])) except ValueError as e: tf.logging.warning('Failed to compute parameters for tactic %s: %s', tactic.name, str(e)) return ret class MesonActionGenerator(object): """Trivial action generator, which always returns MESON tactic.""" def step(self, goal: proof_assistant_pb2.Theorem, thm: proof_assistant_pb2.Theorem) -> List[Tuple[Text, float]]: del goal # unused del thm # unused return [('ASM_MESON_TAC [ ]', 1.0)]
{ "repo_name": "tensorflow/deepmath", "path": "deepmath/deephol/action_generator.py", "copies": "1", "size": "14678", "license": "apache-2.0", "hash": -7636460593149942000, "line_mean": 39.3241758242, "line_max": 79, "alpha_frac": 0.6625562066, "autogenerated": false, "ratio": 3.4463489081944116, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.46089051147944116, "avg_score": null, "num_lines": null }
ACTION_IDENTIFIER = "_ACTION" KEY_IDENTIFIER = "_KEY" # - Action Types: LOAD_URL_ACTION = "load_url" CLICK_ACTION = "click" CLICK_ELEMENT_WITH_OFFSET_ACTION = "click_element_with_offset" HOVER_ACTION = "hover" SCROLL_AN_ELEMENT_ACTION = "scroll_an_element" REFRESH_ACTION = "refresh" ENTER_TEXT_ACTION = "enter_text" SEND_SPECIAL_KEY_ACTION = "send_special_key" SLEEP_ACTION = "sleep" WAIT_FOR_ELEMENT_ACTION = "wait_for_element" SCROLL_WINDOW_TO_POSITION_ACTION = "scroll_window_to_position" SCROLL_WINDOW_TO_ELEMENT_ACTION = "scroll_window_to_element" FOR_EACH_ACTION = "for_each" SHOW_ELEMENT_ACTION = "show_element" HIDE_ELEMENT_ACTION = "hide_element" SWITCH_WINDOW_HANDLE_ACTION = "switch_window_handle" CLOSE_WINDOW_ACTION = "close_window" EXECUTE_SCRIPT_ACTION = "execute_script" FOCUS_ACTION = "focus" ALL_ACTION_TYPES = [locals()[v] for v in dir() if v.endswith(ACTION_IDENTIFIER)] # - Action Keys ACTION_KEY = "action" ACTION_LIST_KEY = "actions" ALLOW_EMPTY_KEY = "allow_empty" BYPASS_404_KEY = "bypass_404" CHILD_KEY = "child" CSS_SELECTOR_KEY = "css_selector" CURRENT_URL_KEY = "current_url" DO_NOT_INCREMENT_KEY = "do_not_increment_element_count" DURATION_KEY = "duration" ELEMENT_KEY = "element" FULL_NAME_KEY = "full_name" INDEX_KEY = "index" INPUT_KEY = "input" INPUT_TYPE_KEY = "input_type" LIBRARY_KEY = "library" PATH_KEY = "path" POSITION_BOTTOM_KEY = "position_bottom" POSITION_MIDDLE_KEY = "position_middle" POSITION_TOP_KEY = "position_top" REFERENCE_KEY = "ref" SCRIPT_KEY = "script" SCROLL_PADDING_KEY = "scroll_padding" SPECIAL_KEY_KEY = "key" SUFFIX_KEY = "suffix" URL_KEY = "url" VIEWPORT_ONLY_KEY = "viewport_only" X_POSITION_KEY = "x_position" Y_POSITION_KEY = "y_position" ALL_ACTION_KEYS = [locals()[v] for v in dir() if v.endswith(KEY_IDENTIFIER)]
{ "repo_name": "meltmedia/the-ark", "path": "the_ark/resources/action_constants.py", "copies": "1", "size": "1786", "license": "apache-2.0", "hash": -1829212935552600300, "line_mean": 30.350877193, "line_max": 80, "alpha_frac": 0.7250839866, "autogenerated": false, "ratio": 2.790625, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.40157089865999995, "avg_score": null, "num_lines": null }
__actionname__ = 'bats eyelashes :lips:' from sys import argv from Character import Character, get_data from dice import d6, check import private import slacker def main(argv): slack = slacker.Slacker(private.sixthWorld) charactername = argv[0] difficulty = argv[1] channelname = argv[2] modifiers = \ { 'sexy sleepwear': 4, 'pheromone augmentation': 2 } last = len(modifiers) - 1 mod_bonus = 0 for key, value in modifiers.items(): mod_bonus += value character = get_data(charactername) die_pool = character['Charisma'] + character['Seduction'] + mod_bonus # create good looking description description = '' description += ('_' + character['Name'] + ' ' + __actionname__ + '_\n') description += ( "Charisma(" + str(character['Charisma']) + ") + " + "Seduction(" + str(character['Seduction']) + ") + ") for i, key in enumerate(modifiers): if i == last: operator = '= ' else: operator = '+ ' description += ( str(key) + "(" + str(modifiers[key]) + ") " + operator) description += ("*" + str(die_pool) + "d6*") # perform the roll roll = d6(die_pool) # send results to slack output = check(difficulty, roll) slack.chat.post_message(channelname, description + '\n' + str(roll) + "\n" + output, username=character['Name'], icon_url=character['imageURL']) if __name__ == '__main__': main(['jtrip', 5, '#scratch'])
{ "repo_name": "jtrip/slackrun", "path": "charm.py", "copies": "1", "size": "1542", "license": "bsd-2-clause", "hash": -1953781138456815900, "line_mean": 24.2786885246, "line_max": 148, "alpha_frac": 0.5648508431, "autogenerated": false, "ratio": 3.5944055944055946, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.46592564375055945, "avg_score": null, "num_lines": null }
'''Action''' from src.constants import Constants as C import sys import logging as log class Action(object): '''Action''' def __init__(self, server, node): '''Init ''' self.server = server self.node = node self.actions = None def get_pending(self): '''Get Pending actions from server''' self.actions = self._get_pending(self.node) return len(self.actions) def _get_pending(self, node): '''Get pending actions for node''' log.info('Getting Node Actions') endpoint = '/api/actions/pending/%d' % node.get_id() data = self.server.get(endpoint, None) log.info('Got Pending Actions: %s', data) return data def has_pending(self): '''Check if there are pending actions''' num_pending = len(self.actions) log.info('Has %d Pending Actions', num_pending) return num_pending > 0 def update_action_status(self, action, status): '''Update status for the action''' log.info('Updating Action (%s) Status to: %s', action['action'], status) action['status'] = status return self.server.put('/api/actions', action) def respond_to_pending(self): '''Respong to all pending actions''' for action in self.actions: self._respond_to_action(action) def _respond_to_action(self, action): '''Respond to Actions from the server''' action_name = action['action'] #args = action['args'] log.info('Responding to Action: %s', action) #Mark action as completed self.update_action_status(action, C.ACTION_STATUS_COMPLETED) #TODO respond to all actions if action_name == C.ACTION_KILL: message = 'Shutting down node.' log.info(message) sys.exit()
{ "repo_name": "CloudWorkers/cloudworker", "path": "node/src/action.py", "copies": "1", "size": "1863", "license": "apache-2.0", "hash": -5341945600510341000, "line_mean": 24.875, "line_max": 80, "alpha_frac": 0.5877616747, "autogenerated": false, "ratio": 4.0588235294117645, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5146585204111764, "avg_score": null, "num_lines": null }
"""Action""" import torch import torch.nn as nn import torch.nn.functional as F import matplotlib.pyplot as plt import matplotlib.cm as cm import skimage.transform from scipy.misc import imread, imresize from PIL import Image class Action: def __init__(self): super(Action, self).__init__() # sets device for model and PyTorch tensors pass def get_optimizer(self, model, learning_rate): """Get optimizer""" optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) return optimizer def get_loss_fn(self): """Get loss function""" loss_fn = nn.CrossEntropyLoss() return loss_fn def clip_gradient(self, optimizer, grad_clip): """ Clips gradients computed during backpropagation to avoid explosion of gradients. :param optimizer: optimizer with the gradients to be clipped :param grad_clip: clip value """ for group in optimizer.param_groups: for param in group["params"]: if param.grad is not None: param.grad.data.clamp_(-grad_clip, grad_clip) def caption_by_beam_search(self, decoder, encoder_out, word_map, beam_size=5): """Caption by beam search :param decoder: decoder model :param beam_size: number of sequences to consider at each decode-step :return: caption, weights for visualization """ device = encoder_out.device vocab_size = len(word_map) # Lists to store completed sequences, their alphas and scores complete_seqs = list() complete_seqs_alpha = list() complete_seqs_scores = list() enc_image_size = encoder_out.size(1) # Tensor to store top k previous words at each step; # now they're just <start> k_prev_words = torch.LongTensor([[word_map["<start>"]]] * beam_size).to(device) # Tensor to store top k sequences; now they're just <start> seqs = k_prev_words # Tensor to store top k sequences' scores; now they're just 0 top_k_scores = torch.zeros(beam_size, 1).to(device) # Tensor to store top k sequences' alphas; now they're just 1s # (k, 1, enc_image_size, enc_image_size) seqs_alpha = torch.ones(beam_size, 1, enc_image_size, enc_image_size).to(device) # Start decoding 
step = 1 h, c = decoder.init_hidden_state(encoder_out) # s is a number less than or equal to k, # because sequences are removed from this process once they hit <end> while True: # (s, embed_dim) embeddings = decoder.embedding(k_prev_words).squeeze(1) attention_weighted_encoding, alpha = \ decoder.attention(encoder_out, h) # (s, enc_image_size, enc_image_size) alpha = alpha.view(-1, enc_image_size, enc_image_size) # gating scalar, (s, encoder_dim) gate = decoder.sigmoid(decoder.f_beta(h)) attention_weighted_encoding = gate * attention_weighted_encoding # (s, decoder_dim) h, c = decoder.decode_step(torch.cat([embeddings, \ attention_weighted_encoding], dim=1), (h, c)) # (s, vocab_size) scores = decoder.fc(h) scores = F.log_softmax(scores, dim=1) # Add # (s, vocab_size) scores = top_k_scores.expand_as(scores) + scores # For the first step, all k points will have the same # scores (since same k previous words, h, c) if step == 1: top_k_scores, top_k_words = scores[0].topk(beam_size, 0) else: top_k_scores, top_k_words = scores.view(-1).topk(beam_size, 0) # Convert unrolled indices to actual indices of scores prev_word_inds = top_k_words / vocab_size # (s) next_word_inds = top_k_words % vocab_size # (s) # Add new words to sequences, alphas # (s, step+1) seqs = torch.cat([seqs[prev_word_inds], next_word_inds.unsqueeze(1)], dim=1) # (s, step+1, enc_image_size, enc_image_size) seqs_alpha = torch.cat([seqs_alpha[prev_word_inds], alpha[prev_word_inds].unsqueeze(1)], dim=1) # Which sequences are incomplete (didn't reach <end>)? 
incomplete_inds = [ind for ind, next_word in enumerate(next_word_inds) if next_word != word_map['<end>']] complete_inds = list(set(range(len(next_word_inds))) - set(incomplete_inds)) # Set aside complete sequences if len(complete_inds) > 0: complete_seqs.extend(seqs[complete_inds].tolist()) complete_seqs_alpha.extend(seqs_alpha[complete_inds].tolist()) complete_seqs_scores.extend(top_k_scores[complete_inds]) # reduce beam length accordingly beam_size -= len(complete_inds) # Proceed with incomplete sequences if beam_size == 0: break seqs = seqs[incomplete_inds] seqs_alpha = seqs_alpha[incomplete_inds] h = h[prev_word_inds[incomplete_inds]] c = c[prev_word_inds[incomplete_inds]] encoder_out = encoder_out[prev_word_inds[incomplete_inds]] top_k_scores = top_k_scores[incomplete_inds].unsqueeze(1) k_prev_words = next_word_inds[incomplete_inds].unsqueeze(1) # Break if things have been going on too long if step > 50: break step += 1 i = complete_seqs_scores.index(max(complete_seqs_scores)) seq = complete_seqs[i] alphas = complete_seqs_alpha[i] return seq, alphas def visualize_att(self, image, seq, alphas, rev_word_map, smooth=True): """ Visualizes caption with weights at every word. Adapted from paper authors' repo: https://github.com/kelvinxu/arctic-captions/blob/master/alpha_visualization.ipynb :param image: image that has been captioned :param seq: caption :param alphas: weights :param rev_word_map: reverse word mapping, i.e. ix2word :param smooth: smooth weights? 
""" words = [rev_word_map[ind] for ind in seq] for t in range(len(words)): if t > 50: break plt.subplot(np.ceil(len(words) / 5.), 5, t + 1) plt.text(0, 1, '%s' % (words[t]), color='black', backgroundcolor='white', fontsize=12) plt.imshow(image) current_alpha = alphas[t, :] if smooth: alpha = skimage.transform.pyramid_expand(current_alpha.numpy(), upscale=24, sigma=8) else: alpha = skimage.transform.resize(current_alpha.numpy(), [14 * 24, 14 * 24]) if t == 0: plt.imshow(alpha, alpha=0) else: plt.imshow(alpha, alpha=0.8) plt.set_cmap(cm.Greys_r) plt.axis('off') plt.show() class AverageMeter: """ Keeps track of most recent, average, sum, and count of a metric. """ def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count
{ "repo_name": "MegaShow/college-programming", "path": "Homework/Principles of Artificial Neural Networks/Week 15 Image Caption/src/action.py", "copies": "1", "size": "7605", "license": "mit", "hash": 1808548475207819800, "line_mean": 35.9174757282, "line_max": 123, "alpha_frac": 0.5662064431, "autogenerated": false, "ratio": 3.8545362392295996, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.49207426823295997, "avg_score": null, "num_lines": null }
"""Action Preparation: Functions to prepare a source code fragment to be sticked into the lexical analyzer. This includes the following: -- pattern matches: (optional) line and column counting based on the character content of the lexeme. Many times, the character or line number count is determined by the pattern, so counting can be replaced by an addition of a constant (or even no count at all). -- end of file/stream action: If not defined by the user, send 'TERMINATION' token and return. -- failure action (no match): If not defined by the user, abort program with a message that tells that the user did not define an 'on_failure' handler. (C) 2005-2011 Frank-Rene Schaefer """ from quex.engine.generator.action_info import CodeFragment, \ PatternActionInfo from quex.engine.generator.languages.address import get_plain_strings from quex.blackboard import setup as Setup, \ E_Count import re LanguageDB = None def do(Mode, IndentationSupportF, BeginOfLineSupportF): """The module 'quex.output.cpp.core' produces the code for the state machine. However, it requires a certain data format. This function adapts the mode information to this format. Additional code is added -- for counting newlines and column numbers. This happens inside the function ACTION_ENTRY(). -- (optional) for a virtual function call 'on_action_entry()'. -- (optional) for debug output that tells the line number and column number. 
""" global LanguageDB LanguageDB = Setup.language_db assert Mode.__class__.__name__ == "Mode" variable_db = {} # -- 'on after match' action on_after_match_str = "" require_terminating_zero_preparation_f = False if Mode.has_code_fragment_list("on_after_match"): on_after_match_str, \ require_terminating_zero_preparation_f = get_code(Mode.get_code_fragment_list("on_after_match"), variable_db) # -- 'end of stream' action end_of_stream_action, db = __prepare_end_of_stream_action(Mode, IndentationSupportF, BeginOfLineSupportF) variable_db.update(db) # -- 'on failure' action (on the event that nothing matched) on_failure_action, db = __prepare_on_failure_action(Mode, BeginOfLineSupportF, require_terminating_zero_preparation_f) variable_db.update(db) # -- pattern-action pairs pattern_action_pair_list = Mode.get_pattern_action_pair_list() indentation_counter_terminal_id = Mode.get_indentation_counter_terminal_index() # Assume pattern-action pairs (matches) are sorted and their pattern state # machine ids reflect the sequence of pattern precedence. 
for pattern_info in pattern_action_pair_list: action = pattern_info.action() pattern = pattern_info.pattern() # Generated code fragments may rely on some information about the generator if hasattr(action, "data") and type(action.data) == dict: action.data["indentation_counter_terminal_id"] = indentation_counter_terminal_id prepared_action, db = __prepare(Mode, action, pattern, \ SelfCountingActionF=False, \ BeginOfLineSupportF=BeginOfLineSupportF, require_terminating_zero_preparation_f=require_terminating_zero_preparation_f) variable_db.update(db) pattern_info.set_action(prepared_action) return variable_db, \ pattern_action_pair_list, \ PatternActionInfo(None, end_of_stream_action), \ PatternActionInfo(None, on_failure_action), \ on_after_match_str Lexeme_matcher = re.compile("\\bLexeme\\b", re.UNICODE) def get_code(CodeFragmentList, variable_db={}, IndentationBase=1): global Lexeme_matcher code_str = "" for code_info in CodeFragmentList: result = code_info.get_code() if type(result) == tuple: result, add_variable_db = result variable_db.update(add_variable_db) if type(result) == list: code_str += "".join(get_plain_strings(result)) else: code_str += result # If 'Lexeme' occurs as an isolated word, then ensure the generation of # a terminating zero. Note, that the occurence of 'LexemeBegin' does not # ensure the preparation of a terminating zero. require_terminating_zero_f = (Lexeme_matcher.search(code_str) is not None) return pretty_code(code_str, IndentationBase), require_terminating_zero_f def __prepare(Mode, CodeFragment_or_CodeFragments, ThePattern, Default_ActionF=False, EOF_ActionF=False, SelfCountingActionF=False, BeginOfLineSupportF=False, require_terminating_zero_preparation_f=False): """-- If there are multiple handlers for a single event they are combined -- Adding debug information printer (if desired) -- The task of this function is it to adorn the action code for each pattern with code for line and column number counting. 
""" assert Mode.__class__.__name__ == "Mode" assert ThePattern is None or ThePattern.__class__.__name__ == "Pattern" assert type(Default_ActionF) == bool assert type(EOF_ActionF) == bool # We assume that any state machine presented here has been propperly created # and thus contains some side information about newline number, character number etc. if type(CodeFragment_or_CodeFragments) == list: assert Default_ActionF or EOF_ActionF, \ "Action code formatting: Multiple Code Fragments can only be specified for default or\n" + \ "end of stream action." CodeFragmentList = CodeFragment_or_CodeFragments else: CodeFragmentList = [ CodeFragment_or_CodeFragments ] user_code = "" variable_db = {} # (*) Code to be performed on every match -- before the related action on_match_code = "" if Mode.has_code_fragment_list("on_match"): on_match_code, rtzp_f = get_code(Mode.get_code_fragment_list("on_match"), variable_db) require_terminating_zero_preparation_f = require_terminating_zero_preparation_f or rtzp_f # (*) Code to count line and column numbers lc_count_code = "" if not SelfCountingActionF: lc_count_code = " %s\n" % __get_line_and_column_counting(ThePattern, EOF_ActionF) #if (not Default_ActionF) and (not EOF_ActionF): # lc_count_code += " __QUEX_ASSERT_COUNTER_CONSISTENCY(&self.counter);\n" # (*) THE user defined action to be performed in case of a match user_code, rtzp_f = get_code(CodeFragmentList, variable_db) require_terminating_zero_preparation_f = require_terminating_zero_preparation_f or rtzp_f store_last_character_str = "" if BeginOfLineSupportF: store_last_character_str = " %s\n" % LanguageDB.ASSIGN("me->buffer._character_before_lexeme_start", LanguageDB.INPUT_P_DEREFERENCE(-1)) set_terminating_zero_str = "" if require_terminating_zero_preparation_f: set_terminating_zero_str += " QUEX_LEXEME_TERMINATING_ZERO_SET(&me->buffer);\n" txt = "" txt += lc_count_code txt += store_last_character_str txt += set_terminating_zero_str txt += on_match_code txt += " {\n" txt += 
user_code txt += "\n }" return CodeFragment(txt), variable_db def __prepare_end_of_stream_action(Mode, IndentationSupportF, BeginOfLineSupportF): if not Mode.has_code_fragment_list("on_end_of_stream"): # We cannot make any assumptions about the token class, i.e. whether # it can take a lexeme or not. Thus, no passing of lexeme here. txt = "self_send(__QUEX_SETTING_TOKEN_ID_TERMINATION);\n" txt += "RETURN;\n" Mode.set_code_fragment_list("on_end_of_stream", CodeFragment(txt)) if IndentationSupportF: if Mode.default_indentation_handler_sufficient(): code = "QUEX_NAME(on_indentation)(me, /*Indentation*/0, LexemeNull);\n" else: code = "QUEX_NAME(%s_on_indentation)(me, /*Indentation*/0, LexemeNull);\n" % Mode.name code_fragment = CodeFragment(code) Mode.insert_code_fragment_at_front("on_end_of_stream", code_fragment) # RETURNS: end_of_stream_action, db return __prepare(Mode, Mode.get_code_fragment_list("on_end_of_stream"), None, EOF_ActionF=True, BeginOfLineSupportF=BeginOfLineSupportF) def __prepare_on_failure_action(Mode, BeginOfLineSupportF, require_terminating_zero_preparation_f): if not Mode.has_code_fragment_list("on_failure"): txt = "QUEX_ERROR_EXIT(\"\\n Match failure in mode '%s'.\\n\"\n" % Mode.name txt += " \" No 'on_failure' section provided for this mode.\\n\"\n" txt += " \" Proposal: Define 'on_failure' and analyze 'Lexeme'.\\n\");\n" Mode.set_code_fragment_list("on_failure", CodeFragment(txt)) # RETURNS: on_failure_action, db return __prepare(Mode, Mode.get_code_fragment_list("on_failure"), None, Default_ActionF=True, BeginOfLineSupportF=BeginOfLineSupportF, require_terminating_zero_preparation_f=require_terminating_zero_preparation_f) def __get_line_and_column_counting(ThePattern, EOF_ActionF): global LanguageDB if EOF_ActionF: return "__QUEX_COUNT_END_OF_STREAM_EVENT(self.counter);" if ThePattern is None: return "__QUEX_COUNT_VOID(self.counter);" newline_n = ThePattern.newline_n character_n = ThePattern.character_n if newline_n == E_Count.VOID: # Run the 
general algorithm, since not even the number of newlines in the # pattern can be determined directly from the pattern return "__QUEX_COUNT_VOID(self.counter);" elif newline_n != 0: if ThePattern.sm.get_ending_character_set().contains_only(ord('\n')): # A pattern that ends with newline, lets the next column start at one. return "__QUEX_COUNT_NEWLINE_N_FIXED_COLUMN_N_ZERO(self.counter, %i);" % newline_n # TODO: Try to determine number of characters backwards to newline directly # from the pattern state machine. (Those seldom cases won't bring much # speed-up) return "__QUEX_COUNT_NEWLINE_N_FIXED_COLUMN_N_VOID(self.counter, %i);" % newline_n # Lexeme does not contain newline --> count only columns if character_n == E_Count.VOID: incr_str = "LexemeL" else: incr_str = "%i" % int(character_n) return "__QUEX_COUNT_NEWLINE_N_ZERO_COLUMN_N_FIXED(self.counter, %s);" % incr_str def pretty_code(Code, Base): """-- Delete empty lines at the beginning -- Delete empty lines at the end -- Strip whitespace after last non-whitespace -- Propper Indendation based on Indentation Counts Base = Min. 
Indentation """ class Info: def __init__(self, IndentationN, Content): self.indentation = IndentationN self.content = Content info_list = [] no_real_line_yet_f = True indentation_set = set() for line in Code.split("\n"): line = line.rstrip() # Remove trailing whitespace if len(line) == 0 and no_real_line_yet_f: continue else: no_real_line_yet_f = False content = line.lstrip() if len(content) != 0 and content[0] == "#": indentation = 0 else: indentation = len(line) - len(content) + Base info_list.append(Info(indentation, content)) indentation_set.add(indentation) # Discretize indentation levels indentation_list = list(indentation_set) indentation_list.sort() # Collect the result result = [] # Reverse so that trailing empty lines are deleted no_real_line_yet_f = True for info in reversed(info_list): if len(info.content) == 0 and no_real_line_yet_f: continue else: no_real_line_yet_f = False indentation_level = indentation_list.index(info.indentation) result.append("%s%s\n" % (" " * indentation_level, info.content)) return "".join(reversed(result))
{ "repo_name": "coderjames/pascal", "path": "quex-0.63.1/quex/output/cpp/action_preparation.py", "copies": "1", "size": "12720", "license": "bsd-2-clause", "hash": 1869354137626608000, "line_mean": 42.8620689655, "line_max": 122, "alpha_frac": 0.6286163522, "autogenerated": false, "ratio": 3.7981487011048074, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.49267650533048074, "avg_score": null, "num_lines": null }
# action.py # --------- from game import * class Action(object): ''' An Action object would basically be used to formalize moves defined by a paticular piece and it's new position. It will be used to determine whether an action is feasable in terms of collisions etc. or not and whether an action involves a capture. ''' def __init__(self, piece, (x,y), config): self.piece = piece self.newPos = (x,y) self.color = piece.color self.promotion = False # For Pawn Promotion self.player_pieces = config.getPlayerPieces(self.color) self.enemy_pieces = config.getEnemyPieces(self.color) def toString(self): return self.piece.toString() + " -> " + str(self.newPos) def isValid(self): ''' Checks for direct position collisions with same colored pieces''' for piece in self.player_pieces: if self.newPos == piece.pos: return False return True def isCapture(self): ''' Returns whether this action results in a capture or not ''' return self.newPos in [enemypiece.pos for enemypiece in self.enemy_pieces] def capturedPiece(self): ''' Returns the piece object which was captured in the respective action ''' for enemypiece in self.enemy_pieces: if self.newPos == enemypiece.pos: return enemypiece return None
{ "repo_name": "AhanM/ChessAI", "path": "action.py", "copies": "1", "size": "1274", "license": "mit", "hash": 7276196274153970000, "line_mean": 25.0204081633, "line_max": 76, "alpha_frac": 0.7064364207, "autogenerated": false, "ratio": 3.192982456140351, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9166956586352428, "avg_score": 0.0464924580975845, "num_lines": 49 }
"""Actions defined for Java Web Applications that are installed into Tomcat. """ import os import os.path import urllib import fixup_python_path from engage.drivers.action import * from engage.drivers.action import _check_file_exists import engage.utils.http as httputils import engage.utils.log_setup logger = engage.utils.log_setup.setup_engage_logger(__name__) from engage.utils.user_error import EngageErrInf, UserError, convert_exc_to_user_error import gettext _ = gettext.gettext errors = { } def define_error(error_code, msg): global errors error_info = EngageErrInf(__name__, error_code, msg) errors[error_info.error_code] = error_info # error codes ERR_TOMCAT_STATREQ = 1 ERR_TOMCAT_BADSTAT = 2 ERR_TOMCAT_NOSTAT = 3 ERR_TOMCAT_START = 4 ERR_TOMCAT_STOP = 5 ERR_TOMCAT_DEPLOY = 6 ERR_TOMCAT_UNDEPLOY = 7 define_error(ERR_TOMCAT_STATREQ, _("An error occurred when attempting to obtain the status of the Apache Tomcat applications in resource %(id)s: %(status)s")) define_error(ERR_TOMCAT_BADSTAT, _("Tomcat server returned unexpected status '%(stat)s' for application '%(path)s' in resource %(id)s")) define_error(ERR_TOMCAT_NOSTAT, _("Tomcat server did not return a status entry for application '%(path)s' in resource %(id)s. 
Perhaps that application was not deployed successfully.")) define_error(ERR_TOMCAT_START, _("Tomcat server was unable to start application '%(path)s' for resource %(id)s: %(response)s.")) define_error(ERR_TOMCAT_STOP, _("Tomcat server was unable to stop application '%(path)s' for resource %(id)s: %(response)s.")) define_error(ERR_TOMCAT_DEPLOY, _("Tomcat server was unable to deploy application '%(path)s' for resource %(id)s: %(response)s.")) define_error(ERR_TOMCAT_UNDEPLOY, _("Tomcat server was unable to undeploy application '%(path)s' for resource %(id)s: %(response)s.")) MANAGER_REALM = "Tomcat Manager Application" def _make_request(uri, user, password): return httputils.make_request_with_basic_authentication(uri, MANAGER_REALM, user, password, logger=logger) _args_format_string = \ '%(name)s {"hostname":"%(hostname)s", "manager_port":%(port)d, "admin_user":"%(user)s", "admin_password":"****"}, %(app_path)s' def status_request_fn(resource_id, server_host, server_port, app_path, user, password, install_check=False): """Return true if app running, false if stopped. Throw an error otherwise. If install_check is True, then we are checking whether the app is installed at all. If not, return None instead of throwing an error. 
""" result = _make_request("http://%s:%d/manager/list" % (server_host, server_port), user, password) if result[0:2]!="OK": status = result.split("\n")[0] logger.error("Tomcat status request failed: %s" % status) raise UserError(errors[ERR_TOMCAT_STATREQ], msg_args={"id":resource_id, "status": status}) content = result.split("\n")[1:] for line in content: fields = line.split(":") if fields[0]!=app_path: continue status = fields[1] if status=="running": return True elif status=="stopped": return False else: raise UserError(errors[ERR_TOMCAT_BADSTAT], msg_args={"id":resource_id, "status":status, "path":app_path}) # if we get here, didn't find the app if install_check: return None else: raise UserError(errors[ERR_TOMCAT_NOSTAT], msg_args={"id":resource_id, "path":app_path}) class status_request(ValueAction): """ValueAction: Request the status of the specified Tomcat web application. Takes the 'tomcat' output port from the apache-tomcat resource and the application path (url). """ NAME = "tomcat_utils.status_request" def __init__(self, ctx): ValueAction.__init__(self, ctx) def run(self, tomcat_port, app_path, admin_password): return status_request_fn(self.ctx.props.id, tomcat_port.hostname, tomcat_port.manager_port, app_path, tomcat_port.admin_user, admin_password) def dry_run(self, tomcat_port, app_path, admin_password): return None def format_action_args(action_name, tomcat_port, app_path, admin_password): return _args_format_string % { "name":action_name, "hostname": tomcat_port.hostname, "port":tomcat_port.manager_port, "user":tomcat_port.admin_user, "app_path":app_path } class is_app_installed(ValueAction): """ValueAction: Return True if app is installed (either stopped or running) and False otherwise. Takes the 'tomcat' output port from the apache-tomcat resource and the application path (url). 
""" NAME = "tomcat_utils.is_app_installed" def __init__(self, ctx): ValueAction.__init__(self, ctx) def run(self, tomcat_port, app_path, admin_password): result = status_request_fn(self.ctx.props.id, tomcat_port.hostname, tomcat_port.manager_port, app_path, tomcat_port.admin_user, admin_password, install_check=True) return result!=None def dry_run(self, tomcat_port, app_path, admin_password): return None def format_action_args(action_name, tomcat_port, app_path, admin_password): return _args_format_string % { "name":action_name, "hostname": tomcat_port.hostname, "port":tomcat_port.manager_port, "user":tomcat_port.admin_user, "app_path":app_path } class WarAction(Action): """Base class for WAR file actions. """ def __init__(self, ctx): Action.__init__(self, ctx) def dry_run(self, tomcat_port, app_path, admin_password): return None def format_action_args(action_name, tomcat_port, app_path, admin_password): return _args_format_string % { "name":action_name, "hostname": tomcat_port.hostname, "port":tomcat_port.manager_port, "user":tomcat_port.admin_user, "app_path":app_path } def start_app_fn(resource_id, server_host, server_port, app_path, user, password): result = _make_request("http://%s:%d/manager/start?%s" % (server_host, server_port, urllib.urlencode({"path":app_path})), user, password) if result[0:2]!="OK": raise UserError(errors[ERR_TOMCAT_START], msg_args={"id":resource_id, "path":app_path, "response": result}) class start_app(WarAction): """Action: Start a java web appliction via the tomcat manager http api. 
""" NAME = "tomcat_utils.start_app" def __init__(self, ctx): WarAction.__init__(self, ctx) def run(self, tomcat_port, app_path, admin_pasword): start_app_fn(self.ctx.props.id, tomcat_port.hostname, tomcat_port.manager_port, app_path, tomcat_port.admin_user, admin_password) def stop_app_fn(resource_id, server_host, server_port, app_path, user, password): result = _make_request("http://%s:%d/manager/stop?%s" % (server_host, server_port, urllib.urlencode({"path":app_path})), user, password) if result[0:2]!="OK": raise UserError(errors[ERR_TOMCAT_STOP], msg_args={"id":resource_id, "path":app_path, "response": result}) class stop_app(WarAction): """Action: Stop a java web appliction via the tomcat manager http api. """ NAME = "tomcat_utils.stop_app" def __init__(self, ctx): WarAction.__init__(self, ctx) def run(self, tomcat_port, app_path, admin_password): stop_app_fn(self.ctx.props.id, tomcat_port.hostname, tomcat_port.manager_port, app_path, tomcat_port.admin_user, admin_password) def deploy_app_fn(resource_id, server_host, server_port, app_path, app_war_file, user, password, update=False, tag=None): params = {"path":app_path} params["war"] = "file:" + \ os.path.abspath(os.path.expanduser(app_war_file)) if update: params["update"] = "true" if tag: params["tag"] = tag uri = "http://%s:%d/manager/deploy?%s" % \ (server_host, server_port, urllib.urlencode(params)) with open(app_war_file, "rb") as f: result = \ httputils.make_request_with_basic_authentication( uri, MANAGER_REALM, user, password, logger=logger) # This version tried to send the file directly using PUT. # Unfortunately, the python code seems to be expecting unicode # data and errors out. Not sure what kind of encoding the # tomcat side is expecting. 
## httputils.make_request_with_basic_authentication( ## uri, MANAGER_REALM, user, password, ## data=f.read(), ## content_type="application/java-archive", ## request_method="PUT", ## logger=logger) if result[0:2]!="OK": raise UserError(errors[ERR_TOMCAT_DEPLOY], msg_args={"id":resource_id, "path":app_path, "response": result}, developer_msg="uri=%s" % uri) _deploy_args_format_string = \ '%(name)s {"hostname":"%(hostname)s", "manager_port":%(port)d, "admin_user":"%(user)s", "admin_password":"****"}, %(app_path)s, %(warfile)s, update=%(update)s, tag=%(tag)s' class deploy_app(Action): """Action: Deploy a java web appliction via the tomcat manager http api. """ NAME = "tomcat_utils.deploy_app" def __init__(self, ctx): Action.__init__(self, ctx) def run(self, tomcat_port, app_path, warfile_path, admin_password, update=False, tag=None): _check_file_exists(warfile_path, self) deploy_app_fn(self.ctx.props.id, tomcat_port.hostname, tomcat_port.manager_port, app_path, warfile_path, tomcat_port.admin_user, admin_password, update=update, tag=tag) def dry_run(self, tomcat_port, app_path, warfile_path, admin_password, update=False, tag=None): _check_file_exists(warfile_path, self) def format_action_args(action_name, tomcat_port, app_path, warfile_path, admin_password, update=False, tag=None): return _deploy_args_format_string % { "name":action_name, "hostname": tomcat_port.hostname, "port":tomcat_port.manager_port, "user":tomcat_port.admin_user, "app_path":app_path, "warfile":warfile_path, "update":str(update), "tag":str(tag) } def undeploy_app_fn(resource_id, server_host, server_port, app_path, user, password): result = _make_request("http://%s:%d/manager/undeploy?%s" % (server_host, server_port, urllib.urlencode({"path":app_path})), user, password) if result[0:2]!="OK": raise UserError(errors[ERR_TOMCAT_UNDEPLOY], msg_args={"id":resource_id, "path":app_path, "response": result}) class updeploy_app(WarAction): """Action: Updeploy a java web appliction via the tomcat manager http 
api. """ NAME = "tomcat_utils.undeploy_app" def __init__(self, ctx): WarAction.__init__(self, ctx) def run(self, tomcat_port, app_path, admin_password): undeploy_app_fn(self.ctx.props.id, tomcat_port.hostname, tomcat_port.manager_port, app_path, tomcat_port.admin_user, admin_password) @make_value_action def _check_tomcat_url(self, tomcat_port): if httputils.check_url(tomcat_port.hostname, tomcat_port.manager_port, "/", self.ctx.logger): return True else: return False def ensure_tomcat_running(ctx, tomcat_port): """This is a sequence of actions that verifies that tomcat is up, starting it if necessary. """ pid = ctx.rv(get_server_status, tomcat_port.pid_file) if pid == None: startup_script = os.path.join(tomcat_port.home, "bin/startup.sh") ctx.r(run_program, [startup_script], cwd=os.path.dirname(startup_script)) # wait for pid file ctx.check_poll(10, 1.5, lambda pid: pid!=None, get_server_status, tomcat_port.pid_file) ctx.logger.debug("Tomcat started successfully") else: ctx.logger.debug("Tomcat already running") # wait for a response on the tomcat url ctx.check_poll(10, 1.5, lambda rsp: rsp, _check_tomcat_url, tomcat_port) def ensure_tomcat_stopped(ctx, tomcat_port): """This is a sequence of actions that verifies tomcat is stopped, stopping it if necessary. """ pid = ctx.rv(get_server_status, tomcat_port.pid_file) if pid != None: shutdown_script = os.path.join(tomcat_port.home, "bin/shutdown.sh") ctx.r(run_program, [shutdown_script], cwd=os.path.dirname(shutdown_script)) # wait for pid file ctx.check_poll(10, 1.5, lambda pid: pid==None, get_server_status, tomcat_port.pid_file) ctx.logger.debug("Tomcat stopped successfully") else: ctx.logger.debug("Tomcat already stopped") def tst_lifecycle(server_host, server_port, app_path, warfile_path, user, password): """Given a warfile and the server info, test the full lifecycle. 
""" rid = os.path.basename(warfile_path) print "deploying app" deploy_app_fn(rid, server_host, server_port, app_path, warfile_path, user, password) status = status_request_fn(rid, server_host, server_port, app_path, user, password) assert status==True, "App was not started in deployment" print "stopping app" stop_app_fn(rid, server_host, server_port, app_path, user, password) status = status_request_fn(rid, server_host, server_port, app_path, user, password) assert status==False, "App was not stopped" print "restarting app" start_app_fn(rid, server_host, server_port, app_path, user, password) status = status_request_fn(rid, server_host, server_port, app_path, user, password) assert status==True, "App was not restarted" print "undeploying app" undeploy_app_fn(rid, server_host, server_port, app_path, user, password) print "lifecycle test complete"
{ "repo_name": "quaddra/engage", "path": "python_pkg/engage/drivers/genforma/tomcat_utils.py", "copies": "1", "size": "16336", "license": "apache-2.0", "hash": -3056628846791111700, "line_mean": 38.2692307692, "line_max": 172, "alpha_frac": 0.5528893242, "autogenerated": false, "ratio": 3.8150397010742645, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4867929025274264, "avg_score": null, "num_lines": null }
""" Action set for the Oracl plugin """ # Copyright (C) 2009-2010, Ecole Polytechnique Federale de Lausanne (EPFL) and # University Hospital Center and University of Lausanne (UNIL-CHUV) # # Modified BSD License # Enthought library imports from envisage.ui.action.api import Action, Group, Menu, ToolBar from envisage.ui.workbench.api import WorkbenchActionSet networkrepo = Action( id = "OracleCNetworkReport", class_name = "cviewer.plugins.codeoracle.actions.NetworkReport", name = "Network Report", path = "MenuBar/Code Oracle/Connectome/CNetwork/Analysis" ) xnat_pushpull = Action( id = "OracleXNATPushPull", class_name = "cviewer.plugins.codeoracle.actions.XNATPushPull", name = "XNAT Push and Pull", path = "MenuBar/Code Oracle/Other/XNAT" ) show_surface = Action( id = "OracleCSurface", class_name = "cviewer.plugins.codeoracle.actions.ShowSurfaces", name = "Show Surface", path = "MenuBar/Code Oracle/Connectome/CSurface/Visualization" ) show_volumecre = Action( id = "OracleCVolumeCre", class_name = "cviewer.plugins.codeoracle.actions.ShowVolumes", name = "Volume Creation", path = "MenuBar/Code Oracle/Connectome/CVolume/Visualization" ) show_network = Action( id = "OracleCNetwork3D", class_name = "cviewer.plugins.codeoracle.actions.ShowNetworks", name = "3D Network", path = "MenuBar/Code Oracle/Connectome/CNetwork/Visualization" ) show_network2 = Action( id = "OracleCNetwork3D2", class_name = "cviewer.plugins.codeoracle.actions.NetworkVizTubes", name = "3D Network (with tubes and node color)", path = "MenuBar/Code Oracle/Connectome/CNetwork/Visualization" ) connection_matrix = Action( id = "OracleCNetworkMat", class_name = "cviewer.plugins.codeoracle.actions.ConnectionMatrix", name = "Connection Matrix", path = "MenuBar/Code Oracle/Connectome/CNetwork/Visualization" ) simple_connection_matrix = Action( id = "OracleCNetworkSimpleMat", class_name = "cviewer.plugins.codeoracle.actions.SimpleConnectionMatrix", name = "Simple Connection Matrix", path = "MenuBar/Code 
Oracle/Connectome/CNetwork/Visualization" ) writegexf = Action( id = "OracleCNetworkWriteGEXF", class_name = "cviewer.plugins.codeoracle.actions.WriteGEXF", name = "Write Gephi GEXF", path = "MenuBar/Code Oracle/Connectome/CNetwork/Analysis" ) compute_nbs = Action( id = "OracleNBS", class_name = "cviewer.plugins.codeoracle.actions.ComputeNBS", name = "Network-based statistic (NBS)", path = "MenuBar/Code Oracle/Statistics" ) show_tracks = Action( id = "OracleShowTracks", class_name = "cviewer.plugins.codeoracle.actions.ShowTracks", name = "Tracks between regions", path = "MenuBar/Code Oracle/Connectome/CTrack/Visualization" ) cortico_cortico = Action( id = "OracleCorticoCorticoTracks", class_name = "cviewer.plugins.codeoracle.actions.CorticoCortico", name = "Extract cortico-cortico fiber tracks", path = "MenuBar/Code Oracle/Connectome/CTrack/Analysis" ) nipype_bet = Action( id = "OracleNipypeBet", class_name = "cviewer.plugins.codeoracle.actions.NipypeBet", name = "Brain extraction using BET", path = "MenuBar/Code Oracle/Other/Nipype" ) class OracleActionSet(WorkbenchActionSet): """ The actionset for the Oracle plugin """ id = "cviewer.plugins.codeoracle.action_set" actions = [ show_surface, show_network, show_network2, compute_nbs, show_volumecre, connection_matrix, simple_connection_matrix, show_tracks, cortico_cortico, xnat_pushpull, nipype_bet, networkrepo, writegexf ]
{ "repo_name": "LTS5/connectomeviewer", "path": "cviewer/plugins/codeoracle/oracle_action_set.py", "copies": "1", "size": "4159", "license": "bsd-3-clause", "hash": 1327801376618059800, "line_mean": 31.7480314961, "line_max": 80, "alpha_frac": 0.6301995672, "autogenerated": false, "ratio": 3.5067453625632377, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9502624664055025, "avg_score": 0.02686405314164248, "num_lines": 127 }
"""Actions for aws_cdn plugin""" from __future__ import absolute_import, division, print_function, with_statement, unicode_literals # Module for generating hashes for files that match a glob, and putting that # hash in redis to allow us to generate cache-busting URLs later import os import oz import oz.redis import oz.aws_cdn @oz.action def cache_busting_scan(*prefixes): """ (Re-)generates the cache buster values for all files with the specified prefixes. """ redis = oz.redis.create_connection() pipe = redis.pipeline() # Get all items that match any of the patterns. Put it in a set to # prevent duplicates. if oz.settings["s3_bucket"]: bucket = oz.aws_cdn.get_bucket() matches = set([oz.aws_cdn.S3File(key) for prefix in prefixes for key in bucket.list(prefix)]) else: matches = set([]) static_path = oz.settings["static_path"] for root, _, filenames in os.walk(static_path): for filename in filenames: path = os.path.relpath(os.path.join(root, filename), static_path) for prefix in prefixes: if path.startswith(prefix): matches.add(oz.aws_cdn.LocalFile(static_path, path)) break # Set the cache busters for f in matches: file_hash = f.hash() print(file_hash, f.path()) oz.aws_cdn.set_cache_buster(pipe, f.path(), file_hash) pipe.execute()
{ "repo_name": "dailymuse/oz", "path": "oz/aws_cdn/actions.py", "copies": "1", "size": "1493", "license": "bsd-3-clause", "hash": 5309358225404943000, "line_mean": 30.7659574468, "line_max": 101, "alpha_frac": 0.6222371065, "autogenerated": false, "ratio": 3.7989821882951653, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4921219294795165, "avg_score": null, "num_lines": null }
"""Actions for the help menu """ # Copyright (C) 2009-2010, Ecole Polytechnique Federale de Lausanne (EPFL) and # University Hospital Center and University of Lausanne (UNIL-CHUV) # # Modified BSD License # Enthought library imports from pyface.action.api import Action from traitsui.api import auto_close_message from mayavi.preferences.api import preference_manager from pyface.image_resource import ImageResource # Connectome Viewer imports from common import IMAGE_PATH def browser_open(url, decorated = False): import os import sys if sys.platform == 'darwin': os.system('open %s &' % url) else: import webbrowser if webbrowser._iscommand('firefox') and \ preference_manager.root.open_help_in_light_browser: # Firefox is installed, let's use it, we know how to make it # chromeless. if decorated: webbrowser.open(url, autoraise=1) else: firefox = webbrowser.get('firefox') firefox._invoke(['-chrome', url], remote=False, autoraise=True) else: webbrowser.open(url, autoraise=1) class Bugfix(Action): """ An action that pop up the bugfix GitHub page in a browser. """ name = "Bugfixes" tooltip = "Bug Fixes ..." description = "Bug Fixes ..." image = ImageResource("bug.png", search_path=[IMAGE_PATH]) ########################################################################### # 'Action' interface. ########################################################################### def perform(self, event): """ Performs the action. """ browser_open(url='http://github.com/LTS5/connectomeviewer/issues', decorated = True) class Keybindings(Action): """ An action that creates a temporary html file to show the key bindings.. """ name = "Key Bindings" tooltip = "Show Key Bindings in Browser" description = "Key Bindings" image = ImageResource("keyboard.png", search_path=[IMAGE_PATH]) ########################################################################### # 'Action' interface. 
########################################################################### def perform(self, event): """ Performs the action. """ import os.path browser_open(url=os.path.join(IMAGE_PATH, '..', 'keybindings', 'index.html'), decorated = True) ###################################################################### # `HelpIndex` class. ###################################################################### class HelpIndex(Action): """ An action that pop up the help in a browser. """ name = "Help" tooltip = "The Connectome Viewer User Guide" description = "The Connectome Viewer User Guide" image = ImageResource("help-browser.png", search_path=[IMAGE_PATH]) ########################################################################### # 'Action' interface. ########################################################################### def perform(self, event): """ Performs the action. """ auto_close_message("Opening help in web browser...") browser_open(url='http://www.connectomeviewer.org/documentation', decorated = True)
{ "repo_name": "LTS5/connectomeviewer", "path": "cviewer/action/help.py", "copies": "1", "size": "3301", "license": "bsd-3-clause", "hash": 3500136555899027000, "line_mean": 36.5113636364, "line_max": 103, "alpha_frac": 0.518024841, "autogenerated": false, "ratio": 4.610335195530726, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5628360036530726, "avg_score": null, "num_lines": null }
"""Actions for the help menu. """ # Authors: Gael Varoquaux <gael.varoquaux[at]normalesup.org> # Prabhu Ramachandran # Copyright (c) 2007-2008, Enthought, Inc. # License: BSD Style. # Standard library imports. from os import path import os import sys from os.path import join, dirname # Enthought library imports. from pyface.action.api import Action from traitsui.api import auto_close_message # Local imports import mayavi.api from mayavi.core.common import error from mayavi.preferences.api import preference_manager # To find the html documentation directory, first look under the # standard place. If that directory doesn't exist, assume you # are running from the source. local_dir = dirname(mayavi.api.__file__) HTML_DIR = join(local_dir, 'html') if not path.exists(HTML_DIR): HTML_DIR = join(dirname(dirname(local_dir)), 'build', 'docs', 'html', 'mayavi') if not path.exists(HTML_DIR): HTML_DIR = None def browser_open(url): if sys.platform == 'darwin': os.system('open %s &' % url) else: import webbrowser if webbrowser._iscommand('firefox') and \ preference_manager.root.open_help_in_light_browser: # Firefox is installed, let's use it, we know how to make it # chromeless. firefox = webbrowser.get('firefox') firefox._invoke(['-chrome', url], remote=False, autoraise=True) else: webbrowser.open(url, autoraise=1) def open_help_index(): """ Open the mayavi user manual index in a browser. """ # If the HTML_DIR was found, bring up the documentation in a # web browser. Otherwise, bring up an error message. if HTML_DIR: auto_close_message("Opening help in web browser...") browser_open(join(HTML_DIR, 'index.html')) else: error("Could not find the user guide in your installation " \ "or the source tree.") def open_tvtk_docs(): """ Open the TVTK class browser. """ from tvtk.tools.tvtk_doc import TVTKClassChooser TVTKClassChooser().edit_traits() ###################################################################### # `HelpIndex` class. 
###################################################################### class HelpIndex(Action): """ An action that pop up the help in a browser. """ tooltip = "The Mayavi2 user guide" description = "The Mayavi2 user guide" ########################################################################### # 'Action' interface. ########################################################################### def perform(self, event): """ Performs the action. """ open_help_index() ###################################################################### # `TVTKClassBrowser` class. ###################################################################### class TVTKClassBrowser(Action): """ An action that opens the tvtk interactive class browser. """ tooltip = "The TVTK interactive class browser" description = "The TVTK interactive class browser" ########################################################################### # 'Action' interface. ########################################################################### def perform(self, event): """ Performs the action. """ open_tvtk_docs()
{ "repo_name": "dmsurti/mayavi", "path": "mayavi/action/help.py", "copies": "3", "size": "3424", "license": "bsd-3-clause", "hash": 4324595490206150000, "line_mean": 31.6095238095, "line_max": 79, "alpha_frac": 0.5286214953, "autogenerated": false, "ratio": 4.36734693877551, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6395968434075511, "avg_score": null, "num_lines": null }
# Actions implemented:
#   Income
#   Foreign Aid
#   Coup
#   Duke
#   Captain
#   Contessa
#   Assassin
#   Ambassador

# Hardcoded value
#   ForceCoupCoins

from core.errors import *
from core.game import GameState

# A player holding at least this many coins is forced to Coup on their turn.
ForceCoupCoins = 10


class Action:
    """Base class for every Coup action card.

    Class attributes describe the card; subclasses override play().
    """
    name = ""           # display name of the action
    description = ""    # human-readable rules text
    blocks = []         # names of actions this card can block
    hasTarget = False   # True if play() requires a target player
    coinsNeeded = 0     # coins the acting player must hold to play

    def play(self, player, target = None):
        """Should be overridden by child classes.

        Returns (status, response) where
          status: True/False if action is successful or not
          response: String explaining status. Usually reserved for an
                    explanation of why an action failed.
        Example:
          return True, "Success"
          return False, "Failed because it was blocked"
        """
        return False, None


class Income(Action):
    name = "Income"
    description = "Gain 1 gold"

    def play(self, player, target = None):
        # Income always succeeds and cannot be blocked.
        player.coins += 1
        return True, "Success"


class ForeignAid(Action):
    name = "Foreign Aid"
    description = "Gain 2 gold"

    def play(self, player, target = None):
        # Blocking (by a Duke) is resolved by the caller, not here.
        player.coins += 2
        return True, "Success"


class Coup(Action):
    name = "Coup"
    description = "Pay 7 gold to remove target player's influence"
    hasTarget = True
    coinsNeeded = 7

    def play(self, player, target = None):
        """Pay the coup cost and remove one influence from target.

        Raises NotEnoughCoins, TargetRequired or InvalidTarget.
        """
        if player.coins < self.coinsNeeded:
            raise NotEnoughCoins(self.coinsNeeded)

        if target == None:
            raise TargetRequired

        # target should be alive
        if not target.alive:
            raise InvalidTarget("Target is dead")

        # Use coinsNeeded instead of duplicating the literal cost.
        player.coins -= self.coinsNeeded
        target.loseInfluence()
        return True, "Success"


class Duke(Action):
    name = "Duke"
    description = "Gain 3 gold. Blocks Foreign Aid."
    blocks = ["Foreign Aid"]

    def play(self, player, target = None):
        player.coins += 3
        return True, "Success"


class Captain(Action):
    name = "Captain"
    description = "Steal 2 gold from target. Blocks Steal."
    blocks = ["Captain"]
    hasTarget = True

    def play(self, player, target = None):
        """Steal up to 2 coins from target (limited by what they hold)."""
        if target == None:
            raise TargetRequired

        # min() replaces the old branch logic and its unreachable
        # negative-coins guard; behavior is unchanged.
        steal = min(2, target.coins)
        target.coins -= steal
        player.coins += steal
        return True, "Success"


class Contessa(Action):
    name = "Contessa"
    description = "Blocks Assasination."
    blocks = ["Assassin"]

    def play(self, player, target = None):
        # The Contessa has no active play; it can only block.
        raise BlockOnly


class Assassin(Action):
    name = "Assassin"
    description = "Assasinate. Pay 3 coins to kill a player's influence."
    blocks = []
    hasTarget = True
    coinsNeeded = 3

    def play(self, player, target = None):
        """Pay 3 coins to remove one influence from target.

        Raises NotEnoughCoins, TargetRequired or InvalidTarget.
        """
        if player.coins < self.coinsNeeded:
            raise NotEnoughCoins(self.coinsNeeded)

        if target == None:
            raise TargetRequired

        # Consistency fix: like Coup, refuse to target an eliminated player.
        if not target.alive:
            raise InvalidTarget("Target is dead")

        player.coins -= self.coinsNeeded
        target.loseInfluence()
        return True, "Success"


class Ambassador(Action):
    name = "Ambassador"
    description = "Exchange your influence with 2 cards from the Court Deck. Blocks Steal."
    blocks = ["Captain"]

    def play(self, player, target = None):
        """Let the player exchange influence with 2 drawn Court Deck cards.

        The player chooses exactly as many cards as they currently have
        influence, from (current influence + 2 drawn cards); the rest go
        back to the deck. Raises InvalidTarget on a bad selection.
        """
        influenceRemaining = len(player.influence)

        # Offer the player their current influence plus two drawn cards.
        choices = list(player.influence)
        deckCards = [GameState.DrawCard(), GameState.DrawCard()]
        choices.append(deckCards[0])
        choices.append(deckCards[1])

        newInfluence = player.selectAmbassadorInfluence(list(choices), influenceRemaining)
        if type(newInfluence) != list:
            newInfluence = [newInfluence]

        def ReturnCards():
            # Undo the draw so a failed exchange does not leak cards.
            GameState.AddToDeck(deckCards[0])
            GameState.AddToDeck(deckCards[1])

        if len(newInfluence) != influenceRemaining:
            # There is a missing card. Try again.
            ReturnCards()
            raise InvalidTarget("Wrong number of cards given")

        choicesCopy = list(choices)  # this allow us to test for card duplicates
        for card in newInfluence:
            if not card in choicesCopy:
                # something is wrong. The player sent a card choice that is
                # not part of the original choices (or a duplicate).
                # try again.
                ReturnCards()
                raise InvalidTarget("Card given not part of valid choices")
            choicesCopy.remove(card)

        # give the player their new cards
        player.influence = list(newInfluence)

        # return the unselected cards back to the Court Deck.
        for card in newInfluence:
            choices.remove(card)
        for card in choices:
            GameState.AddToDeck(card)

        return True, "Success"
{ "repo_name": "MrValdez/Coup", "path": "core/action.py", "copies": "1", "size": "5100", "license": "mit", "hash": 2853695046839659500, "line_mean": 27.813559322, "line_max": 109, "alpha_frac": 0.5690196078, "autogenerated": false, "ratio": 4.153094462540716, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.025427373682951792, "num_lines": 177 }
Actions = []                # every instantiated Action registers itself here
States = {}                 # State -> string name used in the POMDP file
Observations = {}           # Observation -> string name
ObservationGenerators = {}  # action name -> generator of (Observation, prob)


class Action:
    """Base class for POMDP actions; instances self-register in Actions."""

    def __init__(self):
        Actions.append(self)

    def __hash__(cls):
        # Hash on the class name. The previous implementation returned the
        # name string itself, which is not a valid __hash__ return value
        # and raised TypeError whenever an Action was actually hashed.
        return hash(cls.__name__)
    __hash__ = classmethod(__hash__)

    def __str__(cls):
        return cls.__name__
    __str__ = classmethod(__str__)

    def act(self, state):
        """Get resulting state of the action in a state.

        Takes a State.
        Returns a list of (State,Prob):
        State is the next state after taking the action
        and Prob is a float percentage likelihood of that next state.
        """
        raise NotImplementedError

    def reward(self, state):
        """Get resulting reward of the action in a state.

        Takes a State.
        Returns a list of (Reward,Prob):
        Reward is an integer representation of the immediate reward
        and Prob is a float percentage likelihood of that reward.
        """
        raise NotImplementedError

    def rewards(self):
        """Generate the set of all reward producing conditions for this action.

        Returns a list of tuples of (State,Reward,Prob):
        State is either a wildcard or a State,
        Reward is an integer representation of the immediate reward
        and Prob is a float percentage likelihood of that reward.
        """
        # NOTE(review): this yields the *list* returned by reward() for each
        # state, while generateRewardFn unpacks three values per item.
        # Subclasses appear to rely on overriding rewards() directly
        # (see ConsistentCostAction); confirm before using this base version.
        for state in States:
            yield self.reward(state)


class ConsistentCostAction(Action):
    """An action whose reward is a fixed cost looked up in cls.ActionCosts."""

    def reward(cls, State):
        return [(State, cls.ActionCosts[cls.__name__], 1.0), ]
    reward = classmethod(reward)

    def rewards(cls):
        # A single wildcard entry: the cost applies in every state.
        return [('*', cls.ActionCosts[cls.__name__], 1.0), ]
    rewards = classmethod(rewards)


## Transition Function ##

def generateTransitionFn():
    """generates transition lines for POMDP.
    Lines have the form:
    print 'T:', Action, ':', StartState, ':', EndState, Probability
    """
    for startState, startStateStr in States.items():
        for action in Actions:
            for endState, prob in action.act(startState):
                yield 'T: %s : %s : %s %1.3f\n' % (action, startStateStr, States[endState], prob)


## Observation Function #

def generateObservationFn():
    """generates observation lines for POMDP.
    Lines have the form:
    print 'O :', Action, ':', State, ':', Observation, Probability
    """
    for state, stateStr in States.items():
        for action, obsGenerator in ObservationGenerators.items():
            for observ, prob in obsGenerator(state):
                yield 'O : %s : %s : %s %1.3f\n' % (action, stateStr, Observations[observ], prob)


## Reward Function ##

def generateRewardFn():
    """generates reward lines for POMDP.
    Lines have the form:
    R: <action> : <start-state> : <end-state> : <observation> <reward>%f
    """
    for action in Actions:
        for state, reward, prob in action.rewards():
            yield 'R : %s : %s : * : * : %i \n' % (action, state, reward)


def writePOMDPfile(FileName, POMDP_sets, d):
    """Write the full POMDP specification to FileName.

    POMDP_sets maps section names to zero-argument generators.
    """
    # NOTE(review): locals().update(d) does not reliably inject names into
    # a function's local scope in CPython -- confirm what `d` is meant to do.
    locals().update(d)
    POMDPFILE = open(FileName, 'w')
    for name in ['discount', 'values', 'actions', 'observations', 'states', 'start', ]:
        generator = POMDP_sets[name]
        POMDPFILE.write('%(name)s: ' % locals())
        POMDPFILE.write(' '.join([str(val) for val in generator()]))
        POMDPFILE.write('\n')
    for transition in generateTransitionFn():
        POMDPFILE.write(transition)
    for observation in generateObservationFn():
        POMDPFILE.write(observation)
    for reward in generateRewardFn():
        POMDPFILE.write(reward)
    POMDPFILE.close()
{ "repo_name": "ronaldahmed/SLAM-for-ugv", "path": "neural-navigation-with-lstm/MARCO/POMDP/MakePOMDP.py", "copies": "2", "size": "3504", "license": "mit", "hash": -5906448197699598000, "line_mean": 34.3939393939, "line_max": 94, "alpha_frac": 0.6247146119, "autogenerated": false, "ratio": 3.8933333333333335, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.024802897256549022, "num_lines": 99 }
# actions.py from .exceptions import ParseException from .util import col def matchOnlyAtCol(n): """Helper method for defining parse actions that require matching at a specific column in the input text. """ def verifyCol(strg, locn, toks): if col(locn, strg) != n: raise ParseException(strg, locn, "matched token not at column %d" % n) return verifyCol def replaceWith(replStr): """Helper method for common parse actions that simply return a literal value. Especially useful when used with :class:`transformString<ParserElement.transformString>` (). Example:: num = Word(nums).setParseAction(lambda toks: int(toks[0])) na = oneOf("N/A NA").setParseAction(replaceWith(math.nan)) term = na | num OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234] """ return lambda s, l, t: [replStr] def removeQuotes(s, l, t): """Helper parse action for removing quotation marks from parsed quoted strings. Example:: # by default, quotation marks are included in parsed results quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"] # use removeQuotes to strip quotation marks from parsed results quotedString.setParseAction(removeQuotes) quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"] """ return t[0][1:-1] def withAttribute(*args, **attrDict): """Helper to create a validating parse action to be used with start tags created with :class:`makeXMLTags` or :class:`makeHTMLTags`. Use ``withAttribute`` to qualify a starting tag with a required attribute value, to avoid false matches on common tags such as ``<TD>`` or ``<DIV>``. Call ``withAttribute`` with a series of attribute names and values. 
Specify the list of filter attributes names and values as: - keyword arguments, as in ``(align="right")``, or - as an explicit dict with ``**`` operator, when an attribute name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}`` - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))`` For attribute names with a namespace prefix, you must use the second form. Attribute names are matched insensitive to upper/lower case. If just testing for ``class`` (with or without a namespace), use :class:`withClass`. To verify that the attribute exists, but without specifying a value, pass ``withAttribute.ANY_VALUE`` as the value. Example:: html = ''' <div> Some text <div type="grid">1 4 0 1 0</div> <div type="graph">1,3 2,3 1,1</div> <div>this has no type</div> </div> ''' div,div_end = makeHTMLTags("div") # only match div tag having a type attribute with value "grid" div_grid = div().setParseAction(withAttribute(type="grid")) grid_expr = div_grid + SkipTo(div | div_end)("body") for grid_header in grid_expr.searchString(html): print(grid_header.body) # construct a match with any div tag having a type attribute, regardless of the value div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE)) div_expr = div_any_type + SkipTo(div | div_end)("body") for div_header in div_expr.searchString(html): print(div_header.body) prints:: 1 4 0 1 0 1 4 0 1 0 1,3 2,3 1,1 """ if args: attrs = args[:] else: attrs = attrDict.items() attrs = [(k, v) for k, v in attrs] def pa(s, l, tokens): for attrName, attrValue in attrs: if attrName not in tokens: raise ParseException(s, l, "no matching attribute " + attrName) if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue: raise ParseException( s, l, "attribute {!r} has value {!r}, must be {!r}".format( attrName, tokens[attrName], attrValue ), ) return pa withAttribute.ANY_VALUE = object() def withClass(classname, namespace=""): """Simplified version 
of :class:`withAttribute` when matching on a div class - made difficult because ``class`` is a reserved word in Python. Example:: html = ''' <div> Some text <div class="grid">1 4 0 1 0</div> <div class="graph">1,3 2,3 1,1</div> <div>this &lt;div&gt; has no class</div> </div> ''' div,div_end = makeHTMLTags("div") div_grid = div().setParseAction(withClass("grid")) grid_expr = div_grid + SkipTo(div | div_end)("body") for grid_header in grid_expr.searchString(html): print(grid_header.body) div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE)) div_expr = div_any_type + SkipTo(div | div_end)("body") for div_header in div_expr.searchString(html): print(div_header.body) prints:: 1 4 0 1 0 1 4 0 1 0 1,3 2,3 1,1 """ classattr = "{}:class".format(namespace) if namespace else "class" return withAttribute(**{classattr: classname})
{ "repo_name": "InsightSoftwareConsortium/ITKExamples", "path": "Utilities/SphinxExtensions/pyparsing/actions.py", "copies": "1", "size": "5511", "license": "apache-2.0", "hash": 5097759077399753000, "line_mean": 31.8035714286, "line_max": 120, "alpha_frac": 0.6007984032, "autogenerated": false, "ratio": 3.964748201438849, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5065546604638849, "avg_score": null, "num_lines": null }
# actions.py from pyparsing.exceptions import ParseException from pyparsing.util import col def matchOnlyAtCol(n): """Helper method for defining parse actions that require matching at a specific column in the input text. """ def verifyCol(strg, locn, toks): if col(locn, strg) != n: raise ParseException(strg, locn, "matched token not at column %d" % n) return verifyCol def replaceWith(replStr): """Helper method for common parse actions that simply return a literal value. Especially useful when used with :class:`transformString<ParserElement.transformString>` (). Example:: num = Word(nums).setParseAction(lambda toks: int(toks[0])) na = oneOf("N/A NA").setParseAction(replaceWith(math.nan)) term = na | num OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234] """ return lambda s, l, t: [replStr] def removeQuotes(s, l, t): """Helper parse action for removing quotation marks from parsed quoted strings. Example:: # by default, quotation marks are included in parsed results quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"] # use removeQuotes to strip quotation marks from parsed results quotedString.setParseAction(removeQuotes) quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"] """ return t[0][1:-1] def withAttribute(*args, **attrDict): """Helper to create a validating parse action to be used with start tags created with :class:`makeXMLTags` or :class:`makeHTMLTags`. Use ``withAttribute`` to qualify a starting tag with a required attribute value, to avoid false matches on common tags such as ``<TD>`` or ``<DIV>``. Call ``withAttribute`` with a series of attribute names and values. 
Specify the list of filter attributes names and values as: - keyword arguments, as in ``(align="right")``, or - as an explicit dict with ``**`` operator, when an attribute name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}`` - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))`` For attribute names with a namespace prefix, you must use the second form. Attribute names are matched insensitive to upper/lower case. If just testing for ``class`` (with or without a namespace), use :class:`withClass`. To verify that the attribute exists, but without specifying a value, pass ``withAttribute.ANY_VALUE`` as the value. Example:: html = ''' <div> Some text <div type="grid">1 4 0 1 0</div> <div type="graph">1,3 2,3 1,1</div> <div>this has no type</div> </div> ''' div,div_end = makeHTMLTags("div") # only match div tag having a type attribute with value "grid" div_grid = div().setParseAction(withAttribute(type="grid")) grid_expr = div_grid + SkipTo(div | div_end)("body") for grid_header in grid_expr.searchString(html): print(grid_header.body) # construct a match with any div tag having a type attribute, regardless of the value div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE)) div_expr = div_any_type + SkipTo(div | div_end)("body") for div_header in div_expr.searchString(html): print(div_header.body) prints:: 1 4 0 1 0 1 4 0 1 0 1,3 2,3 1,1 """ if args: attrs = args[:] else: attrs = attrDict.items() attrs = [(k, v) for k, v in attrs] def pa(s, l, tokens): for attrName, attrValue in attrs: if attrName not in tokens: raise ParseException(s, l, "no matching attribute " + attrName) if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue: raise ParseException( s, l, "attribute {!r} has value {!r}, must be {!r}".format( attrName, tokens[attrName], attrValue ), ) return pa withAttribute.ANY_VALUE = object() def withClass(classname, namespace=""): """Simplified version 
of :class:`withAttribute` when matching on a div class - made difficult because ``class`` is a reserved word in Python. Example:: html = ''' <div> Some text <div class="grid">1 4 0 1 0</div> <div class="graph">1,3 2,3 1,1</div> <div>this &lt;div&gt; has no class</div> </div> ''' div,div_end = makeHTMLTags("div") div_grid = div().setParseAction(withClass("grid")) grid_expr = div_grid + SkipTo(div | div_end)("body") for grid_header in grid_expr.searchString(html): print(grid_header.body) div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE)) div_expr = div_any_type + SkipTo(div | div_end)("body") for div_header in div_expr.searchString(html): print(div_header.body) prints:: 1 4 0 1 0 1 4 0 1 0 1,3 2,3 1,1 """ classattr = "{}:class".format(namespace) if namespace else "class" return withAttribute(**{classattr: classname})
{ "repo_name": "chadmv/cmt", "path": "scripts/pyparsing/actions.py", "copies": "1", "size": "5529", "license": "mit", "hash": -5790417384529129000, "line_mean": 31.9107142857, "line_max": 120, "alpha_frac": 0.6020980286, "autogenerated": false, "ratio": 3.9719827586206895, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0010164949806162951, "num_lines": 168 }
"""Actions that can be performed on the data. An action receives a mongodb cursor and argparse args.""" from __future__ import unicode_literals, print_function from . import PPMongoError from . import analyze as analyzing from . import printing def pprint(cursor, args): "Pretty-print the documents" for i, document in enumerate(cursor): print('-' * 80, i) printing.pprint(document) def analyze(cursor, args): "Analyze the distribution of keys and values in documents" analyzing.analyze(list(cursor)) def flat(cursor, args): "Print a tab-separated table of fields values" if not args.fields: raise PPMongoError('Flat list requires a field list') paths = [key.split('.') for key in args.fields] for document in cursor: vals = [] for path in paths: pointer = document for part in path: pointer = pointer.get(part, '') if pointer == '': break vals.append(unicode(pointer)) print((u'\t'.join(vals)).encode('utf-8')) def count(cursor, args): "Count the documents" print(cursor.count())
{ "repo_name": "anossov/ppmongo", "path": "ppmongo/actions.py", "copies": "1", "size": "1172", "license": "bsd-2-clause", "hash": 7127665520776027000, "line_mean": 23.9361702128, "line_max": 62, "alpha_frac": 0.6220136519, "autogenerated": false, "ratio": 4.156028368794326, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5278042020694326, "avg_score": null, "num_lines": null }
"""Actions to run at server startup. """ from django.db import connection from django.db import transaction from south.migration import Migrations from south.models import MigrationHistory def run(): """Call this from manage.py or tests. """ _add_custom_mult_agg_function() # TODO: This breaks test_pipeline.py. Why? # _check_migrations_applied() def _add_custom_mult_agg_function(): """Make sure the Postgresql database has a custom function array_agg_mult. NOTE: Figured out the raw sql query by running psql with -E flag and then calling \df to list functions. The -E flag causes the internal raw sql of the commands to be shown. """ cursor = connection.cursor() cursor.execute( 'SELECT p.proname ' 'FROM pg_catalog.pg_proc p ' 'WHERE p.proname=\'array_agg_mult\'' ) mult_agg_exists = bool(cursor.fetchone()) if not mult_agg_exists: cursor.execute( 'CREATE AGGREGATE array_agg_mult (anyarray) (' ' SFUNC = array_cat' ' ,STYPE = anyarray' ' ,INITCOND = \'{}\'' ');' ) transaction.commit_unless_managed() def _check_migrations_applied(): """Checks that all south migrations have been applied. """ APP_NAME = 'main' all_migrations = Migrations(APP_NAME) applied_migrations = [migration.get_migration() for migration in MigrationHistory.objects.filter(app_name=APP_NAME)] not_applied = set(all_migrations) - set(applied_migrations) if len(not_applied): raise AssertionError( "Database migration required. " "Please run `./manage.py migrate main`.\n" "Applied: {applied}\n" "Missing: {not_applied}\n".format( applied=applied_migrations, not_applied=not_applied))
{ "repo_name": "woodymit/millstone", "path": "genome_designer/main/startup.py", "copies": "1", "size": "1948", "license": "mit", "hash": 7501458258816659000, "line_mean": 31.4666666667, "line_max": 78, "alpha_frac": 0.5959958932, "autogenerated": false, "ratio": 4.016494845360825, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.00047971491228070173, "num_lines": 60 }
"""Actions to run at server startup. """ from django.db import connection from django.db import transaction def run(): """Call this from manage.py or tests. """ _add_custom_mult_agg_function() def _add_custom_mult_agg_function(): """Make sure the Postgresql database has a custom function array_agg_mult. NOTE: Figured out the raw sql query by running psql with -E flag and then calling \df to list functions. The -E flag causes the internal raw sql of the commands to be shown. """ cursor = connection.cursor() cursor.execute( 'SELECT p.proname ' 'FROM pg_catalog.pg_proc p ' 'WHERE p.proname=\'array_agg_mult\'' ) mult_agg_exists = bool(cursor.fetchone()) if not mult_agg_exists: cursor.execute( 'CREATE AGGREGATE array_agg_mult (anyarray) (' ' SFUNC = array_cat' ' ,STYPE = anyarray' ' ,INITCOND = \'{}\'' ');' ) transaction.commit_unless_managed()
{ "repo_name": "woodymit/millstone_accidental_source", "path": "genome_designer/main/startup.py", "copies": "1", "size": "1068", "license": "mit", "hash": 2299502963721795300, "line_mean": 27.8648648649, "line_max": 78, "alpha_frac": 0.5767790262, "autogenerated": false, "ratio": 3.800711743772242, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4877490769972242, "avg_score": null, "num_lines": null }
"""Actions to run at server startup. """ import subprocess import sys from django.db import connection from django.db import transaction from south.migration import Migrations from south.models import MigrationHistory def run(): """Call this from manage.py or tests. """ _add_custom_mult_agg_function() _check_environment() # TODO: This breaks test_pipeline.py. Why? # _check_migrations_applied() def _add_custom_mult_agg_function(): """Make sure the Postgresql database has a custom function array_agg_mult. NOTE: Figured out the raw sql query by running psql with -E flag and then calling \df to list functions. The -E flag causes the internal raw sql of the commands to be shown. """ cursor = connection.cursor() cursor.execute( 'SELECT p.proname ' 'FROM pg_catalog.pg_proc p ' 'WHERE p.proname=\'array_agg_mult\'' ) mult_agg_exists = bool(cursor.fetchone()) if not mult_agg_exists: cursor.execute( 'CREATE AGGREGATE array_agg_mult (anyarray) (' ' SFUNC = array_cat' ' ,STYPE = anyarray' ' ,INITCOND = \'{}\'' ');' ) transaction.commit_unless_managed() def _check_migrations_applied(): """Checks that all south migrations have been applied. """ APP_NAME = 'main' all_migrations = Migrations(APP_NAME) applied_migrations = [migration.get_migration() for migration in MigrationHistory.objects.filter(app_name=APP_NAME)] not_applied = set(all_migrations) - set(applied_migrations) if len(not_applied): raise AssertionError( "Database migration required. " "Please run `./manage.py migrate main`.\n" "Applied: {applied}\n" "Missing: {not_applied}\n".format( applied=applied_migrations, not_applied=not_applied)) def _check_environment(): """Checks for software and tools in environment. NOTE: In progress. """ # Check for java. try: subprocess.check_output( ["java", "-version"], stderr=subprocess.STDOUT) except OSError: raise AssertionError("Startup Error: java not found in environment.")
{ "repo_name": "churchlab/millstone", "path": "genome_designer/main/startup.py", "copies": "1", "size": "2343", "license": "mit", "hash": 1517269235994348500, "line_mean": 29.0384615385, "line_max": 78, "alpha_frac": 0.6022193769, "autogenerated": false, "ratio": 4.110526315789474, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0003690114709851552, "num_lines": 78 }
"""Actions to save and load a MayaVi2 visualization file. """ # Author: Prabhu Ramachandran <prabhu@enthought.com> # Copyright (c) 2005-2015, Enthought, Inc. # License: BSD Style. # Standard library imports. import sys from os.path import isfile # Enthought library imports. from pyface.api import FileDialog, OK from pyface.action.api import Action # Local imports from mayavi.plugins.script import get_imayavi from mayavi.core.common import error, exception ###################################################################### # `SaveVisualization` class. ###################################################################### class SaveVisualization(Action): """ An action that saves the current visualization. """ tooltip = "Save current visualization" description = "Save current visualization to a MayaVi2 file" ########################################################################### # 'Action' interface. ########################################################################### def perform(self, event): """ Performs the action. """ wildcard = 'MayaVi2 files (*.mv2)|*.mv2|' + FileDialog.WILDCARD_ALL dialog = FileDialog(parent=self.window.control, title='Save MayaVi2 file', action='save as', wildcard=wildcard ) if dialog.open() == OK: mv = get_imayavi(self.window) mv.save_visualization(dialog.path) ###################################################################### # `LoadVisualization` class. ###################################################################### class LoadVisualization(Action): """ An action that loads a visualization from file. """ tooltip = "Load saved visualization" description = "Load saved visualization from a MayaVi2 file" ########################################################################### # 'Action' interface. ########################################################################### def perform(self, event): """ Performs the action. 
""" wildcard = 'MayaVi2 files (*.mv2)|*.mv2|' + FileDialog.WILDCARD_ALL parent = self.window.control dialog = FileDialog(parent=parent, title='Open MayaVi2 file', action='open', wildcard=wildcard ) if dialog.open() == OK: if not isfile(dialog.path): error("File '%s' does not exist"%dialog.path, parent) return mv = get_imayavi(self.window) mv.load_visualization(dialog.path) ###################################################################### # `RunScript` class. ###################################################################### class RunScript(Action): """ An action that runs a mayavi script. WARNING: this can be dangerous since the file runs execfile! """ tooltip = "Execute a Python script (typically a Mayavi script)" description = "Execute a Python script (typically a Mayavi script)" ########################################################################### # 'Action' interface. ########################################################################### def perform(self, event): """ Performs the action. """ wildcard = 'Python files (*.py)|*.py' parent = self.window.control dialog = FileDialog(parent=parent, title='Open Python file', action='open', wildcard=wildcard ) if dialog.open() == OK: if not isfile(dialog.path): error("File '%s' does not exist"%dialog.path, parent) return # Get the globals. # The following code is taken from scripts/mayavi2.py. g = sys.modules['__main__'].__dict__ if 'mayavi' not in g: mv = get_imayavi(self.window) g['mayavi'] = mv g['engine'] = mv.engine # Do execfile try: # If we don't pass globals twice we get NameErrors and nope, # using exec open(script_name).read() does not fix it. exec(compile( open(dialog.path).read(), dialog.path, 'exec'), g, g ) except Exception as msg: exception(str(msg))
{ "repo_name": "dmsurti/mayavi", "path": "mayavi/action/save_load.py", "copies": "1", "size": "4529", "license": "bsd-3-clause", "hash": 5728941892957500000, "line_mean": 36.7416666667, "line_max": 79, "alpha_frac": 0.4524177523, "autogenerated": false, "ratio": 5.083052749719417, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6035470502019418, "avg_score": null, "num_lines": null }
"""Actions to save and load a MayaVi2 visualization file. """ # Author: Prabhu Ramachandran <prabhu_r@users.sf.net> # Copyright (c) 2005, Enthought, Inc. # License: BSD Style. # Standard library imports. import sys from os.path import isfile # Enthought library imports. from pyface.api import FileDialog, OK from pyface.action.api import Action # Local imports from mayavi.plugins.script import get_imayavi from mayavi.core.common import error, exception ###################################################################### # `SaveVisualization` class. ###################################################################### class SaveVisualization(Action): """ An action that saves the current visualization. """ tooltip = "Save current visualization" description = "Save current visualization to a MayaVi2 file" ########################################################################### # 'Action' interface. ########################################################################### def perform(self, event): """ Performs the action. """ wildcard = 'MayaVi2 files (*.mv2)|*.mv2|' + FileDialog.WILDCARD_ALL dialog = FileDialog(parent=self.window.control, title='Save MayaVi2 file', action='save as', wildcard=wildcard ) if dialog.open() == OK: mv = get_imayavi(self.window) mv.save_visualization(dialog.path) ###################################################################### # `LoadVisualization` class. ###################################################################### class LoadVisualization(Action): """ An action that loads a visualization from file. """ tooltip = "Load saved visualization" description = "Load saved visualization from a MayaVi2 file" ########################################################################### # 'Action' interface. ########################################################################### def perform(self, event): """ Performs the action. 
""" wildcard = 'MayaVi2 files (*.mv2)|*.mv2|' + FileDialog.WILDCARD_ALL parent = self.window.control dialog = FileDialog(parent=parent, title='Open MayaVi2 file', action='open', wildcard=wildcard ) if dialog.open() == OK: if not isfile(dialog.path): error("File '%s' does not exist"%dialog.path, parent) return mv = get_imayavi(self.window) mv.load_visualization(dialog.path) ###################################################################### # `RunScript` class. ###################################################################### class RunScript(Action): """ An action that runs a mayavi script. WARNING: this can be dangerous since the file runs execfile! """ tooltip = "Execute a Python script (typically a Mayavi script)" description = "Execute a Python script (typically a Mayavi script)" ########################################################################### # 'Action' interface. ########################################################################### def perform(self, event): """ Performs the action. """ wildcard = 'Python files (*.py)|*.py' parent = self.window.control dialog = FileDialog(parent=parent, title='Open Python file', action='open', wildcard=wildcard ) if dialog.open() == OK: if not isfile(dialog.path): error("File '%s' does not exist"%dialog.path, parent) return # Get the globals. # The following code is taken from scripts/mayavi2.py. g = sys.modules['__main__'].__dict__ if 'mayavi' not in g: mv = get_imayavi(self.window) g['mayavi'] = mv g['engine'] = mv.engine # Do execfile try: # If we don't pass globals twice we get NameErrors and nope, # using exec open(script_name).read() does not fix it. execfile(dialog.path, g, g) except Exception, msg: exception(str(msg))
{ "repo_name": "alexandreleroux/mayavi", "path": "mayavi/action/save_load.py", "copies": "2", "size": "4447", "license": "bsd-3-clause", "hash": -2376420760774065700, "line_mean": 36.3697478992, "line_max": 79, "alpha_frac": 0.4535642006, "autogenerated": false, "ratio": 5.07648401826484, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6530048218864839, "avg_score": null, "num_lines": null }
"""Actions to start various filters. """ # Author: Prabhu Ramachandran <prabhu_r@users.sf.net> # Copyright (c) 2005-2008, Enthought, Inc. # License: BSD Style. from pyface.action.api import Action from traits.api import Instance from mayavi.plugins.script import get_imayavi from mayavi.core.registry import registry from mayavi.core.metadata import Metadata from mayavi.core.pipeline_base import PipelineBase def new_class(name, bases, dict_): try: import new return new.classobj(name, bases, dict_) except ImportError: return type(name, bases, dict_) ###################################################################### # `FilterAction` class. ###################################################################### class FilterAction(Action): # The Metadata associated with this particular action. metadata = Instance(Metadata) mayavi = Instance('mayavi.plugins.script.Script') # We disable the actions by default since these are dynamically # enabled depending on the current selection or object. enabled = False def __init__(self, **traits): super(FilterAction, self).__init__(**traits) self.mayavi.engine.on_trait_change(self._update_enabled, ['current_selection', 'current_object']) ########################################################################### # 'Action' interface. ########################################################################### def perform(self, event): """ Performs the action. """ callable = self.metadata.get_callable() obj = callable() mv = self.mayavi mv.add_filter(obj) mv.engine.current_selection = obj def _update_enabled(self, obj): if isinstance(obj, PipelineBase): e = obj.menu_helper.check_active(self.metadata) self.enabled = e else: self.enabled = False def _mayavi_default(self): return get_imayavi(self.window) ###################################################################### # Creating the filter actions automatically. 
for filter in registry.filters: d = {'tooltip': filter.tooltip, 'description': filter.desc, 'metadata': filter} action = new_class(filter.id, (FilterAction,), d) globals()[filter.id] = action
{ "repo_name": "dmsurti/mayavi", "path": "mayavi/action/filters.py", "copies": "2", "size": "2410", "license": "bsd-3-clause", "hash": -322979925196029200, "line_mean": 32.9436619718, "line_max": 79, "alpha_frac": 0.5390041494, "autogenerated": false, "ratio": 4.679611650485437, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0009054325955734406, "num_lines": 71 }
"""Actions to start various filters. """ # Author: Prabhu Ramachandran <prabhu_r@users.sf.net> # Copyright (c) 2005-2008, Enthought, Inc. # License: BSD Style. import new from pyface.action.api import Action from traits.api import Instance from mayavi.plugins.script import get_imayavi from mayavi.core.registry import registry from mayavi.core.metadata import Metadata from mayavi.core.pipeline_base import PipelineBase ###################################################################### # `FilterAction` class. ###################################################################### class FilterAction(Action): # The Metadata associated with this particular action. metadata = Instance(Metadata) mayavi = Instance('mayavi.plugins.script.Script') # We disable the actions by default since these are dynamically # enabled depending on the current selection or object. enabled = False def __init__(self, **traits): super(FilterAction, self).__init__(**traits) self.mayavi.engine.on_trait_change(self._update_enabled, ['current_selection', 'current_object']) ########################################################################### # 'Action' interface. ########################################################################### def perform(self, event): """ Performs the action. """ callable = self.metadata.get_callable() obj = callable() mv = self.mayavi mv.add_filter(obj) mv.engine.current_selection = obj def _update_enabled(self, obj): if isinstance(obj, PipelineBase): e = obj.menu_helper.check_active(self.metadata) self.enabled = e else: self.enabled = False def _mayavi_default(self): return get_imayavi(self.window) ###################################################################### # Creating the filter actions automatically. for filter in registry.filters: d = {'tooltip': filter.tooltip, 'description': filter.desc, 'metadata': filter} action = new.classobj(filter.id, (FilterAction,), d) globals()[filter.id] = action
{ "repo_name": "liulion/mayavi", "path": "mayavi/action/filters.py", "copies": "2", "size": "2251", "license": "bsd-3-clause", "hash": -6005527612318811000, "line_mean": 32.1029411765, "line_max": 79, "alpha_frac": 0.5353176366, "autogenerated": false, "ratio": 4.748945147679325, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6284262784279325, "avg_score": null, "num_lines": null }
"""Actions to start various modules. """ # Author: Prabhu Ramachandran <prabhu_r@users.sf.net> # Copyright (c) 2005-2008, Enthought, Inc. # License: BSD Style. import new # Local imports. from mayavi.core.registry import registry from mayavi.core.metadata import ModuleMetadata from mayavi.core.pipeline_info import PipelineInfo from mayavi.action.filters import FilterAction ###################################################################### # `ModuleAction` class. ###################################################################### class ModuleAction(FilterAction): ########################################################################### # 'Action' interface. ########################################################################### def perform(self, event): """ Performs the action. """ callable = self.metadata.get_callable() obj = callable() mv = self.mayavi mv.add_module(obj) mv.engine.current_selection = obj ###################################################################### # `AddModuleManager` class. ###################################################################### class AddModuleManager(ModuleAction): """ An action that adds a ModuleManager to the tree. """ tooltip = "Add a ModuleManager to the current source/filter" description = "Add a ModuleManager to the current source/filter" metadata = ModuleMetadata(id="AddModuleManager", class_name="mayavi.core.module_manager.ModuleManager", menu_name="&Add ModuleManager", tooltip="Add a ModuleManager to the current source/filter", description="Add a ModuleManager to the current source/filter", input_info = PipelineInfo(datasets=['any'], attribute_types=['any'], attributes=['any']) ) def perform(self, event): """ Performs the action. """ from mayavi.core.module_manager import ModuleManager mm = ModuleManager() mv = self.mayavi mv.add_module(mm) mv.engine.current_selection = mm ###################################################################### # Creating the module actions automatically. 
for module in registry.modules: d = {'tooltip': module.tooltip, 'description': module.desc, 'metadata': module} action = new.classobj(module.id, (ModuleAction,), d) globals()[module.id] = action
{ "repo_name": "liulion/mayavi", "path": "mayavi/action/modules.py", "copies": "2", "size": "2530", "license": "bsd-3-clause", "hash": 353088184930345200, "line_mean": 35.1428571429, "line_max": 79, "alpha_frac": 0.5059288538, "autogenerated": false, "ratio": 5.184426229508197, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6690355083308196, "avg_score": null, "num_lines": null }
"""Actions to start various modules. """ # Author: Prabhu Ramachandran <prabhu_r@users.sf.net> # Copyright (c) 2005-2015, Enthought, Inc. # License: BSD Style. # Local imports. from mayavi.core.registry import registry from mayavi.core.metadata import ModuleMetadata from mayavi.core.pipeline_info import PipelineInfo from mayavi.action.filters import FilterAction, new_class ###################################################################### # `ModuleAction` class. ###################################################################### class ModuleAction(FilterAction): ########################################################################### # 'Action' interface. ########################################################################### def perform(self, event): """ Performs the action. """ callable = self.metadata.get_callable() obj = callable() mv = self.mayavi mv.add_module(obj) mv.engine.current_selection = obj ###################################################################### # `AddModuleManager` class. ###################################################################### class AddModuleManager(ModuleAction): """ An action that adds a ModuleManager to the tree. """ tooltip = "Add a ModuleManager to the current source/filter" description = "Add a ModuleManager to the current source/filter" metadata = ModuleMetadata(id="AddModuleManager", class_name="mayavi.core.module_manager.ModuleManager", menu_name="&Add ModuleManager", tooltip="Add a ModuleManager to the current source/filter", description="Add a ModuleManager to the current source/filter", input_info = PipelineInfo(datasets=['any'], attribute_types=['any'], attributes=['any']) ) def perform(self, event): """ Performs the action. """ from mayavi.core.module_manager import ModuleManager mm = ModuleManager() mv = self.mayavi mv.add_module(mm) mv.engine.current_selection = mm ###################################################################### # Creating the module actions automatically. 
for module in registry.modules: d = {'tooltip': module.tooltip, 'description': module.desc, 'metadata': module} action = new_class(module.id, (ModuleAction,), d) globals()[module.id] = action
{ "repo_name": "dmsurti/mayavi", "path": "mayavi/action/modules.py", "copies": "2", "size": "2525", "license": "bsd-3-clause", "hash": -9163397943547308000, "line_mean": 36.6865671642, "line_max": 79, "alpha_frac": 0.5053465347, "autogenerated": false, "ratio": 5.184804928131417, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6690151462831416, "avg_score": null, "num_lines": null }
"""actions to use """ import sftoolbox.utils import os import sys import uuid import imp import sftoolbox class Action(object): """base implmentation of an action """ json_type = 'action' def __init__(self, project): """construct the action """ project.add(self) self.project = project self.label = None self.idname = str(uuid.uuid4()) self.description = None self.icon_filepath = None self.icon_size = None self.enabled = True self.visible = True self.style_sheet = None self.tags = [] def _apply_json(self, data): """apply the json data """ self.label = data.get('label') self.idname = data.get('idname') self.description = data.get('description') self.icon_filepath = data.get('icon') self.icon_size = data.get('icon_size', 'small') self.enabled = data.get('enabled', True) self.visible = data.get('visible', True) self.style_sheet = data.get('style_sheet', None) tags = [] if 'tags' in data: if isinstance(data['tags'], basestring): tags += [tag.strip() for tag in data['tags'].split(',')] else: tags += data['tags'] self.tags = tags @property def absolute_icon_filepath(self): if self.icon_filepath: return os.path.join(self.project.directory, self.icon_filepath) return None @classmethod def from_json(cls, project, data): action = cls(project) action._apply_json(data) return action def run(self): """run the given action """ return True @property def is_runnable(self): return True @property def human_label(self): if self.label: return str(self.label) else: return sftoolbox.utils.human_readable(str(self.idname)) @sftoolbox.engine.register_action_class class DummyAction(Action): """dummy action not doing anything """ json_type = 'dummy' def run(self): """run the dummy action doing nothing """ return True @sftoolbox.engine.register_action_class class PythonCodeAction(Action): """python string execution """ json_type = 'python' def __init__(self, project, code=None): super(PythonCodeAction, self).__init__(project) self.code = code def _apply_json(self, data): super(PythonCodeAction, 
self)._apply_json(data) self.code = data.get('code', None) @property def is_runnable(self): if not self.code: return False return True def run(self): if self.code: exec self.code return True else: raise False @sftoolbox.engine.register_action_class class PythonScriptAction(Action): """run a python script filepath that is given """ json_type = 'python_script' def __init__(self, project, filepath=None): """python script filepath """ super(PythonScriptAction, self).__init__(project) self.filepath = filepath @property def absolute_filepath(self): filepath = os.path.join(self.project.directory, self.filepath) return filepath @property def is_runnable(self): if not os.path.exists(self.absolute_filepath): return False return True def run(self): if sys.version_info > (3, 0): exec (open(self.absolute_filepath).read()) else: execfile(self.absolute_filepath) return True def _apply_json(self, data): super(PythonScriptAction, self)._apply_json(data) self.filepath = data.get('filepath', None) @sftoolbox.engine.register_action_class class PythonFunctionAction(Action): """run a python script filepath that is given """ json_type = 'python_function' def __init__(self, project, filepath=None): """python script filepath """ super(PythonFunctionAction, self).__init__(project) self.filepath = filepath self.function_name = None self.args = [] self.kwargs = {} @property def absolute_filepath(self): if not self.filepath: return return os.path.join(self.project.directory, self.filepath) @property def is_runnable(self): if not self.absolute_filepath: return False if not os.path.exists(self.absolute_filepath): return False return True def run(self): """load the module and run the function """ module_name = os.path.basename(self.absolute_filepath) module_name, _ = os.path.splitext(module_name) module_object = imp.load_source(module_name, self.absolute_filepath) function_name = self.function_name if not function_name: function_name = 'main' func = getattr(module_object, function_name) 
func(*self.args, **self.kwargs) return True def _apply_json(self, data): super(PythonFunctionAction, self)._apply_json(data) self.filepath = data.get('filepath', None) self.function_name = data.get('function', None) self.args = data.get('args', []) self.kwargs = data.get('kwargs', {}) def action_from_json(project, value): """return a action from the given json """ json_type = value.get('type') for class_ in sftoolbox.engine.action_classes_register: if json_type == class_.json_type: return class_.from_json(project, value) return DummyAction.from_json(project, value)
{ "repo_name": "svenfraeys/sftoolbox", "path": "sftoolbox/actions.py", "copies": "1", "size": "5759", "license": "mit", "hash": -1954336834356285200, "line_mean": 24.9414414414, "line_max": 76, "alpha_frac": 0.5879492968, "autogenerated": false, "ratio": 4.047083626141954, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5135032922941953, "avg_score": null, "num_lines": null }
"""Actions triggered on various signals. """ import web from . import account, signals from .sendmail import sendmail_with_template from .models import Activity def record_activity(name, info): user = account.get_current_user() Activity.new(name, user, info) @signals.trainer_signup.connect def activity_trainer_signup(trainer): record_activity('trainer-signup', trainer.dict()) @signals.org_signup.connect def activity_org_signup(org): record_activity('org-signup', org.dict()) @signals.new_workshop.connect def activity_new_workshop(workshop): record_activity('new-workshop', workshop.dict()) @signals.workshop_express_interest.connect def activity_express_interest(workshop, trainer): record_activity('workshop-express-interest', dict( workshop=workshop.dict(), trainer=trainer.dict())) @signals.workshop_withdraw_interest.connect def activity_withdraw_interest(workshop, trainer): record_activity('workshop-withdraw-interest', dict( workshop=workshop.dict(), trainer=trainer.dict())) @signals.workshop_confirmed.connect def activity_workshop_confirmed(workshop, trainer): record_activity('workshop-confirmed', dict( workshop=workshop.dict(), trainer=trainer.dict())) @signals.workshop_confirmed.connect def on_workshop_confirmed(workshop, trainer): org = workshop.get_org() member_emails = [m.email for m, role in org.get_members()] sendmail_with_template("emails/workshop_confirmed.html", subject="{} workshop is confirmed".format(workshop['title']), to=[trainer.email] + member_emails, cc=web.config.get('contact_email'), workshop=workshop, trainer=trainer) @signals.trainer_signup.connect def trainer_welcome_email(trainer): sendmail_with_template("emails/trainers/welcome.html", subject="Welcome to Python Express", to=trainer.email, trainer=trainer) @signals.org_signup.connect def org_welcome_email(org): pass @signals.new_workshop.connect def on_new_workshop(org): pass @signals.new_comment.connect def on_new_comment(comment): workshop = comment.get_workshop() subject = "New comment on 
{}".format(workshop.title) for u in workshop.get_followers(): sendmail_with_template("emails/comment-added.html", to=u.email, subject=subject, workshop=workshop, user=u, comment=comment)
{ "repo_name": "anandology/broadgauge", "path": "broadgauge/actions.py", "copies": "2", "size": "2447", "license": "bsd-3-clause", "hash": -1327422437383426800, "line_mean": 29.6, "line_max": 69, "alpha_frac": 0.701675521, "autogenerated": false, "ratio": 3.5007153075822606, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5202390828582261, "avg_score": null, "num_lines": null }
# actionsystem.py
#
# A bunch of rule tables for handling what to do with actions in the
# game.  These are assumed to be used in a player context.

from textadv.core.patterns import BasicPattern
# ActionHandled is raised by verify_instead below; it was previously
# used without being imported, causing a NameError at runtime.
from textadv.core.rulesystem import ActivityTable, RuleTable, AbortAction, ActionHandled, make_rule_decorator
from textadv.gamesystem.utilities import str_with_objs

###
### Actions
###

class BasicAction(BasicPattern) :
    """All patterns which represent actions should subclass this
    object.  It importantly implements gerund_form, which should print
    something informative like "doing suchandsuch with whatever".

    Conventions (seen in the accessors below): args[0] is the actor,
    args[1] the direct object, args[2] the indirect object.  For
    three-argument actions, verb and gerund are expected to be
    two-element sequences, e.g. ("put", "in") / ("putting", "in")."""
    verb = "NEED VERB"           # infinitive form; (verb, preposition) pair for 3-arg actions
    gerund = "NEEDING GERUND"    # gerund form; (gerund, preposition) pair for 3-arg actions
    num_turns = 1                # number of game turns the action consumes
    dereference_dobj = True      # if True, render the direct object as "[the $x]"
    dereference_iobj = True      # if True, render the indirect object as "[the $x]"
    def __init__(self, *args) :
        if len(args) == self.numargs :
            self.args = args
        else :
            raise Exception("Pattern requires exactly "+str(self.numargs)+" arguments.")
    def update_actor(self, newactor) :
        """Sometimes the actor is not set properly (for instance, with
        AskingTo).  We need to be able to reset it."""
        # args may be a tuple; make it mutable before replacing the actor.
        self.args = list(self.args)
        self.args[0] = newactor
    def get_actor(self) :
        """An accessor method for the actor of the action.  Assumed to
        be first element."""
        return self.args[0]
    def get_do(self) :
        """An accessor method for the direct object of the action.
        Assumed to be the second element."""
        return self.args[1]
    def get_io(self) :
        """An accessor method for the indirect object of the action.
        Assumed to be the third element."""
        return self.args[2]
    def _object_text(self, obj, dereference) :
        """Returns the printable representation of an action argument:
        "[the $x]" markup when dereference is set, the raw value
        otherwise.  Shared by gerund_form and infinitive_form."""
        if dereference :
            return str_with_objs("[the $x]", x=obj)
        else :
            return obj
    def gerund_form(self, ctxt) :
        """Returns a phrase like "taking [the ball]" describing the
        action in progress.  Handles 1-3 argument actions."""
        if len(self.args) == 1 :
            return self.gerund
        elif len(self.args) == 2 :
            dobj = self._object_text(self.args[1], self.dereference_dobj)
            return self.gerund + " " + dobj
        elif len(self.args) == 3 :
            dobj = self._object_text(self.args[1], self.dereference_dobj)
            iobj = self._object_text(self.args[2], self.dereference_iobj)
            # gerund is a (gerund, preposition) pair for 3-arg actions.
            return (self.gerund[0] + " " + dobj + " " + self.gerund[1] + " " + iobj)
        else :
            raise Exception("Default gerund form only works with 1-3 args")
    def infinitive_form(self, ctxt) :
        """Returns a phrase like "take [the ball]".  Doesn't prepend
        "to".  Handles 1-3 argument actions."""
        if len(self.args) == 1 :
            return self.verb
        elif len(self.args) == 2 :
            dobj = self._object_text(self.args[1], self.dereference_dobj)
            return self.verb + " " + dobj
        elif len(self.args) == 3 :
            dobj = self._object_text(self.args[1], self.dereference_dobj)
            # Bug fix: the indirect object is args[2]; the original used
            # args[1], printing the direct object twice.
            iobj = self._object_text(self.args[2], self.dereference_iobj)
            # verb is a (verb, preposition) pair for 3-arg actions.
            return (self.verb[0] + " " + dobj + " " + self.verb[1] + " " + iobj)
        else :
            raise Exception("Default infinitive form only works with 1-3 args")

###
### Handling actions
###

class ActionSystem(object) :
    """Holds the five rule tables an action passes through (verify,
    trybefore, before, when, report) along with decorators for
    registering rules into each of them."""
    def __init__(self) :
        self.action_verify = RuleTable(doc="""Handles verifying actions for being at least
        somewhat logical. Should not change world state.""")
        self.action_trybefore = RuleTable(doc="""Handles a last attempt to make the action
        work (one shouldn't work with this table directly).""")
        self.action_before = RuleTable(doc="""Checks an action to see if it is even possible
        (opening a door is logical, but it's not immediately possible
        to open a locked door)""")
        self.action_when = RuleTable(doc="""Carries out the action.  Must not fail.""")
        self.action_report = RuleTable(doc="""Explains what happened with this action.  Should
        not change world state.""")
        # Decorators so game code can write @actionsystem.before(...), etc.
        self.verify = make_rule_decorator(self.action_verify)
        self.trybefore = make_rule_decorator(self.action_trybefore)
        self.before = make_rule_decorator(self.action_before)
        self.when = make_rule_decorator(self.action_when)
        self.report = make_rule_decorator(self.action_report)
    def verify_action(self, action, ctxt) :
        """Returns either the best reason for doing the action, or, if
        there is a reason not to do it, the worst."""
        reasons = self.action_verify.notify(action, {"ctxt" : ctxt}, {"world" : ctxt.world})
        reasons = [r for r in reasons if r is not None]
        reasons.sort(key=lambda x : x.score)
        if len(reasons) == 0 :
            # No rule objected or endorsed: treat as plainly logical.
            return LogicalOperation()
        else :
            if not reasons[0].is_acceptible() :
                # The lowest-scoring reason is a veto; report it.
                return reasons[0]
            else :
                # Otherwise report the strongest endorsement.
                return reasons[-1]
    def run_action(self, action, ctxt, is_implied=False, write_action=False, silently=False) :
        """Runs an action by the following steps:
        * Verify - if the action is not reasonable, then the action fails
        * Trybefore - just tries to make the world in the right state for Before to succeed.
        * Before - checks if the action is possible.  May throw DoInstead to redirect execution.
        * When - carries out the action.
        * Report - reports the action.  Executes if the silently flag is False.

        is_implied, if true forces a description of the action to be
        printed.  Also (should) prevent possibly dangerous actions
        from being carried out.

        write_action is a boolean or a string such as "(first %s)".
        If considered to be true, then describes action.

        silently, if true, prevents reporting the action."""
        if (write_action or is_implied) :
            if write_action is True :
                write_action = "(%s)"
            ctxt.write(write_action % action.gerund_form(ctxt))
            ctxt.write("[newline]")
        reasonable = self.verify_action(action, ctxt)
        if not reasonable.is_acceptible() :
            # Verification vetoed the action; explain and abort.
            ctxt.write(reasonable.reason)
            raise AbortAction()
        self.action_trybefore.notify(action, {"ctxt" : ctxt}, {"world" : ctxt.world})
        try :
            self.action_before.notify(action, {"ctxt" : ctxt}, {"world" : ctxt.world})
        except DoInstead as ix :
            # A before handler redirected us to a different action.
            msg = False if ix.suppress_message or silently else "(%s instead)"
            self.run_action(ix.instead, ctxt, write_action=msg)
            return
        did_something = self.action_when.notify(action, {"ctxt" : ctxt}, {"world" : ctxt.world})
        #if not did_something :
        #    raise AbortAction("There was nothing to do.")
        # this doesn't seem to be the right thing to do.
        if not silently :
            self.action_report.notify(action, {"ctxt" : ctxt}, {"world" : ctxt.world})
    def do_first(self, action, ctxt, silently=False) :
        """Runs an action with a "(first /doing something/)" message.
        If silently is True, then this message is not printed."""
        self.run_action(action, ctxt=ctxt, is_implied=True, write_action="(first %s)", silently=silently)
    def copy(self) :
        """Returns a copy which behaves like the original, but for
        which modifications do not change the original."""
        newat = ActionSystem()
        newat.action_verify = self.action_verify.copy()
        newat.action_trybefore = self.action_trybefore.copy()
        newat.action_before = self.action_before.copy()
        newat.action_when = self.action_when.copy()
        newat.action_report = self.action_report.copy()
        # Re-bind the decorators to the copied tables, not the originals.
        newat.verify = make_rule_decorator(newat.action_verify)
        newat.trybefore = make_rule_decorator(newat.action_trybefore)
        newat.before = make_rule_decorator(newat.action_before)
        newat.when = make_rule_decorator(newat.action_when)
        newat.report = make_rule_decorator(newat.action_report)
        return newat
    def make_documentation(self, escape, heading_level=1) :
        """Prints HTML documentation for every rule table to stdout.
        escape is a function used by the tables to escape HTML."""
        hls = str(heading_level)
        # Single-argument print(...) works identically under Python 2
        # and is forward-compatible with Python 3.
        print("<h"+hls+">Event system</h"+hls+">")
        print("<p>This is the documentation for the event system.</p>")
        def _make_action_docs(heading_level, table, heading) :
            # One subsection per table, two levels below the main heading.
            shls = str(heading_level+1)
            print("<h"+shls+">"+heading+"</h"+shls+">")
            table.make_documentation(escape, heading_level=heading_level+2)
        _make_action_docs(heading_level, self.action_verify, "action_verify")
        _make_action_docs(heading_level, self.action_trybefore, "action_trybefore")
        _make_action_docs(heading_level, self.action_before, "action_before")
        _make_action_docs(heading_level, self.action_when, "action_when")
        _make_action_docs(heading_level, self.action_report, "action_report")

##
## for Verify
##

class BasicVerify(object) :
    """An object returned by an action verifier.  The score is a value
    from 0 (completely illogical) to 100 (perfectly logical).  Higher
    values may be used to make an object more likely to be used when
    disambiguating."""
    LOGICAL_CUTOFF = 90  # scores at or above this count as acceptable
    def __init__(self, score, reason) :
        self.score = score
        self.reason = reason
    def is_acceptible(self) :
        # (Spelling "acceptible" kept: it is part of the public interface.)
        return self.score >= self.LOGICAL_CUTOFF
    def __repr__(self) :
        return "<BasicVerify %r %r>" % (self.score, self.reason)

class IllogicalNotVisible(BasicVerify) :
    """For when the thing is illogical because it can't be seen.
    Meant to prevent unseemly disambiguations because of objects not
    presently viewable.  These kinds of verify objects are special
    cased in the parser."""
    def __init__(self, reason) :
        self.score = 0
        self.reason = reason

def VeryLogicalOperation() :
    """For operations which are particularly apt."""
    return BasicVerify(150, "Very good.")
def LogicalOperation() :
    """For when the operation is logical."""
    return BasicVerify(100, "All good.")
def BarelyLogicalOperation() :
    """For when the operation is barely logical because something
    else may be more logical; intended as a failsafe."""
    return BasicVerify(90, "Almost not good.")
def IllogicalAlreadyOperation(reason) :
    """For when the operation is illogical because it's been already
    done."""
    return BasicVerify(60, reason)
def IllogicalInaccessible(reason) :
    """For when the operation is illogical because the object is
    inaccessible."""
    return BasicVerify(20, reason)
def IllogicalOperation(reason) :
    """For when the operation is plainly illogical."""
    return BasicVerify(10, reason)
def NonObviousOperation() :
    """To prevent automatically doing an operation.  Score 99 keeps it
    acceptable but below a plain LogicalOperation."""
    return BasicVerify(99, "Non-obvious.")

##
## Redirecting action execution
##

class DoInstead(Exception) :
    """A "before" event handler can raise this to abort the current
    action and instead do the action in the argument."""
    def __init__(self, instead, suppress_message=False) :
        self.instead = instead
        self.suppress_message = suppress_message

def verify_instead(action, ctxt) :
    """Used when it's necessary to verify another action because it's
    known that a before handler is going to throw a DoInstead."""
    # NOTE(review): verify_action is a method of ActionSystem, not a
    # module-level function, so this call looks stale -- presumably it
    # should go through the action system held by ctxt.  Confirm
    # against callers before changing.
    raise ActionHandled(verify_action(action, ctxt))
{ "repo_name": "kmill/textadv", "path": "textadv/gamesystem/actionsystem.py", "copies": "1", "size": "11480", "license": "mit", "hash": 2496144328583574500, "line_mean": 43.6692607004, "line_max": 107, "alpha_frac": 0.6232578397, "autogenerated": false, "ratio": 3.8536421617992613, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9905668211768028, "avg_score": 0.014246357946246417, "num_lines": 257 }
"""Activate and deactivate a wmt-exe environment.""" from __future__ import print_function import sys import os from ..env import WmtEnvironment _ACTIVATE_SCRIPT = """ # This file must be used with "source wmt-activate" *from bash* # you cannot run it directly if [ -n "$BASH" -o -n "$ZSH_VERSION" ] ; then hash -r fi {ENVIRONMENT} """.strip() _PATH_NAMES = ['PATH', 'LD_LIBRARY_PATH', 'PYTHONPATH', 'LD_RUN_PATH', 'CLASSPATH', 'SIDL_DLL_PATH'] _VAR_NAMES = ['TAIL', 'CURL', 'BASH'] def prepend_path(var, path, out=None): path = os.path.abspath(path) env = out or os.environ try: paths = env[var].split(os.pathsep) except KeyError: paths = [] else: paths = [os.path.abspath(p) for p in paths] try: paths.remove(path) except ValueError: pass env[var] = os.pathsep.join([path] + paths) def prepend_paths(var, paths, out=None): for path in paths.split(os.pathsep)[::-1]: prepend_path(var, path) def saved_var_name(name): return '_WMT_OLD_%s' % name def save_vars(names): saved = {} for name in names: if name in os.environ: saved[saved_var_name(name)] = os.environ[name] return saved def restore_vars(names): env = {} for name in names: saved_name = saved_var_name(name) if saved_name in os.environ: env[name] = os.environ.pop(saved_name) else: env[name] = None env[saved_name] = None return env def environ_update(env): updated = save_vars(_PATH_NAMES + _VAR_NAMES) for name in _PATH_NAMES: prepend_paths(name, env[name]) for name in _VAR_NAMES: os.environ[name] = env[name] for name in _PATH_NAMES + _VAR_NAMES: updated[name] = os.environ[name] return updated def environ_as_bash_commands(env): commands = [] names = list(env.keys()) names.sort() for name in names: if env[name] is None: commands.append('unset %s;' % name) else: commands.append('export %s="%s";' % (name, env[name])) return commands def activate(path, extra_bases=[]): env = WmtEnvironment.from_config(path) env = environ_update(env) for base in extra_bases: prepend_path('PATH', os.path.join(base, 'bin'), out=env) 
prepend_path('LD_LIBRARY_PATH', os.path.join(base, 'lib'), out=env) prepend_path('PYTHONPATH', os.path.join(base, 'lib', 'python2.7', 'site-packages'), out=env) print(os.linesep.join(environ_as_bash_commands(env))) def deactivate(): env = restore_vars(_PATH_NAMES + _VAR_NAMES) print(os.linesep.join(environ_as_bash_commands(env))) def deactivate_main(): import argparse parser = argparse.ArgumentParser( description='Deactivate a WMT environment') args = parser.parse_args() deactivate() def activate_main(): import argparse parser = argparse.ArgumentParser( description='Activate a WMT environment') parser.add_argument('file', nargs='?', type=str, default=None, help='WMT config file') parser.add_argument('--prepend-base', action='append', default=[], help='Extra bases to include in environment') args = parser.parse_args() activate(args.file, extra_bases=args.prepend_base)
{ "repo_name": "csdms/wmt-exe", "path": "wmtexe/cmd/activate.py", "copies": "1", "size": "3415", "license": "mit", "hash": -8818463121232699000, "line_mean": 22.3904109589, "line_max": 77, "alpha_frac": 0.5991215227, "autogenerated": false, "ratio": 3.4918200408997957, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4590941563599796, "avg_score": null, "num_lines": null }
"""Activate coverage at python startup if appropriate. The python site initialisation will ensure that anything we import will be removed and not visible at the end of python startup. However we minimise all work by putting these init actions in this separate module and only importing what is needed when needed. For normal python startup when coverage should not be activated the pth file checks a single env var and does not import or call the init fn here. For python startup when an ancestor process has set the env indicating that code coverage is being collected we activate coverage based on info passed via env vars. """ import atexit import os import signal _active_cov = None def multiprocessing_start(_): global _active_cov cov = init() if cov: _active_cov = cov multiprocessing.util.Finalize(None, cleanup, exitpriority=1000) try: import multiprocessing.util except ImportError: pass else: multiprocessing.util.register_after_fork(multiprocessing_start, multiprocessing_start) def init(): # Only continue if ancestor process has set everything needed in # the env. global _active_cov cov_source = os.environ.get('COV_CORE_SOURCE') cov_config = os.environ.get('COV_CORE_CONFIG') cov_datafile = os.environ.get('COV_CORE_DATAFILE') cov_branch = True if os.environ.get('COV_CORE_BRANCH') == 'enabled' else None cov_context = os.environ.get('COV_CORE_CONTEXT') if cov_datafile: if _active_cov: cleanup() # Import what we need to activate coverage. import coverage # Determine all source roots. if cov_source in os.pathsep: cov_source = None else: cov_source = cov_source.split(os.pathsep) if cov_config == os.pathsep: cov_config = True # Activate coverage for this process. 
cov = _active_cov = coverage.Coverage( source=cov_source, branch=cov_branch, data_suffix=True, config_file=cov_config, auto_data=True, data_file=cov_datafile ) cov.load() cov.start() if cov_context: cov.switch_context(cov_context) cov._warn_no_data = False cov._warn_unimported_source = False return cov def _cleanup(cov): if cov is not None: cov.stop() cov.save() cov._auto_save = False # prevent autosaving from cov._atexit in case the interpreter lacks atexit.unregister try: atexit.unregister(cov._atexit) except Exception: pass def cleanup(): global _active_cov global _cleanup_in_progress global _pending_signal _cleanup_in_progress = True _cleanup(_active_cov) _active_cov = None _cleanup_in_progress = False if _pending_signal: pending_singal = _pending_signal _pending_signal = None _signal_cleanup_handler(*pending_singal) multiprocessing_finish = cleanup # in case someone dared to use this internal _previous_handlers = {} _pending_signal = None _cleanup_in_progress = False def _signal_cleanup_handler(signum, frame): global _pending_signal if _cleanup_in_progress: _pending_signal = signum, frame return cleanup() _previous_handler = _previous_handlers.get(signum) if _previous_handler == signal.SIG_IGN: return elif _previous_handler and _previous_handler is not _signal_cleanup_handler: _previous_handler(signum, frame) elif signum == signal.SIGTERM: os._exit(128 + signum) elif signum == signal.SIGINT: raise KeyboardInterrupt() def cleanup_on_signal(signum): previous = signal.getsignal(signum) if previous is not _signal_cleanup_handler: _previous_handlers[signum] = previous signal.signal(signum, _signal_cleanup_handler) def cleanup_on_sigterm(): cleanup_on_signal(signal.SIGTERM)
{ "repo_name": "pytest-dev/pytest-cov", "path": "src/pytest_cov/embed.py", "copies": "1", "size": "3983", "license": "mit", "hash": -518753904982576260, "line_mean": 27.45, "line_max": 117, "alpha_frac": 0.6590509666, "autogenerated": false, "ratio": 4.007042253521127, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5166093220121126, "avg_score": null, "num_lines": null }
# Activate Python venv for the script - uncomment to run script on commandline activate_this_file = "/path/to/bin/activate_this.py" execfile(activate_this_file, dict(__file__ = activate_this_file)) import cx_Oracle import argparse import sys import csv from tqdm import tqdm sys.path.insert(0, '/path/to/gwas_data_sources') import gwas_data_sources def get_all_studies_missing_event_data(event_type): ''' Get all Study Ids that are Published (Housekeeping.Is_published=1) but do not have any data in the StudyEvent and Event tables. ''' try: ip, port, sid, username, password = gwas_data_sources.get_db_properties('DEV3') dsn_tns = cx_Oracle.makedsn(ip, port, sid) connection = cx_Oracle.connect(username, password, dsn_tns) cursor = connection.cursor() cursor.prepare(""" SELECT P.ID, P.PUBMED_ID, S.ID AS STUDY_ID FROM PUBLICATION P, STUDY S, HOUSEKEEPING H WHERE P.ID=S.PUBLICATION_ID and S.HOUSEKEEPING_ID=H.ID and H.IS_PUBLISHED=1 MINUS SELECT P.ID, P.PUBMED_ID, S.ID AS STUDY_ID FROM PUBLICATION P, STUDY S, HOUSEKEEPING H, STUDY_EVENT SE, EVENT E WHERE S.PUBLICATION_ID=P.ID and S.HOUSEKEEPING_ID=H.ID and S.ID=SE.STUDY_ID and SE.EVENT_ID=E.ID and E.EVENT_TYPE = :event_type and H.IS_PUBLISHED=1 """) r = cursor.execute(None, {'event_type': event_type}) studies = cursor.fetchall() cursor.close() connection.close() except cx_Oracle.DatabaseError, exception: print exception # Prepare data to generate Insert statements study_missing_added_date = [] study_missing_published_date = [] for result in tqdm(studies): # print result publication_id, pubmed_id, study_id = [str(col) for col in result] housekeeping_results = _check_housekeeping(study_id) # write to file to review data for item in housekeeping_results: # Generate Insert statements for CREATED if event_type == 'STUDY_CREATION': if item[4] is not None: # Example Insert Statement: # INSERT INTO EVENT VALUES (NULL, TO_DATE('1999-01-01 10:01:01', 'yyyy-mm-dd hh24:mi:ss'), 'STUDY_CREATION', 14978333, null); # Note: User_ID=14978333 
is the ID for the automatic_mapping_process insert_created = "INSERT INTO EVENT VALUES (NULL, TO_DATE("+ \ "'"+str(item[4])+"'" +", 'yyyy-mm-dd hh24:mi:ss'), "+ \ "'"+event_type+"'" +", 14978333, null)" # Execute insert statements if args.mode == 'production': _execute_statements(study_id, insert_created) else: study_missing_added_date.append(item[2]) # If STUDY_ADDED_DATE is not available, then use LAST_UPDATE_DATE insert_created = "INSERT INTO EVENT VALUES (NULL, TO_DATE("+ \ "'"+str(item[6])+"'" +", 'yyyy-mm-dd hh24:mi:ss'), "+ \ "'"+event_type+"'" +", 14978333, null)" # Execute insert statements if args.mode == 'production': _execute_statements(study_id, insert_created) # Generate Insert statements for PUBLISHED if event_type == 'STUDY_STATUS_CHANGE_PUBLISH_STUDY': if item[5] is not None: # Note: User_ID=14978333 is the ID for the automatic_mapping_process insert_published = "INSERT INTO EVENT VALUES (NULL, TO_DATE("+ \ "'"+str(item[5])+"'" +", 'yyyy-mm-dd hh24:mi:ss'), "+ \ "'"+event_type+"'" +", 14978333, null)" # Execute insert statements if args.mode == 'production': _execute_statements(study_id, insert_published) else: study_missing_published_date.append(item[2]) # For testing, no values expected based on query to get data on Line 27 print "** Missing: ", item[2] if len(study_missing_added_date) != 0: print "Studies missing Added Date: ", len(study_missing_added_date), \ "\n", study_missing_added_date if len(study_missing_published_date) != 0: print "Studies missing Published Date: ", len(study_missing_published_date), \ "\n", study_missing_published_date def _check_housekeeping(id): ''' For each study_id missing "Event" data, check it's Housekeeping information. 
''' try: ip, port, sid, username, password = gwas_data_sources.get_db_properties('DEV3') dsn_tns = cx_Oracle.makedsn(ip, port, sid) connection = cx_Oracle.connect(username, password, dsn_tns) cursor = connection.cursor() cursor.prepare(""" SELECT P.ID, P.PUBMED_ID, S.ID, TO_CHAR(H.LAST_UPDATE_DATE, 'yyyy-mm-dd hh24:mi:ss'), TO_CHAR(H.STUDY_ADDED_DATE, 'yyyy-mm-dd hh24:mi:ss'), TO_CHAR(H.CATALOG_PUBLISH_DATE, 'yyyy-mm-dd hh24:mi:ss'), P.PUBLICATION_DATE FROM PUBLICATION P, STUDY S, HOUSEKEEPING H WHERE S.PUBLICATION_ID=P.ID and S.HOUSEKEEPING_ID=H.ID and S.ID = :id """) r = cursor.execute(None, {'id': id}) housekeeping_details = cursor.fetchall() cursor.close() connection.close() except cx_Oracle.DatabaseError, exception: print exception return housekeeping_details def _execute_statements(study_id, sql_statement): ''' Insert a list of Insert statements into the EVENT and STUDY_EVENT tables. ''' # Prepare output file to Append STUDY_EVENT Insert statements to existing file output_file = open('missing_event_data.txt', 'a') csvout = csv.writer(output_file) try: ip, port, sid, username, password = gwas_data_sources.get_db_properties('DEV3') dsn_tns = cx_Oracle.makedsn(ip, port, sid) connection = cx_Oracle.connect(username, password, dsn_tns) cursor = connection.cursor() # https://stackoverflow.com/questions/35327135/retrieving-identity-of-most-recent-insert-in-oracle-db-12c new_id = cursor.var(cx_Oracle.NUMBER) sql_event = sql_statement + " returning id into :new_id" # Write data to file to review csvout.writerow(["StudyID: "+study_id]) csvout.writerow([sql_event]) # Get Event_ID of last row inserted cursor.execute(sql_event, {'new_id': new_id}) event_id = int(new_id.getvalue()) # Execute STUDY_EVENT Insert statement sql_study_event = "INSERT INTO STUDY_EVENT VALUES (" + str(study_id) + ", "+ str(event_id) +")" # Write data to file to review csvout.writerow([sql_study_event+"\n"]) cursor.execute(sql_study_event) # commit or rollback changes if args.mode == 'production': 
cursor.execute('COMMIT') else: cursor.execute('ROLLBACK') cursor.close() connection.close() except cx_Oracle.DatabaseError, exception: print exception def get_studies_missing_first_publication_event(): ''' For studies that have both a 'STUDY_STATUS_CHANGE_PUBLISH_STUDY' Event and a Housekeeping.Catalog_Publish_Date, check if the Housekeeping.Catalog_Publish_Date is earlier than the 'STUDY_STATUS_CHANGE_PUBLISH_STUDY' Event and if so create a new Event for this "first" Publication event. ''' try: ip, port, sid, username, password = gwas_data_sources.get_db_properties('DEV3') dsn_tns = cx_Oracle.makedsn(ip, port, sid) connection = cx_Oracle.connect(username, password, dsn_tns) cursor = connection.cursor() # Get all StudyIds from Housekeeping where Status is Published sql_housekeeping_published_status = """ SELECT P.ID, P.PUBMED_ID, S.ID, H.CATALOG_PUBLISH_DATE FROM PUBLICATION P, STUDY S, HOUSEKEEPING H, CURATION_STATUS CS WHERE S.PUBLICATION_ID=P.ID and S.HOUSEKEEPING_ID=H.ID and H.CURATION_STATUS_ID=CS.ID and H.CURATION_STATUS_ID=6 and H.CATALOG_PUBLISH_DATE IS NOT NULL """ cursor.execute(sql_housekeeping_published_status) housekeeping_published_studies = cursor.fetchall() # For Testing missing_initial_publication_event = [] # Review Catalog_Publish_Date for each StudyId for hp_study in housekeeping_published_studies: hp_study_id = str(hp_study[2]) hp_publish_date = hp_study[3] # Query Study-StudyEvent-Event with this study_id cursor.prepare(""" SELECT P.ID, P.PUBMED_ID, S.ID, E.EVENT_DATE FROM PUBLICATION P, STUDY S, STUDY_EVENT SE, EVENT E WHERE S.PUBLICATION_ID=P.ID and S.ID=SE.STUDY_ID and SE.EVENT_ID=E.ID and E.EVENT_TYPE='STUDY_STATUS_CHANGE_PUBLISH_STUDY' and S.ID = :id and ROWNUM <= 1 ORDER BY E.EVENT_DATE DESC """) r = cursor.execute(None, {'id': hp_study_id}) initial_publication_event = cursor.fetchone() event_type = 'STUDY_STATUS_CHANGE_PUBLISH_STUDY' if initial_publication_event is not None and hp_publish_date < initial_publication_event[3]: 
insert_initial_published = "INSERT INTO EVENT VALUES (NULL, TO_DATE("+ \ "'"+str(hp_publish_date)+"'" +", 'yyyy-mm-dd hh24:mi:ss'), "+ \ "'"+event_type+"'" + ", 14978333, null)" _execute_statements(hp_study_id, insert_initial_published) elif initial_publication_event is None: missing_initial_publication_event.append(hp_study_id) else: pass # print "Studies missing InitialPubEvent Date: ", len(missing_initial_publication_event) cursor.close() connection.close() except cx_Oracle.DatabaseError, exception: print exception if __name__ == '__main__': ''' Find Publications/Studies that are marked as Published but do not have any Created or Published Event data. ''' # Commandline arguments parser = argparse.ArgumentParser() parser.add_argument('--mode', default='debug', choices=['debug', 'production'], help='Run as (default: debug).') args = parser.parse_args() # Get all studies marked as Published in the # Housekeeping table, but do not have a 'CREATED' StudyEvent CREATED = 'STUDY_CREATION' get_all_studies_missing_event_data(CREATED) # Get all studies marked as Published in the # Housekeeping table, but do not have a 'STUDY_STATUS_CHANGE_PUBLISH_STUDY' StudyEvent PUBLISHED = 'STUDY_STATUS_CHANGE_PUBLISH_STUDY' get_all_studies_missing_event_data(PUBLISHED) # Check if the Housekeeping.Catalog_Publish_Date is earlier # than the 'STUDY_STATUS_CHANGE_PUBLISH_STUDY' Event get_studies_missing_first_publication_event()
{ "repo_name": "EBISPOT/goci", "path": "goci-python-scripts/gwas-event-data-curation/baseline_metrics_data.py", "copies": "1", "size": "11558", "license": "apache-2.0", "hash": -5458130713786899000, "line_mean": 35.8089171975, "line_max": 145, "alpha_frac": 0.585308877, "autogenerated": false, "ratio": 3.697376839411388, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4782685716411388, "avg_score": null, "num_lines": null }