prefix
stringlengths
0
918k
middle
stringlengths
0
812k
suffix
stringlengths
0
962k
from lib.font import *
import sys
import fcntl
import termios
import struct


class progress_bar(object):
    """Fixed-width console progress bar with a spinning cursor.

    Renders ``title: [###/---] current/total`` on a single line, using
    backspace characters (chr(8)) to rewind the cursor between updates.
    """

    def __init__(self, tot=100, lenght=10):
        # NOTE: 'lenght' (sic) is kept — it is part of the public keyword
        # interface and callers may pass it by name.
        self.cp = '/-\|'            # spinner frames, cycled by progress()
        self.bar_lenght = lenght    # number of cells between the brackets
        self.tot = tot              # value that corresponds to a full bar
def _types_gen(T):
    """Yield T, then every type registered in its `t` member list.

    Members are yielded one level down, and members-of-members are expanded
    recursively, so a composite such as Any enumerates every primitive it
    covers.  Duplicates are possible; callers tolerate them.
    """
    yield T
    if hasattr(T, 't'):
        for l in T.t:
            yield l
            if hasattr(l, 't'):
                for ll in _types_gen(l):
                    yield ll


class Type(type):
    """A rudimentary extension to `type` that provides polymorphic types
    for run-time type checking of JSON data types.

    IE:
        assert type(u'') == String
        assert type('') == String
        assert type('') == Any
        assert Any.kind('') == String
        assert Any.decode('str') == String
        assert Any.kind({}) == Object
    """

    def __init__(self, *args, **kwargs):
        type.__init__(self, *args, **kwargs)

    def __eq__(self, other):
        # Equal when any type reachable from self matches `other`: either
        # as a registered member of another polymorphic Type, or by plain
        # type equality.
        for T in _types_gen(self):
            if isinstance(other, Type):
                if T in other.t:
                    return True
            if type.__eq__(T, other):
                return True
        return False

    def __str__(self):
        # Short wire name set via N(); 'unknown' before N() has been called.
        return getattr(self, '_name', 'unknown')

    def N(self, n):
        """Set the short name reported by str(); returns self (chainable)."""
        self._name = n
        return self

    def I(self, *args):
        """Register the concrete Python types this Type stands for;
        returns self (chainable)."""
        self.t = list(args)
        return self

    def kind(self, t):
        """Return the member Type matching the value (or type object) `t`.

        NOTE(review): reduce() keeps the *last* matching member and falls
        back to the first generated type when nothing matches — presumably
        intentional, but confirm before relying on the fallback.
        """
        if type(t) is Type:
            return t
        ty = lambda t: type(t)
        if type(t) is type:
            ty = lambda t: t
        return reduce(
            lambda L, R: R if (hasattr(R, 't') and ty(t) == R) else L,
            filter(lambda T: T is not Any, _types_gen(self)))

    def decode(self, n):
        """Return the member Type whose short name equals the string `n`."""
        return reduce(
            lambda L, R: R if (str(R) == n) else L, _types_gen(self))


# JSON primitives and data types
# NOTE(review): `long` and `unicode` make this module Python 2 only.
Object = Type('Object', (object,), {}).I(dict).N('obj')
Number = Type('Number', (object,), {}).I(int, long).N('num')
Boolean = Type('Boolean', (object,), {}).I(bool).N('bit')
String = Type('String', (object,), {}).I(str, unicode).N('str')
Array = Type('Array', (object,), {}).I(list, set, tuple).N('arr')
Nil = Type('Nil', (object,), {}).I(type(None)).N('nil')
Any = Type('Any', (object,), {}).I(
    Object, Number, Boolean, String, Array, Nil).N('any')
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Django views exposing a JSON API over Sqoop jobs (list/create/update/
# clone/delete/start/stop/status).  Every response body is a JSON document
# with 'status' (0 = OK, 100 = Sqoop-side error), 'errors', and a payload
# key ('job', 'jobs' or 'submission').  Python 2 syntax throughout.

try:
  import json
except ImportError:
  import simplejson as json

import logging
import socket  # NOTE(review): unused in this module — candidate for removal

from django.http import HttpResponse
from django.utils.translation import ugettext as _

from sqoop import client, conf
from sqoop.client.exception import SqoopException
from decorators import get_job_or_exception
from desktop.lib.exceptions import StructuredException
from desktop.lib.rest.http_client import RestException
from exception import handle_rest_exception
from utils import list_to_dict
from django.views.decorators.cache import never_cache

__all__ = ['get_jobs', 'create_job', 'update_job', 'job', 'jobs', 'job_clone', 'job_delete', 'job_start', 'job_stop', 'job_status']


LOG = logging.getLogger(__name__)


@never_cache
def get_jobs(request):
  """Return all Sqoop jobs as {'jobs': {...}}; REST failures are folded
  into the response instead of raising."""
  response = {
    'status': 0,
    'errors': None,
    'jobs': []
  }
  try:
    c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
    jobs = c.get_jobs()
    response['jobs'] = list_to_dict(jobs)
  except RestException, e:
    response.update(handle_rest_exception(e, _('Could not get jobs.')))
  return HttpResponse(json.dumps(response), mimetype="application/json")


@never_cache
def create_job(request):
  """Create a job from the JSON document in POST['job']; POST only."""
  if request.method != 'POST':
    raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405)

  response = {
    'status': 0,
    'errors': None,
    'job': None
  }

  if 'job' not in request.POST:
    raise StructuredException(code="INVALID_REQUEST_ERROR", message=_('Error saving job'), data={'errors': 'job is missing.'}, error_code=400)

  d = json.loads(request.POST['job'])
  job = client.Job.from_dict(d)

  try:
    c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
    response['job'] = c.create_job(job).to_dict()
  except RestException, e:
    response.update(handle_rest_exception(e, _('Could not create job.')))
  except SqoopException, e:
    response['status'] = 100
    response['errors'] = e.to_dict()
  return HttpResponse(json.dumps(response), mimetype="application/json")


@never_cache
def update_job(request, job):
  """Overwrite `job` with the JSON document in POST['job']; POST only."""
  if request.method != 'POST':
    raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405)

  response = {
    'status': 0,
    'errors': None,
    'job': None
  }

  if 'job' not in request.POST:
    raise StructuredException(code="INVALID_REQUEST_ERROR", message=_('Error saving job'), data={'errors': 'job is missing.'}, error_code=400)

  job.update_from_dict(json.loads(request.POST['job']))

  try:
    c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
    response['job'] = c.update_job(job).to_dict()
  except RestException, e:
    response.update(handle_rest_exception(e, _('Could not update job.')))
  except SqoopException, e:
    response['status'] = 100
    response['errors'] = e.to_dict()
  return HttpResponse(json.dumps(response), mimetype="application/json")


@never_cache
def jobs(request):
  """Dispatch for the jobs collection: GET lists, POST creates."""
  if request.method == 'GET':
    return get_jobs(request)
  elif request.method == 'POST':
    return create_job(request)
  else:
    raise StructuredException(code="INVALID_METHOD", message=_('GET or POST request required.'), error_code=405)


@never_cache
@get_job_or_exception()
def job(request, job):
  """Dispatch for a single job: GET returns it, POST updates it."""
  response = {
    'status': 0,
    'errors': None,
    'job': None
  }
  if request.method == 'GET':
    response['job'] = job.to_dict()
    return HttpResponse(json.dumps(response), mimetype="application/json")
  elif request.method == 'POST':
    return update_job(request, job)
  else:
    raise StructuredException(code="INVALID_METHOD", message=_('GET or POST request required.'), error_code=405)


@never_cache
@get_job_or_exception()
def job_clone(request, job):
  """Create a copy of `job` named '<name>-copy'; POST only."""
  if request.method != 'POST':
    raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405)

  response = {
    'status': 0,
    'errors': None,
    'job': None
  }

  # id of -1 makes the Sqoop server treat this as a brand new job
  job.id = -1
  job.name = '%s-copy' % job.name

  try:
    c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
    response['job'] = c.create_job(job).to_dict()
  except RestException, e:
    response.update(handle_rest_exception(e, _('Could not clone job.')))
  except SqoopException, e:
    response['status'] = 100
    response['errors'] = e.to_dict()
  return HttpResponse(json.dumps(response), mimetype="application/json")


@never_cache
@get_job_or_exception()
def job_delete(request, job):
  """Delete `job` on the Sqoop server; POST only."""
  if request.method != 'POST':
    raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405)

  response = {
    'status': 0,
    'errors': None,
    'job': None
  }

  try:
    c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
    c.delete_job(job)
  except RestException, e:
    response.update(handle_rest_exception(e, _('Could not delete job.')))
  except SqoopException, e:
    response['status'] = 100
    response['errors'] = e.to_dict()
  return HttpResponse(json.dumps(response), mimetype="application/json")


@never_cache
@get_job_or_exception()
def job_start(request, job):
  """Start `job` and return the resulting submission; POST only."""
  if request.method != 'POST':
    raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405)

  response = {
    'status': 0,
    'errors': None,
    'submission': None
  }

  try:
    c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
    response['submission'] = c.start_job(job).to_dict()
  except RestException, e:
    response.update(handle_rest_exception(e, _('Could not start job.')))
  except SqoopException, e:
    response['status'] = 100
    # NOTE(review): wrapped in a list here, unlike the other views which
    # assign e.to_dict() directly — confirm which shape clients expect.
    response['errors'] = [e.to_dict()]
  return HttpResponse(json.dumps(response), mimetype="application/json")


@never_cache
@get_job_or_exception()
def job_stop(request, job):
  """Stop `job` and return the resulting submission; POST only."""
  if request.method != 'POST':
    raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405)

  response = {
    'status': 0,
    'errors': None,
    'submission': None
  }

  try:
    c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
    response['submission'] = c.stop_job(job).to_dict()
  except RestException, e:
    response.update(handle_rest_exception(e, _('Could not stop job.')))
  except SqoopException, e:
    response['status'] = 100
    response['errors'] = e.to_dict()
  return HttpResponse(json.dumps(response), mimetype="application/json")


@never_cache
@get_job_or_exception()
def job_status(request, job):
  """Return the submission status of `job`; GET only."""
  if request.method != 'GET':
    raise StructuredException(code="INVALID_METHOD", message=_('GET request required.'), error_code=405)

  response = {
    'status': 0,
    'errors': None,
    'submission': None
  }

  try:
    c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
    response['submission'] = c.get_job_status(job).to_dict()
  except RestException, e:
    response.update(handle_rest_exception(e, _('Could not get job status.')))
  except SqoopException, e:
    response['status'] = 100
    response['errors'] = e.to_dict()
  return HttpResponse(json.dumps(response), mimetype="application/json")
##<Identify the basis>############################################################
def Base(A):
    """Return, ordered by row, the column indices of A that hold what the
    caller treats as canonical basis vectors e_i.

    Columns are scanned right-to-left (column 0 is never considered, and the
    column count is taken from row 1, matching the original layout of the
    tableau).  A column is accepted as a candidate when it contains exactly
    one 1 — the surrounding code assumes such a column is a unit vector.
    """
    candidates = []   # basis columns found, right-to-left
    ordered = []      # candidate columns re-ordered by the row holding the 1
    nrows = len(A)
    for col in range(len(A[1]) - 1, 0, -1):
        column = [row[col] for row in A]
        if column.count(1) == 1:
            candidates.append(col)
        # Stop once we have seen at least as many candidates as rows —
        # no larger basis is possible.
        if len(candidates) >= nrows:
            break
    for i in range(nrows):
        for col in candidates:
            if A[i][col] == 1:
                ordered.append(col)
    return ordered
##</Identify the basis>############################################################
#######################################
##<Identify entering and leaving variables>############################################################
def EntradaSalida(A, ZC, XB, min_max):
    """Pick the entering column and leaving row for one simplex pivot.

    A       -- current tableau (Yij values)
    ZC      -- current Zj-Cj row
    XB      -- current basic solution values
    min_max -- "M" for maximisation (pick most negative Zj-Cj), anything
               else minimises (pick most positive Zj-Cj)
    Returns (entrada, salida): entering column index and leaving row index.
    """
    entrada = 0  # entering column index, starts at 0
    salida = 0
    if min_max == "M":
        # scan ZC from the second entry on; keep the smallest (<=) value
        for i in range(1, len(ZC)):
            if ZC[i] <= ZC[entrada]:
                entrada = i
    else:
        # minimisation: keep the largest (>=) value instead
        for i in range(1, len(ZC)):
            if ZC[i] >= ZC[entrada]:
                entrada = i
    # first row with a positive pivot-column entry — guards the ratio test
    # below against division by zero
    for j in range(len(A)):
        if A[j][entrada] > 0:
            salida = j
            break
    # ratio test XB/Yij: keep the row with the smallest (<=) ratio,
    # again only over rows with a positive pivot-column entry
    for j in range(1, len(A)):
        if A[j][entrada] > 0:
            if XB[j]/A[j][entrada] <= XB[salida]/A[salida][entrada]:
                salida = j
    return entrada, salida
##</Identify entering and leaving variables>############################################################


##<Compute the transformation equations>############################################################
def Ecuaciones_Trans(A, XB, ZC, entrada, salida):
    """Apply one simplex pivot and return the new (Yij, Zj-Cj, XB).

    Prints (or writes to the global `output` file when the global `wo` flag
    is set) every transformed value in hand-worked notation.  Runtime
    strings are user-facing Spanish output and are left untouched.
    """
    if wo == False:
        print "Entra: " + str(entrada) + " Sale: " + str(salida) + "\nYij:"
    else:
        output.write("\n\nEntra: " + str(entrada) + " Sale: " + str(salida) + "\nYij:\n")
    Yij = []
    ##Compute Y######
    for i in range(len(A)):  # walk the whole matrix A, i.e. every Yij vector
        SYij = []
        if i != salida:
            # rows other than the leaving row r use the general update rule
            for k in range(len(A[i])):
                SYij.append(A[i][k]-(A[salida][k]/A[salida][entrada])*A[i][entrada])  # store the whole row in a vector
                ygorro = "^y" + str(i) + str(k) + " = y" + str(i) + str(k) + " - y" + str(salida) + str(k) + "/y" + str(salida) + str(entrada) + "*Y" + str(i) + str(entrada) + " = " + str(A[i][k]-(A[salida][k]/A[salida][entrada])*A[i][entrada])
                if wo == False:
                    print ygorro
                else:
                    output.write(ygorro + "\n")
            Yij.append(SYij)  # all rows accumulate into the matrix that becomes the new A
        else:
            # the leaving row itself is simply divided by the pivot element
            for k in range(len(A[i])):
                varsyij = A[salida][k]/A[salida][entrada]
                SYij.append(varsyij)
                ygorro = "^y" + str(i) + str(k) + " = y" + str(salida) + str(k) + "/y" + str(salida) + str(entrada) + " = " + str(varsyij)
                if wo == False:
                    print ygorro
                else:
                    output.write(ygorro + "\n")
            Yij.append(SYij)  # all rows accumulate into the matrix that becomes the new A
    ##Compute Zj-Cj######
    if wo == False:
        print "\nZj-Cj:"
    else:
        output.write("\nZj-Cj:\n")
    SZC = []
    for k in range(len(ZC)):
        # update every Zj-Cj with the pivot formula; SZC becomes the new ZC
        varszc = ZC[k]-(A[salida][k]/A[salida][entrada])*ZC[entrada]
        SZC.append(varszc)
        zcgorro = "^Z" + str(k) + " - C" + str(k) + " = (Z" + str(k) + " - C" + str(k) + ") - ( y" + str(salida) + str(k) + " / y" + str(salida) + str(entrada) + " ) * (Z" + str(entrada) + " - C" + str(entrada) + ") = " + str(varszc)
        if wo == False:
            print zcgorro
        else:
            output.write(zcgorro + "\n")
    ##Compute the XB values#####
    if wo == False:
        print "\nXB's:"
    else:
        output.write("\nXB's:")
    SXB = []
    for i in range(len(XB)):
        # transformed basic values accumulate in SXB, the new XB
        if i != salida:
            varsxb = XB[i]-(XB[salida]/A[salida][entrada])*A[i][entrada]
            SXB.append(varsxb)
            xbgorro = "^XB" + str(i) + " = XB" + str(i) + " - (XB" + str(salida) + " / y" + str(salida) + str(entrada) + " ) * y" + str(i) + str(entrada) + " = " + str(varsxb)
            if wo == False:
                print xbgorro
            else:
                output.write(xbgorro + "\n")
        else:
            varsxb = XB[salida]/A[salida][entrada]
            SXB.append(varsxb)
            xbgorro = "^XB" + str(i) + " = XB" + str(salida) + " / y" + str(salida) + str(entrada) + " = " + str(varsxb)
            if wo == False:
                print xbgorro
            else:
                output.write(xbgorro + "\n")
    ##Hand back the results of the transformation equations###########
    return Yij, SZC, SXB  # returned for reassignment by the caller
##</Compute the transformation equations>############################################################
######################################################### </FUNCIONES> #######################################################


##################################################### <MAIN> ################################################
print "\nResolución de PPL, por el metodo Simplex"
# Initialise state ######
rest = ""
restricciones = []   # constraint strings
datos = False        # True once all input data has been collected
wo = False           # True when writing results to a file instead of stdout
############################
## Read the problem from a file (command-line driven)
for argumento in sys.argv:
    if argumento == "-V" or argumento == "--about":
        # NOTE(review): banner line breaks reconstructed — the original
        # layout of this triple-quoted string could not be recovered exactly.
        print """Resolución del Metodo Simplex
Version 1.2 RC
Desarrollado por:
> Ramirez Vasquez Indira 131162
> Rivas Espinoza Arturo
> Reyes Conde Ivan 131621
Oct 2008
"""
        sys.exit()
    if argumento[:2] == "-S":
        f = open(argumento[3:], 'r')          # open the file named by -S
        fuente = f.readlines()                # slurp its lines
        f.close()
        min_max = fuente[0][0]                # first char: M/m flag
        eq = fuente[1][:-1]                   # objective function (strip \n)
        for i in range(2, len(fuente)-1):
            restricciones.append(fuente[i][:-1])  # remaining lines: constraints
        datos = True                          # all data collected
## Otherwise prompt for the data interactively
if datos == False:
    ##Ask for the function to optimise
    min_max = raw_input("Es un problema de Maximización/minimización? (M/m) ")
    while (min_max.upper() != "M"):
        min_max = raw_input("Opción no valida, Maximización o minimizacion? (M/m)")
    if min_max == "M":
        print "\nProblema de Maximizacion"
    elif min_max == "m":
        print "\nProblema de minimizacion"
    eq = raw_input("\nIntroduzca la ecuación Z a optimizar\nZ=")
    ##Ask for the constraints
    print "\n\nIntroduzca las restricciones\n"
    while rest != ".":  # keep adding constraints until a lone '.' is entered
        rest = raw_input()
        if rest != ".":
            restricciones.append(rest)
for argumento in sys.argv:
    if argumento[:2] == "-O":
        output = open(argumento[3:], 'w')  # -O<file>: write results to file
        wo = True
#####PERFORMING THE OPERATIONS
##Build the required vectors
# NOTE(review): GetVectorC, MatrixA and GetCB are defined elsewhere in the
# original program — not visible in this chunk.
C = GetVectorC(eq)                                   # find C, adding slack/artificial variables
A, XB, C = MatrixA(restricciones, C, 10000, min_max)  # first matrix A, with big-M = 10,000. TODO ask for M
ColBase = Base(A)    # find and order the columns holding the canonical basis
Cb = GetCB(C, ColBase)
# Generate and process one synthetic pyaf benchmark series:
# 128 daily points, deterministic (seed 0), linear trend with a 12-step
# cycle, no transform, zero noise, 20 exogenous series, AR order 0.
import pyaf.Bench.TS_datasets as tsds
# NOTE(review): tsds is not referenced below — presumably imported for its
# side effects or by convention with sibling test scripts; confirm.
import tests.artificial.process_artificial_dataset as art

art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 12, transform = "None", sigma = 0.0, exog_count = 20, ar_order = 0);
# Copyright 2018 Flight Lab authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for network related helpers."""

import socket


def get_ip():
    """Get primary IP (the one with a default route) of local machine.

    This works on both Linux and Windows platforms, and doesn't require a
    working internet connection: connecting a UDP socket sends no packets,
    it only asks the OS which source interface would be used for the route.

    Returns:
      The primary IPv4 address as a string, or '127.0.0.1' when no route
      can be determined.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # doesn't even have to be reachable — no datagram is actually sent
        s.connect(('10.255.255.255', 1))
        return s.getsockname()[0]
    except socket.error:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; socket.error is OSError on Python 3.
        return '127.0.0.1'
    finally:
        s.close()
LASS: FuelCellEngDclDataParticleRecovered } } self._telemetered_parser_config = { DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.fuelcell_eng_dcl', DataSetDriverConfigKeys.PARTICLE_CLASS: None, DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: { FuelCellEngDclParticleClassKey.ENGINEERING_DATA_PARTICLE_CLASS: FuelCellEngDclDataParticleTelemetered } } self._incomplete_parser_config = { DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.fuelcell_eng_dcl', DataSetDriverConfigKeys.PARTICLE_CLASS: None } self._bad_parser_config = { DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.fuelcell_eng_dcl', DataSetDriverConfigKeys.PARTICLE_CLASS: None, DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {} } def test_simple(self): """ Read file and verify that all expected particles can be read. Verify that the contents of the particles are correct. This is the happy path. """ log.debug('===== START TEST SIMPLE =====') num_particles_to_request = 25 num_expected_particles = 20 # Test the recovered version log.debug('------ RECOVERED ------') with open(os.path.join(RESOURCE_PATH, '20141207s.pwrsys.log'), 'rU') as file_handle: parser = FuelCellEngDclParser(self._recovered_parser_config, file_handle, self.exception_callback) particles = parser.get_records(num_particles_to_request) self.assertEquals(len(particles), num_expected_particles) self.assert_particles(particles, 'recovered_20141207s.pwrsys.yml', RESOURCE_PATH) # Test the telemetered version log.debug('----- TELEMETERED -----') with open(os.path.join(RESOURCE_PATH, '20141207s.pwrsys.log'), 'rU') as file_handle: parser = FuelCellEngDclParser(self._telemetered_parser_config, file_handle, self.exception_callback) particles = parser.get_records(num_particles_to_request) self.assertEquals(len(particles), num_expected_particles) self.assert_particles(particles, 'telemetered_20141207s.pwrsys.yml', RESOURCE_PATH) log.debug('===== END TEST SIMPLE =====') def test_bigfile(self): "
"" Read file and verify that all expected particles can be read. Verify that the expected number of particles are produced. Only one test is run as the content of the input files is the same for recovered or telemetered. """ log.debug('===== START TEST BIGFILE =====') num_particles_to_request = num_expected_particles = 870 with open(os.path.join(RESOURCE_PATH, '20141207.pwrsys
.log'), 'rU') as file_handle: parser = FuelCellEngDclParser(self._recovered_parser_config, file_handle, self.exception_callback) particles = parser.get_records(num_particles_to_request) self.assertEquals(len(particles), num_expected_particles) log.debug('===== END TEST BIGFILE =====') def test_bad_checksum(self): """ Read file and verify that all expected particles can be read. There are two lines with bad checksums in the file. The checksum after the colon is incorrect on lines 10 and 23 of the input file. Only one test is run as the content of the input files is the same for recovered or telemetered. """ log.debug('===== START TEST BAD CHECKSUM =====') num_particles_to_request = num_expected_particles = 18 with open(os.path.join(RESOURCE_PATH, '20141207s_bcs.pwrsys.log'), 'rU') as file_handle: parser = FuelCellEngDclParser(self._recovered_parser_config, file_handle, self.exception_callback) particles = parser.get_records(num_particles_to_request) self.assertEquals(len(particles), num_expected_particles) log.debug('===== END TEST BAD CHECKSUM =====') def test_badly_formed(self): """ Read file and verify that all expected particles can be read. 
Line 1 Improperly formatted - No particle generated Line 2 Improperly formatted - No particle generated Line 9 - Bad checksum - No particle generated No fuel cell data present on line 11 - No particle generated No fuel cell data present on line 12 - No particle generated No fuel cell data present on line 13 - No particle generated No fuel cell data present on line 14 - No particle generated No fuel cell data present on line 15 - No particle generated Line 20 - Bad checksum - No particle generated Line 24 Improperly formatted - No particle generated Line 26 Improperly formatted - No particle generated Line 27 Improperly formatted - No particle generated Line 28 Bad/Missing Timestamp - No particle generated Line 29 Bad/Missing Timestamp - No particle generated Line 30 No data found - No particle generated Line 31 No terminator found - No particle generated Line 32 Improper format - No particle generated Only one test is run as the content of the input files is the same for recovered or telemetered. """ log.debug('===== START TEST BADLY FORMED =====') num_particles_to_request = 33 num_expected_particles = 16 with open(os.path.join(RESOURCE_PATH, '20141207_badform.pwrsys.log'), 'rU') as file_handle: parser = FuelCellEngDclParser(self._recovered_parser_config, file_handle, self.exception_callback) particles = parser.get_records(num_particles_to_request) self.assertEquals(len(particles), num_expected_particles) log.debug('===== END TEST BADLY FORMED =====') def test_bad_configuration(self): """ Attempt to build a parser with a bad configuration. """ log.debug('===== START TEST BAD CONFIGURATION =====') with open(os.path.join(RESOURCE_PATH, '20141207s.pwrsys.log'), 'rU') as file_handle: with self.assertRaises(ConfigurationException): parser = FuelCellEngDclParser(self._bad_parser_config, file_handle, self.exception_callback) log.debug('===== END TEST BAD CONFIGURATION =====') def test_partial_configuration(self): """ Attempt to build a parser with a bad configuration. 
""" log.debug('===== START TEST PARTIAL CONFIGURATION =====') with open(os.path.join(RESOURCE_PATH, '20141207s.pwrsys.log'), 'rU') as file_handle: with self.assertRaises(ConfigurationException): parser = FuelCellEngDclParser(self._incomplete_parser_config, file_handle, self.exception_callback) log.debug('===== END TEST PARTIAL CONFIGURATION =====') def test_blank_line(self): """ Read file and verify that all expected particles can be read. Verify that the contents of the particles are correct. There are blank lines interspersed in the file. This test verifies that these blank lines do not adversely affect the parser. Only one test is run as the content of the input files is the same for recovered or telemetered. """ log.debug('===== START TEST BLANK LINE =====') num_particles_to_request = 25 num_expected_particles = 20 with open(os.pa
""" Github Authentication """ import httplib2 from django.conf import settings from django.core.mail import send_mail from oauth2client.client import OAuth2WebServerFlow from helios_auth import utils # some parameters to indicate that status updating is not possible STATUS_UPDATES = False # display tweaks LOGIN_MESSAGE = "Log in with GitHub" def get_flow(redirect_url=None): return OAuth2WebServerFlow( client_id=settings.GH_CLIENT_ID, client_secret=settings.GH_CLIENT_SECRET, scope='read:user user:email', auth_uri="https://github.com/login/oauth/authorize", token_uri="https://github.com/login/oauth/access_token", redirect_uri=redirect_url, ) def get_auth_url(request, redirect_url): flow = get_flow(redirect_url) request.session['gh_redirect_uri'] = redirect_url return flow.step1_get_authorize_url() def get_user_info_after_auth(request): redirect_uri = request.session['gh_redirect_uri'] del request.session['gh_redirect_uri'] flow = get_flow(redirect_uri) if 'code' not in request.GET: return None code = request.GET['code'] credentials = flow.step2_exchange(code) http = httplib2.Http(".cache") http = credentials.authorize(http) (_, content) = http.request("https://api.github.com/user", "GET") response = utils.from_json(content.decode('utf-8')) user_id = response['login'] user_name = response['name'] (_, content) = http.request("https://api.github.com/user/emails", "GET") response = utils.from_json(content.decode('utf-8')) user_email = None for email in response: if email['verified'] and email['primary']: user_email = email['email'] break if not user_email: raise Exception("email address with G
itHub not verified") return { 'type': 'github', 'user_id': user_id, 'name': '%s (%s)' % (user_id, user_name), 'info': {'email': user_email}, 'token': {}, } def do_logout(user): return None def update_status(token, message): pass def send_message(user_id, name, user_info, subject, body): send_mail( subject, body, settings.SERVER_EMAIL, ["%s <%s>" % (user_id, user_info['email'])], fail_silently=False, ) def check_constraint(
eligibility, user_info): pass # # Election Creation # def can_create_election(user_id, user_info): return True
result = self.initialize() if result['OK']: self.valid = True else: gLogger.error("Keystone initialization failed: %s" % result['Message']) def initialize(self): """ Initialize the Keystone object obtaining the corresponding token :return: S_OK/S_ERROR """ self.log.debug("Initializing for API version %d" % self.apiVersion) result = self.getToken() if not result['OK']: return result # If the tenant is not specified, try to get it and obtain the tenant specific token if not self.project: result = self.getTenants() if not result['OK']: return result if result['Value']: self.project, self.projectID = result['Value'][0] result = self.getToken(force=True) if not result['OK']: return result return S_OK() def getToken(self, force=False): """Get the Keystone token :param force: flag to force getting the token if even there is one in the cache :return: S_OK(token) or S_ERROR """ if self.token is not None and not force: if self.expires and (self.expires - dateTime()).seconds > 300: return S_OK(self.token) if self.apiVersion == 2: result = self.__getToken2() else: result = self.__getToken3() return result def __getToken2(self): """Get the Keystone token for the version v2 of the keystone service :return: S_OK(token) or S_ERROR """ user = self.parameters.get('User') password = self.parameters.get('Password') authArgs = {} if user and password: authDict = {'auth': {"passwordCredentials": {"username": user, "password": password} } } if self.project: authDict['auth']['tenantName'] = self.project elif self.parameters.get('Auth') == "voms": authDict = {'auth': {'voms': True}} if self.project: authDict['auth']['tenantName'] = self.project if self.parameters.get('Proxy'): authArgs['cert'] = self.parameters.get('Proxy') try: result = requests.post("%s/tokens" % self.url, headers={"Content-Type": "application/json"}, json=authDict, verify=self.caPath, **authArgs) except Exception as exc: return S_ERROR('Exception getting keystone token: %s' % str(exc)) output = result.json() if 
result.status_code in [400, 401]: message = "None" if 'error' in output: message = output['error'].get('message') return S_ERROR('Authorization error: %s' % message) self.token = str(output['access']['token']['id']) expires = fromString(str(output['access']['token']['expires']).replace('T', ' ').replace('Z', '')) issued = fromString(str(output['access']['token']['issued_at']).replace('T', ' ').replace('Z', '')) self.expires = dateTime() + (expires - issued) self.projectID = output['access']['token']['tenant']['id'] for endpoint in output['access']['serviceCatalog']: if endpoint['type'] == 'compute': self.computeURL = str(endpoint['endpoints'][0]['publicURL']) elif endpoint['type'] == 'image': self.imageURL = str(endpoint['endpoints'][0]['publicURL']) elif endpoint['type'] == 'network': self.networkURL = str(endpoint['endpoints'][0]['publicURL']) return S_OK(self.token) def __getToken3(self): """Get the Keystone token for the version v3 of the keystone service :return: S_OK(token) or S_ERROR """ domain = self.parameters.get('Domain', "Default") user = self.parameters.get('User') password = self.parameters.get('Password') appcred_file = self.parameters.get('Appcred') authDict = {} authArgs = {} if user and password: authDict = {'auth': {"identity": {"methods": ["password"], "password": {"user": {"name": user, "domain": {"name": domain}, "password": password } } } } } elif self.parameters.get('Auth') == "voms": authDict = {"auth": {"identity": {"methods": ["mapped"], "mapped": {'voms': True, 'identity_provider': 'egi.eu',
"protocol": 'mapped'}}}} if self.parameters.get('Proxy'): authArgs['cert'
] = self.parameters.get('Proxy') elif appcred_file: # The application credentials are stored in a file of the format: # id secret ac_fd = open(appcred_file, 'r') auth_info = ac_fd.read() auth_info = auth_info.strip() ac_id, ac_secret = auth_info.split(" ", 1) ac_fd.close() authDict = {'auth': {"identity": {"methods": ["application_credential"], "application_credential": {"id": ac_id, "secret": ac_secret}}}} else: return S_ERROR("No valid credentials provided") # appcred includes the project scope binding in the credential itself if self.project and not appcred_file: authDict['auth']['scope'] = {"project": {"domain": {"name": domain}, "name": self.project } } gLogger.debug('Request token with auth arguments: %s and body %s' % (str(authArgs), str(authDict))) url = "%s/auth/tokens" % self.url try: result = requests.post(url, headers={"Content-Type": "application/json", "Accept": "application/json", }, json=authDict, verify=self.caPath, **authArgs) except Exception as exc: return S_ERROR('Exception getting keystone token: %s' % str(exc)) if result.status_code not in [200, 201, 202, 203, 204]: return S_ERROR('Failed to get keystone token: %s' % result.text) try: self.token = result.headers['X-Subject-Token'] except Exception as exc: return S_ERROR('Failed to get keystone token: %s' % str(exc)) output = result.json() expires = fromString(str(output['token']['expires_at']).replace('T', ' ').replace('Z', '')) issued = fromString(str(output['token']['issued_at']).replace('T', ' ').replace('Z', '')) self.expires = dateTime() + (expires - issued) if 'project' in output['token']: if output['token']['project']['name'] == self.project: self.projectID = output['token']['project']['id'] if 'catalog' in output['token']: for service in output['token']['catalog']: if service['type'] == 'compute': for endpoint in service['endpoints']: if endpoint['interface'] == 'public': self.computeURL = str(endpoint['url']) elif service['type'] == 'image': for endpoint in service['endpoints']: if 
endpoint['interface'] == 'public': self.imageURL = str(endpoint['url']) elif service['type'] == 'network': for endpoint in service['endpoints']: if endpoint['interface'] == 'public': self.networkURL = str(endpoint['url']) return S_OK(self.token) def getTenants(self): """Get available tenants for the current token :return: S_OK((tenant, tenant_id)) or S_ERROR """ if self.token is None: return S_ERROR("No Keystone token yet available") try: result = requests.get("%s/tenants" % self.url, headers={"Content-Type": "appli
# Unit tests for pymemcache's HashClient, which consistently hashes keys
# across several underlying memcached client instances.
from pymemcache.client.hash import HashClient
from pymemcache.client.base import Client, PooledClient
from pymemcache.exceptions import MemcacheError, MemcacheUnknownError
from pymemcache import pool
from .test_client import ClientTestMixin, MockSocket
import unittest
import pytest
import mock
import socket


class TestHashClient(ClientTestMixin, unittest.TestCase):
    """Tests for HashClient key routing, error handling and no-server cases."""

    def make_client_pool(self, hostname, mock_socket_values,
                         serializer=None, **kwargs):
        # Build a Client whose socket replays the canned byte responses in
        # mock_socket_values, so no real network traffic happens.
        mock_client = Client(hostname, serializer=serializer, **kwargs)
        mock_client.sock = MockSocket(mock_socket_values)
        client = PooledClient(hostname, serializer=serializer)
        client.client_pool = pool.ObjectPool(lambda: mock_client)
        # NOTE(review): the PooledClient built above is discarded and the bare
        # mocked Client is returned, so the pool wrapper is never exercised --
        # presumably intentional since callers only need the mocked connection.
        return mock_client

    def make_client(self, *mock_socket_values, **kwargs):
        # Register one mocked backend per canned-response list, on
        # consecutive ports starting at 11012.
        current_port = 11012
        client = HashClient([], **kwargs)
        ip = '127.0.0.1'

        for vals in mock_socket_values:
            s = '%s:%s' % (ip, current_port)
            c = self.make_client_pool(
                (ip, current_port),
                vals,
                **kwargs
            )
            client.clients[s] = c
            client.hasher.add_node(s)
            current_port += 1
        return client

    def test_setup_client_without_pooling(self):
        # add_server should construct a plain Client carrying the
        # HashClient's stored kwargs when pooling is disabled.
        with mock.patch('pymemcache.client.hash.Client') as internal_client:
            client = HashClient([], timeout=999, key_prefix='foo_bar_baz')
            client.add_server('127.0.0.1', '11211')

        assert internal_client.call_args[0][0] == ('127.0.0.1', '11211')
        kwargs = internal_client.call_args[1]
        assert kwargs['timeout'] == 999
        assert kwargs['key_prefix'] == 'foo_bar_baz'

    def test_get_many_all_found(self):
        client = self.make_client(*[
            [b'STORED\r\n', b'VALUE key3 0 6\r\nvalue2\r\nEND\r\n', ],
            [b'STORED\r\n', b'VALUE key1 0 6\r\nvalue1\r\nEND\r\n', ],
        ])

        def get_clients(key):
            # Pin the key->backend routing so the canned responses line up.
            if key == b'key3':
                return client.clients['127.0.0.1:11012']
            else:
                return client.clients['127.0.0.1:11013']

        client._get_client = get_clients
        result = client.set(b'key1', b'value1', noreply=False)
        result = client.set(b'key3', b'value2', noreply=False)
        result = client.get_many([b'key1', b'key3'])
        assert result == {b'key1': b'value1', b'key3': b'value2'}

    def test_get_many_some_found(self):
        # First backend answers END (miss); only key1 should come back.
        client = self.make_client(*[
            [b'END\r\n', ],
            [b'STORED\r\n', b'VALUE key1 0 6\r\nvalue1\r\nEND\r\n', ],
        ])

        def get_clients(key):
            if key == b'key3':
                return client.clients['127.0.0.1:11012']
            else:
                return client.clients['127.0.0.1:11013']

        client._get_client = get_clients
        result = client.set(b'key1', b'value1', noreply=False)
        result = client.get_many([b'key1', b'key3'])
        assert result == {b'key1': b'value1'}

    def test_get_many_bad_server_data(self):
        # Corrupted protocol line ("VAXLUE") must surface as
        # MemcacheUnknownError when ignore_exc is off.
        client = self.make_client(*[
            [b'STORED\r\n', b'VAXLUE key3 0 6\r\nvalue2\r\nEND\r\n', ],
            [b'STORED\r\n', b'VAXLUE key1 0 6\r\nvalue1\r\nEND\r\n', ],
        ])

        def get_clients(key):
            if key == b'key3':
                return client.clients['127.0.0.1:11012']
            else:
                return client.clients['127.0.0.1:11013']

        client._get_client = get_clients

        with pytest.raises(MemcacheUnknownError):
            client.set(b'key1', b'value1', noreply=False)
            client.set(b'key3', b'value2', noreply=False)
            client.get_many([b'key1', b'key3'])

    def test_get_many_bad_server_data_ignore(self):
        # Same corruption, but ignore_exc=True swallows it and yields {}.
        client = self.make_client(*[
            [b'STORED\r\n', b'VAXLUE key3 0 6\r\nvalue2\r\nEND\r\n', ],
            [b'STORED\r\n', b'VAXLUE key1 0 6\r\nvalue1\r\nEND\r\n', ],
        ], ignore_exc=True)

        def get_clients(key):
            if key == b'key3':
                return client.clients['127.0.0.1:11012']
            else:
                return client.clients['127.0.0.1:11013']

        client._get_client = get_clients

        client.set(b'key1', b'value1', noreply=False)
        client.set(b'key3', b'value2', noreply=False)
        result = client.get_many([b'key1', b'key3'])
        assert result == {}

    def test_gets_many(self):
        # gets returns (value, cas_token) pairs; the trailing "1" in the
        # canned VALUE lines is the cas token.
        client = self.make_client(*[
            [b'STORED\r\n', b'VALUE key3 0 6 1\r\nvalue2\r\nEND\r\n', ],
            [b'STORED\r\n', b'VALUE key1 0 6 1\r\nvalue1\r\nEND\r\n', ],
        ])

        def get_clients(key):
            if key == b'key3':
                return client.clients['127.0.0.1:11012']
            else:
                return client.clients['127.0.0.1:11013']

        client._get_client = get_clients
        assert client.set(b'key1', b'value1', noreply=False) is True
        assert client.set(b'key3', b'value2', noreply=False) is True
        result = client.gets_many([b'key1', b'key3'])
        assert (result ==
                {b'key1': (b'value1', b'1'), b'key3': (b'value2', b'1')})

    def test_no_servers_left(self):
        # With no servers and ignore_exc=True, _get_client returns None.
        from pymemcache.client.hash import HashClient
        client = HashClient(
            [], use_pooling=True,
            ignore_exc=True,
            timeout=1, connect_timeout=1
        )

        hashed_client = client._get_client('foo')
        assert hashed_client is None

    def test_no_servers_left_raise_exception(self):
        # With no servers and ignore_exc=False, _get_client must raise.
        from pymemcache.client.hash import HashClient
        client = HashClient(
            [], use_pooling=True,
            ignore_exc=False,
            timeout=1, connect_timeout=1
        )

        with pytest.raises(MemcacheError) as e:
            client._get_client('foo')

        assert str(e.value) == 'All servers seem to be down right now'

    def test_unavailable_servers_zero_retry_raise_exception(self):
        # retry_attempts=0 means a connect failure propagates immediately.
        from pymemcache.client.hash import HashClient
        client = HashClient(
            [('example.com', 11211)], use_pooling=True,
            ignore_exc=False,
            retry_attempts=0, timeout=1, connect_timeout=1
        )

        with pytest.raises(socket.error):
            client.get('foo')

    def test_no_servers_left_with_commands_return_default_value(self):
        from pymemcache.client.hash import HashClient
        client = HashClient(
            [], use_pooling=True,
            ignore_exc=True,
            timeout=1, connect_timeout=1
        )

        result = client.get('foo')
        assert result is None
        result = client.set('foo', 'bar')
        assert result is False

    def test_no_servers_left_with_set_many(self):
        from pymemcache.client.hash import HashClient
        client = HashClient(
            [], use_pooling=True,
            ignore_exc=True,
            timeout=1, connect_timeout=1
        )

        result = client.set_many({'foo': 'bar'})
        assert result is False

    def test_no_servers_left_with_get_many(self):
        from pymemcache.client.hash import HashClient
        client = HashClient(
            [], use_pooling=True,
            ignore_exc=True,
            timeout=1, connect_timeout=1
        )

        result = client.get_many(['foo', 'bar'])
        # NOTE(review): documents current behavior -- misses report False
        # rather than being omitted from the result dict.
        assert result == {'foo': False, 'bar': False}

# TODO: Test failover logic
oman_first_name' db.delete_column('people_persontranslation', 'roman_first_name') # Deleting field 'PersonTranslation.roman_last_name' db.delete_column('people_persontranslation', 'roman_last_name') # Deleting field 'PersonTranslation.non_roman_first_name' db.delete_column('people_persontranslation', 'non_roman_first_name') # Deleting field 'PersonTranslation.non_roman_last_name' db.delete_column('people_persontranslation', 'non_roman_last_name') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': 
('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser'
: ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length'
: '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'cms.cmsplugin': { 'Meta': {'object_name': 'CMSPlugin'}, 'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 5, 9, 0, 0)'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}), 'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}), 'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}), 'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}), 'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}), 'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}) }, 'cms.placeholder': { 'Meta': {'object_name': 'Placeholder'}, 'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': 
"(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'filer.file': { 'Meta': {'object_name': 'File'}, '_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}), 'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}), 'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}), 'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}), 'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}), 'uploaded_at': ('django.db.models.fields.DateTimeField', [], 
{'auto_now_add': 'True', 'blank': 'True'}) }, 'filer.folder': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}), 'parent': ('dja
import gtk


class ExtensionFeatures:
    """Feature flags a mount manager extension can advertise."""
    SYSTEM_WIDE = 0


class MountManagerExtension:
    """Base class for mount manager extensions.

    The mount manager has a single instance created on program startup and
    calls the methods below automatically; subclasses implement them.
    """

    # features supported by this extension
    features = ()

    def __init__(self, parent, window):
        self._parent = parent
        self._window = window
        self._application = self._parent._application

        # build the user interface skeleton
        self._container = gtk.VBox(False, 5)
        self._controls = gtk.HBox(False, 5)
        divider = gtk.HSeparator()

        # assemble the layout: separator above the controls row at the bottom
        self._container.pack_end(divider, False, False, 0)
        self._container.pack_end(self._controls, False, False, 0)

    def can_handle(self, uri):
        """Tell whether this extension can handle the specified URI."""
        return False

    def get_container(self):
        """Expose the container widget."""
        return self._container

    def get_information(self):
        """Provide an (icon, name) pair describing the extension."""
        return None, None

    def unmount(self, uri):
        """Unmount the selected URI; called by the mount manager."""
        pass

    def focus_object(self):
        """Focus the main object; called by the mount manager."""
        pass

    @classmethod
    def get_features(cls):
        """Report the set of features this extension supports."""
        return cls.features
"""
Basic building blocks for generic class based views.

We don't bind behaviour to http method handlers yet,
which allows mixin classes to be composed in interesting ways.
"""
from __future__ import unicode_literals

from django.http import Http404
from rest_framework import status
from rest_framework.response import Response
from rest_framework.request import clone_request
import warnings


def _get_validation_exclusions(obj, pk=None, slug_field=None, lookup_field=None):
    """
    Given a model instance, and an optional pk and slug field,
    return the full list of all other field names on that model. For use
    when performing full_clean on a model instance, so we only clean the
    required fields.
    """
    include = []

    if pk:
        # Pending deprecation
        pk_field = obj._meta.pk
        while pk_field.rel:
            # Follow one-to-one relations to the ultimate primary key field.
            pk_field = pk_field.rel.to._meta.pk
        include.append(pk_field.name)

    if slug_field:
        # Pending deprecation
        include.append(slug_field)

    if lookup_field and lookup_field != 'pk':
        include.append(lookup_field)

    # Exclude everything except the fields collected above.
    return [field.name for field in obj._meta.fields if field.name not in include]


class CreateModelMixin(object):
    """
    Create a model instance.
    """
    def create(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.DATA, files=request.FILES)

        if serializer.is_valid():
            # pre_save/save/post_save hooks bracket the insert so subclasses
            # can set implicit attributes and react to creation.
            self.pre_save(serializer.object)
            self.object = serializer.save(force_insert=True)
            self.post_save(self.object, created=True)
            headers = self.get_success_headers(serializer.data)
            return Response(serializer.data, status=status.HTTP_201_CREATED,
                            headers=headers)

        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def get_success_headers(self, data):
        # Point the Location header at the new resource when the serialized
        # data exposes a 'url'; otherwise send no extra headers.
        try:
            return {'Location': data['url']}
        except (TypeError, KeyError):
            return {}


class ListModelMixin(object):
    """
    List a queryset.
    """
    # Message used for the (deprecated) allow_empty=False 404 response.
    empty_error = "Empty list and '%(class_name)s.allow_empty' is False."

    def list(self, request, *args, **kwargs):
        self.object_list = self.filter_queryset(self.get_queryset())

        # Default is to allow empty querysets.  This can be altered by setting
        # `.allow_empty = False`, to raise 404 errors on empty querysets.
        if not self.allow_empty and not self.object_list:
            warnings.warn(
                'The `allow_empty` parameter is due to be deprecated. '
                'To use `allow_empty=False` style behavior, You should override '
                '`get_queryset()` and explicitly raise a 404 on empty querysets.',
                PendingDeprecationWarning
            )
            class_name = self.__class__.__name__
            error_msg = self.empty_error % {'class_name': class_name}
            raise Http404(error_msg)

        # Switch between paginated or standard style responses
        page = self.paginate_queryset(self.object_list)
        if page is not None:
            serializer = self.get_pagination_serializer(page)
        else:
            serializer = self.get_serializer(self.object_list, many=True)

        return Response(serializer.data)


class RetrieveModelMixin(object):
    """
    Retrieve a model instance.
    """
    def retrieve(self, request, *args, **kwargs):
        self.object = self.get_object()
        serializer = self.get_serializer(self.object)
        return Response(serializer.data)


class UpdateModelMixin(object):
    """
    Update a model instance.
    """
    def update(self, request, *args, **kwargs):
        partial = kwargs.pop('partial', False)
        self.object = self.get_object_or_none()

        if self.object is None:
            # PUT-as-create: no existing object, so insert and answer 201.
            created = True
            save_kwargs = {'force_insert': True}
            success_status_code = status.HTTP_201_CREATED
        else:
            created = False
            save_kwargs = {'force_update': True}
            success_status_code = status.HTTP_200_OK

        serializer = self.get_serializer(self.object, data=request.DATA,
                                         files=request.FILES, partial=partial)

        if serializer.is_valid():
            self.pre_save(serializer.object)
            self.object = serializer.save(**save_kwargs)
            self.post_save(self.object, created=created)
            return Response(serializer.data, status=success_status_code)

        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def partial_update(self, request, *args, **kwargs):
        # PATCH delegates to update() with partial validation enabled.
        kwargs['partial'] = True
        return self.update(request, *args, **kwargs)

    def get_object_or_none(self):
        try:
            return self.get_object()
        except Http404:
            if self.request.method == 'PUT':
                # For PUT-as-create operation, we need to ensure that we have
                # relevant permissions, as if this was a POST request.  This
                # will either raise a PermissionDenied exception, or simply
                # return None.
                self.check_permissions(clone_request(self.request, 'POST'))
            else:
                # PATCH requests where the object does not exist should still
                # return a 404 response.
                raise

    def pre_save(self, obj):
        """
        Set any attributes on the object that are implicit in the request.
        """
        # pk and/or slug attributes are implicit in the URL.
        lookup = self.kwargs.get(self.lookup_field, None)
        pk = self.kwargs.get(self.pk_url_kwarg, None)
        slug = self.kwargs.get(self.slug_url_kwarg, None)
        slug_field = slug and self.slug_field or None

        if lookup:
            setattr(obj, self.lookup_field, lookup)

        if pk:
            setattr(obj, 'pk', pk)

        if slug:
            setattr(obj, slug_field, slug)

        # Ensure we clean the attributes so that we don't eg return integer
        # pk using a string representation, as provided by the url conf kwarg.
        if hasattr(obj, 'full_clean'):
            exclude = _get_validation_exclusions(obj, pk, slug_field,
                                                 self.lookup_field)
            obj.full_clean(exclude)


class DestroyModelMixin(object):
    """
    Destroy a model instance.
    """
    def destroy(self, request, *args, **kwargs):
        obj = self.get_object()
        obj.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
import cv2
import numpy as np
import os
from vilay.core.Descriptor import MediaTime, Shape
from vilay.detectors.IDetector import IDetector
from vilay.core.DescriptionScheme import DescriptionScheme


class FaceDetector(IDetector):
    """Frontal-face detector plugin backed by an OpenCV Haar cascade.

    NOTE: Python 2 code (print statement, cv2.cv constants).
    """

    def getName(self):
        """Return the human-readable detector name."""
        return "Face Detector"

    def initialize(self):
        """Load the Haar cascade classifier.

        The cascade path is resolved relative to the current working
        directory, so the process must be started from the project root.
        """
        # define haar-detector file
        print os.getcwd() + '/vilay/detectors/FaceDetector/haarcascade_frontalface_default.xml'
        self.cascade = cv2.CascadeClassifier(os.getcwd() + '/vilay/detectors/FaceDetector/haarcascade_frontalface_default.xml')

    def detect(self, mediaTimes, tgtDS, film, rootDS, mainGUI):
        """Detect faces in every frame of each media time range.

        For each detected face a DescriptionScheme is added to tgtDS,
        carrying a rectangular Shape descriptor and a one-frame MediaTime
        descriptor.
        """
        for mediaTime in mediaTimes:
            for frameIdx in range(mediaTime.startTime, mediaTime.startTime + mediaTime.duration):
                actFrame = film.getFrame(frameIdx)

                # preprocessing: grayscale + histogram equalization improves
                # Haar cascade response
                actFrame = cv2.cvtColor(actFrame, cv2.cv.CV_BGR2GRAY)
                actFrame = cv2.equalizeHist(actFrame)

                # detect faces (scale factor 1.2, 3 min neighbors, 5x5 min size)
                faces = self.cascade.detectMultiScale(actFrame, 1.2, 3, 0, (5,5))

                # create ds and add time and shape descriptor
                for faceIdx in range(len(faces)):
                    [x,y,width,height] = faces[faceIdx,:]
                    ds = DescriptionScheme('RTI', 'Face Detector')
                    region = Shape('Face Detector','rect', np.array([[x, y], [x + width, y + height]]))
                    # NOTE(review): this rebinds the outer loop variable
                    # `mediaTime`; harmless because the outer `for` re-fetches
                    # from its iterator each pass, but a rename would be clearer.
                    mediaTime = MediaTime('Face Detector', frameIdx, 1)
                    tgtDS.addDescriptionScheme(ds)
                    ds.addDescriptor(region)
                    ds.addDescriptor(mediaTime)
ty': Quality.SDDVD, 'status': common.DOWNLOADED, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.SDDVD, 'status': common.ARCHIVED, 'unaired': False, 'manual': True}, []), # HDTV (between init qualities) ({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED_PROPER, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.SNATCHED_BEST, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.DOWNLOADED, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.HDTV, 'status': common.ARCHIVED, 'unaired': False, 'manual': True}, []), # RAWHDTV (max init quality) ({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_PROPER, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.SNATCHED_BEST, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.DOWNLOADED, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.RAWHDTV, 'status': common.ARCHIVED, 'unaired': False, 'manual': True}, []), # FULLHDTV (above init quality + unwanted) ({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_PROPER, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.SNATCHED_BEST, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.DOWNLOADED, 'unaired': False, 'manual': True}, []), 
({'upgradeonce': True, 'quality': Quality.FULLHDTV, 'status': common.ARCHIVED, 'unaired': False, 'manual': True}, []), # HDWEBDL (above init quality + unwanted) ({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_PROPER, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.SNATCHED_BEST, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality
': Quality.HDWEBDL, 'status': common.DOWNLOADED, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.HDWEBDL, 'status': common.ARCHIVED, 'unaired': False, 'manual': True}, []), # FULLHDWEBDL (above init quality + unwanted) ({'upgra
deonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_PROPER, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.SNATCHED_BEST, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.DOWNLOADED, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.FULLHDWEBDL, 'status': common.ARCHIVED, 'unaired': False, 'manual': True}, []), # HDBLURAY (above init quality + unwanted) ({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_PROPER, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.SNATCHED_BEST, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.DOWNLOADED, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.HDBLURAY, 'status': common.ARCHIVED, 'unaired': False, 'manual': True}, []), # FULLHDBLURAY (above init quality + unwanted) ({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_PROPER, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.SNATCHED_BEST, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.DOWNLOADED, 'unaired': False, 'manual': True}, []), ({'upgradeonce': True, 'quality': Quality.FULLHDBLURAY, 'status': common.ARCHIVED, 'unaired': False, 'manual': True}, []) ]), # init, upgrade quality show (overlapping) 
([(Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL), (Quality.RAWHDTV, Quality.HDBLURAY)], [({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.UNAIRED, 'unaired': False, 'manual': False}, []), ({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.SKIPPED, 'unaired': False, 'manual': False}, []), ({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.IGNORED, 'unaired': False, 'manual': False}, []), ({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.WANTED, 'unaired': False, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]), ({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.ARCHIVED, 'unaired': False, 'manual': False}, []), ({'upgradeonce': False, 'quality': Quality.NONE, 'status': common.FAILED, 'unaired': False, 'manual': False}, [Quality.SDDVD, Quality.RAWHDTV, Quality.HDWEBDL]), # SDTV (below init quality) ({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED, 'unaired': False, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]), ({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED_PROPER, 'unaired': False, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]), ({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.SNATCHED_BEST, 'unaired': False, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]), ({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.DOWNLOADED, 'unaired': False, 'manual': False}, [Quality.RAWHDTV, Quality.HDBLURAY]), ({'upgradeonce': False, 'quality': Quality.SDTV, 'status': common.ARCHIVED, 'unaired': False, 'manual': False}, []), # SDDVD (init quality) ({'u
def decode_args(s, delimiter="|", escapechar="\\"):
    """Split *s* on *delimiter*, honouring *escapechar*.

    A character preceded by *escapechar* is taken literally, so delimiters
    and escape characters produced by encode_args() round-trip correctly.

    Note: decode_args("") returns [""] (one empty argument), and a trailing
    unpaired escape character is dropped silently -- both kept for backward
    compatibility.
    """
    args = []
    escaping = False
    current = []  # characters of the argument being built (joined at the end)
    for c in s:
        if escaping:
            current.append(c)
            escaping = False
        elif c == escapechar:
            escaping = True
        elif c == delimiter:
            args.append("".join(current))
            current = []
        else:
            current.append(c)
    args.append("".join(current))
    return args


def encode_args(args, delimiter="|", escapechar="\\"):
    """Join *args* with *delimiter*, escaping special characters.

    Non-string arguments are converted with str().  Each occurrence of
    *delimiter* or *escapechar* inside an argument is prefixed with
    *escapechar* so decode_args() can reverse the encoding.

    Uses per-argument lists + str.join instead of the previous repeated
    string concatenation, which was quadratic in the output length.
    """
    encoded = []
    for arg in args:
        if not isinstance(arg, str):
            arg = str(arg)
        pieces = []
        for c in arg:
            if c == delimiter or c == escapechar:
                pieces.append(escapechar)
            pieces.append(c)
        encoded.append("".join(pieces))
    return delimiter.join(encoded)
# # Python module to parse OMNeT++ vector files # # Currently only suitable for small vector files since # everything is loaded into RAM # # Authors: Florian Kauer <florian.kauer@tuhh.de> # # Copyright (c) 2015, Institute of Telematics, Hamburg University of Technology # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the Institute nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. 
import re
import scipy.interpolate
import numpy as np

vectors = []


class OmnetVector:
    """In-memory parser for an OMNeT++ ``.vec`` (vector) result file.

    The whole file is held in RAM, so this is only suitable for small
    vector files.
    """

    # One sample line: <vector-id> TAB <event> TAB <time> TAB <value>
    _SAMPLE_RE = re.compile("([0-9]+)\t([0-9]+)\t([0-9.e\-+]+)\t([0-9.e\-+na]+)")
    # Declaration line, e.g. "vector 7 Net.host[0].app stat:vector ETV"
    _DECL_RE = re.compile("vector *([0-9]*) *([^ ]*) *(.*):vector")
    # File attribute line, e.g. "attr key value"
    _ATTR_RE = re.compile("attr ([^ ]*) ([^ ]*)\n")

    def __init__(self, file_input):
        """Parse all lines of *file_input* (an iterable of text lines)."""
        self.vectors = {}     # name -> {module -> vector id}
        self.dataTime = {}    # vector id -> list of sample times
        self.dataValues = {}  # vector id -> list of sample values
        self.maxtime = 0      # largest sample time seen so far
        self.attrs = {}       # file-level "attr" entries
        for line in file_input:
            sample = self._SAMPLE_RE.search(line)
            if sample is not None:
                vec_id = int(sample.group(1))
                t = float(sample.group(3))
                self.dataTime.setdefault(vec_id, []).append(t)
                self.maxtime = max(self.maxtime, t)
                self.dataValues.setdefault(vec_id, []).append(float(sample.group(4)))
                continue
            decl = self._DECL_RE.search(line)
            if decl is not None:
                name = decl.group(3)
                self.vectors.setdefault(name, {})[decl.group(2)] = int(decl.group(1))
                continue
            attr = self._ATTR_RE.search(line)
            if attr is not None:
                self.attrs[attr.group(1)] = attr.group(2)

    def get_vector(self, name, module, resample=None):
        """Return ``(times, values)`` for vector *name* of *module*.

        With *resample* set, the series is zero-order-hold interpolated
        onto a regular grid of that spacing from 0 to ``maxtime``; values
        before the first sample are 0, values after the last sample hold
        the final value.
        """
        vec_id = self.vectors[name][module]
        times = self.dataTime[vec_id]
        values = self.dataValues[vec_id]
        if resample is None:
            return (times, values)
        grid = np.arange(0, self.maxtime, resample)
        interpolator = scipy.interpolate.interp1d(
            times, values, 'zero', assume_sorted=True,
            bounds_error=False, fill_value=(0, values[-1]))
        return (grid, interpolator(grid))

    def get_attr(self, name):
        """Return the value of file attribute *name* (KeyError if absent)."""
        return self.attrs[name]
# Copyright (c) 2013-2016 Molly White
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

from plugins.util import admin, command, humanize_list


@admin("set")
def setcommand(m):
    """Adjust or view the settings on a command.

    # NOTE(review): *m* appears to be the bot's message/command object; it
    # exposes the tokenized command in ``m.line``, the reply target in
    # ``m.location``, the sender in ``m.sender`` and the bot instance in
    # ``m.bot`` -- confirm against the dispatcher.
    """
    #- !set setting value [#channel]
    #-
    #- ```irc
    #- < GorillaWarfare> !set link auto
    #- < GorillaBot> "link" set to "auto" in ##GorillaBot.
    #- ```
    #-
    #- Change settings for a command. Allowed and default settings for a command are viewable in
    #- the command's documentation. Settings can only be edited for channels the bot is joined
    #- to, or has been joined to in the past.
    # m.line holds: ["!set", setting, value, #channel] -- so more than 4
    # tokens is malformed.
    if len(m.line) > 4:
        m.bot.private_message(m.location,
                              'Too many arguments. Use "!set setting value [#channel]".')
        return
    # Check that this channel is in our config.
    # After the early return above len(m.line) is <= 4, so exactly one of
    # these two branches always binds `chan`.
    if len(m.line) <= 3:
        # No explicit channel given: operate on the channel the command
        # was issued in.
        chan = m.location
    elif len(m.line) == 4:
        if m.line[3][0] != "#":
            m.bot.private_message(m.location, 'Poorly-formatted command. '
                                              'Use "!set setting value [#channel]".')
            return
        chan = m.line[3]
    if not chan in m.bot.configuration["chans"]:
        m.bot.private_message(m.location,
                              "Cannot access settings for {0}. Do I know about the channel?".format(
                                  chan))
        return

    # Respond to command
    settings = m.bot.configuration["chans"][chan]["settings"]
    if len(m.line) == 1:
        # Query all settings for channel
        if not settings:
            m.bot.private_message(m.location, "Nothing has been set for {0}.".format(chan))
        else:
            # Render every (setting, value) pair into one message.
            m.bot.private_message(m.location, (" ".join(
                map(lambda s: ('"{0}" is set to "{1}".'.format(s[0], s[1])),
                    iter(settings.items())))))
    elif len(m.line) == 2:
        # Query value of a setting in a channel
        if not settings or m.line[1] not in settings:
            m.bot.private_message(m.location,
                                  '"{0}" has not been set for {1}.'.format(m.line[1], chan))
        else:
            m.bot.private_message(m.location,
                                  '"{0}" set to "{1}" in {2}.'.format(m.line[1],
                                                                      settings[m.line[1]],
                                                                      chan))
    else:
        # Three or four tokens: assign the value. Both key and value are
        # lower-cased before storing, and the change is persisted at once.
        setting = m.line[1].lower()
        value = m.line[2].lower()
        m.bot.configuration["chans"][chan]["settings"][setting] = value
        m.bot.update_configuration(m.bot.configuration)
        m.bot.logger.info(
            '"{0}" set to "{1}" in {2} by {3}.'.format(setting, value, chan, m.sender))
        m.bot.private_message(m.location,
                              '"{0}" set to "{1}" in {2}.'.format(setting, value, chan))


@admin()
def unset(m):
    """Unset a given setting."""
    #- !unset setting [#channel]
    #-
    #- ```irc
    #- < GorillaWarfare> !unset link
    #- < GorillaBot> "link" unset for ##GorillaBot.
    #- ```
    #-
    #- Removes the setting for a channel. This will revert to the default value. Settings can only
    #- be edited for channels the bot is joined to, or has been joined to in the past.
    # Valid forms: "!unset setting" or "!unset setting #channel".
    if len(m.line) != 2 and not (len(m.line) == 3 and m.line[2][0] == "#"):
        m.bot.private_message(m.location,
                              'Poorly-formatted command. Use "!unset setting [#channel]".')
        return
    chan = m.location if len(m.line) == 2 else m.line[2]
    if chan not in m.bot.configuration["chans"]:
        m.bot.private_message(m.location,
                              "Cannot unset setting for {0}. Do I know about the channel?".format(
                                  chan))
        return
    try:
        del m.bot.configuration["chans"][chan]["settings"][m.line[1]]
        # Persist the removal immediately so it survives a restart.
        m.bot.update_configuration(m.bot.configuration)
    except KeyError:
        # Doesn't matter if the value wasn't set to begin with
        pass
    m.bot.private_message(m.location, '"{0}" unset for {1}.'.format(m.line[1], chan))
if unsupervisedMethod is None: self.unsupervisedMethod = self.dataMgr.module["Unsupervised_Decomposition"].unsupervisedMethod else: self.unsupervisedMethod = self.dataMgr.module["Unsupervised_Decomposition"].unsupervisedMethod = unsupervisedMethod if numComponents is not None: self.unsupervisedMethod.setNumberOfComponents(numComponents) self.numIterations = numComponents else: self.numIterations = self.unsupervisedMethod.numComponents # overlay lists and filenames self.listOfResultOverlays = [] self.listOfFilenames = [] for i in range(self.numIterations): self.listOfResultOverlays.append(str("Unsupervised/" + self.unsupervisedMethod.shortname + " component %d" % (i+1))) filename = str(self.testdir + "gt_" + self.unsupervisedMethod.shortname + "_result_component_%d.h5" % (i+1)) print filename self.listOfFilenames.append(filename) #******************************************************************************* # T e s t W h o l e M o d u l e D e f a u l t D e c o m p o s e r * #******************************************************************************* class TestWholeModuleDefaultDecomposer(unittest.TestCase): # use default decomposer if not numpyRecentEnough: __test__ = False def setUp(self): self.app = QtCore.QCoreApplication(sys.argv) # we need a QCoreApplication to run, otherwise the thread just gets killed self.testProject = UnsupervisedDecompositionTestProject("sims_aligned_s7_32.h5") def test_WholeModule(self): t = QtCore.QTimer() t.setSingleShot(True) t.setInterval(0) self.app.connect(t, QtCore.SIGNAL('timeout()'), self.mainFunction) t.start() self.app.exec_() def mainFunction(self): self.testThread = TestThread(self.testProject.unsupervisedMgr, self.testProject.listOfResultOverlays, self.testProject.listOfFilenames, self.testProject.tolerance) QtCore.QObject.connect(self.testThread, QtCore.SIGNAL('done()'), self.finalizeTest) self.testThread.start(self.testProject.inputOverlays) self.numOverlaysBefore = 
len(self.testProject.dataMgr[self.testProject.dataMgr._activeImageNumber].overlayMgr.keys()) def finalizeTest(self): # results comparison self.assertEqual(self.testThread.passedTest, True) self.app.quit() #******************************************************************************* # T e s t W h o l e M o d u l e P C A D e c o m p o s e r * #******************************************************************************* class TestWholeModulePCADecomposer(unittest.TestCase): # use PCA decomposer with 3 components if not numpyRecentEnough: __test__ = False def setUp(self): #print "setUp" self.app = QtCore.QCoreApplication(sys.argv) # we need a QCoreApplication to run, otherwise the thread just gets killed self.numComponents = 3 self.testProject = UnsupervisedDecompositionTestProject("sims_aligned_s7_32.h5", UnsupervisedDecompositionPCA, self.numComponents) def test_WholeModule(self): t = QtCore.QTimer() t.setSingleShot(True) t.setInterval(0) self.app.connect(t, QtCore.SIGNAL('timeout()'), self.mainFunction) t.start() self.app.exec_() def mainFunction(self): self.testThread = TestThread(self.testProject.unsupervisedMgr, self.testProject.listOfResultOverlays, self.testProject.listOfFilenames, self.testProject.tolerance) QtCore.QObject.connect(self.testThread, QtCore.SIGNAL('done()'), self.finalizeTest) self.testThread.start(self.testProject.inputOverlays) self.numOverlaysBefore = len(self.testProject.dataMgr[self.testProject.dataMgr._activeImageNumber].overlayMgr.keys()) def finalizeTest(self): '''for i in range(self.testProject.unsupervisedMethod.numComponents): print "*************************************" print self.testProject.listOfResultOverlays[i] obtained = self.testProject.dataMgr[self.testProject.dataMgr._activeImageNumber].overlayMgr[self.testProject.listOfResultOverlays[i]] from ilastik.core import dataImpex dataImpex.DataImpex.exportOverlay(str("c:/gt_PCA_result_component_%d" % (i+1)), "h5", obtained)''' # results comparison 
self.assertEqual(self.testThread.passedTest, True) # other conditions # exactly self.numComponents computed overlays + self.numComponents ground truth overlays were added self.numOverlaysAfter = len(self.testProject.dataMgr[self.testProject.dataMgr._activeImageNumber].overlayMgr.keys()) self.assertEqual(self.numOverlaysAfter - self.numOverlaysBefore, self.numComponents*2) self.app.quit() #******************************************************************************* # T e s t W h o l e M o d u l e P L S A D e c o m p o s e r * #******************************************************************************* class TestWholeModulePLSADecomposer(unittest.TestCase): # pLSA with 5 components def setUp(self): #print "setUp" self.app = QtCore.QCoreApplication(sys.ar
gv) self.numComponents = 5 self.testProject = UnsupervisedDecompositionTestProject("sims_aligned_s7_32.h5", UnsupervisedDecompositionPLSA, self.numComponents) def test_WholeModule(self): t = QtCore.QTimer()
t.setSingleShot(True) t.setInterval(0) self.app.connect(t, QtCore.SIGNAL('timeout()'), self.mainFunction) t.start() self.app.exec_() def mainFunction(self): # fix random seed from ilastik.core.randomSeed import RandomSeed RandomSeed.setRandomSeed(42) self.testThread = TestThread(self.testProject.unsupervisedMgr, self.testProject.listOfResultOverlays, self.testProject.listOfFilenames, self.testProject.tolerance) QtCore.QObject.connect(self.testThread, QtCore.SIGNAL('done()'), self.finalizeTest) self.testThread.start(self.testProject.inputOverlays) self.numOverlaysBefore = len(self.testProject.dataMgr[self.testProject.dataMgr._activeImageNumber].overlayMgr.keys()) def finalizeTest(self): '''for i in range(self.testProject.unsupervisedMethod.numComponents): obtained = self.testProject.dataMgr[self.testProject.dataMgr._activeImageNumber].overlayMgr[self.testProject.listOfResultOverlays[i]] from ilastik.core import dataImpex dataImpex.DataImpex.exportOverlay(str("c:/gt_pLSA_result_component_%d" % (i+1)), "h5", obtained)''' # results comparison self.assertEqual(self.testThread.passedTest, True) # exactly self.numComponents computed overlays + self.numComponents ground truth overlays were added self.numOverlaysAfter = len(self.testProject.dataMgr[self.testProject.dataMgr._activeImageNumber].overlayMgr.keys()) self.assertEqual(self.numOverlaysAfter - self.numOverlaysBefore, self.numComponents*2) self.app.quit() #******************************************************************************* # T e s t E t c * #******************************************************************************* class TestEtc(unittest.TestCase): # test additional functionality def test_Etc(self): # check that wrong numbers of components are reset to a valid value in {1, ..., numComponents} numChannels = 10 decomposer = UnsupervisedDecompositionPCA() components = decomposer.checkNumComponents(numChannels, 100) assert((components <= numChannels) & (components >= 1)) components = 
decomposer.checkNumComponents(numChannels, 0)
# NOTE(review): ugettext_lazy is imported but not used in this module --
# presumably kept as a re-export or leftover; confirm before removing
# (ugettext_lazy itself was removed in Django 4.0).
from django.utils.translation import ugettext_lazy as _ugl

# Dotted path to this package's AppConfig subclass. Django (< 3.2) reads
# this when the app is listed in INSTALLED_APPS without an explicit config.
default_app_config = 'django_sendgrid_parse.apps.DjangoSendgridParseAppConfig'
#     Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
#     Part of "Nuitka", an optimizing Python compiler that is compatible and
#     integrates with CPython, but also works on its own.
#
#     Licensed under the Apache License, Version 2.0 (the "License");
#     you may not use this file except in compliance with the License.
#     You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#     Unless required by applicable law or agreed to in writing, software
#     distributed under the License is distributed on an "AS IS" BASIS,
#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#     See the License for the specific language governing permissions and
#     limitations under the License.
#
""" Generate code that updates the source code line.

"""


def getCurrentLineNumberCode(context):
    """Return the current line number as C source text, or "" if none applies."""
    if context.getFrameHandle() is None:
        # No frame in scope, there is nothing to update.
        return ""
    source_ref = context.getCurrentSourceCodeReference()
    if source_ref.isInternal():
        # Internal helper code carries no user visible line number.
        return ""
    return str(source_ref.getLineNumber())


def getLineNumberUpdateCode(context):
    """Return a C statement that updates the frame's "f_lineno", or ""."""
    lineno_value = getCurrentLineNumberCode(context)
    if not lineno_value:
        return ""
    return "%s->m_frame.f_lineno = %s;" % (context.getFrameHandle(), lineno_value)


def getErrorLineNumberUpdateCode(context):
    """Return a C statement that records the exception line number, or ""."""
    (
        _exception_type,
        _exception_value,
        _exception_tb,
        exception_lineno,
    ) = context.variable_storage.getExceptionVariableDescriptions()

    lineno_value = getCurrentLineNumberCode(context)
    if not lineno_value:
        return ""
    return "%s = %s;" % (exception_lineno, lineno_value)


def emitErrorLineNumberUpdateCode(emit, context):
    """Emit the exception line number update, if one is needed."""
    code = getErrorLineNumberUpdateCode(context)
    if code:
        emit(code)


def emitLineNumberUpdateCode(expression, emit, context):
    """Emit an "f_lineno" update for the source position of *expression*."""
    context.setCurrentSourceCodeReference(expression.getCompatibleSourceReference())
    update = getLineNumberUpdateCode(context)
    if update:
        emit(update)


def getSetLineNumberCodeRaw(to_name, emit, context):
    """Emit code that stores the C value *to_name* into "f_lineno"."""
    assert context.getFrameHandle() is not None
    emit("%s->m_frame.f_lineno = %s;" % (context.getFrameHandle(), to_name))


def getLineNumberCode(to_name, emit, context):
    """Emit code that reads "f_lineno" into the C variable *to_name*."""
    assert context.getFrameHandle() is not None
    emit("%s = %s->m_frame.f_lineno;" % (to_name, context.getFrameHandle()))
"pychemqt", "Equilibrium"), QApplication.translate("pychemqt", "Kinetic"), QApplication.translate("pychemqt", "Catalitic")] TEXT_PHASE = [QApplication.translate("pychemqt", "Global"),
QApplication.translate("pychemqt", "Liquid"), QApplication.translate("pychemqt", "Gas")] TEXT_BASE = [QApplication.translate("pychemqt", "Mole"), QApplication.translate("pychemqt", "Mass"), QApplication.translate("pychemqt", "Partial pressure")] def __init__(self, **kwargs): """constructor, kwargs keys can be: comp: array wit
h index of reaction components coef: array with stequiometric coefficient for each component fase: Phase where reaction work 0 - Global 1 - Liquid 2 - Gas key: Index of key component base 0 - Mol 1 - Mass 2 - Partial pressure Hr: Heat of reaction, calculate from heat of formation if no input formula: boolean to show compound names in formules tipo: Kind of reaction 0 - Stequiometric, without equilibrium or kinetic calculations 1 - Equilibrium, without kinetic calculation 2 - Equilibrium by minimization of Gibbs free energy 3 - Kinetic 4 - Catalytic conversion: conversion value for reaction with tipo=0 keq: equilibrium constant for reation with tipo=1 -it is float if it don't depend with temperature -it is array if it depends with temperature """ self.kwargs = Reaction.kwargs.copy() if kwargs: self.__call__(**kwargs) def __call__(self, **kwargs): oldkwargs = self.kwargs.copy() self.kwargs.update(kwargs) if oldkwargs != self.kwargs and self.isCalculable: self.calculo() @property def isCalculable(self): self.msg = "" self.status = 1 if not self.kwargs["comp"]: self.msg = QApplication.translate("pychemqt", "undefined components") self.status = 0 return if not self.kwargs["coef"]: self.msg = QApplication.translate("pychemqt", "undefined stequiometric") self.status = 0 return if self.kwargs["tipo"] == 0: if self.kwargs["conversion"] is None: self.msg = QApplication.translate("pychemqt", "undefined conversion") self.status = 3 elif self.kwargs["tipo"] == 1: if self.kwargs["keq"] is None: self.msg = QApplication.translate("pychemqt", "undefined equilibrium constants") self.status = 3 elif self.kwargs["tipo"] == 2: pass elif self.kwargs["tipo"] == 3: pass return True def calculo(self): self.componentes = self.kwargs["comp"] self.coef = self.kwargs["coef"] self.tipo = self.kwargs["tipo"] self.base = self.kwargs["base"] self.fase = self.kwargs["fase"] self.calor = self.kwargs["Hr"] self.formulas = self.kwargs["formula"] self.keq = self.kwargs["keq"] databank = 
sqlite3.connect(databank_name).cursor() databank.execute("select nombre, peso_molecular, formula, \ calor_formacion_gas from compuestos where id IN \ %s" % str(tuple(self.componentes))) nombre = [] peso_molecular = [] formula = [] calor_reaccion = 0 check_estequiometria = 0 for i, compuesto in enumerate(databank): nombre.append(compuesto[0]) peso_molecular.append(compuesto[1]) formula.append(compuesto[2]) calor_reaccion += compuesto[3]*self.coef[i] check_estequiometria += self.coef[i]*compuesto[1] self.nombre = nombre self.peso_molecular = peso_molecular self.formula = formula if self.calor: self.Hr = self.kwargs.get("Hr", 0) else: self.Hr = unidades.MolarEnthalpy(calor_reaccion/abs( self.coef[self.base]), "Jkmol") self.error = round(check_estequiometria, 1) self.state = self.error == 0 self.text = self._txt(self.formulas) def conversion(self, corriente, T): """Calculate reaction conversion corriente: Corriente instance for reaction T: Temperature of reaction""" if self.tipo == 0: # Material balance without equilibrium or kinetics considerations alfa = self.kwargs["conversion"] elif self.tipo == 1: # Chemical equilibrium without kinetics if isinstance(self.keq, list): A, B, C, D, E, F, G, H = self.keq keq = exp(A+B/T+C*log(T)+D*T+E*T**2+F*T**3+G*T**4+H*T**5) else: keq = self.keq def f(alfa): conc_out = [ (corriente.caudalunitariomolar[i]+alfa*self.coef[i]) / corriente.Q.m3h for i in range(len(self.componentes))] productorio = 1 for i in range(len(self.componentes)): productorio *= conc_out[i]**self.coef[i] return keq-productorio alfa = fsolve(f, 0.5) print alfa, f(alfa) avance = alfa*self.coef[self.base]*corriente.caudalunitariomolar[self.base] Q_out = [corriente.caudalunitariomolar[i]+avance*self.coef[i] / self.coef[self.base] for i in range(len(self.componentes))] minimo = min(Q_out) if minimo < 0: # The key component is not correct, redo the result indice = Q_out.index(minimo) avance = self.coef[indice]*corriente.caudalunitariomolar[indice] Q_out = 
[corriente.caudalunitariomolar[i]+avance*self.coef[i] / self.coef[indice] for i in range(len(self.componentes))] h = unidades.Power(self.Hr*self.coef[self.base] / self.coef[indice]*avance, "Jh") else: h = unidades.Power(self.Hr*avance, "Jh") print alfa, avance caudal = sum(Q_out) fraccion = [caudal_i/caudal for caudal_i in Q_out] return fraccion, h # def cinetica(self, tipo, Ko, Ei): # """Método que define la velocidad de reacción""" # # def _txt(self, nombre=False): """Function to get text representation for reaction""" if nombre: txt = self.nombre else: txt = self.formula reactivos = [] productos = [] for i in range(len(self.componentes)): if self.coef[i] == int(self.coef[i]): self.coef[i] = int(self.coef[i]) if self.coef[i] < -1: reactivos.append(str(-self.coef[i])+txt[i]) elif self.coef[i] == -1: reactivos.append(txt[i]) elif -1 < self.coef[i] < 0: reactivos.append(str(-self.coef[i])+txt[i]) elif 0 < self.coef[i] < 1: productos.append(str(self.coef[i])+txt[i]) elif self.coef[i] == 1: productos.append(txt[i]) elif self.coef[i] > 1: productos.append(str(self.coef[i])+txt[i]) return " + ".join(reactivos)+" ---> "+" + ".join(productos) def __repr__(self): if self.status: eq = self._txt() return eq + " " + "Hr= %0.4e Jkmol" % self.Hr else: return str(self.msg) if __name__ == "__main__": # from lib.corriente import Corriente, Mezcla # mezcla=Corriente(300, 1, 1000, Mezcla([1, 46, 47, 62], [0.03, 0.01, 0.96, 0])) # reaccion=Reaction([1, 46, 47, 62], [-2, 0, -1, 2], base=2) # reaccion.conversion(mezcla) # print reaccion reaccion = Reaction(comp=[1, 47, 62], coef=[-2, -1, 2])
g *meta.customfilename* to False for each object. Extensions like *.html* are not stored. Path matching works independent from extensions. """ maxlength = 55 # max path length containerNamespace = True # unique filenames for container or global extension = None def Init(self): if self.id == 0: # skip roots return self.ListenEvent("commit", "TitleToFilename") self._SetName() def TitleToFilename(self, **kw): """ Uses title for filename """ customfilename = self.data.get("customfilename", None) # might not exist if customfilename: self._SetName() return # create url compatible filename from title filename = self.EscapeFilename(self.meta.title) # make unique filename filename = self.UniqueFilename(filename) if self.AddExtension(filename) == self.meta.pool_filename: # no change return if filename: # update self.meta["pool_filename"] = self.AddExtension(filename) else: # reset filename self.meta["pool_filename"] = "" self._SetName() self.Signal("pathupdate", path=self.meta["pool_filename"]) def UniqueFilename(self, name): """ Converts name to valid path/url """ if name == "file": name = "file_" if self.containerNamespace: unitref = self.parent.id else: unitref = None cnt = 1 root = self.root while root.search.FilenameToID(self.AddExtension(name), unitref, parameter=dict(id=self.id), operators=dict(id="!=")) != 0: if cnt>1: name = name.rstrip("1234567890-") name = name+"-"+str(cnt) cnt += 1 return name def EscapeFilename(self, path): """ Converts name to valid path/url Path length between *self.maxlength-20* and *self.maxlength* chars. Tries to cut longer names at spaces. 
(based on django's slugify) """ path = unicodedata.normalize("NFKD", path).encode("ascii", "ignore") path = path.decode("utf-8") path = re.sub('[^\w\s-]', '', path).strip().lower() path = re.sub('[-\s]+', '_', path) # avoid ids as filenames try: int(path) path += "_n" except: pass # cut long filenames cutlen = 20 if len(path) <= self.maxlength: return path # cut at '_' pos = path[self.maxlength-cutlen:].find("_") if pos > cutlen: # no '_' found. cut at maxlength. return path[:self.maxlength] return path[:self.maxlength-cutlen+pos] def AddExtension(self, filename): if not self.extension: return filename return "%s.%s" % (filename, self.extension) # system functions ----------------------------------------------------------------- def __getitem__(self, id): """ Traversal lookup based on object.pool_filename and object.id. Trailing extensions are ignored if self.extension is None. `file` is a reserved name and used in the current object to map file downloads. """ if id == "file": raise KeyError(id) if self.extension is None: id = id.split(".") if len(id)>2: id = (".").join(id[:-1]) else: id = id[0] try: id = int(id) except ValueError: name = id id = 0 if name: id = self.root.search.FilenameToID(name, self.id) if not id: raise KeyError(id) obj = self.GetObj(id) if obj is None: raise KeyError(id) return obj def _SetName(self): self.__name__ = self.meta["pool_filename"] if not self.__name__: self.__name__ = str(self.id) class RootPathExtension(object): """
Extension for nive root objects to handle alternative url names """ extension = None # system functions ----------------------------------------------------------------- def __getitem__(self, id): """ Traversal lookup based on object.pool_filename and object.id. Trailing extensions are ignored. `file` is a reserved name and used i
n the current object to map file downloads. """ if id == "file": raise KeyError(id) if self.extension is None: id = id.split(".") if len(id)>2: id = (".").join(id[:-1]) else: id = id[0] try: id = int(id) except: name = id id = 0 if name: id = self.search.FilenameToID(name, self.id) if not id: raise KeyError(id) obj = self.GetObj(id) if not obj: raise KeyError(id) return obj class PersistentRootPath(object): """ Extension for nive root objects to handle alternative url names """ def Init(self): self.ListenEvent("commit", "UpdateRouting") self.ListenEvent("dataloaded", "UpdateRouting") self.UpdateRouting() def UpdateRouting(self, **kw): # check url name of root if self.meta.get("pool_filename"): name = self.meta.get("pool_filename") if name != self.__name__: # close cached root self.app._CloseRootObj(name=self.__name__) # update __name__ and hash self.__name__ = str(name) self.path = name # unique root id generated from name . negative integer. self.idhash = abs(hash(self.__name__))*-1 from nive.tool import Tool, ToolView from nive.definitions import ToolConf, FieldConf, ViewConf, IApplication tool_configuration = ToolConf( id = "rewriteFilename", context = "nive.extensions.path.RewriteFilenamesTool", name = "Rewrite pool_filename based on title", description = "Rewrites all or empty filenames based on form selection.", apply = (IApplication,), mimetype = "text/html", data = [ FieldConf(id="types", datatype="checkbox", default="", settings=dict(codelist="types"), name="Object types", description=""), FieldConf(id="testrun", datatype="bool", default=1, name="Testrun, no commits", description=""), FieldConf(id="resetall", datatype="string", default="", size=15, name="Reset all filenames", description="<b>Urls will change! 
Enter 'reset all'</b>"), FieldConf(id="tag", datatype="string", default="rewriteFilename", hidden=1) ], views = [ ViewConf(name="", view=ToolView, attr="form", permission="admin", context="nive.extensions.path.RewriteFilenamesTool") ] ) class RewriteFilenamesTool(Tool): def _Run(self, **values): parameter = dict() if values.get("resetall")!="reset all": parameter["pool_filename"] = "" if values.get("types"): tt = values.get("types") if not isinstance(tt, list): tt = [tt] parameter["pool_type"] = tt operators = dict(pool_type="IN", pool_filename="=") fields = ("id", "title", "pool_type", "pool_filename") root = self.app.root recs = root.search.Search(parameter, fields, max=10000, operators=operators, sort="id", ascending=0) if len(recs["items"]) == 0: return "<h2>None found!</h2>", False user = values["original"]["user"] testrun = values["testrun"] result = [] cnt = 0 for rec in recs["items"]: obj = root.LookupObj(rec["id"]) if obj is None or not hasattr(obj, "TitleToFilename"): continue filename = obj.meta["pool_filename"] obj.TitleToFilename()
# Copyright (C) 2016 Red Hat, Inc., Bryn M. Reeves <bmr@redhat.com>
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.

from sos.plugins import Plugin, RedHatPlugin


class Dracut(Plugin, RedHatPlugin):
    """ Dracut initramfs generator
    """

    plugin_name = "dracut"
    packages = ("dracut",)

    def setup(self):
        """Collect dracut configuration files and diagnostic command output."""
        # Static configuration: the main file plus the drop-in directory.
        config_specs = [
            "/etc/dracut.conf",
            "/etc/dracut.conf.d"
        ]
        # Runtime information: available modules and the generated cmdline.
        commands = [
            "dracut --list-modules",
            "dracut --print-cmdline"
        ]
        self.add_copy_spec(config_specs)
        self.add_cmd_output(commands)

# vim: set et ts=4 sw=4 :
# #######
# Copyright (c) 2018-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oauth2client import GOOGLE_TOKEN_URI
from oauth2client.client import GoogleCredentials

from .. import gcp
from .. import constants


class CloudResourcesBase(gcp.GoogleCloudApi):
    """Common base for Cloud Resource Manager API wrappers.

    Concrete subclasses implement ``get``/``create``/``delete`` for a
    single resource kind.
    """

    def __init__(self,
                 config,
                 logger,
                 scope=constants.COMPUTE_SCOPE,
                 discovery=constants.CLOUDRESOURCES_DISCOVERY,
                 api_version=constants.API_V1):
        super(CloudResourcesBase, self).__init__(
            config,
            logger,
            scope,
            discovery,
            api_version)

    def get_credentials(self, scope):
        """Build ``GoogleCredentials`` from the stored refresh-token data.

        To produce such data locally:
        run: gcloud beta auth application-default login
        then look at ~/.config/gcloud/application_default_credentials.json
        """
        # Only client id/secret and the refresh token are supplied; the
        # access token is obtained on demand via GOOGLE_TOKEN_URI.
        return GoogleCredentials(
            access_token=None,
            client_id=self.auth['client_id'],
            client_secret=self.auth['client_secret'],
            refresh_token=self.auth['refresh_token'],
            token_expiry=None,
            token_uri=GOOGLE_TOKEN_URI,
            user_agent='Python client library')

    def get(self):
        # Subclass responsibility.
        raise NotImplementedError()

    def create(self):
        # Subclass responsibility.
        raise NotImplementedError()

    def delete(self):
        # Subclass responsibility.
        raise NotImplementedError()
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES

from swgpy.object import *


def create(kernel):
    """Build the Tangible for the elite Alderaan ship-armor reward item."""
    armor = Tangible()
    armor.template = "object/tangible/ship/components/armor/shared_arm_reward_alderaan_elite.iff"
    armor.attribute_template_id = 8
    armor.stfName("space/space_item", "armor_reward_alderaan_elite")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return armor
#!/usr/bin/env python
#
# This code is a part of `ardrone_autopilot` project
# which is distributed under the MIT license.
# See `LICENSE` file for details.
#
"""This node is based on `base.py`. See there a documentation.

Inputs
------

* in/image -- main picture stream.

Outputs
-------

* out/image -- result image.

Parameters
----------

* ~show = False [bool] -- show the result instead of publishing it.
* ~encoding = "bgr8" [str] -- video encoding used by bridge.

"""

import rospy
import cv2
import tf
from tf.transformations import quaternion_matrix
import numpy as np
import image_geometry
import math

from base import BaseStreamHandler


class Show(BaseStreamHandler):
    """Stream handler that overlays a projected circle of points on each frame."""

    def __init__(self, *args, **kwargs):
        # TF listener and camera model are created before the base class
        # starts delivering frames.
        self.tf = tf.TransformListener()
        self.camera_model = image_geometry.PinholeCameraModel()
        super(Show, self).__init__(*args, **kwargs)

    def on_image(self, img):
        """Draw the projected points onto *img* and return it."""
        if self.info is None:
            # No camera calibration received yet; skip this frame.
            return
        self.camera_model.fromCameraInfo(self.info)
        # self.camera_model.rectifyImage(img, img)
        self.tf.waitForTransform('ardrone/odom',
                                 'ardrone/ardrone_base_frontcam',
                                 rospy.Time(0), rospy.Duration(3))
        trans, rot = self.tf.lookupTransform('ardrone/odom',
                                             'ardrone/ardrone_base_frontcam',
                                             rospy.Time(0))
        rotation = np.array(quaternion_matrix(rot))
        # Sample a circle of radius 0.1 every 30 degrees, rotate each point
        # and project it into pixel coordinates.
        # NOTE(review): vector.dot(matrix) multiplies by the matrix on the
        # right (i.e. applies the transpose rotation) -- confirm this is the
        # intended direction of the transform.
        for angle_deg in range(0, 360, 30):
            direction = np.array([0.1 * math.cos(angle_deg * math.pi / 180),
                                  0.1 * math.sin(angle_deg * math.pi / 180),
                                  0, 0])
            rotated = direction.dot(rotation)
            px, py = self.camera_model.project3dToPixel(rotated)
            cv2.circle(img, (int(px), int(py)), 5, (0, 0, 255), -1)
        return img


if __name__ == "__main__":
    Show.launch_node()
1da4, 0x1da5, 0x1da6, 0x1da7, 0x1da8, 0x1da9, 0x1daa, 0x1dab, 0x1dac, 0x1dad, 0x1dae, 0x1daf, 0x1db0, 0x1db1, 0x1db2, 0x1db3, 0x1db4, 0x1db5, 0x1db6, 0x1db7, 0x1db8, 0x1db9, 0x1dba, 0x1dbb, 0x1dbc, 0x1dbd, 0x1dbe, 0x1dbf, 0x2071, 0x207f, 0x2090, 0x2091, 0x2092, 0x2093, 0x2094, 0x2095, 0x2096, 0x2097, 0x2098, 0x2099, 0x209a, 0x209b, 0x209c, 0x2c7c, 0x2c7d, 0xa69c, 0xa69d, 0xa770, 0xa7f8, 0xa7f9, 0xab5c, 0xab5d, 0xab5e, 0xab5f, } # utf8proc does not store if a codepoint is numeric numeric_info_missing = { 0x3405, 0x3483, 0x382a, 0x3b4d, 0x4e00, 0x4e03, 0x4e07, 0x4e09, 0x4e5d, 0x4e8c, 0x4e94, 0x4e96, 0x4ebf, 0x4ec0, 0x4edf, 0x4ee8, 0x4f0d, 0x4f70, 0x5104, 0x5146, 0x5169, 0x516b, 0x516d, 0x5341, 0x5343, 0x5344, 0x5345, 0x534c, 0x53c1, 0x53c2, 0x53c3, 0x53c4, 0x56db, 0x58f1, 0x58f9, 0x5e7a, 0x5efe, 0x5eff, 0x5f0c, 0x5f0d, 0x5f0e, 0x5f10, 0x62fe, 0x634c, 0x67d2, 0x6f06, 0x7396, 0x767e, 0x8086, 0x842c, 0x8cae, 0x8cb3, 0x8d30, 0x9621, 0x9646, 0x964c, 0x9678, 0x96f6, 0xf96b, 0xf973, 0xf978, 0xf9b2, 0xf9d1, 0xf9d3, 0xf9fd, 0x10fc5, 0x10fc6, 0x10fc7, 0x10fc8, 0x10fc9, 0x10fca, 0x10fcb, } # utf8proc has no no digit/numeric information digit_info_missing = { 0xb2, 0xb3, 0xb9, 0x1369, 0x136a, 0x136b, 0x136c, 0x136d, 0x136e, 0x136f, 0x1370, 0x1371, 0x19da, 0x2070, 0x2074, 0x2075, 0x2076, 0x2077, 0x2078, 0x2079, 0x2080, 0x2081, 0x2082, 0x2083, 0x2084, 0x2085, 0x2086, 0x2087, 0x2088, 0x2089, 0x2460, 0x2461, 0x2462, 0x2463, 0x2464, 0x2465, 0x2466, 0x2467, 0x2468, 0x2474, 0x2475, 0x2476, 0x2477, 0x2478, 0x2479, 0x247a, 0x247b, 0x247c, 0x2488, 0x2489, 0x248a, 0x248b, 0x248c, 0x248d, 0x248e, 0x248f, 0x2490, 0x24ea, 0x24f5, 0x24f6, 0x24f7, 0x24f8, 0x24f9, 0x24fa, 0x24fb, 0x24fc, 0x24fd, 0x24ff, 0x2776, 0x2777, 0x2778, 0x2779, 0x277a, 0x277b, 0x277c, 0x277d, 0x277e, 0x2780, 0x2781, 0x2782, 0x2783, 0x2784, 0x2785, 0x2786, 0x2787, 0x2788, 0x278a, 0x278b, 0x278c, 0x278d, 0x278e, 0x278f, 0x2790, 0x2791, 0x2792, 0x10a40, 0x10a41, 0x10a42, 0x10a43, 0x10e60, 0x10e61, 0x10e62, 0x10e63, 0x10e64, 
0x10e65, 0x10e66, 0x10e67, 0x10e68, } numeric_info_missing = { 0x3405, 0x3483, 0x382a, 0x3b4d, 0x4e00, 0x4e03, 0x4e07, 0x4e09, 0x4e5d, 0x4e8c, 0x4e94, 0x4e96, 0x4ebf, 0x4ec0, 0x4edf, 0x4ee8, 0x4f0d, 0x4f70, 0x5104, 0x5146, 0x5169, 0x516b, 0x516d, 0x5341, 0x5343, 0x5344, 0x5345, 0x534c, 0x53c1, 0x53c2, 0x53c3, 0x53c4, 0x56db, 0x58f1, 0x58f9, 0x5e7a, 0x5efe, 0x5eff, 0x5f0c, 0x5f0d, 0x5f0e, 0x5f10, 0x62fe, 0x634c, 0x67d2, 0x6f06, 0x7396, 0x767e, 0x8086, 0x842c, 0x8cae, 0x8cb3, 0x8d30, 0x9621, 0x9646, 0x964c, 0x9678, 0x96f6, 0xf96b, 0xf973, 0xf978, 0xf9b2, 0xf9d1, 0xf9d3, 0xf9fd, } codepoints_ignore = { 'is_alnum': numeric_info_missing | digit_info_missing | unknown_issue_is_alpha, 'is_alpha': unknown_issue_is_alpha, 'is_digit': digit_info_missing, 'is_numeric': numeric_info_missing, 'is_lower': utf8proc_issue_is_lower } @pytest.mark.parametrize('function_name', ['is_alnum', 'is_alpha', 'is_ascii', 'is_decimal', 'is_digit', 'is_lower', 'is_numeric', 'is_printable', 'is_space', 'is_upper', ]) @pytest.mark.parametrize('variant', ['ascii', 'utf8']) def test_string_py_compat_boolean(function_name, variant): arrow_name = variant + "_" + function_name py_name = function_name.replace('_', '') ignore = codepoints_ignore.get(function_name, set()) |\ find_new_unicode_codepoints() for i in range(128 if ascii else 0x11000): if i in range(0xD800, 0xE000): continue # bug? 
pyarrow doesn't allow utf16 surrogates # the issues we know of, we skip if i in ignore: continue # Compare results with the equivalent Python predicate # (except "is_space" where functions are known to be incompatible) c = chr(i) if hasattr(pc, arrow_name) and function_name != 'is_space': ar = pa.array([c]) arrow_func = getattr(pc, arrow_name) assert arrow_func(ar)[0].as_py() == getattr(c, py_name)() @pytest.mark.parametrize(('ty', 'values'), all_array_types) def test_take(ty, values): arr = pa.array(values, type=ty) for indices_type in [pa.int8(), pa.int64()]: indices = pa.array([0, 4, 2, None], type=indices_type) result = arr.take(indices) result.validate() expected = pa.array([values[0], values[4], values[2], None], type=ty) assert result.equals(expected) # empty indices indices = pa.array([], type=indices_type) result = arr.take(indices) result.validate() expected = pa.array([], type=ty) assert result.equals(expected) indices = pa.array([2, 5]) with pytest.raises(IndexError): arr.take(indices) indices = pa.array([2, -1]) with pytest.raises(IndexError): arr.take(indices) def test_take_indices_types(): arr = pa.array(range(5)) for indices_type in ['uint8', 'int8', 'uint16', 'int16', 'uint32', 'int32', 'uint64', 'int64']: indices = pa.array([0, 4, 2, None], type=indices_type) result = arr.take(indices) result.validate() expected = pa.array([0, 4, 2, None]) assert result.equals(expected) for indices_type in [pa.float32(), pa.float64()]: indices = pa.array([0, 4, 2], type=indices_type) with pytest.raises(NotImplementedError): arr.take(indices) def test_take_on_chunked_array(): # A
RROW-9504 arr = pa.chunked_array
([ [ "a", "b", "c", "d", "e" ], [ "f", "g", "h", "i", "j" ] ]) indices = np.array([0, 5, 1, 6, 9, 2]) result = arr.take(indices) expected = pa.chunked_array([["a", "f", "b", "g", "j", "c"]]) assert result.equals(expected) indices = pa.chunked_array([[1], [9, 2]]) result = arr.take(indices) expected = pa.chunked_array([ [ "b" ], [ "j", "c" ] ]) assert result.equals(expected) @pytest.mark.parametrize('ordered', [False, True]) def test_take_dictionary(ordered): arr = pa.DictionaryArray.from_arrays([0, 1, 2, 0, 1, 2], ['a', 'b', 'c'], ordered=ordered) result = arr.take(pa.array([0, 1, 3])) result.validate() assert result.to_pylist() == ['a', 'b', 'a'] assert result.dictionary.to_pylist() == ['a', 'b', 'c'] assert result.type.ordered is ordered def test_take_null_type(): # ARROW-10027 arr = pa.array([None] * 10) chunked_arr = pa.chunked_array([[None] * 5] * 2) batch = pa.record_batch([arr], names=['a']) table = pa.table({'a': arr}) indices = pa.array([1, 3, 7, None]) assert len(arr.take(indices)) == 4 assert len(chunked_arr.take(indices)) == 4 assert len(batch.take(indices).column(0)) == 4 assert len(table.take(indices).column(0)) == 4 @pytest.mark.parametrize(('ty', 'values'), all_array_types) def test_filter(ty, values): arr = pa.array(values, type=ty) mask = pa.array([True, False, False, True, None]) result = arr.filter(mask, null_selection_behavior='drop') result.validate() assert result.equals(pa.array([values[0], values[3]], type=ty)) result = arr.filter(mask, null_selection_behavior='emit_null') result.validate() assert result.equals(pa.array([values[0], values[3], None], type=ty)) # non-boolean dtype mask = pa.array([0, 1, 0, 1, 0]) with pytest.raises(NotImplementedError): arr.filter(mask) # wrong length mask = pa.array([True, False, True]) with pytest.raises(ValueError, match="must all be the same length"): arr.filter(mask) def test_filter_chunked_array(): arr = pa.chunked_array([["a",
import base64
import json

import responses

from mapbox.services.datasets import Datasets

# Token whose payload decodes to {"u": "testuser"}; the service derives
# the username from it.
username = 'testuser'
access_token = 'pk.{0}.test'.format(
    base64.b64encode(b'{"u":"testuser"}').decode('utf-8'))


def test_class_attrs():
    """The service class exposes the expected API name and version."""
    serv = Datasets()
    assert serv.api_name == 'datasets'
    assert serv.api_version == 'v1'


def test_datasets_service_properties():
    """Username is decoded from the token; base URI is the datasets API."""
    datasets = Datasets(access_token=access_token)
    assert datasets.username == username
    assert datasets.baseuri == 'https://api.mapbox.com/datasets/v1'


@responses.activate
def test_datasets_list():
    """Listing datasets returns every dataset owned by the user."""
    body = '''
[
  {
    "owner": "testuser",
    "id": "ds1",
    "created": "2015-09-19",
    "modified": "2015-09-19"
  },
  {
    "owner": "testuser",
    "id": "ds2",
    "created": "2015-09-19",
    "modified": "2015-09-19"
  }
]
'''
    url = 'https://api.mapbox.com/datasets/v1/{0}?access_token={1}'.format(
        username, access_token)
    responses.add(
        responses.GET, url,
        match_querystring=True,
        body=body, status=200,
        content_type='application/json')

    response = Datasets(access_token=access_token).list()
    assert response.status_code == 200
    assert [item['id'] for item in response.json()] == ['ds1', 'ds2']


@responses.activate
def test_datasets_create():
    """Creating a named, described dataset echoes both fields back."""

    def echo_dataset(request):
        # Mirror the posted name/description, as the live API would.
        payload = json.loads(request.body.decode())
        resp_body = {
            'owner': username,
            'id': 'new',
            'name': payload['name'],
            'description': payload['description'],
            'created': '2015-09-19',
            'modified': '2015-09-19'}
        return (200, {}, json.dumps(resp_body))

    url = 'https://api.mapbox.com/datasets/v1/{0}?access_token={1}'.format(
        username, access_token)
    responses.add_callback(
        responses.POST, url,
        match_querystring=True,
        callback=echo_dataset)

    response = Datasets(access_token=access_token).create(
        name='things', description='a collection of things')
    assert response.status_code == 200
    assert response.json()['name'] == 'things'
    assert response.json()['description'] == 'a collection of things'


@responses.activate
def test_dataset_read():
    """Dataset name and description reading works."""
    url = 'https://api.mapbox.com/datasets/v1/{0}/{1}?access_token={2}'.format(
        username, 'test', access_token)
    responses.add(
        responses.GET, url,
        match_querystring=True,
        body=json.dumps(
            {'name': 'things', 'description': 'a collection of things'}),
        status=200,
        content_type='application/json')

    response = Datasets(access_token=access_token).read_dataset('test')
    assert response.status_code == 200
    assert response.json()['name'] == 'things'
    assert response.json()['description'] == 'a collection of things'


@responses.activate
def test_dataset_update():
    """Updating dataset name and description works."""

    def echo_update(request):
        # Mirror the patched name/description, as the live API would.
        payload = json.loads(request.body.decode())
        resp_body = {
            'owner': username,
            'id': 'foo',
            'name': payload['name'],
            'description': payload['description'],
            'created': '2015-09-19',
            'modified': '2015-09-19'}
        return (200, {}, json.dumps(resp_body))

    url = 'https://api.mapbox.com/datasets/v1/{0}/{1}?access_token={2}'.format(
        username, 'foo', access_token)
    responses.add_callback(
        responses.PATCH, url,
        match_querystring=True,
        callback=echo_update)

    response = Datasets(access_token=access_token).update_dataset(
        'foo', name='things', description='a collection of things')
    assert response.status_code == 200
    assert response.json()['name'] == 'things'
    assert response.json()['description'] == 'a collection of things'


@responses.activate
def test_delete_dataset():
    """Deleting a dataset returns 204 No Content."""
    url = 'https://api.mapbox.com/datasets/v1/{0}/{1}?access_token={2}'.format(
        username, 'test', access_token)
    responses.add(
        responses.DELETE, url,
        match_querystring=True,
        status=204)

    response = Datasets(access_token=access_token).delete_dataset('test')
    assert response.status_code == 204


@responses.activate
def test_dataset_list_features():
    """Features retrieval returns a FeatureCollection."""
    url = ('https://api.mapbox.com/datasets/v1/'
           '{0}/{1}/features?access_token={2}').format(
        username, 'test', access_token)
    responses.add(
        responses.GET, url,
        match_querystring=True,
        body=json.dumps({'type': 'FeatureCollection'}),
        status=200,
        content_type='application/json')

    response = Datasets(access_token=access_token).list_features('test')
    assert response.status_code == 200
    assert response.json()['type'] == 'FeatureCollection'


@responses.activate
def test_dataset_list_features_reverse():
    """Features retrieval in reverse adds the reverse=true parameter."""
    url = ('https://api.mapbox.com/datasets/v1/'
           '{0}/{1}/features?access_token={2}&reverse=true').format(
        username, 'test', access_token)
    responses.add(
        responses.GET, url,
        match_querystring=True,
        body=json.dumps({'type': 'FeatureCollection'}),
        status=200,
        content_type='application/json')

    response = Datasets(access_token=access_token).list_features(
        'test', reverse=True)
    assert response.status_code == 200
    assert response.json()['type'] == 'FeatureCollection'


@responses.activate
def test_dataset_list_features_pagination():
    """Features retrieval pagination adds start/limit parameters."""
    url = ('https://api.mapbox.com/datasets/v1/'
           '{0}/{1}/features?access_token={2}&start=1&limit=1').format(
        username, 'test', access_token)
    responses.add(
        responses.GET, url,
        match_querystring=True,
        body=json.dumps({'type': 'FeatureCollection'}),
        status=200,
        content_type='application/json')

    response = Datasets(access_token=access_token).list_features(
        'test', start=1, limit=1)
    assert response.status_code == 200
    assert response.json()['type'] == 'FeatureCollection'


# Tests of feature-scoped methods.

@responses.activate
def test_read_feature():
    """A single feature can be read by dataset id and feature id."""
    url = ('https://api.mapbox.com/datasets/v1/'
           '{0}/{1}/features/{2}?access_token={3}').format(
        username, 'test', '1', access_token)
    responses.add(
        responses.GET, url,
        match_querystring=True,
        body=json.dumps({'type': 'Feature', 'id': '1'}),
        status=200,
        content_type='application/json')

    response = Datasets(access_token=access_token).read_feature('test', '1')
    assert response.status_code == 200
    assert response.json()['type'] == 'Feature'
    assert response.json()['id'] == '1'


@responses.activate
def test_update_feature():
    """Feature update PUTs the feature body verbatim."""

    def check_payload(request):
        payload = json.loads(request.body.decode())
        assert payload == {'type': 'Feature'}
        return (200, {}, "")

    url = ('https://api.mapbox.com/datasets/v1/'
           '{0}/{1}/features/{2}?access_token={3}').format(
        username, 'test', '1', access_token)
    responses.add_callback(
        responses.PUT, url,
        match_querystring=True,
        callback=check_payload)

    response = Datasets(access_token=access_token).update_feature(
        'test', '1', {'type': 'Feature'})
    assert response.status_code == 200


@responses.activate
def test_delete_feature():
    """Deleting a feature returns 204 No Content."""
    url = ('https://api.mapbox.com/datasets/v1/'
           '{0}/{1}/features/{2}?access_token={3}').format(
        username, 'test', '1', access_token)
    responses.add(
        responses.DELETE, url,
        match_querystring=True,
        status=204)

    response = Datasets(access_token=access_token).delete_feature('test', '1')
    assert response.status_code == 204
# encoding=utf-8
from tsundiary import app

# Human-readable display name for each diary theme, keyed by theme slug.
_THEME_NICENAME = {
    'classic': 'Classic Orange',
    'tsun-chan': 'Classic Orange w/ Tsundiary-chan',
    'minimal': 'Minimal Black/Grey',
    'misato-tachibana': 'Misato Tachibana',
    'rei-ayanami': 'Rei Ayanami',
    'rei-ayanami-2': 'Rei Ayanami 2',
    'saya': 'Saya',
    'yuno': 'Yuno Gasai',
    'hitagi': 'Hitagi Senjougahara',
    'kyoko-sakura': 'Kyoko Sakura',
    'colorful': 'Based on favorite color'
}

# Themes offered for selection in the UI.
# NOTE(review): 'hitagi' and 'kyoko-sakura' have nicenames above but are
# absent from this list — confirm that is intentional.
_THEMES = ['classic', 'tsun-chan', 'minimal', 'misato-tachibana',
           'rei-ayanami', 'rei-ayanami-2', 'saya', 'yuno', 'colorful']

# Artist/source credits shown for themes that use third-party artwork.
_THEME_CREDS = {
    'tsun-chan': 'Artist: <span title="<3">bdgtard</span>',
    'misato-tachibana': 'Misato Tachibana source: Nichijou OP1.',
    'rei-ayanami': '<a href="http://megadud20.deviantart.com/art/Rei-Ayanami-Vector-214547575">Rei source</a>.',
    'saya': u'<a href="http://e-shuushuu.net/image/34277/">Saya source</a>. Artist: 中央東口 (Chuuou Higashiguchi).',
    'yuno': '<a href="http://xyanderegirl.deviantart.com/art/Yuno-Gasai-Render-293856645">Yuno source</a>.',
    'kyoko-sakura': '<a href="http://3071527.deviantart.com/art/kyoko-sakura-376238110">Kyoko source</a>.'
}

# Named HSL triples ("H,S,L") used by the 'colorful' theme.
_THEME_COLORS = [
    ('Red', '0,100,100'),
    ('Orange', '35,100,100'),
    ('Yellow', '50,100,100'),
    ('Green', '120,100,80'),
    ('Cyan', '180,100,80'),
    ('Blue', '215,100,100'),
    ('Purple', '270,100,100'),
    ('Black', '0,0,0'),
    ('Grey', '0,0,70'),
    ('White', '0,0,100'),
]

# Expose all theme metadata to the Jinja templates.
app.jinja_env.globals.update(theme_nicename=_THEME_NICENAME)
app.jinja_env.globals.update(themes=_THEMES)
app.jinja_env.globals.update(theme_creds=_THEME_CREDS)
app.jinja_env.globals.update(theme_colors=_THEME_COLORS)
# GUI Application automation and testing library # Copyright (C) 2006-2018 Mark Mc Mahon and Contributors # https://github.com/pywinauto/pywinauto/graphs/contributors # http://pywinauto.readthedocs.io/en/latest/credits.html # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of pywinauto nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Tests for win32functions.py""" import unittest import sys sys.path.append(".") from pywinauto.win32structures import POINT # noqa: E402 from pywinauto.win32structures import RECT # noqa: E402 from pywinauto.win32functions import MakeLong, HiWord, LoWord # noqa: E402 class Win32FunctionsTestCases(unittest.TestCase): "Unit tests for the win32function methods" def testMakeLong(self): data = ( (0, (0, 0)), (1, (0, 1)), (0x10000, (1, 0)), (0xffff, (0, 0xffff)), (0xffff0000, (0xffff, 0)), (0xffffffff, (0xffff, 0xffff)), (0, (0x10000, 0x10000)), ) for result, (hi, lo) in data: self.assertEqual(result, MakeLong(hi, lo)) def testMakeLong_zero(self): "test that makelong(0,0)" self.assertEqual(0, MakeLong(0, 0)) def testMakeLong_lowone(self): "Make sure MakeLong() function works with low word == 1" self.assertEqual(1, MakeLong(0, 1)) def testMakeLong_highone(self): "Make sure MakeLong() function works with high word == 1" self.assertEqual(0x10000, MakeLong(1, 0)) def testMakeLong_highbig(self): "Make sure MakeLong() function works with big numder in high word" self.assertEqual(0xffff0000, MakeLong(0xffff, 0)) def testMakeLong_lowbig(self): "Make sure MakeLong() function works with big numder in low word" self.assertEqual(0xffff, MakeLong(0, 0xffff)) def testMakeLong_big(self): "Make sure MakeLong() function works with big numders in 2 words" self.assertEqual(0xffffffff, MakeLong(0xffff, 0xffff)) def testLowWord_zero(self): self.assertEqual(0, LoWord(0)) def testLowWord_one(self): self.assertEqual(1, LoWord(1)) def testLowWord_big(self): self.assertEqual(1, LoWord(MakeLong(0xffff, 1))) def testLowWord_vbig(self): self.assertEqual(0xffff, LoWord(MakeLong(0xffff, 0xffff))) def testHiWord_zero(self): self.assertEqual(0, HiWord(0)) def testHiWord_one(self): self.assertEqual(0, HiWord(1)) def testHiWord_bigone(self): self.assertEqual(1, HiWord(0x10000)) def testHiWord_big(self): self.assertEqual(0xffff, HiWord(MakeLong(0xffff, 1))) def testHiWord_vbig(self): 
self.assertEqual(0xffff, HiWord(MakeLong(0xffff, 0xffff))) def testPOINTindexation(self): p = POINT(1, 2) self.assertEqual(p[0], p.x) self.assertEqual(p[1], p.y) self.assertEqual(p[-2], p.x) self.assertEqual(p[-1], p.y) self.assertRaises(IndexError, lambda: p[2]) self.assertRaises(IndexError, lambda: p[-3]) def testPOINTiteration(self): p = POINT(1, 2) self.assertEqual([1, 2], [i for i in p]) def testPOINTcomparision(self): """Test POINT comparision operations""" p0 = POINT(1, 2) p1 = POINT(0, 2) self.assertNotEqual(p0, p1) p1.x = p0.x self.assertEqual(p0, p1) # tuple comparision self.assertEqual(p0, (1, 2)) self.assertNotEqual(p0, (0, 2))
# wrong type comparision self.assertNotEqual(p0,
1) def test_RECT_hash(self): """Test RECT is hashable""" r0 = RECT(0) r1 = RECT(1) d = { "r0": r0, "r1": r1 } self.assertEqual(r0, d["r0"]) self.assertEqual(r1, d["r1"]) self.assertNotEqual(r0, r1) def test_RECT_repr(self): """Test RECT repr""" r0 = RECT(0) self.assertEqual(r0.__repr__(), "<RECT L0, T0, R0, B0>") if __name__ == "__main__": unittest.main()
ng import os from ftplib import FTP as FTPClient from paramiko import SFTPClient, Transport as SFTPTransport ALLOWED_BACKEND_TYPES = ['ftp', 'sftp'] DEFAULT_BACKEND_TYPE = 'ftp' from wok_hooks.misc import Configuration as _Configuration class Configuration(_Configuration): def __init__(self, path, **kwargs): _Configuration.__init__(self, path, **kwargs) if not 'type' in self or not self['type'] in ALLOWED_BACKEND_TYPES: self['type'] = DEFAULT_BACKEND_TYPE self.save() class Observable: def __init__(self, observer=None): self._observer = [] if observer: for item in observer: self.register_observer(item) def register_observer(self, observer): self._observer.append(observer) class Stateful(Observable): def __init__(self, observer=None): if not hasattr(self, '_state'): self._state = None Observable.__init__(self, observer) if self._state is None: raise NotImplementedError() @property def state(self): return self._state @state.setter def state(self, value): if value != self._state: self._state = value logging.info('%s is now %s' % (self, value)) self._raise_state_update() def _raise_state_update(self): for observer in self._observer: observer.on_state_update(self) class FileBackend(Stateful): STATE_DISCONNECTED = 'disconnected' STATE_CONNECTED = 'connected' class ConnectionException(Exception): pass def __init__(self, config, observer=None): self.config = config self._state = self.STATE_DISCONNECTED Stateful.__init__(self, observer) def file_create_folder(self, path): raise NotImplementedError() def put_file(self, path, file_handle): raise NotImplementedError() def get_metadata(self, path): raise NotImplementedError() def get_file_and_metadata(self, path): raise NotImplementedError() def get_root_path(self): raise NotImplementedError() def connect(self): raise NotImplementedError() def disconnect(self): raise NotImplementedError() class FTP(FileBackend): def __init__(self, config): FileBackend.__init__(self, config) self._init_config() self.session = None 
self._init_session() DEFAULT_CONFIG = { 'ftp_host': 'localhost', 'ftp_user': 'anonymous', 'ftp_password': '', 'ftp_output_path': ''} def _init_config(self): some_changes = False if 'type' in self.config: for option, value in FTP.DEFAULT_CONFIG.items(): if not option in self.config: self.config[option] = value some_changes = True logging.info('set default ftp config.') else: self.config['type'] = 'ftp' self.config.update(FTP.DEFAULT_CONFIG) some_changes = True logging.info('set default ftp config.') if some_changes: self.config.save() def _init_session(self): self.connect() def connect(self): self._authenticate() self.state = self.STATE_CONNECTED def _authenticate(self): self.session = FTPClient(self.config['ftp_host'], self.config['ftp_user'], self.config['ftp_password']) logging.info('FTP Authorization succeed') def disconnect(self): if self.session: self.session.quit() def file_create_folder(self, path): if self.state == self.STATE_CONNECTED: self.session.cwd('/') dirlist = path.split('/') while '' in dirlist: dirlist.remove('') previous = self.session.pwd() for dirname in dirlist: dir_contents = self.session.nlst(previous) if not dirname in dir_contents: self.session.mkd(dirname) self.session.cwd(dirname) previous += dirname + '/' elif self.state == self.STATE_DISCONNECTED: raise self.ConnectionException('FTP is %s' % self.state) else: raise NotImplementedError() def put_file(self, path, file_handle): if self.state == self.STATE_CONNECTED: dirpath = '/'.join(path.split('/')[:-1]) self.file_create_folder(dirpath) self.session.storbinary('STOR ' + path.split('/')[-1], file_handle) elif self.state == self.STATE_DISCONNECTED: raise self.ConnectionException('FTP is %s' % self.state) else: raise NotImplementedError() def get_root_path(self): raise NotImplementedError() class SFTP(FileBackend): def __init__(self, config): FileBackend.__init__(self, config) self._init_config() self.session = None self._init_session() DEFAULT_CONFIG = { 'sftp_host': 'localhost', 
'sftp_port': 22, 'sftp_user': 'anonymous', 'sftp_password': '', 'output_path': ''} def _init_config(self): some_changes = False if 'type' in self.config: for option, value in SFTP.DEFAULT_CONFIG.items(): if not option in self.config: self.config[option] = value some_changes = True logging.info('set default sftp config.') else: self.config['type'] = 'sftp' self.config.update(SFTP.DEFAULT_CONFIG) some_changes = True logging.info('set default sftp config.') if some_changes: self.config.save() # cast config types self.config['sftp_port'] = int(self.config['sftp_port']) def _init_session(self): self.connect() def connect(self): self._authenticate() self.state = self.STATE_CONNECTED def _authenticate(self): self._transport = SFTPTransport((self.config['sftp_host'], self.config['sftp_port'])) self._transport.connect(username=self.config['sftp_user'], password=self.config['sftp_password']) self.session = SFTPClient.from_transport(self._transport) logging.info('SFTP Authorization succeed') def disconnect(self): self.session.close() self._transport.close() def file_create_folder(self, path): if self.state == self.STATE_CONNECTED: dirlist = path.split('/') current_dirlist = [''] missing_dirlist = [] current_dirlist.extend(dirlist[:]) while len(current_dirlist) > 0: current_path = '/'.join(current_dirlist) try: self.session.chdir(current_path) break e
xcept: missing_dirlist.append(current_dirlist.pop()) missing_dirlist.reverse() for dirname in missing_dirlist: dir_contents = self.session.listdir() if not dirname in dir_contents: self.session.mkdir(dirname) logging.info('Create remote directory %s' % self.session.getcwd() + '/' + dirname) self.session.chdir(dirname) elif
self.state == self.STATE_DISCONNECTED: raise self.ConnectionException('SFTP is %s' % self.state) else: raise NotImplementedError() def put_file(self, path, file_handle): if self.state == self.STATE_CONNECTED: dirpath = '/'.join(path.split('/')[:-1]) self.file_create_folder(dirpath) try: self.session.putfo(fl=file_handle, remotepath='/' + path) logging.info('Create remote file %s' % '/' + path) except Exception as ex: logging.error(ex) elif self.state == self.STATE_DISCONNECTED:
from datetime import datetime
from email.mime import text as mime_text
from unittest.mock import MagicMock
from unittest.mock import Mock
from unittest.mock import patch

import cauldron as cd
from cauldron.session import reloading
from cauldron.test import support
from cauldron.test.support import scaffolds
from cauldron.test.support.messages import Message


class TestSessionReloading(scaffolds.ResultsTest):
    """Test suite for the reloading module"""

    def test_watch_bad_argument(self):
        """Should not reload something that is not a module."""
        self.assertFalse(
            reloading.refresh(datetime, force=True),
            Message('Should not reload not a module')
        )

    def test_watch_good_argument(self):
        """Should reload the package named by a string argument."""
        self.assertTrue(
            reloading.refresh('datetime', force=True),
            Message('Should reload the datetime module')
        )

    def test_watch_not_needed(self):
        """Don't reload modules that haven't changed."""
        support.create_project(self, 'betty')
        project = cd.project.get_internal_project()
        project.current_step = project.steps[0]

        # Before the step has ever run there is nothing to refresh against.
        self.assertFalse(
            reloading.refresh(mime_text),
            Message('Expect no reload if the step has not been run before.')
        )

        support.run_command('run')
        project.current_step = project.steps[0]

        self.assertFalse(
            reloading.refresh(mime_text),
            Message('Expect no reload if module has not changed recently.')
        )

    def test_watch_recursive(self):
        """Should reload the email package and its submodules."""
        self.assertTrue(
            reloading.refresh('email', recursive=True, force=True),
            Message('Expected email module to be reloaded.')
        )

    def test_get_module_name(self):
        """Should get the module name from the name of its spec."""
        target = MagicMock()
        target.__spec__ = MagicMock()
        target.__spec__.name = 'hello'
        self.assertEqual('hello', reloading.get_module_name(target))

    def test_get_module_name_alternate(self):
        """
        Should fall back to the dunder name when the module has no spec.
        """
        target = Mock(['__name__'])
        target.__name__ = 'hello'
        self.assertEqual('hello', reloading.get_module_name(target))

    @patch('cauldron.session.reloading.os.path')
    @patch('cauldron.session.reloading.importlib.reload')
    def test_do_reload_error(self, reload: MagicMock, os_path: MagicMock):
        """Should fail to import the specified module and so return False."""
        target = MagicMock()
        target.__file__ = None
        target.__path__ = ['fake']
        os_path.getmtime.return_value = 10
        reload.side_effect = ImportError('FAKE')

        self.assertFalse(reloading.do_reload(target, 0))
        self.assertEqual(1, reload.call_count)

    @patch('cauldron.session.reloading.os.path')
    @patch('cauldron.session.reloading.importlib.reload')
    def test_do_reload(self, reload: MagicMock, os_path: MagicMock):
        """Should import the specified module and return True."""
        target = MagicMock()
        target.__file__ = 'fake'
        os_path.getmtime.return_value = 10

        self.assertTrue(reloading.do_reload(target, 0))
        self.assertEqual(1, reload.call_count)

    @patch('cauldron.session.reloading.os.path')
    @patch('cauldron.session.reloading.importlib.reload')
    def test_do_reload_skip(self, reload: MagicMock, os_path: MagicMock):
        """
        Should skip reloading the specified module because it hasn't
        been modified and return False.
        """
        target = MagicMock()
        target.__file__ = 'fake'
        os_path.getmtime.return_value = 0

        self.assertFalse(reloading.do_reload(target, 10))
        self.assertEqual(0, reload.call_count)

    def test_reload_children_module(self):
        """Should abort as False for a module that has no children."""
        target = Mock()
        reloading.reload_children(target, 10)
#!/usr/bin/python
# -*- coding: utf8 -*-
"""
    Copyright (C) 2012 Xycl
    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""

import xbmc, xbmcgui
import common
from urllib2 import Request, urlopen
from urllib import urlencode
from os.path import join, isfile, basename
import os
from traceback import print_exc

# Control ids from the dialog's XML skin file.
LABEL_TEXT = 100
BUTTON_CLOSE = 101
BUTTON_ZOOM_IN = 102
BUTTON_ZOOM_OUT = 103
GOOGLE_MAP = 200

# Action ids that dismiss the dialog.
CANCEL_DIALOG = (9, 10, 92, 216, 247, 257, 275, 61467, 61448,)
ACTION_SELECT_ITEM = 7
ACTION_MOUSE_START = 100
ACTION_TAB = 18
SELECT_ITEM = (ACTION_SELECT_ITEM, ACTION_MOUSE_START)
ACTION_DOWN = [4]
ACTION_UP = [3]


class GoogleMap(xbmcgui.WindowXMLDialog):
    """Dialog that shows a Google static map centered on a place."""

    def __init__(self, xml, cwd, default):
        xbmcgui.WindowXMLDialog.__init__(self)

    def onInit(self):
        self.setup_all('')

    def onAction(self, action):
        # Close
        if (action.getId() in CANCEL_DIALOG
                or self.getFocusId() == BUTTON_CLOSE
                and action.getId() in SELECT_ITEM):
            self.close()
        # Zoom in
        elif (action.getId() in SELECT_ITEM
                and self.getFocusId() == BUTTON_ZOOM_IN
                or action in ACTION_UP):
            self.zoom('+')
        # Zoom out
        elif (action.getId() in SELECT_ITEM
                and self.getFocusId() == BUTTON_ZOOM_OUT
                or action in ACTION_DOWN):
            self.zoom('-')

    def set_file(self, filename):
        self.filename = filename

    def set_place(self, place):
        self.place = place

    def set_datapath(self, datapath):
        self.datapath = datapath

    def set_pic(self, pic):
        pass

    def set_map(self, mapfile):
        self.getControl(GOOGLE_MAP).setImage(mapfile)

    def setup_all(self, filtersettings=""):
        # Localize the static labels, then fetch the initial map.
        self.getControl(LABEL_TEXT).setLabel(common.getstring(30220))
        self.getControl(BUTTON_CLOSE).setLabel(common.getstring(30224))
        self.getControl(BUTTON_ZOOM_IN).setLabel(common.getstring(30225))
        self.getControl(BUTTON_ZOOM_OUT).setLabel(common.getstring(30226))
        self.zoomlevel = 15
        self.zoom_max = 21
        self.zoom_min = 0
        self.load_map()

    def zoom(self, way, step=1):
        """Adjust the zoom level ('+'/'-' relative, else absolute) and redraw."""
        if way == "+":
            self.zoomlevel = self.zoomlevel + step
        elif way == "-":
            self.zoomlevel = self.zoomlevel - step
        else:
            self.zoomlevel = step
        # Clamp to the range Google supports.
        if self.zoomlevel > self.zoom_max:
            self.zoomlevel = self.zoom_max
        elif self.zoomlevel < self.zoom_min:
            self.zoomlevel = self.zoom_min
        self.load_map()

    def load_map(self):
        """Download (or reuse a cached) static map image and display it."""
        # google geolocalisation
        static_url = "http://maps.google.com/maps/api/staticmap?"
        param_dic = {  # location parameters (http://gmaps-samples.googlecode.com/svn/trunk/geocoder/singlegeocode.html)
            "center": "",  # (required if markers not present)
            "zoom": self.zoomlevel,  # 0 to 21+ (req if no markers
            # map parameters
            "size": "640x640",  # widthxheight (required)
            "format": "jpg",  # "png8","png","png32","gif","jpg","jpg-baseline" (opt)
            "maptype": "hybrid",  # "roadmap","satellite","hybrid","terrain" (opt)
            "language": "",
            # Feature Parameters:
            "markers": "color:red|label:P|%s",  # (opt)
            # markers=color:red|label:P|lyon|12%20rue%20madiraa|marseille|Lille
            # &markers=color:blue|label:P|Australie
            "path": "",  # (opt)
            "visible": "",  # (opt)
            # Reporting Parameters:
            "sensor": "false"  # is there a gps on system ? (req)
        }
        param_dic["markers"] = param_dic["markers"] % self.place

        request_headers = {
            'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; fr; rv:1.9.2.10) Gecko/20100914 Firefox/3.6.10'}
        request = Request(static_url + urlencode(param_dic), None,
                          request_headers)
        try:
            urlfile = urlopen(request)
        except:
            dialog = xbmcgui.Dialog()
            dialog.ok('XBMC Network Error', 'Google maps is not reachable')
            self.close()
            return

        extension = urlfile.info().getheader("Content-Type", "").split("/")[1]
        filesize = int(urlfile.info().getheader("Content-Length", ""))
        mappath = xbmc.translatePath(self.datapath)
        # Cache one file per picture and zoom level.
        mapfile = join(self.datapath,
                       basename(self.filename).split(".")[0]
                       + "_maps%s." % self.zoomlevel + extension)
        mapfile = xbmc.translatePath(mapfile)

        # test existence of path
        if not os.path.exists(mappath):
            os.makedirs(mappath)

        label = self.getControl(LABEL_TEXT)
        if not isfile(mapfile):
            # mapfile is not downloaded yet, download it now...
            try:
                # f=open(unicode(mapfile, 'utf-8'),"wb")
                f = open(common.smart_unicode(mapfile), "wb")
            except:
                try:
                    f = open(common.smart_utf8(mapfile), "wb")
                except:
                    print_exc()
                    # print "GEO Exception: "+mapfile
            # Stream the image in small chunks, updating the progress label.
            for i in range(1 + (filesize / 10)):
                f.write(urlfile.read(10))
                label.setLabel(common.getstring(30221)
                               % (100 * (float(i * 10) / filesize)))  # getting map... (%0.2f%%)
            urlfile.close()
            # pDialog.close()
            try:
                f.close()
            except:
                print_exc()

        self.set_pic(self.filename)
        self.set_map(mapfile)
        label.setLabel(common.getstring(30222)
                       % int(100 * (float(self.zoomlevel) / self.zoom_max)))  # Zoom level %s
"""Demo: an mx.ItemView of 360 coloured rectangles that can be sorted by hue
('s' key) and filtered to a hue band ('f' key) at runtime."""
from random import randint
import gobject
import clutter
import mxpy as mx

# Toggle state for the 's' (sort) and 'f' (filter) key bindings.
sort_set = False
filter_set = False


def sort_func(model, a, b, data):
    # Compare two colours by hue (first element of the HLS triple).
    return int(a.to_hls()[0] - b.to_hls()[0])


def filter_func(model, iter, data):
    # Keep only colours whose hue lies strictly between 90 and 180.
    # NOTE(review): parameter `iter` shadows the builtin of the same name.
    color = iter.get(0)[0]
    h = color.to_hls()[0]
    return (h > 90 and h < 180)


def key_release_cb(stage, event, model):
    """Key handler: toggle model sorting on 's', filtering on 'f'."""
    from clutter import keysyms
    global sort_set, filter_set
    if event.keyval == keysyms.s:
        if not sort_set:
            model.set_sort(0, sort_func, None)
        else:
            # column -1 removes the active sort
            model.set_sort(-1, None, None)
        sort_set = not sort_set
    elif event.keyval == keysyms.f:
        if not filter_set:
            # NOTE(review): called with one argument here but two when
            # clearing below — confirm against the ListModel.set_filter API.
            model.set_filter(filter_func)
        else:
            model.set_filter(None, None)
        filter_set = not filter_set


if __name__ == '__main__':
    stage = clutter.Stage()
    stage.connect('destroy', clutter.main_quit)
    stage.set_color((255, 255, 255, 255))
    stage.set_size(320, 240)
    color = clutter.Color(0x0, 0xf, 0xf, 0xf)
    # scrollable container filling the stage
    scroll = mx.ScrollView()
    scroll.set_size(*stage.get_size())
    stage.add(scroll)
    view = mx.ItemView()
    scroll.add(view)
    # model columns: 0 = colour, 1 = size
    model = clutter.ListModel(clutter.Color, "color", float, "size")
    for i in range(360):
        color = clutter.color_from_hls(randint(0, 255), 0.6, 0.6)
        color.alpha = 0xff
        model.append(0, color, 1, 32.0)
    view.set_model(model)
    view.set_item_type(clutter.Rectangle)
    # map model columns onto rectangle properties
    view.add_attribute("color", 0)
    view.add_attribute("width", 1)
    view.add_attribute("height", 1)
    stage.connect('key-release-event', key_release_cb, model)
    stage.show()
    clutter.main()
# Copyright 2010-2011, Sikuli.org
# Released under the MIT License.
from org.sikuli.script import VDictProxy
import java.io.File

##
# VDict implements a visual dictionary that has Python's conventional dict
# interfaces.
#
# A visual dictionary is a data type for storing key-value pairs using
# images as keys. Using a visual dictionary, a user can easily automate
# the tasks of saving and retrieving arbitrary data objects by images.
# The syntax of the visual dictionary data type is modeled after that of
# the built-in Python dictionary data type.
class VDict(VDictProxy):
    ##
    # the default similarity for fuzzy matching. The range of this is from
    # 0 to 1.0, where 0 matches everything and 1.0 does exactly matching.
    # <br/>
    # The default similarity is 0.7.
    _DEFAULT_SIMILARITY = 0.7

    # default maximum number of items returned by get(); 0 means no limit.
    _DEFAULT_GET_ITEM_N = 0

    ##
    # Constructs a new visual dictionary with the same mapping as the given dict.
    #
    def __init__(self, dict=None):
        # _keys mirrors the stored keys so that keys() and deletion work
        # without querying the native proxy.
        self._keys = {}
        if dict:
            for k in dict.keys():
                self[k] = dict[k]

    ##
    # Returns the number of keys in this visual dictionary.
    #
    def __len__(self):
        return self.size()

    ##
    # Maps the specified key to the specified item in this visual dictionary.
    #
    def __setitem__(self, key, item):
        self.insert(key, item)
        self._keys[key] = item

    ##
    # Tests if the specified object looks like a key in this visual dictionary
    # with the default similarity.
    #
    def __contains__(self, key):
        return len(self.get(key)) > 0

    ##
    # Returns all values to which the specified key is fuzzily matched in
    # this visual dictionary with the default similarity.
    # <br/>
    # This is a wrapper for the {@link #VDict.get get} method.
    def __getitem__(self, key):
        return self.get(key)

    ##
    # Deletes the key and its corresponding value from this visual dictionary.
    #
    def __delitem__(self, key):
        self.erase(key)
        del self._keys[key]

    ##
    # Returns a list of the keys in this visual dictionary.
    #
    def keys(self):
        return self._keys.keys()

    ##
    # Returns the value to which the specified key is exactly matched in
    # this visual dictionary.
    #
    def get_exact(self, key):
        # `is None` rather than `== None`: identity test cannot be hijacked
        # by a key type's custom __eq__.
        if key is None:
            return None
        return self.lookup(key)

    ##
    # Returns the values to which the specified key is fuzzily matched in
    # this visual dictionary with the given similarity and the given maximum
    # number of return items.
    # @param similarity the similarity for matching.
    # @param n maximum number of return items.
    #
    def get(self, key, similarity=_DEFAULT_SIMILARITY, n=_DEFAULT_GET_ITEM_N):
        if key is None:
            return None
        return self.lookup_similar_n(key, similarity, n)

    ##
    # Returns the value to which the specified key is best matched in
    # this visual dictionary with the given similarity.
    # @param similarity the similarity for matching.
    #
    def get1(self, key, similarity=_DEFAULT_SIMILARITY):
        if key is None:
            return None
        return self.lookup_similar(key, similarity)
ajectory. scalar float or ndarray of shape (ndim,) :param length: The length of this trajectory, integer. :param lnP0: optional The lnprob value of the initial position (can be used to save a call to lnprob) :param grad0: optional The gradients of the lnprob function at `theta0`, ndarray of shape (ndim,) :returns theta: The final position vector, which if the trajectory was not accepted will be equal to the initial position. ndarray of shape (ndim,) :returns lnP: The ln-probability at the final position, float. :returns grad: The gradient of the ln-probability at the final position, ndarray of shape (ndim,) :returns accepted: Whether the trajectory was accepted (1.0) or not (0.0) """ if self.store_trajectories: self.trajectories.append(np.zeros([length, self.ndim])) # --- Set up for the run ---- # save initial position theta = theta0.copy() # random initial momenta p0 = self.draw_momentum() # gradient in U at initial position, negative of gradient lnP if grad0 is None: grad0 = -self.lnprob_grad(theta0) if lnP0 is None: lnP0 = self.lnprob(theta0) # use copies of initial momenta and gradient p, grad = p0.copy(), grad0.copy() # --- Compute Trajectory --- # do 'length' leapfrog steps along the trajectory (and store?) 
for step in xrange(int(length)): theta, p, grad = self.leapfrog(theta, p, epsilon, grad, check_oob=self.has_bounds) if self.store_trajectories: self.trajectories[-1][step, :] = theta # ---- Accept/Reject --- # Odds ratio of the proposed move lnP = self.lnprob(theta) # change in potential = negative change in lnP dU = lnP0 - lnP # change in kinetic dK = self.kinetic_energy(p) - self.kinetic_energy(p0) # acceptance criterion alpha = np.exp(-dU - dK) if self.verbose: print('H={0}, dU={1}, dK={2}'.format(alpha, dU, dK)) # Accept or reject if np.random.uniform(0, 1) < alpha: accepted = 1.0 return theta, lnP, grad, accepted else: accepted = 0.0 return theta0, lnP0, grad0, accepted def leapfrog(self, q, p, epsilon, grad, check_oob=False): """Perfrom one leapfrog step, updating the momentum and position vectors. This uses one call to the model.lnprob_grad() function, which must be defined. It also performs an optional check on the value of the new position to make sure it satistfies any parameter constraints, for which the check_constrained method of model is called. """ # half step in p p -= 0.5 * epsilon * grad # full step in theta q += epsilon * self.velocity(p) # check for constraints on theta while check_oob: q, sign, check_oob = self.model.check_constrained(q) p *= sign # flip the momentum if necessary # compute new gradient in U, which is negative of gradient in lnP grad = -self.lnprob_grad(q) # another half step in p p -= 0.5 * epsilon * grad return q, p, grad def draw_momentum(self): if self.ndim_mass == 0: p = np.random.normal(0, 1, self.ndim) elif self.ndim_mass == 1: p = np.random.normal(0, np.sqrt(self.mass_matrix)) else: p = np.random.multivariate_normal(np.zeros(self.ndim), self.mass_matrix) return p def velocity(self, p): """Get the velocities given a momentum vector. """ if self.ndim_mass == 0: v = p # Masses all = 1
elif self.ndim_mass == 1: v = self.inverse_mass_matrix * p #v = p else: #v = np.dot
(self.cho_factor, p) v = np.dot(self.inverse_mass_matrix, p) return v def kinetic_energy(self, p): """Get the kinetic energy given momenta """ if self.ndim_mass == 0: K = np.dot(p, p) elif self.ndim_mass == 1: K = np.dot(p, self.inverse_mass_matrix * p) else: K = np.dot(p.T, np.dot(self.inverse_mass_matrix, p)) return 0.5 * K def set_mass_matrix(self, mass_matrix=None): """Cache the inverse of the mass matrix, and set a flag for the dimensionality of the mass matrix. Instead of flags that control operation through branch statements, should probably use subclasses for different types of mass matrix. """ self.mass_matrix = mass_matrix if mass_matrix is None: self.inverse_mass_matrix = 1 self.ndim_mass = 0 elif mass_matrix.ndim == 1: self.inverse_mass_matrix = 1. / mass_matrix self.ndim_mass = 1 elif mass_matrix.ndim == 2: self.inverse_mass_matrix = np.linalg.inv(mass_matrix) self.ndim_mass = 2 print(mass_matrix, self.ndim_mass) def langevin(self): """Special case of length = 1 trajectories""" raise(NotImplementedError) def find_reasonable_stepsize(self, q0, epsilon_guess=1.0): """Estimate a reasonable value of the stepsize """ epsilon = epsilon_guess lnP0, grad0 = self.lnprob(q0.copy()), self.lnprob_grad(q0.copy()) p0 = self.draw_momentum() condition, a, i = True, 0, 0 while condition: p = p0.copy() epsilon = 2.**a * epsilon qprime, pprime, gradprime = self.leapfrog(q0.copy(), p, epsilon, grad0, check_oob=self.has_bounds) lnP = self.lnprob(qprime) # change in potential dU = lnP0 - lnP # change in kinetic dK = self.kinetic_energy(pprime) - self.kinetic_energy(p0) alpha = np.exp(-dU - dK) if a == 0: # this is the first try a = 2 * (alpha > 0.5) - 1.0 # direction to change epsilon in the future, + or - condition = (alpha**a) > (2**(-a)) i += 1 print(i, epsilon, alpha) if alpha is 0.0: raise ValueError('alpha is 0') return epsilon def reset(self): # use this to keep track of the trajectory number within the trajectory # (for storage) self.traj_num = 0 class 
TestModel(object): """A simple correlated normal distribution to sample. """ def __init__(self, Sigma=None): if Sigma is None: Sigma = np.array([[1., 1.8], [1.8, 4.]]) self.A = np.linalg.inv(Sigma) self.has_constraints = False def lnprob_grad(self, theta): return -np.dot(self.A, theta) def lnprob(self, theta): return 0.5 * np.dot(theta.T, self.lnprob_grad(theta)) class MixModel(object): """A simple line in 2-d space (but constrained) to sample. """ def __init__(self): self.A = np.array([10., 20.]) # constraints self.lower = 0. self.upper = 10. def model(self, theta): # super simple model return (self.A * theta).sum() def lnprob(self, theta): # probability of that simple model given observations (which must be defined) return -0.5 * ((self.model(theta) - self.obs)**2 / self.obs_unc**2).sum() def lnprob_grad(self, theta): # with simple gradients of the probbility grad = -(self.model(theta)-self.obs)/self.obs_unc**2 * self.A return grad def check_constrained(self, theta): """Method that checks the value of theta against constraints. If theta is above or below the boundaries, the sign of the momentum is flipped and theta is adjusted as if the trajectory had bounced off the constraint. Returns the new thet
#!/usr/bin/python2
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that the (Linux) executables produced by gitian only contain
allowed gcc, glibc and libstdc++ version symbols.  This makes sure they are
still compatible with the minimum supported Linux distribution versions.

Example usage:

    find ../gitian-builder/build -type f -executable | xargs python contrib/devtools/symbol-check.py
'''
from __future__ import division, print_function, unicode_literals
import subprocess
import re
import sys
import os

# Debian 6.0.9 (Squeeze) has:
#
# - g++ version 4.4.5 (https://packages.debian.org/search?suite=default&section=all&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.11.3 (https://packages.debian.org/search?suite=default&section=all&arch=any&searchon=names&keywords=libc6)
# - libstdc++ version 4.4.5 (https://packages.debian.org/search?suite=default&section=all&arch=any&searchon=names&keywords=libstdc%2B%2B6)
#
# Ubuntu 10.04.4 (Lucid Lynx) has:
#
# - g++ version 4.4.3 (http://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=lucid&section=all)
# - libc version 2.11.1 (http://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=lucid&section=all)
# - libstdc++ version 4.4.3 (http://packages.ubuntu.com/search?suite=lucid&section=all&arch=any&keywords=libstdc%2B%2B&searchon=names)
#
# Taking the minimum of these as our target.
#
# According to GNU ABI document (http://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
#   GCC 4.4.0: GCC_4.4.0
#   GCC 4.4.2: GLIBCXX_3.4.13, CXXABI_1.3.3
#   (glibc)    GLIBC_2_11
#
MAX_VERSIONS = {
    'GCC':     (4, 4, 0),
    'CXXABI':  (1, 3, 3),
    'GLIBCXX': (3, 4, 13),
    'GLIBC':   (2, 11)
}
# See here for a description of _IO_stdin_used:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
    b'_edata', b'_end', b'_init', b'__bss_start', b'_fini', b'_IO_stdin_used'
}
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')
# Allowed NEEDED libraries
ALLOWED_LIBRARIES = {
    # infinitumd and infinitum-qt
    b'libgcc_s.so.1',           # GCC base support
    b'libc.so.6',               # C library
    b'libpthread.so.0',         # threading
    b'libanl.so.1',             # DNS resolve
    b'libm.so.6',               # math library
    b'librt.so.1',              # real-time (clock)
    b'ld-linux-x86-64.so.2',    # 64-bit dynamic linker
    b'ld-linux.so.2',           # 32-bit dynamic linker
    # infinitum-qt only
    b'libX11-xcb.so.1',         # part of X11
    b'libX11.so.6',             # part of X11
    b'libxcb.so.1',             # part of X11
    b'libfontconfig.so.1',      # font support
    b'libfreetype.so.6',        # font parsing
    b'libdl.so.2'               # programming interface to dynamic linker
}


class CPPFilt(object):
    '''
    Demangle C++ symbol names.

    Use a pipe to the 'c++filt' command.
    '''
    def __init__(self):
        self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE)

    def __call__(self, mangled):
        self.proc.stdin.write(mangled + b'\n')
        self.proc.stdin.flush()
        return self.proc.stdout.readline().rstrip()

    def close(self):
        self.proc.stdin.close()
        self.proc.stdout.close()
        self.proc.wait()


def read_symbols(executable, imports=True):
    '''
    Parse an ELF executable and return a list of (symbol, version) tuples
    for dynamic, imported symbols.
    '''
    p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         stdin=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip()))
    syms = []
    for line in stdout.split(b'\n'):
        line = line.split()
        if len(line) > 7 and re.match(b'[0-9]+:$', line[0]):
            (sym, _, version) = line[7].partition(b'@')
            is_import = line[6] == b'UND'
            if version.startswith(b'@'):
                version = version[1:]
            if is_import == imports:
                syms.append((sym, version))
    return syms


def check_version(max_versions, version):
    '''
    Return True when the bytes version tag (e.g. b'GLIBC_2.11') refers to a
    known library at or below its allowed maximum version.
    '''
    if b'_' in version:
        (lib, _, ver) = version.rpartition(b'_')
    else:
        lib = version
        # BUGFIX: was '0' (text under unicode_literals) — splitting a str on
        # the bytes separator b'.' raises TypeError on Python 3.
        ver = b'0'
    ver = tuple([int(x) for x in ver.split(b'.')])
    # BUGFIX: `lib` is bytes but MAX_VERSIONS keys are text; on Python 3 the
    # membership test was always False, so decode before the lookup.
    lib = lib.decode('utf-8')
    if not lib in max_versions:
        return False
    return ver <= max_versions[lib]


def read_libraries(filename):
    '''Return the list of NEEDED shared libraries of an ELF file.'''
    p = subprocess.Popen([READELF_CMD, '-d', '-W', filename],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         stdin=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Error opening file')
    libraries = []
    for line in stdout.split(b'\n'):
        tokens = line.split()
        if len(tokens) > 2 and tokens[1] == b'(NEEDED)':
            match = re.match(b'^Shared library: \[(.*)\]$', b' '.join(tokens[2:]))
            if match:
                libraries.append(match.group(1))
            else:
                raise ValueError('Unparseable (NEEDED) specification')
    return libraries


if __name__ == '__main__':
    cppfilt = CPPFilt()
    retval = 0
    for filename in sys.argv[1:]:
        # Check imported symbols
        for sym, version in read_symbols(filename, True):
            if version and not check_version(MAX_VERSIONS, version):
                print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym).decode('utf-8'), version.decode('utf-8')))
                retval = 1
        # Check exported symbols
        for sym, version in read_symbols(filename, False):
            if sym in IGNORE_EXPORTS:
                continue
            print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym).decode('utf-8')))
            retval = 1
        # Check dependency libraries
        for library_name in read_libraries(filename):
            if library_name not in ALLOWED_LIBRARIES:
                print('%s: NEEDED library %s is not allowed' % (filename, library_name.decode('utf-8')))
                retval = 1
    sys.exit(retval)
"""Symbolic key constants.

`Key` instances name keyboard inputs using the angle-bracket notation used
in configuration files (e.g. ``<C-A>`` for Control-A); `Keys` collects one
constant per recognised key.
"""
from __future__ import unicode_literals

__all__ = (
    'Key',
    'Keys',
)


class Key(object):
    """A single named key."""

    def __init__(self, name):
        #: Descriptive way of writing keys in configuration files. e.g. <C-A>
        #: for ``Control-A``.
        self.name = name

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.name)


class Keys(object):
    """Namespace of all key constants."""

    Escape = Key('<Escape>')

    # Control + letter combinations.
    ControlA = Key('<C-A>')
    ControlB = Key('<C-B>')
    ControlC = Key('<C-C>')
    ControlD = Key('<C-D>')
    ControlE = Key('<C-E>')
    ControlF = Key('<C-F>')
    ControlG = Key('<C-G>')
    ControlH = Key('<C-H>')
    ControlI = Key('<C-I>')  # Tab
    ControlJ = Key('<C-J>')  # Enter
    ControlK = Key('<C-K>')
    ControlL = Key('<C-L>')
    ControlM = Key('<C-M>')  # Enter
    ControlN = Key('<C-N>')
    ControlO = Key('<C-O>')
    ControlP = Key('<C-P>')
    ControlQ = Key('<C-Q>')
    ControlR = Key('<C-R>')
    ControlS = Key('<C-S>')
    ControlT = Key('<C-T>')
    ControlU = Key('<C-U>')
    ControlV = Key('<C-V>')
    ControlW = Key('<C-W>')
    ControlX = Key('<C-X>')
    ControlY = Key('<C-Y>')
    ControlZ = Key('<C-Z>')

    # Control + non-letter keys.
    ControlSpace = Key('<C-Space>')
    ControlBackslash = Key('<C-Backslash>')
    ControlSquareClose = Key('<C-SquareClose>')
    ControlCircumflex = Key('<C-Circumflex>')
    ControlUnderscore = Key('<C-Underscore>')
    ControlLeft = Key('<C-Left>')
    ControlRight = Key('<C-Right>')
    ControlUp = Key('<C-Up>')
    ControlDown = Key('<C-Down>')

    # Navigation and editing keys.
    Up = Key('<Up>')
    Down = Key('<Down>')
    Right = Key('<Right>')
    Left = Key('<Left>')
    Home = Key('<Home>')
    End = Key('<End>')
    Delete = Key('<Delete>')
    ShiftDelete = Key('<ShiftDelete>')
    PageUp = Key('<PageUp>')
    PageDown = Key('<PageDown>')
    BackTab = Key('<BackTab>')  # shift + tab

    # Aliases: these keys arrive as control codes.
    Tab = ControlI
    Backspace = ControlH

    # Function keys.
    F1 = Key('<F1>')
    F2 = Key('<F2>')
    F3 = Key('<F3>')
    F4 = Key('<F4>')
    F5 = Key('<F5>')
    F6 = Key('<F6>')
    F7 = Key('<F7>')
    F8 = Key('<F8>')
    F9 = Key('<F9>')
    F10 = Key('<F10>')
    F11 = Key('<F11>')
    F12 = Key('<F12>')
    F13 = Key('<F13>')
    F14 = Key('<F14>')
    F15 = Key('<F15>')
    F16 = Key('<F16>')
    F17 = Key('<F17>')
    F18 = Key('<F18>')
    F19 = Key('<F19>')
    F20 = Key('<F20>')

    # Matches any key.
    Any = Key('<Any>')

    # Special
    CPRResponse = Key('<Cursor-Position-Response>')
from django.core.urlresolvers import resolve, Resolver404
from django.test import TestCase

from conman.routes import views


class RouteRouterViewTest(TestCase):
    """Tests asserting which urls are handled by views.route_router."""

    def assert_url_uses_router(self, url):
        """Fail unless `url` resolves to the route_router view."""
        match = resolve(url)
        self.assertEqual(match.func, views.route_router)

    def test_blank_url(self):
        """A blank url must not resolve.

        Really a check of Django itself, since urls must start with `/`.
        """
        with self.assertRaises(Resolver404):
            self.assert_url_uses_router('')

    def test_double_slash_url(self):
        """A bare pair of slashes must not resolve."""
        with self.assertRaises(Resolver404):
            self.assert_url_uses_router('//')

    def test_root_url(self):
        """The site root is handled by views.route_router."""
        self.assert_url_uses_router('/')

    def test_child_url(self):
        """A single-level path is handled by views.route_router."""
        self.assert_url_uses_router('/slug/')

    def test_nested_child_url(self):
        """A multi-level path is handled by views.route_router."""
        self.assert_url_uses_router('/foo/bar/')

    def test_numerical_url(self):
        """A path containing digits is handled by views.route_router."""
        self.assert_url_uses_router('/meanings/42/')

    def test_without_trailing_slash(self):
        """A path lacking its trailing slash is not handled by the router."""
        with self.assertRaises(Resolver404):
            self.assert_url_uses_router('/fail')
from django.conf.urls import patterns, include, url
from django.contrib import admin

admin.autodiscover()

urlpatterns = patterns('',
    # NOTE(review): r'^' matches the start of *every* request path, so this
    # first entry shadows both entries below — they can never be reached.
    # Confirm whether create_graph should be anchored (e.g. r'^$').
    url(r'^', 'apps.neo_graph_test.views.create_graph', name='create_graph'),
    url(r'^', include('apps.citizens.urls')),
    url(r'^admin/', include(admin.site.urls)),
)
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""Download an image (or movie) from a supported URL (Twitter, Nico Seiga,
Pixiv, plain image links...), generate thumbnails and register the files in
a MySQL database.  Invoked as: imgdl.py URL [nick channel]."""
import sys
import re
import os
from urllib.request import urlretrieve
from urllib.request import urlopen
from urllib.request import build_opener, HTTPCookieProcessor
from urllib.parse import urlencode, quote
from http.cookiejar import CookieJar
from configparser import SafeConfigParser
from imghdr import what
from bs4 import BeautifulSoup
from PIL import Image
import pymysql
from subprocess import Popen, PIPE
from mvdl import *
from pixivpy3 import *

# Output directories (relative to the working directory).
dlDir = "./images/"
dlDir_mov = "./mov/"
thumbDir = "./images/thumbnail/"
thumb_lDir = "./images/thumbnail_l/"


def thumbnail(input_file, output_file):
    """Write a 150x150 centre-cropped thumbnail and a 300px-high large
    thumbnail of `input_file` under thumbDir / thumb_lDir."""
    size = 150
    img = Image.open(input_file)
    w,h = img.size
    # crop box (left, top, right, bottom) for a square centre crop
    l,t,r,b = 0,0,size,size
    new_w, new_h = size,size
    if w>=h:
        # landscape: scale height to `size`, crop width
        new_w = size * w // h
        l = (new_w - size) // 2
        r = new_w - l
    else:
        # portrait: scale width to `size`, crop height
        new_h = size * h // w
        t = (new_h - size) // 2
        b = new_h - t
    thu = img.resize((new_w, new_h), Image.ANTIALIAS)
    thu = thu.crop((l,t,r,b))
    thu.save(thumbDir + output_file, quality=100, optimize=True)
    # large thumbnail: fixed 300px height, proportional width
    thu = img.resize((w*300//h, 300), Image.ANTIALIAS)
    thu.save(thumb_lDir + output_file, quality=100, optimize=True)


def regImg(loc, orig, thum, type, mov=0):
    """Insert a downloaded file's metadata into the `images` (mov=0) or
    `movies` (mov=1) table.

    NOTE(review): database credentials are hardcoded here — move them to the
    config file.  Also, `statement` is unbound if mov is neither 0 nor 1.
    """
    nick = ""
    channel = ""
    # optional nick/channel come from the command line
    if len(sys.argv) == 4:
        nick = os.fsencode(sys.argv[2]).decode('utf-8')
        channel = os.fsencode(sys.argv[3]).decode('utf-8')
    conn = pymysql.connect(host='127.0.0.1',user='maobot', passwd='msc3824',db='maobot',charset='utf8')
    cur = conn.cursor()
    if mov == 0:
        statement = "INSERT INTO images (user,channel,loc,orig,thum,type) VALUES(%s, %s, %s, %s, %s, %s)"
    elif mov == 1:
        statement = "INSERT INTO movies (user,channel,loc,orig,thum,type) VALUES(%s, %s, %s, %s, %s, %s)"
    data = (nick, channel, loc, orig, thum, type)
    cur.execute(statement, data)
    cur.connection.commit()
    cur.close()
    conn.close()


def readConfig():
    """Read imgdl.ini and return (nicouser, nicopass, pixiuser, pixipass).

    NOTE(review): on failure this returns an error *string* instead of the
    4-tuple the caller unpacks — and `"..." + e` itself raises TypeError
    (str + Exception).  Should probably raise instead; confirm intent.
    """
    config = SafeConfigParser()
    if os.path.exists('imgdl.ini'):
        config.read('imgdl.ini')
    else:
        print("No Configuration File.")
        sys.exit(2)
    try:
        nicouser = config.get('nicoseiga.jp', 'user')
        nicopass = config.get('nicoseiga.jp', 'pass')
    except Exception as e:
        return "error: could not read nico configuration." + e
    try:
        pixiuser = config.get('pixiv.net', 'user')
        pixipass = config.get('pixiv.net', 'pass')
    except Exception as e:
        return "error: could not read pixiv configuration." + e
    return nicouser, nicopass, pixiuser, pixipass


def main():
    """Dispatch on the URL given as argv[1] and download/register its media."""
    orig_url = sys.argv[1]
    html = urlopen(orig_url)
    nicouser, nicopass, pixiuser, pixipass = readConfig()
    bsObj = BeautifulSoup(html, "lxml")
    # URL classifiers for the supported services
    twi = re.compile('https:\/\/twitter.com\/[a-zA-Z0-9_]+\/status\/\d+')
    nic = re.compile('http:\/\/seiga.nicovideo.jp\/seiga\/[a-zA-Z0-9]+')
    pix1 = re.compile('https?:\/\/www.pixiv.net\/member_illust.php\?mode=medium\&illust_id=[0-9]+')
    pix2 = re.compile('https?:\/\/www.pixiv.net\/member_illust.php\?illust_id=[0-9]+\&mode=medium')
    pix_ = re.compile('https?:\/\/www.pixiv.net\/member_illust.php\?mode=manga_big\&illust_id=[0-9]+\&page=[0-9]+')
    nico_mov = re.compile('https?:\/\/www.nicovideo.jp\/watch\/[a-zA-Z0-9]+')
    yout_mov = re.compile('https:\/\/www.youtube.com\/watch\?v=[a-zA-Z0-9]+')
    image_format = ["jpg", "jpeg", "gif", "png"]
    if twi.match(orig_url):
        # Twitter: scrape every photo of the tweet from the permalink page.
        images = bsObj.find("div", {"class": "permalink-tweet-container"}).find("div", {"class": "AdaptiveMedia-container"}).findAll("div", {"class": "AdaptiveMedia-photoContainer"})
        for item in images:
            imageLoc = item.find("img")["src"]
            urlretrieve(imageLoc , dlDir + "twi" + imageLoc[28:])
            loc = dlDir+"twi"+imageLoc[28:]
            thumb = "thumb_twi" + imageLoc[28:]
            type = what(loc)
            thumbnail(loc, thumb)
            regImg(loc, orig_url, "./images/thumbnail/"+thumb, type)
            print(thumb_lDir+thumb)
    elif nic.match(orig_url):
        # Nico Seiga: log in first, then fetch the source image.
        opener = build_opener(HTTPCookieProcessor(CookieJar()))
        post = {
            'mail_tel': nicouser,
            'password': nicopass
        }
        data = urlencode(post).encode("utf_8")
        response = opener.open('https://secure.nicovideo.jp/secure/login', data)
        response.close()
        image_id = orig_url[34:]
        with opener.open('http://seiga.nicovideo.jp/image/source?id=' + image_id) as response:
            bsObj = BeautifulSoup(response)
        imageLoc = bsObj.find("div", {"class": "illust_view_big"}).find("img")["src"]
        dlLoc = dlDir + "nic" + image_id
        urlretrieve('http://lohas.nicoseiga.jp' + imageLoc, dlLoc)
        # file type only known after download; rename with the extension
        type = what(dlLoc)
        loc = dlLoc + "." + type
        os.rename(dlLoc, loc)
        thumb = "thumb_nico"+image_id+"."+type
        print(thumb_lDir+thumb)
        thumbnail(loc, thumb)
        regImg(loc, orig_url, "./images/thumbnail/"+thumb, type)
    elif pix1.match(orig_url) or pix2.match(orig_url):
        # Pixiv illustration page: use the API to get original image URLs.
        imageLocs = []
        image_id = re.search('\d+', orig_url).group()
        api = AppPixivAPI()
        api.login(pixiuser, pixipass)
        json_result = api.illust_detail(image_id, req_auth=True)
        illust = json_result.illust
        if "original" in illust.image_urls:
            imageLocs.append(illust.image_urls.original)
        elif "meta_pages" in illust and len(illust.meta_pages)!=0:
            # multi-page work
            for i in illust.meta_pages:
                imageLocs.append(i.image_urls.original)
        elif "meta_single_page" in illust:
            imageLocs.append(illust.meta_single_page.original_image_url)
        # print(imageLocs)
        for imageLoc in imageLocs:
            api.download(imageLoc, path=dlDir, name="pix" + imageLoc.split("/")[-1])
            loc = dlDir + "pix" + imageLoc.split("/")[-1]
            type = what(loc)
            thumb = "thumb_pix"+imageLoc.split("/")[-1]
            thumbnail(loc, thumb)
            regImg(loc, orig_url, "./images/thumbnail/"+thumb, type)
            print(thumb_lDir+thumb)
    elif pix_.match(orig_url):
        # Pixiv manga_big page: a single page of a multi-page work.
        imageLocs = []
        reg = re.compile("https?:\/\/www.pixiv.net\/member_illust.php\?mode=manga_big\&illust_id=(\d+)\&page=(\d+)")
        image_id = int(reg.match(orig_url).group(1))
        page = int(reg.match(orig_url).group(2))
        api = AppPixivAPI()
        api.login(pixiuser, pixipass)
        json_result = api.illust_detail(image_id, req_auth=True)
        imageLocs.append(json_result.illust.meta_pages[page].image_urls.original)
        for imageLoc in imageLocs:
            api.download(imageLoc, path=dlDir, name="pix" + imageLoc.split("/")[-1])
            loc = dlDir + "pix" + imageLoc.split("/")[-1]
            type = what(loc)
            thumb = "thumb_pix"+imageLoc.split("/")[-1]
            thumbnail(loc, thumb)
            regImg(loc, orig_url, "./images/thumbnail/"+thumb, type)
            print(thumb_lDir+thumb)
    elif nico_mov.match(orig_url):
        # Nico movie: delegate to the external mvdl.py helper.
        # NOTE(review): poll() is called immediately, so `retcode` is almost
        # certainly None and the subprocess is never waited on — confirm.
        proc = Popen(["./mvdl.py", orig_url], stdout=PIPE, stderr=PIPE)
        retcode = proc.poll()
    elif orig_url.split(".")[-1] in image_format:
        # Direct image link.
        filename = "_".join(quote(orig_url).split("/")[-2:])
        if len(filename) > 10:
            # keep names short: timestamp + last 10 chars
            from datetime import datetime
            filename = datetime.now().strftime('%s') + filename[-10:]
        loc = dlDir + filename
        thumb = "thumb_"+filename
        urlretrieve(orig_url , loc)
        type = what(loc)
        if type == None:
            # imghdr could not sniff the type; fall back to the URL extension
            type = orig_url.split(".")[-1]
        thumbnail(loc, thumb)
        print(thumb_lDir+thumb)
        regImg(loc, orig_url, "./images/thumbnail/"+thumb, type)


if __name__ == '__main__' :
    main()
import Gears as gears
from .. import *
from ..Pif.Base import *


class Min(Base) :
    """Pattern-intensity function: the pointwise minimum of two operand
    patterns, realised as a generated GLSL shader function."""

    def applyWithArgs(
            self,
            spass,
            functionName,
            *,
            pif1 : 'First operand. (Pif.*)' = Pif.Solid( color = 'white' ),
            pif2 : 'Second operand. (Pif.*)' = Pif.Solid( color = 'white' )
            ) :
        # NOTE(review): `stimulus` is never used below; confirm getStimulus()
        # has no required side effect before removing it.
        stimulus = spass.getStimulus()
        # register the operands under suffixed names so the generated GLSL
        # below can call them
        pif1.apply(spass, functionName + '_op1')
        pif2.apply(spass, functionName + '_op2')
        spass.setShaderFunction( name = functionName, src = self.glslEsc( '''
                vec3 @<pattern>@ (vec2 x, float time){
                    return min( @<pattern>@_op1(x), @<pattern>@_op2(x) );
                }
        ''').format( pattern=functionName )  )
from biohub.core.plugins import PluginConfig


class BadPluginConfig(PluginConfig):
    """A plugin config whose ready() always fails.

    Lives under tests/ (name 'tests.core.plugins.bad_plugin') — presumably a
    fixture for exercising plugin-loading error handling; confirm usage.
    """

    name = 'tests.core.plugins.bad_plugin'
    title = 'My Plugin'
    author = 'hsfzxjy'
    description = 'This is my plugin.'

    def ready(self):
        # Deliberately blow up during plugin initialisation.
        raise ZeroDivisionError
from util.tipo import tipo


class S_PARTY_MEMBER_INTERVAL_POS_UPDATE(object):
    """Debug handler for the S_PARTY_MEMBER_INTERVAL_POS_UPDATE packet.

    Only prints the class name, payload length, and a hex dump of the
    payload; the packet is not otherwise parsed.
    """

    def __init__(self, tracker, time, direction, opcode, data):
        # split('.')[3] picks the class name out of "<class 'a.b.c.Name'>";
        # assumes this module sits at that package depth -- TODO confirm.
        # [1:-1] strips the surrounding brackets from the hex-dump list repr.
        print(str(type(self)).split('.')[3]+'('+str(len(data))+'): '+ str(data.get_array_hex(1))[1:-1])
ry_user.get(), entry_pass.get())) for namerow in cur.fetchall(): # print all the first cell fn = namerow[0] #store firstname ln = namerow[1] #store lastname rlb1 = Label(middleFrame, text='\nWelcome %s %s\n' % (fn, ln), font="Arial 14") rlb1.pack() rlb2 = Label(middleFrame, text='\nUserName: %s' % entry_user.get(), font="Arial 14") rlb2.pack() top.mainloop() self.destroy() parent.destroy() go_to_HomePage()''' else: r = Tk() r.title(':D') r.geometry('150x150') rlbl = Label(r, text='\n[!] Invalid Login') rlbl.pack() r.mainloop() dbconn.close() ########################################## SIGN UP SCREEN - GUI #################################################### def SignupScreen(self): global entry_fname global entry_lname global entry_user global entry_pass global entry_repass global entry_email global entry_ACBL global entry_disID top = Toplevel(self) w, h = top.winfo_screenwidth(), top.winfo_screenheight() top.overrideredirect(1) top.geometry("550x450+%d+%d" % (w / 2 - 275, h / 2 - 140)) # 250 #top.configure(background='white') quitButton = Button(top, text="Go Back", font="Arial 14", command= top.destroy).pack(side="bottom", padx=20) #topFrame = Frame(top) #topFrame.pack() middleFrame = Frame(top) middleFrame.pack(pady=50) bottomFrame = Frame(top) bottomFrame.pack(side=BOTTOM) # Widgets and which frame they are in #label = Label(topFrame, text="LET'S PLAY BRIDGE") fnameLabel = Label(middleFrame,text = 'First Name:',font="Arial 14") lnameLabel = Label(middleFrame, text='Last Name:',font="Arial 14") userLabel = Label(middleFrame, text='Username:',font="Arial 14") passLabel = Label(middleFrame, text='Password:',font="Arial 14") repassLabel = Label(middleFrame, text='Re-Enter Password:',font="Arial 14") emailLabel = Label(middleFrame, text='Email(optional):',font="Arial 14") ACBLnumLabel = Label(middleFrame, text='ACBLnum(optional):',font="Arial 14") disIDLabel = Label(middleFrame, text='DistrictID(optional):',font="Arial 14") entry_fname = Entry(middleFrame) #For DB 
entry_lname = Entry(middleFrame) #For DB entry_user = Entry(middleFrame)#For DB entry_pass = Entry(middleFrame, show = '*')#For DB entry_repass = Entry(middleFrame, show = '*')#For DB entry_email = Entry(middleFrame)#For DB entry_ACBL = Entry(middleFrame)#For DB entry_disID = Entry(middleFrame)#For DB b = Button(bottomFrame, text="Sign up", font="Arial 14", command=lambda : combined_Functions(self)) # Location of the Widgets in their frames #label.pack(side="top", fill="both", expand=True, padx=20, pady=20) fnameLabel.grid(row=1, column=0, sticky=W) entry_fname.grid(row=1, column=1) lnameLabel.grid(row=2, column=0, sticky=W) entry_lname.grid(row=2, column=1) userLabel.grid(row=3, column=0, sticky=W) entry_user.grid(row=3, column=1) passLabel.grid(row=4, column=0, sticky=W) entry_pass.grid(row=4, column=1) repassLabel.grid(row=5, column=0, sticky=W) entry_repass.grid(row=5, column=1) emailLabel.grid(row=6, column=0, sticky=W) entry_email.grid(row=6, column=1, padx=20, sticky= W) ACBLnumLabel.grid(row=7, column=0, sticky=W) entry_ACBL.grid(row=7, column=1, padx=20) disIDLabel.grid(row=8, column=0, sticky=W) entry_disID.grid(row=8, column=1) b.grid(row=
10, columnspan=2) ####################################DATABASE Check if Username is available, check if passwords Matc
h -> if so SIGN UP!!!!!!!!!!!!!!! def get_Signup_input(): var = dbConnect() dbconn = mysql.connect(host=var.host, user=var.user, password=var.password, db=var.db) cur = dbconn.cursor() # Cursor object - required to execute all queries cur.execute("SELECT username FROM playerinfo WHERE username = '%s'" % entry_user.get()) rows = cur.fetchall() if not rows: # print(userInput + " is available") if (entry_pass.get() == entry_repass.get()) and (entry_pass.get()!= "") and (entry_repass.get()!= ""): # print("passwords match, good job brotha") # INSERT new player ... playerinfo check todaysdate = datetime.datetime.today().strftime('%Y-%m-%d') # current date cur.execute("INSERT INTO playerinfo(username, password, signUpDate, firstname, lastname, email, ACLnum, districtID) VALUES('%s','%s','%s','%s','%s','%s','%s','%s')" % ( entry_user.get(), entry_pass.get(), todaysdate, entry_fname.get(), entry_lname.get(), entry_email.get(),entry_ACBL.get(), entry_disID.get())) #get new player's ID cur.execute("SELECT ID FROM playerinfo WHERE username='%s'" % entry_user.get()) for namerow in cur.fetchall(): # print all the first cell idNum = namerow[0] # store ID number # new player's...playerstats inserted by ID cur.execute("INSERT INTO playerstats(ID) VALUES('%s')" % idNum) dbconn.commit() #database commit aka save r = Tk() r.title(':D') r.geometry('150x150') rlbl = Label(r, text='\n[+] Signed Up!') rlbl.pack() r.mainloop() else: # print("passwords don't match bruh or are NULL") r = Tk() r.title(':D') r.geometry('150x150') rlbl = Label(r, text='\n[!] Retype your passwords') rlbl.pack() r.mainloop() else: r = Tk() r.title(':D') r.geometry('150x150') rlbl = Label(r, text='\n[!] 
Username Not Available ') rlbl.pack() r.mainloop() dbconn.close() def go_to_Tutorial(): window = Toplevel() window.geometry("600x500") quitButton = Button(window, text="Cancel", font="Arial 14", command= window.destroy).pack(side="bottom", padx=20) top_Frame = Frame(window) top_Frame.pack() tLabel = Label(top_Frame, text="TUTORIAL", font="Arial 36").pack(side="top", fill="both", expand=True, padx=20, pady=20) def combined_Functions(self): # for the Sign Up button - store data, exits Sign Up screen, goes to Tutorial screen get_Signup_input() # top.destroy() #go_to_Tutorial() #####################################My Profile - GUI ######################################### def myProfileScreen(self): top = Toplevel(self) w, h = top.winfo_screenwidth(), top.winfo_screenheight() top.overrideredirect(1) w, h = self.winfo_screenwidth(), self.winfo_screenheight() top.overrideredirect(1) top.geometry("%dx%d+0+0" % (w, h)) topFrame = Frame(top) topFrame.pack() bottomFrame = Frame(top) bottomFrame.pack(side=BOTTOM) rightFrame = Frame(top) rightFrame.pack(side= RIGHT) leftFrame = Frame(top) leftFrame.pack(side=LEFT) #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@DB stuff@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ #entry_user.get() //username var = dbConnect() dbconn = mysql.connect(host=var.host, user=var.user, password=var.password, db=var.db) cur = dbconn.cursor() # Cursor object - requir
"""
Copy RWIS data from iem database to its final resting home in 'rwis'

The RWIS data is partitioned by UTC timestamp

Run at 0Z and 12Z, provided with a timestamp to process
"""
import datetime
import sys
import psycopg2.extras
from pyiem.util import get_dbconn, utc


def main(argv):
    """Copy one UTC day of RWIS observations from ``iem`` to ``rwis``.

    Args:
        argv: command line args; argv[1:4] are the year, month, day of the
            UTC date to (re)process.
    """
    iemdb = get_dbconn("iem")
    rwisdb = get_dbconn("rwis")

    # Half-open UTC window [ts, ts2) to archive.
    ts = utc(int(argv[1]), int(argv[2]), int(argv[3]))
    ts2 = ts + datetime.timedelta(hours=24)

    rcursor = rwisdb.cursor()
    # Remove previous entries for this UTC date, so reruns are idempotent.
    # Archive tables are partitioned per-year: t<year>, t<year>_soil,
    # t<year>_traffic.
    for suffix in ["", "_soil", "_traffic"]:
        rcursor.execute(
            f"DELETE from t{ts.year}{suffix} WHERE valid >= %s and valid < %s",
            (ts, ts2),
        )
    rcursor.close()

    # Always delete stuff 3 or more days old from iemaccess
    icursor = iemdb.cursor()
    icursor.execute(
        "DELETE from rwis_traffic_data_log WHERE "
        "valid < ('TODAY'::date - '3 days'::interval)"
    )
    icursor.execute(
        "DELETE from rwis_soil_data_log WHERE "
        "valid < ('TODAY'::date - '3 days'::interval)"
    )
    icursor.close()

    # Get traffic obs from access.
    # NOTE(review): these SELECTs interpolate the timestamps with %s string
    # formatting rather than query parameters; works because str(datetime)
    # is a valid quoted literal, but parameterizing would be safer.
    icursor = iemdb.cursor(cursor_factory=psycopg2.extras.DictCursor)
    icursor.execute(
        """SELECT l.nwsli as station, s.lane_id, d.* from
        rwis_traffic_data_log d, rwis_locations l, rwis_traffic_sensors s
        WHERE s.id = d.sensor_id and valid >= '%s' and valid < '%s'
        and s.location_id = l.id"""
        % (ts, ts2)
    )
    rows = icursor.fetchall()
    if not rows:
        # NOTE(review): informational only -- execution continues, and the
        # executemany() below is a no-op on an empty rows list.
        print("No RWIS traffic found between %s and %s" % (ts, ts2))
    icursor.close()

    # Write to archive
    rcursor = rwisdb.cursor()
    rcursor.executemany(
        f"""INSERT into t{ts.year}_traffic (station, valid, lane_id,
        avg_speed, avg_headway, normal_vol, long_vol, occupancy)
        VALUES (%(station)s,%(valid)s, %(lane_id)s, %(avg_speed)s,
        %(avg_headway)s, %(normal_vol)s, %(long_vol)s, %(occupancy)s)
        """,
        rows,
    )
    rcursor.close()

    # Get soil obs from access: pivot the one-row-per-sensor log into one
    # row per (station, valid) with a column per probe depth (inches).
    icursor = iemdb.cursor(cursor_factory=psycopg2.extras.DictCursor)
    sql = """SELECT l.nwsli as station, d.valid,
    max(case when sensor_id = 1 then temp else null end) as tmpf_1in,
    max(case when sensor_id = 3 then temp else null end) as tmpf_3in,
    max(case when sensor_id = 6 then temp else null end) as tmpf_6in,
    max(case when sensor_id = 9 then temp else null end) as tmpf_9in,
    max(case when sensor_id = 12 then temp else null end) as tmpf_12in,
    max(case when sensor_id = 18 then temp else null end) as tmpf_18in,
    max(case when sensor_id = 24 then temp else null end) as tmpf_24in,
    max(case when sensor_id = 30 then temp else null end) as tmpf_30in,
    max(case when sensor_id = 36 then temp else null end) as tmpf_36in,
    max(case when sensor_id = 42 then temp else null end) as tmpf_42in,
    max(case when sensor_id = 48 then temp else null end) as tmpf_48in,
    max(case when sensor_id = 54 then temp else null end) as tmpf_54in,
    max(case when sensor_id = 60 then temp else null end) as tmpf_60in,
    max(case when sensor_id = 66 then temp else null end) as tmpf_66in,
    max(case when sensor_id = 72 then temp else null end) as tmpf_72in
    from rwis_soil_data_log d, rwis_locations l
    WHERE valid >= '%s' and valid < '%s' and d.location_id = l.id
    GROUP by station, valid""" % (
        ts,
        ts2,
    )
    icursor.execute(sql)
    rows = icursor.fetchall()
    if not rows:
        print("No RWIS soil obs found between %s and %s" % (ts, ts2))
    icursor.close()

    # Write to RWIS Archive
    rcursor = rwisdb.cursor()
    rcursor.executemany(
        f"""INSERT into t{ts.year}_soil (station, valid, tmpf_1in, tmpf_3in,
        tmpf_6in, tmpf_9in, tmpf_12in, tmpf_18in, tmpf_24in, tmpf_30in,
        tmpf_36in, tmpf_42in, tmpf_48in, tmpf_54in, tmpf_60in, tmpf_66in,
        tmpf_72in) VALUES ( %(station)s,%(valid)s, %(tmpf_1in)s,
        %(tmpf_3in)s, %(tmpf_6in)s, %(tmpf_9in)s, %(tmpf_12in)s,
        %(tmpf_18in)s, %(tmpf_24in)s, %(tmpf_30in)s, %(tmpf_36in)s,
        %(tmpf_42in)s, %(tmpf_48in)s, %(tmpf_54in)s, %(tmpf_60in)s,
        %(tmpf_66in)s, %(tmpf_72in)s)
        """,
        rows,
    )
    rcursor.close()

    # Get regular obs from Access
    icursor = iemdb.cursor(cursor_factory=psycopg2.extras.DictCursor)
    # Since we store drct in the RWIS archive as NaN, we better make sure
    # we don't attempt to use these values as it will error out
    icursor.execute("update current_log set drct = null where drct = 'NaN'")
    sql = """SELECT c.*, t.id as station from current_log c, stations t
    WHERE valid >= '%s' and valid < '%s' and t.network ~* 'RWIS'
    and t.iemid = c.iemid""" % (
        ts,
        ts2,
    )
    icursor.execute(sql)
    rows = icursor.fetchall()
    if not rows:
        print("No RWIS obs found between %s and %s" % (ts, ts2))
    icursor.close()

    # Write to RWIS Archive.  Note the spelling difference between archive
    # columns (tfs0..tfs3) and the iemaccess keys (tsf0..tsf3) -- both sides
    # are used consistently here.
    rcursor = rwisdb.cursor()
    rcursor.executemany(
        f"""INSERT into t{ts.year} (station, valid, tmpf, dwpf, drct, sknt,
        tfs0, tfs1, tfs2, tfs3, subf, gust, tfs0_text, tfs1_text,
        tfs2_text, tfs3_text, pcpn, vsby) VALUES (%(station)s, %(valid)s,
        %(tmpf)s,%(dwpf)s,%(drct)s,%(sknt)s,%(tsf0)s,
        %(tsf1)s,%(tsf2)s,%(tsf3)s,%(rwis_subf)s,%(gust)s,%(scond0)s,
        %(scond1)s,%(scond2)s,%(scond3)s,%(pday)s,%(vsby)s)""",
        rows,
    )
    rcursor.close()

    # Commit both sides only after every stage succeeded, then close.
    rwisdb.commit()
    iemdb.commit()
    rwisdb.close()
    iemdb.close()


if __name__ == "__main__":
    main(sys.argv)
IR, None) if not src: src = getattr(Utils.g_module, SRCDIR, None) if not src: src = getattr(Utils.g_module, 'top', None) if not src: src = '.' incomplete_src = 1 src = os.path.abspath(src) bld = getattr(Options.options, BLDDIR, None) if not bld: bld = getattr(Utils.g_module, BLDDIR, None) if not bld: bld = getattr(Utils.g_module, 'out', None) if not bld: bld = 'build' incomplete_bld = 1 if bld == '.': raise Utils.WafError('Setting blddir="." may cause distclean problems') bld = os.path.abspath(bld) try: os.makedirs(bld) except OSError: pass # It is not possible to compile specific targets in the configuration # this may cause configuration errors if autoconfig is set targets = Options.options.compile_targets Options.options.compile_targets = None Options.is_install = False conf.srcdir = src conf.blddir = bld conf.post_init() if 'incomplete_src' in vars(): conf.check_message_1('Setting srcdir to') conf.check_message_2(src) if 'incomplete_bld' in vars(): conf.check_message_1('Setting blddir to') conf.check_message_2(bld) # calling to main wscript's configure() conf.sub_config(['']) conf.store() # this will write a configure lock so that subsequent builds will # consider the current path as the root directory (see prepare_impl). 
# to remove: use 'waf distclean' env = Environment.Environment() env[BLDDIR] = bld env[SRCDIR] = src env['argv'] = sys.argv env['commands'] = Options.commands env['options'] = Options.options.__dict__ # conf.hash & conf.files hold wscript files paths and hash # (used only by Configure.autoconfig) env['hash'] = conf.hash env['files'] = conf.files env['environ'] = dict(conf.environ) env['cwd'] = os.path.split(Utils.g_module.root_path)[0] if Utils.g_module.root_path != src: # in case the source dir is somewhere else env.store(os.path.join(src, Options.lockfile)) env.store(Options.lockfile) Options.options.compile_targets = targets def clean(bld): '''removes the build files''' try: proj = Environment.Environment(Options.lockfile) except IOError: raise Utils.WafError('Nothing to clean (project not configured)') bld.load_dirs(proj[SRCDIR], proj[BLDDIR]) bld.load_envs() bld.is_install = 0 # False # read the scripts - and set the path to the wscript path (useful for srcdir='/foo/bar') bld.add_subdirs([os.path.split(Utils.g_module.root_path)[0]]) try: bld.clean() finally: bld.save() def check_configured(bld): if not Configure.autoconfig: return bld conf_cls = getattr(Utils.g_module, 'configure_context', Utils.Context) bld_cls = getattr(Utils.g_module, 'build_context', Utils.Context) def reconf(proj): back = (Options.commands, Options.options.__dict__, Logs.zones, Logs.verbose) Options.commands = proj['commands'] Options.options.__dict__ = proj['options'] conf = conf_cls() conf.environ = proj['environ'] configure(conf) (Options.commands, Options.options.__dict__, Logs.zones, Logs.verbose) = back try: proj = Environment.Environment(Options.lockfile) except IOError: conf = conf_cls() configure(conf) else: try: bld = bld_cls() bld.load_dirs(proj[SRCDIR], proj[BLDDIR]) bld.load_envs() except Utils.WafError: reconf(proj) return bld_cls() try: proj = Environment.Environment(Options.lockfile) except IOError: raise Utils.WafError('Auto-config: project does not configure (bug)') h = 
0 try: for file in proj['files']: if file.endswith('configure'): h = hash((h, Utils.readf(file))) else: mod = Utils.load_module(file) h = hash((h, mod.waf_hash_val)) except (OSError, IOError): warn('Reconfiguring the project: a file is unavailable') reconf(proj) else: if (h != proj['hash']): warn('Reconfiguring the project: the configuration has changed') reconf(proj) return bld_cls() def install(bld): '''installs the build files''' bld = check_configured(bld) Options.commands['install'] = True Options.commands['uninstall'] = False Options.is_install = True bld.is_install = INSTALL build_impl(bld) bld.install() def uninstall(bld): '''removes the installed files''' Options.commands['install'] = False Options.commands['uninstall'] = True Options.is_install = True bld.is_install = UNINSTALL try: def runnable_status(self): return SKIP_ME setattr(Task.Task, 'runnable_status_back', Task.Task.runnable_status) setattr(Task.Task, 'runnable_status', runnable_status) build_impl(bld) bld.install() finally: setattr(Task.Task, 'runnable_status', Task.Task.runnable_status_back) def build(bld): bld = check_configured(bld) Options.commands['install'] = False Options.commands['uninstall'] = False Options.is_install = False bld.is_install = 0 # False return build_impl(bld) def build_impl(bld): # compile the project and/or inst
all the files try: proj = Environment.Environment(Optio
ns.lockfile) except IOError: raise Utils.WafError("Project not configured (run 'waf configure' first)") bld.load_dirs(proj[SRCDIR], proj[BLDDIR]) bld.load_envs() info("Waf: Entering directory `%s'" % bld.bldnode.abspath()) bld.add_subdirs([os.path.split(Utils.g_module.root_path)[0]]) # execute something immediately before the build starts bld.pre_build() try: bld.compile() finally: if Options.options.progress_bar: print('') info("Waf: Leaving directory `%s'" % bld.bldnode.abspath()) # execute something immediately after a successful build bld.post_build() bld.install() excludes = '.bzr .bzrignore .git .gitignore .svn CVS .cvsignore .arch-ids {arch} SCCS BitKeeper .hg _MTN _darcs Makefile Makefile.in config.log .gitattributes .hgignore .hgtags'.split() dist_exts = '~ .rej .orig .pyc .pyo .bak .tar.bz2 tar.gz .zip .swp'.split() def dont_dist(name, src, build_dir): global excludes, dist_exts if (name.startswith(',,') or name.startswith('++') or name.startswith('.waf') or (src == '.' and name == Options.lockfile) or name in excludes or name == build_dir ): return True for ext in dist_exts: if name.endswith(ext): return True return False # like shutil.copytree # exclude files and to raise exceptions immediately def copytree(src, dst, build_dir): names = os.listdir(src) os.makedirs(dst) for name in names: srcname = os.path.join(src, name) dstname = os.path.join(dst, name) if dont_dist(name, src, build_dir): continue if os.path.isdir(srcname): copytree(srcname, dstname, build_dir) else: shutil.copy2(srcname, dstname) # TODO in waf 1.6, change this method if "srcdir == blddir" is allowed def distclean(ctx=None): '''removes the build directory''' global commands lst = os.listdir('.') for f in lst: if f == Options.lockfile: try: proj = Environment.Environment(f) except: Logs.warn('could not read %r' % f) continue try: shutil.rmtree(proj[BLDDIR]) except IOError: pass except OSError, e: if e.errno != errno.ENOENT: Logs.warn('project %r cannot be removed' % proj[BLDDIR]) try: 
os.remove(f) except OSError, e: if e.errno != errno.ENOENT: Logs.warn('file %r cannot be removed' % f) # remove the local waf cache if not commands and f.startswith('.waf'): shutil.rmtree(f, ignore_errors=True) # FIXME waf 1.6 a unique ctx parameter, and remove the optional appname and version def dist(appname='', version=''): '''makes a tarball for redistributing the sources''' # return return (distdirname, tarballname) import tarfile if not appname: appname = Utils.g_module.APPNAME if not version: version = Utils.g_module.VERSION tmp_folder = appname + '-' + version if g_gz in ['gz', 'bz2']: arch_name = tmp_folder + '.tar.' + g_gz else: arch_name = tmp_folder + '.' + 'zip' # remove the previous dir try: shutil.rmtree(tmp_folder) except (OSError, IOError): pass # remove the previous archive try: os.remove(arch_name) except (OSError, IOError): pass # copy the files into the temporary folder blddir = getattr(Utils.g_module, BLDDIR, None) if not blddir: blddir = getattr(Utils.g_module, 'out', None) copytree('.', tmp_folder, blddir) # undocumented hook for additional cleanup dist_hook =
import time
from pymongo import MongoClient
from datetime import datetime, timedelta
import json
from bson import Binary, Code
from bson.json_util import dumps

client = MongoClient('localhost', 27017)
db = client['election-2016']


def dumpData(yesterdayStr):
    """Dump every tweet in collection ``t<yesterdayStr>`` to a JSON file.

    Writes ``out/<yesterdayStr>.json`` as a JSON array of the documents
    (serialized with bson's ``dumps``).  Nothing is written when the
    collection is empty.

    Args:
        yesterdayStr: date suffix of the collection, e.g. ``"11_7"``.
    """
    collectionName = 't' + yesterdayStr
    cursor = db[collectionName].find()
    count = cursor.count()
    print(collectionName + ' found ' + str(count) + ' tweets')

    # dump only if data count is greater than 0
    if count > 0:
        # BUG FIX: the file was opened but never closed; use a context
        # manager so it is flushed and closed even on error.
        with open('out/' + yesterdayStr + '.json', 'w') as file:
            file.write('[')
            # Emit ",\n" between documents and close the array after the
            # last one (i == count - 1).
            i = 0
            for document in cursor:
                doc = dumps(document)
                file.write(doc)
                if (i != count - 1):
                    file.write(',\n')
                else:
                    file.write('\n]')
                i = i + 1
        # BUG FIX: this previously referenced the global `now`, which is
        # only defined inside the __main__ block below -- importing this
        # module and calling dumpData() raised NameError.  Use the current
        # time directly.
        print('data for ' + yesterdayStr + ' successfully dumped at '
              + str(datetime.now()))


# Run following code when the program starts
if __name__ == '__main__':
    currentDate = str(datetime.now().month) + '_' + str(datetime.now().day)

    # get now and yesterday strings
    now = datetime.now()
    yesterday = now - timedelta(days=1)
    yesterdayStr = str(yesterday.month) + '_' + str(yesterday.day)

    # update currentDate
    dumpData(yesterdayStr)
"""
Settings for testing the application.
"""
import os

DEBUG = True
DJANGO_RDFLIB_DEVELOP = True

# SQLite database file, resolved relative to this settings module
# (three directories up from this file).
DB_PATH = os.path.abspath(
    os.path.join(__file__, '..', '..', '..', 'rdflib_django.db'))

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': DB_PATH,
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}

SITE_ID = 1

STATIC_URL = '/static/'

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'django.contrib.admindocs',
    'rdflib_django',
)

ROOT_URLCONF = 'rdflib_django.urls'

# Everything logs to the console; root logger at INFO, handler at DEBUG.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
    },
    'loggers': {
        '': {
            'handlers': ['console'],
            'propagate': True,
            'level': 'INFO',
        },
    }
}
from vint.ast.traversing import traverse, register_traverser_extension
from vint.ast.parsing import Parser
from vint.ast.node_type import NodeType

# Key under which the parsed redir-assignment sub-AST is attached to the
# ex-command node.
REDIR_CONTENT = 'VINT:redir_content'


class RedirAssignmentParser(object):
    """ A class to make redir assignment parseable. """

    def process(self, ast):
        """Attach parsed redir-assignment content to matching nodes.

        Walks the AST; for every ``:redir`` ex-command that uses the
        assignment form (``redir => var``), parses the assignment target
        and stores the resulting node under REDIR_CONTENT.  Returns the
        (mutated) ast.
        """
        def enter_handler(node):
            node_type = NodeType(node['type'])
            # Only ex-commands can be redir commands.
            if node_type is not NodeType.EXCMD:
                return

            is_redir_command = node['ea']['cmd'].get('name') == 'redir'
            if not is_redir_command:
                return

            # Only the "=>" assignment form carries parseable content.
            redir_cmd_str = node['str']
            is_redir_assignment = '=>' in redir_cmd_str
            if not is_redir_assignment:
                return

            parser = Parser()
            redir_content_node = parser.parse_redir(node)
            node[REDIR_CONTENT] = redir_content_node

        traverse(ast, on_enter=enter_handler)
        return ast


def get_redir_content(node):
    """Return the parsed redir content of *node*, or None if absent."""
    return node.get(REDIR_CONTENT)


@register_traverser_extension
def traverse_redir_content(node, on_enter=None, on_leave=None):
    """Traverser extension: descend into attached redir content, if any."""
    if REDIR_CONTENT not in node:
        return

    traverse(node[REDIR_CONTENT], on_enter=on_enter, on_leave=on_leave)
# Used for when precision or recall == 0 to supress warnings
def warn(*args, **kwargs):
    pass


import warnings
warnings.warn = warn

import numpy as np
import sklearn_crfsuite
from sklearn.metrics import make_scorer, confusion_matrix
from sklearn_crfsuite import metrics
from sklearn_crfsuite.utils import flatten
from sklearn.model_selection import cross_validate, cross_val_predict, StratifiedKFold
from collections import Counter
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from chemdataextractor.doc import Paragraph
from Tools import TextTools

stop_word_list = set(stopwords.words('english'))
wordnet_lemmatizer = WordNetLemmatizer()

# Chemical entity surface forms found by ChemDataExtractor; populated by
# main() and read by featureize() for the `word.ischem` feature.
chem_ents = []


def main():
    """Train and cross-validate a CRF NER model on the ASC corpus."""
    # BUG FIX: `chem_ents` was previously bound as a local variable here, so
    # the module-level list stayed empty and featureize()'s `word.ischem`
    # feature was always False.  Rebind the module-level name instead.
    global chem_ents

    # Flatten the corpus into a list of sentences (each a list of
    # (token, postag, label) triples).
    train_docs = TextTools.loadNER("ASC")
    train_sents = []
    for index, row in train_docs.iterrows():
        for word in row['text']:
            train_sents.append(word)

    chem_ents = extract_chem_entities(train_sents)

    X = [sent2features(s, chem_ents) for s in train_sents]
    y = [sent2labels(s) for s in train_sents]

    crf = sklearn_crfsuite.CRF(
        algorithm='lbfgs',
        c1=0.1,
        c2=0.1,
        all_possible_transitions=True)
    crf.fit(X, y)

    # List of labels removing the non-entity classes
    labels = list(crf.classes_)
    labels.remove('O')

    NUMBER_OF_FOLDS = 5
    scoreers = {
        "f1_scores": make_scorer(
            metrics.flat_f1_score, average='weighted', labels=labels),
        "precision_scores": make_scorer(
            metrics.flat_precision_score, average='weighted', labels=labels),
        "recall_scores": make_scorer(
            metrics.flat_recall_score, average='weighted', labels=labels),
    }

    scores = cross_validate(crf, X, y, cv=NUMBER_OF_FOLDS,
                            scoring=scoreers, return_train_score=False,
                            n_jobs=-1)

    f1_scores = scores['test_f1_scores']
    precision_scores = scores['test_precision_scores']
    recall_scores = scores['test_recall_scores']

    # Per-fold metrics, then the averages across folds.
    for x in range(NUMBER_OF_FOLDS):
        print("Fold number: ", x)
        print("Precision: ", precision_scores[x])
        print("Recall: ", recall_scores[x])
        print("F1 Score: ", f1_scores[x])
        print("\n")

    print("Averages Across Folds")
    print("Precision: ", np.average(np.array(precision_scores)))
    print("Recall: ", np.average(np.array(recall_scores)))
    print("F1 Score: ", np.average(np.array(f1_scores)))

    y_pred = cross_val_predict(crf, X, y, cv=NUMBER_OF_FOLDS)
    conf_mat = confusion_matrix(flatten(y), flatten(y_pred))
    print("\nConfusion Matrix\n")
    print(" ".join(["NonEntity ", "CoreComposition", "Precursor",
                    "ReducingAgent", "Solvent", "Stabilizer"]))
    print(conf_mat)

    print("Top positive:")
    print_state_features(Counter(crf.state_features_).most_common(30))
    print("\nTop negative:")
    print_state_features(Counter(crf.state_features_).most_common()[-30:])


def extract_chem_entities(sents):
    """Run ChemDataExtractor over the whole corpus; return entity strings."""
    document_text = [[str(w[0]) for w in s] for s in sents]
    document_text = [" ".join(s) for s in document_text]
    document_text = " ".join(document_text)
    paragraph = Paragraph(document_text)
    chem_entities = paragraph.cems
    chem_entities = [c.text for c in chem_entities]
    return chem_entities


def print_state_features(state_features):
    """Pretty-print (attribute, label) -> weight CRF state features."""
    for (attr, label), weight in state_features:
        print("%0.6f %-8s %s" % (weight, label, attr))


def word2features(sent, word_position):
    """Build the feature list for the token at *word_position* in *sent*.

    Includes features for the token itself plus its immediate neighbours,
    and BOS/EOS markers at sentence boundaries.
    """
    SENTENCE_BEGGINING = 0
    SENTENCE_END = len(sent) - 1

    word = sent[word_position][0]
    pos = sent[word_position][1]
    features = featureize(word, pos)

    if word_position == SENTENCE_BEGGINING:
        features.append('BOS')

    if word_position > SENTENCE_BEGGINING:
        previous_word = sent[word_position-1][0]
        previous_pos = sent[word_position-1][1]
        features.extend(featureize(previous_word, previous_pos, relation="-1"))

    if word_position < SENTENCE_END:
        next_word = sent[word_position+1][0]
        next_pos = sent[word_position+1][1]
        features.extend(featureize(next_word, next_pos, relation="+1"))

    if word_position == SENTENCE_END:
        features.append('EOS')

    return features


def featureize(word, postag, relation=""):
    """Return the CRF feature strings for one token.

    *relation* prefixes features from neighbouring tokens ("-1"/"+1").
    Reads the module-level `chem_ents` for the ischem feature.
    """
    suffix = word[-3:]
    prefix = word[:3]
    return [
        relation + 'word.lower=' + word.lower(),
        relation + 'word.isupper=%s' % word.isupper(),
        relation + 'word.istitle=%s' % word.istitle(),
        relation + 'word.isdigit=%s' % word.isdigit(),
        relation + 'word.postag=%s' % postag,
        relation + 'word.prefix=%s' % prefix,
        relation + 'word.suffix=%s' % suffix,
        relation + 'word.lemma=%s' % wordnet_lemmatizer.lemmatize(word),
        relation + 'word.ischem=%s' % (word in chem_ents),
        relation + 'word.containsdigit=%s' % contains_digit(word),
    ]


def sent2features(sent, chem_ents):
    """Feature lists for every token of *sent*.

    The `chem_ents` parameter is kept for interface compatibility; the
    actual lookup happens against the module-level list (see featureize).
    """
    return [word2features(sent, i) for i in range(len(sent))]


def sent2labels(sent):
    """Extract the gold label of each (token, postag, label) triple."""
    return [label for token, postag, label in sent]


def contains_digit(s):
    """True if any character of *s* is a digit."""
    return any(i.isdigit() for i in s)


if __name__ == "__main__":
    main()
'''Library with the conversion routines between the different machine types.

Machines are represented as nested lists, e.g.
``['mealy', ['symbols-in', ...], ['symbols-out', ...], ['states', ...],
['start', s], ['finals', ...], ['trans', [src, dst, input, output], ...]]``.

Autor: Lucas Possatti
'''

import re
import collections


def mealy_to_moore(me):
    """Convert the Mealy machine *me* into an equivalent Moore machine.

    States reached with more than one distinct output are split into
    variants suffixed with '*'.  Returns the new machine; *me* is not
    modified.

    Raises:
        TypeError: if *me* is not a Mealy machine.
        ValueError: if a transition targets an undeclared state.
    """
    # BUG FIX: `raise '<string>'` is invalid in Python 3 (string exceptions
    # were removed); raise proper exception types instead.
    if me[0] != 'mealy':
        raise TypeError('O método mealy_to_moore esperava receber uma '
                        'máquina de mealy como entrada.')

    moo = ['moore']

    # Map each state to the (ordered, deduplicated) list of outputs emitted
    # by the transitions arriving at it.
    state_outputs = collections.OrderedDict()
    for state in me[3][1:]:
        state_outputs[state] = []

    for trans in me[6][1:]:
        # Destination states must be declared in the 'states' section.
        if trans[1] not in state_outputs:
            raise ValueError("Some transition state destination is not "
                             "declared in the machine definition (states "
                             "section). Malformed machine definition.")
        if trans[3] not in state_outputs[trans[1]]:
            state_outputs[trans[1]].append(trans[3])

    # Build the Moore state list and output function.  A state with N > 1
    # distinct incoming outputs is split into N states ('q', 'q*', 'q**'...),
    # one per output.
    moore_states = []
    out_fn = []
    for state in state_outputs:
        if len(state_outputs[state]) > 1:
            i = 0
            for output in state_outputs[state]:
                new_state = state + '*' * i
                moore_states.append(new_state)
                out_fn.append([new_state, output])
                i += 1
        elif len(state_outputs[state]) == 1:
            moore_states.append(state)
            output = state_outputs[state][0]
            out_fn.append([state, output])
        else:
            # Unreachable state: keep it, with an empty output.
            moore_states.append(state)
            out_fn.append([state, []])

    # Re-target each Mealy transition onto the (possibly split) Moore state
    # whose output matches the transition's output.
    moore_trans = []
    for trans in me[6][1:]:
        for new_state in moore_states:
            for fn in out_fn:
                if (re.match("^" + trans[1] + r"\**", new_state)
                        and re.match("^" + trans[1] + r"\**", fn[0])
                        and trans[3] == fn[1]):
                    temp_trans = [trans[0], fn[0], trans[2]]
                    if temp_trans not in moore_trans:
                        moore_trans.append(temp_trans)

    # Final states: every Moore state derived (via '*' suffixing) from a
    # final Mealy state is final.
    moore_finals = []
    for final in me[5][1:]:
        for moo_state in moore_states:
            if re.match("^" + final + r"\**", moo_state):
                moore_finals.append(moo_state)

    moo.append(["symbols-in"] + me[1][1:])
    moo.append(["symbols-out"] + me[2][1:])
    moo.append(["states"] + moore_states)
    moo.append(["start"] + [me[4][1]])
    moo.append(["finals"] + moore_finals)
    moo.append(["trans"] + moore_trans)
    moo.append(["out-fn"] + out_fn)

    return moo


def moore_to_mealy(moo):
    """Convert the Moore machine *moo* into an equivalent Mealy machine.

    A fresh start state 'qe' is introduced that mirrors the transitions of
    the original start state, so the Moore start-state output is not
    emitted spuriously.

    Raises:
        TypeError: if *moo* is not a Moore machine.
    """
    # BUG FIX: `raise '<string>'` is invalid in Python 3; raise a proper
    # exception type instead.
    if moo[0] != 'moore':
        raise TypeError('O método moore_to_mealy esperava receber uma '
                        'máquina de moore como entrada.')

    me = ['mealy']

    # Input/output alphabets carry over unchanged.
    me.append(['symbols-in'] + moo[1][1:])
    me.append(moo[2])

    # Same states plus the new start state 'qe'.
    estados = [moo[3][0]] + ['qe'] + moo[3][1:]
    me.append(estados)
    me.append(['start', 'qe'])

    # Final states are unchanged.
    me.append(moo[5])

    # Each Moore transition [src, dst, input] becomes a Mealy transition
    # [src, dst, input, out(dst)].
    mealy_trans = []
    moore_trans = moo[6][1:]
    moore_outfn = moo[7][1:]
    for trans in moore_trans:
        mealy_trans_output = None
        for out in moore_outfn:
            if out[0] == trans[1]:
                mealy_trans_output = out[1]

        mealy_trans_stage = [trans[0], trans[1], trans[2], mealy_trans_output]

        # Transitions leaving the original start state are duplicated from
        # 'qe', which replaces it as the entry point.
        if mealy_trans_stage[0] == moo[4][1]:
            mealy_trans.append(['qe'] + mealy_trans_stage[1:])

        mealy_trans.append(mealy_trans_stage)

    me.append(['trans'] + mealy_trans)

    return me
# -*- encoding: utf-8 -*-
from __future__ import print_function, unicode_literals, division, absolute_import

from enocean.protocol.eep import EEP

eep = EEP()


def _tmp_scale(profile):
    """Return the (min, max) bounds of the TMP value's scale as floats."""
    scale = profile.find('value', {'shortcut': 'TMP'}).find('scale')
    return float(scale.find('min').text), float(scale.find('max').text)


def test_first_range():
    """A5-02 types 0x01..0x0B: 40-degree windows sliding up 10 per type."""
    base = -40
    for index, rorg_type in enumerate(range(0x01, 0x0C)):
        expected_min = float(index * 10 + base)
        expected_max = expected_min + 40
        profile = eep.find_profile([], 0xA5, 0x02, rorg_type)
        lo, hi = _tmp_scale(profile)
        assert expected_min == lo
        assert expected_max == hi


def test_second_range():
    """A5-02 types 0x10..0x1B: 80-degree windows sliding up 10 per type."""
    base = -60
    for index, rorg_type in enumerate(range(0x10, 0x1C)):
        expected_min = float(index * 10 + base)
        expected_max = expected_min + 80
        profile = eep.find_profile([], 0xA5, 0x02, rorg_type)
        lo, hi = _tmp_scale(profile)
        assert expected_min == lo
        assert expected_max == hi


def test_rest():
    """Spot-check the two A5-02 types outside the regular 10-degree grid."""
    lo, hi = _tmp_scale(eep.find_profile([], 0xA5, 0x02, 0x20))
    assert -10 == lo
    assert +41.2 == hi
    lo, hi = _tmp_scale(eep.find_profile([], 0xA5, 0x02, 0x30))
    assert -40 == lo
    assert +62.3 == hi
jaccard = 2. else: jaccard = float(j["intersection"]) / float(j["union"]) return [[1. - jaccard]] def recall_dist_fn(graph1, graph2, options): """ assymmetric version of above to compute recall of graph1 on graph2 return recall to be consistent with other functions where similar is smaller. """ jpath = comp_path(graph1, graph2, options) with open(jpath) as f: j = json.loads(f.read()) if index_path(graph2, options) == j["db2_path"]: denom = float(j["db2_total"]) else: assert index_path(graph2, options) == j["db1_path"] denom = float(j["db1_total"]) intersection = float(j["intersection"]) recall = intersection / denom return [[recall]] def precision_dist_fn(graph1, graph2, options): """ get 1 - precision of graph1 on graph2 """ return recall_dist_fn(graph2, graph1, options) def corg_dist_fn(graph1, graph2, options): """ scrape corg dist from corg output """ cpath = corg_path(min(graph1, graph2), max(graph1, graph2), options) with open(cpath) as f: c = f.readline().strip() dist = float(c) return [[dist]] def vcf_dist_fn(graph1, graph2, options): """ scrape vcfCompare data""" jpath = comp_path_vcf(graph1, graph2, options) with open(jpath) as f: j = json.loads(f.read()) path1 = j["Path1"] path2 = j["Path2"] query_vcf_path = preprocessed_vcf_path(graph1, options) # we expect graph2 to be a baseline graph region2, sample2, method2 = options.tags[graph2] assert method2 in ["g1kvcf", "platvcf"] truth_vcf_path = preprocessed_vcf_path(graph2, options) # do we need to flip ever? 
assert path1 == query_vcf_path assert path2 == truth_vcf_path return [[j["Alts"]["SNP"]["Precision"], j["Alts"]["SNP"]["Recall"], j["Alts"]["MULTIBASE_SNP"]["Precision"], j["Alts"]["MULTIBASE_SNP"]["Recall"], j["Alts"]["INDEL"]["Precision"], j["Alts"]["INDEL"]["Recall"], j["Alts"]["TOTAL"]["Precision"], j["Alts"]["TOTAL"]["Recall"], 0]] def vcf_dist_header(options): """ header""" return ["SNP-Precision", "SNP-Recall", "MB-Precision", "MB-Recall", "INDEL-Precision", "INDEL-Recall", "TOT-Precision", "TOT-Recall", "QUAL"] def sompy_dist_fn(graph1, graph2, options): jpath = comp_path_sompy(graph1, graph2, options) header = None snps = None indels = None total = None # read sompy csv output. be a little flexible in terms of row column order (but not names!) RealTimeLogger.get().warning(jpath) with open(jpath) as f: for line in f: toks = line.split(",") if len(toks) < 2: continue if toks[1] == "type": header = toks rec_idx = toks.index("recall") prec_idx = toks.index("precision") elif toks[1] == "indels": indels = toks elif toks[1] == "SNVs": snps = toks elif toks[1] == "records": total = toks # indels optional if indels is None: indels = [0] * 100 # shoehorn into vcfCompre style output (todo, revise this) return [[snps[prec_idx], snps[rec_idx], 0, 0, indels[prec_idx], indels[rec_idx], total[prec_idx], total[rec_idx], 0]] def happy_dist_fn(graph1, graph2, options): jpath = comp_path_happy(graph1, graph2, options) header = None snps = None indels = None total = None if options.roc is True and options.tags[graph1][2] in ["gatk3", "platypus", "g1kvcf", "freebayes", "samtools"]: # read happy roc output. 
# todo : indels and total: problem= rocs have differen numbers of lines wwhich doesnt fit interface as is snp_roc_path = jpath.replace("summary.csv", "roc.snp.all.tsv") rows = [] with open(snp_roc_path) as f: for line in f: toks = line.split() if "precision" in toks: prec_idx = toks.index("precision") rec_idx = toks.index("recall") else: rows.append([toks[prec_idx], toks[rec_idx], 0, 0, 0, 0, 0, 0]) return rows else: # read happy csv output. be a little flexible in terms of row column order (but not names!) with open(jpath) as f: for line in f: toks = line.strip().split(",") if len(toks) < 2: continue if "METRIC.Recall" in toks: header = toks rec_idx = toks.index("METRIC.Recall") prec_idx = toks.index("METRIC.Precision") elif toks[0] == "INDEL": indels = toks elif toks[0] == "SNP": snps = toks elif toks[0] == "Locations": total = toks # indels optional if indels is None: indels = [0] * 100 # toal optioal if total is None: total = [0] * 100 # shoehorn into vcfCompre style output (todo, revise this) return [[snps[prec_idx], snps[rec_idx], 0, 0, indels[prec_idx], indels[rec_idx], total[prec_idx], total[rec_idx], 0]]
def save_vcfeval_stats(out_path, fn_table, fp_table, tp_table): """ write some summary counts from the vceval vcf output """ def write_table(t, name):
with open(os.path.join(out_path, "comp_counts_{}.tsv".format(name)), "w") as f: for line in t: f.write("{}\t{}\t{}\t{}\n".format(line[0], line[1], line[2], line[3])) write_table(fn_table, "fn") write_table(fp_table, "fp") write_table(tp_table, "tp") def load_vcfeval_stats(out_path): """ read the counts back from file as list """ def read_table(name): t = [] with open(os.path.join(out_path, "comp_counts_{}.tsv".format(name))) as f: for line in f: if len(line) > 0: row = line.split("\t") t += [[float(row[0]), int(row[1]), int(row[2]), int(row[3])]] return t fn_table = read_table("fn") fp_table = read_table("fp") tp_table = read_table("tp") balance_tables(fn_table, fp_table, tp_table) return fn_table, fp_table, tp_table def vcf_num_records(vcf_path, bed_path = None): """ use bcftools stats to get the number of snps indels other in vcf """ if not os.path.exists(vcf_path): return -1 cmd = "bcftools stats {}".format(vcf_path) if bed_path is not None: cmd += " -R {}".format(bed_path) p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=sys.stderr, bufsize=-1) output, _ = p.communicate() assert p.wait() == 0 hits = 0 for line in output.split("\n"): toks = line.split("\t") if len(toks) == 4 and toks[0] == "SN": if toks[2] == "number of SNPs:": num_snps = int(toks[3]) hits += 1 if toks[2] == "number of MNPs:": num_mnps = int(toks[3]) hits += 1 elif toks[2] == "number of indels:": num_indels = int(toks[3]) hits += 1 elif toks[2] == "number of records:": hits += 1 num_records = int(toks[3]) elif toks[2] == "number of others:": hits += 1 num_other = int(tok
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""Python 2<->3 compatibility module"""
import sys


def print_(template, *args, **kwargs):
    """Write *template* to stdout, %-formatting it with args or kwargs.

    Positional arguments win over keyword arguments when both are given.
    """
    template = str(template)
    if args:
        template = template % args
    elif kwargs:
        template = template % kwargs
    sys.stdout.writelines(template)


if sys.version_info < (3, 0):
    # Python 2 branch: re-export the 2.x names under their 2.x spellings.
    basestring = basestring
    from ConfigParser import ConfigParser
    from urllib import unquote

    def iteritems(d):
        return d.iteritems()

    def dictkeys(d):
        return d.keys()

    def reraise(t, e, tb):
        # `raise t, e, tb` is a syntax error on Python 3, so the 2.x
        # three-argument raise is compiled lazily via exec here.
        exec('raise t, e, tb', dict(t=t, e=e, tb=tb))
else:
    # Python 3 branch: map the 2.x names onto their 3.x equivalents.
    basestring = str
    from configparser import ConfigParser
    from urllib.parse import unquote

    def iteritems(d):
        return d.items()

    def dictkeys(d):
        return list(d.keys())

    def reraise(t, e, tb):
        raise e.with_traceback(tb)
place this file in the same folder as your ghdata folder. #to run this, type "python pythonBlameHistoryTree.py" into the command prompt #You will see some output about running on 127.0.0.1:5000 in the command prompt #Open a web browser and navigate to 127.0.0.1:5000. #This page will load for quite a while. At least several minutes is expected. #You can see it is still running due to the testing output in the command prompt Outer loop: commit# Inner loop: commit# #When the testing output stops running you should see some output in the browser tab. #the output shows the commit number and date, the total lines of code and other files (for example, the readme) #and the percentage written by each organization. #expected output for ghdata should show only the spdx-tools organization (Matt is a member) #Number of lines corresponds to the lines written by Matt. #You can see that earlier commits are lower on the page, and chronologically later ones appear higher up. #An "error" I expect us to encounter when testing other repos: #The way my sql query works right now, a user can be a member of multiple organizations. #For a simple case of expected output problems: #User1 wrote the entire repository (100%) #User1 is a member of Microsoft and IBM #Microsoft wrote 100% of the repository. IBM also wrote 100% of the repository for a total of 200% #Other issues: #If a user does not have both an email and organization available in GHTorrent database, #the user will not be counted towards any organization. 
#Future changes planned for this file: #Code cleanup for better readability #Code commenting for each portion #Thorough testing for various potential cases we might encounter #Deciding for certain how to decide whether a user is a member of an organization #A better method of dealing with local repository rather than deleting each time and re-downloading #Not having the database password directly in the code #Look into improving code efficiency where possible for faster runtime from flask import Flask from git import * import sqlalchemy from sqlalchemy import text import shutil import os import stat import time app = Flask(__name__) @app.route("/") def pythonBlameHistory(): #path is the hardcoded folder for the last download of ghdata repo_path = './ghdata' #We must remove the old ghdata if we want to download a new copy. #In order to delete it, we must first change the permissions #To be writable for all files and directories. #Based on this: http://s
tackoverflow.com/questions/2853723/whats-the-python-way-for-recursively-setting-file-permissions if os.path.exists(repo_path): for root, directories, files in os.walk(repo_path): for directory in directories: os.chmod(os.path.join(root, directory), stat.S_IWRITE) for file in files: os.chmod(os.path.join(root, file), stat.S_IWRITE) os.chmod(repo_path, stat.S_IWRITE)
#delete the old ghdata shutil.rmtree(repo_path) #connect to the database username:password@hostname:port/databasename db = sqlalchemy.create_engine('mysql+pymysql://root:password@localhost:3306/msr14') schema = sqlalchemy.MetaData() schema.reflect(bind=db) #Get the ghdata repository from GitHub repo = Repo.init('ghdata') origin = repo.create_remote('origin','https://github.com/OSSHealth/ghdata.git') origin.fetch() origin.pull(origin.refs[0].remote_head) #Dictionary to store results of sql queries #associating emails with organizations. #Without this dictionary, we would have to repeat #the same query over and over, which on my local machine #meant a runtime of over 24 hours (as opposed to several minutes using the dictionary) orgs_associated_with_user = {} #This dictionary keeps track of the lines written per organization for a single file. lines_per_organization_per_file = {} #This is the total number of lines in a single file total_lines_in_file = 0 #this is used later to hold percentage results for output percentage = 0 #This is the total number of lines in an entire repo total_lines_in_repo = 0 #This dictionary keeps track of the lines written per organization for the entire repo. lines_per_organization_entire_repo = {} #The output string will be displayed to the screen once everything is done running. outputString = "" #Outer loop: loop through each commit in the master branch. #This corresponds to the history of commits over time. for history_commit in repo.iter_commits('master'): #Since we want to see the change over time in repo percentage by organization, #clear the variables for total lines and organization lines for each new commit #we examine. lines_per_organization_entire_repo = {} total_lines_in_repo = 0 #Testing output: only purpose is to show you it's still running :) print("Outer loop: " + str(history_commit)) #Now loop through every file in the repo. #You cannot use the os library file/directory loop for this part. 
#(as was used above to change file permissions) #That is because some files do not exist in every commit. #You must loop through the commit tree, not the ghdata directory. for file_in_repo in history_commit.tree.traverse(): #For each file, we want to clear out the total lines and organization totals per file. #That's because we're starting over with a new file. lines_per_organization_per_file = {} total_lines_in_file = 0 #Files are of the blob type. This if statement prevents us from trying #to examine 'lines' in a directory. if file_in_repo.type == 'blob': #Now for each file, perform git blame. This will traverse #the lines in the file. #You can see there are now two variables of type commit: #history_commit and blame_commit (will improve variable naming in a future update) #history_commit is the commit with respect to the overall repo history. #blame_commit is the commit in which this line was most recently changed #as obtained through git blame. We use the "blame_commit" variable #to obtain the author of the commit for when the lines were last changed. for blame_commit, lines in repo.blame(history_commit, file_in_repo.path): #Git blame does not always return one line at a time. #Sometimes we are returned several lines committed by the same author. #In that case, we must count how many lines there are or our #total will not match the actual file. blameLineCount = 0 for line in lines: #increment lines to later attribute to an organization. blameLineCount += 1 #increment lines in the file as a whole total_lines_in_file += 1 #Testing output: only shows that things are still running. print("Inner loop: " + str(blame_commit)) #Get the email address of the author of this commit. 
#If we already have it in our dictionary, increase the total #lines for the associated organization by blameLineCount if blame_commit.author.email in orgs_associated_with_user: for organization in orgs_associated_with_user[blame_commit.author.email]: if organization not in lines_per_organization_per_file: lines_per_organization_per_file[organization] = blameLineCount else: lines_per_organization_per_file[organization] += blameLineCount #If the email address is not in our dictionary, we must query #the database to get any ass
# Copyright (c) 2014 Ahmed H. Ismail # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of t
he License at # http://www.apache.org/licenses/LICEN
SE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
#!/usr/bin/env python
# -*- coding: ascii -*-
"""wtty-iod: bridge tty devices to plain files.

For each configured device, a reader thread appends everything the tty
emits to <reader-root>/<tty>.log, and a writer thread forwards lines
appended to <writer-root>/<tty>.in back into the device.
"""
from subprocess import Popen, PIPE
import threading
import select
import logging
import fcntl
import time
import sys
import os

# stty settings applied to every device before use (raw-ish mode; the
# trailing token is the baud rate).
TTY_OPTS="-icrnl -onlcr -imaxbel -opost -isig -icanon -echo line 0 kill ^H min 100 time 2 brkint 115200"

# Module-level registries of worker threads, populated by main().
READERS = []
WRITERS = []
# select() poll timeout in seconds; bounds how long shutdown can lag.
SELECT_TO = 0.1


def tty_set_opts(dev, opts):
    """Set tty options

    Runs `stty -F <dev> <opts...>`, logging its stdout (info) and stderr
    (error). Returns stty's exit code: 0 on success, non-zero on failure.
    """

    cmd = ["stty", "-F", dev] + opts.split(" ")
    prcs = Popen(cmd, stdout=PIPE,stderr=PIPE)
    out, err = prcs.communicate()
    if out:
        logging.info(out)
    if err:
        logging.error(err)

    return prcs.returncode


class TTYWorker(threading.Thread):
    """Base thread bound to one device (`dev`) and one directory (`root`).

    Subclasses implement run(); stop() asks the loop to exit cooperatively
    via the `keep_running` flag (checked at least every SELECT_TO seconds).
    """

    def __init__(self, dev, root):
        threading.Thread.__init__(self)

        # e.g. "/dev/ttyUSB0" -> tty name "ttyUSB0" used for file names.
        self.tty = os.path.basename(dev)
        self.dev = dev
        self.root = root
        self.keep_running = True

    def stop(self):
        # Cooperative shutdown; run() loops poll this flag.
        self.keep_running = False

    def run(self):
        raise Exception("Not implemented")


class TTYReader(TTYWorker):
    """Reads tty output to file"""

    def run(self):
        tty_out_path = os.sep.join([self.root, "%s.log" % self.tty])
        logging.info("tty_out_path(%s)" % tty_out_path)

        # Outer loop: re-validate preconditions each pass so the worker
        # survives the device or directory disappearing and reappearing.
        while self.keep_running:
            err = not os.path.exists(self.dev)
            if err:
                logging.error("dev(%s) does not exist" % self.dev)
                time.sleep(1)
                continue

            err = not os.path.exists(self.root)
            if err:
                logging.error("root(%s) does not exist" % self.root)
                time.sleep(1)
                continue

            err = tty_set_opts(self.dev, TTY_OPTS)
            if err:
                logging.error("failed stty err(%d)", err)
                time.sleep(1)
                continue

            try:
                # Unbuffered (buffering=0) binary streams: bytes reach the
                # log file as soon as they are read from the device.
                with open(self.dev, "rb", 0) as dev_r, \
                     open(tty_out_path, "ab", 0) as tty_out:
                    # st_nlink drops to 0 when the open file is unlinked;
                    # that ends the inner loop so the outer loop can reopen.
                    while self.keep_running and \
                          os.fstat(dev_r.fileno()).st_nlink and \
                          os.fstat(tty_out.fileno()).st_nlink:
                        ready, _, _ = select.select(
                            [dev_r.fileno()], [], [], SELECT_TO
                        )
                        if not ready:
                            continue
                        logging.debug("dev_r.read(1)")
                        payload = dev_r.read(1)
                        logging.debug("dev_r.read(1) -- DONE")
                        # NOTE(review): read() on a raw stream returns None
                        # only when non-blocking with no data; at EOF it
                        # returns b"" — presumably None is the case seen in
                        # practice here, confirm against the device setup.
                        if payload is None:
                            break
                        logging.debug("tty_out.write")
                        tty_out.write(payload)
                        logging.debug("tty_out.write -- DONE")
            except:
                # NOTE(review): bare except also swallows KeyboardInterrupt
                # and SystemExit; intentional to keep the worker alive?
                logging.error("error(%s)" % str(sys.exc_info()))


class TTYWriter(TTYWorker):
    """Write commands to tty"""

    def run(self):
        tty_in_path = os.sep.join([self.root, "%s.in" % self.tty])
        logging.info("tty_in(%s)" % tty_in_path)

        # Same resilience pattern as TTYReader: check device, root and the
        # input file each pass, retrying once per second until all exist.
        while self.keep_running:
            err = not os.path.exists(self.dev)
            if err:
                logging.error("dev(%s) does not exist" % self.dev)
                time.sleep(1)
                continue

            err = not os.path.exists(self.root)
            if err:
                logging.error("root(%s) does not exist" % self.root)
                time.sleep(1)
                continue

            err = not os.path.exists(tty_in_path)
            if err:
                logging.error("tty_in_path(%s) does not exist" % tty_in_path)
                time.sleep(1)
                continue

            err = tty_set_opts(self.dev, TTY_OPTS)
            if err:
                logging.error("failed stty err(%d)", err)
                time.sleep(1)
                continue

            try:
                # NOTE(review): buffering=0 with text mode ("a"/"r") is only
                # valid on Python 2; Python 3 requires binary mode for
                # unbuffered I/O — confirm the target interpreter.
                with open(self.dev, "a", 0) as dev_w, \
                     open(tty_in_path, "r", 0) as tty_in:
                    # Seek to EOF: only forward lines appended from now on.
                    tty_in.seek(0, 2)
                    while self.keep_running and \
                          os.fstat(dev_w.fileno()).st_nlink and \
                          os.fstat(tty_in.fileno()).st_nlink:
                        ready, _, _ = select.select(
                            [tty_in.fileno()], [], [], SELECT_TO
                        )
                        if not ready:
                            continue
                        line = tty_in.readline()
                        if not line:
                            continue
                        # Send the command, pause briefly, then send a CR
                        # separately to terminate it on the device side.
                        logging.debug("dev_w.write")
                        dev_w.write(line.strip())
                        logging.debug("dev_w.write -- DONE")
                        time.sleep(0.1)
                        logging.debug("dev_w.write CR")
                        dev_w.write('\r')
                        logging.debug("dev_w.write CR -- DONE")
            except:
                # NOTE(review): bare except — see TTYReader.run.
                logging.error("error(%s)" % str(sys.exc_info()))


def main(cfg, state):
    """Entry point for wtty-iod

    cfg: dict with "devices" (list of tty paths) and "roots" (dict with
    "reader" and "writer" directories).
    state: dict whose "keep_running" flag, presumably toggled by a signal
    handler elsewhere, drives shutdown.
    Blocks until state["keep_running"] goes false, then stops and joins
    all workers.
    """

    logging.critical("Starting...")
    for tty in cfg["devices"]:
        READERS.append(TTYReader(tty, cfg["roots"]["reader"]))
        WRITERS.append(TTYWriter(tty, cfg["roots"]["writer"]))

    logging.info("Starting workers")
    for worker in READERS + WRITERS:
        worker.start()

    logging.critical("Working...")
    while (state["keep_running"]):
        time.sleep(0.1)

    # Signal every worker first, then join; workers poll keep_running at
    # least every SELECT_TO seconds, so join() terminates promptly.
    logging.info("Stopping")
    for i, worker in enumerate(WRITERS + READERS):
        logging.debug("Stopping i(%d)" % i)
        worker.stop()

    logging.info("Joining")
    for i, worker in enumerate(WRITERS + READERS):
        logging.debug("Joining i(%d)" % i)
        worker.join()

    logging.critical("Stopped.")
from __future__ import division, absolute_import, print_function,\ unicode_literals import os import sys try: from setuptools import setup except ImportError: from distutils.core import setup, Extension from distutils.core import Extension from distutils.errors import DistutilsError from distutils.command.build_ext import build_ext with open(os.path.join('nanomsg','version.py')) as f: exec(f.read()) class skippable_build_ext(build_ext): def run(self): try: build_ext.run(self) except Exception as e: print() print("=" * 79) print("WARNING : CPython API extension could not be built.")
print() print("Exception was : %r" % (e,)) print() print( "If you need the extensions (they may be faster than " "alternative on some" ) print(" platforms) check you have a compiler configured with all" " the necessary") print(" headers and libraries.") print("=" * 79) print() try: import ctypes if sys.platform in ('win32',
'cygwin'): _lib = ctypes.windll.nanoconfig else: _lib = ctypes.cdll.LoadLibrary('libnanoconfig.so') except OSError: # Building without nanoconfig cpy_extension = Extension(str('_nanomsg_cpy'), sources=[str('_nanomsg_cpy/wrapper.c')], libraries=[str('nanomsg')], ) else: # Building with nanoconfig cpy_extension = Extension(str('_nanomsg_cpy'), define_macros=[('WITH_NANOCONFIG', '1')], sources=[str('_nanomsg_cpy/wrapper.c')], libraries=[str('nanomsg'), str('nanoconfig')], ) install_requires = [] try: import importlib except ImportError: install_requires.append('importlib') setup( name='nanomsg', version=__version__, packages=[str('nanomsg'), str('_nanomsg_ctypes'), str('nanomsg_wrappers')], ext_modules=[cpy_extension], cmdclass = {'build_ext': skippable_build_ext}, install_requires=install_requires, description='Python library for nanomsg.', classifiers=[ "Development Status :: 3 - Alpha", "Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", ], author='Tony Simpson', author_email='agjasimpson@gmail.com', url='https://github.com/tonysimpson/nanomsg-python', keywords=['nanomsg', 'driver'], license='MIT', test_suite="tests", )
# -*- coding: utf-8 -*-
"""The application's model objects"""

from zope.sqlalchemy import ZopeTransactionExtension
from sqlalchemy.orm import scoped_session, sessionmaker
# from sqlalchemy import MetaData
from sqlalchemy.ext.declarative import declarative_base

# Global session manager: DBSession() returns the Thread-local
# session object appropriate for the current web request.
# autoflush=True flushes pending changes before each query;
# autocommit=False keeps explicit transactions, which are tied into the
# Zope transaction machinery via ZopeTransactionExtension.
maker = sessionmaker( autoflush = True, autocommit = False, extension = ZopeTransactionExtension() )
DBSession = scoped_session( maker )

# Base class for all of our model classes: By default, the data model is
# defined with SQLAlchemy's declarative extension, but if you need more
# control, you can switch to the traditional method.
DeclarativeBase = declarative_base()

# There are two convenient ways for you to spare some typing.
# You can have a query property on all your model classes by doing this:
# DeclarativeBase.query = DBSession.query_property()
# Or you can use a session-aware mapper as it was used in TurboGears 1:
# DeclarativeBase = declarative_base(mapper=DBSession.mapper)

# Global metadata.
# The default metadata is the one from the declarative base.
metadata = DeclarativeBase.metadata

# If you have multiple databases with overlapping table names, you'll need a
# metadata for each database. Feel free to rename 'metadata2'.
# metadata2 = MetaData()

#####
# Generally you will not want to define your table's mappers, and data objects
# here in __init__ but will want to create modules them in the model directory
# and import them at the bottom of this file.
#
######

def init_model( engine ):
    """Call me before using any of the tables or classes in the model.

    Binds the thread-local DBSession to the given SQLAlchemy engine.
    """
    DBSession.configure( bind = engine )
    # If you are using reflection to introspect your database and create
    # table objects for you, your tables must be defined and mapped inside
    # the init_model function, so that the engine is available if you
    # use the model outside tg2, you need to make sure this is called before
    # you use the model.
    #
    # See the following example:

    # global t_reflected

    # t_reflected = Table("Reflected", metadata,
    #     autoload=True, autoload_with=engine)

    # mapper(Reflected, t_reflected)

# Import your model modules here.
# NOTE(review): implicit relative imports ("from auth import ...") only
# work on Python 2; under Python 3 these would need to become
# "from .auth import ..." etc. The wildcard imports also re-export
# everything from those modules as part of this package's namespace.
from auth import User, Group, Permission
from logic import *
from sysutil import *
from fileutil import *
# -*- coding: utf8 -*-
# This file is part of Mnemosyne.
#
# Copyright (C) 2013 Daniel Lombraña González
#
# Mnemosyne is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mnemosyne is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Mnemosyne. If not, see <http://www.gnu.org/licenses/>.
"""
Package for creating the Flask application.

This exports:
    - create_app a function that creates the Flask application

"""
from flask import Flask
from mnemosyne.frontend import frontend
from mnemosyne.model import db

try:
    import mnemosyne.settings as settings
except ImportError:
    # BUGFIX: the original bare `except:` swallowed every error, and left
    # the `settings` name undefined so create_app() later tripped a
    # NameError that a second bare except hid. Catch only a missing module
    # and record the failure explicitly.
    settings = None
    print("Settings file is missing")


def create_app(db_name=None, testing=False):
    """
    Create the Flask app object after configuring it.

    Keyword arguments:
        db_name -- Database name (overrides SQLALCHEMY_DATABASE_URI)
        testing -- Enable/Disable testing mode (currently unused)

    Return value:
        app -- Flask application object
    """
    app = Flask(__name__)
    if settings is not None:
        app.config.from_object(settings)
    else:
        # Fall back to the file named by the MNEMOSYNE_SETTINGS environment
        # variable when the bundled settings module could not be imported;
        # silent=False makes a missing/undefined variable raise loudly.
        print("Settings file is missing, trying with env config...")
        app.config.from_envvar('MNEMOSYNE_SETTINGS', silent=False)
    if db_name:
        app.config['SQLALCHEMY_DATABASE_URI'] = db_name
    db.init_app(app)
    app.register_blueprint(frontend)
    return app
from django.contrib.contenttypes.models import ContentType

from lfs.core.utils import import_symbol
from lfs.criteria.models import Criterion

import logging
logger = logging.getLogger(__name__)


# DEPRECATED 0.8
def is_valid(request, object, product=None):
    """
    Returns True if the given object is valid. This is calculated via the
    attached criteria.

    Passed object is an object which can have criteria. At the moment these
    are discounts, shipping/payment methods and shipping/payment prices.
    """
    # Typo fixed in the deprecation message ("Decprecated" -> "Deprecated").
    logger.info("Deprecated: lfs.criteria.utils.is_valid: this function is deprecated. Please use the Criteria class instead.")
    for criterion_object in get_criteria(object):
        criterion_object.request = request
        criterion_object.product = product
        if criterion_object.is_valid() is False:
            return False
    return True


# DEPRECATED 0.8
def get_criteria(object):
    """
    Returns all criteria for given object.
    """
    logger.info("Deprecated: lfs.criteria.utils.get_criteria: this function is deprecated. Please use the Criteria class instead.")
    content_type = ContentType.objects.get_for_model(object)

    criteria = []
    for criterion in Criterion.objects.filter(content_id=object.id, content_type=content_type):
        criteria.append(criterion.get_content_object())
    return criteria


def get_first_valid(request, objects, product=None):
    """
    Returns the first valid object of given objects, or None if no object
    is valid.

    Passed objects are objects which can have criteria. At the moment these
    are discounts, shipping/payment methods and shipping/payment prices.
    """
    for object in objects:
        if object.is_valid(request, product):
            return object
    return None


# DEPRECATED 0.8
def save_criteria(request, object):
    """
    Saves the criteria for the given object. The criteria are passed via
    request body.
    """
    logger.info("Deprecated: lfs.criteria.utils.save_criteria: this function is deprecated. Please use the Criteria class instead.")
    # First we delete all existing criteria objects for the given object.
    for co in get_criteria(object):
        co.delete()

    # Then we add all passed criteria to the object.
    for key, model in request.POST.items():
        if key.startswith("type"):
            try:
                id = key.split("-")[1]
            except IndexError:
                # BUG FIX: a key without a "-" makes the [1] index raise
                # IndexError, not KeyError; the previous ``except KeyError``
                # handler could never fire and the exception escaped.
                continue

            # Get the values for the criterion
            operator = request.POST.get("operator-%s" % id)
            position = request.POST.get("position-%s" % id)

            criterion_class = import_symbol(model)
            criterion = criterion_class.objects.create(content=object, operator=operator, position=position)

            if criterion.get_value_type() == criterion.MULTIPLE_SELECT:
                value = request.POST.getlist("value-%s" % id)
            else:
                value = request.POST.get("value-%s" % id)
            criterion.update(value)
""" Load the CCGOIS datasets into a CKAN instance """ import dc import json import slugify import ffs def make_name_from_title(title): # For some reason, we're finding duplicate names name = slugify.slugify(title).lower()[:99] if not name.startswith('ccgois-'): name = u"ccgois-{}".format(name) return name def load_ccgois(datasets): for metadata in datasets: resources = [ dict( description=r['description'], name=r['name'], format=r['filetype'], url=r['url'] ) for r in metadata['resources'] ] print [r['name'] for r in
metadata['resources']] metadata['title'] = u'CCGOIS - {}'.format(metadata['title']) metadata['name'] = make_name_from_title(metadata['title']) print u'Creating {}'.format(metadata['name']) dc.Dataset.create_or_update( name=metadata['name'], title=metadata['title'], state='active', license_id='uk-ogl', notes=metadata['descr
iption'], origin='https://indicators.ic.nhs.uk/webview/', tags=dc.tags(*metadata['keyword(s)']), resources=resources, #frequency=[metadata['frequency'], ], owner_org='hscic', extras=[ dict(key='frequency', value=metadata.get('frequency', '')), dict(key='coverage_start_date', value=metadata['coverage_start_date']), dict(key='coverage_end_date', value=metadata['coverage_end_date']), dict(key='domain', value=metadata['domain']), dict(key='origin', value='HSCIC'), dict(key='next_version_due', value=metadata['next version due']), dict(key='nhs_OF_indicators', value=metadata['nhs_of_indicators']), dict(key='HSCIC_unique_id', value=metadata['unique identifier']), dict(key='homepage', value=metadata['homepage']), dict(key='status', value=metadata['status']), dict(key='language', value=metadata['language']), dict(key='assurance_level', value=metadata['assurance_level']), dict(key='release_date', value=metadata['current version uploaded']) ] ) return def group_ccgois(datasets): for metadata in datasets: dataset_name = make_name_from_title(metadata['title']) try: dataset = dc.ckan.action.package_show(id=dataset_name) except: print "Failed to find dataset: {}".format(dataset_name) print "Can't add to group" continue if [g for g in dataset.get('groups', []) if g['name'] == 'ccgois']: print 'Already in group', g['name'] else: dc.ckan.action.member_create( id='ccgois', object=dataset_name, object_type='package', capacity='member' ) return def main(workspace): DATA_DIR = ffs.Path(workspace) datasets = json.load(open(DATA_DIR / 'ccgois_indicators.json')) dc.ensure_publisher('hscic') dc.ensure_group('ccgois') load_ccgois(datasets) group_ccgois(datasets)
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .resource import Resource


class NamespaceCreateOrUpdateParameters(Resource):
    """Parameters supplied to the CreateOrUpdate Namespace operation.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource Id
    :vartype id: str
    :ivar name: Resource name
    :vartype name: str
    :ivar type: Resource type
    :vartype type: str
    :param location: Resource location
    :type location: str
    :param tags: Resource tags
    :type tags: dict
    :param sku: The sku of the created namespace
    :type sku: :class:`Sku <azure.mgmt.notificationhubs.models.Sku>`
    :param namespace_create_or_update_parameters_name: The name of the
     namespace.
    :type namespace_create_or_update_parameters_name: str
    :param provisioning_state: Provisioning state of the Namespace.
    :type provisioning_state: str
    :param region: Specifies the targeted region in which the namespace
     should be created. It can be any of the following values: Australia
     EastAustralia SoutheastCentral USEast USEast US 2West USNorth Central
     USSouth Central USEast AsiaSoutheast AsiaBrazil SouthJapan EastJapan
     WestNorth EuropeWest Europe
    :type region: str
    :param status: Status of the namespace. It can be any of these values:1 =
     Created/Active2 = Creating3 = Suspended4 = Deleting
    :type status: str
    :param created_at: The time the namespace was created.
    :type created_at: datetime
    :param service_bus_endpoint: Endpoint you can use to perform
     NotificationHub operations.
    :type service_bus_endpoint: str
    :param subscription_id: The Id of the Azure subscription associated with
     the namespace.
    :type subscription_id: str
    :param scale_unit: ScaleUnit where the namespace gets created
    :type scale_unit: str
    :param enabled: Whether or not the namespace is currently enabled.
    :type enabled: bool
    :param critical: Whether or not the namespace is set as Critical.
    :type critical: bool
    :param namespace_type: The namespace type. Possible values include:
     'Messaging', 'NotificationHub'
    :type namespace_type: str or :class:`NamespaceType
     <azure.mgmt.notificationhubs.models.NamespaceType>`
    """

    # Fields marked readonly are populated by the server and must not be
    # sent by the client; ``location`` is the only required input.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }

    # Maps each Python attribute to its wire name and msrest serialization
    # type; the ``properties.*`` keys are flattened REST sub-properties.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'namespace_create_or_update_parameters_name': {'key': 'properties.name', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'region': {'key': 'properties.region', 'type': 'str'},
        'status': {'key': 'properties.status', 'type': 'str'},
        'created_at': {'key': 'properties.createdAt', 'type': 'iso-8601'},
        'service_bus_endpoint': {'key': 'properties.serviceBusEndpoint', 'type': 'str'},
        'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'},
        'scale_unit': {'key': 'properties.scaleUnit', 'type': 'str'},
        'enabled': {'key': 'properties.enabled', 'type': 'bool'},
        'critical': {'key': 'properties.critical', 'type': 'bool'},
        'namespace_type': {'key': 'properties.namespaceType', 'type': 'NamespaceType'},
    }

    def __init__(self, location, tags=None, sku=None, namespace_create_or_update_parameters_name=None, provisioning_state=None, region=None, status=None, created_at=None, service_bus_endpoint=None, subscription_id=None, scale_unit=None, enabled=None, critical=None, namespace_type=None):
        # Common tracked-resource fields are handled by the Resource base.
        super(NamespaceCreateOrUpdateParameters, self).__init__(location=location, tags=tags, sku=sku)
        self.namespace_create_or_update_parameters_name = namespace_create_or_update_parameters_name
        self.provisioning_state = provisioning_state
        self.region = region
        self.status = status
        self.created_at = created_at
        self.service_bus_endpoint = service_bus_endpoint
        self.subscription_id = subscription_id
        self.scale_unit = scale_unit
        self.enabled = enabled
        self.critical = critical
        self.namespace_type = namespace_type
# Copyright (c) 2014, Salesforce.com, Inc. All rights reserved. # Copyright (c) 2015, Google, Inc. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # - Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # - Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # - Neither the name of Salesforce.com nor the names of its contributors # may be used to endorse or promote products derived from this # software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRES
S OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS # OF USE, DATA, OR PROFITS; OR BUSINESS INT
# ...BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import numpy


def uniform(min_val, max_val, point_count):
    """Return ``point_count`` evenly spaced cell centers on [min_val, max_val]."""
    cell_width = (max_val - min_val) / float(point_count)
    return min_val + cell_width * (numpy.arange(point_count) + 0.5)


def center_heavy(min_val, max_val, point_count):
    """Return a grid whose points concentrate toward the interval's middle."""
    warped = numpy.arcsin(uniform(-1, 1, point_count)) / numpy.pi + 0.5
    return min_val + warped * (max_val - min_val)


def left_heavy(min_val, max_val, point_count):
    """Return a grid whose points concentrate toward the left endpoint."""
    warped = uniform(0, 1, point_count) ** 2
    return min_val + warped * (max_val - min_val)


def right_heavy(min_val, max_val, point_count):
    """Return a grid whose points concentrate toward the right endpoint."""
    # A right-heavy grid is a left-heavy grid traversed from the other end.
    return left_heavy(max_val, min_val, point_count)[::-1].copy()


def pitman_yor(
        min_alpha=0.1,
        max_alpha=100,
        min_d=0,
        max_d=0.5,
        alpha_count=20,
        d_count=10):
    '''
    Build a 2-D hyperparameter grid for the Pitman-Yor process over the
    lower triangle alpha-coordinate + d-coordinate < 1, with alpha sampled
    log-uniformly (center-heavy) and d sampled left-heavy.

    For d = 0, this degenerates to the CRP, where the expected number of
    tables is:

        E[table_count] = O(alpha log(customer_count))
    '''
    min_alpha = float(min_alpha)
    max_alpha = float(max_alpha)
    min_d = float(min_d)
    max_d = float(max_d)

    def to_alpha(x):
        # Log-scale interpolation between min_alpha and max_alpha.
        return min_alpha * (max_alpha / min_alpha) ** x

    def to_d(y):
        # Linear interpolation between min_d and max_d.
        return min_d + (max_d - min_d) * y

    grid = []
    for x in center_heavy(0, 1, alpha_count):
        for y in left_heavy(0, 1, d_count):
            if x + y < 1:
                grid.append({'alpha': to_alpha(x), 'd': to_d(y)})
    return grid
io_handler.write_line("No service provides '{0}'", specification) return False else: # Print'em all io_handler.write(self._utils.make_table(headers, lines)) io_handler.write_line("{0} services registered", len(lines)) def __extract_help(self, method): """ Formats the help string for the given method :param method: The method to document :return: A tuple: (arguments list, documentation line) """ if method is None: return "(No associated method)" # Get the arguments argspec = inspect.getargspec(method) # Compute the number of arguments with default value if argspec.defaults is not None: nb_optional = len(argspec.defaults) # Let the mandatory arguments as they are args = ["<{0}>".format(arg) for arg in argspec.args[2:-nb_optional]] # Add the other arguments for name, value in zip(argspec.args[-nb_optional:], argspec.defaults[-nb_optional:]): if value is not None: args.append('[<{0}>={1}]'.format(name, value)) else: args.append('[<{0}>]'.format(name)) else: # All arguments are mandatory args = ["<{0}>".format(arg) for arg in argspec.args[2:]] # Extra arguments if argspec.keywords: args.append('[<property=value> ...]') if argspec.varargs: args.append("...") # Get the documentation string doc = inspect.getdoc(method) or "(Documentation missing)" return ' '.join(args), ' '.join(doc.split()) def __print_command_help(self, io_handler, namespace, cmd_name): """ Prints the documentation of the given command :param io_handler: I/O handler :param namespace: Name space of the command :param cmd_name: Name of the command """ # E
xtract documentation args, doc = self._
_extract_help(self._commands[namespace][cmd_name]) # Print the command name, and its arguments if args: io_handler.write_line("- {0} {1}", cmd_name, args) else: io_handler.write_line("- {0}", cmd_name) # Print the documentation line io_handler.write_line("\t\t{0}", doc) def __print_namespace_help(self, io_handler, namespace, cmd_name=None): """ Prints the documentation of all the commands in the given name space, or only of the given command :param io_handler: I/O Handler :param namespace: Name space of the command :param cmd_name: Name of the command to show, None to show them all """ io_handler.write_line("=== Name space '{0}' ===", namespace) # Get all commands in this name space if cmd_name is None: names = [command for command in self._commands[namespace]] names.sort() else: names = [cmd_name] first_cmd = True for command in names: if not first_cmd: # Print an empty line io_handler.write_line('\n') self.__print_command_help(io_handler, namespace, command) first_cmd = False def print_help(self, io_handler, command=None): """ Prints the available methods and their documentation, or the documentation of the given command. """ if command: # Single command mode if command in self._commands: # Argument is a name space self.__print_namespace_help(io_handler, command) was_namespace = True else: was_namespace = False # Also print the name of matching commands try: # Extract command name space and name possibilities = self.get_ns_commands(command) except ValueError as ex: # Unknown command if not was_namespace: # ... 
and no name space were matching either -> error io_handler.write_line(str(ex)) return False else: # Print the help of the found command if was_namespace: # Give some space io_handler.write_line('\n\n') for namespace, cmd_name in possibilities: self.__print_namespace_help(io_handler, namespace, cmd_name) else: # Get all name spaces namespaces = list(self._commands.keys()) namespaces.remove(DEFAULT_NAMESPACE) namespaces.sort() namespaces.insert(0, DEFAULT_NAMESPACE) first_ns = True for namespace in namespaces: if not first_ns: # Add empty lines io_handler.write_line('\n\n') # Print the help of all commands self.__print_namespace_help(io_handler, namespace) first_ns = False def properties_list(self, io_handler): """ Lists the properties of the framework """ # Get the framework framework = self._context.get_bundle(0) # Head of the table headers = ('Property Name', 'Value') # Lines lines = [item for item in framework.get_properties().items()] # Sort lines lines.sort() # Print the table io_handler.write(self._utils.make_table(headers, lines)) def property_value(self, io_handler, name): """ Prints the value of the given property, looking into framework properties then environment variables. 
""" value = self._context.get_property(name) if value is None: # Avoid printing "None" value = "" io_handler.write_line(str(value)) def environment_list(self, io_handler): """ Lists the framework process environment variables """ # Head of the table headers = ('Environment Variable', 'Value') # Lines lines = [item for item in os.environ.items()] # Sort lines lines.sort() # Print the table io_handler.write(self._utils.make_table(headers, lines)) def environment_value(self, io_handler, name): """ Prints the value of the given environment variable """ io_handler.write_line(os.getenv(name)) def threads_list(self, io_handler): """ Lists the active threads and their current code line """ try: # Extract frames frames = sys._current_frames() except AttributeError: io_handler.write_line("sys._current_frames() is not available.") return # Get the thread ID -> Thread mapping names = threading._active.copy() # Sort by thread ID thread_ids = list(frames.keys()) thread_ids.sort() lines = [] for thread_id in thread_ids: # Get the corresponding stack stack = frames[thread_id] # Try to get the thread name try: name = names[thread_id].name except KeyError: name = "<unknown>" # Construct the code position lines.append('Thread ID: {0} - Name: {1}'.format(thread_id, name)) lines.append('Line:') lines.extend(line.rstrip() for line in traceback.format_stack(stack, 1)) lines.append('') lines.append('') # Sort the lines io_handler.write('\n'.join(lines)) def thread_details(self, io_handler, thread_id): """ Prints details about the thread with the given ID (not its name) """ try: # Get the stack thread_id = int(thre
from .depth import * from .camera import * from .contact impor
t * from .imagefeature impor
t * from .arduino import *
# -*- coding: utf-8 -*-
"""
# This is authentication backend for Django middleware.

# In settings.py you need to set:

MIDDLEWARE_CLASSES = (
    ...
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.RemoteUserMiddleware',
    ...
)

AUTHENTICATION_BACKENDS = (
    'kobo.django.auth.krb5.RemoteUserBackend',
)

# Add login and logout addresses to urls.py:
urlpatterns = patterns("",
    ...
    url(r'^auth/krb5login/$',
        django.views.generic.TemplateView.as_view(template = 'auth/krb5login.html'),
    url(r'^auth/logout/$', 'django.contrib.auth.views.logout',
        kwargs={"next_page": "/"}),
    ...
)

# Set a httpd config to protect krb5login page with kerberos.
# You need to have mod_auth_kerb installed to use kerberos auth.
# Httpd config /etc/httpd/conf.d/<project>.conf should look like this:

<Location "/">
    SetHandler python-program
    PythonHandler django.core.handlers.modpython
    SetEnv DJANGO_SETTINGS_MODULE <project>.settings
    PythonDebug On
</Location>

<Location "/auth/krb5login">
    AuthType Kerberos
    AuthName "<project> Kerberos Authentication"
    KrbMethodNegotiate on
    KrbMethodK5Passwd off
    KrbServiceName HTTP
    KrbAuthRealms EXAMPLE.COM
    Krb5Keytab /etc/httpd/conf/http.<hostname>.keytab
    KrbSaveCredentials off
    Require valid-user
</Location>
"""

from django.contrib.auth.backends import RemoteUserBackend


class Krb5RemoteUserBackend(RemoteUserBackend):
    """Remote-user backend that maps Kerberos principals to local usernames."""

    def clean_username(self, username):
        """Strip the trailing @REALM part from a Kerberos principal name."""
        principal, _sep, _realm = username.partition("@")
        return principal
# NOTE: the previous ``from distutils.core import setup`` was immediately
# shadowed by the setuptools import and has been removed; setuptools'
# ``setup`` is the one actually used (and is required by find_packages()).
from setuptools import setup, find_packages

setup(
    name='gooeydist',
    packages=find_packages(),  # this must be the same as the name above
    version='0.2',
    description='Gooey Language',
    author='Gooey Comps',
    author_email='harrise@carleton.edu',
    url='https://github.com/GooeyComps/gooey-dist',  # use the URL to the github repo
    download_url='https://github.com/GooeyComps/gooey-dist/tarball/0.2',
    keywords=['gui'],  # arbitrary keywords
    classifiers=[],
)
# -*- coding: utf-8 -*-
from loading import load_plugins, register_plugin
from plugz import PluginTypeBase
from plugintypes import StandardPluginType

__author__ = 'Matti Gruener'
__email__ = 'matti@mistermatti.com'
__version__ = '0.1.5'

# BUG FIX: the previous ``__ALL__`` spelling is meaningless to Python, and it
# listed the objects themselves. Only lowercase ``__all__`` containing *name
# strings* controls ``from <package> import *``.
__all__ = [
    'load_plugins',
    'register_plugin',
    'StandardPluginType',
    'PluginTypeBase',
]
'''
go list comprehensions
'''

# NOTE(review): this is NOT plain Python. ``a = []int(...)`` looks like
# transpiler input (a Go-style typed list comprehension), and ``TestError``
# is presumably provided by the translator's test harness -- TODO confirm
# against that harness before "fixing" the syntax.
def main():
    # Build a []int slice from a generator over range(3).
    a = []int(x for x in range(3))
    TestError( len(a)==3 )
    TestError( a[0]==0 )
    TestError( a[1]==1 )
    TestError( a[2]==2 )
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-09-14 23:53
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    # Auto-generated schema migration: adds an optional ``teacher`` foreign
    # key from Course to organization.Teacher. ``blank=True, null=True``
    # keeps existing rows valid; CASCADE deletes a teacher's courses with
    # the teacher. The verbose_name is escaped CJK text ("\u8bb2\u5e08").

    dependencies = [
        ('organization', '0004_teacher_image'),
        ('courses', '0006_auto_20170914_2345'),
    ]

    operations = [
        migrations.AddField(
            model_name='course',
            name='teacher',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='organization.Teacher', verbose_name='\u8bb2\u5e08'),
        ),
    ]
l, rtag end """ def testSendECS(self): # First send an ECS query with routingTag self.setRoutingTag('foo') expected1 = dns.rrset.from_text(nameECS, ttlECS, dns.rdataclass.IN, 'TXT', '192.0.2.0/24') ecso = clientsubnetoption.ClientSubnetOption('192.0.2.1', 32) query = dns.message.make_query(nameECS, 'TXT', 'IN', use_edns=True, options=[ecso], payload=512) self.sendECSQuery(query, expected1) # Now check a cache hit with the same routingTag (but no ECS) query = dns.message.make_query(nameECS, 'TXT', 'IN') self.checkECSQueryHit(query, expected1) expected2 = dns.rrset.from_text(nameECS, ttlECS, dns.rdataclass.IN, 'TXT', '127.0.0.0/24') # And see if a different tag does *not* hit the first one self.setRoutingTag('bar') query = dns.message.make_query(nameECS, 'TXT', 'IN') self.sendECSQuery(query, expected2) # And see if a *no* tag does *not* hit the first one expected3 = dns.rrset.from_text(nameECS, ttlECS, dns.rdataclass.IN, 'TXT', '192.0.3.0/24') self.setRoutingTag(None) ecso = clientsubnetoption.ClientSubnetOption('192.0.3.1', 32) query = dns.message.make_query(nameECS, 'TXT', 'IN', use_edns=True, options=[ecso], payload=512) self.sendECSQuery(query, expected3) # And see if an unknown tag from the same subnet does hit the last self.setRoutingTag('baz') ecso = clientsubnetoption.ClientSubnetOption('192.0.3.2', 32) query = dns.message.make_query(nameECS, 'TXT', 'IN', use_edns=True, options=[ecso], payload=512) self.checkECSQueryHit(query, expected3) # And a no tag and no subnet query does hit the general case self.setRoutingTag(None) query = dns.message.make_query(nameECS, 'TXT', 'IN') self.sendECSQuery(query, expected2) # And a unknown tag and no subnet query does hit the general case self.setRoutingTag('bag') query = dns.message.make_query(nameECS, 'TXT', 'IN') self.sendECSQuery(query, expected2) return # remove this line to peek at cache rec_controlCmd = [os.environ['RECCONTROL'], '--config-dir=%s' % 'configs/' + self._confdir, 'dump-cache', 'x'] try: expected = 
b'dumped 7 records\n' ret = subprocess.check_output(rec_controlCmd, stderr=subprocess.STDOUT) self.assertEqual(ret, expected) except subprocess.CalledProcessError as e: print(e.output) raise class testRoutingTagFFI(RoutingTagTest): _confdir = 'RoutingTagFFI' _config_template = """ log-common-errors=yes use-incoming-edns-subnet=yes edns-subnet-whitelist=ecs-echo.example. forward-zones=ecs-echo.example=%s.24 """ % (os.environ['PREFIX']) _lua_dns_script_file = """ local ffi = require("ffi") ffi.cdef[[ typedef struct pdns_ffi_param pdns_ffi_param_t; const char* pdns_ffi_param_get_qname(pdns_ffi_param_t* ref); void pdns_ffi_param_set_routingtag(pdns_ffi_param_t* ref, const char* rtag); ]] function gettag_ffi(obj) for line in io.lines('tagfile') do local rtag = ffi.string(line) ffi.C.pdns_ffi_param_set_routingtag(obj, rtag) break end return 0 end """ def testSendECS(self): # First send an ECS query with routingTag self.setRoutingTag('foo') expected1 = dns.rrset.from_text(nameECS, ttlECS, dns.rdataclass.IN, 'TXT', '192.0.2.0/24') ecso = clientsubnetoption.ClientSubnetOption('192.0.2.1', 32) query = dns.message.make_query(nameECS, 'TXT', 'IN', use_edns=True, options=[ecso], payload=512) self.sendECSQuery(query, expected1) # Now check a cache hit with the same routingTag (but no ECS) query = dns.message.make_query(nameECS, 'TXT', 'IN') self.checkECSQueryHit(query, expected1) expected2 = dns.rrset.from_text(nameECS, ttlECS, dns.rdataclass.IN, 'TXT', '127.0.0.0/24') # And see if a different tag does *not* hit the first one self.setRoutingTag('bar') query = dns.message.make_query(nameECS, 'TXT', 'IN') self.sendECSQuery(query, expected2) # And see if a *no* tag does *not* hit the first one expected3 = dns.rrset.from_text(nameECS, ttlECS, dns.rdataclass.IN, 'TXT', '192.0.3.0/24') self.setRoutingTag(None) ecso = clientsubnetoption.ClientSubnetOption('192.0.3.1', 32) query = dns.message.make_query(nameECS, 'TXT', 'IN', use_edns=True, options=[ecso], payload=512) 
self.sendECSQuery(query, expected3) # And see if an unknown tag from the same subnet does hit the last self.setRoutingTag('baz') ecso = clientsubnetoption.ClientSubnetOption('192.0.3.2', 32) query = dns.message.make_query(nameECS, 'TXT', 'IN', use_edns=True, options=[ecso], payload=512) self.checkECSQueryHit(query, expected3) # And a no tag and no subnet query does hit the general case self.setRoutingTag(None) query = dns.message.make_query(nameECS, 'TXT', 'IN') self.sendECSQuery(query, expected2) # And a unknown tag and no subnet query does hit the general case self.setRoutingTag('bag') query = dns.message.make_query(nameECS, 'TXT', 'IN') self.sendECSQuery(query, expected2) return #remove this line to peek at cache rec_controlCmd = [os.environ['RECCONTROL'], '--config-dir=%s' % 'configs/' + self._confdir, 'dump-cache y'] try: expected = 'dumped 6 records\n' ret = subprocess.check_output(rec_controlCmd, stderr=subprocess.STDOUT) self.assertEqual(ret, expected) except subprocess.CalledProcessError as e: print(e.output) raise class UDPRoutingResponder(DatagramProtocol): @staticmethod def ipToStr(option): if option.family == clientsubnetoption.FAMILY_IPV4: ip = socket.inet_ntop(socket.AF_INET, struct.pack('!L', option.ip)) elif option.family == clientsubnetoption.FAMILY_IPV6: ip = socket.inet_ntop(socket.AF_INET6, struct.pack('!QQ', option.ip >> 64, option.ip & (2 ** 64 - 1))) return ip def datagramReceived(self, datagram, address): request = dns.message.from_wire(datagram) response = dns.message.make_response(request) response.flags |= dns.flags.AA ecso = None if (request.question[0].name == dns.name.from_text(nameECS) or request.question[0].name == dns.name.from_text(nameECSInvalidScope)) and request.question[0].rdtype == dns.rdatatype.TXT: text = emptyECSText for option in request.options: if option.otype == clientsubnetoption.ASSIGNED_OPTION_CODE and isinstance(option, clientsubnetoption.ClientSubnetOption): text = self.ipToStr(option) + '/' + str(option.mask) # 
Send a scope more specific than the received source for nameECSInvalidScope if request.question[0].name == dns.name.from_text(nameECSInvalidScope): ecso = clientsubnetoption.ClientSubnetOption("192.0.42.42", 32, 32) else: ecso = clientsubnetoption.ClientSubnetOption(self.ipToStr(option), option.mask, option.mask) answer = dns.rrset.from_text(request.question[0].name, ttlECS, dns.rdataclass.IN, 'TXT', text) response.answer.append(answer) elif request
.question[0].name
== dns.name.from_text(nameECS) and request.question[0].rdtype == dns.rdatatype.NS: answer = dns.rrset.from_text(nameECS, ttlECS, dns.rdataclass.IN, 'NS', 'ns1.ecs-echo.example.') response.answer.append(answer) additional = dns.rrset.from_text('ns1.ecs-echo.example.', 15, dns.rdataclass.IN, 'A', os.environ['PREFIX'] + '.24') response.additional.append(additional)
###############################################################################
# _*_ coding: utf-8
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#

from __future__ import unicode_literals

from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook


class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Compare an XlsxWriter-produced workbook against a reference file
    that was created by Excel.

    """

    def setUp(self):
        self.maxDiff = None

        base_dir = 'xlsxwriter/test/comparison/'
        name = 'utf8_03.xlsx'

        self.got_filename = base_dir + '_test_' + name
        self.exp_filename = base_dir + 'xlsx_files/' + name

        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Test the creation of an XlsxWriter file with utf-8 strings."""

        book = Workbook(self.got_filename)
        sheet = book.add_worksheet('Café')

        sheet.write('A1', 'Café')

        book.close()

        self.assertExcelEqual()
. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Ftrl-proximal for TensorFlow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.ops import math_ops from tensorflow.python.training import optimizer from tensorflow.python.training import training_ops from tensorflow.python.util.tf_export import tf_export @tf_export(v1=["train.FtrlOptimizer"]) class FtrlOptimizer(optimizer.Optimizer): """Optimizer that implements the FTRL algorithm. This version has support for both online L2 (McMahan et al., 2013) and shrinkage-type L2, which is the addition of an L2 penalty to the loss function. References: Ad-click prediction: [McMahan et al., 2013](https://dl.acm.org/citation.cfm?id=2488200) ([pdf](https://dl.acm.org/ft_gateway.cfm?id=2488200&ftid=1388399&dwn=1&CFID=32233078&CFTOKEN=d60fe57a294c056a-CB75C374-F915-E7A6-1573FBBC7BF7D526)) """ def __init__(self, learning_rate, learning_rate_power=-0.5, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0, use_locking=False, name="Ftrl", accum_name=None, linear_name=None, l2_shrinkage_regularization_strength=0.0, beta=None): r"""Construct a new FTRL optimizer. Args: learning_rate: A float value or a constant float `Tensor`. 
learning_rate_power: A float value, must be less or equal to zero. Controls how the learning rate decreases during training. Use zero for a fixed learning rate. See section 3.1 in (McMahan et al., 2013). initial_accumulator_value: The starting value for accumulators. Only zero or positive values are allowed. l1_regularization_strength: A float value, must be greater than or equal to zero. l2_regularization_strength: A float value, must be greater than or equal to zero. use_locking: If `True` use locks for update operations. name: Optional name prefix for the operations created when applying gradients. Defaults to "Ftrl". accum_name: The suffix for the variable that keeps the gradient squared accumulator. If not present, defaults to name. linear_name: The suffix for the variable that keeps the linear gradient accumulator. If not present, defaults to name + "_1". l2_shrinkage_regularization_strength: A float value, must be greater than or equal to zero. This differs from L2 above in that the L2 above is a stabilization penalty, whereas this L2 shrinkage is a magnitude penalty. The FTRL formulation can be written as: w_{t+1} = argmin_w(\hat{g}_{1:t}w + L1*||w||_1 + L2*||w||_2^2), where \hat{g} = g + (2*L2_shrinkage*w), and g is the gradient of the loss function w.r.t. the weights w. Specifically, in the absence of L1 regularization, it is equivalent to the following update rule: w_{t+1} = w_t - lr_t / (beta + 2*L2*lr_t) * g_t - 2*L2_shrinkage*lr_t / (beta + 2*L2*lr_t) * w_t where lr_t is the learning rate at t. When input is sparse shrinkage will only happen on the active weights. beta: A float value; corresponds to the beta parameter in the paper. Raises: ValueError: If one of the arguments is invalid. 
References: Ad-click prediction: [McMahan et al., 2013](https://dl.acm.org/citation.cfm?id=2488200) ([pdf](https://dl.acm.org/ft_gateway.cfm?id=2488200&ftid=1388399&dwn=1&CFID=32233078&CFTOKEN=d60fe57a294c056a-CB75C374-F915-E7A6-1573FBBC7BF7D526)) """ super(FtrlOptimizer, self).__init__(use_locking, name) if initial_accumulator_value < 0.0: raise ValueError( "initial_accumulator_value %f needs to be positive or zero" % initial_accumulator_value) if learning_rate_power > 0.0: raise ValueError("learning_rate_power %f needs to be negative or zero" % learning_rate_power) if l1_regularization_strength < 0.0: raise ValueError( "l1_regularization_strength %f needs to be positive or zero" % l1_regularization_strength) if l2_regularization_strength < 0.0: raise ValueError( "l2_regularization_strength %f needs to be positive or zero" % l2_regularization_strength) if l2_shrinkage_regularization_strength < 0.0: raise ValueError( "l2_shrinkage_regularization_strength %f needs to be positive" " or zero" % l2_shrinkage_regularization_strength) self._learning_rate = learning_rate self._learning_rate_power = learning_rate_power self._initial_accumulator_value = initial_accumulator_value self._l1_regularization_strength = l1_regularization_strength self._l2_regularization_strength = l2_regularization_strength self._beta = (0.0 if beta is None else beta) self._l2_shrinkage_regularization_strength = ( l2_shrinkage_regularization_strength) self._learning_rate_tensor = None self._learning_rate_power_tensor = None self._l1_regularization_strength_tensor = None self._adjusted_l2_regularization_strength_tensor = None self._l2_shrinkage_regularization_strength_tensor = None self._accum_name = accum_name self._linear_name = linear_name def _create_slots(self, var_list): # Create the "accum" and "linear" slots. 
for v in var_list: val = constant_op.constant( self._initial_accumulator_value, dtype=v.dtype, shape=v.get_shape()) self._get_or_make_slot(v, val, "accum", self._accum_name or self._name) self._zeros_slot(v, "linear", self._linear_name or self._name) def _prepare(self): self._learning_rate_tensor = ops.convert_to_tensor( self._learning_rate, name="learning_rate") self._l1_regularization_strength_tensor = ops.convert_to_tensor( self._l1_regularization_strength, name="l1_regularization_strength") # L2 regularization strength with beta added in so that the underlying # TensorFlow ops do not need to include that parameter. self._adjusted_l2_regularization_strength_tensor = ops.convert_to_tensor( self._l2_regularization_strength + self._beta / (2. * math_ops.maximum(self._learning_rate, 1e-36)), name="adjusted_l2_regularization_strength") assert self._adjusted_l2_regularization_strength_tensor is not None self._beta_tensor = ops.convert_to_tensor(self._beta, name="beta") self._l2_shrinkage_regularization_strength_tensor = ops.convert_to_tensor( self._l2_shrinkage_regularization_strength, name="l2_shrinkage_regularization_strength") self._learning_rate_power_tensor = ops.convert_to_tensor( self._learning_rate_power, name="learning_rate_power") def _apply_dense(self, grad, var): accum = self.get_slot(var, "accum") linear = self.get_slot(var, "linear") if self._l2_shrinkage_regularization_strength <= 0.0: return training_ops.apply_ftrl( var, accum, linear, grad, math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype
), math_ops.cast(self._l1_regularization_strength_tensor, var.dtyp
e.base_dtype), math_ops.cast(self._adjusted_l2_regularization_strength_tensor,
# File: setup.py
# Version: 3
# Description: Setup for SHA2017 badge
# License: MIT
# Authors: Renze Nicolai <renze@rnplus.nl>
#          Thomas Roos <?>

import ugfx, badge, appglue, dialogs, easydraw, time


def asked_nickname(value):
    """Prompt callback: persist the chosen nickname and return home."""
    if not value:
        # User cancelled: mark setup as skipped and suppress the sponsor screen.
        badge.nvs_set_u8('badge', 'setup.state', 2)
        badge.nvs_set_u8('sponsors', 'shown', 1)
        appglue.home()
        return

    badge.nvs_set_str("owner", "name", value)

    # First-boot magic: advance the setup state machine.
    newState = 1 if badge.nvs_get_u8('badge', 'setup.state', 0) == 0 else 3
    badge.nvs_set_u8('badge', 'setup.state', newState)

    # Give the user visual confirmation before leaving.
    easydraw.msg("Hi "+value+"!", 'Your nick has been stored to flash!')
    time.sleep(0.5)
    appglue.home()


ugfx.init()
nickname = badge.nvs_get_str("owner", "name", "")
dialogs.prompt_text("Nickname", nickname, cb=asked_nickname)
import os

from home.models import ReplicaSet, WhatTorrent, WhatFulltext


def run_checks():
    """Run DB/Transmission consistency checks.

    Returns a dict with two lists of human-readable messages:
    ``errors`` (hard inconsistencies) and ``warnings`` (suspicious state).
    """
    errors = []
    warnings = []

    def check_whatfulltext():
        # Every WhatTorrent row must have a matching, in-sync WhatFulltext
        # row, and vice versa.
        torrents = {w.id: w for w in WhatTorrent.objects.defer('torrent_file').all()}
        fulltexts = {w.id: w for w in WhatFulltext.objects.all()}
        for key, torrent in torrents.items():
            if key not in fulltexts:
                errors.append(u'{0} does not have a matching fulltext entry.'.format(torrent))
            elif not fulltexts[key].match(torrent):
                errors.append(u'{0} does not match info with fulltext entry.'.format(torrent))
        for key, fulltext in fulltexts.items():
            if key not in torrents:
                errors.append(u'{0} does not have a matching whattorrent entry.'.format(fulltext))

    check_whatfulltext()

    for replica_set in ReplicaSet.objects.all():
        seen = {}
        for instance in replica_set.transinstance_set.all():
            db_torrents = instance.get_m_torrents_by_hash()
            trans_torrents = instance.get_t_torrents_by_hash(['id', 'hashString'])

            for info_hash, m_torrent in db_torrents.items():
                # The same torrent should live in only one instance of a
                # replica set.
                if info_hash in seen:
                    warnings.append(u'{0} is already in another instance of '
                                    u'the same replica set: {1}'
                                    .format(m_torrent, seen[info_hash].instance))

                # The instance must actually hold the torrent in Transmission.
                if info_hash not in trans_torrents:
                    errors.append(u'{0} is in DB, but not in Transmission at instance {1}'
                                  .format(m_torrent, instance))

                seen[info_hash] = m_torrent

                # Masters must keep the metafiles on disk.
                if replica_set.is_master:
                    files_in_dir = os.listdir(m_torrent.path)
                    if not any('.torrent' in f for f in files_in_dir):
                        errors.append(u'Missing .torrent file for {0} at {1}'
                                      .format(m_torrent, instance))
                    if not any('ReleaseInfo2.txt' == f for f in files_in_dir):
                        errors.append(u'Missing ReleaseInfo2.txt for {0} at {1}'
                                      .format(m_torrent, instance))

            for info_hash, t_torrent in trans_torrents.items():
                # Anything in Transmission must also be tracked in the DB.
                if info_hash not in db_torrents:
                    errors.append(u'{0} is in Transmission, but not in DB at instance {1}'
                                  .format(t_torrent, instance))

    return {
        'errors': errors,
        'warnings': warnings,
    }
import asposecellscloud from asposecellscloud.CellsApi import CellsApi from asposecellscloud.CellsApi import ApiException import asposestoragecloud from asposestoragecloud.StorageApi import StorageApi apiKey = "XXXXX" #sepcify App Key appSid = "XXXXX" #sepcify App SID apiServer = "http://api.aspose.com/v1.1" data_folder = "../../data/" #Instantiate Aspose Storage API SDK storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True) storageA
pi = StorageApi(storage_apiClient) #Instantiate Aspose Cells API SDK api_client = asposecellscloud.ApiClient.ApiClient(apiKey, appSid, True) cellsApi = CellsApi(api_client); #set input file name filename = "Sample_Test_Book.xls" sheetName = "Sheet1" range = "A1:A12" #upload file to aspose cloud storage storageApi.PutCreate(Path=filename, file=dat
a_folder + filename) try: #invoke Aspose.Cells Cloud SDK API to clear cells formatting in a worksheet response = cellsApi.PostClearFormats(name=filename, sheetName=sheetName, range=range) if response.Status == "OK": #download updated Workbook from storage server response = storageApi.GetDownload(Path=filename) outfilename = "c:/temp/" + filename with open(outfilename, 'wb') as f: for chunk in response.InputStream: f.write(chunk) except ApiException as ex: print "ApiException:" print "Code:" + str(ex.code) print "Message:" + ex.message
# Support for building census bundles in Ambry

__version__ = 0.8
__author__ = 'eric@civicknowledge.com'

from .generator import *
from .schema import *
from .sources import *
from .transforms import *

import ambry.bundle


class AcsBundle(ambry.bundle.Bundle, MakeTablesMixin, MakeSourcesMixin,
                JamValueMixin, JoinGeofileMixin):
    """Base bundle for American Community Survey releases."""

    # Which of the first columns in the data tables to use.
    # (column name, description, width, datatype, column position)
    header_cols = [
        #('FILEID','File Identification',6,'str' ),
        #('FILETYPE','File Type',6,'str'),
        ('STUSAB', 'State/U.S.-Abbreviation (USPS)', 2, 'str', 2),
        ('CHARITER', 'Character Iteration', 3, 'str', 3),
        ('SEQUENCE', 'Sequence Number', 4, 'int', 4),
        ('LOGRECNO', 'Logical Record Number', 7, 'int', 5),
    ]

    def init(self):
        from .util import year_release

        self.year, self.release = year_release(self)
        self.log("Building Census bundle, year {}, release {}".format(self.year, self.release))

    def edit_pipeline(self, pipeline):
        """Change the SelectPartitionFromSource so it only writes a single partition"""
        from ambry.etl import SelectPartitionFromSource

        # The partition is named only after the destination table.
        def select_f(pipe, bundle, source, row):
            return source.dest_table.name

        pipeline.select_partition = SelectPartitionFromSource(select_f)

    @CaptureException
    def _pre_download(self, gen_cls):
        """Download all of the input files up front.

        This resolves the contention for the files that would occur if many
        generators tried to download the same files all at once.
        """
        from ambry_sources import download

        cache = self.library.download_cache
        source = self.source('b00001')  # First; any one will do
        gen = gen_cls(self, source)

        downloads = []
        for spec1, spec2 in gen.generate_source_specs():
            downloads.append((spec1.url, cache))
            # The two specs usually point to different files in the same zip
            # archive, but I'm not sure that is always true.
            if spec1.url != spec2.url:
                downloads.append((spec2.url, cache))

        # Multi-processing downloads might improve the speed, although
        # probably not by much.
        for url, dl_cache in downloads:
            self.log("Pre-downloading: {}".format(url))
            download(url, dl_cache)


class ACS2009Bundle(AcsBundle):
    pass


class ACS2010Bundle(AcsBundle):

    @CaptureException
    def ingest(self, sources=None, tables=None, stage=None, force=False, update_tables=True):
        """Pre-download every input file, then run the normal ingestion.

        Avoids many generators contending for the same downloads.
        """
        from .generator import ACS09TableRowGenerator

        self._pre_download(ACS09TableRowGenerator)
        return super(ACS2010Bundle, self).ingest(sources, tables, stage, force, update_tables)
#!/usr/bin/python

from mininet.topo import Topo
from mininet.net import Mininet
from mininet.cli import CLI
from mininet.log import setLogLevel, info, debug
from mininet.node import Host, RemoteController, OVSSwitch

# Must exist and be owned by quagga user (quagga:quagga by default on Ubuntu)
QUAGGA_RUN_DIR = '/var/run/quagga'
QCONFIG_DIR = 'configs'
ZCONFIG_DIR = 'configs'


class SdnIpHost(Host):
    """Host that installs a default route when configured."""

    def __init__(self, name, ip, route, *args, **kwargs):
        Host.__init__(self, name, ip=ip, *args, **kwargs)
        self.route = route

    def config(self, **kwargs):
        Host.config(self, **kwargs)
        debug("configuring route %s" % self.route)
        self.cmd('ip route add default via %s' % self.route)


class Router(Host):
    """Host that runs zebra and bgpd with the given interface layout."""

    def __init__(self, name, quaggaConfFile, zebraConfFile, intfDict, *args, **kwargs):
        Host.__init__(self, name, *args, **kwargs)
        self.quaggaConfFile = quaggaConfFile
        self.zebraConfFile = zebraConfFile
        self.intfDict = intfDict

    def config(self, **kwargs):
        Host.config(self, **kwargs)
        self.cmd('sysctl net.ipv4.ip_forward=1')

        for intf, attrs in self.intfDict.items():
            self.cmd('ip addr flush dev %s' % intf)
            # Set the MAC address on the interface, if one was requested.
            if 'mac' in attrs:
                self.cmd('ip link set %s down' % intf)
                self.cmd('ip link set %s address %s' % (intf, attrs['mac']))
                self.cmd('ip link set %s up ' % intf)
            # Assign every requested address to the interface.
            for addr in attrs['ipAddrs']:
                self.cmd('ip addr add %s dev %s' % (addr, intf))

        self.cmd('zebra -d -f %s -z %s/zebra%s.api -i %s/zebra%s.pid'
                 % (self.zebraConfFile, QUAGGA_RUN_DIR, self.name, QUAGGA_RUN_DIR, self.name))
        self.cmd('bgpd -d -f %s -z %s/zebra%s.api -i %s/bgpd%s.pid'
                 % (self.quaggaConfFile, QUAGGA_RUN_DIR, self.name, QUAGGA_RUN_DIR, self.name))

    def terminate(self):
        self.cmd("ps ax | egrep 'bgpd%s.pid|zebra%s.pid' | awk '{print $1}' | xargs kill"
                 % (self.name, self.name))
        Host.terminate(self)


class SdnIpTopo(Topo):
    """Single switch with two Quagga BGP routers attached."""

    def build(self):
        zebraConf = '{}/zebra.conf'.format(ZCONFIG_DIR)

        s1 = self.addSwitch('s1', dpid='0000000000000001', cls=OVSSwitch,
                            failMode="standalone")

        # Quagga 1 and Quagga 2 differ only in their index.
        for idx in (1, 2):
            eth0 = {
                'mac': '00:00:00:00:00:0%d' % idx,
                'ipAddrs': ['10.0.%d.1/24' % idx],
            }
            intfs = {'bgpq%d-eth0' % idx: eth0}
            router = self.addHost('bgpq%d' % idx, cls=Router,
                                  quaggaConfFile='{}/quagga{}.conf'.format(QCONFIG_DIR, idx),
                                  zebraConfFile=zebraConf,
                                  intfDict=intfs)
            self.addLink(router, s1)


topos = {'sdnip': SdnIpTopo}

if __name__ == '__main__':
    setLogLevel('debug')
    topo = SdnIpTopo()
    net = Mininet(topo=topo, controller=RemoteController)
    net.start()
    CLI(net)
    net.stop()
    info("done\n")
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
#   spec/fixtures/responses/whois.nic.pw/status_available
#
# and regenerate the tests with the following script
#
#   $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois


class TestWhoisNicPwStatusAvailable(object):
    """Parser assertions for an 'available' response from whois.nic.pw."""

    def setUp(self):
        fixture_path = "spec/fixtures/responses/whois.nic.pw/status_available.txt"
        host = "whois.nic.pw"
        # Read the fixture through a context manager so the file handle is
        # closed promptly instead of being leaked until interpreter exit.
        with open(fixture_path, "r") as fixture:
            part = yawhois.record.Part(fixture.read(), host)
        self.record = yawhois.record.Record(None, [part])

    def test_status(self):
        eq_(self.record.status, [])

    def test_available(self):
        eq_(self.record.available, True)

    def test_domain(self):
        eq_(self.record.domain, None)

    def test_nameservers(self):
        eq_(self.record.nameservers.__class__.__name__, 'list')
        eq_(self.record.nameservers, [])

    def test_admin_contacts(self):
        eq_(self.record.admin_contacts.__class__.__name__, 'list')
        eq_(self.record.admin_contacts, [])

    def test_registered(self):
        eq_(self.record.registered, False)

    def test_created_on(self):
        eq_(self.record.created_on, None)

    def test_registrar(self):
        eq_(self.record.registrar, None)

    def test_registrant_contacts(self):
        eq_(self.record.registrant_contacts.__class__.__name__, 'list')
        eq_(self.record.registrant_contacts, [])

    def test_technical_contacts(self):
        eq_(self.record.technical_contacts.__class__.__name__, 'list')
        eq_(self.record.technical_contacts, [])

    def test_updated_on(self):
        eq_(self.record.updated_on, None)

    def test_domain_id(self):
        eq_(self.record.domain_id, None)

    def test_expires_on(self):
        eq_(self.record.expires_on, None)

    def test_disclaimer(self):
        eq_(self.record.disclaimer, None)
#!/usr/bin/env python
'''
simple templating system for mavlink generator

Copyright Andrew Tridgell 2011
Released under GNU GPL version 3 or later
'''

from mavparse import MAVParseError

class MAVTemplate(object):
    '''simple templating system

    Substitutes ``${NAME}`` variables from a dict or object, and expands
    ``${{field:...}}`` repetition blocks by iterating the named field.
    '''
    def __init__(self, start_var_token="${", end_var_token="}", start_rep_token="${{", end_rep_token="}}", trim_leading_lf=True, checkmissing=True):
        # Tokens delimiting a single-variable substitution, e.g. ${name}.
        self.start_var_token = start_var_token
        self.end_var_token = end_var_token
        # Tokens delimiting a repetition block, e.g. ${{fields:...}}.
        self.start_rep_token = start_rep_token
        self.end_rep_token = end_rep_token
        # Drop one leading newline from the template, if present.
        self.trim_leading_lf = trim_leading_lf
        # Raise on unknown variables instead of leaving them in place.
        self.checkmissing = checkmissing

    def find_end(self, text, start_token, end_token):
        '''find the end of a token, honouring nesting.

        Returns the offset in the string immediately after the matching
        end_token. Raises MAVParseError if text does not begin with
        start_token or the tokens are unbalanced.
        '''
        if not text.startswith(start_token):
            raise MAVParseError("invalid token start")
        offset = len(start_token)
        nesting = 1
        while nesting > 0:
            idx1 = text[offset:].find(start_token)
            idx2 = text[offset:].find(end_token)
            if idx1 == -1 and idx2 == -1:
                raise MAVParseError("token nesting error")
            if idx1 == -1 or idx1 > idx2:
                # Next token is an end token: close one nesting level.
                offset += idx2 + len(end_token)
                nesting -= 1
            else:
                # Next token is another start token: open a nesting level.
                offset += idx1 + len(start_token)
                nesting += 1
        return offset

    def find_var_end(self, text):
        '''find the end of a variable token'''
        return self.find_end(text, self.start_var_token, self.end_var_token)

    def find_rep_end(self, text):
        '''find the end of a repetition block'''
        return self.find_end(text, self.start_rep_token, self.end_rep_token)

    def substitute(self, text, subvars={}, trim_leading_lf=None, checkmissing=None):
        '''substitute variables in a string.

        subvars may be a dict (looked up by key) or an object (looked up
        with getattr). Repetition blocks are expanded first, then single
        variables.
        NOTE(review): the mutable default for subvars is safe here — the
        method only reads it, never mutates it.
        '''
        if trim_leading_lf is None:
            trim_leading_lf = self.trim_leading_lf
        if checkmissing is None:
            checkmissing = self.checkmissing
        # handle repetitions: ${{field:body}} expands body once per element
        # of subvars.field, recursively substituting inside the body.
        while True:
            subidx = text.find(self.start_rep_token)
            if subidx == -1:
                break
            endidx = self.find_rep_end(text[subidx:])
            if endidx == -1:
                raise MAVParseError("missing end macro in %s" % text[subidx:])
            part1 = text[0:subidx]
            part2 = text[subidx+len(self.start_rep_token):subidx+(endidx-len(self.end_rep_token))]
            part3 = text[subidx+endidx:]
            a = part2.split(':')
            field_name = a[0]
            # Re-join in case the body itself contains ':' characters.
            rest = ':'.join(a[1:])
            v = getattr(subvars, field_name, None)
            if v is None:
                raise MAVParseError('unable to find field %s' % field_name)
            t1 = part1
            for f in v:
                t1 += self.substitute(rest, f, trim_leading_lf=False, checkmissing=False)
            # Strip a trailing newline/comma left by the last iteration.
            if len(v) != 0 and t1[-1] in ["\n", ","]:
                t1 = t1[:-1]
            t1 += part3
            text = t1
        if trim_leading_lf:
            if text[0] == '\n':
                text = text[1:]
        # handle single-variable substitutions: ${name}
        while True:
            idx = text.find(self.start_var_token)
            if idx == -1:
                return text
            endidx = text[idx:].find(self.end_var_token)
            if endidx == -1:
                raise MAVParseError('missing end of variable: %s' % text[idx:idx+10])
            # NOTE(review): the +2 assumes len(start_var_token) == 2; other
            # start tokens would mis-slice the name — confirm if customized.
            varname = text[idx+2:idx+endidx]
            if isinstance(subvars, dict):
                if not varname in subvars:
                    if checkmissing:
                        raise MAVParseError("unknown variable in '%s%s%s'" % (
                            self.start_var_token, varname, self.end_var_token))
                    # Leave the unknown variable in place and continue after it.
                    return text[0:idx+endidx] + self.substitute(text[idx+endidx:], subvars,
                                                                trim_leading_lf=False, checkmissing=False)
                value = subvars[varname]
            else:
                value = getattr(subvars, varname, None)
                if value is None:
                    if checkmissing:
                        raise MAVParseError("unknown variable in '%s%s%s'" % (
                            self.start_var_token, varname, self.end_var_token))
                    return text[0:idx+endidx] + self.substitute(text[idx+endidx:], subvars,
                                                                trim_leading_lf=False, checkmissing=False)
            # Replace every occurrence of this variable at once.
            text = text.replace("%s%s%s" % (self.start_var_token, varname, self.end_var_token), str(value))
        return text

    def write(self, file, text, subvars={}, trim_leading_lf=True):
        '''write to a file with variable substitution'''
        file.write(self.substitute(text, subvars=subvars, trim_leading_lf=trim_leading_lf))
#!/usr/bin/env python
# Copyright 2020 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import heapq
import os
import platform
import random
import signal
import subprocess

# Base dir of the build products for Release and Debug.
OUT_DIR = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '..', '..', '..', 'out'))


def list_processes_linux():
  """Returns list of tuples (pid, command) of processes running in the same
  out directory as this checkout.
  """
  if platform.system() != 'Linux':
    return []
  try:
    cmd = 'pgrep -fa %s' % OUT_DIR
    # check_output returns bytes on Python 3; decode so the string
    # operations below (split/index against str) work on both 2 and 3.
    output = subprocess.check_output(cmd, shell=True).decode('utf-8') or ''
    processes = [
      (int(line.split()[0]), line[line.index(OUT_DIR):])
      for line in output.splitlines()
    ]
    # Filter strange process with name as out dir.
    return [p for p in processes if p[1] != OUT_DIR]
  except (subprocess.CalledProcessError, OSError, ValueError, UnicodeDecodeError):
    # pgrep exits non-zero when nothing matches; ValueError covers lines
    # that don't contain OUT_DIR or don't start with a pid.
    return []


def kill_processes_linux():
  """Kill stray processes on the system that started in the same out
  directory.

  All swarming tasks share the same out directory location.
  """
  if platform.system() != 'Linux':
    return
  for pid, cmd in list_processes_linux():
    try:
      print('Attempting to kill %d - %s' % (pid, cmd))
      os.kill(pid, signal.SIGKILL)
    except OSError:
      # The process may already be gone, or we may lack permission.
      pass


class FixedSizeTopList():
  """Utility collection for gathering a fixed number of elements with the
  biggest value for the given key. It employs a heap from which we pop the
  smallest element when the collection is 'full'.

  If you need a reversed behaviour (collect min values) just provide an
  inverse key.
  """

  def __init__(self, size, key=None):
    self.size = size
    self.key = key or (lambda x: x)
    self.data = []
    # Monotonic tie-breaker; see extra_key().
    self.discriminator = 0

  def add(self, elem):
    elem_k = self.key(elem)
    heapq.heappush(self.data, (elem_k, self.extra_key(), elem))
    if len(self.data) > self.size:
      # Drop the element with the smallest key.
      heapq.heappop(self.data)

  def extra_key(self):
    # Avoid key clash in tuples sent to the heap.
    # We want to avoid comparisons on the last element of the tuple
    # since those elements might not be comparable.
    self.discriminator += 1
    return self.discriminator

  def as_list(self):
    """Returns the collected elements sorted by key, biggest first."""
    original_data = [rec for (_, _, rec) in self.data]
    return sorted(original_data, key=self.key, reverse=True)
# -*- coding: utf-8 -*- from plugins import Plugin from PyQt4 import QtCore, QtGui import tempfile, codecs import os, subprocess class rst2pdf(Plugin): name='rst2pdf' shortcut='Ctrl+F8' description='Run through rst2pdf and preview' tmpf=None def run(self):
print "Running rst2pdf" text=unicode(self.client.editor.toPlainText()) # Save to a named file if self.tmpf is None: self.tmpf=tempfile.NamedTemporaryFile(delete=False) self.tmpf.cl
ose() f=codecs.open(self.tmpf.name,'w','utf-8') f.write(text) f.close() # FIXME: unsafe # FIXME: show output of the process somewhere try: self.client.notify('Running rst2pdf') subprocess.check_call('rst2pdf %s'%self.tmpf.name, shell=True) except subprocess.CalledProcessError: #FIXME: show error dialog return # Open with default PDF viewer # FIXME: would be awesome if we could know when this is still open # and not launch it again, since it refreshes automatically. self.client.notify('Launching PDF viewer') QtGui.QDesktopServices.openUrl(QtCore.QUrl('file://'+self.tmpf.name+'.pdf'))
s.conversion import RB_to_OPLS from mbuild.utils.io import import_ from mbuild.utils.sorting import natural_sort from .hoomd_snapshot import to_hoomdsnapshot gsd = import_("gsd") gsd.hoomd = import_("gsd.hoomd") hoomd = import_("hoomd") hoomd.md = import_("hoomd.md") hoomd.md.pair = import_("hoomd.md.pair") hoomd.md.special_pair = import_("hoomd.md.special_pair") hoomd.md.charge = import_("hoomd.md.charge") hoomd.md.bond
= import_("hoomd.md.bond") hoomd.md.angle = import_("hoomd.md.angle") hoomd.md.dihedral = import_("hoomd.md.dihedral") hoomd.group = import_("hoomd.group") def create_hoomd_simulation( structure, ref_distance=1.0, ref_mass=1.0, ref_energy=1.0, r_cut=1.2, auto_scale=False, snapshot_kwargs={}, pppm_kwargs={"Nx": 8, "Ny": 8, "Nz": 8, "order": 4}, init_snap=None, restart=None, ): """Convert a parametrized pmd.Structure to hoomd.SimulationContext. Parame
ters ---------- structure : parmed.Structure ParmEd Structure object ref_distance : float, optional, default=1.0 Reference distance for conversion to reduced units ref_mass : float, optional, default=1.0 Reference mass for conversion to reduced units ref_energy : float, optional, default=1.0 Reference energy for conversion to reduced units r_cut : float, optional, default 1.2 Cutoff radius, in reduced units auto_scale : bool, optional, default=False Automatically use largest sigma value as ref_distance, largest mass value as ref_mass, and largest epsilon value as ref_energy snapshot_kwargs : dict Kwargs to pass to to_hoomdsnapshot pppm_kwargs : dict Kwargs to pass to hoomd's pppm function init_snap : hoomd.data.SnapshotParticleData, optional, default=None Initial snapshot to which to add the ParmEd structure object (useful for rigid bodies) restart : str, optional, default=None Path to the gsd file from which to restart the simulation. https://hoomd-blue.readthedocs.io/en/v2.9.4/restartable-jobs.html Note: It is assumed that the ParmEd structure and the system in restart.gsd contain the same types. The ParmEd structure is still used to initialize the forces, but restart.gsd is used to initialize the system state (e.g., particle positions, momenta, etc). Returns ------- hoomd_objects : list List of hoomd objects created during conversion ReferenceValues : namedtuple Values used in scaling Notes ----- While the hoomd objects are returned, the hoomd.SimulationContext is accessible via `hoomd.context.current`. If you pass a non-parametrized pmd.Structure, you will not have angle, dihedral, or force field information. You may be better off creating a hoomd.Snapshot. Reference units should be expected to convert parmed Structure units: angstroms, kcal/mol, and daltons """ if isinstance(structure, mb.Compound): raise ValueError( "You passed mb.Compound to create_hoomd_simulation, there will be " "no angles, dihedrals, or force field parameters. 
Please use " "hoomd_snapshot.to_hoomdsnapshot to create a hoomd.Snapshot, then " "create your own hoomd context and pass your hoomd.Snapshot to " "hoomd.init.read_snapshot()" ) elif not isinstance(structure, pmd.Structure): raise ValueError( "Please pass a parmed.Structure to create_hoomd_simulation" ) _check_hoomd_version() version_numbers = _check_hoomd_version() if float(version_numbers[0]) >= 3: warnings.warn( "Warning when using Hoomd 3, potential API change where the hoomd " "context is not updated upon creation of forces - utilize the " "returned `hoomd_objects`" ) hoomd_objects = [] # Potential adaptation for Hoomd v3 API if auto_scale: ref_mass = max([atom.mass for atom in structure.atoms]) pair_coeffs = list( set((a.type, a.epsilon, a.sigma) for a in structure.atoms) ) ref_energy = max(pair_coeffs, key=operator.itemgetter(1))[1] ref_distance = max(pair_coeffs, key=operator.itemgetter(2))[2] ReferenceValues = namedtuple("ref_values", ["distance", "mass", "energy"]) ref_values = ReferenceValues(ref_distance, ref_mass, ref_energy) if not hoomd.context.current: hoomd.context.initialize("") if restart is None: snapshot, _ = to_hoomdsnapshot( structure, ref_distance=ref_distance, ref_mass=ref_mass, ref_energy=ref_energy, **snapshot_kwargs, hoomd_snapshot=init_snap, ) hoomd_objects.append(snapshot) hoomd_system = hoomd.init.read_snapshot(snapshot) hoomd_objects.append(hoomd_system) else: with gsd.hoomd.open(restart) as f: snapshot = f[-1] hoomd_objects.append(snapshot) hoomd_system = hoomd.init.read_gsd(restart, restart=restart) hoomd_objects.append(hoomd_system) print("Simulation initialized from restart file") nl = hoomd.md.nlist.cell() nl.reset_exclusions(exclusions=["1-2", "1-3"]) hoomd_objects.append(nl) if structure.atoms[0].type != "": print("Processing LJ and QQ") lj = _init_hoomd_lj( structure, nl, r_cut=r_cut, ref_distance=ref_distance, ref_energy=ref_energy, ) qq = _init_hoomd_qq(structure, nl, r_cut=r_cut, **pppm_kwargs) hoomd_objects.append(lj) 
hoomd_objects.append(qq) if structure.adjusts: print("Processing 1-4 interactions, adjusting neighborlist exclusions") lj_14, qq_14 = _init_hoomd_14_pairs( structure, nl, ref_distance=ref_distance, ref_energy=ref_energy ) hoomd_objects.append(lj_14) hoomd_objects.append(qq_14) if structure.bond_types: print("Processing harmonic bonds") harmonic_bond = _init_hoomd_bonds( structure, ref_distance=ref_distance, ref_energy=ref_energy ) hoomd_objects.append(harmonic_bond) if structure.angle_types: print("Processing harmonic angles") harmonic_angle = _init_hoomd_angles(structure, ref_energy=ref_energy) hoomd_objects.append(harmonic_angle) if structure.dihedral_types: print("Processing periodic torsions") periodic_torsions = _init_hoomd_dihedrals( structure, ref_energy=ref_energy ) hoomd_objects.append(periodic_torsions) if structure.rb_torsion_types: print("Processing RB torsions") rb_torsions = _init_hoomd_rb_torsions(structure, ref_energy=ref_energy) hoomd_objects.append(rb_torsions) print("HOOMD SimulationContext updated from ParmEd Structure") return hoomd_objects, ref_values def _init_hoomd_lj(structure, nl, r_cut=1.2, ref_distance=1.0, ref_energy=1.0): """LJ parameters.""" # Identify the unique atom types before setting atom_type_params = {} for atom in structure.atoms: if atom.type not in atom_type_params: atom_type_params[atom.type] = atom.atom_type # Set the hoomd parameters for self-interactions lj = hoomd.md.pair.lj(r_cut, nl) for name, atom_type in atom_type_params.items(): lj.pair_coeff.set( name, name, sigma=atom_type.sigma / ref_distance, epsilon=atom_type.epsilon / ref_energy, ) # Cross interactions, mixing rules, NBfixes all_atomtypes = sorted(atom_type_params.keys()) for a1, a2 in itertools.combinations_with_replacement(all_atomtypes, 2): nb_fix_info = atom_type_params[a1].nbfix.get(a2, None) # nb_fix_info = (rmin, eps, rmin14, eps14) if nb_fix_info is None: # No nbfix means use mixing rule to find cross-interaction if structure.combining_rule == 
"lorentz": sigma = ( atom_type_params[a1].sigma
""" WSGI config for kanban project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os # We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks # if running multiple sites in the same mod_wsgi process. To fix this, use # mod_wsgi daemon mode with each site in its own daemon process, or use # os.environ["DJANGO_SETTINGS_MODULE"] = "kanban.settings" os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kanban.settings")
# This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. from django.core.wsgi import get_wsgi_
application application = get_wsgi_application() # Apply WSGI middleware here. # from helloworld.wsgi import HelloWorldApplication # application = HelloWorldApplication(application)
#!/usr/bin/env python

# The really simple Python version of Qwt-5.0.0/examples/simple

# for debugging, requires: python configure.py --trace ...
if False:
    import sip
    sip.settracemask(0x3f)

import sys
import qt
import Qwt5 as Qwt
from Qwt5.anynumpy import *


class SimplePlot(Qwt.QwtPlot):
    """A QwtPlot showing sin(x) and cos(x) curves plus two reference markers."""

    def __init__(self, *args):
        """Build the plot: title, legend, axis titles, curves, and markers.

        Extra positional ``args`` are forwarded to ``Qwt.QwtPlot.__init__``
        (typically the parent widget).
        """
        Qwt.QwtPlot.__init__(self, *args)
        # make a QwtPlot widget
        self.setTitle('ReallySimpleDemo.py')
        self.insertLegend(Qwt.QwtLegend(), Qwt.QwtPlot.RightLegend)
        # set axis titles
        self.setAxisTitle(Qwt.QwtPlot.xBottom, 'x -->')
        self.setAxisTitle(Qwt.QwtPlot.yLeft, 'y -->')
        # insert a few curves
        cSin = Qwt.QwtPlotCurve('y = sin(x)')
        cSin.setPen(qt.QPen(qt.Qt.red))
        cSin.attach(self)
        cCos = Qwt.QwtPlotCurve('y = cos(x)')
        cCos.setPen(qt.QPen(qt.Qt.blue))
        cCos.attach(self)
        # make a Numeric array for the horizontal data
        x = arange(0.0, 10.0, 0.1)
        # initialize the data
        cSin.setData(x, sin(x))
        cCos.setData(x, cos(x))
        # insert a horizontal marker at y = 0
        mY = Qwt.QwtPlotMarker()
        mY.setLabel(Qwt.QwtText('y = 0'))
        mY.setLabelAlignment(qt.Qt.AlignRight | qt.Qt.AlignTop)
        mY.setLineStyle(Qwt.QwtPlotMarker.HLine)
        mY.setYValue(0.0)
        mY.attach(self)
        # insert a vertical marker at x = 2 pi
        mX = Qwt.QwtPlotMarker()
        mX.setLabel(Qwt.QwtText('x = 2 pi'))
        mX.setLabelAlignment(qt.Qt.AlignRight | qt.Qt.AlignTop)
        mX.setLineStyle(Qwt.QwtPlotMarker.VLine)
        mX.setXValue(2*pi)
        mX.attach(self)
        # replot
        self.replot()

    # __init__()

# class Plot


def make():
    """Create, size, and show a SimplePlot; return it so it stays referenced."""
    demo = SimplePlot()
    demo.resize(500, 300)
    demo.show()
    return demo

# make()


def main(args):
    """Run the Qt event loop for the demo; never returns (sys.exit)."""
    app = qt.QApplication(args)
    demo = make()
    app.setMainWidget(demo)
    sys.exit(app.exec_loop())

# main()


# Admire
if __name__ == '__main__':
    main(sys.argv)

# Local Variables: ***
# mode: python ***
# End: ***
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
                        unicode_literals, with_statement)

import os

from pants.util.dirutil import safe_mkdir_for


class ReproMixin(object):
  """Helper fixtures and assertions shared by Repro tests.

  Intended to be mixed into a unittest.TestCase subclass, since the
  assertion helpers rely on TestCase's assert* methods.
  """

  def add_file(self, root, path, content):
    """Create a file under ``root`` at relative ``path`` holding ``content``.

    :param str root: Root directory for path.
    :param str path: Path relative to root.
    :param str content: Content to write to file.
    """
    full_path = os.path.join(root, path)
    safe_mkdir_for(full_path)
    with open(full_path, 'w') as handle:
      handle.write(content)

  def assert_not_exists(self, root, path):
    """Fail the test unless nothing exists at ``path`` under ``root``.

    :param str root: Root directory of path.
    :param str path: Path relative to tar.gz.
    """
    self.assertFalse(os.path.exists(os.path.join(root, path)))

  def assert_file(self, root, path, expected_content=None):
    """Fail the test unless a regular file exists at ``path`` under ``root``.

    :param str root: Root directory of path.
    :param str path: Path relative to tar.gz.
    :param str expected_content: If given, also assert the file holds exactly this text.
    """
    full_path = os.path.join(root, path)
    self.assertTrue(os.path.isfile(full_path))
    if expected_content:
      with open(full_path, 'r') as handle:
        self.assertEqual(expected_content, handle.read())
# Python 2 demo script: builds a small XML tree with genxmlif and prints it.
import genxmlif
from genxmlif.xmlifODict import odict

# Use the ElementTree-backed implementation of the generic XML interface.
xmlIf = genxmlif.chooseXmlIf(genxmlif.XMLIF_ELEMENTTREE)
xmlTree = xmlIf.createXmlTree(None, "testTree", {"rootAttr1":"RootAttr1"})
xmlRootNode = xmlTree.getRootNode()
# odict preserves attribute insertion order, unlike a plain dict on py2.
myDict = odict( (("childTag1","123"), ("childTag2","123")) )
xmlRootNode.appendChild("childTag", myDict)
xmlRootNode.appendChild("childTag", {"childTag1":"123456", "childTag2":"123456"})
xmlRootNode.appendChild("childTag", {"childTag1":"123456789", "childTag3":"1234", "childTag2":"123456789"})
xmlRootNode.appendChild("childTag", {"childTag1":"1", "childTag2":"1"})
# Python 2 print statements: show the pretty-printed tree, then the tree and
# root node reprs.
print xmlTree.printTree(prettyPrint=1)
print xmlTree
print xmlTree.getRootNode()
from jsbuild.attrdict import AttrDict
from time import strftime


class Manifest(AttrDict):
    """An AttrDict whose string values are %-interpolated against the root
    manifest's ``_dict_`` mapping, and whose nested Manifests are linked back
    to their parent on first access.
    """

    def __init__(self, *args, **kwargs):
        # BUG FIX: the original called super(AttrDict, self).__init__, which
        # looks up __init__ *after* AttrDict in the MRO and therefore skips
        # AttrDict's own initialization entirely. super(Manifest, self) runs
        # the full chain as intended.
        super(Manifest, self).__init__(*args, **kwargs)
        self._buffer_ = None
        self._parent_ = None
        if '_dict_' not in self:  # idiomatic form of self.__contains__('_dict_')
            self['_dict_'] = {}
        # Build timestamp (YYYYMMDDHHMM as an int), available to string
        # interpolation as %(timestamp)s / %(timestamp)d.
        self['_dict_']['timestamp'] = int(strftime("%Y%m%d%H%M"))

    def __getitem__(self, name):
        """Return the stored item with manifest-specific post-processing.

        - Nested Manifest values get their ``_parent_`` set on first access so
          they can locate the root manifest later.
        - Plain-string values are %-interpolated against the root manifest's
          ``_dict_`` (the root is found by walking ``_parent_`` links).
        """
        item = super(Manifest, self).__getitem__(name)
        if isinstance(item, Manifest) and not item._parent_:
            item._parent_ = self
        elif isinstance(item, str):
            root = self
            while root._parent_:
                root = root._parent_
            item = item % root._dict_
        return item
import sys
import unittest

sys.path.append('../../')

import lib.event


class TestEvents(unittest.TestCase):
    """Exercise lib.event's subscribe decorator and call dispatch."""

    def setUp(self):
        # Reset the class-level result flags that the handlers below mutate.
        TestEvents.successful = False
        TestEvents.successful2 = False

    def test_subscribe(self):
        # A handler registered for 'test' runs when 'test' is called.
        @lib.event.subscribe('test')
        def on_test():
            TestEvents.successful = True

        lib.event.call('test')
        self.assertTrue(TestEvents.successful)

    def test_subscribe_with_params(self):
        # Keyword arguments passed to call() reach the handler.
        @lib.event.subscribe('test2')
        def on_test2(successful=False):
            TestEvents.successful = successful

        lib.event.call('test2', {'successful': True})
        self.assertTrue(TestEvents.successful)

    def test_subscribe_two_with_params(self):
        # Both handlers registered for the same event receive the arguments.
        @lib.event.subscribe('test3')
        def first_handler(successful=False):
            TestEvents.successful = successful

        @lib.event.subscribe('test3')
        def second_handler(successful=False):
            TestEvents.successful2 = successful

        lib.event.call('test3', {'successful': True})
        self.assertTrue(TestEvents.successful)
        self.assertTrue(TestEvents.successful2)


if __name__ == '__main__':
    unittest.main()
# -*- coding: utf-8 -*-
# import sqlite3 as sqlite
import sys
import uuid

from pysqlcipher3 import dbapi2 as sqlite


def main():
    """Interactively register a new data master in the encrypted database.

    Prompts for a unique name and a twice-entered password, then stores
    (name, password, uuid) in the ``data_master_system`` table of the
    SQLCipher-encrypted ``DataMasterSystem.db``. Exits with status -1 on a
    duplicate name or a password mismatch.
    """
    print("***************** Welcome to OSS DataMaster-Rigster System *******************")
    print("*                                                                            *")
    print("******************************************************************************")
    conn = sqlite.connect('DataMasterSystem.db')
    c = conn.cursor()
    # Unlock the SQLCipher-encrypted sqlite file with the database key.
    c.execute("PRAGMA key='data_master_system'")
    try:
        c.execute('create table data_master_system (data_master_name text, password text, unique_id text)')
    except sqlite.OperationalError:
        # Table already exists; nothing to create.
        pass
    unique_id = uuid.uuid1()
    data_masters = c.execute("select * from data_master_system").fetchall()
    # Ask for the name once, then reject duplicates among existing masters.
    data_master_name = input("[*] Input your data master name:\n")
    for col in data_masters:
        # NOTE(review): the comparison strips whitespace but the raw
        # (unstripped) name is what gets stored below — kept as-is to
        # preserve existing behavior; confirm whether stripping on insert
        # is desired.
        if data_master_name.strip() == col[0]:
            print("[!] Data Master Name has existed!")
            print("******************************************************************************")
            print("*                                                                            *")
            print("*********************** Data Master Rigster Is Failed! ***********************")
            sys.exit(-1)
    # BUG FIX: the original only prompted for a password when the table was
    # empty; with existing rows, `password` was never defined and the insert
    # below raised NameError. Always prompt and confirm the password.
    password = input("[*] Input your password:\n")
    repeat_password = input("[*] Input your password again:\n")
    if password.strip() != repeat_password.strip():
        print("[!] Password is not equal to RePassword!")
        print("******************************************************************************")
        print("*                                                                            *")
        print("*********************** Data Master Rigster Is Failed! ***********************")
        sys.exit(-1)
    # SECURITY FIX: use DB-API parameter substitution instead of building the
    # SQL statement with str.format (SQL injection via user-supplied values).
    c.execute('insert into data_master_system values (?, ?, ?)',
              (data_master_name, password, str(unique_id)))
    conn.commit()
    c.close()
    print("******************************************************************************")
    print("*                                                                            *")
    print("********************* Data Master Rigster Is Successful! *********************")


if __name__ == '__main__':
    main()
            # (continuation of a threaded-predict test whose definition starts
            # before this chunk)
            return cp.all(copied_predt == inplace_predt)

        for i in range(10):
            run_threaded_predict(X, rows, predict_df)

        base_margin = cudf.Series(rng.randn(rows))
        self.run_inplace_base_margin(booster, dtrain, X, base_margin)

    # SHAP contributions summed over features must reproduce the raw margin.
    @given(strategies.integers(1, 10), tm.dataset_strategy, shap_parameter_strategy)
    @settings(deadline=None)
    def test_shap(self, num_rounds, dataset, param):
        param.update({"predictor": "gpu_predictor", "gpu_id": 0})
        param = dataset.set_params(param)
        dmat = dataset.get_dmat()
        bst = xgb.train(param, dmat, num_rounds)
        test_dmat = xgb.DMatrix(dataset.X, dataset.y, dataset.w, dataset.margin)
        shap = bst.predict(test_dmat, pred_contribs=True)
        margin = bst.predict(test_dmat, output_margin=True)
        assume(len(dataset.y) > 0)
        # Sum over the last axis (features incl. bias) == margin prediction.
        assert np.allclose(np.sum(shap, axis=len(shap.shape) - 1), margin, 1e-3, 1e-3)

    # Same consistency property for pairwise SHAP interaction values: summing
    # over the last two axes must reproduce the margin.
    @given(strategies.integers(1, 10), tm.dataset_strategy, shap_parameter_strategy)
    @settings(deadline=None, max_examples=20)
    def test_shap_interactions(self, num_rounds, dataset, param):
        param.update({"predictor": "gpu_predictor", "gpu_id": 0})
        param = dataset.set_params(param)
        dmat = dataset.get_dmat()
        bst = xgb.train(param, dmat, num_rounds)
        test_dmat = xgb.DMatrix(dataset.X, dataset.y, dataset.w, dataset.margin)
        shap = bst.predict(test_dmat, pred_interactions=True)
        margin = bst.predict(test_dmat, output_margin=True)
        assume(len(dataset.y) > 0)
        assert np.allclose(np.sum(shap, axis=(len(shap.shape) - 1, len(shap.shape) - 2)),
                           margin, 1e-3, 1e-3)

    # SHAP on categorical data: both GPU and CPU predictors must satisfy the
    # sum-to-margin property on a model trained with categorical splits.
    def test_shap_categorical(self):
        X, y = tm.make_categorical(100, 20, 7, False)
        Xy = xgb.DMatrix(X, y, enable_categorical=True)
        booster = xgb.train({"tree_method": "gpu_hist"}, Xy, num_boost_round=10)

        booster.set_param({"predictor": "gpu_predictor"})
        shap = booster.predict(Xy, pred_contribs=True)
        margin = booster.predict(Xy, output_margin=True)
        np.testing.assert_allclose(
            np.sum(shap, axis=len(shap.shape) - 1), margin, rtol=1e-3
        )

        booster.set_param({"predictor": "cpu_predictor"})
        shap = booster.predict(Xy, pred_contribs=True)
        margin = booster.predict(Xy, output_margin=True)
        np.testing.assert_allclose(
            np.sum(shap, axis=len(shap.shape) - 1), margin, rtol=1e-3
        )

    # Leaf-index predictions must match between GPU and CPU predictors.
    def test_predict_leaf_basic(self):
        gpu_leaf = run_predict_leaf('gpu_predictor')
        cpu_leaf = run_predict_leaf('cpu_predictor')
        np.testing.assert_equal(gpu_leaf, cpu_leaf)

    def run_predict_leaf_booster(self, param, num_rounds, dataset):
        # Helper: train on `dataset`, then compare pred_leaf output of the CPU
        # and GPU predictors on the same matrix.
        param = dataset.set_params(param)
        m = dataset.get_dmat()
        booster = xgb.train(param, dtrain=dataset.get_dmat(), num_boost_round=num_rounds)
        booster.set_param({'predictor': 'cpu_predictor'})
        cpu_leaf = booster.predict(m, pred_leaf=True)

        booster.set_param({'predictor': 'gpu_predictor'})
        gpu_leaf = booster.predict(m, pred_leaf=True)

        np.testing.assert_equal(cpu_leaf, gpu_leaf)

    @given(predict_parameter_strategy, tm.dataset_strategy)
    @settings(deadline=None)
    def test_predict_leaf_gbtree(self, param, dataset):
        param['booster'] = 'gbtree'
        param['tree_method'] = 'gpu_hist'
        self.run_predict_leaf_booster(param, 10, dataset)

    @given(predict_parameter_strategy, tm.dataset_strategy)
    @settings(deadline=None)
    def test_predict_leaf_dart(self, param, dataset):
        param['booster'] = 'dart'
        param['tree_method'] = 'gpu_hist'
        self.run_predict_leaf_booster(param, 10, dataset)

    # Models trained on hypothesis-generated categorical frames: the training
    # RMSE reported by evals_result must match an RMSE recomputed from predict.
    @pytest.mark.skipif(**tm.no_sklearn())
    @pytest.mark.skipif(**tm.no_pandas())
    @given(df=data_frames([column('x0', elements=strategies.integers(min_value=0, max_value=3)),
                           column('x1', elements=strategies.integers(min_value=0, max_value=5))],
                          index=range_indexes(min_size=20, max_size=50)))
    @settings(deadline=None)
    def test_predict_categorical_split(self, df):
        from sklearn.metrics import mean_squared_error

        df = df.astype('category')
        x0, x1 = df['x0'].to_numpy(), df['x1'].to_numpy()
        y = (x0 * 10 - 20) + (x1 - 2)

        dtrain = xgb.DMatrix(df, label=y, enable_categorical=True)
        params = {
            'tree_method': 'gpu_hist', 'predictor': 'gpu_predictor',
            'max_depth': 3, 'learning_rate': 1.0, 'base_score': 0.0,
            'eval_metric': 'rmse'
        }

        eval_history = {}
        bst = xgb.train(params, dtrain, num_boost_round=5,
                        evals=[(dtrain, 'train')], verbose_eval=False,
                        evals_result=eval_history)

        pred = bst.predict(dtrain)
        rmse = mean_squared_error(y_true=y, y_pred=pred, squared=False)
        np.testing.assert_almost_equal(rmse, eval_history['train']['rmse'][-1], decimal=5)

    # For dart boosters, inplace_predict and DMatrix predict must agree across
    # CPU/GPU predictors (binary and multiclass objectives).
    @pytest.mark.skipif(**tm.no_cupy())
    @pytest.mark.parametrize("n_classes", [2, 3])
    def test_predict_dart(self, n_classes):
        from sklearn.datasets import make_classification
        import cupy as cp
        n_samples = 1000
        X_, y_ = make_classification(
            n_samples=n_samples, n_informative=5, n_classes=n_classes
        )
        X, y = cp.array(X_), cp.array(y_)
        Xy = xgb.DMatrix(X, y)
        if n_classes == 2:
            params = {
                "tree_method": "gpu_hist",
                "booster": "dart",
                "rate_drop": 0.5,
                "objective": "binary:logistic"
            }
        else:
            params = {
                "tree_method": "gpu_hist",
                "booster": "dart",
                "rate_drop": 0.5,
                "objective": "multi:softprob",
                "num_class": n_classes
            }

        booster = xgb.train(params, Xy, num_boost_round=32)
        # predictor=auto
        inplace = booster.inplace_predict(X)
        copied = booster.predict(Xy)

        cpu_inplace = booster.inplace_predict(X_)
        booster.set_param({"predictor": "cpu_predictor"})
        cpu_copied = booster.predict(Xy)

        copied = cp.array(copied)
        cp.testing.assert_allclose(cpu_inplace, copied, atol=1e-6)
        cp.testing.assert_allclose(cpu_copied, copied, atol=1e-6)
        cp.testing.assert_allclose(inplace, copied, atol=1e-6)

        booster.set_param({"predictor": "gpu_predictor"})
        inplace = booster.inplace_predict(X)
        copied = booster.predict(Xy)

        copied = cp.array(copied)
        cp.testing.assert_allclose(inplace, copied, atol=1e-6)

    # inplace_predict must give identical results for every cupy dtype the
    # data can be stored in.
    # NOTE(review): this method is truncated at the end of this chunk; its
    # final assertion lives beyond the visible source.
    @pytest.mark.skipif(**tm.no_cupy())
    def test_dtypes(self):
        import cupy as cp
        rows = 1000
        cols = 10
        rng = cp.random.RandomState(1994)
        orig = rng.randint(low=0, high=127, size=rows * cols).reshape(
            rows, cols
        )
        y = rng.randint(low=0, high=127, size=rows)
        dtrain = xgb.DMatrix(orig, label=y)
        booster = xgb.train({"tree_method": "gpu_hist"}, dtrain)

        predt_orig = booster.inplace_predict(orig)
        # all primitive types in numpy
        for dtype in [
            cp.signedinteger,
            cp.byte,
            cp.short,
            cp.intc,
            cp.int_,
            cp.longlong,
            cp.unsignedinteger,
            cp.ubyte,
            cp.ushort,
            cp.uintc,
            cp.uint,
            cp.ulonglong,
            cp.floating,
            cp.half,
            cp.single,
            cp.double,
        ]:
            X = cp.array(orig, dtype=dtype)
            predt = booster.inplace_predict(X)
            cp.testing.assert_allclose(predt, predt_orig)

        # boolean
        orig = cp.random.binomial(1, 0.5, size=rows * cols).reshape(
            rows, cols
        )
        predt_orig = booster.inplace_predict(orig)
        for dtype in [cp.bool8, cp.bool_]:
            X = cp.array(orig, dtype=dtype)
            predt = booster.inplace_predict(X)
# Sphinx build configuration for the cellulist documentation.
#
# cellulist documentation build configuration file, created by
# sphinx-quickstart on Tue Jul  9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
import os

# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)

# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)

# Imported after the sys.path tweak above so the in-tree package is found.
import cellulist

# -- General configuration ---------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'cellulist'
copyright = u'2015, Elliot Marsden'

# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = cellulist.__version__
# The full version, including alpha/beta/rc tags.
release = cellulist.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False

# -- Options for HTML output -------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a
# theme further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as
# html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon
# of the docs.  This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it.  The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'cellulistdoc'


# -- Options for LaTeX output ------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index', 'cellulist.tex',
     u'cellulist Documentation',
     u'Elliot Marsden', 'manual'),
]

# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output ------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'cellulist',
     u'cellulist Documentation',
     [u'Elliot Marsden'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output ----------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', 'cellulist',
     u'cellulist Documentation',
     u'Elliot Marsden',
     'cellulist',
     'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True
# Expose the kiezkasse proposal factory as a pytest fixture via
# pytest-factoryboy (registering ProposalFactory makes a `proposal_factory`
# fixture — and, by convention, a model instance fixture — available to tests
# that collect this module).
from pytest_factoryboy import register

from meinberlin.test.factories import kiezkasse

register(kiezkasse.ProposalFactory)
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.

from wtforms.fields import StringField
from wtforms.validators import DataRequired
from wtforms_sqlalchemy.fields import QuerySelectField

from indico.core.db.sqlalchemy.descriptions import RenderMode
from indico.modules.events.sessions.models.sessions import Session
from indico.modules.events.tracks.models.groups import TrackGroup
from indico.util.i18n import _
from indico.web.forms.base import IndicoForm, generated_data
from indico.web.forms.fields import IndicoMarkdownField


class TrackForm(IndicoForm):
    """Form to create/edit a track of an event.

    Requires an ``event`` keyword argument; the track-group and
    default-session choices are restricted to that event.
    """

    title = StringField(_('Title'), [DataRequired()])
    code = StringField(_('Code'))
    track_group = QuerySelectField(_('Track group'), default='', allow_blank=True, get_label='title',
                                   description=_('Select a track group to which this track should belong'))
    default_session = QuerySelectField(_('Default session'), default='', allow_blank=True, get_label='title',
                                       description=_('Indico will preselect this session whenever an abstract is '
                                                     'accepted for the track'))
    description = IndicoMarkdownField(_('Description'), editor=True)

    def __init__(self, *args, **kwargs):
        # `event` is consumed here so it is not passed on to IndicoForm.
        event = kwargs.pop('event')
        super().__init__(*args, **kwargs)
        # Limit both select fields to objects belonging to this event.
        self.default_session.query = Session.query.with_parent(event)
        self.track_group.query = TrackGroup.query.with_parent(event)


class ProgramForm(IndicoForm):
    """Form to edit an event's call-for-abstracts program text."""

    program = IndicoMarkdownField(_('Program'), editor=True, mathjax=True)

    @generated_data
    def program_render_mode(self):
        # The program is always stored as markdown.
        return RenderMode.markdown


class TrackGroupForm(IndicoForm):
    """Form to create/edit a track group."""

    title = StringField(_('Title'), [DataRequired()])
    description = IndicoMarkdownField(_('Description'), editor=True)
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import sys
import copy

from ansible import constants as C
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils.network_common import load_provider
from ansible.module_utils.nxos import nxos_provider_spec

try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()


class ActionModule(_ActionModule):
    """Action plugin preparing NX-OS connections before running the module.

    Depending on the provider's transport it either opens a persistent
    network_cli connection (and exposes its socket via ``ansible_socket``)
    or fills in nxapi provider defaults for the module to use.
    """

    def run(self, tmp=None, task_vars=None):
        # Merge task args with the nxos provider spec into a provider dict.
        provider = load_provider(nxos_provider_spec, self._task.args)
        transport = provider['transport'] or 'cli'

        # CLI transport requires connection=local (the persistent connection
        # is managed by this plugin, not by the play's connection plugin).
        if self._play_context.connection != 'local' and transport == 'cli':
            return dict(
                failed=True,
                msg='invalid connection specified, expected connection=local, '
                    'got %s' % self._play_context.connection
            )

        display.vvvv('connection transport is %s' % transport, self._play_context.remote_addr)

        if transport == 'cli':
            # Build a play context for the network_cli connection, preferring
            # provider values and falling back to the play's own settings.
            pc = copy.deepcopy(self._play_context)
            pc.connection = 'network_cli'
            pc.network_os = 'nxos'
            pc.remote_addr = provider['host'] or self._play_context.remote_addr
            pc.port = int(provider['port'] or self._play_context.port or 22)
            pc.remote_user = provider['username'] or self._play_context.connection_user
            pc.password = provider['password'] or self._play_context.password
            pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
            pc.timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)

            display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
            connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)

            # Start (or attach to) the persistent connection; its unix socket
            # path is handed to the module via task_vars below.
            socket_path = connection.run()
            display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
            if not socket_path:
                return {'failed': True,
                        'msg': 'unable to open shell. Please see: ' +
                               'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}

            # make sure we are in the right cli context which should be
            # enable mode and not config module
            rc, out, err = connection.exec_command('prompt()')
            while str(out).strip().endswith(')#'):
                display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
                connection.exec_command('exit')
                rc, out, err = connection.exec_command('prompt()')

            task_vars['ansible_socket'] = socket_path
        else:
            # nxapi transport: normalize the provider dict, filling defaults
            # for anything the task did not specify.
            provider['transport'] = 'nxapi'
            if provider.get('host') is None:
                provider['host'] = self._play_context.remote_addr

            if provider.get('port') is None:
                if provider.get('use_ssl'):
                    provider['port'] = 443
                else:
                    provider['port'] = 80

            if provider.get('timeout') is None:
                provider['timeout'] = C.PERSISTENT_COMMAND_TIMEOUT

            if provider.get('username') is None:
                provider['username'] = self._play_context.connection_user

            if provider.get('password') is None:
                provider['password'] = self._play_context.password

            if provider.get('use_ssl') is None:
                provider['use_ssl'] = False

            if provider.get('validate_certs') is None:
                provider['validate_certs'] = True

            self._task.args['provider'] = provider

        # make sure a transport value is set in args
        self._task.args['transport'] = transport

        # Delegate the actual module execution to the normal action plugin.
        result = super(ActionModule, self).run(tmp, task_vars)
        return result
import _plotly_utils.basevalidators


class TypesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `typesrc` attribute of scatterternary.marker.gradient.

    NOTE: this module follows plotly's code-generated validator layout; keep
    it structurally identical to the generator's output.
    """

    def __init__(
        self, plotly_name="typesrc", parent_name="scatterternary.marker.gradient", **kwargs
    ):
        # edit_type defaults to "none" but may be overridden via kwargs.
        super(TypesrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs
        )
#!/usr/bin/env python
import os.path
import sys

# Version file managment scheme and graceful degredation for
# setuptools borrowed and adapted from GitPython.
try:
    from setuptools import setup, find_packages

    # Silence pyflakes
    assert setup
    assert find_packages
except ImportError:
    # Fall back to bootstrapping setuptools via ez_setup when it is missing.
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup, find_packages

if sys.version_info < (2, 6):
    raise RuntimeError('Python versions < 2.6 are not supported.')


# Utility function to read the contents of short files.
def read(fname):
    # fname is resolved relative to this setup.py's directory.
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()

# The single source of truth for the version is the wal_e/VERSION file.
VERSION = read(os.path.join('wal_e', 'VERSION')).strip()

# requirements.txt drives install_requires; blank lines and comments are
# skipped.
install_requires = [
    l for l in read('requirements.txt').split('\n')
    if l and not l.startswith('#')]

if sys.version_info < (2, 7):
    # argparse is in the stdlib only from 2.7 on.
    install_requires.append('argparse>=0.8')

setup(
    name="wal-e",
    version=VERSION,
    packages=find_packages(),
    install_requires=install_requires,

    # metadata for upload to PyPI
    author="The WAL-E Contributors",
    author_email="wal-e@googlegroups.com",
    maintainer="Daniel Farina",
    maintainer_email="daniel@heroku.com",
    description="Continuous Archiving for Postgres",
    long_description=read('README.rst'),
    classifiers=['Topic :: Database',
                 'Topic :: System :: Archiving',
                 'Topic :: System :: Recovery Tools'],
    platforms=['any'],
    license="BSD",
    keywords=("postgres postgresql database backup archive archiving s3 aws "
              "openstack swift wabs azure wal shipping"),
    url="https://github.com/wal-e/wal-e",

    # Include the VERSION file
    package_data={'wal_e': ['VERSION']},

    # install
    entry_points={'console_scripts': ['wal-e=wal_e.cmd:main']})