text stringlengths 38 1.54M |
|---|
# Get times of all water flows for experiments and match them to the flows
# from the events file.
#
# For each experiment in the flow-data dictionary, walk the GPM time series
# and record the timestamp at which each distinct flow event starts (GPM
# rising above 10 after having been at/below the threshold), then write the
# start times to <output_location>/<experiment>.csv.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pickle
from itertools import cycle
import os as os
from pylab import *  # NOTE(review): star import kept for compatibility; discouraged
import datetime

data_location = '../2_Data/'
info_location = '../3_Info/'
output_location = '../3_Info/Flow_Times/'

# exist_ok avoids the check-then-create race of os.path.exists + makedirs.
os.makedirs(output_location, exist_ok=True)

flow_times = {}

# Load flow data from dictionaries; context managers close the files
# deterministically (the original leaked the handles).
with open(data_location + 'all_flow_data.dict', 'rb') as fh:
    all_flow_data = pickle.load(fh)
with open(info_location + 'Events/all_exp_events.dict', 'rb') as fh:
    all_event_data = pickle.load(fh)

for exp in all_flow_data:
    print(exp)
    started_flow = False
    # Collect start timestamps in a plain list: DataFrame.append was
    # deprecated in pandas 1.4 and removed in 2.0, and per-row appends
    # were quadratic anyway.
    start_times = []
    for val in all_flow_data[exp].index.values:
        gpm = all_flow_data[exp]['GPM'][val]
        if int(gpm) < 10:
            # Flow dropped below the threshold: the current event ended.
            started_flow = False
        if started_flow:
            # Still inside an already-recorded flow event; skip samples.
            continue
        if gpm > 10:
            # Rising edge: a new flow event starts at this timestamp.
            start_times.append(val)
            started_flow = True
        else:
            started_flow = False
    flow_times[exp] = pd.DataFrame(start_times, columns=['Time'])
    flow_times[exp].to_csv(output_location + exp + '.csv')
|
##
## junxonauth.py
## Author : <shekhar@inf.in>
## Started on Fri May 14 11:28:18 2010 Shashishekhar S
## $Id$
##
## Copyright (C) 2010 INFORMEDIA
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
from django.contrib.auth.models import User, check_password
from checkin.models import Subscriber
from django.http import HttpRequest
import sys
class JunxonBackend:
    """Django authentication backend backed by the ``checkin.Subscriber`` table.

    A subscriber authenticates with their e-mail address as the username and
    their ``accesskey`` as the password.  On first successful login a matching
    ``django.contrib.auth`` ``User`` row is created on the fly.
    """

    def authenticate(self, username=None, password=None, request=None):
        """Return the authenticated ``User`` or ``None``.

        ``username`` is matched against ``Subscriber.email`` and ``password``
        against ``Subscriber.accesskey``.
        NOTE(review): access keys are stored and compared in plain text and
        mirrored into ``User.password`` unhashed — confirm this is intended.
        """
        try:
            subchk = Subscriber.objects.get(email=username)
            passwd_valid = (password == subchk.accesskey)
            # Remote address looked up for the (currently disabled) IP check;
            # guard against request being None (the original crashed then).
            rem_ipaddress = request.META.get('REMOTE_ADDR') if request is not None else None
            # if (subchk.ipaddress != rem_ipaddress):
            #     passwd_valid = False
            # Python 3 print() — the original Python 2 ``print >>sys.stderr``
            # statement is a SyntaxError on Python 3.
            print("User authenticated: ", username, file=sys.stderr)
        except Subscriber.DoesNotExist:
            print("User not found: ", username, file=sys.stderr)
            return None
        if passwd_valid:
            try:
                user = User.objects.get(username=username)
            except User.DoesNotExist:
                # First login: mirror the subscriber into the auth User table.
                user = User(username=username, password=subchk.accesskey)
                user.is_staff = False
                user.is_superuser = False
                user.first_name = subchk.name
                user.is_active = True
                user.email = username
                user.save()
            return user
        return None

    def get_user(self, user_id):
        """Return the ``User`` with primary key ``user_id``, or ``None``."""
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None
|
import re
import barcode
import logging
import operator
from django.core.files.storage import default_storage
from django.utils import timezone
from django.db import transaction
from django.conf import settings
from contextlib import suppress
from django.urls import reverse
from django.db.models import Q
from django.db import models
from polymorphic.models import PolymorphicModel
from common.models import EstadoMixin, UltimaModificacionMixin
from .managers import ProgramaManager, PruebaQuerySet
from .utils import nuevo_radicado
from . import managers
from . import enums
logger = logging.getLogger(__name__)
class Programa(models.Model):
    """Model storing the programs that exist in a laboratory."""

    nombre = models.CharField(max_length=100)
    codigo = models.CharField('código', max_length=100)

    # managers
    objects = ProgramaManager()

    class Meta:
        verbose_name = 'programa'
        verbose_name_plural = 'programas'

    def __str__(self):
        return self.nombre.capitalize()

    def save(self, *args, **kwargs):
        # Normalise name and code to lower case before persisting.
        self.nombre = self.nombre.lower()
        self.codigo = self.codigo.lower()
        super().save(*args, **kwargs)
class Area(models.Model):
    """Model storing the areas belonging to each program."""

    nombre = models.CharField(max_length=100)
    programa = models.ForeignKey(Programa, related_name='areas', on_delete=models.CASCADE)
    # Optional environmental limits used for temperature/humidity alerts.
    temperatura_minima = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True)
    temperatura_maxima = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True)
    humedad_minima = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True)
    humedad_maxima = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True)
    oculto = models.BooleanField(default=False)

    class Meta:
        verbose_name = 'area'
        verbose_name_plural = 'areas'

    def __str__(self):
        return self.nombre.title()

    def save(self, *args, **kwargs):
        # Normalise the name to lower case before persisting.
        self.nombre = self.nombre.lower()
        super().save(*args, **kwargs)
class RegistroTemperaturaArea(models.Model):
    """Model recording the daily temperature/humidity readings of the areas."""

    # opciones
    CENTIGRADOS = 'C'
    FARHENHEIT = 'F'
    UNIDADES = (
        (CENTIGRADOS, 'Celsius'),
        (FARHENHEIT, 'Farhenheit'),
    )

    area = models.ForeignKey(Area, related_name='registros', on_delete=models.CASCADE)
    fecha_registro = models.DateTimeField()
    temperatura = models.DecimalField(max_digits=7, decimal_places=3)
    humedad = models.DecimalField(max_digits=7, decimal_places=3)
    unidad = models.CharField(max_length=1, choices=UNIDADES)
    observaciones = models.TextField()
    registrado_por = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)

    class Meta:
        ordering = ['-fecha_registro']
        verbose_name = 'registro de temperatura area'
        verbose_name_plural = 'registros de temperatura area'

    def __str__(self):
        return 'Registro de %(temperatura)f°%(unidad)s y %(humedad)f de Humedad para %(area)s' % {
            'temperatura': self.temperatura, 'unidad': self.unidad,
            'area': self.area.nombre, 'humedad': self.humedad
        }

    def save(self, *args, **kwargs):
        # Normalise observations to lower case before persisting.
        self.observaciones = self.observaciones.lower()
        super(RegistroTemperaturaArea, self).save(*args, **kwargs)

    def alerta(self):
        """Return True when the reading falls outside the area's configured range.

        Readings are compared in Celsius; Fahrenheit values are converted
        first.  Area limits are optional (``null=True``): a missing limit
        cannot trigger an alert (the original crashed with TypeError when
        comparing a Decimal against None).
        """
        if self.unidad == self.CENTIGRADOS:
            temperatura = self.temperatura
        else:
            from equipos.utils import convertidor_unidad_temperatura
            temperatura = convertidor_unidad_temperatura(self.temperatura, self.unidad, self.CENTIGRADOS)
        area = self.area
        if area.temperatura_maxima is not None and temperatura > area.temperatura_maxima:
            return True
        if area.temperatura_minima is not None and temperatura < area.temperatura_minima:
            return True
        if area.humedad_maxima is not None and self.humedad > area.humedad_maxima:
            return True
        if area.humedad_minima is not None and self.humedad < area.humedad_minima:
            return True
        return False
class ResultadoPrueba(EstadoMixin, UltimaModificacionMixin):
    """Model storing the possible results of the tests performed in the laboratory."""

    nombre = models.CharField(max_length=100)

    class Meta:
        verbose_name = 'resultado'
        verbose_name_plural = 'resultados'

    def __str__(self):
        return self.nombre.capitalize()

    def save(self, *args, **kwargs):
        # Normalise the name to lower case before persisting.
        self.nombre = self.nombre.lower()
        super().save(*args, **kwargs)
class Metodo(EstadoMixin, UltimaModificacionMixin):
    """Model storing the methods used in the tests."""

    nombre = models.CharField(max_length=100)
    objeto = models.CharField(max_length=200)

    class Meta:
        ordering = ['nombre']
        verbose_name = 'metodo'
        verbose_name_plural = 'metodos'

    def __str__(self):
        return self.nombre.capitalize()

    def save(self, *args, **kwargs):
        # Normalise name and object to lower case before persisting.
        self.nombre = self.nombre.lower()
        self.objeto = self.objeto.lower()
        super().save(*args, **kwargs)
class Prueba(EstadoMixin, UltimaModificacionMixin):
    """Model storing the tests performed in each area."""
    nombre = models.CharField(max_length=100)
    area = models.ForeignKey(Area, related_name='pruebas', on_delete=models.CASCADE)
    duracion = models.DurationField('duración')
    resultados = models.ManyToManyField(ResultadoPrueba, related_name='pruebas')
    metodos = models.ManyToManyField(Metodo, related_name='pruebas')
    valores_referencia = models.CharField(max_length=100, verbose_name='valores de referencia', blank=True)
    # managers
    objects = PruebaQuerySet.as_manager()

    class Meta:
        ordering = ['nombre']
        verbose_name = 'prueba'
        verbose_name_plural = 'pruebas'

    def __str__(self):
        return self.nombre.upper()

    def save(self, *args, **kwargs):
        # Normalise the name, persist, then keep the water programme's
        # pending work in sync with this test's active/inactive state.
        # NOTE(review): PruebasRealizadas and Recepcion are defined later in
        # this module; the references resolve at call time, not import time.
        self.nombre = self.nombre.lower()
        super(Prueba, self).save(*args, **kwargs)
        aguas = Programa.objects.aguas()
        if self.area.programa == aguas:
            if self.estado == EstadoMixin.INACTIVO:
                # Test deactivated: drop its still-unstarted (conservation
                # stage) realisations.
                pruebas = PruebasRealizadas.objects.filter(estado=PruebasRealizadas.CONSERVACION, prueba=self)
                pruebas.delete()
            else:
                # Test active: attach it to every unfinished, non-rejected
                # water reception whose samples cover this test's area but
                # do not already include the test.
                ingresos = Recepcion.objects.filter(programa=aguas)
                ingresos = ingresos.exclude(Q(estado=Recepcion.RECHAZADO) | Q(estado_analista=Recepcion.RECHAZADO))
                ingresos = ingresos.exclude(reportes__confirmado=True)
                ingresos = ingresos.exclude(muestras__pruebas=self)
                for ingreso in ingresos:
                    if ingreso.cumplimiento < 100:
                        for muestra in ingreso.muestras.non_polymorphic().all():
                            if muestra.areas.filter(id=self.area.id).exists():
                                PruebasRealizadas.objects.create(muestra=muestra, prueba=self)
class MotivoRechazo(EstadoMixin, UltimaModificacionMixin):
    """Model storing the reasons for which a sample arriving at the laboratory may be rejected."""

    motivo = models.CharField(max_length=250)

    class Meta:
        verbose_name = 'motivo de rechazo'
        verbose_name_plural = 'motivos de rechazo'

    def __str__(self):
        return self.motivo.capitalize()

    def save(self, *args, **kwargs):
        # Normalise the reason to lower case before persisting.
        self.motivo = self.motivo.lower()
        super().save(*args, **kwargs)
class Recepcion(models.Model):
    """Reception record for the samples arriving at a laboratory.

    Tracks the receptionist's and the analyst's accept/reject decisions,
    the generated filing number ("radicado"), barcode generation, and
    derived state/progress of the reception.
    """
    # opciones
    ACEPTADO = 'A'
    RECHAZADO = 'R'
    ESTADOS_RECEPCIONISTA = (
        (ACEPTADO, 'Aceptado para verificación'),
        (RECHAZADO, 'Rechazado'),
    )
    ESTADOS_ANALISTA = (
        (ACEPTADO, 'Aceptado'),
        (RECHAZADO, 'Rechazado'),
    )
    programa = models.ForeignKey(Programa, on_delete=models.CASCADE)
    indice_radicado = models.BigIntegerField()
    fecha_radicado = models.DateTimeField('fecha de radicación', auto_now_add=True)
    fecha_recepcion = models.DateTimeField('fecha de recepción')
    recepcionista = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='ingresos_recepcionista', on_delete=models.CASCADE)
    estado = models.CharField(max_length=1, blank=True, choices=ESTADOS_RECEPCIONISTA)
    observaciones = models.TextField(blank=True)
    motivo_rechazo = models.ManyToManyField(MotivoRechazo, verbose_name='motivos de rechazo', blank=True, related_name='rechazos_recepcionista')
    confirmada = models.BooleanField(default=False)
    fecha_confirmacion = models.DateTimeField('fecha de confirmación', blank=True, null=True)
    confirmado_por = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='+', on_delete=models.SET_NULL, blank=True, null=True)
    comentario = models.CharField(max_length=300, blank=True,
                                  help_text='Por favor indique que acción tomo con el ingreso respecto al rechazo del analista.')
    # Confirmation by the analyst.
    estado_analista = models.CharField('estado', max_length=1, blank=True, choices=ESTADOS_ANALISTA)
    observaciones_analista = models.TextField('observaciones', blank=True)
    motivo_rechazo_analista = models.ManyToManyField(MotivoRechazo, verbose_name='motivos de rechazo', blank=True,
                                                     related_name='rechazos_analista')
    fecha_estado_analista = models.DateTimeField('fecha de estado', blank=True, null=True)
    analista = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, related_name='ingresos_analista', on_delete=models.SET_NULL)
    responsable_tecnico = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, related_name='ingresos_responsable_tecnico', on_delete=models.SET_NULL)
    fecha_envio_resultados = models.DateTimeField(blank=True, null=True)
    # managers
    objects = managers.RecepcionManager()

    class Meta:
        ordering = ['-indice_radicado']
        verbose_name = 'recepción'
        verbose_name_plural = 'recepciones'

    def __str__(self):
        return self.radicado

    def save(self, *args, **kwargs):
        # Lower-case the free-text fields before persisting.
        if self.observaciones:
            self.observaciones = self.observaciones.lower()
        if self.comentario:
            self.comentario = self.comentario.lower()
        if self.observaciones_analista:
            self.observaciones_analista = self.observaciones_analista.lower()
        if not self.id:
            # First save: allocate the filing index inside a transaction so
            # concurrent receptions do not get the same number.
            with transaction.atomic():
                self.indice_radicado = nuevo_radicado(alternativo=self.usar_radicado_alternativo)
                super(Recepcion, self).save(*args, **kwargs)
        else:
            super(Recepcion, self).save(*args, **kwargs)
        logger.debug(default_storage.location)
        # Generate the reception's Code39 barcode image once, keyed by the
        # filing number ("radicado").
        if not default_storage.exists('barcode/' + self.radicado + '.svg'):
            logger.debug('no existe')
            Code39 = barcode.get_barcode_class('code39')
            codigo = Code39(str(self.id), add_checksum=False)
            codigo.save(
                settings.MEDIA_ROOT + '/barcode/' + self.radicado,
                options={'module_width': 0.5, 'module_height': 17, 'text': self.radicado, 'write_text': False}
            )

    @property
    def digitado_por(self):
        # Who entered the reception: the confirming user if present,
        # otherwise the receptionist.
        return self.confirmado_por or self.recepcionista

    @property
    def aceptado_analista(self):
        return self.estado_analista == self.ACEPTADO

    @property
    def radicado(self):
        # Filing number: "<reception year>-<sequential index>".
        return '{0}-{1}'.format(self.fecha_recepcion.year, self.indice_radicado)

    @property
    def usar_radicado_alternativo(self):
        """Return True when the reception belongs to the EEID, EEDD or
        Citohistopatologia programmes (these use an alternative filing
        sequence), otherwise False."""
        return self.programa.codigo in [enums.ProgramaEnum.EEDD.value, enums.ProgramaEnum.EEID.value, enums.ProgramaEnum.CITOHISTOPATOLOGIA.value]

    @property
    def usa_resultado_numerico(self):
        return self.is_programa_ambientes

    @property
    def has_concepto(self):
        return self.is_programa_ambientes

    @property
    def has_valores_referencia(self):
        return self.is_programa_ambientes

    @property
    def can_actualizar_estado_todas_pruebas(self):
        return self.is_programa_ambientes

    @property
    def is_programa_ambientes(self):
        # "Environment" programmes: water, food, alcoholic beverages.
        return self.programa.codigo in [enums.ProgramaEnum.AGUAS.value, enums.ProgramaEnum.ALIMENTOS.value, enums.ProgramaEnum.BEBIDAS_ALCOHOLICAS.value]

    @property
    def tipo(self):
        # Sample-type label for the first sample.  If the concrete sample
        # class implements .tipo that value wins; otherwise probe the known
        # one-to-one subclass links one by one.
        # NOTE(review): Agua, Entomologia, EEDD and EEID are not imported in
        # the visible header — presumably defined/imported elsewhere in this
        # module; verify.
        muestra = self.muestras.first()
        with suppress(Exception):
            return muestra.tipo
        try:
            muestra.agua
            return 'agua'
        except Agua.DoesNotExist:
            pass
        try:
            muestra.entomologia
            return 'entomologia'
        except Entomologia.DoesNotExist:
            pass
        try:
            muestra.eedd
            return 'evaluacion externa desempeño directo'
        except EEDD.DoesNotExist:
            pass
        try:
            muestra.eeid
            return 'evaluacion externa desempeño indirecto'
        except EEID.DoesNotExist:
            pass
        try:
            muestra.bebidaalcoholica
            return 'bebidas_alcoholicas'
        except:  # NOTE(review): bare except swallows everything, not just DoesNotExist
            pass
        try:
            muestra.alimento
            return 'alimentos'
        except:  # NOTE(review): bare except swallows everything, not just DoesNotExist
            pass

    @property
    def areas(self):
        # Distinct, non-hidden areas covered by this reception's samples.
        pruebas = Prueba.objects.filter(muestra__in=self.muestras.non_polymorphic().all())
        # para resultado_emitido
        return Area.objects.filter(pruebas__in=pruebas, oculto=False).distinct()

    @property
    def muestra(self):
        return self.muestras.first()

    @property
    def solicitante(self):
        muestra = self.muestras.first()
        return muestra.solicitante

    @property
    def informe(self):
        return self.reportes.first()

    @property
    def fecha_proceso(self):
        return self.fecha_estado_analista

    @property
    def cumplimiento(self):
        """Progress percentage: arithmetic mean of the progress of all the
        performed tests (0 when there are none)."""
        muestras = self.muestras.non_polymorphic().all()
        pruebas = PruebasRealizadas.objects.filter(muestra__in=muestras)
        cumplimiento = 0
        for prueba in pruebas:
            cumplimiento += prueba.cumplimiento
        try:
            cumplimiento = round(cumplimiento / pruebas.count(), 2)
        except ZeroDivisionError:
            cumplimiento = 0
        return cumplimiento

    @property
    def estado_(self):
        """
        Return the reception's traceability state: RESULTADO, EN_APROBACION,
        EN_CURSO, PENDIENTE or RECHAZADA, checked in that priority order.
        """
        if self.reportes.aprobados().exists() and self.confirmada:
            return enums.EstadoIngresoEnum.RESULTADO.value
        if self.reportes.no_aprobados().exists() and self.confirmada:
            return enums.EstadoIngresoEnum.EN_APROBACION.value
        elif self.estado == self.ACEPTADO and self.estado_analista == self.ACEPTADO:
            return enums.EstadoIngresoEnum.EN_CURSO.value
        elif self.estado == self.ACEPTADO and self.estado_analista == '':
            return enums.EstadoIngresoEnum.PENDIENTE.value
        return enums.EstadoIngresoEnum.RECHAZADA.value

    @property
    def estado_resultado(self):
        """State of the results: no result, result not sent, or result sent."""
        if self.estado_ == enums.EstadoIngresoEnum.RESULTADO.value:
            if not self.fecha_envio_resultados:
                return enums.EstadoResultadoEnum.RESULTADO_NO_ENVIADO
            else:
                return enums.EstadoResultadoEnum.RESULTADO_ENVIADO
        return enums.EstadoResultadoEnum.SIN_RESULTADO

    @property
    def url_estado_ingreso(self):
        """Return the URL where the state of the reception is defined
        (None for programmes not listed below)."""
        if self.programa.codigo == enums.ProgramaEnum.CLINICO.value:
            return reverse('trazabilidad:estado_muestra_clinica', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.COVID19.value:
            return reverse('covid19:estado_muestra', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.AGUAS.value:
            return reverse('trazabilidad:estado_muestra_agua', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.ENTOMOLOGIA.value:
            return reverse('trazabilidad:estado_muestra_entomologia', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.CITOHISTOPATOLOGIA.value:
            return reverse('trazabilidad:estado_muestra_citohistopatologia', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.BANCO_SANGRE.value:
            return reverse('trazabilidad:estado_muestra_banco_sangre', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.EEDD.value:
            return reverse('trazabilidad:estado_muestra_eedd', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.EEID.value:
            return reverse('trazabilidad:estado_muestra_eeid', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.ALIMENTOS.value:
            return reverse('alimentos:estado_muestra', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.BEBIDAS_ALCOHOLICAS.value:
            return reverse('bebidas_alcoholicas:estado_muestra', args=(self.id, ))

    @property
    def url_editar_ingreso(self):
        """Return the URL where the reception's information is edited
        (None for programmes not listed below)."""
        if self.programa.codigo == enums.ProgramaEnum.CLINICO.value:
            return reverse('trazabilidad:actualizar_muestra_clinica', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.COVID19.value:
            return reverse('covid19:actualizar_muestra', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.AGUAS.value:
            return reverse('trazabilidad:actualizar_muestra_agua', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.ENTOMOLOGIA.value:
            return reverse('trazabilidad:actualizar_muestra_entomologia', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.CITOHISTOPATOLOGIA.value:
            return reverse('trazabilidad:actualizar_muestra_citohistopatologia', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.BANCO_SANGRE.value:
            return reverse('trazabilidad:actualizar_muestra_banco_sangre', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.EEDD.value:
            return reverse('trazabilidad:actualizar_muestra_eedd', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.EEID.value:
            return reverse('trazabilidad:actualizar_muestra_eeid', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.ALIMENTOS.value:
            return reverse('alimentos:actualizar_muestra', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.BEBIDAS_ALCOHOLICAS.value:
            return reverse('bebidas_alcoholicas:actualizar_muestra', args=(self.id, ))

    @property
    def url_radicado_ingreso(self):
        """Return the URL of the reception's filing ("radicado") view
        (None for programmes not listed below)."""
        if self.programa.codigo == enums.ProgramaEnum.CLINICO.value:
            return reverse('trazabilidad:radicado_muestra_clinica', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.COVID19.value:
            return reverse('covid19:radicado_muestra', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.AGUAS.value:
            return reverse('trazabilidad:radicado_muestra_agua', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.ENTOMOLOGIA.value:
            return reverse('trazabilidad:radicado_muestra_entomologia', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.CITOHISTOPATOLOGIA.value:
            return reverse('trazabilidad:radicado_muestra_citohistopatologia', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.BANCO_SANGRE.value:
            return reverse('trazabilidad:radicado_muestra_banco_sangre', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.EEDD.value:
            return reverse('trazabilidad:radicado_muestra_eedd', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.EEID.value:
            return reverse('trazabilidad:radicado_muestra_eeid', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.ALIMENTOS.value:
            return reverse('alimentos:radicado_muestra', args=(self.id,))
        elif self.programa.codigo == enums.ProgramaEnum.BEBIDAS_ALCOHOLICAS.value:
            return reverse('bebidas_alcoholicas:radicado_muestra', args=(self.id, ))

    def crear_codigos_muestra(self):
        """Create the barcode image for each sample of the reception.

        Environment-programme samples get "<radicado>-<n>" as barcode text,
        others just "<radicado>".
        """
        muestras = self.muestras.non_polymorphic().all()
        for index, muestra in enumerate(muestras):
            i = index + 1
            if default_storage.exists('barcode/{0}.svg'.format(muestra)):
                logger.debug('existe')
            else:
                logger.debug('no existe')
                if self.programa in Programa.objects.ambientes():
                    texto = '{0}-{1}'.format(self.radicado, i)
                else:
                    texto = '{0}'.format(self.radicado)
                Code39 = barcode.get_barcode_class('code39')
                codigo = Code39(str(muestra.id), add_checksum=False)
                options = {'module_width': 0.5, 'module_height': 17, 'text': texto, 'write_text': False}
                codigo.save('{0}/barcode/{1}'.format(settings.MEDIA_ROOT, muestra), options=options)


# Backwards-compatible alias: the model used to be called "Ingreso".
Ingreso = Recepcion
class Muestra(PolymorphicModel):
    """Polymorphic base model for the samples entered into a laboratory."""

    registro_recepcion = models.ForeignKey(Recepcion, verbose_name='ingreso', related_name='muestras', on_delete=models.CASCADE)
    pruebas = models.ManyToManyField(Prueba, through='PruebasRealizadas', blank=True)
    observacion = models.TextField('Observacion', max_length=500)
    temp_ingreso = models.FloatField(blank=True, null=True, verbose_name='temperatura de muestra ºC')
    temp_procesamiento = models.FloatField(blank=True, null=True, verbose_name='temperatura de procesamiento ºC')

    class Meta:
        ordering = ['id']
        verbose_name = 'muestra'
        verbose_name_plural = 'muestras'

    def __str__(self):
        return f'{self.registro_recepcion.radicado}-{self.id}'

    @property
    def areas(self):
        # Distinct, non-hidden areas covered by this sample's tests.
        return Area.objects.filter(pruebas__in=self.pruebas.all(), oculto=False).distinct()

    @property
    def solicitante(self):
        # Concrete sample subclasses must provide the requesting party.
        raise NotImplementedError

    @property
    def tipo(self):
        # Concrete sample subclasses must provide their type identifier.
        raise NotImplementedError
class PruebasRealizadas(models.Model):
    """Through-model storing the tests performed on a sample, with their
    processing state (conservation -> pre-analysis -> analysis -> result)."""
    # opciones
    CONSERVACION = 'C'
    PRE_ANALISIS = 'P'
    ANALISIS = 'A'
    RESULTADO = 'R'
    ESTADOS = (
        (CONSERVACION, 'Conservación'),
        (PRE_ANALISIS, 'Pre-Analisis'),
        (ANALISIS, 'Analisis'),
        (RESULTADO, 'Prueba finalizada'),
    )
    muestra = models.ForeignKey(Muestra, on_delete=models.CASCADE)
    prueba = models.ForeignKey(Prueba, on_delete=models.CASCADE)
    estado = models.CharField(max_length=1, choices=ESTADOS, default=CONSERVACION)
    fecha_pre_analisis = models.DateTimeField(blank=True, null=True)
    ultima_modificacion = models.DateTimeField(blank=True, null=True)
    observacion_semaforo = models.CharField(
        'observaciones',
        max_length=200,
        blank=True,
        help_text='Indique porque ha durado mas tiempo del establecido para terminar la prueba.'
    )
    resultados = models.ManyToManyField(ResultadoPrueba, blank=True, related_name='resultados')
    resultado_numerico = models.CharField(verbose_name='resultado numerico', max_length=150, blank=True)
    metodo = models.ForeignKey(Metodo, blank=True, null=True, on_delete=models.SET_NULL)
    concepto = models.CharField('concepto', max_length=255, blank=True)
    objects = managers.PruebasRealizadasManager()

    class Meta:
        ordering = ['prueba']
        verbose_name = 'prueba realizada'
        verbose_name_plural = 'pruebas realizadas'
        unique_together = ('muestra', 'prueba')

    @property
    def cumplimiento(self):
        """Progress is derived from the current state of the test.
        Conservación --> 0%
        Pre-Analisis --> 33%
        Analisis --> 66%
        Resultado --> 100%
        """
        # NOTE(review): assumes estado is always one of the four choices;
        # any other value would raise UnboundLocalError below.
        if self.estado == self.CONSERVACION:
            cumplimiento = 0
        elif self.estado == self.PRE_ANALISIS:
            cumplimiento = 33
        elif self.estado == self.ANALISIS:
            cumplimiento = 66
        elif self.estado == self.RESULTADO:
            cumplimiento = 100
        return cumplimiento

    @property
    def color_semaforo(self):
        """Traffic-light colour based on the test's configured duration and
        the time elapsed since pre-analysis started.
        rojo --> process has taken longer than the test's duration.
        amarillo --> process has taken 2/3 of the test's duration.
        verde --> process has taken 1/3 of the test's duration.
        Returns None while the test is still in conservation.
        """
        if self.estado != self.CONSERVACION:
            if self.estado == self.RESULTADO:
                # Finished tests are measured up to their last modification,
                # not the current time.
                ahora = self.ultima_modificacion
            else:
                ahora = timezone.now()
            # NOTE(review): assumes fecha_pre_analisis was set when the test
            # left conservation (see actualizar_estado); None would raise here.
            delta = ahora - self.fecha_pre_analisis
            dos_tecios_duracion = self.prueba.duracion * (2 / 3)
            if delta > self.prueba.duracion:
                return 'rojo'
            elif delta >= dos_tecios_duracion:
                return 'amarillo'
            else:
                return 'verde'
        else:
            return None

    @property
    def con_resultado(self):
        return self.estado == self.RESULTADO

    @property
    def en_analisis(self):
        return self.estado == self.ANALISIS

    def actualizar_estado(self):
        """Advance the test to its next state and persist the change.

        Conservation -> pre-analysis (stamping fecha_pre_analisis),
        pre-analysis -> analysis, analysis -> result.  Always updates
        ultima_modificacion and saves.
        """
        if self.estado == self.CONSERVACION:
            self.estado = self.PRE_ANALISIS
            self.fecha_pre_analisis = timezone.now()
        elif self.estado == self.PRE_ANALISIS:
            self.estado = self.ANALISIS
        elif self.estado == self.ANALISIS:
            self.estado = self.RESULTADO
        self.ultima_modificacion = timezone.now()
        self.save()
class Eps(EstadoMixin, UltimaModificacionMixin):
    """Model storing the EPS (health providers) a patient may be associated with."""

    nombre = models.CharField(max_length=50)

    class Meta:
        verbose_name = 'eps'
        verbose_name_plural = 'eps'

    def __str__(self):
        return self.nombre.capitalize()

    def save(self, *args, **kwargs):
        # Normalise the name to lower case before persisting.
        self.nombre = self.nombre.lower()
        super().save(*args, **kwargs)
class Paciente(UltimaModificacionMixin):
    """Model storing the patients from whom samples have been collected."""

    # Age units.
    HORAS = 'H'
    MESES = 'M'
    DIAS = 'D'
    ANOS = 'A'
    UNIDADES_MEDIDA = (
        (ANOS, 'Años'),
        (MESES, 'Meses'),
        (DIAS, 'Dias'),
        (HORAS, 'Horas'),
    )
    # Identification document types.
    CEDULA_EXTRANJERIA = 'CE'
    TARJETA_IDENTIDAD = 'TI'
    REGISTRO_CIVIL = 'RC'
    PASAPORTE = 'P'
    CEDULA = 'CC'
    CODIGO = 'CO'
    CEDULACOD = 'CD'
    SIN_IDENTIFICACION = 'NN'
    TIPO_IDENTIFICACION = (
        (CEDULA, 'CC'),
        (TARJETA_IDENTIDAD, 'TI'),
        (REGISTRO_CIVIL, 'RC'),
        (PASAPORTE, 'P'),
        (CEDULA_EXTRANJERIA, 'CE'),
        (CODIGO, 'COD'),
        (CEDULACOD, 'CC/COD'),
        (SIN_IDENTIFICACION, 'SIN DATOS'),
    )
    FEMENINO = 'F'
    MASCULINO = 'M'
    GENEROS = (
        (FEMENINO, 'Femenino'),
        (MASCULINO, 'Masculino'),
    )

    email = models.EmailField(blank=True)
    nombre = models.CharField('nombres', max_length=100, blank=True)
    apellido = models.CharField('apellidos', max_length=100, blank=True)
    direccion = models.CharField('dirección', max_length=100, blank=True)
    identificacion = models.CharField('documento id', max_length=50)
    tipo_identificacion = models.CharField(max_length=2, choices=TIPO_IDENTIFICACION)
    edad = models.IntegerField(blank=True, null=True)
    tipo_edad = models.CharField(max_length=1, choices=UNIDADES_MEDIDA, blank=True)
    fecha_nacimiento = models.DateField(verbose_name='fecha de nacimiento', blank=True, null=True)
    eps = models.ForeignKey(Eps, verbose_name='eps-plan', related_name='pacientes', on_delete=models.CASCADE, blank=True, null=True)
    sexo = models.CharField(max_length=1, choices=GENEROS, blank=True)
    objects = managers.PacienteManager()

    class Meta:
        verbose_name = 'paciente'
        verbose_name_plural = 'pacientes'

    def __str__(self):
        return f'{self.nombre.title()} {self.apellido.title()}'

    def save(self, *args, **kwargs):
        # Normalise names and address to lower case before persisting.
        self.nombre = self.nombre.lower()
        self.apellido = self.apellido.lower()
        self.direccion = self.direccion.lower()
        super().save(*args, **kwargs)
class Epsa(UltimaModificacionMixin):
    """Model storing the water-supply companies (EPSA)."""

    # EPSA types.
    EPSA = 'EP'
    ACUEDUCTO_VEREDAL = 'AV'
    NO_EPSA = 'NE'
    EPSA_SIN_REGISTRO = 'ES'
    TIPOS = (
        (EPSA, 'Empresa prestadora de servicio de acueducto'),
        (ACUEDUCTO_VEREDAL, 'Acueducto veredal'),
        (NO_EPSA, 'No tiene EPSA'),
        (EPSA_SIN_REGISTRO, 'EPSA sin registro conocido'),
    )

    nombre = models.CharField('nombre epsa', max_length=100)
    direccion = models.CharField('dirección', max_length=100)
    rup = models.CharField('n. rup', max_length=50)
    nit = models.CharField('n. nit', max_length=50)
    tipo = models.CharField('tipo de epsa', max_length=2, choices=TIPOS)

    class Meta:
        verbose_name = 'epsa'
        verbose_name_plural = 'epsas'

    def __str__(self):
        return self.nombre.capitalize()

    def save(self, *args, **kwargs):
        # Normalise name and address to lower case before persisting.
        self.nombre = self.nombre.lower()
        self.direccion = self.direccion.lower()
        super().save(*args, **kwargs)
class Departamento(UltimaModificacionMixin):
    """Model storing department (state) data."""

    nombre = models.CharField(max_length=100)
    codigo = models.IntegerField()

    class Meta:
        verbose_name = 'departamento'
        verbose_name_plural = 'departamentos'

    def __str__(self):
        return self.nombre.title()

    def save(self, *args, **kwargs):
        # Normalise the name to lower case before persisting.
        self.nombre = self.nombre.lower()
        super().save(*args, **kwargs)
class Municipio(UltimaModificacionMixin):
    """Model storing the municipalities of a department."""

    nombre = models.CharField(max_length=100)
    departamento = models.ForeignKey(Departamento, related_name='municipios', on_delete=models.CASCADE)
    codigo = models.IntegerField()
    email = models.EmailField()

    class Meta:
        verbose_name = 'municipio'
        verbose_name_plural = 'municipios'

    def __str__(self):
        return self.nombre.title()

    def save(self, *args, **kwargs):
        # Normalise the name to lower case before persisting.
        self.nombre = self.nombre.lower()
        super().save(*args, **kwargs)
class Poblado(UltimaModificacionMixin):
    """Model storing the settlements of a municipality."""

    nombre = models.CharField(max_length=100)
    municipio = models.ForeignKey(Municipio, related_name='poblados', on_delete=models.CASCADE)
    codigo = models.IntegerField()
    epsa = models.ForeignKey(Epsa, related_name='poblados', on_delete=models.CASCADE)

    class Meta:
        verbose_name = 'poblado'
        verbose_name_plural = 'poblados'

    def __str__(self):
        return self.nombre.title()

    def save(self, *args, **kwargs):
        # Normalise the name to lower case before persisting.
        self.nombre = self.nombre.lower()
        super().save(*args, **kwargs)
class Institucion(UltimaModificacionMixin):
    """Model storing the institutions where a sample is collected."""

    nombre = models.CharField(max_length=100)
    municipio = models.ForeignKey(Municipio, related_name='instituciones', on_delete=models.CASCADE)

    class Meta:
        verbose_name = 'institución'
        verbose_name_plural = 'instituciones'

    def __str__(self):
        return f'{self.nombre.capitalize()} ({self.municipio.departamento}-{self.municipio})'

    def save(self, *args, **kwargs):
        # Normalise the name to lower case before persisting.
        self.nombre = self.nombre.lower()
        super().save(*args, **kwargs)
class TipoMuestra(EstadoMixin, UltimaModificacionMixin):
    """Model storing the types of sample that arrive at the laboratory."""

    nombre = models.CharField(max_length=50)
    programas = models.ManyToManyField(Programa)
    objects = managers.TipoMuestraManager()

    class Meta:
        verbose_name = 'tipo de muestra'
        verbose_name_plural = 'tipos de muestra'

    def __str__(self):
        return self.nombre.upper()

    def save(self, *args, **kwargs):
        # Normalise the name to lower case before persisting.
        self.nombre = self.nombre.lower()
        super().save(*args, **kwargs)
class Clinica(Muestra):
    """Clinical sample received by a laboratory; extends Muestra."""
    # Yes/No choices, rendered in Spanish.
    SI = True
    NO = False
    SI_NO_OPCIONES = (
        (SI, 'Si'),
        (NO, 'No'),
    )
    paciente = models.ForeignKey(Paciente, related_name='muestras', on_delete=models.CASCADE)
    tipo_muestras = models.ManyToManyField(TipoMuestra, verbose_name='tipo de muestra', blank=True, related_name='tipos_muestra')
    institucion = models.ForeignKey(Institucion, blank=True, null=True, on_delete=models.SET_NULL)
    municipio = models.ForeignKey(Municipio, related_name='muestras_clinicas', on_delete=models.CASCADE)
    barrio = models.CharField(max_length=100)
    embarazada = models.BooleanField(default=False, choices=SI_NO_OPCIONES)

    class Meta:
        verbose_name = 'muestra clinica'
        verbose_name_plural = 'muestras clinicas'

    @property
    def solicitante(self):
        # The requesting party for a clinical sample is its institution.
        return self.institucion

    @property
    def tipo(self):
        return enums.TipoMuestraEnum.CLINICA.value
class CategoriaAgua(UltimaModificacionMixin):
    """Catalog of water categories."""
    nombre = models.CharField(max_length=50)

    class Meta:
        verbose_name = 'categoria de agua'
        verbose_name_plural = 'categorias de agua'

    def __str__(self):
        return self.nombre.capitalize()

    def save(self, *args, **kwargs):
        # Normalize to lowercase before persisting.
        self.nombre = self.nombre.lower()
        # Zero-argument super(), consistent with the rest of the module.
        super().save(*args, **kwargs)
class TipoAgua(UltimaModificacionMixin):
    """Water type of a water sample, grouped under a CategoriaAgua."""
    nombre = models.CharField(max_length=50)
    categoria = models.ForeignKey(CategoriaAgua, on_delete=models.CASCADE)

    class Meta:
        verbose_name = 'tipo de agua'
        verbose_name_plural = 'tipos de agua'

    def __str__(self):
        return "{0} - {1}".format(self.nombre.capitalize(), self.categoria.nombre)

    def save(self, *args, **kwargs):
        # Normalize to lowercase before persisting.
        self.nombre = self.nombre.lower()
        # Zero-argument super(), consistent with the rest of the module.
        super().save(*args, **kwargs)
class Temperatura(UltimaModificacionMixin):
    """Possible temperature values for water samples (stored as free text)."""
    valor = models.CharField(max_length=50)

    class Meta:
        verbose_name = 'temperatura'
        verbose_name_plural = 'temperaturas'

    def __str__(self):
        return self.valor.capitalize()

    def save(self, *args, **kwargs):
        # Normalize to lowercase before persisting.
        self.valor = self.valor.lower()
        # Zero-argument super(), consistent with the rest of the module.
        super().save(*args, **kwargs)
class Solicitante(EstadoMixin, UltimaModificacionMixin):
    """Party requesting the collection of water samples."""
    nombre = models.CharField(max_length=200)
    direccion = models.CharField('dirección', max_length=100)
    telefono = models.IntegerField()

    class Meta:
        verbose_name = 'solicitante'
        verbose_name_plural = 'solicitantes'

    def __str__(self):
        return self.nombre.capitalize()

    def save(self, *args, **kwargs):
        # Normalize to lowercase before persisting.
        self.nombre = self.nombre.lower()
        # Zero-argument super(), consistent with the rest of the module.
        super().save(*args, **kwargs)
class InformacionAgua(models.Model):
    """Shared metadata common to all water samples of one collection."""
    poblado = models.ForeignKey(Poblado, on_delete=models.CASCADE)
    fecha_recoleccion = models.DateField('fecha de recolección', blank=True, null=True)
    tipo_agua = models.ForeignKey(TipoAgua, verbose_name='tipo de agua', blank=True, null=True, on_delete=models.SET_NULL)
    responsable_toma = models.CharField('responsable de la toma', max_length=100, blank=True)
    temperatura = models.ForeignKey(Temperatura, blank=True, null=True, on_delete=models.SET_NULL)
    solicitante = models.ForeignKey(Solicitante, blank=True, null=True, on_delete=models.SET_NULL)

    class Meta:
        verbose_name = 'información general de las muestras de agua'
        verbose_name_plural = 'información general de las muestras de agua'

    def __str__(self):
        return self.poblado.epsa.nombre

    def save(self, *args, **kwargs):
        # The field is optional (blank=True): only normalize when present.
        if self.responsable_toma:
            self.responsable_toma = self.responsable_toma.lower()
        # Zero-argument super(), consistent with the rest of the module.
        super().save(*args, **kwargs)
class MotivoAnalisis(models.Model):
    """Reason for analyzing a water sample."""
    nombre = models.CharField(max_length=100)

    class Meta:
        verbose_name = 'motivo de analisis'
        verbose_name_plural = 'motivos de analisis'

    def __str__(self):
        return self.nombre.capitalize()

    def save(self, *args, **kwargs):
        # Normalize to lowercase before persisting.
        self.nombre = self.nombre.lower()
        # Zero-argument super(), consistent with the rest of the module.
        super().save(*args, **kwargs)
class DescripcionPunto(EstadoMixin, UltimaModificacionMixin):
    """Description of a water-sample collection point."""
    nombre = models.CharField(max_length=100)

    class Meta:
        verbose_name = 'descripción del punto de toma'
        # Fixed Spanish plural spelling (was 'descripciónes').
        verbose_name_plural = 'descripciones de los puntos de toma'

    def __str__(self):
        return self.nombre.capitalize()

    def save(self, *args, **kwargs):
        # Normalize to lowercase before persisting.
        self.nombre = self.nombre.lower()
        # Zero-argument super(), consistent with the rest of the module.
        super().save(*args, **kwargs)
class FuenteAbastecimiento(EstadoMixin, UltimaModificacionMixin):
    """Supply source of a water sample."""
    nombre = models.CharField(max_length=100)

    class Meta:
        verbose_name = 'fuente de abastecimiento'
        verbose_name_plural = 'fuentes de abastecimiento'

    def __str__(self):
        return self.nombre.capitalize()

    def save(self, *args, **kwargs):
        # Normalize to lowercase before persisting.
        self.nombre = self.nombre.lower()
        # Zero-argument super(), consistent with the rest of the module.
        super().save(*args, **kwargs)
class LugarPunto(EstadoMixin, UltimaModificacionMixin):
    """Place of the collection point where water samples are taken."""
    nombre = models.CharField(max_length=100)

    class Meta:
        verbose_name = 'lugar del punto de toma'
        verbose_name_plural = 'lugares de los puntos de toma'

    def __str__(self):
        return self.nombre.capitalize()

    def save(self, *args, **kwargs):
        # Normalize to lowercase before persisting.
        self.nombre = self.nombre.lower()
        # Zero-argument super(), consistent with the rest of the module.
        super().save(*args, **kwargs)
class CodigoPunto(EstadoMixin, UltimaModificacionMixin):
    """Coded collection point within a village where water samples are taken."""
    # Yes/No choices, rendered in Spanish.
    SI = True
    NO = False
    SI_NO_OPCIONES = (
        (SI, 'Si'),
        (NO, 'No'),
    )
    codigo = models.CharField('código', max_length=20)
    direccion = models.CharField('dirección', max_length=100)
    lugar_toma = models.ForeignKey(LugarPunto, verbose_name='lugar del punto de toma', on_delete=models.CASCADE)
    descripcion = models.ForeignKey(DescripcionPunto, verbose_name='descripción punto de toma', on_delete=models.CASCADE)
    fuente_abastecimiento = models.ForeignKey(FuenteAbastecimiento, verbose_name='fuente de abastecimiento', on_delete=models.CASCADE)
    punto_intradomiciliario = models.BooleanField(choices=SI_NO_OPCIONES)
    poblado = models.ForeignKey(Poblado, on_delete=models.CASCADE)

    class Meta:
        verbose_name = 'codigo del punto'
        verbose_name_plural = 'codigos de los puntos'

    def __str__(self):
        return self.codigo.lower()

    def save(self, *args, **kwargs):
        # Normalize both user-entered strings to lowercase before persisting.
        self.codigo = self.codigo.lower()
        self.direccion = self.direccion.lower()
        # Zero-argument super(), consistent with the rest of the module.
        super().save(*args, **kwargs)
class PuntajeRiesgo(UltimaModificacionMixin):
    """
    Risk score attached to a single test (prueba), used to compute the IRCA.
    Only tests belonging to the water program may carry a score.
    """
    prueba = models.OneToOneField(Prueba, related_name='puntaje_riesgo', on_delete=models.CASCADE)
    puntaje = models.FloatField()

    class Meta:
        verbose_name = 'Puntaje Riesgo'
        verbose_name_plural = 'Puntajes de Riesgo'

    def __str__(self):
        return 'Puntaje IRCA {:.2f} para {}'.format(self.puntaje, self.prueba)

    def save(self, *args, **kwargs):
        # Enforce the water-program invariant at save time.
        if self.prueba.area.programa != Programa.objects.aguas():
            # Local import — presumably to avoid a circular import; TODO confirm.
            from django.db import IntegrityError
            raise IntegrityError('Prueba debe ser de Aguas, pero es de {}'.format(self.prueba.area.programa))
        super().save(*args, **kwargs)
class NivelRiesgo(UltimaModificacionMixin):
    """Named risk level covering an inclusive IRCA range [inicio, fin]."""
    inicio = models.FloatField()
    fin = models.FloatField()
    nivel = models.CharField(max_length=255)

    class Meta:
        verbose_name = 'Nivel de Riesgo'
        verbose_name_plural = 'Niveles de Riesgo'

    def __str__(self):
        # Positional formatting, same rendered text as before.
        return '{}: de {:.2f} a {:.2f}'.format(self.nivel, self.inicio, self.fin)

    def save(self, *args, **kwargs):
        # Levels are stored uppercase for uniform display.
        self.nivel = self.nivel.upper()
        super().save(*args, **kwargs)
class Agua(Muestra):
    """Water sample received by a laboratory; extends Muestra."""
    # Yes/No choices, rendered in Spanish.
    SI = True
    NO = False
    SI_NO_OPCIONES = (
        (SI, 'Si'),
        (NO, 'No'),
    )
    informacion_general = models.ForeignKey(InformacionAgua, on_delete=models.CASCADE)
    motivo_analisis = models.ForeignKey(MotivoAnalisis, verbose_name='motivo del analisis', blank=True, null=True, on_delete=models.SET_NULL)
    hora_toma = models.TimeField('hora de toma', blank=True, null=True)
    codigo_punto = models.ForeignKey(CodigoPunto, verbose_name='cod. del punto', blank=True, null=True, on_delete=models.SET_NULL)
    # NOTE(review): NullBooleanField is deprecated in modern Django
    # (use BooleanField(null=True)); left as-is to avoid a migration here.
    concertado = models.NullBooleanField('concertado')
    irca = models.FloatField(null=True, blank=True)

    class Meta:
        verbose_name = 'muestra de agua'
        verbose_name_plural = 'muestras de agua'

    @property
    def solicitante(self):
        return self.informacion_general.solicitante

    def calcular_irca(self):
        """Compute the IRCA (water-quality risk index) as a percentage.

        For each performed test with a risk score, the reference value is
        parsed (operator, single value, range, or organoleptic 'No ...');
        failing tests add their score. Returns irca/total * 100, or 0 when
        no scored tests exist.
        """
        _table = {
            '>': operator.ge, '<': operator.le, '<=': operator.le,
            '>=': operator.ge, '=<': operator.le, '=>': operator.ge
        }
        pruebas = self.pruebasrealizadas_set.filter(
            # flat=True (was flat=1) — same behavior, documented keyword type.
            prueba__in=PuntajeRiesgo.objects.all().values_list('prueba__id', flat=True)
        )

        def digit(rg, gr='digit'):
            # Parse a named regex group as float, accepting comma decimals.
            return float(rg.group(gr).replace(',', '.'))

        irca = 0
        total = pruebas.aggregate(suma=models.Sum('prueba__puntaje_riesgo__puntaje'))['suma'] or 0
        regex = re.compile(r'(?P<operator>=?(<|>)=?\s*)?(?P<digit>\d+(,|\.)?\d*)\w*(\s*-\s*(?P<range_to>\d+(,|\.)?\d*))?|(?P<organoleptico>[nN]{1}[oO]{1}\s*[a-zA-Z]*)')
        for prueba in pruebas:
            resultado = prueba.resultado_numerico
            referencia = prueba.prueba.valores_referencia
            match_resultado = regex.match(resultado)
            match_referencia = regex.match(referencia)
            if not match_referencia:
                # Organoleptic references ('No ...'): any 'No ...' result fails.
                if match_resultado and match_resultado.group('organoleptico') is not None:
                    irca += prueba.prueba.puntaje_riesgo.puntaje
                else:
                    regex_aceptable = re.compile(r'[aA]{1}[cC]{1}[eE]{1}[pP]{1}[tT]{1}[aA]{1}[bB]{1}[lL]{1}[eE]{1}')
                    if regex_aceptable.match(referencia) is None:
                        print('No se calcula el IRCA para: {}({}) con el resultado: {}'.format(prueba.prueba, referencia, resultado))
                continue
            groups = match_referencia.groupdict()
            valor_referencia = digit(match_referencia) if match_referencia.group('digit') else 0
            result = digit(match_resultado) if match_resultado else None
            # BUGFIX: was `if not result`, which also skipped legitimate 0.0
            # results and could under-count the IRCA for '>=' references.
            if result is None:
                continue
            if groups.get('operator') is not None:
                function = _table[groups.get('operator').strip()]
                if not function(result, valor_referencia):
                    irca += prueba.prueba.puntaje_riesgo.puntaje
            elif groups.get('range_to') is not None:
                _range = (valor_referencia, digit(match_referencia, 'range_to'))
                if result > _range[1] or result < _range[0]:
                    irca += prueba.prueba.puntaje_riesgo.puntaje
            elif groups.get('digit') is not None:
                if result >= valor_referencia:
                    irca += prueba.prueba.puntaje_riesgo.puntaje
            else:
                print("No matches found for '{}'".format(referencia))
        try:
            return (irca / total) * 100
        except ZeroDivisionError:
            return 0

    def get_clasificacion_irca(self):
        """Return the NivelRiesgo name matching this sample's IRCA."""
        irca = self.irca if self.irca is not None else self.calcular_irca()
        try:
            return NivelRiesgo.objects.get(inicio__lte=irca, fin__gte=irca).nivel
        except NivelRiesgo.DoesNotExist:
            raise NotImplementedError('Nivel de riesgo para IRCA en rango de "{}" no fue encontrado.'.format(irca))
class ResponsableRecoleccion(UltimaModificacionMixin):
    """Person responsible for collecting entomology samples."""
    nombres = models.CharField(max_length=200)
    apellidos = models.CharField(max_length=200)

    class Meta:
        verbose_name = 'responsable de recolección'
        verbose_name_plural = 'responsables de recolección'

    def __str__(self):
        return '{0} {1}'.format(self.nombres.title(), self.apellidos.title())

    def save(self, *args, **kwargs):
        # Normalize both names to lowercase before persisting.
        self.nombres = self.nombres.lower()
        self.apellidos = self.apellidos.lower()
        # Zero-argument super(), consistent with the rest of the module.
        super().save(*args, **kwargs)
class LugarRecoleccion(UltimaModificacionMixin):
    """Collection site for entomology samples."""
    nombre = models.CharField(max_length=250)
    municipio = models.ForeignKey(Municipio, on_delete=models.CASCADE)

    class Meta:
        verbose_name = 'lugar de recolección'
        verbose_name_plural = 'lugares de recolección'

    def __str__(self):
        return '{0} ({1} - {2})'.format(self.nombre.title(), self.municipio.departamento, self.municipio)

    def save(self, *args, **kwargs):
        # Normalize to lowercase before persisting.
        self.nombre = self.nombre.lower()
        # Zero-argument super(), consistent with the rest of the module.
        super().save(*args, **kwargs)
class TipoVigilancia(EstadoMixin, UltimaModificacionMixin):
    """Surveillance type for entomology samples."""
    nombre = models.CharField(max_length=100)

    class Meta:
        verbose_name = 'tipo de vigilancia'
        verbose_name_plural = 'tipos de vigilancia'

    def __str__(self):
        return self.nombre.capitalize()

    def save(self, *args, **kwargs):
        # Normalize to lowercase before persisting.
        self.nombre = self.nombre.lower()
        # Zero-argument super(), consistent with the rest of the module.
        super().save(*args, **kwargs)
class Entomologia(Muestra):
    """Entomology sample received by a laboratory; extends Muestra."""
    # Development stages of the collected specimen (single-letter DB codes).
    PUPA = 'P'
    NINFA = 'N'
    HUEVO = 'H'
    LARVAS = 'L'
    ADULTO = 'A'
    INDETERMINADO = 'I'
    ESTADOS = (
        (HUEVO, 'Huevo'),
        (LARVAS, 'Larvas'),
        (PUPA, 'Pupa'),
        (NINFA, 'Ninfa'),
        (ADULTO, 'Adulto'),
        (INDETERMINADO, 'Indeterminado'),
    )
    responsable_recoleccion = models.ForeignKey(ResponsableRecoleccion, verbose_name='responsable de recolección', on_delete=models.CASCADE)
    lugar_recoleccion = models.ForeignKey(LugarRecoleccion, verbose_name='lugar de recolección', on_delete=models.CASCADE)
    tipo_vigilancia = models.ForeignKey(TipoVigilancia, verbose_name='tipo de vigilancia', on_delete=models.CASCADE)
    estado_desarrollo = models.CharField('estado de desarrollo', max_length=1, choices=ESTADOS)
    tipo_muestra = models.ForeignKey(TipoMuestra, related_name='muestras_entomologia', on_delete=models.CASCADE)

    class Meta:
        verbose_name = 'muestra de entomologia'
        verbose_name_plural = 'muestras de entomologia'

    @property
    def solicitante(self):
        # For entomology samples the requester is the collection site.
        return self.lugar_recoleccion
class ObjetoPrueba(EstadoMixin, UltimaModificacionMixin):
    """Free-text general purpose ('object') of a test, used by reports."""
    nombre = models.CharField(max_length=300)

    class Meta:
        verbose_name = 'proposito general de la prueba'
        verbose_name_plural = 'propositos generales de la prueba'

    def __str__(self):
        return self.nombre.title()

    def save(self, *args, **kwargs):
        # Normalize to lowercase before persisting.
        self.nombre = self.nombre.lower()
        # Zero-argument super(), consistent with the rest of the module.
        super().save(*args, **kwargs)
class Reporte(models.Model):
    """Laboratory result report tied to a reception record."""
    registro_recepcion = models.ForeignKey(Recepcion, related_name='reportes', on_delete=models.CASCADE)
    confirmado = models.BooleanField(default=False)  # presumably set once approved — confirm against workflow
    objeto = models.ForeignKey(ObjetoPrueba, verbose_name='Objeto de la Prueba', related_name='objeto_prueba_reporte', on_delete=models.CASCADE)
    fecha = models.DateTimeField('fecha de reporte', blank=True, null=True)
    fecha_aprobacion = models.DateTimeField('fecha de aprobación', blank=True, null=True)
    objects = managers.ReporteManager()  # custom manager

    class Meta:
        verbose_name = 'reporte'
        verbose_name_plural = 'reportes'
        # Extra model permissions used by the reporting views.
        permissions = [
            ('can_generar_informe', 'Puede generar nuevo informe de resultado'),
            ('can_see_informe_resultados', 'Puede ver informe de resultado regenerado'),
        ]
class TipoEnvase(EstadoMixin, UltimaModificacionMixin):
    """Container type used for blood-bank samples."""
    nombre = models.CharField(max_length=50)

    class Meta:
        verbose_name = 'tipo de envase'
        verbose_name_plural = 'tipos de envase'

    def __str__(self):
        return self.nombre.capitalize()

    def save(self, *args, **kwargs):
        # Normalize to lowercase before persisting.
        self.nombre = self.nombre.lower()
        # Zero-argument super(), consistent with the rest of the module.
        super().save(*args, **kwargs)
class InstitucionBancoSangre(UltimaModificacionMixin):
    """Institution where a blood-bank sample is collected."""
    nombre = models.CharField(max_length=100)
    municipio = models.ForeignKey(Municipio, related_name='instituciones_banco_sangre', on_delete=models.CASCADE)

    class Meta:
        verbose_name = 'institución banco de sangre'
        # Fixed Spanish plural spelling (was 'instituciónes').
        verbose_name_plural = 'instituciones banco de sangre'

    def __str__(self):
        return '{0} ({1}-{2})'.format(self.nombre.capitalize(), self.municipio.departamento, self.municipio)

    def save(self, *args, **kwargs):
        # Normalize to lowercase before persisting.
        self.nombre = self.nombre.lower()
        # Zero-argument super(), consistent with the rest of the module.
        super().save(*args, **kwargs)
class BancoSangre(Muestra):
    """Blood-bank sample received by a laboratory; extends Muestra."""
    # Yes/No choices, rendered in Spanish.
    SI = True
    NO = False
    SI_NO_OPCIONES = (
        (SI, 'Si'),
        (NO, 'No'),
    )
    paciente = models.ForeignKey(Paciente, related_name='muestras_banco_sangre', on_delete=models.CASCADE)
    tipo_muestra = models.ForeignKey(TipoMuestra, related_name='muestras_banco_sangre', on_delete=models.CASCADE)
    institucion = models.ForeignKey(InstitucionBancoSangre, related_name='muestras_banco_sangre', on_delete=models.CASCADE)
    tipo_envase = models.ForeignKey(TipoEnvase, verbose_name='tipo de envase', on_delete=models.CASCADE)
    formatos_diligenciados = models.BooleanField(choices=SI_NO_OPCIONES)
    ficha_pacientes = models.BooleanField('ficha de pacientes', choices=SI_NO_OPCIONES)
    condensado_banco = models.BooleanField(choices=SI_NO_OPCIONES)

    class Meta:
        verbose_name = 'muestra de banco de sangre'
        verbose_name_plural = 'muestras de banco de sangre'

    @property
    def solicitante(self):
        # The requesting party is the blood-bank institution.
        return self.institucion

    @property
    def tipo(self):
        return enums.TipoMuestraEnum.BANCO_SANGRE.value
class InstitucionCitohistopatologia(UltimaModificacionMixin):
    """Institution where a cytohistopathology sample is collected."""
    nombre = models.CharField(max_length=100)
    municipio = models.ForeignKey(Municipio, related_name='instituciones_citohistopatologia', on_delete=models.CASCADE)
    codigo = models.BigIntegerField()

    class Meta:
        verbose_name = 'institución de citohistopatologia'
        # Fixed Spanish plural spelling (was 'instituciónes').
        verbose_name_plural = 'instituciones de citohistopatologia'

    def __str__(self):
        return '{0} ({1}-{2})'.format(self.nombre.capitalize(), self.municipio.departamento, self.municipio)

    def save(self, *args, **kwargs):
        # Normalize to lowercase before persisting.
        self.nombre = self.nombre.lower()
        # Zero-argument super(), consistent with the rest of the module.
        super().save(*args, **kwargs)
class Control(EstadoMixin, UltimaModificacionMixin):
    """Control type catalog."""
    nombre = models.CharField(max_length=100)

    class Meta:
        verbose_name = 'control'
        verbose_name_plural = 'controles'

    def __str__(self):
        return self.nombre.capitalize()

    def save(self, *args, **kwargs):
        # Normalize to lowercase before persisting.
        self.nombre = self.nombre.lower()
        # Zero-argument super(), consistent with the rest of the module.
        super().save(*args, **kwargs)
class TipoEvento(EstadoMixin, UltimaModificacionMixin):
    """Event type catalog."""
    nombre = models.CharField(max_length=100)

    class Meta:
        verbose_name = 'tipo de evento'
        verbose_name_plural = 'tipos de eventos'

    def __str__(self):
        return self.nombre.capitalize()

    def save(self, *args, **kwargs):
        # Normalize to lowercase before persisting.
        self.nombre = self.nombre.lower()
        # Zero-argument super(), consistent with the rest of the module.
        super().save(*args, **kwargs)
class Citohistopatologia(Muestra):
    """Cytohistopathology sample received by a laboratory; extends Muestra."""
    paciente = models.ForeignKey(Paciente, related_name='muestras_citohistopatologia', on_delete=models.CASCADE)
    institucion = models.ForeignKey(InstitucionCitohistopatologia, on_delete=models.CASCADE)
    control = models.ForeignKey(Control, on_delete=models.CASCADE)
    tipo_evento = models.ForeignKey(TipoEvento, on_delete=models.CASCADE)
    tipo_muestra = models.ForeignKey(TipoMuestra, related_name='muestras_citohistopatologia', on_delete=models.CASCADE)

    class Meta:
        verbose_name = 'muestra de citohistopatologia'
        verbose_name_plural = 'muestras de citohistopatologia'

    @property
    def solicitante(self):
        # The requesting party is the collecting institution.
        return self.institucion

    @property
    def tipo(self):
        return enums.TipoMuestraEnum.CITOHISTOPATOLOGIA.value
class ProgramaEvaluacionExterna(EstadoMixin, UltimaModificacionMixin):
    """Program catalog for external performance-evaluation samples."""
    nombre = models.CharField(max_length=100)

    class Meta:
        verbose_name = 'programa de evaluación externa'
        verbose_name_plural = 'programas de evaluación externa'

    def __str__(self):
        return self.nombre.capitalize()

    def save(self, *args, **kwargs):
        # Normalize to lowercase before persisting.
        self.nombre = self.nombre.lower()
        # Zero-argument super(), consistent with the rest of the module.
        super().save(*args, **kwargs)
class TipoEventoEvaluacionExterna(EstadoMixin, UltimaModificacionMixin):
    """Event type catalog for external performance-evaluation programs."""
    nombre = models.CharField(max_length=100)

    class Meta:
        verbose_name = 'tipo de evento de evaluación externa'
        verbose_name_plural = 'tipos de eventos de evaluación externa'

    def __str__(self):
        return self.nombre.capitalize()

    def save(self, *args, **kwargs):
        # Normalize to lowercase before persisting.
        self.nombre = self.nombre.lower()
        # Zero-argument super(), consistent with the rest of the module.
        super().save(*args, **kwargs)
class InstitucionEEDD(UltimaModificacionMixin):
    """Institution for direct external performance evaluation (EEDD)."""
    nombre = models.CharField(max_length=100)
    direccion = models.CharField('dirección', max_length=100)
    nit = models.CharField('n. nit', max_length=50)
    municipio = models.ForeignKey(Municipio, related_name='instituciones_eedd', on_delete=models.CASCADE)

    class Meta:
        verbose_name = 'institución EEDD'
        verbose_name_plural = 'instituciones EEDD'

    def __str__(self):
        return '{0} ({1}-{2})'.format(self.nombre.capitalize(), self.municipio.departamento, self.municipio)

    def save(self, *args, **kwargs):
        # Normalize both user-entered strings to lowercase before persisting.
        self.nombre = self.nombre.lower()
        self.direccion = self.direccion.lower()
        # Zero-argument super(), consistent with the rest of the module.
        super().save(*args, **kwargs)
class EEDD(Muestra):
    """Direct external performance-evaluation (EEDD) sample; extends Muestra."""
    institucion = models.ForeignKey(InstitucionEEDD, on_delete=models.CASCADE)
    control = models.ForeignKey(Control, related_name='muestras_eedd', on_delete=models.CASCADE)
    tipo_evento = models.ForeignKey(TipoEventoEvaluacionExterna, related_name='muestras_eedd', on_delete=models.CASCADE)
    tipo_muestra = models.ForeignKey(TipoMuestra, related_name='muestras_eedd', on_delete=models.CASCADE)

    class Meta:
        verbose_name = 'muestra EEDD'
        verbose_name_plural = 'muestras EEDD'

    @property
    def solicitante(self):
        # The requesting party is the evaluated institution.
        return self.institucion
class InstitucionEEID(UltimaModificacionMixin):
    """Institution for indirect external performance evaluation (EEID)."""
    nombre = models.CharField(max_length=100)
    municipio = models.ForeignKey(Municipio, related_name='instituciones_eeid', on_delete=models.CASCADE)
    codigo = models.IntegerField()

    class Meta:
        verbose_name = 'institución EEID'
        verbose_name_plural = 'instituciones EEID'

    def __str__(self):
        return '{0} ({1}-{2})'.format(self.nombre.capitalize(), self.municipio.departamento, self.municipio)

    def save(self, *args, **kwargs):
        # Normalize to lowercase before persisting. (Removed dead commented-out
        # line referring to a `direccion` field this model does not have.)
        self.nombre = self.nombre.lower()
        # Zero-argument super(), consistent with the rest of the module.
        super().save(*args, **kwargs)
class EEID(Muestra):
    """Indirect external performance-evaluation (EEID) sample; extends Muestra."""
    # Yes/No choices, rendered in Spanish.
    SI = True
    NO = False
    SI_NO_OPCIONES = (
        (SI, 'Si'),
        (NO, 'No'),
    )
    # Identification document types (two-letter DB codes).
    CEDULA = 'CC'
    CODIGO = 'CO'
    PASAPORTE = 'P'
    CEDULACOD = 'CD'
    REGISTRO_CIVIL = 'RC'
    TARJETA_IDENTIDAD = 'TI'
    CEDULA_EXTRANJERIA = 'CE'
    SIN_IDENTIFICACION = 'NN'
    TIPOS_IDENTIFICACION = (
        (CEDULA, 'CC'),
        (TARJETA_IDENTIDAD, 'TI'),
        (REGISTRO_CIVIL, 'RC'),
        (PASAPORTE, 'P'),
        (CEDULA_EXTRANJERIA, 'CE'),
        (CODIGO, 'COD'),
        (CEDULACOD, 'CC/COD'),
        (SIN_IDENTIFICACION, 'SIN DATOS'),
    )
    # Units for the `edad` (age) field.
    HORAS = 'H'
    MESES = 'M'
    DIAS = 'D'
    ANOS = 'A'
    UNIDADES_MEDIDA = (
        (ANOS, 'Años'),
        (MESES, 'Meses'),
        (DIAS, 'Dias'),
        (HORAS, 'Horas'),
    )
    nombre = models.CharField('nombres', max_length=100)
    identificacion = models.CharField('documento id', max_length=50)
    tipo_identificacion = models.CharField(max_length=2, choices=TIPOS_IDENTIFICACION)
    edad = models.IntegerField()
    tipo_edad = models.CharField(max_length=1, choices=UNIDADES_MEDIDA)
    institucion = models.ForeignKey(InstitucionEEID, related_name='muestras_eeid', on_delete=models.CASCADE)
    programado = models.BooleanField(choices=SI_NO_OPCIONES)
    control = models.ForeignKey(Control, related_name='muestras_eeid', on_delete=models.CASCADE)
    programa = models.ForeignKey(ProgramaEvaluacionExterna, related_name='muestras_eeid', on_delete=models.CASCADE)
    tipo_evento = models.ForeignKey(TipoEventoEvaluacionExterna, related_name='muestras_eeid', on_delete=models.CASCADE)
    tipo_muestra = models.ForeignKey(TipoMuestra, related_name='muestras_eeid', on_delete=models.CASCADE)

    class Meta:
        verbose_name = 'muestra EEID'
        verbose_name_plural = 'muestras EEID'

    def save(self, *args, **kwargs):
        # Normalize the name to lowercase before persisting.
        self.nombre = self.nombre.lower()
        # Zero-argument super(), consistent with the rest of the module.
        super().save(*args, **kwargs)

    @property
    def solicitante(self):
        return self.institucion
|
# Demo tour of Python built-in functions (comments translated to English).

# absolute value
print(abs(-2))

# all(): True when every element is truthy (no zeros, empty values or None)
print(all([1,2,3])) # true
print(all([0,1,2])) #false

# any(): True when at least one element is truthy
print(any([0,None,False])) # false
print(any([0,False,1])) #true

# ascii(): ASCII-only repr of the object (non-ASCII chars escaped)
print(ascii('something'))

# breakpoint() drops you into the debugger at the call site.
# breakpoint() # useful with the built-in debugger, e.g. in a notebook; IDEs have their own breakpoints
print('after breakpoint')

# bytearray(): a new, mutable array of bytes
print(bytearray([1,2,3]))

# bytes(): a new, immutable bytes object
print(bytes("alala", encoding="utf-8"))

# callable(): True if the object can be called
a = lambda x: x*x
print(callable(a)) # true
print(callable("2")) # false

# complex(): build a complex number
print(complex(3)) # (3+0j)

# eval(): evaluate an expression held in a string.
# SECURITY: never eval/exec untrusted input.
expression_in_string = "2**2"
print(eval(expression_in_string))

class Jakas:
    def printer(self):
        print('dzialam')

# printer() returns None, so None is printed after its own output.
print(eval("Jakas().printer()"))
# exec() is similar but runs statements and always returns None.
print(exec("Jakas().printer()"))

# set is mutable; frozenset is hashable and immutable
secik = set([1,2,3])
secik2 = {1,2,3}
frozen_secik = frozenset([1,2,3])
print(secik)
print(secik2)
print(frozen_secik) # the difference is that this one is immutable and hashable

# dir(obj) lists the attributes of an object; dir() lists names in the current scope
b = 1
print(dir(b))
print(dir())

# locals()/globals(): dictionaries of the local / global namespaces
print(locals())
print(globals())

# hasattr(): whether the object has the named attribute
#print(hasattr(object, name))

# hash(): hash value of an object
print(hash(b))
# id(): identity of an object
print(id(b))

# help(): interesting info about an object in an interactive console
#print(help(b))

# base conversions: binary, hexadecimal, octal string representations
print(bin(123123)) # 0b11110000011110011
print(hex(255)) # 0xff
print(hex(123123)) # 0x1e0f3
print(oct(1)) # 0o1

# isinstance(): is the object of the given type?
print(isinstance("asd", str))
# issubclass(): is the class a child of the other?
print(issubclass(int, object))

# chr(): character for a Unicode code point
print(chr(97)) # a
print(chr(8364)) # €
# ord(): Unicode code point of a character
print(ord('a')) # 97
print(ord('€')) # 8364

# reversed(): a reverse iterator; slicing [::-1] builds a reversed copy
lista2 = [1,2,3]
print(list(reversed(lista2))) # [3,2,1]
print(lista2[::-1]) # [3,2,1]

# round(): round to the given number of decimals
print(round(1.23, 1)) # 1.2
print(round(1.23)) # 1
print(round(1.51)) # 2

# super(): invoke methods from the parent class
class A:
    def elo(self):
        print('asd')

class B(A):
    def elo2(self):
        super().elo()

obj = B()
obj.elo2()

# iter(): make an iterator from an iterable
lista = [1,2,3,4]
iteratorek = iter(lista)
|
class Solution:
    def shortestPathBinaryMatrix(self, grid: List[List[int]]) -> int:
        """BFS over the 8-connected grid; returns the number of cells on the
        shortest clear path from the top-left to the bottom-right corner,
        or -1 when no such path exists."""
        if not grid or grid[0][0] == 1 or grid[-1][-1] == 1:
            return -1
        rows, cols = len(grid), len(grid[0])
        frontier = deque([(0, 0, 1)])  # (row, col, path length so far)
        seen = {(0, 0)}
        while frontier:
            row, col, dist = frontier.popleft()
            if row == rows - 1 and col == cols - 1:
                return dist
            # Visit all 8 neighbours (dr == dc == 0 is skipped via `seen`).
            for dr in (-1, 0, 1):
                for dc in (-1, 0, 1):
                    r, c = row + dr, col + dc
                    if 0 <= r < rows and 0 <= c < cols and (r, c) not in seen and grid[r][c] == 0:
                        seen.add((r, c))
                        frontier.append((r, c, dist + 1))
        return -1
|
# This file is part of KoreanCodecs.
#
# Copyright(C) 2002-2003 Hye-Shik Chang <perky@FreeBSD.org>.
#
# KoreanCodecs is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# KoreanCodecs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with KoreanCodecs; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id: hangul.py,v 1.2 2003/10/15 19:24:53 perky Exp $
#
class UnicodeHangulError(Exception):
    """Raised for invalid hangul input; renders as the bare message."""

    def __init__(self, msg):
        Exception.__init__(self, msg)
        self.msg = msg

    def __repr__(self):
        return self.msg

    # str() and repr() both show just the message text.
    __str__ = __repr__
Null = ''  # empty-string sentinel meaning "no jamo in this slot" (e.g. absent jongseong)
class Jaeum(object):
    """Hangul consonant (jaeum) compatibility-jamo code points and groupings."""
    # Compatibility jamo U+3131..U+314E, in dictionary order.
    Codes = ('\u3131', '\u3132', '\u3133', '\u3134', '\u3135', '\u3136',
            #   G        GG       GS       N        NJ       NH
             '\u3137', '\u3138', '\u3139', '\u313a', '\u313b', '\u313c',
            #   D        DD       L        LG       LM       LB
             '\u313d', '\u313e', '\u313f', '\u3140', '\u3141', '\u3142',
            #   LS       LT       LP       LH       M        B
             '\u3143', '\u3144', '\u3145', '\u3146', '\u3147', '\u3148',
            #   BB       BS       S        SS       NG       J
             '\u3149', '\u314a', '\u314b', '\u314c', '\u314d', '\u314e')
            #   JJ       C        K        T        P        H
    Width = len(Codes)  # 30 consonant jamo
    G, GG, GS, N, NJ, NH, D, DD, L, LG, LM, LB, LS, LT, LP, LH, M, B, \
    BB, BS, S, SS, NG, J, JJ, C, K, T, P, H = Codes
    # Consonants usable as a syllable-initial (choseong).
    Choseong = [G, GG, N, D, DD, L, M, B, BB, S, SS, NG, J, JJ, C, K, T, P, H]
    # Consonants (plus Null for "none") usable as a syllable-final (jongseong).
    Jongseong = [Null, G, GG, GS, N, NJ, NH, D, L, LG, LM, LB, LS, LT, \
                 LP, LH, M, B, BS, S, SS, NG, J, C, K, T, P, H]
    # Decomposition of compound consonants into their primitive elements.
    MultiElement = {
        GG: (G, G), GS: (G, S), NJ: (N, J), NH: (N, H), DD: (D, D),
        LG: (L, G), LM: (L, M), LB: (L, B), LS: (L, S), LT: (L, T),
        LP: (L, P), LH: (L, H), BB: (B, B), BS: (B, S), SS: (S, S),
        JJ: (J, J)
    }
class Moeum(object):
    """Hangul vowel (moeum) compatibility-jamo code points and groupings."""
    # Compatibility jamo U+314F..U+3163, in dictionary order.
    Codes = ('\u314f', '\u3150', '\u3151', '\u3152', '\u3153', '\u3154',
            #   A        AE       YA       YAE      EO       E
             '\u3155', '\u3156', '\u3157', '\u3158', '\u3159', '\u315a',
            #   YEO      YE       O        WA       WAE      OE
             '\u315b', '\u315c', '\u315d', '\u315e', '\u315f', '\u3160',
            #   YO       U        WEO      WE       WI       YU
             '\u3161', '\u3162', '\u3163')
            #   EU       YI       I
    Width = len(Codes)  # 21 vowel jamo
    A, AE, YA, YAE, EO, E, YEO, YE, O, WA, WAE, OE, YO, \
    U, WEO, WE, WI, YU, EU, YI, I = Codes
    # Every vowel can appear as the medial (jungseong) of a syllable.
    Jungseong = list(Codes)
    # Decomposition of compound vowels into their primitive elements.
    MultiElement = {
        AE: (A, I), YAE: (YA, I), YE: (YEO, I), WA: (O, A), WAE: (O, A, I),
        OE: (O, I), WEO: (U, EO), WE: (U, E), WI: (U, I), YI: (EU, I)
    }
# Aliases for convenience
Choseong = Jaeum.Choseong
Jungseong = Moeum.Jungseong
Jongseong = Jaeum.Jongseong

# Re-export every short jamo constant (G, GG, ..., A, AE, ...) at module
# level. Direct assignment via globals() replaces the original exec()-built
# statements: identical effect, no dynamic code execution.
for name, code in list(Jaeum.__dict__.items()) + list(Moeum.__dict__.items()):
    if name.isupper() and len(name) <= 3:
        globals()[name] = code
del name, code
# Unicode Hangul Syllables Characteristics
ZONE = ('\uAC00', '\uD7A3')  # first/last precomposed Hangul syllable (inclusive)
NCHOSEONG = len(Choseong)
NJUNGSEONG = len(Jungseong)
NJONGSEONG = len(Jongseong)
JBASE_CHOSEONG = '\u1100'   # base of the conjoining choseong jamo block
JBASE_JUNGSEONG = '\u1161'  # base of the conjoining jungseong jamo block
JBASE_JONGSEONG = '\u11A8'  # base of the conjoining jongseong jamo block
CHOSEONG_FILLER = '\u115F'
JUNGSEONG_FILLER = '\u1160'


def _ishangul(code):
    """Return True if the single character *code* is a precomposed Hangul
    syllable or a compatibility jamo (consonant or vowel)."""
    # def instead of a lambda bound to a name (PEP 8); same callable contract.
    return (ZONE[0] <= code <= ZONE[1]
            or code in Jaeum.Codes
            or code in Moeum.Codes)
# Alternative suffixes: internal table — do not use outside this module.
# Maps a particle to its (post-vowel, post-consonant) pair.
ALT_SUFFIXES = {
    '\uc744': ('\ub97c', '\uc744'), # reul, eul
    '\ub97c': ('\ub97c', '\uc744'), # reul, eul
    '\uc740': ('\ub294', '\uc740'), # neun, eun
    '\ub294': ('\ub294', '\uc740'), # neun, eun
    '\uc774': ('\uac00', '\uc774'), # yi, ga
    '\uac00': ('\uac00', '\uc774'), # yi, ga
    '\uc640': ('\uc640', '\uacfc'), # wa, gwa
    '\uacfc': ('\uc640', '\uacfc'), # wa, gwa
}
# "Ida"-variation suffixes: internal table — do not use outside this module.
IDA_SUFFIXES = {
    '(\uc774)': ('', '\uc774'), # (yi)da
    '(\uc785)': (17, '\uc785'), # (ip)nida
    '(\uc778)': (4, '\uc778'), # (in)-
}
def isJaeum(u):
    """True if *u* is non-empty and consists only of jaeum (consonant) jamo."""
    if not u:
        return False
    return all(ch in Jaeum.Codes for ch in u)
def isMoeum(u):
    """True if *u* is non-empty and consists only of moeum (vowel) jamo."""
    if not u:
        return False
    return all(ch in Moeum.Codes for ch in u)
def ishangul(u):
    """True if *u* is non-empty and every character is hangul
    (a precomposed syllable or a compatibility jamo)."""
    if not u:
        return False
    return all(_ishangul(ch) for ch in u)
def join(codes):
    """ Join function which makes hangul syllable from jamos

    codes -- 3-element sequence (choseong, jungseong, jongseong); empty
    members use the Null sentinel.  Returns a single character, or the
    lone jamo when only one of choseong/jungseong is present.
    """
    # BUGFIX: 'len(codes) is not 3' compared identity, not equality;
    # CPython only made it work by accident (small-int caching).
    if len(codes) != 3:
        raise UnicodeHangulError("needs 3-element tuple")
    if not codes[0] or not codes[1]: # single jamo
        return codes[0] or codes[1]
    # Standard Unicode syllable arithmetic:
    # AC00 + (cho*21 + jung)*28 + jong
    return chr(
        0xac00 + (
            Choseong.index(codes[0])*NJUNGSEONG +
            Jungseong.index(codes[1])
        )*NJONGSEONG + Jongseong.index(codes[2])
    )
def split(code):
    """ Split function which splits hangul syllable into jamos

    Returns a (choseong, jungseong, jongseong) tuple; a lone jamo maps to
    a tuple padded with the Null sentinel.
    """
    if len(code) != 1 or not _ishangul(code):
        raise UnicodeHangulError("needs 1 hangul letter")
    if code in Jaeum.Codes:
        return (code, Null, Null)
    if code in Moeum.Codes:
        return (Null, code, Null)
    # Idiom fix: use true integer division (//) instead of int(a / b),
    # which relied on float rounding.
    offset = ord(code) - 0xac00
    return (
        Choseong[offset // (NJUNGSEONG*NJONGSEONG)],
        Jungseong[(offset // NJONGSEONG) % NJUNGSEONG],
        Jongseong[offset % NJONGSEONG]
    )
def conjoin(s):
    """Compose conjoining jamo (U+1100 block) in *s* into precomposed
    hangul syllables; characters outside the jamo ranges pass through."""
    obuff = []
    ncur = 0
    while ncur < len(s):
        c = s[ncur]
        if JBASE_CHOSEONG <= c <= '\u1112' or c == CHOSEONG_FILLER: # starts with choseong
            # A following medial vowel (or filler) means a full syllable.
            if len(s) > ncur+1 and JUNGSEONG_FILLER <= s[ncur+1] <= '\u1175':
                cho = Choseong[ord(c) - ord(JBASE_CHOSEONG)]
                jung = Jungseong[ord(s[ncur+1]) - ord(JBASE_JUNGSEONG)]
                if len(s) > ncur+2 and JBASE_JONGSEONG <= s[ncur+2] <= '\u11C2':
                    # +1 skips the leading Null entry of the Jongseong table.
                    jong = Jongseong[ord(s[ncur+2]) - ord(JBASE_JONGSEONG) + 1]
                    ncur += 2
                else:
                    jong = Null
                    ncur += 1
                obuff.append(join([cho, jung, jong]))
            else:
                # Isolated leading consonant.
                obuff.append(join([Choseong[ord(c) - ord(JBASE_CHOSEONG)], Null, Null]))
        elif JBASE_JUNGSEONG <= c <= '\u1175':
            # Isolated medial vowel.
            obuff.append(join([Null, Jungseong[ord(c) - ord(JBASE_JUNGSEONG)], Null]))
        else:
            obuff.append(c)
        ncur += 1
    return ''.join(obuff)
def disjoint(s):
    """Decompose hangul characters in *s* into conjoining jamo
    (U+1100 block); non-hangul characters pass through."""
    obuff = []
    for c in s:
        if _ishangul(c):
            cho, jung, jong = split(c)
            if cho:
                obuff.append( chr(ord(JBASE_CHOSEONG) + Choseong.index(cho)) )
            else:
                obuff.append( CHOSEONG_FILLER )
            if jung:
                obuff.append( chr(ord(JBASE_JUNGSEONG) + Jungseong.index(jung)) )
            else:
                obuff.append( JUNGSEONG_FILLER )
            if jong:
                # -1 compensates for the leading Null entry of Jongseong.
                obuff.append( chr(ord(JBASE_JONGSEONG) + Jongseong.index(jong) - 1) )
        else:
            obuff.append(c)
    return ''.join(obuff)
def _has_final(c):
# for internal use only
if '\uac00' <= c <= '\ud7a3': # hangul
return 1, (ord(c) - 0xac00) % 28 > 0
else:
return 0, c in '013678.bklmnptLMNRZ'
def format(fmtstr, *args, **kwargs):
    """printf-style formatting with Korean particle (josa) agreement.

    After each %-substitution, a following particle such as 을/를, 은/는,
    이/가, 와/과 or the (이)다 family is rewritten to agree with whether the
    substituted word ends in a final consonant (via _has_final).
    """
    if kwargs:
        argget = lambda:kwargs
    else:
        # BUGFIX: Python 3 iterators expose __next__, not .next;
        # the old code raised AttributeError on any positional args.
        argget = iter(args).__next__
    obuff = []
    ncur = escape = fmtinpth = 0
    ofmt = fmt = ''              # ofmt: last substituted text
    while ncur < len(fmtstr):
        c = fmtstr[ncur]
        if escape:
            obuff.append(c)
            escape = 0
            ofmt = ''
        elif c == '\\':
            escape = 1
        elif fmt:                # inside a %-spec
            fmt += c
            if not fmtinpth and c.isalpha():
                ofmt = fmt % argget()
                obuff.append(ofmt)
                fmt = ''
            elif fmtinpth and c == ')':
                fmtinpth = 0
            elif c == '(':       # %(name)s mapping key
                fmtinpth = 1
            elif c == '%':       # literal '%%'
                obuff.append('%')
        elif c == '%':           # start of a %-spec
            fmt += c
            ofmt = ''
        else:
            if ofmt and c in ALT_SUFFIXES:
                # Pick after-consonant form (index 1) or after-vowel (0).
                obuff.append(ALT_SUFFIXES[c][
                    _has_final(ofmt[-1])[1] and 1 or 0
                ])
            elif ofmt and fmtstr[ncur:ncur+3] in IDA_SUFFIXES:
                sel = IDA_SUFFIXES[fmtstr[ncur:ncur+3]]
                ishan, hasfinal = _has_final(ofmt[-1])
                if hasfinal:
                    obuff.append(sel[1])
                elif ishan:
                    # Merge the particle's jongseong into the last syllable.
                    if sel[0]:
                        obuff[-1] = obuff[-1][:-1] + chr(ord(ofmt[-1]) + sel[0])
                else:
                    obuff.append(sel[0] and sel[1])
                ncur += 2        # consumed '(x)'
            else:
                obuff.append(c)
                ofmt = ''
        ncur += 1
    return ''.join(obuff)
#
# This file is part of KoreanCodecs.
#
# Copyright(C) 2002-2003 Hye-Shik Chang <perky@FreeBSD.org>.
#
# KoreanCodecs is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# KoreanCodecs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with KoreanCodecs; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id: qwerty2bul.py,v 1.4 2003/10/16 03:58:14 perky Exp $
#
import codecs
# Keyboard map for the standard 2-beolsik (dubeolsik) layout:
# jamo -> QWERTY key stroke(s).
_2bul_codekeymap = {
    Jaeum.G: 'r', Jaeum.GG:'R', Jaeum.GS: 'rt',
    Jaeum.N: 's', Jaeum.NJ:'sw', Jaeum.NH: 'sg', Jaeum.D: 'e',
    Jaeum.DD:'E', Jaeum.L: 'f', Jaeum.LG: 'fr', Jaeum.LM: 'fa',
    Jaeum.LB:'fq', Jaeum.LS:'ft', Jaeum.LT: 'fx', Jaeum.LP: 'fv',
    Jaeum.LH:'fg', Jaeum.M: 'a', Jaeum.B: 'q', Jaeum.BB: 'Q',
    Jaeum.BS:'qt', Jaeum.S: 't', Jaeum.SS: 'T', Jaeum.NG: 'd',
    Jaeum.J: 'w', Jaeum.JJ:'W', Jaeum.C: 'c', Jaeum.K: 'z',
    Jaeum.T: 'x', Jaeum.P: 'v', Jaeum.H: 'g',
    Moeum.A: 'k', Moeum.AE:'o', Moeum.YA: 'i', Moeum.YAE:'O',
    Moeum.EO:'j', Moeum.E: 'p', Moeum.YEO:'u', Moeum.YE: 'P',
    Moeum.O: 'h', Moeum.WA:'hk', Moeum.WAE:'ho', Moeum.OE: 'hl',
    Moeum.YO:'y', Moeum.U: 'n', Moeum.WEO:'nj', Moeum.WE: 'np',
    Moeum.WI:'nl', Moeum.YU:'b', Moeum.EU: 'm', Moeum.YI: 'ml',
    Moeum.I: 'l',
    '': '',
}
# Inverse table: key stroke(s) -> jamo.  setdefault() keeps the explicit
# (case-sensitive) mapping authoritative when an upper-cased key collides.
_2bul_keycodemap = {}
for k, v in _2bul_codekeymap.items():
    _2bul_keycodemap[v] = k
    _2bul_keycodemap.setdefault(v.upper(), k)
# All characters that are valid 2-beolsik key strokes.
_2bul_keycodes = ''.join(_2bul_keycodemap.keys())
del k, v
class Automata_Hangul2(object):
    """Input-method automaton converting 2-beolsik key strokes to hangul.

    Characters are fed one at a time; syllables are composed greedily.
    If a word ever becomes un-composable it is emitted verbatim instead.
    """
    # must Unicode in / Unicode out
    def __init__(self):
        self.clear()
    def pushcomp(self):
        # Flush the in-progress syllable into word_comp; a syllable missing
        # choseong or jungseong invalidates the whole word.
        if not (self.chosung and self.jungsung):
            self.word_valid = 0
        self.word_comp.append(join([
            self.chosung, self.jungsung, self.jongsung
        ]))
        self.clearcomp()
    def clearcomp(self):
        # Reset the in-progress syllable components.
        self.chosung = ''
        self.jungsung = ''
        self.jongsung = ''
    def clear(self):
        # Reset all conversion state.
        self.buff = ['']
        self.word_raw = []
        self.word_comp = []
        self.word_valid = 1
        self.clearcomp()
    def convert(self, s):
        """Convert a whole key-stroke string; returns the composed text."""
        self.clear()
        for c in s: self.feed(c)
        self.finalize()
        return ''.join(self.buff)
    def finalize(self):
        """Flush the pending word; returns 1 if it was emitted raw (invalid)."""
        if self.chosung or self.jungsung or self.jongsung:
            self.pushcomp()
        if self.word_raw or self.word_comp:
            if self.word_valid:
                rjoi = ''.join(self.word_comp)
                r = 0
            else:
                self.word_valid = 1
                rjoi = ''.join(self.word_raw)
                r = 1
            self.word_raw, self.word_comp = [], []
            if rjoi:
                self.buff.append(rjoi)
            return r
        return 0
    def feed(self, c):
        """Feed one key stroke; a non-key character flushes the current word."""
        self.word_raw.append(c)
        if c in _2bul_keycodes:
            code = _2bul_keycodemap[c]
            if isJaeum(code):
                if not self.chosung: # chosung X
                    if self.jungsung or self.jongsung:
                        self.word_valid = 0
                    else:
                        self.chosung = code
                elif not self.jungsung: # chosung O jungsung X
                    if self.jongsung:
                        self.word_valid = 0
                    else:
                        # New consonant starts the next syllable.
                        self.pushcomp()
                        self.chosung = code
                elif not self.jongsung: # chosung O jungsung O jongsung X
                    if code not in Jongseong:
                        self.pushcomp()
                        self.chosung = code
                    else:
                        self.jongsung = code
                else: # full
                    trymul = _2bul_codekeymap[self.jongsung] + c
                    if trymul in _2bul_keycodemap: # can be multi jongsung
                        self.jongsung = _2bul_keycodemap[trymul]
                    else:
                        self.pushcomp()
                        self.chosung = code
            else: # MOEUM...
                if not self.jongsung:
                    if not self.jungsung: # jungsung X jongsung X
                        self.jungsung = code
                    else: # jungsung O jongsung X
                        trymul = _2bul_codekeymap[self.jungsung] + c
                        if trymul in _2bul_keycodemap: # can be multi jungsung
                            self.jungsung = _2bul_keycodemap[trymul]
                        else:
                            self.pushcomp()
                            self.jungsung = code
                else: # jongsung O
                    # A vowel after a final consonant: the final (or its last
                    # element, for compounds) migrates to the next syllable.
                    if len(_2bul_codekeymap[self.jongsung]) > 1:
                        ojong = _2bul_keycodemap[_2bul_codekeymap[self.jongsung][:-1]]
                        ncho = _2bul_keycodemap[_2bul_codekeymap[self.jongsung][-1]]
                        self.jongsung = ojong
                        self.pushcomp()
                        self.chosung = ncho
                        self.jungsung = code
                    else:
                        njong = self.jongsung
                        self.jongsung = ''
                        self.pushcomp()
                        self.chosung = njong
                        self.jungsung = code
        else: # non key code
            if not self.finalize():
                self.buff.append(c)
class Codec_Hangul2(codecs.Codec):
    """str<->str codec between Unicode hangul and 2-beolsik key strokes."""
    BASECODEC = 'utf-8' # fallback codec of decoder
    # Unicode to key stroke
    def encode(self, data, errors='strict'):
        """Encode hangul text into 2-beolsik key strokes.

        Returns (result, length-consumed).  Non-hangul characters pass
        through unchanged.
        """
        if errors not in ('strict', 'ignore', 'replace'):
            raise ValueError("unknown error handling")
        r = []
        for c in data:
            if not ishangul(c):
                # BUGFIX: the old code appended bytes (c.encode(...)) next
                # to the str keymap values, so ''.join() raised TypeError
                # for any non-hangul input; it also tried ascii-encoding
                # U+0080.  Pass non-hangul characters through as str.
                r.append(c)
            else:
                for k in split(c):
                    r.append(_2bul_codekeymap[k])
        r = ''.join(r)
        return (r, len(r))
    # key stroke to Unicode
    def decode(self, data, errors='strict'):
        """Decode 2-beolsik key strokes (str or bytes) into hangul text."""
        if errors not in ('strict', 'ignore', 'replace'):
            raise ValueError("unknown error handling")
        if isinstance(data, str):
            s = data
        else:
            s = str(data, self.BASECODEC, errors)
        am = Automata_Hangul2()
        r = am.convert(s)
        return (r, len(r))
class StreamWriter_Hangul2(Codec_Hangul2, codecs.StreamWriter):
    # Stream wrapper; encode/decode inherited from Codec_Hangul2.
    pass
class StreamReader_Hangul2(Codec_Hangul2, codecs.StreamReader):
    # Stream wrapper; encode/decode inherited from Codec_Hangul2.
    pass
#
# This file is part of KoreanCodecs.
#
# Copyright(C) 2002-2003 Hye-Shik Chang <perky@FreeBSD.org>.
#
# KoreanCodecs is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# KoreanCodecs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with KoreanCodecs; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id: qwerty3bul.py,v 1.2 2003/10/16 03:58:14 perky Exp $
#
import codecs
# This mapping table is kindly contributed by Mithrandir.
# 3-beolsik (sebeolsik) layout: one map per syllable position
# (choseong, jungseong, jongseong), each jamo -> key stroke(s).
_3bul_codekeymap = [{
    Jaeum.G: 'k', Jaeum.GG:'kk',
    Jaeum.N: 'h',
    Jaeum.D: 'u',
    Jaeum.DD:'uu',
    Jaeum.L: 'y',
    Jaeum.M: 'i', Jaeum.B: ';', Jaeum.BB: ';;',
    Jaeum.S: 'n', Jaeum.SS: 'nn',
    Jaeum.NG:'j',
    Jaeum.J: 'l', Jaeum.JJ:'ll', Jaeum.C: 'o', Jaeum.K: '0',
    Jaeum.T: '\'', Jaeum.P: 'p', Jaeum.H: 'm',
    '': '',
}, {
    Moeum.A: 'f', Moeum.AE:'r', Moeum.YA: '6', Moeum.YAE:'G',
    Moeum.EO:'t', Moeum.E: 'c', Moeum.YEO:'e', Moeum.YE: '7',
    Moeum.O: 'v', Moeum.WA:'/f', Moeum.WAE:'/r', Moeum.OE: '/d',
    Moeum.YO:'4', Moeum.U: 'b', Moeum.WEO:'9t', Moeum.WE: '9c',
    Moeum.WI:'9d', Moeum.YU:'5', Moeum.EU: 'g', Moeum.YI: '8',
    Moeum.I: 'd',
    '': '',
}, {
    Jaeum.G: 'x', Jaeum.GG: '!', Jaeum.GS: 'V',
    Jaeum.N: 's', Jaeum.NJ: 'E', Jaeum.NH: 'S',
    Jaeum.D: 'A',
    Jaeum.L: 'w', Jaeum.LG: '@', Jaeum.LM: 'F',
    Jaeum.LB: 'D', Jaeum.LS: 'T', Jaeum.LT: '%',
    Jaeum.LP:'$', Jaeum.LH: 'R',
    Jaeum.M: 'z',
    Jaeum.B: '3', Jaeum.BS: 'X',
    Jaeum.S: 'q', Jaeum.SS: '2',
    Jaeum.NG:'a', Jaeum.J: '#',
    Jaeum.C: 'Z', Jaeum.K: 'C',
    Jaeum.T: 'W', Jaeum.P: 'Q', Jaeum.H: '1',
    '': '',
}]
# Inverse maps: key stroke(s) -> jamo, per syllable position.  setdefault()
# keeps the explicit mapping authoritative on upper-case collisions.
_3bul_keycodemap = []
for cmap in _3bul_codekeymap:
    m = {}
    for k, v in cmap.items():
        m[v] = k
        m.setdefault(v.upper(), k)
    _3bul_keycodemap.append(m)
_3bul_keycodemap[1].update({
    '/': Moeum.O, '9': Moeum.U,
}) # double allocated jungseongs
class Automata_Hangul3(object):
    """Input-method automaton converting 3-beolsik key strokes to hangul.

    Unlike 2-beolsik, each key already identifies its syllable position
    (choseong / jungseong / jongseong), so composition is positional.
    """
    # must Unicode in / Unicode out
    def __init__(self):
        self.clear()
    def pushcomp(self):
        # Flush the in-progress syllable; one without both choseong and
        # jungseong invalidates the whole word.
        if not (self.choseong and self.jungseong):
            self.word_valid = 0
        self.word_comp.append(join([
            self.choseong, self.jungseong, self.jongseong
        ]))
        self.clearcomp()
    def clearcomp(self):
        # Reset the in-progress syllable components.
        self.choseong = ''
        self.jungseong = ''
        self.jongseong = ''
    def clear(self):
        # Reset all conversion state.
        self.buff = ['']
        self.word_raw = []
        self.word_comp = []
        self.word_valid = 1
        self.clearcomp()
    def convert(self, s):
        """Convert a whole key-stroke string; returns the composed text."""
        self.clear()
        for c in s: self.feed(c)
        self.finalize()
        return ''.join(self.buff)
    def finalize(self):
        """Flush the pending word; returns 1 if it was emitted raw (invalid)."""
        if self.choseong or self.jungseong or self.jongseong:
            self.pushcomp()
        if self.word_raw or self.word_comp:
            if self.word_valid:
                rjoi = ''.join(self.word_comp)
                r = 0
            else:
                self.word_valid = 1
                rjoi = ''.join(self.word_raw)
                r = 1
            self.word_raw, self.word_comp = [], []
            if rjoi:
                self.buff.append(rjoi)
            return r
        return 0
    def feed(self, c):
        """Feed one key stroke; a non-key character flushes the current word."""
        self.word_raw.append(c)
        if c in _3bul_keycodemap[0]: # choseong key
            if self.choseong:
                # Repeating G/D/B/S/J doubles it into the tense consonant.
                if (self.choseong in (Jaeum.G, Jaeum.D, Jaeum.B, Jaeum.S,
                        Jaeum.J) and self.choseong == _3bul_keycodemap[0][c]):
                    c = c+c
                else:
                    self.pushcomp()
            self.choseong = _3bul_keycodemap[0][c]
        elif c in _3bul_keycodemap[1]: # jungseong key
            if self.jungseong:
                # '/'(O) and '9'(U) prefix compound vowels like WA/WEO.
                if self.jungseong == Moeum.O and '/'+c in _3bul_keycodemap[1]:
                    c = '/'+c
                elif self.jungseong == Moeum.U and '9'+c in _3bul_keycodemap[1]:
                    c = '9'+c
                else:
                    self.pushcomp()
            self.jungseong = _3bul_keycodemap[1][c]
        elif c in _3bul_keycodemap[2]: # jongseong key
            if self.jongseong:
                self.pushcomp()
            self.jongseong = _3bul_keycodemap[2][c]
        else: # non key code
            if not self.finalize():
                self.buff.append(c)
class Codec_Hangul3(codecs.Codec):
    """str<->str codec between Unicode hangul and 3-beolsik key strokes."""
    BASECODEC = 'utf-8' # fallback codec of decoder
    # Unicode to key stroke
    def encode(self, data, errors='strict'):
        """Encode hangul text into 3-beolsik key strokes.

        Returns (result, length-consumed).  Non-hangul characters pass
        through unchanged.
        """
        if errors not in ('strict', 'ignore', 'replace'):
            raise ValueError("unknown error handling")
        r = []
        for c in data:
            if not ishangul(c):
                # BUGFIX: the old code appended bytes (c.encode(...)) next
                # to the str keymap values, so ''.join() raised TypeError
                # for any non-hangul input; it also tried ascii-encoding
                # U+0080.  Pass non-hangul characters through as str.
                r.append(c)
            else:
                # Each jamo uses the keymap of its syllable position.
                for k, m in zip(split(c), _3bul_codekeymap):
                    r.append(m[k])
        r = ''.join(r)
        return (r, len(r))
    # key stroke to Unicode
    def decode(self, data, errors='strict'):
        """Decode 3-beolsik key strokes (str or bytes) into hangul text."""
        if errors not in ('strict', 'ignore', 'replace'):
            raise ValueError("unknown error handling")
        if isinstance(data, str):
            s = data
        else:
            s = str(data, self.BASECODEC, errors)
        am = Automata_Hangul3()
        r = am.convert(s)
        return (r, len(r))
class StreamWriter_Hangul3(Codec_Hangul3, codecs.StreamWriter):
    # Stream wrapper; encode/decode inherited from Codec_Hangul3.
    pass
class StreamReader_Hangul3(Codec_Hangul3, codecs.StreamReader):
    # Stream wrapper; encode/decode inherited from Codec_Hangul3.
    pass
|
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.shortcuts import render, redirect
from django.views.generic import ListView, TemplateView, FormView
from gscan.models import *
from gscan.tasks.tasks import *
from django.contrib.auth.decorators import login_required
from django.contrib import auth
from django.contrib.auth.models import User
import hashlib
import time
@login_required(login_url="/")
def scan_index(request):
    """Render the dashboard listing every scan task."""
    tasks = Tasks.objects.all()
    return render(request, 'index.html', {'task_list': tasks})
@login_required(login_url="/")
def scan_task(request):
    """Create a new scan task (POST) and enqueue it via Celery; GET shows
    the task-creation form.

    BUGFIX: hashlib.md5() requires bytes in Python 3; the original passed
    str(time.time()) directly, raising TypeError on every submission.
    """
    if request.method == 'POST':
        taskname = request.POST.get('taskname')
        target = request.POST.get('target')
        config = request.POST.get('config')
        # 20-char pseudo-unique task id derived from the current timestamp.
        taskid = hashlib.md5(str(time.time()).encode('utf-8')).hexdigest()[0:20]
        new_task = Tasks(
            taskid = taskid,
            taskname = taskname,
            target = target,
            domain = 'pend',
            service = 'pend',
            weakfile = 'pend',
            status = 'start',
            config = config,
        )
        new_task.save()
        manage_task.delay(taskid)  # hand the scan off to the Celery worker
        return HttpResponseRedirect('/index')
    config_list = Config.objects.all()
    return render(request, 'tasks.html', {'config_list': config_list})
@login_required(login_url="/")
def scan_login(request):
    # NOTE(review): the login page itself is wrapped in @login_required,
    # so unauthenticated users can never reach it -- confirm intent.
    return render(request, 'login.html')
@login_required(login_url="/")
def scan_plugin(request):
    """Render the plugin list page."""
    context = {'plugin_list': Plugins.objects.all()}
    return render(request, 'plugin.html', context)
@login_required(login_url="/")
def domain_info(request, task_id):
    """Render the domain-scan results for one task, ordered by IP."""
    rows = Domain.objects.filter(taskid=task_id).order_by('ip')
    context = {'domain_list': rows, 'task_id': task_id}
    return render(request, 'domaininfo.html', context)
@login_required(login_url="/")
def service_info(request, task_id):
    """Render the service-scan results for one task, ordered by IP then port."""
    rows = Service.objects.filter(taskid=task_id).order_by('ip', 'port')
    context = {'service_list': rows, 'task_id': task_id}
    return render(request, 'serviceinfo.html', context)
@login_required(login_url="/")
def file_info(request, task_id):
    """Render the weak-file findings for one task."""
    rows = Weakfile.objects.filter(taskid=task_id)
    context = {'weakfile_list': rows, 'task_id': task_id}
    return render(request, 'fileinfo.html', context)
@login_required(login_url="/")
def logout(request):
    """Log the current user out and return to the login page."""
    auth.logout(request)
    return HttpResponseRedirect("/")
@login_required(login_url="/")
def scan_config(request):
    """Render the scan-configuration list."""
    context = {'config_list': Config.objects.all()}
    return render(request, 'config.html', context)
@login_required(login_url="/")
def config_add(request):
    """Create a new scan Config from the POSTed form; GET shows the form."""
    if request.method != 'POST':
        return render(request, 'cadd.html')
    Config(
        name=request.POST.get('name'),
        ports=request.POST.get('ports'),
        description=request.POST.get('description'),
    ).save()
    return HttpResponseRedirect("/config")
@login_required(login_url="/")
def config_edit(request, id):
    """Edit ports/description of a Config; POST applies, GET shows the form."""
    if request.method == 'POST':
        Config.objects.filter(id=id).update(
            ports=request.POST.get('ports'),
            description=request.POST.get('description'),
        )
        return render(request, 'config.html',
                      {'config_list': Config.objects.all()})
    rows = Config.objects.filter(id=id).all()
    return render(request, 'cedit.html', {'config_rs': rows})
@login_required(login_url="/")
def scan_profile(request):
    # NOTE(review): always shows user id 1, not request.user -- confirm
    # this is a single-user deployment.
    user = User.objects.get(id = 1)
    return render(request, 'profile.html', {'user' : user})
def celery_test(request):
    # Manual smoke test: enqueue a fixed task id through Celery.
    # NOTE(review): unauthenticated on purpose? Confirm before deploying.
    taskid = '5d16a5715682a70f2ea3'
    manage_task.delay(taskid)
    return render(request, 'login.html')
|
import requests
from urllib.parse import urlencode
import hmac, hashlib, datetime, math, json
import traceback
from numpy import linspace
from pandas import to_numeric
from time import time
from pytz import reference
from ..utils.utils import since
from ..utils.types import *
ROOT_URL = 'https://api.binance.com/api/v3/'  # base endpoint for all REST calls
def get_all_pairs(symbol_s: Optional[Union[str, List[str]]] = None) -> Union[Pair, List[Pair]]:
    """Fetch trading-pair metadata and current prices from Binance.

    symbol_s -- a single symbol, a list of symbols, or None for all pairs.
    Returns a single Pair when one symbol was requested, else a list of
    Pair; returns None (after logging) if the request or parsing fails.
    """
    try:
        endpoint = 'exchangeInfo'
        if symbol_s:
            if isinstance(symbol_s, str): endpoint += ('?symbol='+symbol_s)
            else: endpoint += ('?symbols=['+','.join(['\"'+a+'\"' for a in symbol_s])+']')
        exchange_info = requests.get(ROOT_URL + endpoint).json()
        # symbol -> last traded price, merged into each Pair below.
        prices = {p['symbol']: float(p['price']) for p in requests.get(ROOT_URL + 'ticker/price').json()}
        pair_list = [Pair(
            symbol=s['symbol'],
            left=s['baseAsset'],
            right=s['quoteAsset'],
            left_min=float(next((f for f in s['filters'] if f['filterType'] == 'LOT_SIZE'), None)['minQty']),
            right_min=float(next((f for f in s['filters'] if f['filterType'] == 'MIN_NOTIONAL'), None)['minNotional']),
            # digits after the decimal point of the LOT_SIZE step.
            left_prec=f"{float(s['filters'][2]['stepSize']):g}"[::-1].find('.'),
            right_prec=int(s['quoteAssetPrecision']),
            curr_price=prices[s['symbol']]
        ) for s in exchange_info['symbols']]
        if symbol_s and isinstance(symbol_s, str):
            return pair_list[0]
        return pair_list
    except Exception:
        # BUGFIX: print(traceback.print_exc()) printed the traceback plus a
        # stray 'None' -- print_exc() writes to stderr and returns None.
        print('** FAILED TO GET ALL PAIRS **')
        traceback.print_exc()
def get_pair(symbol: str):
    """Return the Pair for *symbol*, or None on failure.

    NOTE(review): get_all_pairs() swallows its own exceptions and returns
    None, so the except branch here is effectively unreachable -- confirm.
    """
    try: return get_all_pairs(symbol)
    except: print(symbol, 'is not a valid pair.')
def get_backdata(symbol: str, interval: str, ticks: int) -> DataFrame:
    """Download *ticks* OHLCV candles for *symbol* at *interval*.

    Requests are chunked because Binance caps a single klines request at
    1000 candles.  Returns a DataFrame indexed by local-time ISO strings
    with numeric open/high/low/close/volume columns, deduplicated and
    sorted by time.
    """
    # BUGFIX: DataFrame.append was deprecated and removed in pandas 2.x;
    # use pandas.concat instead (local import keeps this block standalone).
    from pandas import concat
    klines = []
    MAX_TICK_REQUEST = 1000  # hard API limit per klines request
    epoch_to_iso = lambda t: datetime.datetime.fromtimestamp(float(t / 1000),tz=reference.LocalTimezone()).strftime('%Y-%m-%d %H:%M:%S')
    cols = ["time", "open", "high", "low", "close", "volume", "close_time", "qav", "trade_count", "taker_bav", "taker_qav", "ignore"]
    drop_cols = ["close_time", "qav", "trade_count", "taker_bav", "taker_qav", "ignore"]
    num_cols = ["open", "high", "low", "close", "volume"]
    data = DataFrame(columns=cols)
    if ticks > MAX_TICK_REQUEST:
        curr_epoch = int(datetime.datetime.now(tz=reference.LocalTimezone()).timestamp())
        since_epoch = since(interval, ticks, curr_epoch)
        epoch_count = math.ceil(ticks/MAX_TICK_REQUEST) + 1
        # Evenly spaced millisecond start times, one per chunked request.
        epochs = linspace(since_epoch*1000, curr_epoch*1000, epoch_count, dtype=int)[:-1]
        for epoch in epochs:
            params = {'symbol': symbol, 'interval': interval, 'startTime': epoch, 'limit': MAX_TICK_REQUEST}
            klines = json.loads(requests.get(ROOT_URL + 'klines', params=params).text)
            temp = DataFrame(klines, columns=cols)
            temp['time'] = temp['time'].apply(epoch_to_iso)
            temp = temp.set_index('time')
            temp = temp.drop_duplicates()
            # Append only rows whose timestamps are not already present.
            data = concat([data, temp.loc[temp.index.difference(data.index), :]])
        data = data.drop(drop_cols, axis=1)
        data[num_cols] = data[num_cols].apply(to_numeric, axis=1)
    else:
        params = {'symbol': symbol, 'interval': interval, 'limit': ticks}
        klines = json.loads(requests.get(ROOT_URL + 'klines', params=params).text)
        data = DataFrame(klines, columns=cols).drop(drop_cols, axis=1)
        data[num_cols] = data[num_cols].apply(to_numeric, axis=1)
        data["time"] = data["time"].apply(epoch_to_iso)
        data = data.set_index("time")
    if 'time' in data.columns: data = data.drop('time', axis=1)
    return data.loc[~(data.index.duplicated(False))].sort_index()
def __authenticated_request(http_method, endpoint, key, secret, params={}):
    """Perform an HMAC-SHA256 signed Binance request; returns the Response.

    NOTE: the mutable default `params={}` is kept for interface
    compatibility; it is only read (urlencode), never mutated in place.
    """
    def dispatch_request(http_method):
        session = requests.Session()
        session.headers.update({
            'Content-Type': 'application/json;charset=utf-8',
            'X-MBX-APIKEY': key
        })
        # BUGFIX: the fallback for an unknown verb was the *string* 'GET',
        # which is not callable; fall back to session.get instead.
        return {
            'GET': session.get,
            'DELETE': session.delete,
            'PUT': session.put,
            'POST': session.post,
        }.get(http_method, session.get)
    query_string = urlencode(params, True)
    query_string = query_string + ('&' if query_string else '') + 'timestamp=' + str(int(1000*time()))
    hashed = hmac.new(secret.encode('utf-8'), query_string.encode('utf-8'), hashlib.sha256).hexdigest()
    url = ROOT_URL + endpoint + '?' + query_string + '&signature=' + hashed
    params = {'url': url, 'params': {}}
    return dispatch_request(http_method)(**params)
def get_available_pairs(tld: str):
    """Return every trading symbol listed on the exchange.

    `tld` is accepted for interface compatibility but unused here.
    """
    exchange_info = requests.get(ROOT_URL + 'exchangeInfo').json()
    # perf: removed a second network request (ticker/price) whose result
    # was bound to an unused variable.
    return [s['symbol'] for s in exchange_info['symbols']]
def authenticate(key: str, secret: str) -> bool:
    """True if key/secret can sign a successful GET /account request."""
    return __authenticated_request('GET', 'account', key, secret).status_code == 200
'''
from websocket import WebSocketApp
from urllib.parse import urlencode
from flask import Flask, request
from pandas import to_numeric
from time import sleep, time
import numpy as np
import traceback
import datetime
import requests
import hashlib
import hmac
import json
import sys
from yaza.client import client_app, page_not_found
from yaza.optimize import optimize, backtest
from yaza.paperwallet import PaperWallet
from yaza.plot import plot
from yaza.utils.types import *
from yaza.utils.utils import *
BASE_URL = 'https://api.binance.us/api/v3/'  # Binance.US REST base endpoint
TRADING_FEE = 0.001  # 0.1% fee assumed when sizing orders
# Binance.US Specific Websocket
class Websocket:
    """Thin wrapper around websocket-client streaming one kline channel.

    Each message is parsed into an OHLCV row dict and handed to *callback*.
    """
    def __init__(self, pair: str, interval: str, callback: Callable, tld: str = 'us'):
        # NOTE(review): newer websocket-client versions pass extra args to
        # on_close (status code, message) -- confirm the installed version.
        self.ws = WebSocketApp(
            f"wss://stream.binance.{tld}:9443/ws/{pair.lower()}@kline_{interval}",
            on_message = lambda ws, msg: self.on_message(msg, callback),
            on_error = lambda ws, err: print(f"[{datetime.datetime.now().isoformat(' ', 'seconds')}] {err}"),
            on_close = lambda ws: print(f"[{datetime.datetime.now().isoformat(' ', 'seconds')}] websocket closed"),
            on_open = lambda ws: print(f"[{datetime.datetime.now().isoformat(' ', 'seconds')}] websocket opened")
        )
    # Class-level defaults; overwritten per instance by async_start()/run().
    thread = None
    keep_running = False
    def run(self):
        # Reconnect loop: run_forever() returns on disconnect; keep going
        # until stop() clears the flag.
        self.keep_running = True
        while(self.keep_running):
            self.ws.run_forever()
    def async_start(self):
        # Run the reconnect loop on a background thread.
        self.thread = Thread(target=self.run)
        self.thread.start()
    def stop(self):
        self.keep_running = False
        self.ws.close()
    def on_message(self, msg, callback):
        """Parse one kline payload into an OHLCV row and invoke *callback*."""
        try:
            msg = json.loads(msg)
            time = datetime.datetime.fromtimestamp(float(msg['k']['t']/1000)).strftime('%Y-%m-%d %H:%M:%S')
            row = {'time': time, 'open': float(msg['k']['o']), 'high': float(msg['k']['h']), 'low': float(msg['k']['l']), 'close': float(msg['k']['c']), 'volume': float(msg['k']['v'])}
            callback(row)
        except:
            print('** something went wrong **')
            print(traceback.print_exc())
# get historical OHLCV candles
def get_backdata(pair: Pair, interval: str, ticks: int) -> DataFrame:
    """Download *ticks* OHLCV candles for *pair* at *interval* from Binance.US.

    Chunked because the API caps one klines request at 1000 candles.
    Returns a DataFrame indexed by local-time ISO strings with numeric
    open/high/low/close/volume columns, deduplicated and sorted.
    """
    # BUGFIX: DataFrame.append was deprecated and removed in pandas 2.x;
    # use pandas.concat instead (local import keeps this block standalone).
    from pandas import concat
    klines = []
    max_tick_request = 1000  # hard API limit per klines request
    epoch_to_iso = lambda t: datetime.datetime.fromtimestamp(float(t / 1000),tz=reference.LocalTimezone()).strftime('%Y-%m-%d %H:%M:%S')
    cols = ["time", "open", "high", "low", "close", "volume", "close_time", "qav", "trade_count", "taker_bav", "taker_qav", "ignore"]
    drop_cols = ["close_time", "qav", "trade_count", "taker_bav", "taker_qav", "ignore"]
    num_cols = ["open", "high", "low", "close", "volume"]
    data = DataFrame(columns=cols)
    if ticks > max_tick_request:
        curr_epoch = int(datetime.datetime.now(tz=reference.LocalTimezone()).timestamp())
        since_epoch = since(interval, ticks, curr_epoch)
        epoch_count = math.ceil(ticks/max_tick_request) + 1
        epochs = np.linspace(since_epoch*1000, curr_epoch*1000, epoch_count, dtype=int)[:-1]
        for epoch in epochs:
            params = {'symbol': pair, 'interval': interval, 'startTime': epoch, 'limit': max_tick_request}
            # Consistency: use BASE_URL (same string as the old literal).
            klines = json.loads(requests.get(BASE_URL + 'klines', params=params).text)
            temp = DataFrame(klines, columns=cols)
            temp['time'] = temp['time'].apply(epoch_to_iso)
            temp = temp.set_index('time')
            temp = temp.drop_duplicates()
            # Append only rows whose timestamps are not already present.
            data = concat([data, temp.loc[temp.index.difference(data.index), :]])
        data = data.drop(drop_cols, axis=1)
        data[num_cols] = data[num_cols].apply(to_numeric, axis=1)
    else:
        params = {'symbol': pair, 'interval': interval, 'limit': ticks}
        klines = json.loads(requests.get(BASE_URL + 'klines', params=params).text)
        data = DataFrame(klines, columns=cols).drop(drop_cols, axis=1)
        data[num_cols] = data[num_cols].apply(to_numeric, axis=1)
        data["time"] = data["time"].apply(epoch_to_iso)
        data = data.set_index("time")
    return data.loc[~(data.index.duplicated(False))].sort_index()
# get all pairs info
def get_all_pairs(symbol_s: Optional[Union[str, List[str]]] = None) -> Union[Pair, List[Pair]]:
    """Fetch trading-pair metadata and current prices from Binance.US.

    symbol_s -- a single symbol, a list of symbols, or None for all pairs.
    Returns a single Pair when one symbol was requested, else a list of
    Pair; returns None (after logging) if the request or parsing fails.
    """
    try:
        endpoint = 'exchangeInfo'
        if symbol_s:
            if isinstance(symbol_s, str): endpoint += ('?symbol='+symbol_s)
            else: endpoint += ('?symbols=['+','.join(['\"'+a+'\"' for a in symbol_s])+']')
        exchange_info = requests.get(BASE_URL + endpoint).json()
        prices = {p['symbol']: float(p['price']) for p in requests.get(BASE_URL + 'ticker/price').json()}
        pair_list = [Pair(
            symbol=s['symbol'],
            left=s['baseAsset'],
            right=s['quoteAsset'],
            left_min=float(next((f for f in s['filters'] if f['filterType'] == 'LOT_SIZE'), None)['minQty']),
            right_min=float(next((f for f in s['filters'] if f['filterType'] == 'MIN_NOTIONAL'), None)['minNotional']),
            # digits after the decimal point of the LOT_SIZE step.
            left_prec=f"{float(s['filters'][2]['stepSize']):g}"[::-1].find('.'),
            right_prec=int(s['quoteAssetPrecision']),
            curr_price=prices[s['symbol']]
        ) for s in exchange_info['symbols']]
        if symbol_s and isinstance(symbol_s, str):
            return pair_list[0]
        return pair_list
    except Exception:
        # BUGFIX: print(traceback.print_exc()) printed the traceback plus a
        # stray 'None' -- print_exc() writes to stderr and returns None.
        print('** FAILED TO GET ALL PAIRS **')
        traceback.print_exc()
# get Pair object given symbol
def get_pair(symbol: str) -> Pair:
    """Return the Pair for *symbol*, or None on failure.

    NOTE(review): get_all_pairs() swallows its own exceptions and returns
    None, so the except branch here is effectively unreachable -- confirm.
    """
    try: return get_all_pairs(symbol)
    except: print(symbol, 'is not a valid pair.')
class Exchange:
__key, __secret = None, None
def __init__(self, strategy: Callable[[DataFrame,dict],DataFrame], config: dict, key: Optional[str] = None, secret: Optional[str] = None):
self.strategy = strategy
self.config = config
if key and secret:
self.__key = key,
self.__secret = secret
ws = None
data = {}
CURR_ORDER_ID = None
# optimize strategy config using data from Binance
def optimize(self,
symbol: str,
interval: str,
strategy: Callable[[DataFrame, dict], DataFrame],
config_range: dict,
) -> dict:
data = get_backdata(symbol, interval, min_ticks=5000)
return optimize(data, strategy, config_range)
# backtest strategy+config using data from Binance
def backtest(self,
symbol: str,
interval: str,
strategy: Callable[[DataFrame, dict], DataFrame],
config: dict,
plot: bool = False
) -> DataFrame:
data = get_backdata(symbol, interval, min_ticks=1000)
return backtest(data, config, 0.001, plot)
# make private api request
def __authenticated_request(self, http_method, endpoint, params={}):
def dispatch_request(http_method):
session = requests.Session()
session.headers.update({
'Content-Type': 'application/json;charset=utf-8',
'X-MBX-APIKEY': self.__key
})
return {
'GET': session.get,
'DELETE': session.delete,
'PUT': session.put,
'POST': session.post,
}.get(http_method, 'GET')
query_string = urlencode(params, True)
query_string = query_string + ('&' if query_string else '') + 'timestamp=' + str(int(1000*time()))
hashed = hmac.new(self.__secret.encode('utf-8'), query_string.encode('utf-8'), hashlib.sha256).hexdigest()
url = BASE_URL + endpoint + '?' + query_string + '&signature=' + hashed
params = {'url': url, 'params': {}}
response = dispatch_request(http_method)(**params)
return response.json()
    # create any possible type of order
    def create_order(self,
            symbol: str,
            side: str,
            type: str,
            quantity: Optional[float] = None,
            quoteOrderQty: Optional[float] = None,
            price: Optional[float] = None,
            stopPrice: Optional[float] = None,
            test: bool = False
        ):
        """Place an order and remember its id in self.CURR_ORDER_ID.

        NOTE(review): `price` and `stopPrice` are accepted but never added
        to `params`, so limit/stop prices are silently dropped -- confirm.
        NOTE(review): pair['pair'] / pair['left_precision'] use mapping
        access and keys that differ from Pair's field names (left_prec) --
        confirm Pair supports this.
        """
        pair = get_pair(symbol)
        endpoint = 'order/test' if test else 'order'
        params = {
            'symbol': pair['pair'],
            'side': side.upper(),
            'type': type
        }
        # Quantities are shaved by the trading fee and rounded to precision.
        if quantity: params['quantity'] = safe_round(quantity * (1-TRADING_FEE), pair['left_precision'])
        if quoteOrderQty: params['quoteOrderQty'] = safe_round(quoteOrderQty * (1-TRADING_FEE), pair['right_precision'])
        self.CURR_ORDER_ID = self.__authenticated_request('POST', endpoint, params)['orderId']
        print(f'** PLACED ORDER ({params}) **')
    # get order by self.CURR_ORDER_ID
    def get_curr_order(self, data: DataFrame) -> Optional[dict]:
        """Fetch the tracked order; records fills into *data*'s last row.

        Returns the order dict, or None if no order is tracked, it failed,
        or the lookup raised.
        """
        if not self.CURR_ORDER_ID: return None
        try:
            order = self.__authenticated_request('GET', 'allOrders', {'orderId': self.CURR_ORDER_ID})[0]
            if order['status'] == 'FILLED':
                if order['side'] == 'BUY':
                    data.loc[data.index[-1], "bought"] = float(order['price'])
                else:
                    data.loc[data.index[-1], "sold"] = float(order['price'])
            elif not order['status'] in ['NEW','PARTIALLY_FILLED']:
                self.CURR_ORDER_ID = None # order failed
                return None
            return order
        except:
            self.CURR_ORDER_ID = None # order doesn't exist
            return None
    # cancel order by self.CURR_ORDER_ID
    def cancel_curr_order(self):
        """Cancel the tracked order and forget its id.

        NOTE(review): Binance's DELETE /order normally also requires
        'symbol' -- confirm this call succeeds with orderId alone.
        """
        self.__authenticated_request('DELETE', 'order', {'orderId': self.CURR_ORDER_ID})
        self.CURR_ORDER_ID = None
    # async thread for buying
    def __buy_thread(self, data, symbol):
        """Worker: place a stop-limit buy, poll until filled, then place the
        follow-up stop-loss sell for the acquired quantity."""
        pair = get_pair(symbol)
        balances = self.get_curr_balance()
        # Buy trigger slightly above market per config['stop_limit_buy'].
        price = pair.curr_price * (1+self.config['stop_limit_buy'])
        qty = balances[pair.right] / price
        if qty < pair['left_min']: return
        self.create_order(symbol, 'BUY', 'STOP_LOSS', quantity=qty, price=price)
        while(True):
            order = self.get_curr_order(data)
            if (not order) or order['side']=='SELL': return # buy order was cancelled
            if order['status'] in ['NEW','PARTIALLY_FILLED']: sleep(10)
            elif order['status'] == 'FILLED':
                data.loc[data.index[-1], "bought"] = float(order['price'])
                # Refresh price and balance, then protect with a stop-loss.
                pair = get_pair(symbol)
                balances = self.get_curr_balance()
                price = pair.curr_price * (1-self.config['stop_limit_loss'])
                qty = balances[pair.left]
                if qty < pair['left_min']: return
                self.create_order(symbol, 'SELL', 'STOP_LOSS', quantity=qty, price=price)
                return
    # buy, then stop_loss after fill
    def async_buy(self, data, symbol):
        """Start a background buy unless a better buy order already exists."""
        order = self.get_curr_order(data)
        pair = get_pair(symbol)
        if order:
            if order['side']=='SELL' or pair.curr_price < order['price']:
                self.cancel_curr_order()
            else: return # better buy order exists
        thread = Thread(target=self.__buy_thread, args=[data, symbol])
        thread.start()
    # sell
    def sell(self, data, symbol):
        """Place a stop-limit sell for the full base-asset balance,
        cancelling a worse existing order first."""
        order = self.get_curr_order(data)
        pair = get_pair(symbol)
        if order:
            if order['side']=='BUY' or pair.curr_price > order['price']:
                self.cancel_curr_order()
            else: return # better sell order exists
        balances = self.get_curr_balance()
        # Sell trigger slightly below market per config['stop_limit_sell'].
        price = pair.curr_price * (1-self.config['stop_limit_sell'])
        qty = balances[pair.left]
        if qty < pair['left_min']: return
        self.create_order(symbol, 'SELL', 'STOP_LOSS', quantity=qty, price=price)
    # get current balance of all assets as dict
    def get_curr_balance(self) -> Dict[str,float]:
        """Map asset symbol -> free balance, omitting zero balances."""
        return {b['asset']: float(b['free']) for b in self.__authenticated_request('GET', 'account')['balances'] if float(b['free']) > 0}
    # get total balance of all assets in USD as float
    def quote_curr_balance(self, symbol: Optional[str] = None):
        """Total balance quoted in USD, or in *symbol*'s quote asset.

        NOTE(review): balances.pop('USD') raises KeyError when the account
        holds no USD -- confirm that cannot happen in practice.
        """
        balances = self.get_curr_balance()
        if symbol:
            pair = get_pair(symbol)
            return balances[pair.right] + (balances[pair.left] * pair.curr_price)
        usd = balances.pop('USD')
        # Value every remaining asset at its <asset>USD market price.
        others = [get_pair(k+'USD').curr_price * v for k,v in balances.items()]
        return usd + sum(others)
    # for each websocket ping, update on new tick
    def on_message(self, symbol, row):
        """Handle one OHLCV row from the websocket: update the frame, rerun
        the strategy, and trade (paper or live) on its buy/sell signals."""
        try:
            pair = get_pair(symbol)
            time = row.pop('time')
            if time in self.data.index.values:
                # Same candle updated in place.
                self.data.loc[time, row.keys()] = row.values()
            else:
                print(f'[{time}] {row}')
                # NOTE(review): DataFrame.append is removed in pandas 2.x;
                # this path needs pandas.concat when pandas is upgraded.
                self.data = self.data.append(DataFrame(row, index=[time])).tail(max(500,self.config['min_ticks']))
                self.data = self.strategy(self.data, self.config)
                if self.paper_wallet:
                    self.data = self.paper_wallet.update(self.data, pair.left, pair.right)
                    self.data.loc[self.data.index[-1], "balance"] = self.paper_wallet.get_balance(pair.left, pair.right, pair.curr_price)
                else:
                    if self.data['buy'].iat[-1]:
                        self.async_buy(self.data, symbol)
                    elif self.data['sell'].iat[-1]:
                        self.sell(self.data, symbol)
                    self.data.loc[self.data.index[-1], "balance"] = self.quote_curr_balance(symbol)
        except:
            print('** BAD TICK **')
            print(traceback.print_exc())
# async run api function
def api(self):
app = Flask(__name__)
@app.route("/balance", methods=['GET'])
def get_balance():
if self.paper_wallet: return self.paper_wallet.balance
return self.get_balances()
@app.route("/data", methods=['GET'])
def get_data():
return self.data.to_json()
@app.route("/curr_order", methods=['GET'])
def get_curr_order():
if self.paper_wallet:
return 'TODO: get current paper order'
return self.get_curr_order(self.data)
@app.route("/paper_wallet", methods=['GET', 'POST'])
def get_set_paper_wallet():
if request.method == 'POST':
for k,v in request.form:
if k == 'verbose': self.paper_wallet.verbose = bool(v)
elif k == 'debug': self.paper_wallet.debug = bool(v)
elif k == 'balance': self.paper_wallet.balance = json.loads(v)
return
elif request.method == 'GET':
return {
'balance': self.paper_wallet.balance,
'stop_buy': self.paper_wallet.stop_buy,
'stop_sell': self.paper_wallet.stop_sell,
'stop_loss': self.paper_wallet.stop_loss,
'open_trade': self.paper_wallet.open_trade
}
@app.route("/config", methods=['GET', 'POST'])
def get_set_config():
if request.method == 'GET':
return self.config
elif request.method == 'POST':
self.config = json.loads(request.form['config'])
self.paper_wallet.stop_limit_buy = self.config['stop_limit_buy']
self.paper_wallet.stop_limit_sell = self.config['stop_limit_sell']
self.paper_wallet.stop_limit_loss = self.config['stop_limit_loss']
return
@app.route("/plot", methods=['GET'])
def get_plot():
return plot(self.data, as_html=True)
@app.route("/", methods=['GET'])
def get_app():
plot_html = plot(self.data, as_html=True, embed=True)
balance = self.paper_wallet.balance if self.paper_wallet else self.get_curr_balance()
return client_app(balance, plot_html)
@app.errorhandler(404)
def page_not_found(error):
return page_not_found()
if sys.platform in ['linux','linux2']: app.run(host='0.0.0.0', port=5000)
elif sys.platform == 'darwin': app.run(port=8000)
    # handle to the Flask API worker thread; set by start_trading()
    api_thread = None
    # start websocket, api, and live trading
    def start_trading(self, symbol: str, interval: str):
        """Bootstrap history, then start the websocket feed and the API thread.

        Falls back to a PaperWallet (seeded with 100 quote units) when no
        API key/secret is configured.
        """
        pair = get_pair(symbol)
        if not (self.__key or self.__secret):
            # no credentials -> simulate trades instead of placing real orders
            self.paper_wallet = PaperWallet(self.config, init_balance={pair.left: 0.0, pair.right: 100.0}, init_pairs=[pair.left+pair.right], verbose=True, debug=True)
        self.data = get_backdata(symbol, interval, max(100, self.config['min_ticks']))
        self.data = self.strategy(self.data, self.config)
        balance = self.quote_curr_balance(symbol) if self.__key and self.__secret else self.paper_wallet.get_balance(pair.left, pair.right, pair.curr_price)
        # seed the bookkeeping columns the tick handler updates later
        self.data[['bought','sold','stop_lossed','balance']] = [None, None, None, balance]
        self.ws = Websocket(symbol, interval, lambda row: self.on_message(symbol, row))
        self.ws.async_start()
        self.api_thread = KillableThread(target=self.api)
        self.api_thread.start()
    # stop websocket, api, and live trading
    def stop_trading(self, symbol: str, selloff: bool = False):
        """Shut down the API thread and websocket, cancel any open order,
        and optionally market-sell the position when a SELL was pending.

        NOTE(review): the *symbol* parameter is unused here — confirm intent.
        """
        self.api_thread.kill()
        self.api_thread.join()
        self.ws.stop()
        order = self.get_curr_order(self.data)
        if order:
            self.cancel_curr_order()
            if selloff and order['side']=='SELL':
                # dump the whole base-asset balance at market price
                pair = get_pair(order['symbol'])
                self.create_order(pair.symbol, 'SELL', 'MARKET', self.get_curr_balance()[pair.left])
''' |
from unittest.mock import patch
import pytest
from feed_proxy import conf
# Alias for the class under test.
reader_class = conf.ConfigReader
# Whitespace-separated words become a tuple; surrounding blanks are ignored
# and non-ASCII input is preserved.
@pytest.mark.parametrize('string,expected', [
    ('', tuple()),
    ('cookie', ('cookie',)),
    ('icecream chocolate', ('icecream', 'chocolate')),
    (' hop hey ', ('hop', 'hey')),
    ('поп корн', ('поп', 'корн')),
])
def test_convert_tuple(string, expected):
    assert reader_class.convert_tuple(string) == expected
# '<br>' tags in templates are rewritten as newlines; other text is untouched.
@pytest.mark.parametrize('string,expected', [
    ('{message}<br><br>{source_tags}', '{message}\n\n{source_tags}'),
    ('{message} <br> <br> {source_tags}', '{message} \n \n {source_tags}'),
])
def test_convert_template(string, expected):
    assert reader_class.convert_template(string) == expected
# Comma-separated tags become a lowercased tuple; spaces inside a tag survive.
@pytest.mark.parametrize('string,expected', [
    ('tag1', ('tag1',)),
    ('tag 1', ('tag 1',)),
    ('tag1,tag2', ('tag1', 'tag2')),
    ('tag1,tag 2', ('tag1', 'tag 2')),
    ('таг с кириллицей', ('таг с кириллицей',)),
    ('TAG with Uppercase', ('tag with uppercase',)),
])
def test_convert_excludepostbytags(string, expected):
    assert reader_class.convert_excludepostbytags(string) == expected
@pytest.fixture()
def converters():
    """
    Expected ConfigReader converters
    """
    # keys are the converter names configparser exposes as get<name>() methods
    return {
        'tuple': reader_class.convert_tuple,
        'template': reader_class.convert_template,
        'excludepostbytags': reader_class.convert_excludepostbytags,
    }
def test_get_converters(converters):
    # the reader must advertise exactly the converters above
    assert reader_class.get_converters() == converters
@pytest.fixture()
def kwargs(converters):
    """
    Expected ConfigReader kwargs
    """
    return {'converters': converters}
def test_get_parser_kwargs(kwargs):
    assert reader_class.get_parser_kwargs() == kwargs
def test_get_parser_kwargs_empty_converters(mocker, kwargs):
    # with no converters registered, no kwargs should be passed to ConfigParser
    mocker.patch.object(reader_class, 'get_converters', return_value={})
    assert reader_class.get_parser_kwargs() == {}
@patch.object(conf.configparser, 'ConfigParser', return_value=42)
def test_create_parser(mock_ConfigParser, kwargs):
    # create_parser must forward the expected kwargs and return the parser verbatim
    assert reader_class.create_parser() == 42
    mock_ConfigParser.assert_called_once_with(**kwargs)
@patch.object(reader_class, 'create_parser')
def test_read_from_file(mock_create_parser):
    # read_from_file builds a parser, reads the path into it, and returns it
    mock_parser = mock_create_parser.return_value
    assert reader_class.read_from_file('/path') == mock_parser
    mock_parser.read.assert_called_once_with('/path')
|
from collections import deque
class Treenode(object):
    """Binary-tree node: a value plus left/right child links (initially absent)."""
    def __init__(self, val):
        self.val = val
        self.left = self.right = None
class Solution(object):
    """Level-order utilities over a Treenode binary tree."""

    def addNode(self, val):
        """Create a fresh Treenode wrapping *val*."""
        return Treenode(val)

    def k_list_of_lists_btree(self, root, k):
        """Return the first *k* levels of the tree as a list of value lists.

        Returns None for an empty tree; stops early once either *k* levels
        have been collected or the tree runs out of levels.
        """
        if root is None:
            return
        pending = deque([root])
        levels = []
        while pending and k > 0:
            # snapshot the current level, then queue up its children
            current = [pending.popleft() for _ in range(len(pending))]
            levels.append([node.val for node in current])
            for node in current:
                if node.left:
                    pending.append(node.left)
                if node.right:
                    pending.append(node.right)
            k -= 1
        return levels
# Demo: build a 3-level complete tree and print its first two levels.
S = Solution()
root = S.addNode(1)
root.left = S.addNode(2)
root.right = S.addNode(3)
root.left.left = S.addNode(4)
root.left.right = S.addNode(5)
root.right.left = S.addNode(6)
root.right.right = S.addNode(7)
# Parenthesized so this runs under both Python 2 and Python 3
# (the original bare `print expr` statement is a SyntaxError on Python 3).
print(S.k_list_of_lists_btree(root, 2))
from bs4 import BeautifulSoup
from urllib.request import Request,urlopen
from urllib.parse import urljoin
from urllib.error import URLError
import time
import telepot
import json
import sys
import codecs
# download pip
# wget https://bootstrap.pypa.io/get-pip.py --no-check-certificate
# python -m pip install BeautifulSoup4
# python3 -m pip install telepot
# python3 -m pip install feedparser
CONFIG_FILE = 'setting.json'
def parseConfig(filename):
    """Read *filename* as UTF-8 JSON and return the decoded settings dict.

    The original opened the file without a context manager, leaking the
    handle whenever json.loads() raised; `with` guarantees it is closed.
    """
    with codecs.open(filename, 'r', "utf-8") as f:
        return json.loads(f.read())
def getConfig(config):
    """Publish the parsed settings into the module-level globals the crawler uses."""
    global TOKEN
    global VALID_USERS
    global SEARCH_KEYWORDS
    global my_chat_id
    TOKEN = config['common']['token']
    VALID_USERS = config['common']['valid_users']
    # the first valid user doubles as the chat that receives notifications
    my_chat_id = VALID_USERS[0]
    SEARCH_KEYWORDS = config['search_keywords']
# --- script bootstrap: load settings and announce startup via Telegram -------
config = parseConfig(CONFIG_FILE)
if not bool(config):
    print ("Err: Setting file is not found")
    exit()
getConfig(config)
print(config)
bot = telepot.Bot(TOKEN)
# startup notice ("the Clien Monitor service has started")
startMsg="Clien Monitor 서비스가 시작되었습니다."
bot.sendMessage(my_chat_id, startMsg)
#hide_keyboard = {'hide_keyboard': True}
#bot.sendMessage(my_chat_id, 'I am hiding it', reply_markup=hide_keyboard)
# marketplace ("sold") board
base_url = "http://www.clien.net/cs2/bbs/board.php?bo_table=sold"
# community park board (disabled)
#base_url = "http://www.clien.net/cs2/bbs/board.php?bo_table=park"
# @retry(URLError, tries=4, delay=3, backoff=2)
# def urlopen_with_retry(url_request):
# return urlopen(url_request).read()
# @retry(urllib2.URLError, tries=4, delay=3, backoff=2)
# def urlopen_with_retry():
# return urllib2.urlopen("http://example.com")
url_request = Request(base_url,headers={'User-Agent': 'Mozilla/5.0'})
for x in range(10): # Always limit number of retries
    try:
        clien_market_board = urlopen(url_request).read()
    # resp = urllib.request.urlopen(req)
    except URLError:
        time.sleep(2)
        raise # re-raise any other error
    else:
        break # We've got resp successfully, stop iteration
#clien_market_board = urlopen(url_request).read()
#clien_market_board = urlopen_with_retry(url_request)
bs4_clien = BeautifulSoup(clien_market_board,"html.parser")
find_mytr = bs4_clien.find_all("tr",attrs={'class':"mytr"})
# newest post id on the board; the main loop only reports posts above this
base_id = int(find_mytr[0].find('td').get_text(strip=True))
# Main poll loop: refetch the board every 2 minutes and push any new
# for-sale post matching a configured keyword to the Telegram chat.
while True:
    #print("Read Clien board %s" % base_id)
    #clien_market_board = urlopen(url_request).read()
    # retry the fetch forever; transient URL errors only delay the cycle
    while True:
        try:
            clien_market_board = urlopen(url_request).read()
            break;
        # resp = urllib.request.urlopen(req)
        except URLError:
            time.sleep(10)
            print("URL Open Error and now retrying")
            #bot.sendMessage(my_chat_id, "URL Open Error and now retrying")
            pass
    try:
        bs4_clien = BeautifulSoup(clien_market_board,"html.parser")
        find_mytr = bs4_clien.find_all("tr",attrs={'class':"mytr"})
        #print(find_mytr[0].find('td').get_text(strip=True))
        # id of the newest post in this fetch
        top_id = int(find_mytr[0].find('td').get_text(strip=True))
    except:
        # parse failure: keep the previous top_id and try the rows anyway
        print("top_id = %s" % top_id)
        pass
    for t in find_mytr:
        #print(t.find('wr_id').get_text(strip=True))
        current_id = int(t.find('td').get_text(strip=True))
        category = t.find('td',attrs={'class':'post_category'}).get_text(strip=True)
        item = t.find('td',attrs={'class':'post_subject'}).get_text(strip=True).encode('cp949','ignore').decode('cp949')
        # print(current_id, base_id, category, your_search, item)
        # "[판매]" is the "[for sale]" category; only unseen posts are reported
        if current_id > base_id and category == "[판매]":
            #print("base: %s, top : %s, current : %s" % (base_id, top_id, current_id))
            for your_search in SEARCH_KEYWORDS:
                #print(your_search)
                if your_search in item:
                    #print(t)
                    #print(t.find('td',attrs={'class':'post_category'}).get_text(strip=True))
                    print("제목 : "+t.find('td',attrs={'class':'post_subject'}).get_text(strip=True).encode('cp949','ignore').decode('cp949'))
                    title = "제목 : "+t.find('td',attrs={'class':'post_subject'}).get_text(strip=True).encode('cp949','ignore').decode('cp949')+"\n"
                    print("url : "+urljoin(base_url,t.find('td',attrs={'post_subject'}).a.get('href')))
                    url_result = urljoin(base_url,t.find('td',attrs={'post_subject'}).a.get('href'))
                    result = title + url_result
                    # print("글쓴이 : "+t.find('td',attrs={'class' : 'post_name'}).get_text(strip=True))
                    bot.sendMessage(my_chat_id, result)
                    break
    #print(base_id, top_id, current_id)
    # remember the newest id so the next cycle only reports newer posts
    base_id = top_id
    time.sleep(120)
|
import copy
import importlib
import math
import model.utils.dijakstra as dijakstra
import model.utils.dispatcher_utils as dispatcher_utils
from model.entities.vertex import Vertex
from model.entities.point import Point
import time
def run_algorithm_on_world(world, alg_name, alg_args, tpd):
    """Simulate one patrol of *world* with the named algorithm.

    Builds the vertex set, runs the patrol for *tpd* time units, and returns
    the world, the per-frame patrol, summary statistics (including wall-clock
    runtime in milliseconds), and any extra algorithm output.
    """
    vertexes = create_vertexes_from_visit_points(world)
    algo = import_algorithm(alg_name)
    milliseconds = int(round(time.time() * 1000))  # wall-clock start
    patrol, alg_output = start_patrol(world, algo, alg_args, tpd, vertexes)
    milliseconds = int(round(time.time() * 1000)) - milliseconds
    statistic = caclulate_statistic(
        world, vertexes, alg_name, tpd, milliseconds)
    return {
        'world': world,
        'frames': patrol,
        'statistic': statistic,
        'alg_output': alg_output,
    }
def create_vertexes_from_visit_points(world):
    """Wrap each of the world's visit points in a Vertex (probability, starvation, position)."""
    vertexes = []
    for point in world['visit_points']:
        vertexes.append(Vertex(point['probability'], point['starvation'], point['position']))
    return vertexes
def import_algorithm(alg_name):
    """Dynamically load the algorithm class for *alg_name*.

    The module model.algs.<alg_name> must define a class <Alg_name>
    (capitalized module name). Raises NameError when no such module exists.
    """
    module_path = 'model.algs.{}'.format(alg_name)
    try:
        algo_module = importlib.import_module(module_path)
    except ModuleNotFoundError:
        raise NameError("the algorithm: '{}' doesn't exist!".format(alg_name))
    return getattr(algo_module, alg_name.capitalize())
def start_patrol(world, algo_type, alg_args, tpd, vertexes):
    """Drive the robot with *algo_type* until *tpd* time units have elapsed.

    Each iteration asks the algorithm for the next vertex, walks there frame
    by frame, records incidental visits along the way, and annotates every
    frame with per-vertex metadata for the GUI. Returns (frames, alg_output).
    """
    distance_matrix = create_distance_matrix(world)
    global_time = 0
    # mutable robot state threaded through the simulation
    robot = {
        'position': vertexes[world['robot']['start_point']].point,
        'current_vertex': world['robot']['start_point'],
        'angle': world['robot']['start_angle'],
        'walk_speed': world['robot']['walk_speed'],
        'rotation_speed': world['robot']['rotation_speed'],
    }
    patrol = []
    algo = algo_type(world, distance_matrix, alg_args)
    algo.start() # some of the algorithms has preprocessing to do so the dispatcher let them do the calculations first
    while global_time < tpd:
        next_vertex = algo.next_step(
            robot['current_vertex'], vertexes, global_time)
        # get the frames of the current path
        frames_of_path = path_to_goal(robot, world, next_vertex)
        # check each frame if it has an unexpected visit (if a visit-point is on the best way to the target)
        for i, f in enumerate(frames_of_path):
            v_props = []
            for j, v in enumerate(vertexes):
                if f['position'] == v.point:
                    # passing over a vertex counts as visiting it
                    v.visit(global_time + i)
                v_props.append({ # adding metadata to each frame to display later in gui view
                    'last_visit': v.lv,
                    'total_starvation': v.ts,
                    'is_target': next_vertex == j,
                })
            f['vertexes'] = v_props
        robot['current_vertex'] = next_vertex
        # update robot's angle to last angle in path
        robot['angle'] = frames_of_path[-1]['angle']
        global_time += len(frames_of_path)
        vertexes[next_vertex].visit(global_time)
        patrol.extend(frames_of_path)
    # collecting output from the algorithm if exists (for example: the clusters)
    alg_output = algo.output()
    return patrol, alg_output
def path_to_goal(robot, world, next_vertex):
    """Frames that move the robot from its current vertex to *next_vertex*."""
    start = robot['current_vertex']
    heading = robot['angle']
    return complex_path_steps(world, heading, start, next_vertex)
def create_distance_matrix(world):
    """Pairwise path lengths (in frames) between every two visit points."""
    count = len(world['visit_points'])
    return [
        [complex_path_length(world, src, dst) for dst in range(count)]
        for src in range(count)
    ]
def complex_path_length(world, vp_src, vp_dst):
    """Number of frames needed to travel from vp_src to vp_dst (starting at angle 0)."""
    frames = complex_path_steps(world, 0, vp_src, vp_dst)
    return len(frames)
def complex_path_steps(world, current_angle, vp_src, vp_dst):
    """Frames for walking from visit point vp_src to vp_dst along the dijkstra path.

    The result starts with a seed frame at the source position/angle so each
    straight segment can chain its heading from the previous one.
    """
    path = dijakstra.get_points_path_with_dijakstra(
        world, world['visit_points'][vp_src]['position'], world['visit_points'][vp_dst]['position'])
    frames = [{'position': path[0], 'angle':current_angle}]
    if len(path) > 1:
        for i in range(0, len(path) - 1):
            frames += simple_path_steps(path[i], frames[-1]['angle'], path[i + 1],
                                        world['robot']['walk_speed'], world['robot']['rotation_speed'])
    return frames
def simple_path_steps(p_src, current_angle, p_dst, walk_speed, rotation_speed):
    """Frames for one straight segment: rotate toward *p_dst*, then walk to it.

    Returns a list of {'angle', 'position'} dicts; the final frame snaps
    exactly onto *p_dst* so rounding in the walk loop cannot accumulate.
    """
    target_angle = dispatcher_utils.calculate_new_angle(p_src, p_dst)
    # rotation frames come first (the original assigned frames = [] just
    # before this call — a dead store, removed here)
    frames = dispatcher_utils.get_rotation_frames(
        current_angle, target_angle, p_src, rotation_speed)
    total_distance = int(
        math.sqrt(((p_dst.x - p_src.x)**2) + ((p_dst.y - p_src.y)**2)))
    # advance walk_speed units per frame along the target heading
    for step in range(0, total_distance, walk_speed):
        frames.append({'angle': target_angle, 'position': Point(
            p_src.x + math.cos(target_angle) * step, p_src.y + math.sin(target_angle) * step)})
    frames.append({'angle': target_angle, 'position': p_dst})
    return frames
def caclulate_statistic(world, vertexes, alg_name, tpd, milliseconds):
    """Close out the patrol and summarize it.

    Every vertex gets a final visit at time *tpd* so its accumulated
    starvation is complete, then the probability-weighted starvation sum
    and run metadata are returned as a dict.
    """
    for vertex in vertexes:
        vertex.visit(tpd)
    total_price = sum(vertex.ts * vertex.p for vertex in vertexes)
    return {
        'total_price': total_price,
        'alg_name': alg_name,
        'tpd': tpd,
        'num_of_vertexes': len(vertexes),
        'runtime': str(milliseconds),
    }
return stat
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from contextlib import contextmanager
import threading
import tensorflow as tf
from tensorflow.python.framework import errors
from tensorflow.python.platform import tf_logging as logging
# from tensorflow.python.training import queue_runner as qr
# from tensorflow.python.training.queue_runner import QueueRunner
try:
from tensorflow.contrib.training import FeedingQueueRunner
except ImportError:
from tensorflow.contrib.learn.python.learn.dataframe.queues.feeding_queue_runner import FeedingQueueRunner
pass
class GenQueueRunner(FeedingQueueRunner):
    """FeedingQueueRunner with an optional 'waitempty' mode: when the feed
    generator is exhausted, wait until consumers have drained the queue
    before closing it, instead of closing immediately."""
    def __init__(self, *args, **kwargs):
        # pop our extra flag before delegating; base class doesn't know it
        self._waitempty = kwargs.pop('waitempty', False)
        super(GenQueueRunner, self).__init__(*args, **kwargs)
        # self._isempty = tf.equal(self.queue.size(), 0)
    # pylint: disable=broad-except
    def _run(self, sess, enqueue_op, feed_fn, coord=None):
        """Execute the enqueue op in a loop, close the queue in case of error.
        Args:
          sess: A `Session`.
          enqueue_op: The `Operation` to run.
          feed_fn: the feed function to pass to `sess.run`.
          coord: Optional `Coordinator` object for reporting errors and checking
            for stop conditions.
        """
        if coord:
            coord.register_thread(threading.current_thread())
        waitempty = self._waitempty
        decremented = False
        try:
            while True:
                if coord and coord.should_stop():
                    break
                try:
                    try:
                        # NOTE: @dade if generator stop wait during consuming remained data
                        feed_dict = None if feed_fn is None else feed_fn()
                        # enqueue data
                        sess.run(enqueue_op, feed_dict=feed_dict)
                    except StopIteration:
                        if coord and waitempty:
                            # wait for dequeueing: only close once consumers drained the queue
                            while not coord.should_stop():
                                # with self._lock:
                                if sess.run(self.queue.size()) == 0:
                                    raise StopIteration
                        raise StopIteration
                    except (errors.OutOfRangeError, errors.CancelledError, StopIteration):
                        # This exception indicates that a queue was closed.
                        with self._lock:
                            self._runs_per_session[sess] -= 1
                            decremented = True
                            if self._runs_per_session[sess] == 0:
                                try:
                                    sess.run(self._close_op)
                                except Exception as e:
                                    # Intentionally ignore errors from close_op.
                                    logging.vlog(1, "Ignored exception: %s", str(e))
                            return
                except Exception as e:
                    # This catches all other exceptions.
                    if coord:
                        coord.request_stop(e)
                    else:
                        logging.error("Exception in QueueRunner: %s", str(e))
                        with self._lock:
                            self._exceptions_raised.append(e)
                        raise
        finally:
            # Make sure we account for all terminations: normal or errors.
            if not decremented:
                with self._lock:
                    self._runs_per_session[sess] -= 1
|
import numpy as np
import pandas as pd
# IMPORTANT: DO NOT USE ANY OTHER 3RD PARTY PACKAGES
# (math, random, collections, functools, etc. are perfectly fine)
class DecisionTree:
    """Skeleton for a decision-tree classifier over discrete features.

    fit(), predict(), and get_rules() are assignment stubs to be implemented.
    """
    def __init__(self):
        # NOTE: Feel free add any hyperparameters
        # (with defaults) as you see fit
        pass
    def fit(self, X, y):
        """
        Generates a decision tree for classification
        Args:
            X (pd.DataFrame): a matrix with discrete value where
                each row is a sample and the columns correspond
                to the features.
            y (pd.Series): a vector of discrete ground-truth labels
        """
        # TODO: Implement
        raise NotImplementedError()
    def predict(self, X):
        """
        Generates predictions
        Note: should be called after .fit()
        Args:
            X (pd.DataFrame): an mxn discrete matrix where
                each row is a sample and the columns correspond
                to the features.
        Returns:
            A length m vector with predictions
        """
        # TODO: Implement
        raise NotImplementedError()
    def get_rules(self):
        """
        Returns the decision tree as a list of rules
        Each rule is given as an implication "x => y" where
        the antecedent is given by a conjuction of attribute
        values and the consequent is the predicted label
            attr1=val1 ^ attr2=val2 ^ ... => label
        Example output:
        >>> model.get_rules()
        [
            ([('Outlook', 'Overcast')], 'Yes'),
            ([('Outlook', 'Rain'), ('Wind', 'Strong')], 'No'),
            ...
        ]
        """
        # TODO: Implement
        raise NotImplementedError()
# --- Some utility functions
def accuracy(y_true, y_pred):
    """
    Computes discrete classification accuracy
    Args:
        y_true (array<m>): a length m vector of ground truth labels
        y_pred (array<m>): a length m vector of predicted labels
    Returns:
        The average number of correct predictions
    """
    assert y_true.shape == y_pred.shape
    matches = y_true == y_pred
    return matches.mean()
def entropy(counts):
    """
    Computes the (log2) entropy of a partitioning
    Args:
        counts (array<k>): a length k int array >= 0, giving the number of
            datapoints in each group. A mixed partitioning like [3, 4, 1]
            yields entropy > 0; a perfect one like [8, 0, 0] yields 0.0.
    Returns:
        A non-negative float scalar: the entropy of the partitioning.
    """
    assert (counts >= 0).all()
    probabilities = counts / counts.sum()
    nonzero = probabilities[probabilities > 0]  # avoid log2(0)
    return -np.sum(nonzero * np.log2(nonzero))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-05 11:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: rename Order->Cart and replace CartItems with a new
    CartItem model linked to both Cart and Item. Applied as-is."""
    dependencies = [
        ('store', '0011_auto_20180405_1226'),
    ]
    operations = [
        migrations.CreateModel(
            name='CartItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.IntegerField(default=0)),
            ],
        ),
        migrations.RenameModel(
            old_name='Order',
            new_name='Cart',
        ),
        migrations.RemoveField(
            model_name='cartitems',
            name='item',
        ),
        migrations.RemoveField(
            model_name='cartitems',
            name='order',
        ),
        migrations.DeleteModel(
            name='CartItems',
        ),
        migrations.AddField(
            model_name='cartitem',
            name='cart',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='store.Cart'),
        ),
        migrations.AddField(
            model_name='cartitem',
            name='item',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='store.Item'),
        ),
    ]
|
'''
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
ecd = sys.getdefaultencoding()
print(ecd)
'''
import pymysql as MySQLdb
import tushare as ts
import pandas as pd
import lxml.html
from lxml import etree
import re
import time
from pandas.compat import StringIO
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
import sys
import math
import conf as config
import chardet
# reporting window taken from conf.py; rows at or before (startYear, curQuarter) are skipped
startYear = config.REPORT_YEAR - 1
endYear = config.REPORT_YEAR
curQuarter = config.REPORT_QUARTER
# True under Python 3 (affects bytes/str handling when serializing HTML nodes)
isPy3 = (sys.version_info[0] >= 3)
# column names assigned to the scraped shareholder table
items = ['date', 'holders_count', 'holders_chg', 'avg_stocks', 'avg_stocks_chg']
def get_stockholder_data(code):
    """Scrape the 163.com shareholder-structure table for one stock code.

    Returns a DataFrame with the columns listed in `items`, or None when the
    fetch, parse, or table extraction fails (all failures are swallowed and
    only 'error' is printed — NOTE(review): consider logging the exception).
    """
    url = 'http://quotes.money.163.com/f10/gdfx_%s.html' % (code)
    tabClassName = 'table_bg001 border_box gudong_table'
    #print(url)
    try:
        request = Request(url)
        text = urlopen(request, timeout=10).read()
        #print('1')
        #res = chardet.detect(text)
        #print(res)
        #if isPy3:
        text = text.decode('utf-8', 'ignore')
        #print(text)
        html = lxml.html.parse(StringIO(text))
        # pull only the rows of the shareholder table
        res = html.xpath("//table[@class=\"" + tabClassName + "\"]/tr")
        if isPy3:
            sarr = [etree.tostring(node).decode('utf-8') for node in res]
        else:
            sarr = [etree.tostring(node) for node in res]
        sarr = ''.join(sarr)
        #print(sarr)
        if len(sarr) == 0:
            return None
        # re-wrap the rows so pandas can parse them as a table
        sarr = '<table>%s</table>'%sarr
        df = pd.read_html(sarr)[0]
        df.columns = items
        print('data got success!')
        return df
    except Exception as e:
        print('error')
        return None
    pass
def get_date_year(date):
    """First four characters of an ISO-style date string, e.g. '2018-03-31' -> '2018'."""
    return date[:4]
def get_date_month(date):
    """Two-digit month from an ISO-style date string, e.g. '2018-03-31' -> '03'."""
    return date[5:7]

def get_date_quarter(date):
    """Map a quarter-end date to its quarter number ('1'-'4'); '0' for any other month."""
    quarter_by_month = {'03': '1', '06': '2', '09': '3', '12': '4'}
    return quarter_by_month.get(get_date_month(date), '0')
def save_stockholder_data(code, date, holders_count, holders_chg, avg_stocks, avg_stocks_chg):
    """Persist one scraped shareholder row into stock_stockholder_<year>q<quarter>.

    Skips dates that are not quarter ends. The table is created on demand.
    Raises on insert failure (e.g. duplicate primary key) so the caller's
    try/except can handle it.
    """
    year = get_date_year(date)
    season = get_date_quarter(date)
    if season == '0':
        return  # not a quarter-end snapshot
    # season is one of '1'-'4'; year comes from scraped text — TODO confirm it
    # is always 4 digits before using it in an identifier
    table_name = 'stock_stockholder_' + str(year) + 'q' + str(season)
    db = MySQLdb.connect(host='localhost',port=3306,user='root',passwd='123456',db='stock',charset='utf8')
    try:
        cursor = db.cursor()
        createDBSql = ('create table if not exists ' + table_name +
                       '(code varchar(10) not null primary key, date text, holders_count text,'
                       ' holders_chg text, avg_stocks text, avg_stocks_chg text)')
        cursor.execute(createDBSql)
        # Values go through driver-side parameter binding; the original
        # interpolated scraped strings straight into the SQL, an
        # injection/quoting hazard.
        sql = ('insert into ' + table_name +
               '(code, date, holders_count, holders_chg, avg_stocks, avg_stocks_chg)'
               ' values(%s, %s, %s, %s, %s, %s)')
        cursor.execute(sql, (code, date, holders_count, holders_chg, avg_stocks, avg_stocks_chg))
        db.commit()
        print(code + ' ' + year + 'Q' + season)
    finally:
        # the original leaked the connection whenever execute() raised
        db.close()
def achieve_stockholder_data(code):
    """Scrape one code's shareholder table and save every row inside the report window."""
    df = get_stockholder_data(code)
    if df is None:
        print('df is null')
        return
    for i in range(0, len(df)):
        date = str(df['date'][i])
        if date == 'nan':
            #print('11')
            continue
        year = int(get_date_year(date))
        quarter = int(get_date_quarter(date))
        if year < startYear:
            #print('22, ' + str(year))
            continue
        # NOTE(review): this compares against startYear, so only rows from the
        # first year are filtered by quarter — confirm endYear wasn't intended
        if year == startYear and quarter <= curQuarter:
            #print('33, ' + str(quarter))
            continue
        try:
            #fencoding = chardet.detect(df['holders_chg'][i])
            #print(df['holders_chg'][i])
            save_stockholder_data(code, df['date'][i], df['holders_count'][i], df['holders_chg'][i], df['avg_stocks'][i], df['avg_stocks_chg'][i])
        except Exception as e:
            # duplicate keys / bad rows are skipped silently
            print('err')
            pass
def achieve_all_stockholder_data():
    """Run the scraper for every stock code listed in stock_codes.txt (one per line)."""
    # `with` closes the file even if a scrape raises; the original never closed it
    with open("stock_codes.txt") as f:
        for line in f:
            code = line.strip()
            achieve_stockholder_data(code)
    print('Finish achieve all stockholder.')
achieve_all_stockholder_data()
#achieve_stockholder_data('000662')
def score(planete, nb_satellite_reel):
    """Quiz loop: ask for *planete*'s satellite count until the player finds
    *nb_satellite_reel*, printing higher/lower hints after each wrong guess.

    Returns -1, the sentinel the caller uses to go back to the menu.

    Bug fixed: in the original, a correct FIRST guess never reached the
    success branch (it lived inside the wrong-guess loop), so the outer loop
    silently re-prompted forever.
    """
    phrase = "Nombre de satellite(s) de la planète " + planete + ": "
    sat = int(input(phrase))
    while sat != nb_satellite_reel:
        if sat > nb_satellite_reel:
            print("trop haut, retente !")
        else:
            print("trop bas, retente !")
        sat = int(input(phrase))
    print()
    print("bravo, tu as trouvé le nombre de satellite(s) de", planete, "qui est de", nb_satellite_reel, "!")
    print()
    return -1
# --- interactive menu loop: pick a planet, play the quiz, repeat until "404" ---
sat = -1
planete = 0
P = ("Mercure, Vénus, Terre, Mars, Jupiter, Saturne, Uranus et Neptune !")
while planete!="404": #loop until the user types the 404 exit code
    if planete == 0:#first run: explain the game and ask for a planet
        sat = -1
        P = ("Mercure, Vénus, Terre, Mars, Jupiter, Saturne, Uranus et Neptune !")
        print("Ce programme a pour but de vous faire découvrir le nombre de satellites naturels que possèdent les planètes de notre système solaire !")
        print()
        print("Tu auras donc le choix entre les huit planètes de notre système, dans l'ordre:")
        print(P)
        print()
        print("Pour quelle planète voudras-tu découvrir le nombre de satellites qu'elle possède ?")
        planete = str(input("nom de la planète choisie: ").capitalize().strip()) #first prompt for the chosen planet
        print()
    if planete == -1:
        # a round just finished (score() returned -1): offer exit or another planet
        print("Tu peux taper 404 pour quitter le programme ou bien continuer en choisissant une nouvelle planète !")
        print()
        print("Je te rappelle les planètes du système solaire pour que tu puisses en choisir une autre:")
        print(P)
        print()
        planete = str(input("Nom de la planète choisie: ").capitalize().strip())
        print()
    elif planete=="Mercure" or planete=="Vénus" or planete=="Terre" or planete=="Mars" or planete=="Jupiter" or planete=="Saturne" or planete=="Uranus" or planete=="Neptune":
        print("ok, tu vas devoir deviner le nombre de satellite(s) que la planète", planete, "possède !")
        # dispatch to the quiz with each planet's real satellite count
        if planete == "Mercure" or planete == "mercure":
            planete = score(planete, 0)
        if planete == "Vénus" or planete == "vénus":
            planete = score(planete, 0)
        if planete == "Terre" or planete == "terre":
            planete = score(planete, 1)
        if planete == "Mars" or planete == "mars":
            planete = score(planete, 2)
        if planete == "Jupiter" or planete == "jupiter":
            planete = score(planete, 79)
        if planete == "Saturne" or planete == "saturne":
            planete = score(planete, 62)
        if planete == "Uranus" or planete == "uranus":
            planete = score(planete, 27)
        if planete == "Neptune" or planete == "neptune":
            planete = score(planete, 14)
    else:
        # unrecognized name: remind the valid spellings and re-prompt
        print("Vérifie bien l'orthographe du nom des planêtes ! :)")
        print("La liste des planètes est:", P)
        print()
        planete = str(input("Nom de la planète choisie: ").capitalize().strip())
        print()
|
'''
Created on May 5, 2013
@author: Remus
The script create swords which follow another sword.
Use Expression Editor (Windows -> Animation Editors -> Expression Editor)
Use scene "Swords.ma".
'''
import maya.cmds as mc
def lag(frame, goal, follower, lagAmount):
    '''
    Make *follower* trail *goal* by sampling the goal's transform
    `lagAmount` frames in the past and copying it onto the follower.

    This function is used in Expression Editor called "swordExpresion":
    python ("lag(" + frame + ", 'swordGoal', 'swordFollower1', 0.3)");
    " + frame + " we are using "+" for concatenation because "frame" is a MEL command

    Args (as used by the expression):
        frame: current frame number supplied by Maya.
        goal/follower: names of the transform nodes.
        lagAmount: how many frames the follower lags behind.
    '''
    # getAttr with t= samples the attribute at a past time; result is [(x, y, z)]
    goalTrans = mc.getAttr(goal + ".translate", t=frame-lagAmount)
    goalRot = mc.getAttr(goal + ".rotate", t=frame-lagAmount)
    mc.setAttr(follower + ".translate", goalTrans[0][0], goalTrans[0][1], goalTrans[0][2])
    mc.setAttr(follower + ".rotate", goalRot[0][0], goalRot[0][1], goalRot[0][2])
|
# Generated by Django 2.2.6 on 2020-11-04 05:20
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: point the Calificaciones model at the 'calificacion'
    database table. Applied as-is."""
    dependencies = [
        ('StudentManagement', '0005_auto_20201103_2157'),
    ]
    operations = [
        migrations.AlterModelTable(
            name='calificaciones',
            table='calificacion',
        ),
    ]
|
import time
from turtle import Screen
from player import Player
from car_manager import CarManager
from scoreboard import Scoreboard
# ---------------------------------------------SCREEN SETUP-----------------------------------------------
screen = Screen()
screen.setup(width=600, height=600)
screen.tracer(0)
# -----------INITIALISATION OF PLAYER, CAR MANAGER FOR CARS AND SCOREBOARD FOR OUR GAME-----------------
player = Player()
cars = CarManager()
level = Scoreboard()
game_is_on = True
# ---------------------------------KEY BINDING FOR TURTLE'S MOVEMENT--------------------------------------
screen.listen()
screen.onkeypress(fun=player.move_up, key='w')
# NOTE(review): `collision` is never used below — confirm it can be removed
collision = 0
# ---------------------------------------GAME FLOW BEGINS--------------------------------------------------
while game_is_on:
    cars.move_car()
    cars.update_car()
    time.sleep(0.1)
    # a car within 20 units of the player ends the game
    for car in cars.car_list:
        if player.distance(car) < 20:
            game_is_on = False
    # reaching the top edge: reset the player, speed up traffic, bump the level
    if player.ycor() > 270:
        player.refresh()
        cars.level_up()
        level.next_level()
    screen.update()
level.game_over()
screen.exitonclick()
|
from pyspark import SparkConf, SparkContext
# Local Spark job: keep only employees whose department id appears in `department`.
conf = SparkConf().setMaster("local[*]").setAppName("employees")
sc = SparkContext(conf = conf)
employees = [['Raffery',31], ['Jones',33], ['Heisenberg',33], ['Robinson',34], ['Smith',34]]
department = [31,33]
employees = sc.parallelize(employees)
department = sc.parallelize(department)
# collect the valid ids back to the driver so the filter closure can use them
dept = department.collect()
employeesWithValidDeptRdd = employees.filter(lambda e: e[1] in dept).collect()
print(employeesWithValidDeptRdd)
|
"""ops.syncretism.io model"""
__docformat__ = "numpy"
import configparser
import logging
from typing import Dict, Tuple
import pandas as pd
import yfinance as yf
from openbb_terminal.core.config.paths import (
MISCELLANEOUS_DIRECTORY,
)
from openbb_terminal.core.session.current_user import get_current_user
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import request
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.options import yfinance_model
logger = logging.getLogger(__name__)
# sort orders accepted by the ops.syncretism.io screener
# (each field name paired with _desc/_asc direction)
accepted_orders = [
    "e_desc",
    "e_asc",
    "iv_desc",
    "iv_asc",
    "md_desc",
    "md_asc",
    "lp_desc",
    "lp_asc",
    "oi_asc",
    "oi_desc",
    "v_desc",
    "v_asc",
]
@log_start_end(log=logger)
def get_historical_greeks(
    symbol: str, expiry: str, strike: float, chain_id: str = "", put: bool = False
) -> pd.DataFrame:
    """Get historical option greeks from ops.syncretism.io

    Parameters
    ----------
    symbol: str
        Stock ticker symbol
    expiry: str
        Option expiration date
    strike: float
        Strike price to look for
    chain_id: str
        OCC option symbol. Overwrites other inputs
    put: bool
        Is this a put option?

    Returns
    -------
    pd.DataFrame
        One row per timestamp with iv/gamma/delta/theta/rho/vega/premium/price
        columns; empty on request failure.
    """
    if not chain_id:
        # resolve the OCC contract symbol from the yfinance option chain
        chain = yfinance_model.get_option_chain(symbol, expiry)
        side = chain.puts if put else chain.calls
        chain_id = side.loc[side.strike == strike, "contractSymbol"].values[0]
    response = request(f"https://api.syncretism.io/ops/historical/{chain_id}")
    if response.status_code != 200:
        console.print("Error in request.")
        return pd.DataFrame()
    history = response.json()
    # output column -> field name in the API payload
    fields = {
        "iv": "impliedVolatility",
        "gamma": "gamma",
        "delta": "delta",
        "theta": "theta",
        "rho": "rho",
        "vega": "vega",
        "premium": "premium",
        "price": "regularMarketPrice",
    }
    index = [pd.to_datetime(entry["timestamp"], unit="s") for entry in history]
    columns = {name: [entry[field] for entry in history] for name, field in fields.items()}
    return pd.DataFrame(columns, index=index)
@log_start_end(log=logger)
def get_preset_choices() -> Dict:
    """
    Return a dict containing keys as name of preset and
    filepath as value.

    User presets are read first; bundled defaults with the same name are
    applied afterwards and therefore take precedence (update order).
    """
    PRESETS_PATH = (
        get_current_user().preferences.USER_PRESETS_DIRECTORY / "stocks" / "options"
    )
    PRESETS_PATH_DEFAULT = MISCELLANEOUS_DIRECTORY / "stocks" / "options"
    preset_choices = {}
    # BUGFIX: the previous `filepath.name.strip(".ini")` stripped any of the
    # characters '.', 'i', 'n' from BOTH ends of the name (e.g. mangling
    # "in_the_money.ini" to "_the_money"); Path.stem drops only the suffix.
    if PRESETS_PATH.exists():
        preset_choices.update(
            {
                filepath.stem: filepath
                for filepath in PRESETS_PATH.iterdir()
                if filepath.suffix == ".ini"
            }
        )
    if PRESETS_PATH_DEFAULT.exists():
        preset_choices.update(
            {
                filepath.stem: filepath
                for filepath in PRESETS_PATH_DEFAULT.iterdir()
                if filepath.suffix == ".ini"
            }
        )
    return preset_choices
@log_start_end(log=logger)
def get_screener_output(preset: str = "high_iv.ini") -> Tuple[pd.DataFrame, str]:
    """Screen options based on preset filters.

    Reads the preset's [FILTER] section, validates it with check_presets,
    POSTs it to api.syncretism.io and returns the formatted result table.

    Parameters
    ----------
    preset: str [default: "high_iv.ini"]
        Chosen preset
    Returns
    -------
    pd.DataFrame:
        DataFrame with screener data, or empty if errors
    str:
        String containing error message if supplied
    """
    # API field -> display column.  Insertion order matters: only the first
    # 20 values of this mapping are kept as result columns below.
    d_cols = {
        "contractSymbol": "Contract Symbol",
        "expiration": "Expiration",
        "symbol": "Ticker",
        "optType": "Type",
        "strike": "Strike",
        "volume": "Vol",
        "openInterest": "OI",
        "impliedVolatility": "IV",
        "delta": "Delta",
        "gamma": "Gamma",
        "rho": "Rho",
        "theta": "Theta",
        "vega": "Vega",
        # "yield": "Y",
        # "monthlyyield": "MY",,
        # "regularMarketDayLow": "SMDL",
        # "regularMarketDayHigh": "SMDH",
        "lastTradeDate": "Last Traded",
        "bid": "Bid",
        "ask": "Ask",
        "lastPrice": "Last",
        # "lastCrawl": "LC",
        # "inTheMoney": "ITM",
        "pChange": "% Change",
        "regularMarketPrice": "Underlying",
        "priceToBook": "P/B",
    }
    # RawConfigParser + optionxform=str keeps the filter keys case-sensitive.
    preset_filter = configparser.RawConfigParser()
    preset_filter.optionxform = str  # type: ignore
    choices = get_preset_choices()
    if preset not in choices:
        return pd.DataFrame(), "No data found"
    preset_filter.read(choices[preset])
    # Keep only filters with a non-empty value.
    d_filters = {k: v for k, v in dict(preset_filter["FILTER"]).items() if v}
    # Hand-build a JSON-ish payload from the dict repr: unquote the values so
    # numbers are sent as numbers, then swap single for double quotes.
    s_filters = str(d_filters)
    s_filters = (
        s_filters.replace(": '", ": ")
        .replace("',", ",")
        .replace("'}", "}")
        .replace("'", '"')
    )
    # The order-by value must stay a (double-quoted) string in the payload.
    for order in accepted_orders:
        s_filters = s_filters.replace(f" {order}", f' "{order}"')
    errors = check_presets(d_filters)
    if errors:
        return pd.DataFrame(), errors
    link = "https://api.syncretism.io/ops"
    res = request(link, headers={"Content-type": "application/json"}, data=s_filters)
    # pylint:disable=no-else-return
    if res.status_code == 200:
        df_res = pd.DataFrame(res.json())
        if df_res.empty:
            return df_res, f"No options data found for preset: {preset}"
        # Rename to display names and keep the first 20 mapped columns.
        df_res = df_res.rename(columns=d_cols)[list(d_cols.values())[:20]]
        # Unix timestamps -> short dates.
        df_res["Expiration"] = df_res["Expiration"].apply(
            lambda x: pd.to_datetime(x, unit="s").strftime("%y-%m-%d")
        )
        df_res["Last Traded"] = df_res["Last Traded"].apply(
            lambda x: pd.to_datetime(x, unit="s").strftime("%y-%m-%d")
        )
        # df_res["Y"] = df_res["Y"].round(3)
        # df_res["MY"] = df_res["MY"].round(3)
        return df_res, ""
    else:
        return pd.DataFrame(), f"Request Error: {res.status_code}"
# pylint: disable=eval-used
@log_start_end(log=logger)
def check_presets(preset_dict: dict) -> str:
    """Checks option screener preset values

    Parameters
    ----------
    preset_dict: dict
        Defined presets from configparser

    Returns
    -------
    error: str
        String of all errors accumulated (empty if the preset is valid)
    """
    # Keys whose values must parse as floats.
    float_list = [
        "min-iv",
        "max-iv",
        "min-oi",
        "max-oi",
        "min-strike",
        "max-strike",
        "min-volume",
        "max-volume",
        "min-voi",
        "max-voi",
        "min-diff",
        "max-diff",
        "min-ask-bid",
        "max-ask-bid",
        "min-exp",
        "max-exp",
        "min-price",
        "max-price",
        "min-price-20d",
        "max-price-20d",
        "min-volume-20d",
        "max-volume-20d",
        "min-iv-20d",
        "max-iv-20d",
        "min-delta-20d",
        "max-delta-20d",
        "min-gamma-20d",
        "max-gamma-20d",
        "min-theta-20d",
        "max-theta-20d",
        "min-vega-20d",
        "max-vega-20d",
        "min-rho-20d",
        "max-rho-20d",
        "min-price-100d",
        "max-price-100d",
        "min-volume-100d",
        "max-volume-100d",
        "min-iv-100d",
        "max-iv-100d",
        "min-delta-100d",
        "max-delta-100d",
        "min-gamma-100d",
        "max-gamma-100d",
        "min-theta-100d",
        "max-theta-100d",
        "min-vega-100d",
        "max-vega-100d",
        "min-rho-100d",
        "max-rho-100d",
        "min-sto",
        "max-sto",
        "min-yield",
        "max-yield",
        "min-myield",
        "max-myield",
        "min-delta",
        "max-delta",
        "min-gamma",
        "max-gamma",
        "min-theta",
        "max-theta",
        "min-vega",
        "max-vega",
        "min-cap",
        "max-cap",
    ]
    # Keys whose values must be the literal strings "true"/"false".
    bool_list = ["active", "stock", "etf", "puts", "calls", "itm", "otm", "exclude"]
    error = ""
    for key, value in preset_dict.items():
        if key in float_list:
            try:
                float(value)
                if value.startswith("."):
                    error += f"{key} : {value} needs to be formatted with leading 0\n"
            except Exception:
                error += f"{key} : {value}, should be float\n"
        elif key in bool_list:
            if value not in ["true", "false"]:
                error += f"{key} : {value}, Should be [true/false]\n"
        elif key == "tickers":
            for symbol in value.split(","):
                try:
                    # SECURITY NOTE: eval() unquotes symbols taken from the
                    # local preset file ('"AAPL"' -> 'AAPL').  Never feed an
                    # untrusted preset file into this function.
                    if yf.Ticker(eval(symbol)).fast_info["lastPrice"] is None:
                        error += f"{key} : {symbol} not found on yfinance"
                except NameError:
                    error += f"{key} : {value}, {symbol} failed"
        elif key == "limit":
            try:
                int(value)
            except Exception:
                error += f"{key} : {value} , should be integer\n"
        elif key == "order-by" and value.replace('"', "") not in accepted_orders:
            error += f"{key} : {value} not accepted ordering\n"
    if error:
        # BUGFIX: use the module logger at ERROR level.  The previous
        # logging.exception() wrote to the root logger and, being outside an
        # except block, appended a bogus "NoneType: None" traceback.
        logger.error(error)
    return error
|
import cv2
from PIL import Image
from MessageBox import MessageBox
class ImageNegativeTransformation:
    """Displays an image alongside its negative (per-channel bitwise NOT)."""

    @staticmethod
    def ProcessTransformation(inputFile):
        """Show *inputFile* and its negative; blocks until a key is pressed.

        Pops an info dialog instead if the path is not a readable image.
        (Declared @staticmethod: the original had no `self` parameter and was
        only callable on the class; this keeps that call style and also
        allows instance calls.)
        """
        try:
            # PIL's open validates the path/format and raises IOError on a
            # bad file; cv2.imread alone would just return None silently.
            im = Image.open(inputFile)
            img = cv2.imread(inputFile)
            cv2.imshow("Original Image", img)
            # Negative: invert every channel value (255 - v).
            img_not = cv2.bitwise_not(img)
            cv2.imshow("ImageNegative", img_not)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
        except IOError:
            MessageBox.showMessageBox('info', 'Please provide a valid Image File Path to Transform')
|
# Generated by Django 2.2.3 on 2019-09-24 22:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make WorklistColumn.ordering optional, and unique per template when set."""
    dependencies = [("main", "0019_remove_metadata_size_and_class")]
    operations = [
        # Allow ordering to be left blank/null.
        migrations.AlterField(
            model_name="worklistcolumn",
            name="ordering",
            field=models.IntegerField(
                blank=True,
                help_text="Order this column will appear in worklist export.",
                null=True,
                verbose_name="Ordering",
            ),
        ),
        # Enforce (ordering, template) uniqueness only for non-null orderings.
        migrations.AddConstraint(
            model_name="worklistcolumn",
            constraint=models.UniqueConstraint(
                condition=models.Q(ordering__isnull=False),
                fields=("ordering", "template"),
                name="unique_column_ordering",
            ),
        ),
    ]
|
# Questo script python permette di effettuare un'acquisizione automatica delle posizioni dei tag,
# utilizzando l'algoritmo di posizionamento della pozyx.
# La posizione dei tag determinata dall'algoritmo viene comunicata sulla linea UWB al dispositivo
# che è connesso alla porta USB del PC. Sullo schermo vengono stampate le posizioni dei due tag.
# E' possibile calibrare le ancore sia manualmente che automaticamente. I parametri da modificare
# si trovano nella funzione "main" in fondo al programma. Il programma è pensato per una
# configurazione minima che prevede l'utilizzo di 4 ancore per la calibrazione.
# Su linux:
# da terminale digitare >> sudo python3 test_UWB.py
import time
from rangesToPos import (rangesToPos, average)
from pypozyx import *
MAX_DISTANCE = 100000
class SerialTag:
    """Drives a Pozyx tag over USB: applies UWB settings, registers the
    anchor list in the tag and runs the positioning algorithm."""

    def __init__(self, pozyx, anchor_ids, ranging_protocol, myUWBSettings=None, dimension=POZYX_3D, height=1000, algorithm=POZYX_POS_ALG_UWB_ONLY):
        self.pozyx = pozyx
        self.anchor_ids = anchor_ids
        self.ranging_protocol = ranging_protocol
        self.dimension = dimension
        self.height = height
        self.algorithm = algorithm
        self.anchors = []
        self.distances = []
        # Timestamp (ms) of the previous fix, keyed by remote id (None = serial tag).
        self.old_time = {None: 0}
        self.UWBSettings = myUWBSettings
        # NOTE(review): a None myUWBSettings would fail here -- the default
        # exists only for signature compatibility; callers always pass one.
        self.UWBChannel = myUWBSettings.channel
        self.defaultUWBSettings = UWBSettings(5, 0, 2, 8, 11.5)
        self.pos_error = 0  # number of failed positioning attempts

    def setup(self):
        """Configure ranging protocol / UWB settings and load the anchors."""
        self.pozyx.setRangingProtocol(self.ranging_protocol)
        self.pozyx.setUWBSettings(self.UWBSettings)
        self.pozyx.setUWBChannel(self.UWBChannel)
        print("Coordinate iniziali dei dispositivi della rete")
        coordinates = Coordinates()
        for anchor in self.anchor_ids:
            status = self.pozyx.getDeviceCoordinates(anchor, coordinates, anchor)
            if status == POZYX_FAILURE:
                print("Device " + str(hex(anchor)) + "list is empty")
            print(hex(anchor), coordinates.x, coordinates.y, coordinates.z)
        # Print the resulting UWB configuration of every device.
        # BUGFIX: iterate self.anchor_ids, not the module-level global.
        for remote in self.anchor_ids:
            self.printUWBSettings(remote)
        self.printUWBSettings(None)
        self.addAnchors()  # save the anchors in the tag's internal device list
        self.printConfigurationResults()
        print("Ancore aggiunte alla lista interna dei Tag")

    def loop(self):
        """Run one positioning step and print the result (or count an error)."""
        position = Coordinates()
        status = self.pozyx.doPositioning(position, self.dimension, self.height, self.algorithm)
        # An all-zero fix is treated as a failure too.
        if status == POZYX_SUCCESS and position.x != 0 and position.y != 0 and position.z != 0:
            self.printPosition(position)
        else:
            self.pos_error = self.pos_error + 1
            print("Failed positioning")

    def printConfigurationResults(self):
        """List the anchors registered in the tag's device list."""
        print("Anchors identified: ")
        for i, anchor in enumerate(self.anchors):
            print("ANCHOR", i, ", 0x%0.4x, %s" % (anchor.network_id, str(anchor.pos)))
        print()
        print()

    def printPosition(self, position, remote_id=None):
        """Print the computed position and the time since the previous fix."""
        new_time = int(round(time.time() * 1000))
        print("Serial pozyx: x(mm): {pos.x} y(mm): {pos.y} z(mm): {pos.z}".format("0x%0.4x", pos=position), " Delta_t [ms]: ", new_time - self.old_time[remote_id])
        self.old_time[remote_id] = new_time

    def addAnchors(self):
        """Copy each anchor's coordinates into the tag's internal device list.

        Raises
        ------
        Exception
            If reading an anchor's coordinates or adding a device fails.
        """
        status = self.pozyx.clearDevices()
        coordinates = Coordinates()
        # BUGFIX: iterate self.anchor_ids (constructor argument), not the
        # module-level `anchor_ids` global the original relied on.
        for anchor_id in self.anchor_ids:
            status &= self.pozyx.getDeviceCoordinates(anchor_id, coordinates, anchor_id)
            self.anchors.append(DeviceCoordinates(anchor_id, 1, coordinates))
            if status != POZYX_SUCCESS:
                # (the quit() that followed this raise was unreachable)
                raise Exception(str(hex(anchor_id)) + ": failed to get anchor's coordinates from anchor's internal list")
        for anchor in self.anchors:
            status &= self.pozyx.addDevice(anchor)
            if status == POZYX_FAILURE or status == POZYX_TIMEOUT:
                raise Exception("Failed to add anchors to tag's device list")

    def printUWBSettings(self, remote_id):
        """Read and print the UWB settings of *remote_id* (None = serial tag)."""
        aux = UWBSettings()
        status = self.pozyx.getUWBSettings(aux, remote_id)
        if status == POZYX_SUCCESS:
            if remote_id is None:
                print("Serial ", aux.channel, aux.bitrate, aux.prf, hex(aux.plen), aux.gain_db)
            else:
                print(hex(remote_id), "Channel: ", aux.channel, "Bitrate: ", aux.bitrate,
                      "Prf: ", aux.prf, "Plen: ", hex(aux.plen), "Gain: ", aux.gain_db)
        else:
            print(hex(remote_id), "Failed to receive UWB Settings")
if __name__ == "__main__":
    # Detect a Pozyx device on the PC's USB/serial ports.
    serial_port = get_first_pozyx_serial_port()
    print(serial_port)
    if serial_port is None:
        # (the quit() that followed this raise was unreachable)
        raise Exception("No Pozyx connected. Check your USB cable or your driver")
    pozyx = PozyxSerial(serial_port)
    ################################################################
    ####                     -PARAMETERS-                       ####
    ################################################################
    # IDs of the anchors in use (follow the convention of rangesToPos.py).
    anchor_ids = [0x6e44, 0x6e7a, 0x6e6c, 0x6939]
    # Ranging protocol: POZYX_RANGE_PROTOCOL_FAST or POZYX_RANGE_PROTOCOL_PRECISION
    ranging_protocol = POZYX_RANGE_PROTOCOL_PRECISION
    # Read channel/bitrate/prf/plen/gain (one per line); `with` guarantees
    # the file handle is closed (the original leaked it).
    with open("Parametri_UWB.txt") as input_file:
        channel = int(input_file.readline())
        bitrate = int(input_file.readline())
        prf = int(input_file.readline())
        plen = int(input_file.readline())
        gain = float(input_file.readline())
    myUWBSettings = UWBSettings(channel, bitrate, prf, plen, gain)
    # Initialise the serial tag wrapper and configure the network.
    serial_tag = SerialTag(pozyx, anchor_ids, ranging_protocol, myUWBSettings)
    serial_tag.setup()
    # Positioning loop; stop with Ctrl-C.
    try:
        while True:
            serial_tag.loop()
    except KeyboardInterrupt:
        print("interrupted!")
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
import MySQLdb
import json
import chardet
from jsqbmysql import dev,test
from jsqblinux import loan,debit
from cuserid import searchuid,inver
from backstage import verify
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
uid=searchuid()
# Back-office reviews: automated check, first review, second review.
def stu(n):
    # Status "0" means the loan order still needs review: trigger the
    # back-office verify() flow for the current environment.
    if n=="0":
        verify(inver())
    else:
        pass
# Run the script against the matching environment (test vs dev).
# NOTE(review): Python 2 script (print statements, urllib2, str.decode).
if inver()[0:4]=="test":
    # Latest loan-order status for this user, from the test DB.
    status=str(test("select status from tb_user_loan_order where user_id="+uid+" order by id desc limit 1")[0][0])
    stu(status)
    loan(inver())# loan (borrowing) script
    list=test("select order_id,user_id,money from tb_financial_loan_record where user_id="+uid+" order by id desc limit 1")
    list2=test("select name from tb_loan_person where id="+uid)
    hj=inver()+".test"
else:
    status=str(dev("select status from tb_user_loan_order where user_id="+uid+" order by id desc limit 1")[0][0])
    stu(status)
    loan(inver())# loan (borrowing) script
    list=dev("select order_id,user_id,money from tb_financial_loan_record where user_id="+uid+" order by id desc limit 1")
    list2=dev("select name from tb_loan_person where id="+uid)
    hj=inver()+".dev"
# Unpack order_id / user_id / money from the latest loan record.
# NOTE(review): `list` shadows the builtin name.
y=[]
for x in list:
    y.append(x)
orderid=y[0][0]
user_id=y[0][1]
money=y[0][2]
realname=(str(list2[0][0])).decode("utf-8")
# Disbursement (payout) callback.
import urllib2,time
response=urllib2.urlopen("http://"+hj+".kdqugou.com/frontend/web/notify/test-callback?type=1&order_id="+orderid+"&code=0") ### code value 0 means success
m=response.read().decode('utf-8')
d=json.loads(m)
k=d['code']
if int(k)==0:
    # Success: print timestamp, order id, customer name.
    print time.strftime("%Y-%m-%d %H:%M %p", time.localtime())+" "+orderid+" "+realname+u" 打款成功"
    print m
else:
    print u" 打款失败"
    print m
|
#!/usr/bin/env python
# encoding=utf-8
# maintainer: rgaudin
from django.contrib import admin
from models import Message
class MessageAdmin(admin.ModelAdmin):
    """Admin list configuration for Message: columns, filters, search and
    a date drill-down on the message date."""
    list_display = ('identity', 'date', 'direction',
                    'get_status_display', 'text')
    list_filter = ['direction', 'status']
    search_fields = ['identity', 'text']
    date_hierarchy = 'date'
admin.site.register(Message, MessageAdmin)
|
import folium
import pandas #to load csv file with data
# Load volcano latitude/longitude/elevation columns from the data file.
data = pandas.read_csv("Volcanoes.txt")
lat = list(data["LAT"])
lon = list(data["LON"])
elevation = list(data["ELEV"])
def marker_color(elevation):
    """Map an elevation (metres) to a marker colour.

    Below 1000 -> 'green', 1000 up to (but not including) 3000 -> 'orange',
    3000 and above -> 'red'.
    """
    if elevation < 1000:
        return 'green'
    if elevation < 3000:
        return 'orange'
    return 'red'
#create a map object with folium and Leaflet.js
#html for pop-up window on markers
html = """<h4>Volcano information:</h4>
Height: %s m
"""
# `volcano_map` avoids shadowing the builtin map().
volcano_map = folium.Map(location=[38.58, -99.09], zoom_start=6, tiles="Mapbox Bright")
feature_group = folium.FeatureGroup(name='Map')
# One circle marker per volcano, coloured by elevation band.
for lt, ln, el in zip(lat, lon, elevation):
    iframe = folium.IFrame(html=html % str(el), width=200, height=100)
    feature_group.add_child(folium.CircleMarker(location=[lt, ln], popup=folium.Popup(iframe),
                            radius=8, fill_color=marker_color(el), fill_opacity=0.8, color='grey'))
volcano_map.add_child(feature_group)
volcano_map.add_child(folium.LayerControl())
volcano_map.save("map.html")
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-02-06 09:19
from __future__ import unicode_literals
import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rework Product: drop description/model/price, add a rich-text `text`
    field and update the Russian verbose names."""
    dependencies = [
        ('products', '0002_auto_20180119_1247'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='product',
            options={'verbose_name': 'Продукция', 'verbose_name_plural': 'Продукции'},
        ),
        migrations.RemoveField(
            model_name='product',
            name='description',
        ),
        migrations.RemoveField(
            model_name='product',
            name='model',
        ),
        migrations.RemoveField(
            model_name='product',
            name='price',
        ),
        # Replaces the removed description with a CKEditor rich-text field.
        migrations.AddField(
            model_name='product',
            name='text',
            field=ckeditor_uploader.fields.RichTextUploadingField(blank=True, default=None, null=True, verbose_name='Текст к продукции'),
        ),
        migrations.AlterField(
            model_name='product',
            name='name',
            field=models.CharField(blank=True, default=None, max_length=64, null=True, verbose_name='Название продукции'),
        ),
    ]
|
# Read a line of space-separated numbers and a count, then remove the
# smallest remaining value that many times and print what is left.
str_numbers = input().split()
counter = int(input())
# Convert the tokens to integers (comprehension replaces the append loop).
numbers = [int(num) for num in str_numbers]
# Remove the minimum element `counter` times (first occurrence each time).
for _ in range(counter):
    numbers.remove(min(numbers))
print(numbers)
|
#!/usr/bin/python
from set1 import *
import sys, getopt, socket
BUFFER_SIZE = 2048  # max bytes read per socket recv
port=4632  # oracle server port
host="10.101.1.10"  # oracle server address
def checkpoint2_oracle(plaintext, sock):
    """Send *plaintext* to the encryption oracle over *sock* and return the
    raw ciphertext (the server replies hex-encoded; Python 2 str.decode).

    An empty plaintext is sent as the literal "NULL" sentinel.
    """
    if (plaintext == ""):
        plaintext = "NULL"
    sock.send(plaintext)
    #print "Sending: " + plaintext
    resp = sock.recv(BUFFER_SIZE)
    #print "Received: " + resp
    return resp.decode("hex")
def splitIntoBlocks(data, blockSize):
    """Split *data* into consecutive chunks of length *blockSize*.

    The final chunk may be shorter; an empty input yields an empty list.
    """
    return [data[start:start + blockSize]
            for start in range(0, len(data), blockSize)]
def addToDecryptionDictionary(prefixStr, crackDict, blockNumber, blockSize, sock):
    """Populate *crackDict*: for every byte value c, map the ciphertext block
    produced by encrypting (prefixStr + chr(c)) back to the character c."""
    for c in range(256):
        block = getHexBlock(checkpoint2_oracle(prefixStr+chr(c), sock), blockNumber, blockSize)
        crackDict[block] = chr(c)
def getHexBlock(ciphertext, i, blockSize):
    """Return block *i* of *ciphertext* as a hex string.

    blockSize is in raw bytes, so indices are doubled for the hex form
    (Python 2 only: str.encode('hex')).
    """
    block = ciphertext.encode('hex')[(i*blockSize)*2: (i+1)*blockSize*2]
    #print "------"
    #print block
    #print (i*blockSize)*2
    #print (i+1)*blockSize*2
    #print "------"
    return block
# Byte-at-a-time ECB decryption attack against a remote oracle (Python 2).
print "Connecting to port " + str(port)
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    # Connect the socket to the port where the server is listening
    server_address = (host, port)
    #print >>sys.stderr, 'connecting to %s port %s' % server_address
    sock.connect(server_address)
    print 'Connected to %s port %s' % server_address
except socket.error:
    print >>sys.stderr, "Could not connect to the server"
    exit(1)
A="A"
# Discover block size by feeding ECB oracle increasing size inputs to discover the block size
ctxtlen=len(checkpoint2_oracle("", sock))
blockSize=-1
for i in range(64):
    ctxt = checkpoint2_oracle(A*i, sock)
    if len(ctxt) > ctxtlen:
        # Ciphertext grew by exactly one block once the padding rolled over.
        blockSize = len(ctxt)-ctxtlen
        #print blockSize
        break
#print blockSize
# detect that it is using ECB mode
# Need to use 3 blocks of A since we don't know how long the prefix string is
if (isECB(checkpoint2_oracle(A*(3*blockSize), sock))):
    usingECB=True
    #print "Using ECB"
else:
    print "Not using ECB"
    exit(1)
blocks = splitIntoBlocks(checkpoint2_oracle(A*(3*blockSize), sock).encode('hex'), blockSize*2)
#print blocks;
# Find the blocks that match our input
# NOTE(review): if no adjacent pair matches, blocks[i+1] raises IndexError
# on the final element of the enumeration.
x=-1
y=-1
for i, block in enumerate(blocks):
    if (blocks[i] == blocks[i+1]):
        x=i
        y=i+1
        break
#print x, y
# Next find the offset
# by sending 2 blocks of As, followed by a third that ends in a Z
# repeatedly increase the starting A's in the third block until you get the two repeated ciphertext blocks
# that were used to detect ECB mode
# the moment that they are equal, that means that the Z is pushed out into the next block
# The number of leading A's in that third block is the offset
offset=-1
for i in range(blockSize):
    testplaintext = A*2*blockSize + A*i + 'Z'
    #print testplaintext
    ciphertext = checkpoint2_oracle(testplaintext, sock)
    blocks=splitIntoBlocks(ciphertext.encode('hex'), blockSize*2)
    if (blocks[x] == blocks[y]):
        offset=i
        break
print "Offset is ", offset
# Compute the number of unknown blocks
numberOfUnknownBlocks = len(checkpoint2_oracle("", sock))/blockSize - x
# Now we can proceed with the previous algorithm, just adding A*offset before our strings
# and adding x to the j block offset
crack={}
decryptedMessage = ""
# Decrypt each block, one at a time
# Added one to the numberof unknown blocks since if the offset is large, it doesn't decrypt the final block
for j in range(numberOfUnknownBlocks+1):
    # j is the block I need to keep
    # Decrypt the block iteratively, since you can
    for i in range(blockSize):
        addToDecryptionDictionary(A*offset + A*(blockSize-i-1)+decryptedMessage, crack, j+x, blockSize, sock)
        block = getHexBlock(checkpoint2_oracle(A*offset + A*(blockSize-i-1), sock), j+x, blockSize)
        # at the very end, after the last byte,
        # the padding changes since the size of the message is changing
        # So you cannot break that final byte
        # so if the block that you obtain is not in the dictionary
        # then you are done and can stop
        if block in crack:
            if (crack[block] == '\x01'):
                # We are into the padding area. Maybe?
                break
            decryptedMessage += crack[block]
        else:
            # If its not there, we are done
            break
print "Decrypted:"
print decryptedMessage.rstrip('\n') # Rstrip since the plaintext already includes a newline
|
import fnmatch
import os
import pyngram;
import Distribution_Utils;
import numpy as np;
from scipy import stats;
import generateGraphs;
def GetFastaFiles(dirName, searchRe):
    """Recursively collect files under *dirName* whose names match the
    fnmatch pattern *searchRe* (e.g. "Signal*.fa")."""
    found = []
    for base, _dirs, names in os.walk(dirName):
        found.extend(os.path.join(base, name)
                     for name in fnmatch.filter(names, searchRe))
    return found
def GetNGramDistributionForFiles(fastaFiles, nLen):
    """Compute the n-gram distribution of each fasta file.

    Returns two parallel lists: per-file frequency dicts and per-file
    probability dicts, in the same order as *fastaFiles*.
    """
    pairs = [Distribution_Utils.GetNgramDistributionForFile(path, nLen)
             for path in fastaFiles]
    freqList = [frequencies for frequencies, _ in pairs]
    probList = [probabilities for _, probabilities in pairs]
    return freqList, probList
def CreateNGramList(faList):
    """Merge a list of per-sequence {ngram: value} dicts into {ngram: [values]}.

    Values for the same n-gram are collected in input order.
    """
    cummulativeNGramList = {}
    for seqDict in faList:
        # .items()/`in` replace the Python-2-only iteritems()/has_key();
        # this also drops a dead `= []` assignment that was immediately
        # overwritten by `= [value]` in the original.
        for key, value in seqDict.items():
            if key in cummulativeNGramList:
                cummulativeNGramList[key].append(value)
            else:
                cummulativeNGramList[key] = [value]
    return cummulativeNGramList
def ComputeMeanAndStdError(SignalNGramList):
    """Replace each {ngram: [values]} entry with [mean, standard error],
    both rounded to 4 decimal places.  Mutates and returns the same dict.
    """
    # .items() replaces the Python-2-only iteritems(); assigning to existing
    # keys during iteration is safe (the dict's size does not change).
    for key, value in SignalNGramList.items():
        SignalNGramList[key] = [round(np.mean(value), 4), round(stats.sem(value), 4)]
    return SignalNGramList
def ComputeNgramDistribution(dirName, nLen, graphName):
    """Compare the n-gram distributions of Signal*/NoSignal* fasta files under
    *dirName* against the 3'UTR background, and plot the mean +/- standard
    error of each n-gram into *dirName*/*graphName*."""
    three_utr_freq, three_utr_prob = Distribution_Utils.Compute3UtrNgramDistibution(nLen);
    #three_utr_prob = {'AA': 0.0878, 'AC': 0.0478, 'GT': 0.0542, 'AG': 0.0674, 'CC': 0.0604, 'TT': 0.1023, 'CG': 0.0121, 'GG': 0.0567, 'GC': 0.0468, 'AT': 0.0693, 'GA': 0.0568, 'TG': 0.0781, 'CT': 0.073, 'CA': 0.0678, 'TC': 0.0583, 'TA': 0.0602}
    SignalFiles = GetFastaFiles(dirName, "Signal*.fa")
    NoSignalFiles = GetFastaFiles(dirName, "NoSignal*.fa")
    SignalFreqList, SignalProbList = GetNGramDistributionForFiles(SignalFiles, nLen)
    NoSignalFreqList, NoSignalProbList = GetNGramDistributionForFiles(NoSignalFiles, nLen)
    # Collect per-file probabilities per n-gram, then reduce to mean/stderr.
    SignalNGramList = CreateNGramList(SignalProbList);
    NoSignalNGramList = CreateNGramList(NoSignalProbList);
    SignalNGramDict = ComputeMeanAndStdError(SignalNGramList);
    NoSignalNGramDict = ComputeMeanAndStdError(NoSignalNGramList);
    graphTitle = str(nLen) + "-gram Distribution for 3'UTR and Generated Fasta files"
    graphName = dirName + "/" + graphName;
    generateGraphs.PlotNGrams(SignalNGramDict, NoSignalNGramDict, three_utr_prob, graphTitle, graphName);
if __name__ == "__main__":
    import sys;
    # Usage: script.py <directory> <ngram-length> <graph-name>
    dirName = sys.argv[1]
    nLen = int(sys.argv[2])
    graphName = sys.argv[3]
    ComputeNgramDistribution(dirName, nLen, graphName)
|
# https://github.com/flaport/sax
from SiEPIC.install import install

# Ensure the `sax` circuit-simulation package is available, then import it:
# the code below calls sax.reciprocal/sax.circuit, but the original script
# installed the package without ever importing the module.
install('sax')
import sax
def coupler(coupling=0.5):
    """Ideal 2x2 directional coupler S-dict with power coupling ratio
    *coupling* (cross ports get a 90-degree phase shift)."""
    through = (1 - coupling) ** 0.5
    cross = 1j * coupling ** 0.5
    return sax.reciprocal({
        ("in0", "out0"): through,
        ("in0", "out1"): cross,
        ("in1", "out0"): cross,
        ("in1", "out1"): through,
    })
# Quick sanity check: evaluate and print the S-dict of a 30% coupler.
coupler(coupling=0.3)
print(coupler(coupling=0.3))
def waveguide(wl=1.55, wl0=1.55, neff=2.34, ng=3.4, length=10.0, loss=0.0):
    """Straight-waveguide S-dict with first-order dispersion and loss.

    Assumes wl/wl0 in um and loss in dB per unit length -- TODO confirm
    against the calling convention of the surrounding project.
    """
    import numpy as np

    # First-order dispersion: adjust the effective index around wl0.
    neff_at_wl = neff - (wl - wl0) * (ng - neff) / wl0
    phase = 2 * np.pi * neff_at_wl * length / wl
    amplitude = np.asarray(10 ** (-loss * length / 20), dtype=complex)
    return sax.reciprocal({("in0", "out0"): amplitude * np.exp(1j * phase)})
waveguide(length=100.0)
# Build an MZI: two couplers joined by a delay waveguide on the top arm.
mzi, _ = sax.circuit(
    netlist={
        "instances": {
            "lft": coupler,
            "top": waveguide,
            "rgt": coupler,
        },
        "connections": {
            "lft,out0": "rgt,in0",
            "lft,out1": "top,in0",
            "top,out0": "rgt,in1",
        },
        "ports": {
            "in0": "lft,in0",
            "in1": "lft,in1",
            "out0": "rgt,out0",
            "out1": "rgt,out1",
        },
    }
)
type(mzi)
import numpy as np
# Sweep 1000 wavelength points between 1.53 and 1.57.
wl = np.linspace(1.53, 1.57, 1000)
result = mzi(wl=wl, lft={'coupling': 0.3}, top={'length': 200.0}, rgt={'coupling': 0.8})
# Plot using Plotly:
import plotly.express as px
import pandas as pd # https://pandas.pydata.org/docs/user_guide/10min.html
# Two lines: power transmission to each output port.
t1 = np.abs(result['in0', 'out0'])**2
t2 = np.abs(result['in0', 'out1'])**2
df = pd.DataFrame(np.stack((t1, t2)).transpose(), index=wl, columns=['Output 1','Output 2'])
fig = px.line(df, labels={'index':'Wavelength', 'value':'Transmission'}, markers=True)
fig.show()
|
"""Additionnal DDL for SQL Alchemy."""
from sqlalchemy.sql.ddl import _CreateDropBase
class _CreateDropBaseView(_CreateDropBase):
    """A base for Create and Drop View statements."""
    def __init__(self, element, cascade=False, on=None, bind=None):
        # element: the view object targeted by this DDL statement.
        # cascade: when True, emit CASCADE on DROP.
        self.view = element
        self.cascade = cascade
        super().__init__(element, on=on, bind=bind)
class CreateView(_CreateDropBaseView):
    """Represent a CREATE VIEW statement."""
    __visit_name__ = "create_view"
    def __init__(self, element, on=None, bind=None):
        """Create a new CREATE VIEW statement."""
        super().__init__(element, on=on, bind=bind)
class DropView(_CreateDropBaseView):
    """Represent a DROP VIEW statement."""
    __visit_name__ = "drop_view"
    def __init__(self, element, cascade=False, on=None, bind=None):
        """Create a new DROP VIEW statement."""
        super().__init__(element, cascade=cascade, on=on, bind=bind)
class CreateMaterializedView(_CreateDropBaseView):
    """Represent a CREATE MATERIALIZED VIEW statement."""
    # NOTE(review): shares __visit_name__ "create_view" with CreateView, so
    # both are rendered by the same compiler visitor.
    __visit_name__ = "create_view"
    def __init__(self, element, on=None, bind=None):
        """Create a new CREATE MATERIALIZED VIEW statement."""
        super().__init__(element, on=on, bind=bind)
class DropMaterializedView(_CreateDropBaseView):
    """Represent a DROP MATERIALIZED VIEW statement."""
    # NOTE(review): shares __visit_name__ "drop_view" with DropView, so both
    # are rendered by the same compiler visitor.
    __visit_name__ = "drop_view"
    def __init__(self, element, cascade=False, on=None, bind=None):
        """Create a new DROP MATERIALIZED VIEW statement."""
        super().__init__(element, cascade=cascade, on=on, bind=bind)
|
# Copyright 2017 MDSLAB - University of Messina
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__author__ = "Nicola Peditto <npeditto@unime.it"
import imp
import inspect
import os
from iotronic_lightningrod.config import package_path
from iotronic_lightningrod.lightningrod import RPC_devices
from iotronic_lightningrod.lightningrod import SESSION
from iotronic_lightningrod.modules import Module
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class DeviceManager(Module.Module):
    """Lightning-rod module that loads the board-specific device driver and
    registers its methods as WAMP RPC endpoints."""

    def __init__(self, board, session):
        # Module declaration
        super(DeviceManager, self).__init__("DeviceManager", board)

        device_type = board.type
        path = package_path + "/devices/" + device_type + ".py"

        if os.path.exists(path):
            # NOTE: `imp` is deprecated (importlib.machinery is the modern
            # API); kept for consistency with the rest of the code base.
            device_module = imp.load_source("device", path)
            LOG.info(" - Device " + device_type + " module imported!")
            device = device_module.System()
            dev_meth_list = inspect.getmembers(
                device,
                predicate=inspect.ismethod
            )
            RPC_devices[device_type] = dev_meth_list
            self._deviceWampRegister(dev_meth_list, board)
            board.device = device
        else:
            LOG.warning("Device " + device_type + " not supported!")

    def finalize(self):
        """No finalization steps required for this module."""
        pass

    def restore(self):
        """No restore steps required for this module."""
        pass

    def _deviceWampRegister(self, dev_meth_list, board):
        """Register every device method (except the constructor and
        finalizer) as a WAMP RPC under iotronic.<board-uuid>.<method>."""
        LOG.info(" - " + str(board.type).capitalize()
                 + " device registering RPCs:")
        for meth in dev_meth_list:
            # Boolean `and` replaces the original bitwise `&`, which only
            # worked because both operands happened to be bools.
            if meth[0] != "__init__" and meth[0] != "finalize":
                rpc_addr = u'iotronic.' + board.uuid + '.' + meth[0]
                SESSION.register(meth[1], rpc_addr)
                LOG.info(" --> " + str(meth[0]) + " registered!")
|
class Path:
    """A route through a graph: a numeric id plus the edges it traverses."""

    def __init__(self):
        self.id = 0        # path identifier
        self.edges = []    # ordered list of edge identifiers

    def _describe(self):
        """Build the textual form shared by __repr__ and __str__."""
        return f"\nPathid:{self.id}\nedges:{self.edges}\ntransponder_id:"

    def __repr__(self):
        return self._describe()

    def __str__(self):
        return self._describe()
|
from django.db import models
from django.http import request
from django.shortcuts import render, redirect, reverse, get_object_or_404
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView, FormView
from django.views.generic.base import View
from .models import Category, Post, Comment
from django.urls import reverse_lazy
from .forms import CommentForm
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
class Home(ListView):
    """Public post list: newest first, three posts per page."""
    model = Post
    template_name = "blog/home.html"
    context_object_name = 'posts'
    ordering = '-pub_date'
    paginate_by = 3
@method_decorator(login_required, name='dispatch')
class Dashboard(View):
    """Authenticated post list rendered with the admin-page template."""
    def get(self, request, *args, **kwargs):
        # Reuse the Home list view with a different template.
        # (Fixes the misspelled `reqeust` parameter name.)
        view = Home.as_view(
            template_name="blog/admin_page.html",
            paginate_by=3
        )
        return view(request, *args, **kwargs)
class PostDisplay(DetailView):
    """Read-only post page; bumps the post's view counter once per fetch."""
    model = Post
    def get_object(self):
        # Count this view and persist the counter.
        object = super(PostDisplay, self).get_object()
        object.view_count += 1
        object.save()
        return object
    def get_context_data(self, **kwargs):
        # Use self.object (set by DetailView.get from get_object) instead of
        # calling get_object() again, which double-incremented view_count.
        context = super(PostDisplay, self).get_context_data(**kwargs)
        context['comments'] = Comment.objects.filter(post=self.object)
        context['form'] = CommentForm
        return context
@method_decorator(login_required, name='dispatch')
class PostComment(FormView):
    """Handles the comment form POSTed from the post-detail page."""
    form_class = CommentForm
    template_name = "blog/post_detail.html"
    def form_valid(self, form):
        # Stamp the comment with the logged-in author and the target post.
        form.instance.by = self.request.user
        post = Post.objects.get(pk=self.kwargs['pk'])
        form.instance.post = post
        form.save()
        return super(PostComment, self).form_valid(form)
    def get_success_url(self):
        # Redirect back to the post the comment was left on.
        return reverse('post_detail', kwargs = {'pk': self.kwargs['pk']})
class PostDetail(View):
    """Dispatches GET to the display view and POST to the comment view."""
    def get(self, request, *args, **kwargs):
        view = PostDisplay.as_view()
        return view(request, *args, **kwargs)
    def post(self, request, *args, **kwargs):
        view = PostComment.as_view()
        return view(request, *args, **kwargs)
@method_decorator(login_required, name='dispatch')
class PostCreate(CreateView):
    """New-post form; the logged-in user becomes the author."""
    model = Post
    fields = ("title", "content", "category")
    def form_valid(self, form):
        form.instance.author = self.request.user
        return super(PostCreate, self).form_valid(form)
@method_decorator(login_required, name='dispatch')
class PostUpdate(UpdateView):
    """Edit an existing post's title/content/category (login required)."""
    model = Post
    fields = ('title', 'content', 'category')
@method_decorator(login_required, name='dispatch')
class PostDelete(DeleteView):
    """Delete a post, then return to the dashboard (login required)."""
    model = Post
    success_url = reverse_lazy('dashboard')
class PostCategory(ListView):
    """Posts filtered to a single category (pk taken from the URL)."""
    model = Post
    template_name = 'blog/post_category.html'
    context_object_name = "posts"
    ordering = '-pub_date'
    paginate_by = 3
    def get_queryset(self):
        # 404 early if the category does not exist.
        self.category = get_object_or_404(Category, pk=self.kwargs['pk'])
        return Post.objects.filter(category = self.category)
    def get_context_data(self, **kwargs):
        # Expose the category itself for the template header.
        context = super(PostCategory, self).get_context_data(**kwargs)
        context['category'] = self.category
        return context
from django.urls import path
from .views import (
comment_create_view,
comment_delete_view,
comment_update_view,
)
app_name = 'comments'
# Comment CRUD endpoints, keyed by the comment's integer id.
urlpatterns = [
    path('create/<int:id>', comment_create_view, name='create'),
    path('delete/<int:id>', comment_delete_view, name='delete'),
    path('update/<int:id>', comment_update_view, name='update')
]
|
import os.path
import zope.interface
from plone.z3cform.templates import ZopeTwoFormTemplateFactory
from plone.z3cform.interfaces import IFormWrapper
from pmr2.app.browser.layout import FormWrapper
class IShjsLayoutWrapper(IFormWrapper):
    """
    The interface for the SHJS layout wrapper.

    Marker interface only: it declares no attributes and exists so that
    the layout template factory below can be registered against it.
    """
def path(p):
    """Return *p* resolved relative to this module's directory.

    A named ``def`` instead of an assigned lambda (PEP 8): the helper
    gets a real __name__ for tracebacks and can carry this docstring.
    """
    return os.path.join(os.path.dirname(__file__), p)

# Template factory for the SHJS layout, registered for the wrapper interface.
shjs_layout_factory = ZopeTwoFormTemplateFactory(
    path('shjs_layout.pt'), form=IShjsLayoutWrapper)
class ShjsLayoutWrapper(FormWrapper):
    """\
    This layout wrapper provides XML stylesheet declarations for the
    rendering of MathML.
    """
    # NOTE(review): zope.interface.implements() is Python-2-only class
    # advice; under Python 3 this would need the @implementer decorator.
    zope.interface.implements(IShjsLayoutWrapper)
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant l'action donner_points_tribut."""
from primaires.scripting.action import Action
from primaires.scripting.instruction import ErreurExecution
class ClasseAction(Action):
    """Give tribute points to a character."""

    @classmethod
    def init_types(cls):
        # Register the callable with its scripting signature:
        # donner_tribut(Personnage, Fraction).
        cls.ajouter_types(cls.donner_tribut, "Personnage", "Fraction")

    @staticmethod
    def donner_tribut(personnage, nb_points):
        """Give tribute points to a character.

        Parameters to specify:
          * personnage - the character who should receive the points
          * nb_points - the number of points (greater than 0) to give.

        You can give tribute points to NPCs as well, even though that
        seems less useful in the longer term.
        """
        # Scripting passes a Fraction; truncate it to a whole point count.
        points = int(nb_points)
        if points <= 0:
            raise ErreurExecution("nombre de points de tribut négatif ou nul")
        # French pluralization of "point" in the in-game message below.
        s = "s" if points > 1 else ""
        personnage.points_tribut += points
        personnage << "Vous recevez {} point{s} de tribut !".format(
            points, s=s)
|
# In Python, a truthy value is a value that translates to True when evaluated in
# a Boolean context. All values are truthy unless they're defined as falsy.
# All falsy values are as follows:
# False
# None
# 0
# []
# {}
# ""
# Create a function that takes an argument of any data type and returns 1 if it's
# truthy and 0 if it's falsy.
def is_truthy(val):
    """Return 1 if *val* is truthy, 0 if it is falsy.

    Fix: the previous membership test
    ``val not in [False, None, 0, [], {}, ""]`` mis-classified falsy
    values that do not compare equal to any listed sentinel — e.g.
    ``()`` and ``set()`` were reported as truthy. Delegating to
    Python's own truth protocol handles every type correctly.
    """
    return 1 if val else 0
# Demo: the canonical falsy examples print 0; a non-empty string
# (even the text "False") is truthy and prints 1.
print(is_truthy(0)) #➞ 0
print(is_truthy(False)) #➞ 0
print(is_truthy("")) #➞ 0
print(is_truthy("False")) #➞ 1
|
from rest_framework.test import APITestCase, APIClient
from django.contrib.auth import get_user_model
class TestSetUp(APITestCase):
    """Shared fixtures: product/order payloads and an authenticated API client."""

    def setUp(self):
        # Endpoints and payloads reused by product/checkout test cases.
        self.product_url = '/products/'
        self.product_data = {
            'product_name': 'Pant',
            'description': 'Nice',
            'price': 100.0,
            'quantity': 100,
        }
        self.checkout_url = '/checkout/'
        self.order_data = {
            'shipping_address': 'abc',
            'phone_number': '123',
        }
        self.user = self.setup_user()
        # force_authenticate bypasses the login flow entirely.
        self.client = APIClient()
        self.client.force_authenticate(user=self.user)
        return super().setUp()

    @staticmethod
    def setup_user():
        """Create the test user with a properly hashed password.

        Fix: ``User.objects.create(password=...)`` stores the raw string,
        so any test exercising a real login/token flow would fail;
        ``create_user`` runs the password through the configured hasher.
        """
        User = get_user_model()
        return User.objects.create_user(username='user1', password='user1', email='user1@mail.com')
|
################################################################################
# Copyright 2019-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the LICENSE file for details.
# SPDX-License-Identifier: MIT
#
# Fusion models for Atomic and molecular STructures (FAST)
# Fusion model of 3D CNN and GCN (or different 3D CNNs) for regression
################################################################################
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import argparse
import numpy as np
import scipy as sp
import pandas as pd
import tensorflow as tf
sys.stdout.flush()
sys.path.insert(0, '../3dcnn')
from data_reader import *
from dnn_general import *
# Command-line interface. All flags are optional; when a flag is omitted,
# the corresponding g_* module default below is meant to apply.
parser = argparse.ArgumentParser()
parser.add_argument("--main-dir", default=[], nargs="+", help="main model/dataset directory")
parser.add_argument("--fusionmodel-subdir", default=[], nargs="+", help="subdirectory storing fusion models/results (under main_dir)")
#parser.add_argument("--indmodel-subdirs", default=[], nargs="+", help="subdirectory storing individual models/results (under main_dir)")
#parser.add_argument("--csvfile", default=[], nargs="+", help="")
#parser.add_argument("--train-featfiles", default=[], nargs="+", help="")
#parser.add_argument("--val-featfiles", default=[], nargs="+", help="")
#parser.add_argument("--test-featfiles", default=[], nargs="+", help="")
parser.add_argument("--run-mode", default=[], nargs="+", help="1: training, 2: test, 3: test external testset and save features")
parser.add_argument("--external-dir", default=[], nargs="+")
parser.add_argument("--external-csvfile", default=[], nargs="+")
parser.add_argument("--external-3dcnn-featfile", default=[], nargs="+")
parser.add_argument("--external-sgcnn-featfile", default=[], nargs="+")
parser.add_argument("--external-outprefix", default=[], nargs="+")
args = parser.parse_args()
# Default locations: dataset root and the two per-model subdirectories
# (SG-CNN features first, 3D-CNN features second — the order must match
# the g_*_feat_files lists below).
g_main_dir = '../../data'
g_model_subdirs = ['pdbbind2016_general_refined_sgcn_20190728', 'pdbbind2016_refined_pybel_processed_crystal_48_radius1_sigma1_rot0_model_3dcnn_res_result_20191009']
g_fusion_model_subdir = 'pdbbind2016_fusion_48_sgcn_20191009k'
# Precomputed hidden-feature files per split: one inner list per source
# model, concatenated along the sample axis by load_data().
g_train_feat_files = [['pybel_processed_pdbbind_2016_general_refined_Jul_28_19_18_12_1564362743_general_train_hidden_features.npy', 'pybel_processed_pdbbind_2016_general_refined_Jul_28_19_18_12_1564362743_refined_train_hidden_features.npy'], ['pdbbind2016_refined_pybel_processed_crystal_48_radius1_sigma1_rot0_model_3dcnn_res_result_20191009_general_train_fc10.npy', 'pdbbind2016_refined_pybel_processed_crystal_48_radius1_sigma1_rot0_model_3dcnn_res_result_20191009_train_fc10.npy']]
g_val_feat_files = [['pybel_processed_pdbbind_2016_general_refined_Jul_28_19_18_12_1564362743_general_val_hidden_features.npy', 'pybel_processed_pdbbind_2016_general_refined_Jul_28_19_18_12_1564362743_refined_val_hidden_features.npy'], ['pdbbind2016_refined_pybel_processed_crystal_48_radius1_sigma1_rot0_model_3dcnn_res_result_20191009_general_val_fc10.npy', 'pdbbind2016_refined_pybel_processed_crystal_48_radius1_sigma1_rot0_model_3dcnn_res_result_20191009_val_fc10.npy']]
g_test_feat_files = ['pybel_processed_pdbbind_2016_general_refined_Jul_28_19_18_12_1564362743_hidden_features.npy', 'pdbbind2016_refined_pybel_processed_crystal_48_radius1_sigma1_rot0_model_3dcnn_res_result_20191009_test_fc10.npy']
g_csv_file = 'pdbbind2016_general+refined_pybel_processed_crystal_48_radius1_sigma1_rot0_info.csv'
# External-dataset defaults (run mode 3); overridden by the --external-* flags.
g_external_dir = ''
g_external_csv_file = ''
g_external_feat_files = ['', '']
g_external_out_prefix = ''
#g_model_subdirs = ['pdbbind_2018_with_water', 'pdbbind_2018_with_water']
#g_model_subdirs = ['pdbbind_2018_without_water', 'pdbbind_2018_without_water']
#g_test_feat_files = ['core_test_hidden_fc12.npy', 'pdbbind2016_refined_pybel_processed_crystal_48_radius1_sigma1_rot0_model_3dcnn_res_result_20191009_test_fc10.npy']
#g_csv_file = g_model_subdirs[0], 'pybel_processed_docking_48_radius1_sigma1_rot0_info.csv'
# Column indices into the info CSV: complex id, label, train/val/test split.
g_csv_ind_input = 1
g_csv_ind_output = 2
g_csv_ind_split = 3
g_input_dim = [48, 48, 48, 75]
g_input_type = 1
g_output_dim = [1]
g_output_type = 10
g_run_mode = 2 # 1: training, 2: testing, 3: testing on external test only data
# Training hyper-parameters (used only in run mode 1).
g_epoch_count = 1000
g_batch_size = 100 # 50
g_save_rate = 0
# Adam settings: [type, lr, beta1, beta2, epsilon]; decay: [type, steps, rate].
g_optimizer_info = [1, 0.002, 0.9, 0.999, 1e-08]
g_decay_info = [1, 1000, 0.99]
g_loss_info = [2, 0, 0, 5e-2] # 1: L1, 2: L2 -> doesn't affect
def model_fusion_3(model_name, feat1, feat2, feat3, train_mode, reuse):
    """Three-branch fusion head regressing a single scalar.

    Each input feature tensor is projected to 10 units (FC -> dropout ->
    batch-norm -> leaky ReLU); the three branches are concatenated and
    passed through two more FC layers.

    Args:
        model_name: tf variable scope for this head's weights.
        feat1, feat2, feat3: 2-D (batch, features) tensors from the
            source models.
        train_mode: boolean placeholder controlling batch-norm phase.
        reuse: forwarded to tf.variable_scope for weight sharing.
    Returns:
        (batch, 1) regression output tensor.
    """
    with tf.variable_scope(model_name, reuse=reuse):
        # NOTE(review): keep_prob is fixed at 0.5 regardless of
        # train_mode, so dropout also fires at inference — confirm intent.
        dropout=0.5
        print(feat1.shape)
        # Branch 1: project feat1 to 10 units.
        fc11_w = weight_var([int(feat1.shape[1]), 10], stddev=0.01, name="fc11_w")
        fc11_b = bias_var([10], name="fc11_b")
        fc11_z = tf.matmul(feat1, fc11_w) + fc11_b
        fc11_h = lrelu(bn(tf.nn.dropout(fc11_z, keep_prob=dropout), train_mode,"fc11_bn"))
        print(fc11_h.shape)
        print(feat2.shape)
        # Branch 2: project feat2 to 10 units.
        fc12_w = weight_var([int(feat2.shape[1]), 10], stddev=0.01, name="fc12_w")
        fc12_b = bias_var([10], name="fc12_b")
        fc12_z = tf.matmul(feat2, fc12_w) + fc12_b
        fc12_h = lrelu(bn(tf.nn.dropout(fc12_z, keep_prob=dropout), train_mode,"fc12_bn"))
        print(fc12_h.shape)
        print(feat3.shape)
        # Branch 3: project feat3 to 10 units.
        fc13_w = weight_var([int(feat3.shape[1]), 10], stddev=0.01, name="fc13_w")
        fc13_b = bias_var([10], name="fc13_b")
        fc13_z = tf.matmul(feat3, fc13_w) + fc13_b
        fc13_h = lrelu(bn(tf.nn.dropout(fc13_z, keep_prob=dropout), train_mode,"fc13_bn"))
        print(fc13_h.shape)
        # Fuse the three 10-unit branches (30 units total).
        concat = tf.concat([fc11_h, fc12_h, fc13_h], 1)
        print(concat.shape)
        fc2_w = weight_var([30, 10], stddev=0.01, name="fc2_w")
        fc2_b = bias_var([10], name="fc2_b")
        fc2_z = tf.matmul(concat, fc2_w) + fc2_b
        fc2_h = bn(tf.nn.relu(fc2_z), train_mode,"fc2_bn")
        print(fc2_h.shape)
        # Final linear layer: scalar prediction per sample.
        fc3_w = weight_var([10, 1], stddev=0.01, name="fc3_w")
        fc3_b = bias_var([1], name="fc3_b")
        fc3_z = tf.matmul(fc2_h, fc3_w) + fc3_b
        print(fc3_z.shape)
        return fc3_z
def model_fusion_2(model_name, feat1, feat2, train_mode, reuse):
    """Two-branch fusion head with skip connections, regressing a scalar.

    Each feature tensor is projected to 5 units; the raw features AND
    both projections are concatenated (hence the fixed width 32 below:
    feat1 + feat2 must together contribute 22 features) and passed
    through two more FC layers.

    Args:
        model_name: tf variable scope for this head's weights.
        feat1, feat2: 2-D (batch, features) tensors from the source models.
        train_mode: boolean placeholder controlling batch-norm phase.
        reuse: forwarded to tf.variable_scope for weight sharing.
    Returns:
        (batch, 1) regression output tensor.
    """
    with tf.variable_scope(model_name, reuse=reuse):
        #dropout=0.5
        # keep_prob=1.0 effectively disables dropout in this head.
        dropout=1.0
        print(feat1.shape)
        # Branch 1: project feat1 to 5 units.
        fc11_w = weight_var([int(feat1.shape[1]), 5], stddev=0.01, name="fc11_w")
        fc11_b = bias_var([5], name="fc11_b")
        fc11_z = tf.matmul(feat1, fc11_w) + fc11_b
        fc11_h = lrelu(bn(tf.nn.dropout(fc11_z, keep_prob=dropout), train_mode,"fc11_bn"))
        print(fc11_h.shape)
        print(feat2.shape)
        # Branch 2: project feat2 to 5 units.
        fc12_w = weight_var([int(feat2.shape[1]), 5], stddev=0.01, name="fc12_w")
        fc12_b = bias_var([5], name="fc12_b")
        fc12_z = tf.matmul(feat2, fc12_w) + fc12_b
        fc12_h = lrelu(bn(tf.nn.dropout(fc12_z, keep_prob=dropout), train_mode,"fc12_bn"))
        print(fc12_h.shape)
        #fc2_h = fc11_h + fc12_h #-> becomes worse!
        # Skip connection: concatenate raw inputs with both projections.
        concat = tf.concat([feat1, feat2, fc11_h, fc12_h], 1)
        #concat = tf.concat([feat1, fc11_h, fc12_h], 1)
        #concat = tf.concat([fc11_h, fc12_h], 1)
        print(concat.shape)
        # NOTE(review): 32 hard-codes the concat width — breaks if the
        # incoming feature dimensions change; confirm against callers.
        fc2_w = weight_var([32, 10], stddev=0.01, name="fc2_w")
        fc2_b = bias_var([10], name="fc2_b")
        fc2_z = tf.matmul(concat, fc2_w) + fc2_b
        fc2_h = bn(tf.nn.relu(fc2_z), train_mode,"fc2_bn")
        print(fc2_h.shape)
        # Final linear layer: scalar prediction per sample.
        fc3_w = weight_var([10, 1], stddev=0.01, name="fc3_w")
        fc3_b = bias_var([1], name="fc3_b")
        fc3_z = tf.matmul(fc2_h, fc3_w) + fc3_b
        print(fc3_z.shape)
        return fc3_z
def load_data(feat_dir, feat_files):
    """Load every .npy feature file in *feat_files* from *feat_dir* and
    stack them along the sample (first) axis."""
    arrays = []
    for feat_file in feat_files:
        arrays.append(np.load(os.path.join(feat_dir, feat_file)))
    return np.concatenate(arrays, axis=0)
def main():
    """Train or evaluate the two-stream (SG-CNN + 3D-CNN) fusion model.

    Run modes (g_run_mode / --run-mode): 1 trains with validation-based
    checkpointing, 2 evaluates on the test split, 3 evaluates on an
    external feature set and writes predictions next to it.
    """
    # Fix: these module-level settings are only conditionally re-assigned
    # below. Without the global declaration each name became a function
    # local, so reading it when its CLI flag was absent raised
    # UnboundLocalError (and CLI overrides never reached module scope).
    global g_main_dir, g_fusion_model_subdir, g_run_mode
    global g_external_dir, g_external_csv_file, g_external_out_prefix
    if args.main_dir:
        g_main_dir = args.main_dir[0]
    if args.fusionmodel_subdir:
        g_fusion_model_subdir = args.fusionmodel_subdir[0]
    #if args.indmodel_subdirs:
    #    g_model_subdirs = args.indmodel_subdirs
    #if args.csvfile:
    #    g_csv_file = args.csvfile[0]
    #if args.train_featfiles:
    #    g_train_feat_files = args.train_featfiles # need to fix!!!
    #if args.val_featfiles:
    #    g_val_feat_files = args.val_featfiles # need to fix!!!
    #if args.test_featfiles:
    #    g_test_feat_files = args.test_featfiles # need to fix!!!
    if args.run_mode:
        g_run_mode = int(args.run_mode[0])
        print(g_run_mode)
    if args.external_dir:
        g_external_dir = args.external_dir[0]
    if args.external_csvfile:
        g_external_csv_file = args.external_csvfile[0]
    # g_external_feat_files is mutated in place (item assignment), so it
    # does not need a global declaration.
    if args.external_3dcnn_featfile:
        g_external_feat_files[1] = args.external_3dcnn_featfile[0]
    if args.external_sgcnn_featfile:
        g_external_feat_files[0] = args.external_sgcnn_featfile[0]
    if args.external_outprefix:
        g_external_out_prefix = args.external_outprefix[0]
    # load dataset
    if g_run_mode == 3:
        data_reader = DataReader(g_external_dir, g_external_csv_file, g_csv_ind_input, g_csv_ind_output, g_csv_ind_split, g_input_dim, g_input_type, None, g_output_dim, g_output_type, None)
    else:
        data_reader = DataReader(g_main_dir, g_csv_file, g_csv_ind_input, g_csv_ind_output, g_csv_ind_split, g_input_dim, g_input_type, None, g_output_dim, g_output_type, None)
    train_count = len(data_reader.train_list)
    val_count = len(data_reader.val_list)
    test_count = len(data_reader.test_list)
    fusion_model_dir = os.path.join(g_main_dir, g_fusion_model_subdir)
    if not os.path.exists(fusion_model_dir):
        os.makedirs(fusion_model_dir)
    # load feature files
    if g_run_mode == 3:
        x_test = [np.load(os.path.join(g_external_dir, feat_file)) for feat_file in g_external_feat_files]
        # Fix: y_test was previously built only in the non-external branch,
        # so run mode 3 crashed with NameError in the evaluation loop. The
        # external CSV also carries labels via data_reader.test_list.
        y_test = np.ndarray(shape=(test_count, 1), dtype=np.float32)
        for ind in range(test_count):
            input_info, output_info = data_reader.test_list[ind]
            y_test[ind] = float(output_info)
    else:
        x_train = [load_data(os.path.join(g_main_dir, model_subdir), feat_files) for model_subdir, feat_files in zip(g_model_subdirs, g_train_feat_files)]
        y_train = np.ndarray(shape=(train_count, 1), dtype=np.float32)
        for ind in range(train_count):
            input_info, output_info = data_reader.train_list[ind]
            y_train[ind] = float(output_info)
        x_val = [load_data(os.path.join(g_main_dir, model_subdir), feat_files) for model_subdir, feat_files in zip(g_model_subdirs, g_val_feat_files)]
        y_val = np.ndarray(shape=(val_count, 1), dtype=np.float32)
        for ind in range(val_count):
            input_info, output_info = data_reader.val_list[ind]
            y_val[ind] = float(output_info)
        x_test = [np.load(os.path.join(g_main_dir, model_subdir, feat_file)) for model_subdir, feat_file in zip(g_model_subdirs, g_test_feat_files)]
        y_test = np.ndarray(shape=(test_count, 1), dtype=np.float32)
        for ind in range(test_count):
            input_info, output_info = data_reader.test_list[ind]
            y_test[ind] = float(output_info)
    ############################################################################
    # initialize fusion model
    # reset tf variables
    tf.reset_default_graph()
    # setup place holder for input, output
    input_ph1 = tf.placeholder(tf.float32, (None, x_test[0].shape[1]))
    input_ph2 = tf.placeholder(tf.float32, (None, x_test[1].shape[1]))
    #input_ph3 = tf.placeholder(tf.float32, (None, x_test[2].shape[1]))
    output_ph = tf.placeholder(tf.float32, (None, 1))
    training_phase_ph = tf.placeholder(tf.bool, name='training_phase_placeholder')
    #logit_ph = model_fusion_3('model_fusion_3', input_ph1, input_ph2, input_ph3, training_phase_ph, reuse=False)
    logit_ph = model_fusion_2('model_fusion_2', input_ph1, input_ph2, training_phase_ph, reuse=False)
    # setup loss (mean squared error over the batch)
    loss = tf.reduce_sum(tf.reduce_mean(tf.square(tf.subtract(logit_ph, output_ph)), axis=0))
    tf.summary.scalar('loss', loss)
    # setup learning rate and decay
    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(g_optimizer_info[1], global_step, g_decay_info[1], g_decay_info[2], staircase=True)
    # setup optimizer
    optimizer = tf.train.AdamOptimizer(learning_rate, beta1=g_optimizer_info[2], beta2=g_optimizer_info[3], epsilon=g_optimizer_info[4]).minimize(loss, global_step=global_step)
    # for tensorboard
    merge_summary = tf.summary.merge_all()
    ############################################################################
    # train/test fusion model
    if g_run_mode == 1:
        # start session
        saver = tf.train.Saver()
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = 0.9
        with tf.Session(config=config) as sess:
            sess.run(tf.global_variables_initializer())
            # load saved model if available
            ckpt = tf.train.get_checkpoint_state(fusion_model_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                print("### checkpoint found -> model restored!")
            train_writer = tf.summary.FileWriter(fusion_model_dir, sess.graph)
            output_train_results = []
            output_val_results = []
            for epoch_ind in range(g_epoch_count):
                print('epoch - %d/%d' % (epoch_ind+1, g_epoch_count))
                # training: shuffle sample indices, then iterate mini-batches
                batch_count = train_count // g_batch_size
                train_inds = np.array(range(batch_count * g_batch_size))
                random.shuffle(train_inds)
                l_avg = 0
                for batch_ind in range(batch_count):
                    ind0 = batch_ind * g_batch_size
                    ind1 = (batch_ind + 1) * g_batch_size
                    inds = np.array(train_inds[ind0:ind1])
                    x_batch1 = x_train[0][inds,:]
                    x_batch2 = x_train[1][inds,:]
                    #x_batch3 = x_train[2][inds,:]
                    y_batch = y_train[inds]
                    #feed_dict = {input_ph1: x_batch1, input_ph2: x_batch2, input_ph3: x_batch3, output_ph: y_batch, training_phase_ph : 1}
                    feed_dict = {input_ph1: x_batch1, input_ph2: x_batch2, output_ph: y_batch, training_phase_ph : 1}
                    _, l, lr, y_batch_pred, sstr = sess.run([optimizer, loss, learning_rate, logit_ph, merge_summary], feed_dict=feed_dict)
                    print('[Training] [%d-%d]-[%d-%d] mini-batch loss: %f, learning rate: %f' % (epoch_ind+1, g_epoch_count, batch_ind+1, batch_count, l, lr))
                    l_avg += l
                    if g_save_rate > 0 and (batch_ind % g_save_rate == 0 or batch_ind == batch_count - 1):
                        model_file = "model_%03d_%05d.ckpt" % (epoch_ind+1, batch_ind+1)
                        model_path = os.path.join(fusion_model_dir, model_file)
                        saver.save(sess, model_path)
                        print('model saved: %s' % model_path)
                    if batch_ind % 10 == 1:
                        train_writer.add_summary(sstr, epoch_ind)
                # training epoch summary (loss, etc)
                l_avg = l_avg / batch_count
                output_train_results.append(l_avg)
                # validation (batch size 1)
                batch_count = val_count // 1
                y_label = np.ndarray(shape=(batch_count * 1), dtype=np.float32)
                y_pred = np.ndarray(shape=(batch_count * 1), dtype=np.float32)
                y_error = np.ndarray(shape=(batch_count * 1), dtype=np.float32)
                l_avg = 0
                for batch_ind in range(batch_count):
                    ind0 = batch_ind * 1
                    ind1 = (batch_ind + 1) * 1
                    x_batch1 = x_val[0][ind0:ind1,:]
                    x_batch2 = x_val[1][ind0:ind1,:]
                    #x_batch3 = x_val[2][ind0:ind1,:]
                    y_batch = y_val[ind0:ind1]
                    #feed_dict = {input_ph1: x_batch1, input_ph2: x_batch2, input_ph3: x_batch3, output_ph: y_batch, training_phase_ph : 0}
                    feed_dict = {input_ph1: x_batch1, input_ph2: x_batch2, output_ph: y_batch, training_phase_ph : 0}
                    l, y_batch_pred = sess.run([loss, logit_ph], feed_dict=feed_dict)
                    print('[Validating] [%d-%d]' % (batch_ind+1, batch_count))
                    y_label[ind0:ind1] = y_batch[:,0]
                    y_pred[ind0:ind1] = y_batch_pred[:,0]
                    y_error[ind0:ind1] = np.linalg.norm(y_batch - y_batch_pred, axis=1)
                    l_avg += l
                # validation epoch summary
                l_avg = l_avg / batch_count
                output_val_results.append(l_avg)
                l2 = np.mean(y_error)
                rmse = math.sqrt(mean_squared_error(y_label, y_pred))
                mae = mean_absolute_error(y_label, y_pred)
                r2 = r2_score(y_label, y_pred)
                pearson, ppval = pearsonr(y_label, y_pred)
                spearman, spval = spearmanr(y_label, y_pred)
                mean = np.mean(y_pred)
                std = np.std(y_pred)
                print('[Validating] L2 error: %.3f, RMSE: %.3f, MAE: %.3f, R^2 score: %.3f, Pearson: %.3f, Spearman: %.3f, mean/std: %.3f/%.3f' % (l2, rmse, mae, r2, pearson, spearman, mean, std))
                # checkpoint whenever the current epoch ties/beats the best
                # validation loss seen so far
                if len(output_val_results) > 2 and l_avg <= min(np.asarray(output_val_results)):
                    model_file = "model_%03d.ckpt" % (epoch_ind+1)
                    model_path = os.path.join(fusion_model_dir, model_file)
                    saver.save(sess, model_path)
                    print('model saved: %s' % model_path)
            output_train_results_file = "output_train_summary.txt"
            with open(os.path.join(fusion_model_dir, output_train_results_file), 'w') as output_fp:
                for ind, loss in enumerate(output_train_results):
                    out_str = '%3d %10.4f' % (ind+1, loss)
                    output_fp.write(out_str)
                    output_fp.write('\n')
            output_val_results_file = "output_val_summary.txt"
            with open(os.path.join(fusion_model_dir, output_val_results_file), 'w') as output_fp:
                for ind, loss in enumerate(output_val_results):
                    out_str = '%3d %10.4f' % (ind+1, loss)
                    output_fp.write(out_str)
                    output_fp.write('\n')
    elif g_run_mode == 2 or g_run_mode == 3:
        # start session
        saver = tf.train.Saver()
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = 0.9
        with tf.Session(config=config) as sess:
            sess.run(tf.global_variables_initializer())
            # load saved model if available
            ckpt = tf.train.get_checkpoint_state(fusion_model_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                print("### checkpoint found -> model restored!")
            # evaluation (batch size 1)
            batch_count = test_count // 1
            y_label = np.ndarray(shape=(batch_count * 1), dtype=np.float32)
            y_pred = np.ndarray(shape=(batch_count * 1), dtype=np.float32)
            y_error = np.ndarray(shape=(batch_count * 1), dtype=np.float32)
            for batch_ind in range(batch_count):
                ind0 = batch_ind * 1
                ind1 = (batch_ind + 1) * 1
                x_batch1 = x_test[0][ind0:ind1,:]
                x_batch2 = x_test[1][ind0:ind1,:]
                #x_batch3 = x_test[2][ind0:ind1,:]
                y_batch = y_test[ind0:ind1]
                #feed_dict = {input_ph1: x_batch1, input_ph2: x_batch2, input_ph3: x_batch3, training_phase_ph : 0}
                feed_dict = {input_ph1: x_batch1, input_ph2: x_batch2, training_phase_ph : 0}
                y_batch_pred = sess.run(logit_ph, feed_dict=feed_dict)
                print('[Testing] [%d-%d]' % (batch_ind+1, batch_count))
                y_label[ind0:ind1] = y_batch[:,0]
                y_pred[ind0:ind1] = y_batch_pred[:,0]
                y_error[ind0:ind1] = np.linalg.norm(y_batch - y_batch_pred, axis=1)
            l2 = np.mean(y_error)
            rmse = math.sqrt(mean_squared_error(y_label, y_pred))
            mae = mean_absolute_error(y_label, y_pred)
            r2 = r2_score(y_label, y_pred)
            pearson, ppval = pearsonr(y_label, y_pred)
            spearman, spval = spearmanr(y_label, y_pred)
            mean = np.mean(y_pred)
            std = np.std(y_pred)
            print('[Testing] L2 error: %.3f, RMSE: %.3f, MAE: %.3f, R^2 score: %.3f, Pearson: %.3f, Spearman: %.3f, mean/std: %.3f/%.3f' % (l2, rmse, mae, r2, pearson, spearman, mean, std))
            # write predictions next to the external data (mode 3) or into
            # the fusion model directory (mode 2)
            if g_run_mode == 3:
                output_dir = g_external_dir
                output_file = "%s_pred.txt" % g_external_out_prefix
            else:
                output_dir = fusion_model_dir
                output_file = "fusion_output_test_pred.txt"
            np.save(os.path.join(output_dir, output_file[:-3] + 'npy'), y_pred)
            with open(os.path.join(output_dir, output_file), 'w') as output_fp:
                for out_ind in range(y_error.shape[0]):
                    out_str = '%10.4f %10.4f %10.4f' % (y_label[out_ind], y_pred[out_ind], y_error[out_ind])
                    output_fp.write(out_str)
                    output_fp.write('\n')
    tf.Session().close()
if __name__ == '__main__':
    main()
|
import os
import re
import sys
from collections import defaultdict
from conans.model.ref import ConanFileReference
from conan.test_package_runner import TestPackageRunner, DockerTestPackageRunner
from conan.builds_generator import (get_linux_gcc_builds, get_visual_builds,
get_osx_apple_clang_builds, get_mingw_builds, BuildConf)
from conan.log import logger
from conans.model.profile import Profile
def get_mingw_config_from_env():
    """Parse the MINGW_CONFIGURATIONS environment variable.

    The variable holds comma-separated entries of "@"-separated fields,
    e.g. "4.9@x86_64@seh@posix,4.9@x86_64@seh@win32". Returns a list of
    field lists, or [] when the variable is unset or empty.
    """
    raw = os.getenv("MINGW_CONFIGURATIONS", "")
    if not raw:
        return []
    return [entry.strip().split("@") for entry in raw.split(",")]
class PlatformInfo(object):
    """Easy mockable for testing"""
    def system(self):
        # Return the OS name ("Windows"/"Linux"/"Darwin"); imported lazily
        # so tests can substitute this class without the real platform call.
        import platform
        return platform.system()
class ConanMultiPackager(object):
    """ Help to generate common builds (setting's combinations), adjust the environment,
    and run conan test_package command in docker containers"""
    default_gcc_versions = ["4.6", "4.8", "4.9", "5.2", "5.3", "5.4", "6.2", "6.3"]
    default_visual_versions = ["10", "12", "14"]
    default_visual_runtimes = ["MT", "MD", "MTd", "MDd"]
    default_apple_clang_versions = ["7.3", "8.0", "8.1"]
    default_archs = ["x86", "x86_64"]

    def __init__(self, args=None, username=None, channel=None, runner=None,
                 gcc_versions=None, visual_versions=None, visual_runtimes=None,
                 apple_clang_versions=None, archs=None,
                 use_docker=None, curpage=None, total_pages=None,
                 docker_image=None, reference=None, password=None, remote=None,
                 upload=None, stable_branch_pattern=None,
                 vs10_x86_64_enabled=False,
                 mingw_configurations=None,
                 stable_channel=None,
                 platform_info=None,
                 upload_retry=None):
        # Every explicit argument wins over its CONAN_* environment
        # variable, which in turn wins over the class defaults.
        self._builds = []
        self._named_builds = {}
        self._platform_info = platform_info or PlatformInfo()
        self.runner = runner or os.system
        self.args = args or " ".join(sys.argv[1:])
        self.username = username or os.getenv("CONAN_USERNAME", None)
        if not self.username:
            raise Exception("Instance ConanMultiPackage with 'username' "
                            "parameter or use CONAN_USERNAME env variable")
        # Upload related variables
        self.upload_retry = upload_retry or os.getenv("CONAN_UPLOAD_RETRY", 3)
        self.reference = reference or os.getenv("CONAN_REFERENCE", None)
        self.password = password or os.getenv("CONAN_PASSWORD", None)
        self.remote = remote or os.getenv("CONAN_REMOTE", None)
        self.upload = upload or (os.getenv("CONAN_UPLOAD", None) in ["True", "true", "1"])
        self.stable_branch_pattern = stable_branch_pattern or os.getenv("CONAN_STABLE_BRANCH_PATTERN", None)
        default_channel = channel or os.getenv("CONAN_CHANNEL", "testing")
        stable_channel = stable_channel or os.getenv("CONAN_STABLE_CHANNEL", "stable")
        self.channel = self._get_channel(default_channel, stable_channel)
        os.environ["CONAN_CHANNEL"] = self.channel
        self.gcc_versions = gcc_versions or \
            list(filter(None, os.getenv("CONAN_GCC_VERSIONS", "").split(","))) or \
            self.default_gcc_versions
        # visual_versions=[] is a meaningful value (disable VS builds), so
        # it cannot go through the usual "or" chain.
        if visual_versions is not None:
            self.visual_versions = visual_versions
        else:
            env_visual_versions = list(filter(None, os.getenv("CONAN_VISUAL_VERSIONS", "").split(",")))
            self.visual_versions = env_visual_versions or self.default_visual_versions
        self.visual_runtimes = visual_runtimes or \
            list(filter(None, os.getenv("CONAN_VISUAL_RUNTIMES", "").split(","))) or \
            self.default_visual_runtimes
        self.apple_clang_versions = apple_clang_versions or \
            list(filter(None, os.getenv("CONAN_APPLE_CLANG_VERSIONS", "").split(","))) or \
            self.default_apple_clang_versions
        self.mingw_configurations = mingw_configurations or get_mingw_config_from_env()
        self.mingw_installer_reference = ConanFileReference.loads(os.getenv("CONAN_MINGW_INSTALLER_REFERENCE") or
                                                                  "mingw_installer/0.1@lasote/testing")
        self.archs = archs or \
            list(filter(None, os.getenv("CONAN_ARCHS", "").split(","))) or \
            self.default_archs
        self.use_docker = use_docker or os.getenv("CONAN_USE_DOCKER", False)
        self.curpage = curpage or os.getenv("CONAN_CURRENT_PAGE", 1)
        self.total_pages = total_pages or os.getenv("CONAN_TOTAL_PAGES", 1)
        self.docker_image = docker_image or os.getenv("CONAN_DOCKER_IMAGE", None)
        if self.password:
            # Escape double quotes; the password is interpolated into a
            # double-quoted shell command in upload_packages().
            self.password = self.password.replace('"', '\\"')
        self.conan_pip_package = os.getenv("CONAN_PIP_PACKAGE", None)
        self.vs10_x86_64_enabled = vs10_x86_64_enabled

    @property
    def builds(self):
        return self._builds

    @builds.setter
    def builds(self, confs):
        """For retrocompatibility directly assigning builds"""
        self._named_builds = {}
        self._builds = []
        for values in confs:
            if len(values) == 2:
                self._builds.append(BuildConf(values[0], values[1], {}, {}))
            elif len(values) != 4:
                raise Exception("Invalid build configuration, has to be a tuple of "
                                "(settings, options, env_vars, build_requires)")
            else:
                self._builds.append(BuildConf(*values))

    @property
    def named_builds(self):
        return self._named_builds

    @named_builds.setter
    def named_builds(self, confs):
        # Named builds map a page key to its own list of configurations;
        # they are mutually exclusive with the bulk "builds" list.
        self._builds = []
        self._named_builds = {}
        for key, pages in confs.items():
            for values in pages:
                if len(values) == 2:
                    self._named_builds.setdefault(key,[]).append(BuildConf(values[0], values[1], {}, {}))
                elif len(values) != 4:
                    raise Exception("Invalid build configuration, has to be a tuple of "
                                    "(settings, options, env_vars, build_requires)")
                else:
                    self._named_builds.setdefault(key,[]).append(BuildConf(*values))

    def add_common_builds(self, shared_option_name=None, pure_c=True, dll_with_static_runtime=False):
        """Populate self.builds with the standard matrix for the current OS."""
        builds = []
        if self._platform_info.system() == "Windows":
            if self.mingw_configurations:
                builds = get_mingw_builds(self.mingw_configurations, self.mingw_installer_reference, self.archs)
            builds.extend(get_visual_builds(self.visual_versions, self.archs, self.visual_runtimes,
                                            shared_option_name, dll_with_static_runtime, self.vs10_x86_64_enabled))
        elif self._platform_info.system() == "Linux":
            builds = get_linux_gcc_builds(self.gcc_versions, self.archs, shared_option_name, pure_c)
        elif self._platform_info.system() == "Darwin":
            builds = get_osx_apple_clang_builds(self.apple_clang_versions, self.archs, shared_option_name, pure_c)
        self.builds.extend(builds)

    def add(self, settings=None, options=None, env_vars=None, build_requires=None):
        """Append a single build configuration."""
        settings = settings or {}
        options = options or {}
        env_vars = env_vars or {}
        build_requires = build_requires or {}
        self.builds.append(BuildConf(settings, options, env_vars, build_requires))

    def run(self):
        """Full pipeline: optional pip install, builds, then upload."""
        self._pip_install()
        self.run_builds()
        self.upload_packages()

    def run_builds(self, curpage=None, total_pages=None):
        """Export the recipe and run the builds belonging to the current page."""
        if len(self.named_builds) > 0 and len(self.builds) > 0:
            raise Exception("Both bulk and named builds are set. Only one is allowed.")
        self.runner('conan export %s/%s' % (self.username, self.channel))
        builds_in_current_page = []
        if len(self.builds) > 0:
            # Bulk builds are striped across pages round-robin.
            curpage = curpage or int(self.curpage)
            total_pages = total_pages or int(self.total_pages)
            for index, build in enumerate(self.builds):
                if curpage is None or total_pages is None or (index % total_pages) + 1 == curpage:
                    builds_in_current_page.append(build)
        elif len(self.named_builds) > 0:
            curpage = curpage or self.curpage
            if curpage not in self.named_builds:
                # Fix: use %s formatting — curpage may be an int (the
                # default is 1), and string concatenation raised TypeError
                # here, masking the real "no builds for page" error.
                raise Exception("No builds set for page %s" % curpage)
            for build in self.named_builds[curpage]:
                builds_in_current_page.append(build)
        print("Page : ", curpage)
        print("Builds list:")
        for p in builds_in_current_page: print(list(p._asdict().items()))
        # Pull each gcc docker image only once per run.
        pulled_gcc_images = defaultdict(lambda: False)
        for build in builds_in_current_page:
            profile = _get_profile(build)
            gcc_version = profile.settings.get("compiler.version")
            if self.use_docker:
                build_runner = DockerTestPackageRunner(profile, self.username, self.channel,
                                                       self.mingw_installer_reference, self.runner, self.args,
                                                       docker_image=self.docker_image)
                build_runner.run(pull_image=not pulled_gcc_images[gcc_version])
                pulled_gcc_images[gcc_version] = True
            else:
                build_runner = TestPackageRunner(profile, self.username, self.channel,
                                                 self.mingw_installer_reference, self.runner, self.args)
                build_runner.run()

    def upload_packages(self):
        """Authenticate against the remote and upload all built packages."""
        if not self.upload:
            return
        if not self.reference or not self.password or not self.channel or not self.username:
            logger.info("Skipped upload, some parameter (reference, password or channel)"
                        " is missing!")
            return
        command = "conan upload %s@%s/%s --retry %s --all --force" % (self.reference,
                                                                      self.username,
                                                                      self.channel,
                                                                      self.upload_retry)
        user_command = 'conan user %s -p="%s"' % (self.username, self.password)
        logger.info("******** RUNNING UPLOAD COMMAND ********** \n%s" % command)
        if self._platform_info.system() == "Linux" and self.use_docker:
            # Docker builds leave root-owned files in the conan cache.
            self.runner("sudo chmod -R 777 ~/.conan/data")
            # self.runner("ls -la ~/.conan")
        if self.remote:
            command += " -r %s" % self.remote
            user_command += " -r %s" % self.remote
        ret = self.runner(user_command)
        if ret != 0:
            raise Exception("Error with user credentials")
        ret = self.runner(command)
        if ret != 0:
            raise Exception("Error uploading")

    def _pip_install(self):
        # Optionally (re)install a specific conan version before building.
        if self.conan_pip_package:
            sudo = "sudo" if self._platform_info.system() != "Windows" else ""
            self.runner('%s pip install %s' % (sudo, self.conan_pip_package))

    def _get_channel(self, default_channel, stable_channel):
        """Return the stable channel when the CI branch matches the stable
        pattern (default "master"); otherwise the default channel."""
        pattern = self.stable_branch_pattern or "master"
        prog = re.compile(pattern)
        travis = os.getenv("TRAVIS", False)
        travis_branch = os.getenv("TRAVIS_BRANCH", None)
        appveyor = os.getenv("APPVEYOR", False)
        appveyor_branch = os.getenv("APPVEYOR_REPO_BRANCH", None)
        bamboo = os.getenv("bamboo_buildNumber", False)
        bamboo_branch = os.getenv("bamboo_planRepository_branch", None)
        jenkins = os.getenv("JENKINS_URL", False)
        jenkins_branch = os.getenv("BRANCH_NAME", None)
        gitlab = os.getenv("GITLAB_CI", False)  # Mark that job is executed in GitLab CI environment
        gitlab_branch = os.getenv("CI_BUILD_REF_NAME", None)  # The branch or tag name for which project is built
        channel = stable_channel if travis and prog.match(travis_branch) else None
        channel = stable_channel if appveyor and prog.match(appveyor_branch) and \
            not os.getenv("APPVEYOR_PULL_REQUEST_NUMBER") else channel
        channel = stable_channel if bamboo and prog.match(bamboo_branch) else channel
        channel = stable_channel if jenkins and jenkins_branch and prog.match(jenkins_branch) else channel
        channel = stable_channel if gitlab and gitlab_branch and prog.match(gitlab_branch) else channel
        if channel:
            logger.warning("Redefined channel by CI branch matching with '%s', "
                           "setting CONAN_CHANNEL to '%s'" % (pattern, channel))
            # Stable uploads may use dedicated credentials.
            self.username = os.getenv("CONAN_STABLE_USERNAME", self.username)
            self.password = os.getenv("CONAN_STABLE_PASSWORD", self.password)
        ret = channel or default_channel
        return ret
def _get_profile(build_conf):
    """Build a conan Profile text from a build configuration's settings,
    options, env vars and build requires, and parse it."""
    def _pairs(items):
        return "\n".join("%s=%s" % (key, value) for key, value in items)

    settings = _pairs(sorted(build_conf.settings.items()))
    options = _pairs(build_conf.options.items())
    env_vars = _pairs(build_conf.env_vars.items())
    br_lines = ""
    for pattern, build_requires in build_conf.build_requires.items():
        br_lines += "\n".join("%s:%s" % (pattern, br) for br in build_requires)
    template = """
    [settings]
    %s
    [options]
    %s
    [env]
    %s
    [build_requires]
    %s
    """
    return Profile.loads(template % (settings, options, env_vars, br_lines))
if __name__ == "__main__":
    # Manual/example invocation: declare two MinGW configurations through the
    # environment variable consumed by ConanMultiPackager.
    os.environ["MINGW_CONFIGURATIONS"] = '4.9@x86_64@seh@posix, 4.9@x86_64@seh@win32'
    # mingw_configurations = [("4.9", "x86_64", "seh", "posix"),
    #                         ("4.9", "x86_64", "sjlj", "posix"),
    #                         ("4.9", "x86", "sjlj", "posix"),
    #                         ("4.9", "x86", "dwarf2", "posix")]
    builder = ConanMultiPackager(username="lasote", mingw_configurations=None, use_docker=False)
    builder.add_common_builds(pure_c=False)
    # Every generated build additionally pulls these build requirements.
    builder.add(build_requires={"*": ["uno/1.0@lasote/stable", "dos/0.1@lasote/testing"]})
    builder.run()
|
# First Come First Serve (FCFS) CPU scheduling simulation (Python 2: raw_input).
# Reads process names, arrival and burst times, then prints each process's
# start/end times plus per-process and average waiting/turnaround times.
print (" this process is for First Come First Serve")
size = int(raw_input("Enter How many Process you want to Enter ??"))
process = [0] * size
arrival = [0] * size
burst = [0] * size
for i in range(size):
    process[i] = (raw_input("Enter process name"))
    arrival[i] = int(raw_input("Enter Arrival Time for the process"))
    burst[i] = int(raw_input("Enter Burst time for the Process"))
    print(" ")
print(" your Enter Pocess Information")
for i in range(size):
    print(process[i], " ", arrival[i], " ", burst[i])
start = [0] * size
turn = [0] * size
waiting_Time = [0] * size
turn_time = [0] * size
for j in range(size):
    if j == 0:
        start[j] = arrival[j]
    else:
        # Bug fix: the CPU may sit idle between processes; a process cannot
        # start before it arrives, so start at the later of the previous
        # finish time and this process's arrival time.
        start[j] = max(turn[j - 1], arrival[j])
    turn[j] = start[j] + burst[j]
# (Removed dead per-iteration min(arrival)/index computation that was never used.)
for k in range(size):
    print(process[k], 'arrival time is', arrival[k], ' starts at ', start[k], 'and ends at ', turn[k])
for m in range(size):
    waiting_Time[m] = start[m] - arrival[m]
    turn_time[m] = turn[m] - arrival[m]
sum1 = 0
sum2 = 0
print("")
for l in range(size):
    sum1 += waiting_Time[l]
    sum2 += turn_time[l]
    print(process[l], 'waiting time is', waiting_Time[l])
    print(process[l], 'turn time is', turn_time[l])
# Bug fix: use true division so the averages are not truncated under Python 2.
awaiting_Time = float(sum1) / size
aturn_time = float(sum2) / size
print("")
print('average waiting time is: ', awaiting_Time)
print('average turn around time is: ', aturn_time)
|
# Console student-roster manager: add, rename, query and delete student
# names held in a list.  The menu loops until the user enters 0.
# (User-facing prompts are kept exactly as in the original.)
student_list=[]
while True:
    print(''''
    1-添加学员姓名:
    2-修改学员姓名:
    3-查询学员姓名:
    4-删除学员姓名:
    0-退出
    ''')
    select_number=int(input('请输入操作序号:'))
    while select_number<0 or select_number>4:
        select_number=int(input('输入错误,重新输入'))
    # Bug fix: the menu branches are now elif-chained.  Previously a value
    # read inside branch 2 or 3 (e.g. a student index of 3 or 4) leaked into
    # the later `if select_number==3/4` tests and triggered those branches.
    if select_number==1:
        name=input('请输入要添加的学员姓名:')
        student_list.append(name)
        print('学员添加成功')
    elif select_number==2:
        # Renaming requires a non-empty roster.
        if len(student_list):
            for x in range (0,len(student_list)):
                print(x+1,student_list[x])
            stu_number=int(input('请输入要修改学员的序号:'))
            while stu_number<1 or stu_number>len(student_list):
                # Bug fix: re-read the index here; the original assigned the
                # retry value to student_list itself, destroying the roster.
                stu_number=int(input('重新输入学员序号:'))
            new_name=input('请输入要修改的姓名:')
            student_list[stu_number-1]=new_name
            print('学院信息修改成功')
        else:
            print('学员信息为空,无法查询')
    elif select_number==3:
        if len(student_list):
            print('''1-输入序号查询
            2-查询所有学员
            ''')
            sub_choice=int(input('请输入操作序号:'))
            while sub_choice!=1 and sub_choice!=2:
                sub_choice=int(input('输入操作序号错误,请重新输入:'))
            if sub_choice==1:
                stu_number=int(input('请输入查询序号:'))
                name=student_list[stu_number-1]
                print('查询到的姓名是:%s'%name)
            else:
                for x in range (0,len(student_list)):
                    print(x+1,student_list[x])
        else:
            print('学员信息为空,无法查询')
    elif select_number==4:
        if len(student_list):
            print('''
            1-输入序号删除
            2-输入姓名删除
            3-删除所有学员
            ''')
            for x in range (0,len(student_list)):
                print(x+1,student_list[x])
            sub_choice=int(input('请输入操作序号:'))
            while sub_choice!=1 and sub_choice!=2 and sub_choice!=3:
                # Bug fix: keep asking until valid (was a single 'if',
                # allowing one bad answer through).
                sub_choice=int(input('请重新输入操作序号:'))
            if sub_choice==1:
                select=int(input('请输入删除序号:'))
                while select<1 or select>len(student_list):
                    # Bug fix: proper range validation.  The original used
                    # `select<1 and select>(0,len(student_list))`, which
                    # compared an int to a tuple and never rejected anything.
                    select=int(input('输入序号错误,请重新输入序号:'))
                student_list.pop(select-1)
                print('删除学员成功')
            elif sub_choice==2:
                name=input('请输入删除学员的姓名:')
                while name not in student_list:
                    name=input('重新输入删除学员的姓名:')
                student_list.remove(name)
                print('删除学员成功')
            else:
                del student_list[:]
                print('学员信息为空无法删除')
    elif select_number==0:
        break
|
import sys
from heapq import *
class StringWrapper:
    """A string with its newline characters removed, ordered by length.

    All comparison operators compare only the lengths of the wrapped
    strings, which makes instances directly usable as heap entries when
    selecting the longest/shortest lines.
    """
    def __init__(self, s):
        # Strip every newline so length reflects visible characters only.
        self.s = s.replace("\n", "")
    def __eq__(self, that):
        return len(self.s) == len(that.s)
    def __lt__(self, that):
        return len(self.s) < len(that.s)
    def __gt__(self, that):
        return len(self.s) > len(that.s)
    def __le__(self, that):
        return len(self.s) <= len(that.s)
    def __ge__(self, that):
        return len(self.s) >= len(that.s)
    def __str__(self):
        return self.s
    __repr__ = __str__
if __name__ == "__main__":
    # Usage: script <file>.  The file's first line holds N; print the N
    # longest of the remaining lines, in ascending length order.
    n = 0
    heap = []  # min-heap holding the N longest lines seen so far
    # Fix: use a context manager so the file is closed even on error
    # (the original left the handle open if an exception occurred).
    with open(sys.argv[1], 'r') as lines:
        for line in lines:
            if n == 0:
                n = int(line)
            else:
                wrapper = StringWrapper(line)
                if len(heap) == n:
                    # Heap full: keep the new line only when it is at least
                    # as long as the shortest currently kept line.
                    if wrapper >= heap[0]:
                        heapreplace(heap, wrapper)
                else:
                    heappush(heap, wrapper)
    # Pop ascending from the min-heap, filling the result back-to-front.
    res = [0] * len(heap)
    i = len(heap) - 1
    while len(heap) > 0:
        res[i] = heappop(heap).s
        i -= 1
    print("\n".join(res))
from datetime import datetime, timedelta
from typing import Any, Dict, List

from fastapi import Depends, FastAPI, HTTPException, status
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from fastapi.testclient import TestClient
from jose import JWTError, jwt
from passlib.context import CryptContext
from pydantic import BaseModel, Field
# normally would load from env
# HS256 signing key for JWTs issued by the /token endpoint.
SECRET_KEY = (
    "963456d8424a9e506d82d1947774c56a2fa3cf1099315cd93e07f44dc5eea21a"  # noqa S105
)
ALGORITHM = "HS256"
# Lifetime of an issued access token, in minutes.
ACCESS_TOKEN_EXPIRE_MINUTES = 30
# Throwaway password used by the test helpers at the bottom of this file.
LAME_PASSWORD = "1234"  # noqa S105
class Token(BaseModel):
    """Response body of the /token endpoint."""
    access_token: str
    token_type: str
class TokenData(BaseModel):
    """Data carried inside a decoded access token."""
    username: str
class Food(BaseModel):
    """A food item with per-serving nutritional information."""
    id: int
    name: str
    serving_size: str
    kcal_per_serving: int
    protein_grams: float
    fibre_grams: float = 0
class User(BaseModel):
    """An application user; the password is stored bcrypt-hashed."""
    id: int
    username: str
    password: str
    def __init__(self, **data: Any):
        # Hash the incoming password before pydantic validation/storage.
        # NOTE(review): constructing a User from an already-hashed password
        # would hash it a second time — confirm callers only pass plaintext.
        data["password"] = get_password_hash(data["password"])
        super().__init__(**data)
class FoodEntry(BaseModel):
    """A logged consumption of a food item by a user."""
    id: int
    user: User
    food: Food
    # Bug fix: a plain `= datetime.now()` default is evaluated once at import
    # time and then shared by every entry; default_factory gives each entry
    # its own creation timestamp.
    date_added: datetime = Field(default_factory=datetime.now)
    number_servings: float
# bcrypt password-hashing context and the OAuth2 bearer scheme used by the
# protected endpoints; tokens are obtained from the /token endpoint below.
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
app = FastAPI()
# In-memory store of food entries, keyed by entry id.
food_log: Dict[int, FoodEntry] = {}
# We created an extra in memory user DB, keyed by username.
users_db: Dict[str, User] = {}
def verify_password(plain_password, hashed_password):
    """Provided, all good. True when *plain_password* matches the bcrypt hash."""
    return pwd_context.verify(plain_password, hashed_password)
def get_password_hash(password):
    """Provided, all good. Return the bcrypt hash of *password*."""
    return pwd_context.hash(password)
def get_user(username: str):
    """Return the stored User for *username*, or None when unknown."""
    # dict.get mirrors the original behavior (implicit None for a miss)
    # with a single lookup instead of a membership test plus an index.
    return users_db.get(username)
def authenticate_user(username: str, password: str):
    """Return the matching User when the credentials check out, else False."""
    user = get_user(username)
    if user and verify_password(password, user.password):
        return user
    return False
def create_access_token(data: dict, expires_delta: timedelta):
    """Return a signed JWT carrying *data* plus an "exp" expiry claim.

    Bug fix: the expiry was previously commented out, so every issued
    token was valid forever.  *expires_delta* may be a timedelta or a
    plain number of minutes (the /token endpoint historically passed an
    int), keeping old callers working.
    """
    if not isinstance(expires_delta, timedelta):
        expires_delta = timedelta(minutes=expires_delta)
    to_encode = data.copy()
    to_encode.update({"exp": datetime.utcnow() + expires_delta})
    encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
    return encoded_jwt
def get_current_user(token: str = Depends(oauth2_scheme)):
    """Decode the bearer token and return its payload dict.

    Bug fix: an invalid/tampered token previously raised an unhandled
    JWTError (HTTP 500); it now yields a proper 401 response.
    """
    try:
        return jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
    except JWTError:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Could not validate credentials"
        )
@app.post("/create_user", status_code=201)
async def create_user(user: User):
    """Ignore / don't touch this endpoint, the tests will use it"""
    # Registers (or silently overwrites) the user in the in-memory DB.
    users_db[user.username] = user
    return user
@app.post("/token", response_model=Token)
async def login_for_access_token(form_data: OAuth2PasswordRequestForm = Depends()):
    """OAuth2 password flow: validate credentials and issue a bearer JWT."""
    user = authenticate_user(form_data.username, form_data.password)
    if not user:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Incorrect username or password"
        )
    # Fix: pass a real timedelta, as create_access_token's signature
    # declares (an int number of minutes was passed before).
    access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
    access_token = create_access_token(
        data={"user_id": user.id, "username": user.username},
        expires_delta=access_token_expires,
    )
    return {"access_token": access_token, "token_type": "bearer"}
@app.post("/", status_code=201)
async def create_food_entry(entry: FoodEntry, token=Depends(oauth2_scheme)):
    """Store a new food entry; the token's owner must match entry.user."""
    username = None
    try:
        claims = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
        username = claims.get("username")
    except JWTError:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Could not validate credentials"
        )
    if username is None:
        # Token decoded but carries no username claim.
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Could not validate credentials"
        )
    if get_user(username).id != entry.user.id:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Can only add food for current user"
        )
    food_log[entry.id] = entry
    return entry
@app.get("/", response_model=List[FoodEntry])
async def get_foods_for_user(current_user: User = Depends(get_current_user)):
    """Return only the food entries belonging to the logged-in user."""
    owner_id = current_user['user_id']
    matches = []
    for food_entry in food_log.values():
        if food_entry.user.id == owner_id:
            matches.append(food_entry)
    return matches
@app.put("/{entry_id}", response_model=FoodEntry)
async def update_food_entry(entry_id: int, new_entry: FoodEntry, token=Depends(oauth2_scheme)):
    """Replace an existing food entry owned by the token's user.

    (Removed a copy-pasted duplicate of the existence check.)
    """
    if entry_id not in food_log:
        raise HTTPException(status_code=404, detail="Food entry not found")
    if food_log[entry_id].user.id != get_current_user(token)['user_id']:
        raise HTTPException(status_code=400, detail="Food entry not owned by you")
    food_log[entry_id] = new_entry
    return new_entry
@app.delete("/{entry_id}", response_model=Dict[str, bool])
async def delete_food_entry(entry_id: int, token=Depends(oauth2_scheme)):
    """Delete a food entry owned by the token's user.

    (Removed a copy-pasted duplicate of the existence check.)
    """
    if entry_id not in food_log:
        raise HTTPException(status_code=404, detail="Food entry not found")
    if food_log[entry_id].user.id != get_current_user(token)['user_id']:
        raise HTTPException(status_code=400, detail="Food entry not owned by you")
    del food_log[entry_id]
    return {"ok": True}
def _get_token(client, username):
    """POST the login form for *username* and return the raw response."""
    form = {"username": username, "password": LAME_PASSWORD}
    return client.post("/token", data=form)
def _create_food_as_user(client, payload, username):
    """Create a food entry while authenticated as *username*; return the
    Authorization headers used so callers can reuse them."""
    token = _get_token(client, username).json()["access_token"]
    headers = {"Authorization": f"Bearer {token}"}
    create_resp = client.post("/", json=payload, headers=headers)
    assert create_resp.status_code == 201
    return headers
# def main():
# print('thank you for looking after Mama and Naia')
#
# client = TestClient(app)
#
# user1 = dict(id=1, username="tim", password=LAME_PASSWORD)
# user2 = dict(id=2, username="sara", password=LAME_PASSWORD)
#
# for usr in (user1, user2):
# client.post("/create_user", json=usr)
#
# food1 = dict(
# id=1,
# name="egg",
# serving_size="piece",
# kcal_per_serving=78,
# protein_grams=6.2,
# fibre_grams=0,
# )
#
# food2 = dict(
# id=2,
# name="oatmeal",
# serving_size="100 grams",
# kcal_per_serving=336,
# protein_grams=13.2,
# fibre_grams=10.1,
# )
#
# payload = dict(id=1, user=user1, food=food1, number_servings=1.5)
# _create_food_as_user(client, payload, user1["username"])
# payload = dict(id=2, user=user2, food=food2, number_servings=2)
# headers = _create_food_as_user(client, payload, user2["username"])
# # cannot delete user1 food as user2
# resp = client.delete("/1", headers=headers)
# print(resp.status_code)
# print(resp.json())
#
#
#
# if __name__ == '__main__':
# main()
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import
import distutils.util
try:
from importlib.machinery import EXTENSION_SUFFIXES
except ImportError: # pragma: no cover
import imp
EXTENSION_SUFFIXES = [x[0] for x in imp.get_suffixes()]
del imp
import collections
import logging
import os
import platform
import re
import struct
import sys
import sysconfig
import warnings
from ._typing import TYPE_CHECKING, cast
if TYPE_CHECKING: # pragma: no cover
from typing import (
IO,
Dict,
FrozenSet,
Iterable,
Iterator,
List,
Optional,
Sequence,
Tuple,
Union,
)
PythonVersion = Sequence[int]
MacVersion = Tuple[int, int]
GlibcVersion = Tuple[int, int]
logger = logging.getLogger(__name__)

# Interpreter implementation name -> PEP 425 short name.
INTERPRETER_SHORT_NAMES = {
    "python": "py",  # Generic.
    "cpython": "cp",
    "pypy": "pp",
    "ironpython": "ip",
    "jython": "jy",
}  # type: Dict[str, str]

# True when running under a 32-bit interpreter (pointer-sized sys.maxsize).
_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32

# glibc (major, minor) -> legacy manylinux tag name.
_LEGACY_MANYLINUX_MAP = {
    # CentOS 7 w/ glibc 2.17 (PEP 599)
    (2, 17): "manylinux2014",
    # CentOS 6 w/ glibc 2.12 (PEP 571)
    (2, 12): "manylinux2010",
    # CentOS 5 w/ glibc 2.5 (PEP 513)
    (2, 5): "manylinux1",
}

# If glibc ever changes its major version, we need to know what the last
# minor version was, so we can build the complete list of all versions.
# For now, guess what the highest minor version might be, assume it will
# be 50 for testing. Once this actually happens, update the dictionary
# with the actual value.
_LAST_GLIBC_MINOR = collections.defaultdict(lambda: 50)  # type: Dict[int, int]

glibcVersion = collections.namedtuple("Version", ["major", "minor"])
class Tag(object):
    """
    A representation of the tag triple for a wheel.

    Instances are considered immutable and thus are hashable. Equality
    checking is also supported.
    """

    __slots__ = ["_interpreter", "_abi", "_platform", "_hash"]

    def __init__(self, interpreter, abi, platform):
        # type: (str, str, str) -> None
        # Tags compare case-insensitively, so normalize once up front.
        self._interpreter = interpreter.lower()
        self._abi = abi.lower()
        self._platform = platform.lower()
        # The __hash__ of every single element in a Set[Tag] will be evaluated
        # each time that a set calls its `.disjoint()` method, which may be
        # called hundreds of times when scanning a page of links for packages
        # with tags matching that Set[Tag]. Pre-computing the value here
        # produces significant speedups for downstream consumers.
        self._hash = hash((self._interpreter, self._abi, self._platform))

    @property
    def interpreter(self):
        # type: () -> str
        return self._interpreter

    @property
    def abi(self):
        # type: () -> str
        return self._abi

    @property
    def platform(self):
        # type: () -> str
        return self._platform

    def __eq__(self, other):
        # type: (object) -> bool
        if not isinstance(other, Tag):
            return NotImplemented
        # Platform first: it is the most discriminating component.
        return (
            self._platform == other._platform
            and self._abi == other._abi
            and self._interpreter == other._interpreter
        )

    def __hash__(self):
        # type: () -> int
        return self._hash

    def __str__(self):
        # type: () -> str
        return "-".join((self._interpreter, self._abi, self._platform))

    def __repr__(self):
        # type: () -> str
        return "<{self} @ {self_id}>".format(self=self, self_id=id(self))
def parse_tag(tag):
    # type: (str) -> FrozenSet[Tag]
    """
    Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.

    Returning a set is required due to the possibility that the tag is a
    compressed tag set.
    """
    interpreters, abis, platforms = tag.split("-")
    # Each dotted component is a compressed list; expand the cross product.
    return frozenset(
        Tag(interpreter, abi, platform_)
        for interpreter in interpreters.split(".")
        for abi in abis.split(".")
        for platform_ in platforms.split(".")
    )
def _warn_keyword_parameter(func_name, kwargs):
# type: (str, Dict[str, bool]) -> bool
"""
Backwards-compatibility with Python 2.7 to allow treating 'warn' as keyword-only.
"""
if not kwargs:
return False
elif len(kwargs) > 1 or "warn" not in kwargs:
kwargs.pop("warn", None)
arg = next(iter(kwargs.keys()))
raise TypeError(
"{}() got an unexpected keyword argument {!r}".format(func_name, arg)
)
return kwargs["warn"]
def _get_config_var(name, warn=False):
# type: (str, bool) -> Union[int, str, None]
value = sysconfig.get_config_var(name)
if value is None and warn:
logger.debug(
"Config variable '%s' is unset, Python ABI tag may be incorrect", name
)
return value
def _normalize_string(string):
# type: (str) -> str
return string.replace(".", "_").replace("-", "_")
def _abi3_applies(python_version):
# type: (PythonVersion) -> bool
"""
Determine if the Python version supports abi3.
PEP 384 was first implemented in Python 3.2.
"""
return len(python_version) > 1 and tuple(python_version) >= (3, 2)
def _cpython_abis(py_version, warn=False):
    # type: (PythonVersion, bool) -> List[str]
    """Return the ABI tags for a CPython interpreter of *py_version*.

    The full "cp{version}{d}{m}{u}" tag is always first; for debug builds
    on 3.8+ the plain "cp{version}" tag follows it.
    """
    py_version = tuple(py_version)  # To allow for version comparison.
    abis = []
    version = _version_nodot(py_version[:2])
    debug = pymalloc = ucs4 = ""
    with_debug = _get_config_var("Py_DEBUG", warn)
    has_refcount = hasattr(sys, "gettotalrefcount")
    # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
    # extension modules is the best option.
    # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
    has_ext = "_d.pyd" in EXTENSION_SUFFIXES
    if with_debug or (with_debug is None and (has_refcount or has_ext)):
        debug = "d"
    if py_version < (3, 8):
        # The "m" (pymalloc) flag was dropped in 3.8 and "u" (wide
        # unicode) in 3.3; only older versions carry them.
        with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
        if with_pymalloc or with_pymalloc is None:
            pymalloc = "m"
        if py_version < (3, 3):
            unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
            if unicode_size == 4 or (
                unicode_size is None and sys.maxunicode == 0x10FFFF
            ):
                ucs4 = "u"
    elif debug:
        # Debug builds can also load "normal" extension modules.
        # We can also assume no UCS-4 or pymalloc requirement.
        abis.append("cp{version}".format(version=version))
    abis.insert(
        0,
        "cp{version}{debug}{pymalloc}{ucs4}".format(
            version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
        ),
    )
    return abis
def cpython_tags(
    python_version=None,  # type: Optional[PythonVersion]
    abis=None,  # type: Optional[Iterable[str]]
    platforms=None,  # type: Optional[Iterable[str]]
    **kwargs  # type: bool
):
    # type: (...) -> Iterator[Tag]
    """
    Yields the tags for a CPython interpreter.

    The tags consist of:
    - cp<python_version>-<abi>-<platform>
    - cp<python_version>-abi3-<platform>
    - cp<python_version>-none-<platform>
    - cp<less than python_version>-abi3-<platform>  # Older Python versions down to 3.2.

    If python_version only specifies a major version then user-provided ABIs and
    the 'none' ABI tag will be used.

    If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
    their normal position and not at the beginning.
    """
    warn = _warn_keyword_parameter("cpython_tags", kwargs)
    if not python_version:
        python_version = sys.version_info[:2]
    interpreter = "cp{}".format(_version_nodot(python_version[:2]))
    if abis is None:
        if len(python_version) > 1:
            abis = _cpython_abis(python_version, warn)
        else:
            # Major-only version: cannot derive concrete ABIs.
            abis = []
    abis = list(abis)
    # 'abi3' and 'none' are explicitly handled later.
    for explicit_abi in ("abi3", "none"):
        try:
            abis.remove(explicit_abi)
        except ValueError:
            pass
    platforms = list(platforms or _platform_tags())
    # Most specific first: concrete ABIs, then abi3, then none.
    for abi in abis:
        for platform_ in platforms:
            yield Tag(interpreter, abi, platform_)
    if _abi3_applies(python_version):
        for tag in (Tag(interpreter, "abi3", platform_) for platform_ in platforms):
            yield tag
    for tag in (Tag(interpreter, "none", platform_) for platform_ in platforms):
        yield tag
    if _abi3_applies(python_version):
        # Older interpreters (down to 3.2) can still load abi3 wheels
        # built against them.
        for minor_version in range(python_version[1] - 1, 1, -1):
            for platform_ in platforms:
                interpreter = "cp{version}".format(
                    version=_version_nodot((python_version[0], minor_version))
                )
                yield Tag(interpreter, "abi3", platform_)
def _generic_abi():
    # type: () -> Iterator[str]
    """Yield the generic ABI tag derived from SOABI, when available."""
    soabi = sysconfig.get_config_var("SOABI")
    if soabi:
        yield _normalize_string(soabi)
def generic_tags(
    interpreter=None,  # type: Optional[str]
    abis=None,  # type: Optional[Iterable[str]]
    platforms=None,  # type: Optional[Iterable[str]]
    **kwargs  # type: bool
):
    # type: (...) -> Iterator[Tag]
    """
    Yields the tags for a generic interpreter.

    The tags consist of:
    - <interpreter>-<abi>-<platform>

    The "none" ABI will be added if it was not explicitly provided.
    """
    warn = _warn_keyword_parameter("generic_tags", kwargs)
    if not interpreter:
        # Derive e.g. "cp39" from the running interpreter.
        interp_name = interpreter_name()
        interp_version = interpreter_version(warn=warn)
        interpreter = "".join([interp_name, interp_version])
    if abis is None:
        abis = _generic_abi()
    platforms = list(platforms or _platform_tags())
    abis = list(abis)
    # "none" is always a valid fallback ABI.
    if "none" not in abis:
        abis.append("none")
    for abi in abis:
        for platform_ in platforms:
            yield Tag(interpreter, abi, platform_)
def _py_interpreter_range(py_version):
    # type: (PythonVersion) -> Iterator[str]
    """
    Yields Python versions in descending order.

    After the latest version, the major-only version will be yielded, and then
    all previous versions of that major version.
    """
    if len(py_version) > 1:
        yield "py{version}".format(version=_version_nodot(py_version[:2]))
    yield "py{major}".format(major=py_version[0])
    if len(py_version) > 1:
        # e.g. for (3, 7): py36, py35, ..., py30.
        for minor in range(py_version[1] - 1, -1, -1):
            yield "py{version}".format(version=_version_nodot((py_version[0], minor)))
def compatible_tags(
    python_version=None,  # type: Optional[PythonVersion]
    interpreter=None,  # type: Optional[str]
    platforms=None,  # type: Optional[Iterable[str]]
):
    # type: (...) -> Iterator[Tag]
    """
    Yields the sequence of tags that are compatible with a specific version of Python.

    The tags consist of:
    - py*-none-<platform>
    - <interpreter>-none-any  # ... if `interpreter` is provided.
    - py*-none-any
    """
    if not python_version:
        python_version = sys.version_info[:2]
    platforms = list(platforms or _platform_tags())
    # Platform-specific pure-Python tags first...
    for version in _py_interpreter_range(python_version):
        for platform_ in platforms:
            yield Tag(version, "none", platform_)
    # ...then the interpreter-specific and fully generic "any" tags.
    if interpreter:
        yield Tag(interpreter, "none", "any")
    for version in _py_interpreter_range(python_version):
        yield Tag(version, "none", "any")
def _mac_arch(arch, is_32bit=_32_BIT_INTERPRETER):
    # type: (str, bool) -> str
    """Map *arch* to what a 32-bit interpreter can actually execute."""
    if is_32bit:
        # A 32-bit interpreter runs the 32-bit flavor: ppc for PowerPC
        # variants, i386 for everything else.
        return "ppc" if arch.startswith("ppc") else "i386"
    return arch
def _mac_binary_formats(version, cpu_arch):
# type: (MacVersion, str) -> List[str]
formats = [cpu_arch]
if cpu_arch == "x86_64":
if version < (10, 4):
return []
formats.extend(["intel", "fat64", "fat32"])
elif cpu_arch == "i386":
if version < (10, 4):
return []
formats.extend(["intel", "fat32", "fat"])
elif cpu_arch == "ppc64":
# TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
if version > (10, 5) or version < (10, 4):
return []
formats.append("fat64")
elif cpu_arch == "ppc":
if version > (10, 6):
return []
formats.extend(["fat32", "fat"])
if cpu_arch in {"arm64", "x86_64"}:
formats.append("universal2")
if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
formats.append("universal")
return formats
def mac_platforms(version=None, arch=None):
    # type: (Optional[MacVersion], Optional[str]) -> Iterator[str]
    """
    Yields the platform tags for a macOS system.

    The `version` parameter is a two-item tuple specifying the macOS version to
    generate platform tags for. The `arch` parameter is the CPU architecture to
    generate platform tags for. Both parameters default to the appropriate value
    for the current system.
    """
    version_str, _, cpu_arch = platform.mac_ver()  # type: ignore
    if version is None:
        version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
    else:
        # NOTE(review): this else-branch is a no-op reassignment, kept as-is.
        version = version
    if arch is None:
        arch = _mac_arch(cpu_arch)
    else:
        arch = arch
    if (10, 0) <= version and version < (11, 0):
        # Prior to Mac OS 11, each yearly release of Mac OS bumped the
        # "minor" version number. The major version was always 10.
        for minor_version in range(version[1], -1, -1):
            compat_version = 10, minor_version
            binary_formats = _mac_binary_formats(compat_version, arch)
            for binary_format in binary_formats:
                yield "macosx_{major}_{minor}_{binary_format}".format(
                    major=10, minor=minor_version, binary_format=binary_format
                )
    if version >= (11, 0):
        # Starting with Mac OS 11, each yearly release bumps the major version
        # number. The minor versions are now the midyear updates.
        for major_version in range(version[0], 10, -1):
            compat_version = major_version, 0
            binary_formats = _mac_binary_formats(compat_version, arch)
            for binary_format in binary_formats:
                yield "macosx_{major}_{minor}_{binary_format}".format(
                    major=major_version, minor=0, binary_format=binary_format
                )
    if version >= (11, 0):
        # Mac OS 11 on x86_64 is compatible with binaries from previous releases.
        # Arm64 support was introduced in 11.0, so no Arm binaries from previous
        # releases exist.
        #
        # However, the "universal2" binary format can have a
        # macOS version earlier than 11.0 when the x86_64 part of the binary supports
        # that version of macOS.
        if arch == "x86_64":
            for minor_version in range(16, 3, -1):
                compat_version = 10, minor_version
                binary_formats = _mac_binary_formats(compat_version, arch)
                for binary_format in binary_formats:
                    yield "macosx_{major}_{minor}_{binary_format}".format(
                        major=compat_version[0],
                        minor=compat_version[1],
                        binary_format=binary_format,
                    )
        else:
            for minor_version in range(16, 3, -1):
                compat_version = 10, minor_version
                binary_format = "universal2"
                yield "macosx_{major}_{minor}_{binary_format}".format(
                    major=compat_version[0],
                    minor=compat_version[1],
                    binary_format=binary_format,
                )
# From PEP 513, PEP 600
def _is_manylinux_compatible(name, arch, glibc_version):
    # type: (str, str, GlibcVersion) -> bool
    """Return True when this system can run manylinux wheels requiring
    *glibc_version*; a local `_manylinux` module may override the answer.

    NOTE(review): the *name* parameter is unused here — presumably kept
    for interface compatibility; confirm against callers.
    """
    sys_glibc = _get_glibc_version()
    if sys_glibc < glibc_version:
        return False
    # Check for presence of _manylinux module.
    try:
        import _manylinux  # noqa
    except ImportError:
        pass
    else:
        if hasattr(_manylinux, "manylinux_compatible"):
            # PEP 600 hook: may return True/False, or None for "no opinion".
            result = _manylinux.manylinux_compatible(
                glibc_version[0], glibc_version[1], arch
            )
            if result is not None:
                return bool(result)
        else:
            # Legacy per-tag override attributes (PEP 513/571/599).
            if glibc_version == (2, 5):
                if hasattr(_manylinux, "manylinux1_compatible"):
                    return bool(_manylinux.manylinux1_compatible)
            if glibc_version == (2, 12):
                if hasattr(_manylinux, "manylinux2010_compatible"):
                    return bool(_manylinux.manylinux2010_compatible)
            if glibc_version == (2, 17):
                if hasattr(_manylinux, "manylinux2014_compatible"):
                    return bool(_manylinux.manylinux2014_compatible)
    return True
def _glibc_version_string():
    # type: () -> Optional[str]
    """Return the glibc version string, or None when not running on glibc."""
    version = _glibc_version_string_confstr()
    if version:
        return version
    # Fall back to the ctypes-based probe.
    return _glibc_version_string_ctypes()
def _glibc_version_string_confstr():
# type: () -> Optional[str]
"""
Primary implementation of glibc_version_string using os.confstr.
"""
# os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
# to be broken or missing. This strategy is used in the standard library
# platform module.
# https://github.com/python/cpython/blob/fcf1d003bf4f0100c9d0921ff3d70e1127ca1b71/Lib/platform.py#L175-L183
try:
# os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17".
version_string = os.confstr( # type: ignore[attr-defined] # noqa: F821
"CS_GNU_LIBC_VERSION"
)
assert version_string is not None
_, version = version_string.split() # type: Tuple[str, str]
except (AssertionError, AttributeError, OSError, ValueError):
# os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
return None
return version
def _glibc_version_string_ctypes():
    # type: () -> Optional[str]
    """
    Fallback implementation of glibc_version_string using ctypes.
    """
    try:
        import ctypes
    except ImportError:
        return None
    # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
    # manpage says, "If filename is NULL, then the returned handle is for the
    # main program". This way we can let the linker do the work to figure out
    # which libc our process is actually using.
    #
    # We must also handle the special case where the executable is not a
    # dynamically linked executable. This can occur when using musl libc,
    # for example. In this situation, dlopen() will error, leading to an
    # OSError. Interestingly, at least in the case of musl, there is no
    # errno set on the OSError. The single string argument used to construct
    # OSError comes from libc itself and is therefore not portable to
    # hard code here. In any case, failure to call dlopen() means we
    # can proceed, so we bail on our attempt.
    try:
        # Note: typeshed is wrong here so we are ignoring this line.
        process_namespace = ctypes.CDLL(None)  # type: ignore
    except OSError:
        return None
    try:
        gnu_get_libc_version = process_namespace.gnu_get_libc_version
    except AttributeError:
        # Symbol doesn't exist -> therefore, we are not linked to
        # glibc.
        return None
    # Call gnu_get_libc_version, which returns a string like "2.5"
    gnu_get_libc_version.restype = ctypes.c_char_p
    version_str = gnu_get_libc_version()  # type: str
    # py2 / py3 compatibility: glibc returns bytes; decode them on Python 3.
    if not isinstance(version_str, str):
        version_str = version_str.decode("ascii")
    return version_str
def _parse_glibc_version(version_str):
# type: (str) -> Tuple[int, int]
# Parse glibc version.
#
# We use a regexp instead of str.split because we want to discard any
# random junk that might come after the minor version -- this might happen
# in patched/forked versions of glibc (e.g. Linaro's version of glibc
# uses version strings like "2.20-2014.11"). See gh-3588.
m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
if not m:
warnings.warn(
"Expected glibc version with 2 components major.minor,"
" got: %s" % version_str,
RuntimeWarning,
)
return -1, -1
return (int(m.group("major")), int(m.group("minor")))
# Module-level cache: holds at most one (major, minor) tuple once detected.
_glibc_version = []  # type: List[Tuple[int, int]]


def _get_glibc_version():
    # type: () -> Tuple[int, int]
    """Return the running glibc version, cached; (-1, -1) when unavailable."""
    if _glibc_version:
        return _glibc_version[0]
    version_str = _glibc_version_string()
    if version_str is None:
        _glibc_version.append((-1, -1))
    else:
        _glibc_version.append(_parse_glibc_version(version_str))
    return _glibc_version[0]
# Python does not provide platform information at sufficient granularity to
# identify the architecture of the running executable in some cases, so we
# determine it dynamically by reading the information from the running
# process. This only applies on Linux, which uses the ELF format.
class _ELFFileHeader(object):
    # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header
    class _InvalidELFFileHeader(ValueError):
        """
        An invalid ELF file header was found.
        """

    ELF_MAGIC_NUMBER = 0x7F454C46  # b"\x7fELF"
    ELFCLASS32 = 1
    ELFCLASS64 = 2
    ELFDATA2LSB = 1  # little-endian
    ELFDATA2MSB = 2  # big-endian
    EM_386 = 3
    EM_S390 = 22
    EM_ARM = 40
    EM_X86_64 = 62
    EF_ARM_ABIMASK = 0xFF000000
    EF_ARM_ABI_VER5 = 0x05000000
    EF_ARM_ABI_FLOAT_HARD = 0x00000400

    def __init__(self, file):
        # type: (IO[bytes]) -> None
        def unpack(fmt):
            # type: (str) -> int
            # Read exactly the bytes `fmt` needs; a short read or malformed
            # data surfaces as _InvalidELFFileHeader.
            try:
                (result,) = struct.unpack(
                    fmt, file.read(struct.calcsize(fmt))
                )  # type: (int, )
            except struct.error:
                raise _ELFFileHeader._InvalidELFFileHeader()
            return result

        self.e_ident_magic = unpack(">I")
        if self.e_ident_magic != self.ELF_MAGIC_NUMBER:
            raise _ELFFileHeader._InvalidELFFileHeader()
        self.e_ident_class = unpack("B")
        if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}:
            raise _ELFFileHeader._InvalidELFFileHeader()
        self.e_ident_data = unpack("B")
        if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}:
            raise _ELFFileHeader._InvalidELFFileHeader()
        self.e_ident_version = unpack("B")
        self.e_ident_osabi = unpack("B")
        self.e_ident_abiversion = unpack("B")
        self.e_ident_pad = file.read(7)
        # Field widths and byte order depend on the ELF class (32/64-bit)
        # and data encoding read above.
        format_h = "<H" if self.e_ident_data == self.ELFDATA2LSB else ">H"
        format_i = "<I" if self.e_ident_data == self.ELFDATA2LSB else ">I"
        format_q = "<Q" if self.e_ident_data == self.ELFDATA2LSB else ">Q"
        format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q
        self.e_type = unpack(format_h)
        self.e_machine = unpack(format_h)
        self.e_version = unpack(format_i)
        self.e_entry = unpack(format_p)
        self.e_phoff = unpack(format_p)
        self.e_shoff = unpack(format_p)
        self.e_flags = unpack(format_i)
        self.e_ehsize = unpack(format_h)
        self.e_phentsize = unpack(format_h)
        self.e_phnum = unpack(format_h)
        self.e_shentsize = unpack(format_h)
        self.e_shnum = unpack(format_h)
        self.e_shstrndx = unpack(format_h)
def _get_elf_header():
    # type: () -> Optional[_ELFFileHeader]
    """Parse the ELF header of the running interpreter's executable.

    Returns None when the executable cannot be opened or is not a valid
    ELF image (e.g. on non-ELF platforms).
    """
    try:
        with open(sys.executable, "rb") as executable:
            return _ELFFileHeader(executable)
    except (IOError, OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader):
        return None
def _is_linux_armhf():
    # type: () -> bool
    """Detect the hard-float ARM EABI v5 from the running interpreter's
    ELF header.

    https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
    """
    header = _get_elf_header()
    if header is None:
        return False
    is_abi_ver5 = (
        header.e_flags & header.EF_ARM_ABIMASK
    ) == header.EF_ARM_ABI_VER5
    is_hard_float = (
        header.e_flags & header.EF_ARM_ABI_FLOAT_HARD
    ) == header.EF_ARM_ABI_FLOAT_HARD
    return (
        header.e_ident_class == header.ELFCLASS32
        and header.e_ident_data == header.ELFDATA2LSB
        and header.e_machine == header.EM_ARM
        and is_abi_ver5
        and is_hard_float
    )
def _is_linux_i686():
    # type: () -> bool
    """Report whether the running interpreter is a 32-bit little-endian
    x86 (i686) ELF binary."""
    header = _get_elf_header()
    if header is None:
        return False
    return (
        header.e_ident_class == header.ELFCLASS32
        and header.e_ident_data == header.ELFDATA2LSB
        and header.e_machine == header.EM_386
    )
def _have_compatible_manylinux_abi(arch):
    # type: (str) -> bool
    """Report whether this interpreter can use manylinux wheels for *arch*.

    armv7l and i686 need an ELF-header inspection; the listed 64-bit
    architectures require no extra check.
    """
    if arch == "armv7l":
        return _is_linux_armhf()
    elif arch == "i686":
        return _is_linux_i686()
    return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"}
def _manylinux_tags(linux, arch):
    # type: (str, str) -> Iterator[str]
    """Yield every manylinux platform tag this system supports for *arch*,
    newest glibc first, including the legacy manylinux1/2010/2014 aliases
    where they apply."""
    # Oldest glibc to be supported regardless of architecture is (2, 17).
    # NOTE: (2, 16) is an *exclusive* lower bound -- the range() below
    # stops before it, so (2, 17) is the oldest minor actually yielded.
    too_old_glibc2 = glibcVersion(2, 16)
    if arch in {"x86_64", "i686"}:
        # On x86/i686 also oldest glibc to be supported is (2, 5).
        too_old_glibc2 = glibcVersion(2, 4)
    current_glibc = glibcVersion(*_get_glibc_version())
    glibc_max_list = [current_glibc]
    # We can assume compatibility across glibc major versions.
    # https://sourceware.org/bugzilla/show_bug.cgi?id=24636
    #
    # Build a list of maximum glibc versions so that we can
    # output the canonical list of all glibc from current_glibc
    # down to too_old_glibc2, including all intermediary versions.
    for glibc_major in range(current_glibc.major - 1, 1, -1):
        glibc_max_list.append(glibcVersion(glibc_major, _LAST_GLIBC_MINOR[glibc_major]))
    for glibc_max in glibc_max_list:
        if glibc_max.major == too_old_glibc2.major:
            min_minor = too_old_glibc2.minor
        else:
            # For other glibc major versions oldest supported is (x, 0).
            # -1 is again an exclusive bound, so minor 0 is included.
            min_minor = -1
        for glibc_minor in range(glibc_max.minor, min_minor, -1):
            glibc_version = (glibc_max.major, glibc_minor)
            tag = "manylinux_{}_{}".format(*glibc_version)
            if _is_manylinux_compatible(tag, arch, glibc_version):
                yield linux.replace("linux", tag)
            # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
            if glibc_version in _LEGACY_MANYLINUX_MAP:
                legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
                if _is_manylinux_compatible(legacy_tag, arch, glibc_version):
                    yield linux.replace("linux", legacy_tag)
def _linux_platforms(is_32bit=_32_BIT_INTERPRETER):
    # type: (bool) -> Iterator[str]
    """Yield Linux platform tags, most specific (manylinux) first, ending
    with the plain linux_<arch> tag."""
    linux = _normalize_string(distutils.util.get_platform())
    if is_32bit:
        # A 32-bit interpreter on a 64-bit kernel reports the 64-bit
        # machine; map it back to the matching 32-bit tag.
        downgrades = {"linux_x86_64": "linux_i686", "linux_aarch64": "linux_armv7l"}
        linux = downgrades.get(linux, linux)
    _, arch = linux.split("_", 1)
    if _have_compatible_manylinux_abi(arch):
        for tag in _manylinux_tags(linux, arch):
            yield tag
    yield linux
def _generic_platforms():
    # type: () -> Iterator[str]
    """Yield the normalized distutils platform tag as the only entry."""
    platform_tag = _normalize_string(distutils.util.get_platform())
    yield platform_tag
def _platform_tags():
    # type: () -> Iterator[str]
    """
    Provides the platform tags for this installation.
    """
    system = platform.system()
    if system == "Darwin":
        return mac_platforms()
    if system == "Linux":
        return _linux_platforms()
    return _generic_platforms()
def interpreter_name():
    # type: () -> str
    """
    Returns the (short) name of the running interpreter, e.g. "cp" for
    CPython, falling back to the full lowercase implementation name.
    """
    implementation = getattr(sys, "implementation", None)
    if implementation is not None:
        name = implementation.name  # type: ignore
    else:
        # Python 2.7 has no sys.implementation.
        name = platform.python_implementation().lower()
    return INTERPRETER_SHORT_NAMES.get(name) or name
def interpreter_version(**kwargs):
    # type: (bool) -> str
    """
    Returns the version of the running interpreter, preferring the
    sysconfig-provided "py_version_nodot" value when available.
    """
    warn = _warn_keyword_parameter("interpreter_version", kwargs)
    version = _get_config_var("py_version_nodot", warn=warn)
    if not version:
        return _version_nodot(sys.version_info[:2])
    return str(version)
def _version_nodot(version):
    # type: (PythonVersion) -> str
    """Join version components without separators, e.g. (3, 10) -> "310"."""
    return "".join(str(part) for part in version)
def sys_tags(**kwargs):
    # type: (bool) -> Iterator[Tag]
    """
    Returns the sequence of tag triples for the running interpreter.
    The order of the sequence corresponds to priority order for the
    interpreter, from most to least important.
    """
    warn = _warn_keyword_parameter("sys_tags", kwargs)
    # Interpreter-specific tags first (CPython gets the richer tag set).
    if interpreter_name() == "cp":
        interp_tags = cpython_tags(warn=warn)
    else:
        interp_tags = generic_tags()
    for tag in interp_tags:
        yield tag
    for tag in compatible_tags():
        yield tag
|
# Main entry point: importing each module below and calling its driver
# function runs the whole program in sequence.
# Module 1 reads the data file and displays its contents.
import Module_1_Read_Display_File
Module_1_Read_Display_File.readDisplayFile()
# Module 3 handles the borrow/return transaction workflow.
import Module_3_Main_Transaction_Part
Module_3_Main_Transaction_Part.transaction()
|
# To change this template, choose Tools | Templates
# and open the template in the editor.
__author__="Area51"
__date__ ="$Mar 9, 2011 4:10:58 AM$"
class KeyCleaner():
    """Strips verbose/unneeded fields from Twitter API tweet and user dicts."""

    def unset_tweet_keys(self, content):
        """Remove unwanted keys from a tweet dict (in place) and return it.

        Recurses into 'retweeted_status' when present and also cleans the
        embedded 'user' dict.  Raises KeyError when an expected key is
        absent, matching the original contract.
        """
        # BUG FIX: dict.has_key() was removed in Python 3; the `in`
        # operator is the equivalent membership test.
        if "retweeted_status" in content:
            content["retweeted_status"] = self.unset_tweet_keys(content["retweeted_status"])
        del content['contributors']
        del content['in_reply_to_screen_name']
        del content['in_reply_to_status_id']
        del content['in_reply_to_status_id_str']
        del content['in_reply_to_user_id_str']
        del content['in_reply_to_user_id']
        del content['retweeted']
        del content['truncated']
        del content['favorited']
        del content['id']
        del content['id_str']
        content['user'] = self.unset_user_keys(content['user'])
        return content

    def unset_user_keys(self, user):
        """Remove unwanted keys from a user dict (in place) and return it."""
        del user["contributors_enabled"]
        del user["follow_request_sent"]
        del user["id_str"]
        del user["is_translator"]
        del user["notifications"]
        del user["protected"]
        del user["show_all_inline_media"]
        del user["profile_background_image_url"]
        del user["profile_background_tile"]
        del user["profile_background_color"]
        del user["profile_image_url"]
        del user["profile_link_color"]
        del user["profile_sidebar_border_color"]
        del user["profile_sidebar_fill_color"]
        del user["profile_text_color"]
        del user["profile_use_background_image"]
        return user
|
import sys
# Task:
# MapReduce mapper: emit the IP address of every hit so a reducer can
# count hits per distinct IP address.
# Line example:
# 10.223.157.186 - - [15/Jul/2009:15:50:35 -0700] "GET /assets/js/lowpro.js HTTP/1.1" 200 10469
for line in sys.stdin:
    data = line.strip().split(' ')
    # BUG FIX: `len(data) is 10` relied on CPython small-int identity
    # caching; `==` is the correct, portable value comparison.
    if len(data) == 10:
        # Named unpacking documents the log format; only `ip` is emitted.
        # NOTE(review): `datetime` would shadow the stdlib module name if
        # that module were ever imported in this script.
        ip, identity, username, datetime, tz, method, page, protocol, status, content_size = data
        print(ip)
|
class Heap:
    """Array-backed min-heap with an auxiliary value -> {indices} table.

    `heap` stores the values in implicit-binary-tree order; `treetable`
    maps each value to the set of array indices currently holding it,
    giving O(1) location lookup for remove().

    NOTE(review): both structures are *class* attributes, so every Heap
    instance shares the same storage — confirm this is intended before
    creating more than one instance.
    """
    heap = []
    treetable = dict()

    def getLeftchild(node):
        # Index of the left child in the implicit binary tree.
        return 2*node+1

    def getRightchild(node):
        # Index of the right child in the implicit binary tree.
        return 2*node+2

    def parent(node):
        # Index of the parent, or None for the root.
        if node == 0:
            return None
        elif node == 1:
            return 0
        return int((node-1)/2)

    #given node positions and their respective tree, swap the two values
    def swap(self, node1, node2):
        """Swap the values at node1/node2 and keep treetable in sync.

        NOTE(review): when both positions hold an equal value, the index
        set can end up tracking only one of the two positions — duplicate
        values are only partially supported.
        """
        array = type(self).heap
        table = type(self).treetable
        temp = array[node1]
        temp2 = array[node2]
        if temp in table:
            if node1 in table[temp]:
                table[temp].remove(node1)
                table[temp].add(node2)
        if temp2 in table:
            if node2 in table[temp2]:
                table[temp2].remove(node2)
                table[temp2].add(node1)
        array[node1] = temp2
        array[node2] = temp

    #take a node and bubble up if parent is smaller
    def bubbleup(self, node):
        """Sift the value at `node` toward the root while it is smaller
        than its parent."""
        array = type(self).heap
        parent = type(self).parent(node)
        if parent is not None:
            if array[parent] > array[node]:
                self.swap(parent, node)
                self.bubbleup(parent)

    #take a node and bubble down if children are bigger. Favors left node in tie
    def bubbledown(self, node):
        """Sift the value at `node` down while larger than its smallest
        child; ties favor the left child.

        BUG FIXES vs. the original: the right child is now bounds-checked
        before indexing, the `sekl.swap` typo (NameError) is gone, and a
        smaller *right* child is honored even when the left child is not
        smaller than the node.
        """
        array = type(self).heap
        leftchild = type(self).getLeftchild(node)
        rightchild = type(self).getRightchild(node)
        lastIndex = len(array)-1
        smallest = node
        if leftchild <= lastIndex and array[leftchild] is not None:
            if array[leftchild] < array[smallest]:
                smallest = leftchild
        if rightchild <= lastIndex and array[rightchild] is not None:
            if array[rightchild] < array[smallest]:
                smallest = rightchild
        if smallest != node:
            self.swap(node, smallest)
            self.bubbledown(smallest)

    #insert a value into the heap
    def insert(self, value):
        """Append `value`, record its index in treetable, and restore the
        heap-order invariant."""
        array = type(self).heap
        table = type(self).treetable
        array.append(value)
        position = len(array)-1
        if value in table:
            table[value].add(position)
        else:
            table[value] = {position}
        self.bubbleup(position)

    #removes root value of heap
    def poll(self):
        """Remove the minimum (root) value from the heap, if non-empty."""
        array = type(self).heap
        table = type(self).treetable
        lastIndex = len(array)-1
        if lastIndex >= 0:
            value = array[0]
            if value in table:
                # BUG FIX: original read `table[ alue]` (NameError);
                # `value` was intended.  discard() tolerates a missing 0.
                table[value].discard(0)
                if len(table[value]) == 0:
                    del table[value]
            self.swap(0, lastIndex)
            array.pop()
            if lastIndex-1 >= 0:
                self.bubbledown(0)

    #removes one instance of the given value from heap
    def remove(self, value):
        """Remove one occurrence of `value` from the heap, if present."""
        array = type(self).heap
        table = type(self).treetable
        lastIndex = len(array)-1
        if value in table:
            index = table[value].pop()
            if len(table[value]) == 0:
                del table[value]
            self.swap(index, lastIndex)
            array.pop()
            # BUG FIX: rebalance whenever the vacated slot is still a live
            # index (original `index < lastIndex-1` skipped the last slot).
            if index < len(array):
                self.bubbledown(index)
                self.bubbleup(index)
# Smoke test: push 10 random digits onto the heap and dump its internals.
tree = Heap()
counter = 0
from random import random
while counter < 10:
    # random() is uniform in [0, 1); scaling by 10 yields a digit 0-9.
    tree.insert(int(random()*10))
    counter+=1
print(tree.heap)
print(tree.treetable)
|
import time
from config import *
from models import *
from user import User
from position import Position
import utils
from alert_event import AlertEvent
from alert import Alert
def clear_all_data():
    """Interactively delete every Position and its Alerts/AlertEvents.

    Requires the user to type the literal string 'YES' to proceed; any
    other response aborts without deleting anything.
    """
    if input('Are you sure you want to delete everything? YES/no ') == 'YES':
        for p in Position.objects():
            print('Deleting', p)
            # Delete dependent records first so no orphans remain.
            for a in Alert.objects(position=p):
                print('Deleting', a)
                a.delete()
            for a in AlertEvent.objects(position=p):
                print('Deleting', a)
                a.delete()
            p.delete()
def create_user(role=None):
    """Create, persist, and return a hard-coded test user.

    role: optional role string (e.g. 'admin') stored on the user after
        creation.

    NOTE(review): the credentials and contact details are fixture values
    baked into the source — confirm before using outside of testing.
    """
    u = User.create(
        first_name='Francis',
        last_name='Larson',
        email='fsnlarson@gmail.com',
        password='password',
        phone_number='9178262319',
        trading_account_token='test_token',
    )
    u.role = role
    u.save()
    return u
if __name__ == '__main__':
    # Promote an existing user to the admin role.
    # NOTE(review): .first() returns None when no matching user exists,
    # which would make the next line raise AttributeError — confirm the
    # user is guaranteed to be present.
    u = User.objects(email='fsnlarson+admin@gmail.com').first()
    u.role = 'admin'
    u.save()
|
# import tkinter as tk
#
#
# # ************************
# # Scrollable Frame Class
# # ************************
# class ScrollFrame(tk.Frame):
# def __init__(self, parent):
# super().__init__(parent) # create a frame (self)
#
# self.canvas = tk.Canvas(self, borderwidth=0, background="#ffffff") # place canvas on self
# self.viewPort = tk.Frame(self.canvas,
# background="#ffffff") # place a frame on the canvas, this frame will hold the child widgets
# self.vsb = tk.Scrollbar(self, orient="vertical", command=self.canvas.yview) # place a scrollbar on self
# self.canvas.configure(yscrollcommand=self.vsb.set) # attach scrollbar action to scroll of canvas
#
# self.vsb.pack(side="right", fill="y") # pack scrollbar to right of self
# self.canvas.pack(side="left", fill="both", expand=True) # pack canvas to left of self and expand to fil
# self.canvas.create_window((4, 4), window=self.viewPort, anchor="nw", # add view port frame to canvas
# tags="self.viewPort")
#
# self.viewPort.bind("<Configure>",
# self.onFrameConfigure) # bind an event whenever the size of the viewPort frame changes.
#
# def onFrameConfigure(self, event):
# '''Reset the scroll region to encompass the inner frame'''
# self.canvas.configure(scrollregion=self.canvas.bbox(
# "all")) # whenever the size of the frame changes, alter the scroll region respectively.
#
#
# # ********************************
# # Example usage of the above class
# # ********************************
#
# class Example(tk.Frame):
# def __init__(self, root):
# tk.Frame.__init__(self, root)
# self.scrollFrame = ScrollFrame(self) # add a new scrollable frame.
#
# # Now add some controls to the scrollframe.
# # NOTE: the child controls are added to the view port (scrollFrame.viewPort, NOT scrollframe itself)
# for row in range(100):
# a = row
# tk.Label(self.scrollFrame.viewPort, text="%s" % row, width=3, borderwidth="1",
# relief="solid").grid(row=row, column=0)
# t = "this is the second column for row %s" % row
# tk.Button(self.scrollFrame.viewPort, text=t, command=lambda x=a: self.printMsg("Hello " + str(x))).grid(
# row=row, column=1)
#
# # when packing the scrollframe, we pack scrollFrame itself (NOT the viewPort)
# self.scrollFrame.pack(side="top", fill="both", expand=True)
#
# def printMsg(self, msg):
# print(msg)
#
#
# if __name__ == "__main__":
# root = tk.Tk()
# Example(root).pack(side="top", fill="both", expand=True)
# root.mainloop()
# import tkinter as tk
#
# class AutoScrollbar(tk.Scrollbar):
# # a scrollbar that hides itself if it's not needed. only
# # works if you use the grid geometry manager.
# def set(self, lo, hi):
# if float(lo) <= 0.0 and float(hi) >= 1.0:
# # grid_remove is currently missing from Tkinter!
# self.tk.call("grid", "remove", self)
# else:
# self.grid()
# tk.Scrollbar.set(self, lo, hi)
# # def pack(self, **kw):
# # raise TclError, "cannot use pack with this widget"
# # def place(self, **kw):
# # raise TclError, "cannot use place with this widget"
#
#
# root = tk.Tk()
#
# vscrollbar = AutoScrollbar(root)
# vscrollbar.grid(row=0, column=1, sticky=tk.N+tk.S)
# hscrollbar = AutoScrollbar(root, orient=tk.HORIZONTAL)
# hscrollbar.grid(row=1, column=0, sticky=tk.E+tk.W)
#
# canvas = tk.Canvas(root,
# yscrollcommand=vscrollbar.set,
# xscrollcommand=hscrollbar.set)
# canvas.grid(row=0, column=0, sticky=tk.N+tk.S+tk.E+tk.W)
#
# vscrollbar.config(command=canvas.yview)
# hscrollbar.config(command=canvas.xview)
#
# # make the canvas expandable
# root.grid_rowconfigure(0, weight=1)
# root.grid_columnconfigure(0, weight=1)
#
# #
# # create canvas contents
#
# frame = tk.Frame(canvas)
# frame.rowconfigure(1, weight=1)
# frame.columnconfigure(1, weight=1)
#
# rows = 77
# for i in range(1,rows):
# for j in range(1,3):
# button = tk.Button(frame, padx=7, pady=7, text="[%d,%d]" % (i,j))
# button.grid(row=i, column=j, sticky='news')
#
# canvas.create_window(0, 0, anchor=tk.NW, window=frame)
#
# frame.update_idletasks()
#
# canvas.config(scrollregion=canvas.bbox("all"))
#
# root.mainloop()
# import sqlite3 as sql
# conn = sql.connect(r'Schedulerdatabase.db')
# c = conn.cursor()
# dataCopy = c.execute("select count(*) from employee")
# values = dataCopy.fetchone()
# print(values[0])
import math
import os
import random
import re
import sys
#
# Complete the 'alphaBeta' function below.
#
# The function is expected to return an INTEGER.
# The function accepts INTEGER_ARRAY pile as parameter.
#
#!/bin/python
import math
import os
import random
import re
import sys
#
# Complete the 'nuclearFusion' function below.
#
# The function accepts INTEGER_ARRAY elements as parameter.
#
#
def nuclearFusion(elementss):
    """Pair off "end" nodes of a graph encoded as an adjacency list and
    collect the eliminated ones.

    NOTE(review): this function is broken as written and cannot run to
    completion on non-trivial input — see the inline notes below.  The
    intended algorithm could not be reconstructed from the code alone,
    so the defects are flagged rather than fixed.
    """
    elements = []
    winners = []  # NOTE(review): never written to or returned — dead state.
    losers = []
    # Copy the (possibly lazy) input iterable into a concrete list.
    for i in elementss:
        elements.append(i)
    def is_end(list, n):
        # True when node number n never appears as a value in the list.
        # NOTE(review): `list` shadows the builtin name.
        for i in range(len(list)):
            if n == list[i]:
                return False
            else:
                pass
        return True
    def check_ends(list):
        # Collect the distinct values held at positions whose node number
        # (index + 2) never appears as a value, i.e. "end" nodes.
        out = []
        for x in range(len(list)):
            if is_end(elements, x+2):
                out.append(x)
        outt = []
        for x in range(len(out)):
            if elements[out[x]] not in outt:
                outt.append(elements[out[x]])
        return outt
    def run_connects(list):
        cur_lose = []
        connects = check_ends(list)
        for i in range(len(connects)):
            end_nodes = []
            for j in range(len(list)):
                if connects[i] == list[j]:
                    end_nodes.append(j + 2)
            print(len(end_nodes))
            if len(end_nodes) > 1:
                if len(end_nodes) % 2 == 1:
                    print(len(end_nodes)//2)
                    for k in range(len(end_nodes)//2):
                        print(end_nodes[0:1])
                        # NOTE(review): end_nodes[0:1] is a one-element
                        # *list* (a slice), not an element; appending it
                        # nests lists in `losers`, and .remove() below
                        # raises ValueError because that list is not a
                        # member of end_nodes.  end_nodes[0] was likely
                        # intended throughout.
                        losers.append(end_nodes[0:1])
                        cur_lose.append(end_nodes[0:1])
                        end_nodes.remove(end_nodes[0:1])
        print(cur_lose)
        for i in cur_lose:
            print(i)
            # NOTE(review): `i` is a list here, so list[i] raises
            # TypeError (list indices must be integers).
            list.remove(list[i])
    run_connects(elements)
    return losers
if __name__ == '__main__':
    # First input line: element count (read but unused below).
    n = int(input().strip())
    # map() yields ints lazily; nuclearFusion copies them into a list.
    elements = map(int, input().rstrip().split())
    print(nuclearFusion(elements))
#
# def is_end(list, n):
# for i in range(len(list)):
# if n == list[i]:
# return False
# else:
# pass
# return True
# for i in range(8):
#
# print(is_end([1, 1, 1, 2, 2, 4, 4, 4], i + 2)) |
# String-method demo (comments translated from Thai).
name = " kittisak "
name_1 = "KITTISAK"
name_2 = "Kittisak frank"
name_3 = "kittisak nuntasen frankygo"
name_4 = "NUNTASEN"
print(len(name))  # length of the string, including surrounding spaces
# BUG FIX: the original wrote `name.split` / `name.lstrip` without calling
# them, so nothing happened.  strip() removes whitespace on both sides
# (lstrip/rstrip trim only one side); rebinding is required because
# strings are immutable.
name = name.strip()
print(name)
print(name_1.lower())  # lowercase
print(name.upper())  # uppercase
print(name.capitalize())  # capitalize the first letter
print(name_2.replace("frank","Nuntasen"))  # replace the first text with the second
x = "frankygo" in name_3  # membership test: is the substring present?
print(x)
print(name_1 +" "+ name_4)  # string concatenation
text = "ชื่อ :{}\tนามสกุล :{}"
print(text.format(name_1,name_4))
a = "1 2 2 3 3 4 4 5 5 2 2 3 4"
print(a.count("2"))  # count occurrences of the substring
|
import logging
from flask import Flask
from routes import api
# Log line layout: timestamp|logger name|level|line number|message.
FORMAT = '%(asctime)s|%(name)s|%(levelname)s|%(lineno)d|%(message)s'
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
LOGGER = logging.getLogger(__name__)
# Flask application wired to the blueprint defined in routes.api.
app = Flask(__name__)
app.register_blueprint(api.core)
if __name__ == "__main__":
    # Debug server — for local development only.
    app.run(debug=True)
|
import torch
print(torch.__version__)
import torch.optim as optim
import torch.utils.data as data_utils
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from pytorch_lightning.metrics import Accuracy
from network import DeepFMNet
from data_loader import CustomDataset
# Training hyper-parameters for the DeepFM run below.
EPOCHS = 500
EMBEDDING_SIZE = 5  # width of each categorical embedding
BATCH_SIZE = 512
NROF_LAYERS = 3  # hidden layers in the deep component
NROF_NEURONS = 50  # neurons per hidden layer
DEEP_OUTPUT_SIZE = 50
NROF_OUT_CLASSES = 1  # single logit (BCEWithLogitsLoss is used below)
LEARNING_RATE = 3e-4
# NOTE(review): absolute paths tied to one machine — consider making these
# configurable.
TRAIN_PATH = '/home/denis/repos/sber_risk_DL/week11/data/train_adult.pickle'
VALID_PATH = '/home/denis/repos/sber_risk_DL/week11/data/valid_adult.pickle'
class DeepFM:
    """Training harness for a DeepFMNet binary classifier.

    Wires together the train/validation datasets and loaders, the
    network, a BCE-with-logits loss, an Accuracy metric, and an Adam
    optimizer, then runs the epoch loop in run_train().
    """
    def __init__(self):
        # Training pipeline (reshuffled every epoch).
        self.train_dataset = CustomDataset(TRAIN_PATH)
        self.train_loader = data_utils.DataLoader(dataset=self.train_dataset,
                                                  batch_size=BATCH_SIZE, shuffle=True)
        # Validation pipeline (fixed order, no shuffling).
        self.vall_dataset = CustomDataset(VALID_PATH)
        self.vall_loader = data_utils.DataLoader(dataset=self.vall_dataset,
                                                 batch_size=BATCH_SIZE, shuffle=False)
        self.build_model()
        self.log_params()
        #self.train_writer = SummaryWriter('./logs/train')
        #self.valid_writer = SummaryWriter('./logs/valid')
        return
    def build_model(self):
        """Construct the network, loss, metric, and optimizer."""
        # Embedding/categorical layout comes from the training dataset.
        self.network = DeepFMNet(nrof_cat=self.train_dataset.nrof_emb_categories, emb_dim=EMBEDDING_SIZE,
                                 emb_columns=self.train_dataset.embedding_columns,
                                 numeric_columns=self.train_dataset.numeric_columns,
                                 nrof_layers=NROF_LAYERS, nrof_neurons=NROF_NEURONS,
                                 output_size=DEEP_OUTPUT_SIZE,
                                 nrof_out_classes=NROF_OUT_CLASSES)
        # Loss takes raw logits; sigmoid is applied separately for metrics.
        self.loss = torch.nn.BCEWithLogitsLoss()
        self.accuracy = Accuracy()
        self.optimizer = optim.Adam(self.network.parameters(), lr=LEARNING_RATE)
        return
    def log_params(self):
        # Placeholder — hyper-parameter logging not implemented yet.
        return
    def load_model(self, restore_path=''):
        # Placeholder — checkpoint restore not implemented yet.
        if restore_path == '':
            #self.step = 0
            pass
        else:
            pass
        return
    def run_train(self):
        """Run the full training loop: EPOCHS passes over the training
        loader, each followed by a no-grad validation pass."""
        print('Run train ...')
        self.load_model()
        for epoch in range(EPOCHS):
            self.network.train()
            batch_loss_train = []
            batch_acc_train = []
            for features, label in self.train_loader:
                # Reset gradients
                self.optimizer.zero_grad()
                output = self.network(features)
                # Calculate error and backpropagate
                loss = self.loss(output, label)
                # Sigmoid converts logits to probabilities for the metric.
                output = torch.sigmoid(output)
                loss.backward()
                acc = self.accuracy(output, label).item()
                # Update weights with gradients
                self.optimizer.step()
                batch_loss_train.append(loss.item())
                batch_acc_train.append(acc)
            #self.train_writer.add_scalar('CrossEntropyLoss', np.mean(batch_loss_train), epoch)
            #self.train_writer.add_scalar('Accuracy', np.mean(batch_acc_train), epoch)
            batch_loss_vall = []
            batch_acc_vall = []
            # Validation: eval mode and no gradient tracking.
            self.network.eval()
            with torch.no_grad():
                for features, label in self.vall_loader:
                    vall_output = self.network(features)
                    vall_loss = self.loss(vall_output, label)
                    vall_output = torch.sigmoid(vall_output)
                    vall_acc = self.accuracy(vall_output, label).item()
                    batch_loss_vall.append(vall_loss.item())
                    batch_acc_vall.append(vall_acc)
            #self.valid_writer.add_scalar('CrossEntropyLoss', np.mean(batch_loss_vall), epoch)
            #self.valid_writer.add_scalar('Accuracy', np.mean(batch_acc_vall), epoch)
        return
# Build the model/data pipeline and immediately launch training.
deep_fm = DeepFM()
deep_fm.run_train()
#!/usr/bin/env python3
'''
Prompt:
Let S(A) represent the sum of elements in set A of size n. We shall call it a
special sum set if for any two non-empty disjoint subsets, B and C, the
following properties are true:
S(B) ≠ S(C); that is, sums of subsets cannot be equal.
If B contains more elements than C then S(B) > S(C).
If S(A) is minimised for a given n, we shall call it an optimum special sum
set. The first five optimum special sum sets are given below.
n = 1: {1}
n = 2: {1, 2}
n = 3: {2, 3, 4}
n = 4: {3, 5, 6, 7}
n = 5: {6, 9, 11, 12, 13}
It seems that for a given optimum set, A = {a1, a2, ... , an}, the next
optimum set is of the form B = {b, a1+b, a2+b, ... ,an+b}, where b is the
"middle" element on the previous row.
By applying this "rule" we would expect the optimum set for n = 6 to be
A = {11, 17, 20, 22, 23, 24}, with S(A) = 117. However, this is not the
optimum set, as we have merely applied an algorithm to provide a near optimum
set. The optimum set for n = 6 is A = {11, 18, 19, 20, 22, 25}, with
S(A) = 115 and corresponding set string: 111819202225.
Given that A is an optimum special sum set for n = 7, find its set string.
This problem is related to problems 105 and 106.
Solution: 20313839404245
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
Notes:
To solve this problem we first use the algorithm described above to get a
starting point. Using that process we find that a not-necessarily-minimal
special sum set for n = 7 is:
{20, 31, 38, 39, 40, 42, 45}
After finding this baseline set we make the assumption that the optimum
special sum set for n = 7 is close to this set. Though sets are unordered
collections of members, it is convenient to think of this assumption in the
following way: we assume that the first value in our optimum set is within
three integers of 20 (i.e., the first value in our baseline set), the second
is within three integers of 31, and so on.
We now move on to the task of finding candidate sets that may be the
optimum special sum set that we are looking for. To generate these candidates
we take the Cartesian product of ranges around each value in our baseline set.
Specifically we take the Cartesian product of lists of integers between three
less than and three more than (inclusive) each member of our baseline set. In
Python, the Cartesian product function returns tuples, so we will hereafter
refer to candidate tuples rather than candidate sets unless otherwise noted.
For each candidate tuple we need to test if it fits our criteria and is
more-optimum than the most-minimal special sum tuple we have found so far.
(The most-minimal special sum tuple so far is initially defined to be our
baseline set.) To do so we check three conditions:
1. The sum of the candidate tuple must be less than sum of the current-best
tuple, as we are seeking a minimal value. Any candidate tuple with an
equal or larger sum is ignored;
2. Each number in the candidate tuple must be distinct. In Python, tuples
may contain repetitions of values. As our solution must be a set and
thus cannot contain duplicates we check the length of the candidate
tuple when it is converted to a set. This conversion will eliminate
any duplicates present in the tuple.
If the length after conversion is less than seven, then the
candidate tuple contained duplicate values and we move on to the next
candidate. Only if the length after conversion is equal to seven do we
continue on to the next condition.
Note that we do not convert the candidate to a set in place, as
doing so nearly doubles the execution time of this script. I am not
sure why this is, but I will update this solution description if I am
able to deduce the cause of this change in efficiency. Only if a
candidate tuple has a more-minimal sum than the current-best tuple
will we convert it to a set permanently; and,
3. The candidate tuple must represent a special sum set and thus must have
two properties as described in the problem outline: each subset sum
must be unique, and every subset must have a larger sum than any
subsets with which it is disjoint that contain fewer members.
To check if the candidate tuple has these two properties we first
create the power set (i.e., the set of all subsets) of our candidate
tuple. (We will refer to these as subsets despite the fact that the
candidate is actually a tuple and not a set. Also, we will refer to
the power set although we will actually be using a list of subsets.)
It is important to note that the power set contains 2 ** n subsets for
a set with n members and includes the null set. Since the problem
requires that we focus on non-empty sets we remove the null set from
the list of subsets, leaving us with a set of 2 ** n - 1 subsets.
Next we check that the sum of each subset is unique. Rather than
comparing every possible pair of subsets, we instead use the fact that
        the number of sets in our modified power set is 2 ** n - 1. To check
the uniqueness of the subset sums, then, we create a set containing
the sum of every subset. If the length of this set of sums is equal to
2 ** n - 1 then it is clear that each subset has a unique sum. If
the length is actually smaller then we know that at least two subsets
have the same sum, implying that the candidate tuple does not satisfy
this required property of special sum sets and can be removed from
consideration.
After this we focus on the second property: for every pair of
disjoint subsets B and C, if B contains more values than C, then
the sum of B must be larger than the sum of C. Again, rather than
comparing all possible pairs of disjoint subsets--in this case only
those with different lengths--we can save a significant amount of time
through a more-thoughtful approach. We simply focus only on the extreme
pairs of subsets of different lengths to create three sub-conditions
that must hold if the candidate tuple represents a special sum set:
1. The sum of the two-member subset with the smallest sum of all
two-member subsets must be greater than the sum of the
one-member subset with the largest sum of all one-member
subsets (i.e., the largest individual member of the candidate
set);
2. The sum of the three-member subset with the smallest sum must
be greater than the sum of the two-member subset with the
largest sum; and,
3. The sum of the four-member subset with the smallest sum must be
greater than the sum of the three-member subset with the
largest sum.
We check these three sub-conditions by first creating a sorted
list containing the same elements as the candidate tuple except in
increasing order. We then focus on the first sub-condition by creating
a variable that is the sum of the first two (i.e., smallest two) items
in the sorted list and a variable that is equal to the last (i.e.,
largest) item in the sorted list. If the first variable is larger,
then we continue by increasing each variable by the appropriate next
item. That is, we add the third-smallest item to the total of the
smallest items and the second-largest item to the value of the largest
item. Again, if the first value is larger we continue by adding
another item to each sum and comparing again. If for all three
sub-conditions the sum of the subset containing more members is larger
then we can finally say that the candidate tuple represents a special
sum set.
If the candidate tuple satisfies all three of these conditions and their
sub-conditions and properties, then that candidate tuple represents our new
current-best special sum set. Once we have looped over every candidate tuple,
then the tuple stored as our current-best represents the optimum special sum
set.
The solution, however, is not the set itself, but rather the corresponding set
string. To create the set string we convert our current-best tuple to a
sorted, ascending list and then convert each element to a string, all of which
are then joined into the set string.
'''
import itertools
import time
def power_set(members):
    '''(tuple OR set OR list) -> list of sets

    Return a list of every subset of members, including the null set,
    each represented as a set.  Subsets appear in "doubling" order: for
    each member, copies of all previously generated subsets are appended
    with that member added.

    Note: in Python creating sets of sets is complicated and requires ordered
          sets, so this implementation actually returns a list of sets, not a
          set of sets.

    >>> power_set(set([1, 2, 3]))
    [set(), {1}, {2}, {1, 2}, {3}, {1, 3}, {2, 3}, {1, 2, 3}]
    >>> power_set([4, 21])
    [set(), {4}, {21}, {4, 21}]
    >>> power_set((10, 9, 4))
    [set(), {10}, {9}, {9, 10}, {4}, {10, 4}, {9, 4}, {9, 10, 4}]
    '''
    collected = [[]]
    for element in members:
        grown = [existing + [element] for existing in collected]
        collected.extend(grown)
    return [set(subset) for subset in collected]  # Converts to list of sets.
def has_unique_subset_sums(members):
    '''(tuple OR set OR list) -> bool

    Return True when every non-empty subset of members has a distinct
    sum, i.e. when the number of distinct subset sums equals the number
    of non-empty subsets (2 ** n - 1 for n members).  Return False when
    at least two subsets share a sum.

    >>> has_unique_subset_sums({2, 3, 4})
    True
    >>> has_unique_subset_sums((3, 5, 6, 7))
    True
    >>> has_unique_subset_sums([1, 3, 4])
    False
    '''
    # Skip element 0, the null set: only non-empty subsets matter here.
    non_empty_subsets = power_set(members)[1:]
    distinct_sums = {sum(subset) for subset in non_empty_subsets}
    return len(distinct_sums) == 2 ** len(members) - 1
def has_duplicates(members):
    '''(tuple OR list) -> bool
    Takes members, a tuple or list of hashable values.  Returns True if
    members contains at least one duplicated value; returns False if every
    value is unique.  (The doctests below pin this contract; the previous
    prose described the opposite.)
    >>> has_duplicates([1, 2, 1, 3])
    True
    >>> has_duplicates((2, 3, 4, 5))
    False
    '''
    # A set collapses duplicates, so any length difference means repeats.
    return len(members) != len(set(members))
def is_special_sum_set(members):
    '''(tuple OR set OR list) -> bool
    Reports whether members forms a "special sum set", i.e. both:
    1. every non-empty subset of members has a unique sum
       (checked by has_unique_subset_sums); and
    2. for any non-empty, disjoint subsets B and C, if B has more elements
       than C then sum(B) > sum(C)
       (checked by larger_subsets_have_larger_sums).
    >>> is_special_sum_set({2, 3, 4})
    True
    >>> is_special_sum_set((3, 5, 6, 7))
    True
    >>> is_special_sum_set([1, 3, 4])
    False
    '''
    # Both helpers return booleans, so `and` already yields the answer.
    return (has_unique_subset_sums(members) and
            larger_subsets_have_larger_sums(members))
def larger_subsets_have_larger_sums(members):
    '''(tuple OR set OR list) -> bool
    Checks that for any non-empty, disjoint subsets B and C of members, if B
    has more elements than C then sum(B) > sum(C).
    Only the extreme cases need testing: for each subset-size pair (n, n - 1)
    compare the smallest possible sum of n elements (the n smallest members)
    against the largest possible sum of n - 1 elements (the n - 1 largest
    members).  Increasing n beyond len(members) // 2 + 1 would force the two
    subsets to overlap, so the loop stops there.  The condition fails as
    soon as the small side does not exceed the large side.
    >>> larger_subsets_have_larger_sums({3, 5, 6, 7})
    True
    >>> larger_subsets_have_larger_sums([2, 5, 6, 7])
    False
    >>> larger_subsets_have_larger_sums((1, 2))
    True
    '''
    ordered = sorted(members)
    count = len(ordered)
    low_side = ordered[0]   # running sum of the n smallest members
    high_side = 0           # running sum of the (n - 1) largest members
    for step in range(count // 2):
        low_side += ordered[step + 1]
        high_side += ordered[-(step + 1)]
        # A smaller subset with an equal-or-larger sum violates condition 2.
        if low_side <= high_side:
            return False
    return True
if __name__ == '__main__':
    start_time = time.time()
    # Baseline near-optimal special sum set for n = 7 and its sum; the
    # search perturbs each element by at most +/-3 looking for a special
    # sum set with a strictly smaller total.
    current_best = set([20, 31, 38, 39, 40, 44, 46])
    current_best_sum = sum(current_best)
    candidate_ranges = [list(range(x - 3, x + 4)) for x in current_best]
    for candidate in itertools.product(*candidate_ranges):
        # Cheapest filters first: sum bound, duplicate check, then the
        # expensive special-sum-set test (same short-circuit order as a
        # chain of `continue`s).
        if (sum(candidate) < current_best_sum
                and not has_duplicates(candidate)
                and is_special_sum_set(candidate)):
            current_best = set(candidate)
            current_best_sum = sum(candidate)
    # The answer string is the sorted elements concatenated together.
    optimum_list = sorted(current_best)
    solution_set_string = "".join(str(item) for item in optimum_list)
    print("The set string of the optimum special sum set for n = 7 is {}."
          .format(solution_set_string))
    end_time = time.time()
print("Execution time: {}".format(end_time - start_time)) |
# Smoke-test script: import the pre-built object from the project-local
# `app` package and show one of its attributes.
# NOTE(review): assumes `object_to_initialize.field_1` is a string (it is
# concatenated with a str below) -- confirm against app's definition.
from app import object_to_initialize
print("--- Object to initialize field_1 = " + object_to_initialize.field_1)
|
from bs4 import BeautifulSoup
import aiohttp
import asyncio
# https://www.crummy.com/software/BeautifulSoup/bs4/doc/
# pip install beautifulsoup4
"""
웹 크롤링 : 검색 엔진의 구축 등을 위하여 특정한 방법으로 웹 페이지를 수집하는 프로그램
웹 스크래핑 : 웹에서 데이터를 수집하는 프로그램
"""
async def fetch(session, url, i):
    """Fetch one listing page and print the title of every book found on it.

    :param session: shared aiohttp.ClientSession used for the GET request
    :param url: page URL to scrape
    :param i: zero-based page index (printed 1-based as a progress marker)
    """
    print(i + 1)
    async with session.get(url) as response:
        html = await response.text()
        soup = BeautifulSoup(html, "html.parser")
        # Each entry sits in a <div class="cont_thumb">; its title is the
        # <p class="txt_thumb"> inside it (absent for some entries).
        cont_thumb = soup.find_all("div", "cont_thumb")
        for cont in cont_thumb:
            title = cont.find("p", "txt_thumb")
            if title is not None:
                print(title.text)
async def main():
    """Scrape pages 1-9 of the publisher's book-listing category concurrently."""
    BASE_URL = "https://bjpublic.tistory.com/category/%EC%A0%84%EC%B2%B4%20%EC%B6%9C%EA%B0%84%20%EB%8F%84%EC%84%9C"
    urls = [f"{BASE_URL}?page={i}" for i in range(1, 10)]
    async with aiohttp.ClientSession() as session:
        # One fetch task per page, all sharing the same session.
        await asyncio.gather(*[fetch(session, url, i) for i, url in enumerate(urls)])

if __name__ == "__main__":
    asyncio.run(main())
|
# circle 클래스 생성
# 속성(변수) : 반지름 -> radius , 외부에서 속성을 참조하지 못 하도록 보호
# 기능(메서드) : 원의 둘레, 원의 넓이
import math
class Circle:
    """A circle whose radius is held in a name-mangled private attribute.

    The radius is exposed three equivalent ways for backward compatibility:
    the `radius` property (getter and setter) and the explicit
    get_radius/set_radius methods.  All of them read and write the same
    underlying value.
    """

    def __init__(self, radius):
        """Store the initial radius."""
        self.__radius = radius

    @property
    def radius(self):
        """Property-style read access to the radius."""
        return self.__radius

    @radius.setter
    def radius(self, value):
        """Property-style write access to the radius."""
        self.__radius = value

    def get_radius(self):
        """Explicit accessor kept for existing callers."""
        return self.__radius

    def set_radius(self, value):
        """Explicit mutator kept for existing callers."""
        self.__radius = value

    def get_circumference(self):
        """Return the circumference, 2 * pi * r."""
        return 2 * math.pi * self.__radius

    def get_cirarea(self):
        """Return the area, pi * r * r."""
        return math.pi * self.__radius * self.__radius
# Create an instance with radius 10
c_1 = Circle(10)
# Assign new values through the mutator method and the property setter
c_1.set_radius(30)
c_1.radius=50
# Read __radius back through both getter styles (tab-separated output)
print('반지름 :', c_1.get_radius(), sep='\t')
print('반지름 :', c_1.radius, sep='\t')
# Circumference
print('원의 둘레 :', c_1.get_circumference())
# Area
print('원의 넓이 :', c_1.get_cirarea()) |
#!/usr/bin/env python3
import sqlite3
import config
# One-shot schema setup: create the users and messages tables if absent.
conn = sqlite3.connect(config.USERS_DB_PATH)
# Reuse a single cursor for both DDL statements instead of creating a
# throwaway cursor per statement.
cursor = conn.cursor()
cursor.execute('''CREATE TABLE IF NOT EXISTS users(
id INTEGER PRIMARY KEY,
username TEXT UNIQUE NOT NULL,
password TEXT NOT NULL,
reg_time TEXT NOT NULL,
last_login TEXT
)''')
cursor.execute('''CREATE TABLE IF NOT EXISTS messages(
id INTEGER PRIMARY KEY,
src TEXT NOT NULL,
dest TEXT NOT NULL,
time TEXT NOT NULL,
msg TEXT NOT NULL,
read INTEGER NOT NULL
)''')
conn.commit()
# Release the database handle: this is a setup script and the connection is
# not used afterwards (fixes a connection leak).
conn.close()
|
from django.apps import AppConfig
class RegulatoryInfoConfig(AppConfig):
    """Django application configuration for the regulatory_info app."""
    name = 'regulatory_info'
|
# Task 1: given a list of arbitrary integers, build a new list holding the
# integer square roots of the elements whose root exists (element > 0) and
# has no fractional part.
# Example: given [2, -5, 8, 9, -25, 25, 4] the result is [3, 5, 2]
import math
print("\n### Задача-1.Normal")
my_list_1 = [2, -5, 8, 9, -25, 25, 4]
# Keep a value's root only when the value is positive and the root is whole.
result = [int(math.sqrt(value))
          for value in my_list_1
          if value > 0 and math.sqrt(value).is_integer()]
print(result)
# Task 2: given a date in dd.mm.yyyy format (e.g. 02.11.2013), print it in
# words (e.g. "второе ноября 2013 года"); declension of the year is ignored
# (2000 года, 2010 года).
print("\n### Задача-2.Normal")
date_str="10.05.2019"
# Month number -> genitive Russian month name.
# Fixed misspelling: "сентбря" -> "сентября".
dict_month={
    "01": "января",
    "02": "февраля",
    "03": "марта",
    "04": "апреля",
    "05": "мая",
    "06": "июня",
    "07": "июля",
    "08": "августа",
    "09": "сентября",
    "10": "октября",
    "11": "ноября",
    "12": "декабря"
}
# Day number -> Russian ordinal (days 01..20 only, as in the original task).
# Fixed misspelling: "одинадцатое" -> "одиннадцатое".
dict_days={
    "01": "первое",
    "02": "второе",
    "03": "третье",
    "04": "четвертое",
    "05": "пятое",
    "06": "шестое",
    "07": "седьмое",
    "08": "восьмое",
    "09": "девятое",
    "10": "десятое",
    "11": "одиннадцатое",
    "12": "двенадцатое",
    "13": "тринадцатое",
    "14": "четырнадцатое",
    "15": "пятнадцатое",
    "16": "шестнадцатое",
    "17": "семнадцатое",
    "18": "восемнадцатое",
    "19": "девятнадцатое",
    "20": "двадцатое"
}
# Split the date into its day/month/year components.
date_list=date_str.split(".")
# Convert the numeric components to text.
day_str= dict_days[date_list[0]]
month_str= dict_month[date_list[1]]
year_str=date_list[2]
print("Дата:{} =>{} {} {} года".format(date_str, day_str, month_str, year_str))
# Task 3: fill a list with n random integers drawn from [-100, 100].
# Hint from the task: random.randint performs each draw.
print("\n### Задача-3.Normal")
import random
n = 20
result = [random.randint(-100, 100) for _ in range(n)]
print(result)
# Task 4: from a list of arbitrary integers build:
# a) the distinct values, e.g. [1, 2, 4, 5, 6, 2, 5, 2] -> {1, 2, 4, 5, 6};
# b) the values that occur exactly once, e.g. the same input -> [1, 4, 6].
print("\n### Задача-4.Normal")
lst1=[1, 2, 4, 5, 6, 2, 5, 2]
# a) a set keeps one copy of every value
lst2=set(lst1)
print(f"Исходный список: {lst1}")
print(f"Не повторяющиеся элементы исходного списка: {lst2}")
# b) values occurring exactly once, preserving the original order
result = [value for value in lst1 if lst1.count(value) == 1]
print(f"Элементы которые не имеют повторов: {result}")
|
# used Functions - unique(), create dictionary with zip
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib import cm
from sklearn.model_selection import train_test_split
# Load the fruit dataset (tab-separated; assumes columns fruit_label and
# fruit_name exist -- TODO confirm against the data file).
df = pd.read_table('class_fruit_data_with_colors.txt')
df.head(10)  # NOTE(review): return value discarded; only useful in a notebook
# create a mapping from fruit label value to fruit name to make results easier to interpret
key=df.fruit_label.unique()
value = df.fruit_name.unique()
# NOTE(review): zipping the two unique() outputs assumes labels and names
# first appear in the same row order (unique() preserves first-occurrence
# order) -- verify for this dataset.
lookup_fruit=dict(zip(key,value))
print(value) |
from taskplus.core.shared.domain_model import DomainModel
class Task(object):
    """Domain entity describing a unit of work.

    :param name: short task title
    :param content: task description/body
    :param status: current status value of the task
    :param creator: user that created the task
    :param doer: user assigned to perform the task (None when unassigned)
    :param id: persistent identifier (None until stored)
    """

    def __init__(self, name, content, status, creator, doer=None, id=None):
        self.name = name
        self.content = content
        self.status = status
        self.creator = creator
        self.doer = doer
        self.id = id

    def __repr__(self):
        # Added for debuggability; purely additive for existing callers.
        return (f"{type(self).__name__}(name={self.name!r}, "
                f"status={self.status!r}, id={self.id!r})")
# Register Task as a virtual subclass of DomainModel (presumably an
# abc.ABC -- verify) so isinstance/issubclass checks against DomainModel
# accept Task without inheritance.
DomainModel.register(Task)
|
"""inventory_manager URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URL conf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from . import views
urlpatterns = [
    # Stock-request workflow: create/list requests and approve/modify one.
    path('', views.RequestHandler.as_view(), name='handle_request'),
    path('modify/<request_id>/', views.RequestApprover.as_view(), name='approve_request'),
    # Accounts and employees.
    path('register/', views.UserCreate.as_view(), name='register_user'),
    path('employee/', views.EmployeeHandler.as_view(), name='handle_employee'),
    path('employee/<employee_id>/', views.EmployeeHandler.as_view(), name='handle_specific_employee'),
    path('initial/', views.EmployeeUnprotected.as_view(), name='unprotected_employee'),
    path('status/', views.StatusHandler.as_view(), name='status_modification'),
    # Product catalogue and bulk operations.
    path('model/', views.ProductTypeHandler.as_view(), name='ProductType'),
    path('product/', views.ProductsHandler.as_view(), name='handle_product'),
    path('product/<product_id>/', views.ProductHandler.as_view(), name= 'individual_product'),
    # NOTE(review): some routes below omit the trailing slash
    # ('products/...', 'stock/...', 'store/...') -- confirm this is intended,
    # as it changes URL matching under APPEND_SLASH.
    path('products/<filter_type>/<filter_value>', views.ProductHandler.as_view(), name='bulk_product'),
    path('bulk/', views.BulkProducts.as_view(), name='bulk_upload'),
    path('myproducts/<employee_id>/', views.EmployeeActions.as_view(), name='Employee_actions'),
    # Stores, assignment and roles.
    path('stock/<store_id>', views.StockHandler.as_view(), name='stock_handler'),
    path('store/<manager_id>', views.StoreHandler.as_view(), name='store_handler'),
    path('assign/<employee_id>/', views.AssignProduct.as_view(), name='product_assignment'),
    path('role/', views.RoleHandler.as_view(), name='role_handler')
]
|
import pydicom
import os
import numpy as np
import itertools
import glob
import argparse
import json
def normalize(x):
    """Return x scaled to unit Euclidean length (no guard for zero vectors)."""
    magnitude = np.sqrt(np.dot(x, x))
    return x / magnitude
def readRotAndTrans(paths):
    """Collect per-volume (rotation, translation) entries from vNav DICOMs.

    Expands the glob patterns in *paths*, sorts the DICOM datasets by
    AcquisitionNumber, and parses each ImageComments field, skipping the
    first volume (which carries no motion estimate).  The list is prefixed
    with a zero-motion head entry so it lines up with the full series.
    NOTE(review): tokens y[1:5] are read as 4 rotation components and
    y[6:9] as 3 translation components -- assumes a fixed ImageComments
    token layout with a separator token at index 5; confirm against the
    scanner's vNav comment format.
    """
    files = list(itertools.chain.from_iterable([glob.glob(path) for path in paths]))
    head = [(np.array([1,0,0,0]),np.array([0,0,0]))]
    ds = sorted([pydicom.dcmread(x) for x in files], key=lambda dcm: dcm.AcquisitionNumber)
    imageComments = [ str.split(x.ImageComments) for x in ds[1:] if 'ImageComments' in x ]
    return list(itertools.chain.from_iterable([head, [ (np.array(list(map(float, y[1:5]))), list(map(float, y[6:9]))) for y in imageComments] ]))
def angleAxisToQuaternion(a):
    """Convert angle-axis [angle, x, y, z] to a quaternion [w, x, y, z].

    The axis is normalized, so non-unit axes are accepted; a zero-length
    axis yields the identity quaternion regardless of the angle.
    """
    w = np.cos(a[0] / 2.0)
    axisNorm = np.sqrt(np.dot(a[1:], a[1:]))
    if 0 == axisNorm:
        return np.array([1, 0, 0, 0])
    # Reuse the norm computed above (it was previously recomputed here).
    axisScale = np.sin(a[0] / 2.0) / axisNorm
    tail = a[1:] * axisScale
    q = np.ndarray(shape=(4))
    q[0] = w
    q[1:] = tail
    return q
def quaternionToAxisAngle(q):
    """Convert quaternion [w, x, y, z] to angle-axis [angle, x, y, z].

    The axis is normalized; an identity quaternion (zero vector part)
    divides by zero, matching the original behaviour.
    """
    out = np.ndarray(shape=(4,))
    out[0] = np.arccos(q[0]) * 2.0
    vector_part = q[1:]
    out[1:] = vector_part / np.sqrt(np.dot(vector_part, vector_part))
    return out
def quaternionToRotationMatrix(q):
    """Return the 3x3 rotation matrix for the unit quaternion [w, x, y, z]."""
    w, x, y, z = q[0], q[1], q[2], q[3]
    # Squared terms and doubled cross terms of the standard expansion.
    wSq, xSq, ySq, zSq = w * w, x * x, y * y, z * z
    wx2 = w * x * 2.0
    wy2 = w * y * 2.0
    wz2 = w * z * 2.0
    xy2 = x * y * 2.0
    xz2 = x * z * 2.0
    yz2 = y * z * 2.0
    return np.array([
        [wSq + xSq - ySq - zSq, xy2 - wz2, xz2 + wy2],
        [xy2 + wz2, wSq - xSq + ySq - zSq, yz2 - wx2],
        [xz2 - wy2, yz2 + wx2, wSq - xSq - ySq + zSq],
    ])
def rotationMatrixToQuaternion(m):
    ## Dylan Dec 19/12
    ## This algorithm taken from http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/index.htm
    ##
    ## Also tried the algorithm in
    ## Animating rotation with quaternion curves.
    ## Ken Shoemake
    ## Computer Graphics 19(3):245-254, 1985
    ## http://portal.acm.org/citation.cfm?doid=325334.325242
    ## but you'll find that it's not numerically stable without some truncation epsilon.
    ## The algorithm we're using now doesn't require us to pick some arbitrary epsilon, so
    ## I like it better.
    """Convert a 3x3 rotation matrix to a quaternion [w, x, y, z].

    Branches on the trace (or the dominant diagonal element) so the value
    under the square root stays well away from zero, which is what makes
    this variant numerically stable without an epsilon.
    """
    tr = np.trace(m);
    # Case 1: positive trace -- safe to derive S from w.
    if (tr > 0) :
        S = np.sqrt(tr+1.0) * 2; # S=4*qw
        SInv = 1.0 / S;
        w = 0.25 * S;
        x = (m[2,1] - m[1,2]) * SInv;
        y = (m[0,2] - m[2,0]) * SInv;
        z = (m[1,0] - m[0,1]) * SInv;
    # Case 2: m[0,0] dominates the diagonal -- derive S from x.
    elif ((m[0,0] > m[1,1]) and (m[0,0] > m[2,2])):
        S = np.sqrt(1.0 + m[0,0] - m[1,1] - m[2,2]) * 2; # S=4*qx
        SInv = 1.0 / S;
        w = (m[2,1] - m[1,2]) * SInv;
        x = 0.25 * S;
        y = (m[0,1] + m[1,0]) * SInv;
        z = (m[0,2] + m[2,0]) * SInv
    # Case 3: m[1,1] dominates -- derive S from y.
    elif (m[1,1] > m[2,2]):
        S = np.sqrt(1.0 + m[1,1] - m[0,0] - m[2,2]) * 2; # S=4*qy
        SInv = 1.0 / S;
        w = (m[0,2] - m[2,0]) * SInv;
        x = (m[0,1] + m[1,0]) * SInv;
        y = 0.25 * S;
        z = (m[1,2] + m[2,1]) * SInv;
    # Case 4: m[2,2] dominates -- derive S from z.
    else:
        S = np.sqrt(1.0 + m[2,2] - m[0,0] - m[1,1]) * 2; # S=4*qz
        SInv = 1.0 / S;
        w = (m[1,0] - m[0,1]) * SInv;
        x = (m[0,2] + m[2,0]) * SInv;
        y = (m[1,2] + m[2,1]) * SInv;
        z = 0.25 * S;
    return np.array([w, x, y, z])
def motionEntryToHomogeneousTransform(e):
    """Build a 4x4 homogeneous transform (np.matrix) from a motion entry.

    *e* is an (angle-axis, translation) pair as produced by readRotAndTrans.
    """
    homogeneous = np.ndarray(shape=(4, 4))
    homogeneous[0:3, 0:3] = quaternionToRotationMatrix(angleAxisToQuaternion(e[0]))
    homogeneous[0:3, 3] = e[1]
    homogeneous[3, :] = [0, 0, 0, 1]
    return np.matrix(homogeneous)
def diffTransformToMaxMotion(t, radius):
    """Max-motion score of a differential transform *t* (4x4 np.matrix).

    Combines the chord length swept by a point at *radius* under the
    rotation with the translation, bounding the displacement of a point on
    a sphere of that radius.  Units follow the translation entries
    (assumed millimeters -- TODO confirm).
    """
    angleAxis = quaternionToAxisAngle(rotationMatrixToQuaternion(t[0:3, 0:3]))
    angle = angleAxis[0]
    axis = angleAxis[1:]
    trans = t[0:3,3].flatten()
    # Chord traced by a point at *radius* rotated by *angle* (law of cosines).
    t_rotmax = radius * np.sqrt(2.0 - 2.0 * np.cos(angle))
    # NOTE(review): the middle term uses only the translation component
    # perpendicular to the rotation axis -- confirm against the derivation.
    return np.sqrt(
        (t_rotmax * t_rotmax) +
        (2.0 * t_rotmax) *
        np.linalg.norm(
            trans - (np.dot(trans, axis) * axis)) +
        (np.linalg.norm(trans) * np.linalg.norm(trans))
    )
def diffTransformToRMSMotion(t, radius):
    """RMS-motion score of a differential transform *t*.

    Expects a 4x4 np.matrix so that '*' below is matrix multiplication.
    """
    drift = t[0:3, 0:3] - np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    translation = np.ravel(t[0:3, 3])
    rotation_term = 0.2 * radius * radius * np.trace(np.transpose(drift) * drift)
    return np.sqrt(rotation_term + np.dot(translation, translation))
def parseMotion(rotAndTrans, tr, radius):
    """Convert motion entries into RMS and max motion scores.

    :param rotAndTrans: list of (angle-axis, translation) entries, as
        produced by readRotAndTrans
    :param tr: repetition time of the parent sequence in seconds; the mean
        scores are scaled by 60.0 / tr to express motion per minute
    :param radius: assumed brain radius in millimeters
    :return: dict with 'mean_rms', 'mean_max', 'rms_scores', 'max_scores'
    """
    # Transform creation and frame-to-frame differences.
    transforms = [motionEntryToHomogeneousTransform(e) for e in rotAndTrans]
    # Pair each transform with its successor (the redundant [0:] slice copy
    # is unnecessary for zip).
    diffTransforms = [after * np.linalg.inv(before)
                      for before, after in zip(transforms, transforms[1:])]
    # Motion scores per consecutive pair.
    rmsMotionScores = [diffTransformToRMSMotion(t, radius) for t in diffTransforms]
    maxMotionScores = [diffTransformToMaxMotion(t, radius) for t in diffTransforms]
    scores = {}
    scores['mean_rms'] = np.mean(rmsMotionScores) * 60.0 / tr
    scores['mean_max'] = np.mean(maxMotionScores) * 60.0 / tr
    scores['rms_scores'] = rmsMotionScores
    scores['max_scores'] = maxMotionScores
    return scores
if __name__ == '__main__':
    # Command-line entry point: parse a vNav DICOM series and print the
    # requested motion-score flavour to STDOUT.
    parser = argparse.ArgumentParser(description='Parse DICOM files from a vNav series and convert them into different motion scores.')
    parser.add_argument('--tr', required=True, type=float,
                        help='Repetition Time (TR) of the parent sequence (i.e., the MPRAGE) expressed in seconds.')
    parser.add_argument('--input', nargs='+', required=True, type=os.path.abspath,
                        help='A list of DICOM files that make up the vNav series (in chronological order)')
    parser.add_argument('--radius', required=True, type=float,
                        help='Assumed brain radius in millimeters for estimating rotation distance.')
    # Exactly one output flavour may be requested per invocation.
    output_type = parser.add_mutually_exclusive_group(required=True)
    output_type.add_argument('--mean-rms', action='store_true', help='Print time-averaged root mean square (RMS) motion.')
    output_type.add_argument('--mean-max', action='store_true', help='Print time-averaged max motion.')
    output_type.add_argument('--rms-scores', action='store_true', help='Print root mean square (RMS) motion over time.')
    output_type.add_argument('--max-scores', action='store_true', help='Print max motion over time.')
    args = parser.parse_args()
    scores = parseMotion(readRotAndTrans(args.input), args.tr, args.radius)
    # Script output to STDOUT depending on "output_type"
    if args.mean_rms:
        print(scores['mean_rms'])
    elif args.mean_max:
        print(scores['mean_max'])
    elif args.rms_scores:
        print('\n'.join(map(str, scores['rms_scores'])))
    elif args.max_scores:
        print('\n'.join(map(str, scores['max_scores'])))
|
import random
import logging
import time
from abc import ABC, abstractmethod
from typing import Tuple, List, Dict, Any
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.patches import Ellipse
import numpy as np
from numpy import pi
import statistics
matplotlib.use('TkAgg')
logger = logging.getLogger('RLFR')
def set_seed(seed):
    """Seed both the stdlib and the NumPy random generators."""
    for seed_fn in (random.seed, np.random.seed):
        seed_fn(seed)
def init_logger():
    """Attach a DEBUG-level stream handler to the module logger."""
    handler = logging.StreamHandler()
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter('%(levelname)s\t| %(message)s'))
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)
class InvertedPendulumRenderer:
    """
    Renderer object for the 1-DOF Inverted Pendulum Environment
    """

    def __init__(self, simulation_ax=None, reward_ax=None, length=1.0, radius=0.1, dt=0.1, max_timesteps=None):
        """
        Initialize new object
        :param simulation_ax: The axis which should be used for the simulation rendering. Creates a new figure if None.
        :param reward_ax: The axis which should be used for the reward rendering. Creates a new figure if None.
        :param length: The length of the robe in the simulation.
        :param radius: The radius of the mass in the simulation.
        :param dt: The delta timestep of the simulation, used only by the reward function.
        :param max_timesteps: Sets the maximum value of the time in the reward plot.
        """
        self._simulation_ax: plt.Axes = simulation_ax
        self._reward_ax: plt.Axes = reward_ax
        self._length = length
        self._radius = radius
        self._dt = dt
        self._max_timesteps = max_timesteps
        # Static artists are drawn once; all are created lazily on first render.
        self._static_main_circle = None
        self._static_reference_line = None
        self._static_reference_mass_circle = None
        # Dynamic artists are updated on every render/plot call.
        self._dynamic_pendulum_line: plt.Line2D or None = None
        self._dynamic_mass_circle: plt.Circle or None = None
        self._dynamic_state_text: plt.Text or None = None
        self._dynamic_reward_plot: plt.Line2D or None = None
        self._simulation_ready = False
        self._reward_plot_ready = False
        self._previous_plot_time = None
        # Interactive mode so subsequent draw() calls update without blocking.
        plt.ion()
        plt.show()

    def __del__(self):
        """
        Destructs the object
        """
        # Leaving interactive mode; the final blocking show() keeps windows open.
        plt.ioff()
        plt.show()

    def _initialize_simulation(self):
        """
        Initialize the simulation plot. This method draws the static objects.
        """
        self._static_main_circle = plt.Circle((0, 0), 0.025, color='black', fill=True)
        self._static_reference_mass_circle = plt.Circle((0, self._length), self._radius, color='grey', fill=False,
                                                        linestyle='--')
        self._static_reference_line = plt.Line2D([0.0, 0.0], [0.0, self._length - self._radius], color='grey',
                                                 linestyle='--')
        self._dynamic_pendulum_line = plt.Line2D([0.0], [0.0], color='black')
        # Mass starts off-screen until the first state is rendered.
        self._dynamic_mass_circle = plt.Circle((np.inf, np.inf), self._radius, color='black', fill=True)
        self._dynamic_state_text = plt.Text(1.5, 1, "Pos: \nVelo:", horizontalalignment='right')
        if self._simulation_ax is None:
            self._simulation_ax = self._create_figure()
        self._simulation_ax.axis('equal')
        limit = 1.5 * self._length
        self._simulation_ax.set_xlim(-limit, limit)
        self._simulation_ax.set_ylim(-limit, limit)
        self._simulation_ax.set_axis_off()
        self._simulation_ax.set_title("1-DOF Inverted Pendulum Simulation")
        self._simulation_ax.add_artist(self._static_main_circle)
        self._simulation_ax.add_artist(self._static_reference_mass_circle)
        self._simulation_ax.add_artist(self._static_reference_line)
        self._simulation_ax.add_artist(self._dynamic_pendulum_line)
        self._simulation_ax.add_artist(self._dynamic_mass_circle)
        self._simulation_ax.add_artist(self._dynamic_state_text)
        self._simulation_ready = True

    def _initialize_reward_plot(self):
        """
        Initialize the reward plot. This method draws the static objects.
        """
        if self._reward_ax is None:
            self._reward_ax = self._create_figure()
        # When max_timesteps is None the x-limit is rescaled on every plot call.
        self._reward_ax.set_xlim(0.0, self._max_timesteps)
        self._reward_ax.set_ylim(-np.pi, 0.0)
        self._reward_ax.set_title("1-DOF Inverted Pendulum Reward")
        self._reward_ax.set_xlabel('Time [s]')
        self._reward_ax.set_ylabel('Reward [-]')
        self._dynamic_reward_plot = plt.Line2D([0.0], [0.0], color='black')
        self._reward_ax.add_artist(self._dynamic_reward_plot)
        self._reward_plot_ready = True

    @staticmethod
    def _create_figure() -> plt.Axes:
        """
        Creates a new figure
        :return: The axis of the figure
        """
        dpi = 150
        figsize = (5, 5)
        figure = plt.figure(dpi=dpi, figsize=figsize)
        ax = figure.gca()
        return ax

    def render_simulation(self, state):
        """
        Renders the state of the simulation
        :param state: The state to render
        """
        if not self._simulation_ready:
            self._initialize_simulation()
        theta, theta_dot = state
        # theta is measured from the upright position, so (sin, cos) places
        # the mass on the unit circle with theta = 0 pointing up.
        pos_x = np.sin(theta)
        pos_y = np.cos(theta)
        self._dynamic_pendulum_line.set_data([0.0, pos_x], [0.0, pos_y])
        self._dynamic_mass_circle.set_center((pos_x, pos_y))
        self._dynamic_state_text.set_text(f"Pos: {theta: 4.4f}\nVelo: {theta_dot: 4.4f}")
        self._simulation_ax.figure.canvas.draw()
        self._simulation_ax.figure.canvas.flush_events()

    def plot_reward(self, rewards: List[float]):
        """
        Plots the state of the simulation
        :param rewards: The list of rewards to plot
        """
        if len(rewards) == 0:
            return
        if not self._reward_plot_ready:
            self._initialize_reward_plot()
        max_time = self._dt * len(rewards)
        if self._max_timesteps is None:
            self._reward_ax.set_xlim(0.0, max_time)
        self._dynamic_reward_plot.set_data(np.linspace(0.0, max_time, num=len(rewards)), rewards)
        self._reward_ax.figure.canvas.draw()
        self._reward_ax.figure.canvas.flush_events()

    def pause_until_simulation_end(self):
        # Sleeps so that consecutive frames are spaced by self._dt of wall
        # time; the first call only records the reference timestamp.
        if self._previous_plot_time is None:
            self._previous_plot_time = time.time()
            return
        current_time = time.time()
        next_plot_time = self._previous_plot_time + self._dt
        required_sleep_time = next_plot_time - current_time
        if required_sleep_time > 0:
            time.sleep(required_sleep_time)
            self._previous_plot_time = next_plot_time
        else:
            # Rendering already took longer than dt; resynchronize.
            self._previous_plot_time = time.time()
            logger.debug(f"There is no need to wait for the simulation end")
class InvertedPendulumEnvironment:
    """
    Class for the 1-D inverted pendulum environment
    """

    class InvertedPendulumParameters:
        """
        Class for the parameters of the inverted pendulum environment
        """

        def __init__(self, mass=1.0, length=1.0, gravitational_acceleration=9.8, mu=0.01):
            """
            Initialize a new parameter object
            :param mass: The mass of the ball
            :param length: The length of the robe
            :param gravitational_acceleration: The applied gravitational force
            :param mu: The friction coefficient of the robe
            """
            self.mass = mass
            self.length = length
            self.gravitational_acceleration = gravitational_acceleration
            self.mu = mu

    def __init__(self, environment_parameters=None, action_range=(-5.0, 5.0),
                 action_interval=0.1, update_interval=0.001, renderer=None):
        """
        Initialize a new environment
        :param environment_parameters: The dynamical parameters of the environment;
            a fresh InvertedPendulumParameters() with defaults is used when None
        :param action_range: The range of the applicable actions
        :param action_interval: The time interval of the action
        :param update_interval: The time interval of the Euler approximation
        :param renderer: The renderer object for visualization
        """
        if environment_parameters is None:
            # Avoid the mutable-default-argument pitfall: every environment
            # gets its own parameter object instead of one shared instance.
            environment_parameters = InvertedPendulumEnvironment.InvertedPendulumParameters()
        self._environment_parameters = environment_parameters
        self._action_range = action_range
        self._action_interval = action_interval
        self._update_interval = update_interval
        if renderer is None:
            renderer = InvertedPendulumRenderer(dt=action_interval)
        self._renderer = renderer
        self.state: Tuple[float, float] or None = None
        self.time: float or None = None
        self.rewards: List[float] or None = None
        # Cached dynamics constants: m * g * l (gravity torque scale) and the
        # moment of inertia m * l ** 2.
        self._gravitational_force_cache = self._environment_parameters.mass * \
                                          self._environment_parameters.gravitational_acceleration * \
                                          self._environment_parameters.length
        self._inertia_cache = (self._environment_parameters.mass * self._environment_parameters.length ** 2)

    def reset(self) -> Tuple[float, float]:
        """
        Resets the environment
        :return: The initial state
        """
        # Start hanging straight down (theta = pi) at rest.
        self.state = (pi, 0)
        self.time = 0
        self.rewards = []
        return self.state

    def step(self, action: float) -> Tuple[Tuple[float, float], float]:
        """
        Runs one step in the environment
        :return: Tuple of the next state and the reward receive by executing the action
        """
        action_min, action_max = self._action_range
        if action < action_min or action > action_max:
            logger.debug(f"Requested action {action} exceeds the action limits{self._action_range}, "
                         f"it will be clipped!")
            action = np.clip(action, action_min, action_max)
        reward = self._get_reward(action)
        next_state = self._get_next_state(action)
        self.state = next_state
        self.time += self._action_interval
        self.rewards.append(reward)
        return next_state, reward

    def _get_reward(self, action: float) -> float:
        """
        Get a reward given the current state and the applied action
        :param action: The applied action
        :return: The reward of the behaviour
        """
        # Reward is independent from the action: negative distance of the
        # pendulum angle from the upright position.
        reward = -abs(self.state[0])
        return reward

    def _get_next_state(self, action: float) -> Tuple[float, float]:
        """
        Calculates the next state
        :param action: The applied action
        :return: The next state
        """
        current_position, current_velocity = self.state
        remaining_time = self._action_interval
        dt = self._update_interval
        next_position, next_velocity = current_position, current_velocity
        # Euler approximation over the whole action interval
        while remaining_time > 0:
            # Clamp only the final partial step so the integration covers
            # exactly the action interval.  (Bug fix: the comparison was
            # inverted -- `dt < remaining_time` -- which collapsed the whole
            # interval into a single coarse Euler step and made
            # update_interval effectively unused.)
            if dt > remaining_time:
                dt = remaining_time
            # Dynamics: friction + gravity torque + applied action over inertia
            acceleration = (-self._environment_parameters.mu * current_velocity +
                            self._gravitational_force_cache * np.sin(current_position) +
                            action) / \
                           self._inertia_cache
            # Velocity approximation: theta_dot_t = theta_dot_(t - 1) + dt * theta_dotdot
            next_velocity = self._clip_velocity(current_velocity + dt * acceleration)
            # Position approximation: theta_t = theta_(t - 1) + dt * theta_dot_t + 1 / 2 * dt ^ 2 * theta_dotdot
            next_position = current_position + dt * next_velocity + 0.5 * dt ** 2 * acceleration
            current_position, current_velocity = next_position, next_velocity
            remaining_time -= dt
        next_position = self._normalize_position(current_position)
        return next_position, next_velocity

    @staticmethod
    def _clip_velocity(velocity):
        """
        Clip the velocity to the defined range
        :param velocity: The velocity to clip
        :return: The clipped velocity
        """
        velocity_min, velocity_max = -2 * np.pi, 2 * np.pi
        if velocity < velocity_min or velocity > velocity_max:
            logger.debug(f"Velocity {velocity} exceeded the limits [{velocity_min}, {velocity_max}], "
                         f"it will be clipped")
            velocity = np.clip(velocity, velocity_min, velocity_max)
        return velocity

    @staticmethod
    def _normalize_position(position):
        """
        Normalizes the position to the given range
        :param position: The position to clip
        :return: The normalized position
        """
        # Wrap into [0, 2*pi) first, then shift to (-pi, pi].
        position = position % (2 * np.pi)
        if position > np.pi:
            position -= (2 * np.pi)
        return position

    def render(self):
        """Renders the simulation"""
        self._renderer.render_simulation(self.state)

    def plot_reward(self):
        """Plots the rewards so far"""
        self._renderer.plot_reward(self.rewards)

    def wait_for_simulation(self):
        """Waits until the simulation step ends"""
        self._renderer.pause_until_simulation_end()
def environment_simulation():
    """
    Example function for the usage of the simulation
    """
    # One environment instance serves the whole interaction loop.
    environment = InvertedPendulumEnvironment()
    # Reset to the initial state, then draw the starting frame and reward plot.
    environment.reset()
    environment.render()
    environment.plot_reward()
    # Action schedule: idle, push with 5, idle again.
    action_list = np.concatenate((np.repeat(0, 200), np.repeat(5, 100), np.repeat(0, 200)))
    for action in action_list:
        # Apply the action, then refresh both plots and pace the loop in
        # wall-clock time.
        environment.step(action)
        environment.render()
        environment.plot_reward()
        environment.wait_for_simulation()
    print("Done")
class VariableResolutionPartition:
    """Node of a variable-resolution binary partition over (theta, theta_dot, action).

    Each leaf keeps running Q-value statistics (count, mean, variance); a
    leaf splits into two children along its widest observed dimension once
    both the sample-count and variance thresholds are exceeded.
    """

    def __init__(self, decision_boundary=None, thr_n=20, thr_var=1.12):
        # Children are None while this node is a leaf.
        self.child_1: VariableResolutionPartition = None
        self.child_2: VariableResolutionPartition = None
        self.decision_boundary = decision_boundary # (s,a)
        self.sample_number = 0
        self.q_mean = 0
        self.q_variance = 0.81
        # Maps visited state -> list of actions taken there; used by split()
        # to measure dimension extents.
        self.state_action_dict = dict()
        # thresholds
        self.thr_n = thr_n
        self.thr_var = thr_var

    def get_value(self, state, action):
        """Return (sample_number, q_mean, q_variance) of the leaf owning (state, action)."""
        theta, theta_dot = state
        if self.child_1 is None and self.child_2 is None:
            return self.sample_number, self.q_mean, self.q_variance
        # Elementwise comparison against the boundary; unsplit dimensions
        # hold np.inf so only the actually split dimension decides.
        if all(np.array([theta, theta_dot, action]) < self.decision_boundary):
            return self.child_1.get_value(state, action)
        else:
            return self.child_2.get_value(state, action)

    def update_value(self, new_sample_state, new_sample_action, new_sample_Q):
        """Fold a new Q sample into the responsible leaf, splitting if warranted."""
        new_theta, new_theta_dot = new_sample_state
        # No children, just take the value and update the statistics
        if self.decision_boundary is None:
            if self.child_1 is None and self.child_2 is None:
                self.sample_number += 1
                a = 0.001
                b = 10
                # learning rate (decays slowly with the sample count)
                alpha = 1 / (a * self.sample_number + b)
                delta = new_sample_Q - self.q_mean
                # Exponential running estimates of mean and variance.
                self.q_mean = self.q_mean + alpha * delta
                self.q_variance = self.q_variance + alpha * ((delta * delta) - self.q_variance)
                if new_sample_state not in self.state_action_dict:
                    self.state_action_dict[new_sample_state] = []
                self.state_action_dict[new_sample_state].append(new_sample_action)
                # Splitting criterion
                if self.q_variance > self.thr_var and self.sample_number > self.thr_n:
                    self.split()
        # Has children, check the decision boundary and ask the children to proceed
        else:
            if all(np.array([new_theta, new_theta_dot, new_sample_action]) < self.decision_boundary):
                self.child_1.update_value(new_sample_state, new_sample_action, new_sample_Q)
            else:
                self.child_2.update_value(new_sample_state, new_sample_action, new_sample_Q)

    def split(self, offset=0):
        # split state-action space in 2 halves along the dimension with the largest size
        # NOTE(review): the collected samples are NOT redistributed into the
        # new children; both start empty -- confirm this is intended.
        action_list = list(self.state_action_dict.values())
        action_list_flatten = [item for sublist in action_list for item in sublist]
        # size of the action dimension in Q table
        size_action = max(action_list_flatten) - min(action_list_flatten)
        state_list = self.state_action_dict.keys()
        theta_list_flatten = [state[0] for state in state_list]
        theta_dot_list_flatten = [state[1] for state in state_list]
        # size of the state dimension in Q table
        size_theta = max(theta_list_flatten) - min(theta_list_flatten)
        size_theta_dot = max(theta_dot_list_flatten) - min(theta_dot_list_flatten)
        if size_action > size_theta and size_action > size_theta_dot:
            # Split along the action dim
            boundary_action = statistics.median(action_list_flatten)
            boundary_theta = np.inf
            boundary_theta_dot = np.inf
        elif size_theta > size_action and size_theta > size_theta_dot:
            # Split along the theta dim
            boundary_action = np.inf
            boundary_theta = statistics.median(theta_list_flatten)
            boundary_theta_dot = np.inf
        else:
            # Split along the theta_dot dim
            boundary_action = np.inf
            boundary_theta = np.inf
            boundary_theta_dot = statistics.median(theta_dot_list_flatten)
        # # offset
        # boundary_state = tuple([x + offset for x in boundary_state])
        # boundary_action += offset
        self.decision_boundary = np.array([boundary_theta, boundary_theta_dot, boundary_action])
        self.child_1: VariableResolutionPartition = VariableResolutionPartition()
        self.child_2: VariableResolutionPartition = VariableResolutionPartition()
class FunctionApproximator(ABC):
    """Common interface for incremental Q-value function approximators."""

    @abstractmethod
    def query(self, x):
        """Return the approximation (value and uncertainty) at input x."""

    @abstractmethod
    def update(self, x, y) -> float:
        """Incorporate the observation (x, y) into the approximation."""

    @abstractmethod
    def estimate_max(self, x):
        """Return the maximising action and the corresponding value estimate."""
class VariableResolutionApproximator(FunctionApproximator):
    """Q-value approximator backed by a variable-resolution partition tree."""

    def __init__(self):
        # Root node of the adaptive binary partition.
        self.data = VariableResolutionPartition()

    def query(self, state_action: Tuple[Tuple[float, float], float]) -> Tuple[float, float]:
        """Return (mean, variance) of the leaf holding the given (state, action)."""
        state, action = state_action
        _, leaf_mean, leaf_variance = self.data.get_value(state, action)
        return leaf_mean, leaf_variance

    def update(self, new_sample_state_action: Tuple[Tuple[float, float], float],
               new_sample_Q: float):
        """Route the new Q sample down the binary tree to its leaf."""
        new_sample_state, new_sample_action = new_sample_state_action
        self.data.update_value(new_sample_state, new_sample_action, new_sample_Q)

    def estimate_max(self, state_action: Tuple[Tuple[float, float], float]):
        """Thompson-style maximisation over a fixed grid of candidate actions.

        One Q value is sampled per candidate from N(mean, std); the action of
        the best sample is returned together with the best *mean* Q.
        """
        state, _ = state_action
        candidates = np.linspace(-5, 5, 20)
        sampled_qs = []
        means = []
        for candidate in candidates:
            mu, var = self.query((state, candidate))
            sampled_qs.append(np.random.normal(mu, np.sqrt(var)))
            means.append(mu)
        # argmax matches list.index(max(...)): ties resolve to the first hit.
        best_action = candidates[int(np.argmax(sampled_qs))]
        return best_action, max(means)
class GMMApproximator(FunctionApproximator):
    """Online Gaussian-mixture-model function approximator.

    Models the joint density of z = (x, y) as a mixture of Gaussians whose
    sufficient statistics are updated online by a forgetting-factor EM step;
    `query` conditions the joint density on x to obtain the mean and
    variance of y (GMM regression).
    """
    def __init__(self, input_dim, error_threshold, density_threshold, size_dimension, a=0.5, b=1):
        """Initialize the GMM with two identical zero-mean, unit-covariance Gaussians.

        :param input_dim: dimensionality of the input x (the output y adds one dim)
        :param error_threshold: squared-error level that would trigger spawning a
            new Gaussian (spawning is currently disabled, see `update`)
        :param density_threshold: density level below which a new Gaussian would
            be spawned (currently disabled)
        :param size_dimension: per-dimension extent of the domain; used for the
            reference volume and new-Gaussian covariance initialization
        :param a: forgetting / learning-rate hyper-parameter
        :param b: forgetting / learning-rate hyper-parameter
        """
        self.input_dim = input_dim
        self.error_threshold = error_threshold
        self.density_threshold = density_threshold
        self.a = a
        self.b = b
        # Joint dimension: input plus the scalar output.
        self.D = self.input_dim + 1
        self.d = size_dimension
        # Small reference volume used to localize the effective sample count.
        self.volume_eps = np.prod(self.d / 10)
        self.num_samples = 0
        self.gaussian_weights = np.array([0.5, 0.5])
        self.gaussian_means = np.zeros((2, self.D))
        self.gaussian_covariances = np.eye(self.D)
        self.gaussian_covariances = np.array([self.gaussian_covariances, self.gaussian_covariances])
        # Forgetting-weighted sufficient statistics, one entry per Gaussian.
        self.sum_zero_order = np.array([1, 1])  # [1]_t
        self.sum_first_order = np.zeros((2, self.D))  # [z]_t
        self.sum_second_order = np.zeros((2, self.D, self.D))  # [z*z.T]_t
        self.probs_cache = None
        # self.pos_cache = None

    @property
    def number_of_gaussians(self):
        # One weight per mixture component.
        return len(self.gaussian_weights)

    def query(self, x: List[float]) -> Tuple[np.ndarray, np.ndarray]:
        """Return the conditional mean and variance of y given input x."""
        x = np.array(x)
        # Partition each joint Gaussian into its x- and y-blocks.
        gaussian_means_x = self.gaussian_means[:, :-1]
        gaussian_covariances_xx = self.gaussian_covariances[:, :-1, :-1]
        gaussian_covariances_xy = self.gaussian_covariances[:, :-1, -1]
        gaussian_covariances_yx = self.gaussian_covariances[:, -1, :-1]
        gaussian_covariances_yy = self.gaussian_covariances[:, -1, -1]
        # Responsibility of each component for this x.
        beta = self.gaussian_weights * self._multivariate_pdf(x, gaussian_means_x,
                                                              gaussian_covariances_xx)
        beta = beta / np.sum(beta)
        gaussian_covariances_xx_inv = np.linalg.inv(gaussian_covariances_xx)
        gaussian_covariances_yx_xx_inv = np.sum(
            gaussian_covariances_yx[:, None, :] * gaussian_covariances_xx_inv, axis=-1)
        gaussian_means_y = self.gaussian_means[:, -1]
        distances_to_mean = x - gaussian_means_x
        # Per-component conditional means of y given x.
        means = gaussian_means_y + np.sum(gaussian_covariances_yx_xx_inv * distances_to_mean,
                                          axis=-1)
        mean = np.sum(beta * means)
        # Per-component conditional variances (Schur complement of the x-block).
        variances = gaussian_covariances_yy - np.sum(
            gaussian_covariances_yx_xx_inv * gaussian_covariances_xy, axis=-1)
        # Total variance: within-component plus between-component spread.
        variance = np.sum(beta * (variances + np.square(means - mean)))
        return mean, variance

    def update(self, x: List[float], y: float):
        """
        Expectation-maximization
        """
        # Joint sample z = (x, y); requires x to be a plain list.
        position_vector = np.array(x + [y])
        # E step - Calculate the activations
        self.probs_cache = self._get_probs(position_vector)
        self.pos_cache = position_vector
        w = self.gaussian_weights * self.probs_cache
        density = np.sum(w)
        w = w / density
        # M step - Update the parameters
        current_value_zero_order = w
        current_value_first_order = w[:, None] * position_vector[None, :]
        current_value_second_order = w[:, None, None] * np.outer(position_vector, position_vector)[None, :]
        # Time-dependent local adjusted forgetting
        local_num_of_samples = self.num_samples * density * self.volume_eps
        remind_factor = 1 - (1 - self.a) / (self.a * local_num_of_samples + self.b)
        # weight-depending forgetting -> forgets ONLY when new information is provided
        keep_previous_value_factor = np.power(remind_factor, w)
        apply_new_value_factor = (1 - keep_previous_value_factor) / (1 - remind_factor)
        self.sum_zero_order = keep_previous_value_factor * self.sum_zero_order + apply_new_value_factor * current_value_zero_order
        self.sum_first_order = keep_previous_value_factor[:, None] * self.sum_first_order \
                               + apply_new_value_factor[:, None] * current_value_first_order
        self.sum_second_order = keep_previous_value_factor[:, None, None] * self.sum_second_order \
                                + apply_new_value_factor[:, None, None] * current_value_second_order
        self.num_samples = self.num_samples + 1
        self._update_gaussians()
        # Check whether new Gaussian is required
        # NOTE(review): adaptive spawning of new Gaussians is disabled (the
        # logic is kept below as an inert string literal), so `approx_error`
        # is computed but currently unused.
        mu, std = self.query(x)
        approx_error = (y - mu) ** 2
        """if approx_error >= self.error_threshold:
            self.probs_cache = self._get_probs(position_vector)
            density = np.sum(self.gaussian_weights * self.probs_cache)
            if density <= self.density_threshold:
                self.generate_gaussian(position_vector)
        else:
            self.probs_cache = None"""

    def estimate_max(self, x: List[float]):
        # get the maximum value and the corresponding action
        # (Thompson-style: sample one Q per candidate action, pick the action
        # of the best sample, and return the best *mean* Q alongside it)
        action_list = np.linspace(-5, 5, 20)
        q_list = []
        mean_list = []
        for action in action_list:
            mean, variance = self.query([x[0], x[1], action])
            # the probability distribution for q, p(q|s,a)
            q_rand = np.random.normal(mean, np.sqrt(variance))
            q_list.append(q_rand)
            mean_list.append(mean)
        best_action = action_list[q_list.index(max(q_list))]
        max_mean = max(mean_list)
        return best_action, max_mean

    def _update_gaussians(self):
        """
        Adjust the parameters of a GMM from samples
        """
        # Recover weights / means / covariances from the sufficient statistics.
        self.gaussian_weights = self.sum_zero_order / np.sum(self.sum_zero_order)
        self.gaussian_means = self.sum_first_order / self.sum_zero_order[:, None]
        self.gaussian_covariances = self.sum_second_order / self.sum_zero_order[:, None, None] \
                                    - self.gaussian_means[:, :, None] * self.gaussian_means[:, None, :]
        # regularization covariance matrix -> prevent singularity
        w, _ = np.linalg.eig(self.gaussian_covariances)
        cov_matrix = self.gaussian_covariances[-1, :, :]
        min_w = np.amin(w)
        # Inflate the diagonal until the smallest eigenvalue is large enough.
        while min_w < 1e-6:
            reg_coef = 0.04
            var = np.trace(cov_matrix) / (self.D)
            var = max(var, 0.01)
            self.gaussian_covariances = self.gaussian_covariances + reg_coef * np.square(var) * np.eye(self.D)[None, :]
            w, _ = np.linalg.eig(self.gaussian_covariances)
            min_w = np.amin(w)
        # Parameters changed -> cached activations are stale.
        self.probs_cache = None
        # self.pos_cache = None

    # def get_activations(self, position_vector: np.ndarray):
    #     w = np.array(
    #         [weight * self._multivariate_pdf(position_vector, mean, cov) for weight, mean, cov in
    #          zip(self.gaussian_weights, self.gaussian_means, self.gaussian_covariances)])
    #     w = w / np.sum(w)
    #     return w

    # def get_probs(self, position_vector: np.ndarray):
    #     if self.pos_cache is not None:
    #         return self.probs_cache

    def plot_gaussians(self, ax, facecolor='none', edgecolor='red', **kwargs):
        """Draw one covariance ellipse per mixture component onto `ax`.

        Assumes 2-D components (input_dim == 1) — TODO confirm for other dims.
        """
        for weight, mean, covariance in zip(self.gaussian_weights, self.gaussian_means,
                                            self.gaussian_covariances):
            pearson = covariance[0, 1] / np.sqrt(covariance[0, 0] * covariance[1, 1])
            # Using a special case to obtain the eigenvalues of this
            # two-dimensional dataset.
            ell_radius_x = np.sqrt(1 + pearson)
            ell_radius_y = np.sqrt(1 - pearson)
            ellipse = Ellipse((0, 0),
                              width=ell_radius_x * 2,
                              height=ell_radius_y * 2,
                              facecolor=facecolor,
                              edgecolor=edgecolor)
            # Calculating the standard deviation of x from
            # the square root of the variance and multiplying
            # with the given number of standard deviations.
            scale_x = np.sqrt(covariance[0, 0])
            mean_x = mean[0]
            # calculating the standard deviation of y ...
            scale_y = np.sqrt(covariance[1, 1])
            mean_y = mean[1]
            transf = transforms.Affine2D() \
                .rotate_deg(45) \
                .scale(scale_x, scale_y) \
                .translate(mean_x, mean_y)
            ellipse.set_transform(transf + ax.transData)
            ax.add_artist(ellipse)

    def generate_gaussian(self, position_vector: np.ndarray):
        """
        New Gaussian initialization
        """
        w_new = 0.95
        zero_order_value = 1
        sum_zero_density = np.sum(self.sum_zero_order * self.probs_cache)
        # Initialization of Covariance Matrix of New Gaussian
        C = 1 / np.sqrt(2 * np.pi) \
            * np.power(
            np.square((w_new / (1 - w_new)) * (sum_zero_density / zero_order_value)) * np.prod(
                np.square(self.d))
            , -1 / (2 * self.D))
        new_mean = position_vector[None, :]
        new_covariance = np.diag(np.square(C * self.d))[None, :]
        new_zero_order_value = np.array([1.0])
        new_first_order_value = position_vector[None, :]
        new_second_order_value = new_covariance + np.outer(new_mean, new_mean)[None, :]
        # Append the new component's statistics and re-derive the parameters.
        self.sum_zero_order = np.concatenate((self.sum_zero_order, new_zero_order_value))
        self.sum_first_order = np.concatenate((self.sum_first_order, new_first_order_value))
        self.sum_second_order = np.concatenate((self.sum_second_order, new_second_order_value))
        self._update_gaussians()

    def _get_probs(self, position_vector: np.ndarray):
        # Per-component density of the joint sample.
        return self._multivariate_pdf(position_vector, self.gaussian_means,
                                      self.gaussian_covariances)

    # def _pdf(self, position_vector: np.ndarray):
    #     error = position_vector - self.gaussian_means
    #     quadratic_form = np.sum(np.sum(error[:, None, :] * np.linalg.inv(self.gaussian_covariances), axis=-1) * error, axis=-1)
    #     result = np.power(2 * np.pi, - self.D / 2) / np.sqrt(np.linalg.det(self.gaussian_covariances)) * np.exp(
    #         -.5 * quadratic_form) + np.finfo(float).eps
    #     return result

    def _multivariate_pdf(self, vector: np.ndarray, mean: np.ndarray, cov: np.ndarray):
        """
        Vectorized multivariate normal density, one value per component.

        Source: https://stackoverflow.com/questions/15120662/compute-probability-over-a-multivariate-normal
        :param vector: sample point
        :param mean: stacked component means, shape (k, D)
        :param cov: stacked component covariances, shape (k, D, D)
        :return: per-component densities (floored by machine eps to avoid zeros)
        """
        error = vector - mean
        quadratic_form = np.sum(
            np.sum(error[:, None, :] * np.linalg.inv(cov), axis=-1) * error,
            axis=-1)
        result = np.power(2 * np.pi, - self.D / 2) / np.sqrt(
            np.linalg.det(cov)) * np.exp(
            -.5 * quadratic_form) + np.finfo(float).eps
        return result
def variable_resolution_q_learning():
    """Q-learning on the inverted pendulum with a variable-resolution tree.

    Runs `num_episodes` episodes; each episode trains with epsilon-greedy
    exploration, then runs a greedy test rollout whose accumulated reward is
    recorded. Returns the list of per-episode test rewards.
    """
    env = InvertedPendulumEnvironment()
    # initialize
    Q_value_estimate = VariableResolutionApproximator()
    state = (pi, 0)
    action = random.choice(np.linspace(-5, 5, 20))
    Q_value_estimate.data.state_action_dict[state] = list([action])
    # loop
    num_episodes = 100
    num_iterations = 500
    eps0 = 1.25
    gamma = 0.96
    accumulated_reward = []
    # NOTE(review): `state` is NOT reset between episodes, only the env is —
    # the training rollout resumes from the last training state; confirm this
    # is intentional.
    for e in range(num_episodes):
        if e % 1 == 0:
            print("episode", e)
        # Training phase
        # observe current state s
        env.reset()
        for i in range(num_iterations):
            # execute a and get reward, observe new state s'
            env.state = state
            next_state, reward = env.step(action)
            # estimate Q_max (the `action` passed in is ignored by
            # estimate_max, which scans its own candidate grid)
            best_action, Q_max = Q_value_estimate.estimate_max((next_state, action))
            q = reward + gamma * Q_max
            # update
            Q_value_estimate.update((state, action), q)
            state = next_state
            # select an action according to the greedy policy
            action = best_action if random.random() > 1/(eps0 + 0.001 * i) else random.choice(np.linspace(-5, 5, 20))
            #if e % 10 == 0 & i % 200 == 0:
            #    print(Q_value_estimate.query(((0, 0), 0)))
        # Testing phase: greedy rollout from a fresh reset
        env.reset()
        test_action = random.choice(np.linspace(-5, 5, 20))
        reward = 0
        for i in range(num_iterations):
            # execute the action
            test_next_state, test_reward = env.step(test_action)
            reward += test_reward
            max_mean = -np.inf
            best_test_next_action = 0
            # select the best action based on the learned results
            action_list = np.linspace(-5, 5, 20)
            for test_next_action in action_list:
                mean, _ = Q_value_estimate.query((test_next_state, test_next_action))
                if mean > max_mean:
                    best_test_next_action = test_next_action
                    max_mean = mean
                else:
                    pass
            test_action = best_test_next_action
        accumulated_reward.append(reward)
    # Disabled animation rollout, kept as an inert string literal.
    """
    # Animation
    env.reset()
    action = random.uniform(-5, 5)
    for n in range(100):
        # Apply the action
        next_state, reward = env.step(action)
        # Render the simulation if needed
        env.render()
        # Plot the rewards if needed
        env.plot_reward()
        # Sleep for the simulation time, if needed
        env.wait_for_simulation()
        # Choose the best action after the training
        max_mean = -np.inf
        best_test_next_action = 0
        action_list = np.linspace(-5, 5, 100)
        for next_action in action_list:
            mean, _ = Q_value_estimate.query((next_state, next_action))
            if mean > max_mean:
                best_next_action = next_action
                max_mean = mean
            else:
                pass
        action = best_next_action
    """
    return accumulated_reward
def gmm_q_learning():
    """Q-learning on the inverted pendulum with a GMM function approximator.

    Same episode structure as variable_resolution_q_learning: epsilon-greedy
    training followed by a greedy test rollout per episode. Returns the list
    of per-episode test rewards.
    """
    env = InvertedPendulumEnvironment()
    # initialize
    Q_value_estimate = GMMApproximator(input_dim=3, error_threshold=100.0, density_threshold=1e-5, size_dimension=np.array([20, 20, 10, 50]), a=0.9, b=1)
    state = (pi, 0)
    action = random.choice(np.linspace(-5, 5, 20))
    # loop
    num_episodes = 100
    num_iterations = 500
    eps0 = 1.25
    gamma = 0.85
    accumulated_reward = []
    # NOTE(review): `state` is NOT reset between episodes, only the env is —
    # confirm this carry-over is intentional.
    for e in range(num_episodes):
        if e % 1 == 0:
            print("episode", e)
        # Training phase
        # observe current state s
        env.reset()
        for i in range(num_iterations):
            # execute a and get reward, observe new state s'
            env.state = state
            next_state, reward = env.step(action)
            # estimate Q_max over the candidate-action grid
            best_action, Q_max = Q_value_estimate.estimate_max([next_state[0], next_state[1], action])
            q = reward + gamma * Q_max
            # update
            Q_value_estimate.update([state[0], state[1], action], q)
            state = next_state
            # select an action according to the greedy policy
            action = best_action if random.random() > 1/(eps0 + 0.001 * i) else random.choice(np.linspace(-5, 5, 20))
            # if e % 10 == 0 & i % 200 == 0:
            #     print(Q_value_estimate.query([0, 0, 0]))
        # Testing phase: greedy rollout from a fresh reset
        env.reset()
        test_action = random.choice(np.linspace(-5, 5, 20))
        reward = 0
        for i in range(num_iterations):
            # execute the action
            test_next_state, test_reward = env.step(test_action)
            reward += test_reward
            max_mean = -np.inf
            best_test_next_action = 0
            # select the best action based on the learned results
            action_list = np.linspace(-5, 5, 20)
            for test_next_action in action_list:
                mean, _ = Q_value_estimate.query([test_next_state[0], test_next_state[1], test_next_action])
                if mean > max_mean:
                    best_test_next_action = test_next_action
                    max_mean = mean
                else:
                    pass
            test_action = best_test_next_action
        print(f"\r Reward: {reward}")
        if e % 1 == 0:
            print(f"\r Test Episode: {e}, Number of Gaussians: {Q_value_estimate.number_of_gaussians}, reward: {reward}")
        accumulated_reward.append(reward)
    # Disabled animation rollout, kept as an inert string literal.
    """
    # Animation
    env.reset()
    test_action = random.choice(np.linspace(-5, 5, 20))
    reward = 0
    for n in range(100):
        # execute the action
        test_next_state, test_reward = env.step(test_action)
        # Render the simulation if needed
        env.render()
        # Plot the rewards if needed
        env.plot_reward()
        reward += test_reward
        max_mean = -np.inf
        best_test_next_action = 0
        # select the best action based on the learned results
        action_list = np.linspace(-5, 5, 20)
        for test_next_action in action_list:
            mean, _ = Q_value_estimate.query([test_next_state[0], test_next_state[1], test_next_action])
            if mean > max_mean:
                best_test_next_action = test_next_action
                max_mean = mean
            else:
                pass
        test_action = best_test_next_action
    """
    return accumulated_reward
def exercise_1():
    """
    Function approximation of sinus using GMM.

    Trains a GMMApproximator on y = sin(x), sweeping the input interval
    backwards and forwards each epoch, until the MSE drops below the
    threshold; then plots the MSE evolution, the learned Gaussians and the
    final approximation with its uncertainty band.
    """
    # Define variables
    func_to_approximate = np.sin
    input_interval = [-5, 5]
    input_step = 0.1
    mse_threshold = 5e-3
    input_values_forth = np.arange(input_interval[0], input_interval[1] + input_step, input_step)
    output_values_forth = [func_to_approximate(x_t) for x_t in input_values_forth]
    input_values_back = np.flip(input_values_forth)
    output_values_back = np.flip(output_values_forth)
    epoch_swap_samples = [(input_values_back, output_values_back),
                          (input_values_forth, output_values_forth)]
    # Initialize the approximator
    approximator = GMMApproximator(input_dim=1, error_threshold=1e-3, density_threshold=0.1,
                                   size_dimension=np.array([10, 2]), a=0.9, b=1)
    # Training loop
    epoch_count = 0
    mse_values = []
    mse = None
    y_pred = None
    y_std = None
    converged = False
    while not converged:
        for input_swap_samples, output_swap_samples in epoch_swap_samples:
            # Feed every sample of this sweep to the approximator
            for x_t, y_t in zip(input_swap_samples, output_swap_samples):
                # y_t += np.random.normal(0, 0.5)
                approximator.update([x_t], y_t)
            # Estimate the MSE over the whole input range
            predictions = [approximator.query(x_t) for x_t in input_values_forth]
            y_pred = np.array([prediction[0] for prediction in predictions])
            y_std = np.array([prediction[1] for prediction in predictions])
            mse = np.mean(np.square(output_values_forth - y_pred))
            mse_values.append(mse)
            epoch_count += 1
            print(f"\r Epoch: {epoch_count}, MSE: {mse}, Number of Gaussians: {approximator.number_of_gaussians}")
            if mse <= mse_threshold:
                # BUG FIX: a bare `break` here only exits the inner for-loop,
                # so the surrounding `while True` never terminated; the flag
                # exits both loops once converged.
                converged = True
                break
    print(
        f"\rFunction approximation with {approximator.__class__.__name__} finished in {epoch_count} iterations!")
    print(f"Final MSE: {mse}")
    # Plot the MSE evolution
    plt.figure("MSE evolution")
    # BUG FIX: `plt.title = "..."` rebinds the pyplot function instead of
    # setting the title; it must be called.
    plt.title("Function approximation with GMM - MSE evolution")
    plt.xlabel("iteration")
    plt.ylabel("MSE")
    plt.plot(mse_values)
    # Plot the Gaussians
    plt.figure("GMM Approximator")
    ax = plt.gca()
    ax.set_aspect('equal')
    plt.title("Gaussians of the GMM")
    plt.xlabel("x")
    plt.ylabel("y = sin(x)")
    plt.plot(input_values_forth, output_values_forth)
    approximator.plot_gaussians(ax)
    # Plot the approximation
    plt.figure("Function approximation with GMM")
    ax = plt.gca()
    ax.set_aspect('equal')
    plt.title("Function approximation with GMM")
    plt.xlabel("x")
    plt.ylabel("y = sin(x)")
    plt.plot(input_values_forth, y_pred)
    plt.plot(input_values_forth, output_values_forth)
    plt.fill_between(input_values_forth, y_pred - y_std, y_pred + y_std, alpha=0.1)
    plt.ioff()
    plt.show()
def exercise_2():
    """Run both Q-learning variants from the same seed and plot the rewards.

    Note: reward1 (variable resolution) is computed but its plot line is
    currently commented out; only the GMM curve is drawn.
    """
    set_seed(0)
    reward1 = variable_resolution_q_learning()
    set_seed(0)
    reward2 = gmm_q_learning()
    plt.figure("Inverted Pendulum Q-Learning with FA")
    # BUG FIX: `plt.title = "..."` rebinds the pyplot function instead of
    # setting the title; it must be called.
    plt.title("Function approximation Q-Learning - Rewards Evolution")
    plt.xlabel("Number of Test Episodes")
    plt.ylabel("Accumulated Rewards")
    # plt.plot(reward1, 'b-', label='Variable Resolution')
    plt.plot(reward2, 'r-', label='GMM')
    plt.legend(loc='best')
    plt.show()
if __name__ == "__main__":
    # Entry point: seed the RNGs for reproducibility, then run the
    # Q-learning comparison experiment.
    set_seed(0)
    # init_logger()
    # variable_resolution_q_learning()
    # exercise_1()
    exercise_2()
|
import pyautogui
import time
import threading
import pandas as pd
import json
import os
import shutil
from selenium.webdriver.common.action_chains import ActionChains
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
def _completa_cnpj(cnpj):
print(cnpj)
if len(str(cnpj)) < 14:
diferenca = 14 - len(str(cnpj))
cnpj_aux = cnpj
cnpj = str(0) * diferenca
cnpj += cnpj_aux
return cnpj
else:
return str(cnpj)
def _exclui_arquivos(search_dir, string):
"""Excluir arquivos com a string no nome e que estejam no path"""
saved_path = os.getcwd()
try:
os.chdir(search_dir)
except:
os.mkdir(search_dir)
return
files = filter(os.path.isfile, os.listdir(search_dir))
files = [os.path.join(search_dir, f) for f in files] # add path to each file
indices = [] #lista pra guardar indices de arquivos que contem string
i = 0
for file in files:
if string in file:
indices.append(i)
i += 1
for i in indices:
try:
os.remove(files[i])
except:
pass
os.chdir(saved_path)
def _esperar(segundos):
i = 0
while i < segundos:
time.sleep(1)
if i < segundos:
time.sleep(1)
i += 1
def _pegar_ultimo_arquivo_modificado(search_dir):
saved_path = os.getcwd()
os.chdir(search_dir)
files = filter(os.path.isfile, os.listdir(search_dir))
files = [os.path.join(search_dir, f) for f in files] # add path to each file
files.sort(key=lambda x: os.path.getmtime(x))
os.chdir(saved_path)
return files[-1]
def _converte_mes(mes):
if str(mes) == "Janeiro":
return "01"
elif str(mes) == "Fevereiro":
return "02"
elif str(mes) == "Março":
return "03"
elif str(mes) == "Abril":
return "04"
elif str(mes) == "Maio":
return "05"
elif str(mes) == "Junho":
return "06"
elif str(mes) == "Julho":
return "07"
elif str(mes) == "Agosto":
return "08"
elif str(mes) == "Setembro":
return "09"
elif str(mes) == "Outubro":
return "10"
elif str(mes) == "Novembro":
return "11"
elif str(mes) == "Dezembro":
return "12"
def _aceitar_notas(ano, mes, browser):
    """Open the 'Declarar Notas Tomadas' screen, remove every listed note
    whose competence (year/month) differs from (ano, mes), then submit.

    :param ano: target year (compared as string)
    :param mes: Portuguese month name (converted via _converte_mes)
    :param browser: selenium webdriver already logged into the portal
    """
    _esperar(2)
    browser.switch_to.window(browser.window_handles[-1])  # switch to the newest window
    _esperar(2)
    browser.find_element_by_xpath("//*[contains(text(), 'Serviços Tomados')]").click()
    browser.find_element_by_xpath("//*[contains(text(), 'Declarar Notas Tomadas')]").click()
    browser.find_element_by_xpath("/html/body/div[1]/section/div/div/div/form/div[2]/ul/li[2]").click()
    browser.find_element_by_xpath("/html/body/div[1]/section/div/div/div/form/div[2]/div/div[2]/div[2]/input").click()
    _esperar(4)
    alert = browser.switch_to.alert  # switch to the alert dialog
    qtd_notas = [int(s) for s in str.split(alert.text) if s.isdigit()]  # number of notes listed on the page
    qtd_notas = qtd_notas[0]
    alert.accept()  # accept the alert
    _esperar(2)
    if qtd_notas > 0:
        mes = _converte_mes(mes)
        i = qtd_notas
        # Walk the rows in descending order — presumably so removals do not
        # renumber the rows still to be visited (verify against the page).
        while i >= 1:
            _esperar(2)
            aux = "/html/body/div[1]/section/div/div/div/form/div[2]/div/div[2]/div[1]/div[" + str(
                i) + "]/div[5]/div[1]/select"
            aux = browser.find_element_by_xpath(aux).text
            # The select text ends with the month and starts with the year.
            mes_aux = aux[-2:]
            ano_aux = aux[:4]
            if mes_aux != mes or str(ano_aux) != str(ano):  # if the note is not from the target competence...
                print(mes_aux, mes, ano_aux, ano, i)
                browser.find_element_by_xpath(
                    "/html/body/div[1]/section/div/div/div/form/div[2]/div/div[2]/div[1]/div[" + str(
                        i) + "]/div[11]/input").click()  # ... click "remove"
            i -= 1
        _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/form/div[3]/input")
        try:
            WebDriverWait(browser, 3).until(ec.alert_is_present())
            browser.switch_to.alert.accept()  # accept the confirmation alert
        except:
            # no confirmation alert appeared within 3 s — nothing to accept
            pass
def _clicar_pelo_XPATH(browser, xpath):
    """Locate the element at `xpath`, move the mouse to it and click it.

    A short pause is taken first to let the page settle.
    """
    time.sleep(1)
    alvo = browser.find_element_by_xpath(xpath)
    ActionChains(browser).move_to_element(alvo).click(alvo).perform()
def _relatorio_tomados(nome_pasta, ano, mes, browser):
    """Generate and download the 'Livro Digital' (taken services) report for
    the given company folder and competence, saving the printed PDFs into the
    shared accounting directory.

    Returns a short status string: "Não existe pasta" when the company folder
    is missing, otherwise "Arquivos Salvos".
    """
    path = "P:\documentos\OneDrive - Novus Contabilidade\Doc Compartilhado\Contábil\\NFSe tomados\\"
    mes_aux = _converte_mes(mes)+"."+ano
    path += nome_pasta + "\\" + ano + "\\" + mes_aux
    try:  # Try to create the competence folder
        os.mkdir(path)
    except FileNotFoundError:  # The year folder does not exist...
        try:  # ...create the year folder, then the competence folder inside it.
            path = "P:\documentos\OneDrive - Novus Contabilidade\Doc Compartilhado\Contábil\\NFSe tomados\\"
            path += nome_pasta + "\\" + ano
            os.mkdir(path)
            path += "\\" + mes_aux
            os.mkdir(path)
        except FileNotFoundError:  # Year folder could not be created: the company folder is missing
            return "Não existe pasta"
    except FileExistsError:  # Folder already exists: keep going
        pass
    _clicar_pelo_XPATH(browser, "//*[contains(text(), 'Livro Digital')]")  # Click "Livro Digital"
    browser.find_elements_by_xpath("//*[contains(text(), 'Livro Digital')]")[1].click()  # Click "Livro Digital" again (the click helper cannot be used because the text is ambiguous)
    _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/form[2]/input[3]")  # Click "Gerar Novo Livro Digital"
    browser.find_element_by_id("tipodec").find_element_by_xpath("//option[@value='T']").click()  # Select "Tomados" books
    # A DAM payment slip is offered only when this option text is present.
    if "FECHAR O LIVRO E GERAR A O DAM" in str(browser.find_element_by_xpath("/html/body/div[1]/section/div/div/div/div/form/div[3]/div/select").text):
        dam = True
    else:
        dam = False
    elem = browser.find_element_by_xpath("/html/body/div[1]/section/div/div/div/div/form/div[4]/div/span")
    _esperar(5)
    # Parse the span's inline style (e.g. "display: none") to find out
    # whether the book is open ("none") or closed ("inline").
    aux = str(elem.get_attribute('style'))[str(str(elem.get_attribute('style'))).find(":")+2:-1]
    # try:  # Book is open
    if aux == "none":  # Book is open
        _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/div/form/div[6]/div/input")  # Click "Gerar"
        _esperar(5)
        try:
            browser.switch_to.alert.accept()  # Accept the alert
            browser.switch_to.alert.accept()  # Accept the alert
        except:
            pass
        browser.switch_to.window(browser.window_handles[0])
        browser.switch_to.window(browser.window_handles[-1])
        _esperar(5)
        # Print the report; the browser is configured to save PDFs into the
        # Downloads folder, from which the newest file is moved away.
        browser.execute_script('window.print();')
        _esperar(5)
        browser.close()
        origem = _pegar_ultimo_arquivo_modificado("E:\\Users\\thiago.madeira\\Downloads")#("C:\\Users\\"+os.getlogin()+"\Downloads")("C:\\Users\\" + os.getlogin() + "\Downloads")
        destino = path
        try:
            shutil.move(origem, destino)
        except:
            pass
        browser.switch_to.window(browser.window_handles[0])
        browser.switch_to.window(browser.window_handles[-1])
        if dam == True:
            # Also print and archive the DAM payment slip.
            _esperar(5)
            browser.execute_script('window.print();')
            _esperar(5)
            browser.close()
            browser.switch_to.window(browser.window_handles[0])
            browser.switch_to.window(browser.window_handles[-1])
            origem = _pegar_ultimo_arquivo_modificado("E:\\Users\\thiago.madeira\\Downloads")#("C:\\Users\\"+os.getlogin()+"\Downloads")("C:\\Users\\"+os.getlogin()+"\Downloads")
            destino = path
            try:
                shutil.move(origem, destino)
            except:
                os.remove(origem)
        return "Arquivos Salvos"
    # except:  # Livro está fechado
    elif aux == "inline":  # Book is closed: download the accepted-notes report instead
        competencia = str(ano) + '-' + str(_converte_mes(mes))
        try:
            _clicar_pelo_XPATH(browser, "//*[contains(text(), 'Relatórios')]")  # Click "Relatórios"
            _clicar_pelo_XPATH(browser, "//*[contains(text(), 'Relatório de Notas Aceitas')]")  # Click "Relatório de Notas Aceitas"
            browser.find_elements_by_xpath("//option[@value='" + competencia + "']")[1].click()  # Select the final period
            browser.find_element_by_xpath("//option[@value='" + competencia + "']").click()  # Select the initial period
            _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/form/div[3]/div/input")  # Click "Gerar Relatório"
        except:  # It is a commerce company: different menu layout
            _clicar_pelo_XPATH(browser, "/html/body/div[1]/div[1]/div[2]/div/div[2]/div/nav/ul/li[3]/a")  # Click "Livro Digital"
            _clicar_pelo_XPATH(browser, "/html/body/div[1]/div[1]/div[2]/div/div[2]/div/nav/ul/li[3]/ul/li[1]/a")  # Click "Livro Digital" again
            _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/form[2]/div[1]/input")  # Click "Procurar"
            browser.find_elements_by_xpath("//option[@value='" + competencia + "']")[1].click()  # Select the final period
            browser.find_element_by_xpath("//option[@value='" + competencia + "']").click()  # Select the initial period
            _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/form[2]/div[2]/input[1]")  # Click "Procurar"
            _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/form[2]/table/tbody[3]/tr[1]/td[2]/a")  # Click the first result
        browser.switch_to.window(browser.window_handles[0])
        browser.switch_to.window(browser.window_handles[-1])
        browser.execute_script('window.print();')
        browser.close()
        browser.switch_to.window(browser.window_handles[0])
        browser.switch_to.window(browser.window_handles[-1])
        origem = _pegar_ultimo_arquivo_modificado("E:\\Users\\thiago.madeira\\Downloads")#("C:\\Users\\"+os.getlogin()+"\Downloads")("C:\\Users\\"+os.getlogin()+"\Downloads")
        destino = path
        try:
            shutil.move(origem, destino)
        except:
            os.remove(origem)
        return "Arquivos Salvos"
def _pdf_notas_tomados(nome_contabil, ano, mes, browser):
    """Print (to PDF) all service notes taken in the municipality for the
    given competence and move the file into the company's accounting folder.

    Returns True on success, False when no notes exist for the competence.
    """
    path = "P:\documentos\OneDrive - Novus Contabilidade\Doc Compartilhado\Contábil\\NFSe tomados\\"
    mes_aux = _converte_mes(mes) + "." + ano
    path += nome_contabil + "\\" + ano + "\\" + mes_aux
    #browser.switch_to.window(browser.window_handles[-1])
    competencia = str(ano)+'-'+str(_converte_mes(mes))
    browser.find_element_by_xpath("//*[contains(text(), 'Serviços Tomados')]").click()
    browser.find_element_by_xpath("//*[contains(text(), 'Notas Tomadas no Municipio')]").click()
    _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/form/div[1]/input")  # Search field
    browser.find_element_by_id("ListGeneratorFind_periodoTrib").find_element_by_xpath("//option[@value='"+competencia+"']").click()  # Select the competence
    _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/form/div[2]/input[8]")  # "Procurar" button
    _esperar(2)
    _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/form/table/thead/tr/th[1]/input")  # Tick the select-all checkbox
    try:
        _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/form/table/tbody[1]/tr/td/a")  # Select all rows
    except:
        # No rows were listed for this competence: close the tab and report failure.
        browser.close()
        browser.switch_to.window(browser.window_handles[0])
        browser.switch_to.window(browser.window_handles[-1])
        return False
    _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/form[1]/input[3]")  # PDF
    _esperar(5)
    browser.switch_to.window(browser.window_handles[0])
    browser.switch_to.window(browser.window_handles[-1])
    _esperar(5)
    # Print the PDF; the browser saves it into the Downloads folder.
    browser.execute_script('window.print();')
    _esperar(2)
    browser.close()
    browser.switch_to.window(browser.window_handles[-1])
    origem = _pegar_ultimo_arquivo_modificado("E:\\Users\\thiago.madeira\\Downloads")#("C:\\Users\\"+os.getlogin()+"\Downloads")("C:\\Users\\"+os.getlogin()+"\Downloads")
    # Give the freshly printed file a fixed name before archiving it.
    os.rename(origem, "E:\\Users\\thiago.madeira\\Downloads\\Notas.pdf")
    origem = _pegar_ultimo_arquivo_modificado("E:\\Users\\thiago.madeira\\Downloads")#("C:\\Users\\"+os.getlogin()+"\Downloads")("C:\\Users\\" + os.getlogin() + "\Downloads")
    destino = path
    try:
        shutil.move(origem, destino)
    except:
        # Could not archive (e.g. the file already exists): drop the download.
        os.remove(origem)
    return True
def _exportar_notas_fiscais_tomados(nome_contabil, ano, mes, browser):
    """Export the XML (ABRASF 2.02) of all taken service notes for the given
    competence, replacing any .xml files already in the company folder.

    Returns None; silently does nothing when there are no notes to export.
    """
    path = "P:\documentos\OneDrive - Novus Contabilidade\Doc Compartilhado\Contábil\\NFSe tomados\\"
    path += nome_contabil
    competencia = str(ano)+'-'+str(_converte_mes(mes))
    _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/form/div[1]/input")  # Search field
    browser.find_element_by_id("ListGeneratorFind_periodoTrib").find_element_by_xpath("//option[@value='"+competencia+"']").click()  # Select the competence
    _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/form/div[2]/input[8]")  # "Procurar" button
    _esperar(3)
    _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/form/table/thead/tr/th[1]/input")  # Tick the select-all checkbox
    try:
        _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/form/table/tbody[1]/tr/td/a")  # Select all rows
    except:
        # Nothing listed for this competence: nothing to export.
        return
    _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/form/table/tbody[1]/tr/td/a")  # Select all rows (clicked again, as in the original flow)
    _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/form[1]/input[7]")  # Export Xml Abrasf 2.02
    _esperar(3)
    browser.switch_to.window(browser.window_handles[-1])
    browser.close()
    browser.switch_to.window(browser.window_handles[-1])
    origem = _pegar_ultimo_arquivo_modificado("E:\\Users\\thiago.madeira\\Downloads")#("C:\\Users\\"+os.getlogin()+"\Downloads")("C:\\Users\\"+os.getlogin()+"\Downloads")
    destino = path
    # Remove previous exports before moving the new one in.
    _exclui_arquivos(destino, ".xml")
    try:
        shutil.move(origem, destino)
    except:
        # Could not archive: drop the downloaded file instead.
        os.remove(origem)
def _relatorio_prestados(nome_pasta, ano, mes, browser):
    """Generate/save the services-provided (prestados) report for one client/month.

    Two site states are handled: if the digital book ("Livro Digital") is still
    open it is generated and its PDF printed; if it is already closed, the
    NFS-e report for the competence period is printed instead. The printed PDF
    is moved into the client's folder on the P: drive.

    Returns a short status string: "Eh comércio", "Não existe pasta" or
    "Arquivos Salvos" (implicitly None when the open/closed state is not detected).
    """
    browser.switch_to.window(browser.window_handles[0])
    browser.switch_to.window(browser.window_handles[-1])
    elem = None
    try: # Check whether the client is a commerce (no service book page)
        elem = browser.find_element_by_xpath("/html/body/div[1]/section/div/div/div/p")
        if elem != None:
            browser.close()
            browser.switch_to.window(browser.window_handles[0])
            browser.switch_to.window(browser.window_handles[-1])
            return "Eh comércio"
    except:
        pass
    # "DES CONSOLIDADA" clients produce an extra printout further below.
    desconsolidada = False
    if browser.find_element_by_xpath("/html/body/div[1]/div[1]/div[3]/div/div[1]/div[2]").text == "DES CONSOLIDADA":
        desconsolidada = True
    path = "P:\documentos\OneDrive - Novus Contabilidade\Doc Compartilhado\Fiscal\Impostos\Federais\Empresas\\"
    mes_aux = _converte_mes(mes)+"."+ano
    path += nome_pasta + "\\" + ano + "\\" + mes_aux
    # Ensure <client>/<year>/<MM.year> exists; fall back to creating the year
    # folder first, then a month folder named after the raw `mes` string.
    try:
        os.mkdir(path)
    except FileNotFoundError:
        try:
            path = "P:\documentos\OneDrive - Novus Contabilidade\Doc Compartilhado\Fiscal\Impostos\Federais\Empresas\\"
            path += nome_pasta + "\\" + ano
            os.mkdir(path)
            path += "\\" + mes
            if path[-1] == " ":
                path = path[:-1]
            os.mkdir(path)
        except FileNotFoundError:
            return "Não existe pasta"
    except FileExistsError:
        pass
    _clicar_pelo_XPATH(browser, "//*[contains(text(), 'Livro Digital')]") # Click "Livro Digital"
    browser.find_elements_by_xpath("//*[contains(text(), 'Livro Digital')]")[1].click() # Click "Livro Digital" again (helper not used because of the ambiguous match)
    time.sleep(1)
    _clicar_pelo_XPATH(browser,
                       "/html/body/div[1]/section/div/div/div/form[2]/input[3]") # Click "Generate new digital book"
    time.sleep(1)
    elem = browser.find_element_by_xpath("/html/body/div[1]/section/div/div/div/div/form/div[4]/div/span")
    # Workaround: fixed 5-second wait for the page to settle
    i = 0
    while i < 5:
        if i < 5:
            time.sleep(1)
        i += 1
    # Extract the value of the span's inline `display:` style ("none"/"inline")
    # to discover whether the book is open or closed.
    aux = str(elem.get_attribute('style'))[
        str(str(elem.get_attribute('style'))).find(":") + 2:-1] # Find out whether the book is open or closed
    if aux == "none": # Book is open
        #try:
        _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/div/form/div[6]/div/input") # Click "Generate"
        WebDriverWait(browser, 3).until(ec.alert_is_present())
        browser.switch_to.alert.accept() # Accept the alert
        browser.switch_to.alert.accept() # Accept the alert (a second one appears)
        browser.switch_to.window(browser.window_handles[0])
        browser.switch_to.window(browser.window_handles[-1])
        # --kiosk-printing Chrome flag makes window.print() save a PDF silently.
        browser.execute_script('window.print();')
        browser.close()
        browser.switch_to.window(browser.window_handles[0])
        browser.switch_to.window(browser.window_handles[-1])
        if desconsolidada == True:
            # DES CONSOLIDADA clients get a second printout from the extra tab.
            browser.execute_script('window.print();')
            browser.close()
            browser.switch_to.window(browser.window_handles[0])
            browser.switch_to.window(browser.window_handles[-1])
        origem = _pegar_ultimo_arquivo_modificado("E:\\Users\\thiago.madeira\\Downloads")#("C:\\Users\\"+os.getlogin()+"\Downloads")("C:\\Users\\"+os.getlogin()+"\Downloads")
        destino = path
        try:
            shutil.move(origem, destino)
        except:
            pass
        return "Arquivos Salvos"
    elif aux == "inline": # Book is closed
        #except:
        _clicar_pelo_XPATH(browser, "//*[contains(text(), 'Relatórios')]") # Click "Relatórios"
        # _clicar_pelo_XPATH(browser, "//*[contains(text(), 'Relatório de NFS’e Emitidas')]") # Click "Relatórios"
        _clicar_pelo_XPATH(browser, "/html/body/div[1]/div[1]/div[2]/div/div[2]/div/nav/ul/li[8]/ul/li[1]/a") # Click the issued-NFSe report entry
        competencia = str(ano) + '-' + str(_converte_mes(mes))
        time.sleep(1)
        browser.find_elements_by_xpath("//option[@value='" + competencia + "']")[1].click() # Select the final period
        browser.find_element_by_xpath("//option[@value='" + competencia + "']").click() # Select the initial period
        time.sleep(1)
        _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/form/div[2]/div/input") # Click "Generate report"
        # Save the invoice report as PDF
        browser.switch_to.window(browser.window_handles[0])
        browser.switch_to.window(browser.window_handles[-1])
        browser.execute_script('window.print();')
        browser.close()
        browser.switch_to.window(browser.window_handles[0])
        browser.switch_to.window(browser.window_handles[-1])
        origem = _pegar_ultimo_arquivo_modificado("E:\\Users\\thiago.madeira\\Downloads")#("C:\\Users\\"+os.getlogin()+"\Downloads")("C:\\Users\\"+os.getlogin()+"\Downloads")
        destino = path
        try:
            shutil.move(origem, destino)
        except:
            pass
        return "Arquivos Salvos"
def _exportar_notas_fiscais_prestados(nome_contabil, browser, ano, mes):
    """Export the Abrasf 2.0 XML of services-provided (prestados) invoices for
    one client/month and move the download into the client's NFSe folder.

    Returns "Arquivos Salvos" when an .xml download was found and moved,
    "Sem Movimento" otherwise (no invoices for the period).
    """
    path = "P:\documentos\OneDrive - Novus Contabilidade\Doc Compartilhado\Contábil\\NFSe\\"
    path += nome_contabil
    _clicar_pelo_XPATH(browser, "//*[contains(text(), 'Notas Eletrônicas')]") # Click "Notas Eletrônicas"
    _clicar_pelo_XPATH(browser, "//*[contains(text(), 'Exportar Notas')]") # Click "Exportar Notas"
    # Site options use the month number without leading zero, hence int().
    mes = int(_converte_mes(mes))
    _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/form/div[1]/div[1]/div/select")
    browser.find_element_by_xpath("//option[@value='" + str(mes) + "']").click() # Select month
    browser.find_element_by_xpath("//option[@value='" + str(ano) + "']").click() # Select year
    _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/form/div[2]/div/input[4]") # Click "Export XML Abrasf 2.0"
    _esperar(2)
    # Close the export tab and focus the last remaining window.
    browser.close()
    browser.switch_to.window(browser.window_handles[0])
    browser.switch_to.window(browser.window_handles[-1])
    origem = _pegar_ultimo_arquivo_modificado("E:\\Users\\thiago.madeira\\Downloads")#("C:\\Users\\"+os.getlogin()+"\Downloads")
    destino = path
    # Clear previous XMLs so the folder only holds this month's export.
    _exclui_arquivos(destino, ".xml")
    if ".xml" in str(origem):
        shutil.move(origem, destino)
        return "Arquivos Salvos"
    else:
        return "Sem Movimento"
def _selecionar_certificado():
    """Press Enter to accept the native certificate-selection dialog.

    Run on a helper thread (see _tomados/_prestados) because the dialog blocks
    the Selenium call that opens the login page.
    """
    time.sleep(1)
    #pyautogui.press('down')
    pyautogui.press('enter')
def _tomados(ano, mes, dfs):
    """Batch-process services-taken (tomados) invoices for every client listed
    in the CSV files `dfs`, writing a per-run status report text file.

    For each client: look it up on the NFSe site by CNPJ, accept pending
    invoices, save the report/PDFs and export the XMLs. A line per client is
    appended to relatorio_tomados_<mes>-<ano>.txt.
    """
    ### BEGIN - Configuration for silent save-as-PDF printing ###
    appState = {
        "recentDestinations": [
            {
                "id": "Save as PDF",
                "origin": "local",
                "account": ""
            }
        ],
        "selectedDestinationId": "Save as PDF",
        "version": 2
    }
    profile = {'printing.print_preview_sticky_settings.appState': json.dumps(appState),
               'safebrowsing.enabled': 'false',
               # 'savefile.default_directory': 'C:\\Users\\thiago.madeira\Desktop\\temp'
               }
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_experimental_option('prefs', profile)
    # --kiosk-printing makes window.print() save without showing the dialog.
    chrome_options.add_argument('--kiosk-printing')
    ### END - Configuration for silent save-as-PDF printing ###
    browser = webdriver.Chrome(options=chrome_options,
                               executable_path= "P:\documentos\OneDrive - Novus Contabilidade\Doc Compartilhado\Sistemas Internos\Fechar Livros\chromedriver.exe")
    browser.implicitly_wait(1)
    # Helper thread presses Enter in the blocking certificate dialog.
    thread1 = threading.Thread(target=_selecionar_certificado)
    thread1.start()
    browser.get('https://nfse.pjf.mg.gov.br/site/')
    browser.maximize_window()
    browser.get('https://nfse.pjf.mg.gov.br/contador/login.php')
    _clicar_pelo_XPATH(browser, "/html/body/div[1]/div/div[2]/div/div[2]/div/nav/ul/li[1]/a/span[1]") # "Utilitários" menu
    _clicar_pelo_XPATH(browser, "/html/body/div[1]/div/div[2]/div/div[2]/div/nav/ul/li[1]/ul/li[5]/a") # "Visitar Cliente"
    _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/form/div[1]/input") # Search field
    for df in dfs:
        # Each CSV lists clients; column 0 = folder name, column 1 = CNPJ (kept as str).
        df = pd.read_csv(df, encoding='latin-1', sep=';', converters={'CNPJ': lambda x: str(x)})
        for index in df.index:
            cnpj = _completa_cnpj(df.at[index, df.columns[1]])
            nome_pasta = df.at[index, df.columns[0]]
            if nome_pasta[-1] == " ": # Strip a trailing blank from the folder name
                nome_pasta = nome_pasta[:-1]
            browser.find_element_by_xpath("/html/body/div[1]/section/div/div/div/form/div[2]/input[1]").clear()
            browser.find_element_by_xpath("/html/body/div[1]/section/div/div/div/form/div[2]/input[1]").send_keys(str(cnpj)) # Type the CNPJ
            _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/form/div[2]/input[5]") # Click "Search"
            _esperar(2)
            # Click the first search result (retry loop until it appears or the
            # "no results" message is shown).
            nao_cadastrado = False
            deu = False
            while deu == False:
                try:
                    _clicar_pelo_XPATH(browser,
                                       "/html/body/div[1]/section/div/div/div/form/table/tbody[3]/tr[1]/td[2]/a")
                    deu = True
                except:
                    _esperar(1)
                    try:
                        if browser.find_element_by_xpath(
                                "/html/body/div[1]/section/div/div/div/form/table/tbody[3]/tr/td").text == "Não foram encontrados resultados que atendem à sua pesquisa.":
                            nao_cadastrado = True
                            break
                    except:
                        pass
            if nao_cadastrado == True:
                _esperar(2)
                file = open("relatorio_tomados_" + mes + "-" + ano + ".txt", "a+")
                file.write(nome_pasta + " - " + "Não cadastrado no site" + "\n")
                file.close()
            else:
                _aceitar_notas(ano, mes, browser)
                aux = _relatorio_tomados(nome_pasta, ano, mes, browser) # tomados
                tem_nota = _pdf_notas_tomados(nome_pasta, ano, mes, browser) # tomados
                if tem_nota:
                    _exportar_notas_fiscais_tomados(nome_pasta, ano, mes, browser) # tomados
                else:
                    aux += " (sem movimento)"
                file = open("relatorio_tomados_" + mes + "-" + ano + ".txt", "a+")
                file.write(nome_pasta + " - " + aux + "\n")
                file.close()
def _prestados(ano, mes, dfs):
    """Batch-process services-provided (prestados) invoices for every client
    listed in the CSV files `dfs`, writing a per-run status report text file.

    Mirrors _tomados: same Chrome/print setup and client lookup, but calls the
    prestados report/export helpers. Appends one status line per client to
    relatorio_prestados_<mes>-<ano>.txt.
    """
    ### BEGIN - Configuration for silent save-as-PDF printing ###
    appState = {
        "recentDestinations": [
            {
                "id": "Save as PDF",
                "origin": "local",
                "account": ""
            }
        ],
        "selectedDestinationId": "Save as PDF",
        "version": 2
    }
    profile = {'printing.print_preview_sticky_settings.appState': json.dumps(appState),
               'safebrowsing.enabled': 'false',
               #'savefile.default_directory': 'C:\\Users\\thiago.madeira\Desktop\\temp'
               }
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_experimental_option('prefs', profile)
    chrome_options.add_argument('--kiosk-printing')
    ### END - Configuration for silent save-as-PDF printing ###
    browser = webdriver.Chrome(options=chrome_options,
                               executable_path="P:\documentos\OneDrive - Novus Contabilidade\Doc Compartilhado\Sistemas Internos\Fechar Livros\chromedriver.exe")
    browser.implicitly_wait(1)
    # Helper thread presses Enter in the blocking certificate dialog.
    thread1 = threading.Thread(target=_selecionar_certificado)
    thread1.start()
    browser.get('https://nfse.pjf.mg.gov.br/site/')
    browser.maximize_window()
    browser.get('https://nfse.pjf.mg.gov.br/contador/login.php')
    _clicar_pelo_XPATH(browser, "/html/body/div[1]/div/div[2]/div/div[2]/div/nav/ul/li[1]/a/span[1]") # "Utilitários" menu
    try:
        _clicar_pelo_XPATH(browser, "/html/body/div[4]/div/input") # Dismiss the city-hall coronavirus notice, if shown
    except:
        pass
    _clicar_pelo_XPATH(browser, "/html/body/div[1]/div/div[2]/div/div[2]/div/nav/ul/li[1]/ul/li[5]/a") # "Visitar Cliente"
    _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/form/div[1]/input") # Search field
    for df in dfs:
        # Each CSV lists clients; column 0 = folder name, column 1 = CNPJ (kept as str).
        df = pd.read_csv(df, encoding='latin-1', sep=';', converters={'CNPJ': lambda x: str(x)})
        for index in df.index:
            cnpj = _completa_cnpj(df.at[index, df.columns[1]])
            nome_pasta = df.at[index, df.columns[0]]
            if nome_pasta[-1] == " ": # Strip a trailing blank from the folder name
                nome_pasta = nome_pasta[:-1]
            browser.switch_to.window(browser.window_handles[0])
            browser.find_element_by_xpath("/html/body/div[1]/section/div/div/div/form/div[2]/input[1]").clear()
            browser.find_element_by_xpath("/html/body/div[1]/section/div/div/div/form/div[2]/input[1]").send_keys(str(cnpj)) # Type the CNPJ
            _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/form/div[2]/input[5]") # Click "Search"
            file = open("relatorio_prestados_" + mes + "-" + ano + ".txt", "a+")
            # Click the first search result (retry loop until it appears or the
            # "no results" message is shown).
            nao_cadastrado = False
            deu = False
            while deu == False:
                try:
                    _clicar_pelo_XPATH(browser, "/html/body/div[1]/section/div/div/div/form/table/tbody[3]/tr[1]/td[2]/a")
                    deu = True
                except :
                    time.sleep(1)
                    try:
                        if browser.find_element_by_xpath("/html/body/div[1]/section/div/div/div/form/table/tbody[3]/tr/td").text == "Não foram encontrados resultados que atendem à sua pesquisa.":
                            nao_cadastrado = True
                            break
                    except:
                        pass
            if nao_cadastrado == True:
                file.write(nome_pasta + " - " + "Não cadastrado no site" + "\n")
                file.close()
            else:
                aux = _relatorio_prestados(nome_pasta, ano, mes, browser)
                print (aux)
                if aux == "Arquivos Salvos":
                    # Only export the XMLs when the report step succeeded.
                    aux = _exportar_notas_fiscais_prestados(nome_pasta, browser, ano, mes)
                    file.write(nome_pasta + " - " + aux + "\n")
                    file.close()
                else:
                    file.write(nome_pasta + " - " + aux + "\n")
                    file.close()
# Read two numbers, report the smaller one, its square, and the quotient
# of the larger by the smaller (guarding against division by zero).
getal1 = float(input("Geef een getal in: "))
getal2 = float(input("Geef een getal in: "))

# Order the pair: smallest first, largest second.
if getal1 < getal2:
    kleinste_getal, grootste_getal = getal1, getal2
else:
    kleinste_getal, grootste_getal = getal2, getal1

# Division by zero is not defined; substitute an explanatory message.
if kleinste_getal == 0:
    antwoord = "Je kan niet delen door nul"
else:
    antwoord = grootste_getal / kleinste_getal

print("Het kleinste getal is", kleinste_getal, "Het kwadraat van het kleinste getal is", kleinste_getal**2,"Het grootste getal gedeeld door het kleinste getal is", antwoord)
|
from shared.common_query import FilterableMixin, ArithmeticOperable, Comparable
class Aggregation:
    # Marker base class for aggregation nodes; carries no behavior of its own.
    pass
class Count(FilterableMixin, ArithmeticOperable, Comparable, Aggregation):
    """Aggregation that counts the items a queryset yields."""
    def reducer(self, queryset):
        # Consume the queryset fully and tally the items.
        total = 0
        for _ in queryset:
            total += 1
        return total
class Sum(FilterableMixin, ArithmeticOperable, Comparable, Aggregation):
    """Aggregation that sums the values of ``self.field`` over a queryset."""
    def reducer(self, queryset):
        total = 0
        for item in queryset:
            total += queryset.compiler.get_value(item, self.field)
        return total
class Has(FilterableMixin, ArithmeticOperable, Comparable, Aggregation):
    """Aggregation that reports whether the queryset yields any truthy item."""
    def reducer(self, queryset):
        # Materialize first so the queryset is consumed exactly as before.
        items = list(queryset)
        for item in items:
            if item:
                return True
        return False
class Mean(FilterableMixin, ArithmeticOperable, Comparable, Aggregation):
    """Aggregation computing the arithmetic mean of ``self.field``.

    Delegates to Sum and Count with the same field/query; raises
    ZeroDivisionError on an empty queryset, like the original.
    """
    def reducer(self, queryset):
        total = Sum(self.field).where(self.query).reducer(queryset)
        count = Count(self.field).where(self.query).reducer(queryset)
        return total / count
class Median(FilterableMixin, ArithmeticOperable, Comparable, Aggregation):
    """Aggregation computing the median of ``self.field``.

    Returns None on an empty queryset; for an even number of values the two
    middle values are averaged.
    """
    def reducer(self, queryset):
        values = sorted(queryset.compiler.get_value(item, self.field) for item in queryset)
        count = len(values)
        if count == 0:
            return None
        middle = count // 2
        if count % 2 == 1:
            return values[middle]
        # Even count: average the two central values.
        return (values[middle - 1] + values[middle]) / 2
class Collect(FilterableMixin, ArithmeticOperable, Comparable, Aggregation):
    """Aggregation gathering the raw values of ``self.field`` into a list."""
    def reducer(self, queryset):
        return [queryset.compiler.get_value(item, self.field) for item in queryset]
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .job_output import JobOutput
class JobOutputAsset(JobOutput):
    """Represents an Asset used as a JobOutput.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    All required parameters must be populated in order to send to Azure.
    :ivar error: If the JobOutput is in the Error state, it contains the
     details of the error.
    :vartype error: ~azure.mgmt.media.models.JobError
    :ivar state: Describes the state of the JobOutput. Possible values
     include: 'Canceled', 'Canceling', 'Error', 'Finished', 'Processing',
     'Queued', 'Scheduled'
    :vartype state: str or ~azure.mgmt.media.models.JobState
    :ivar progress: If the JobOutput is in a Processing state, this contains
     the job completion percentage. The value is an estimate and not intended
     to be used to predict job completion times. To determine if the JobOutput
     is complete, use the State property.
    :vartype progress: int
    :param odatatype: Required. Constant filled by server.
    :type odatatype: str
    :param asset_name: Required. The name of the output Asset.
    :type asset_name: str
    """
    # Serialization constraints: 'readonly' fields come from the service,
    # 'required' fields must be set before a request is sent.
    _validation = {
        'error': {'readonly': True},
        'state': {'readonly': True},
        'progress': {'readonly': True},
        'odatatype': {'required': True},
        'asset_name': {'required': True},
    }
    # Maps Python attribute names to their wire (JSON) keys and types.
    _attribute_map = {
        'error': {'key': 'error', 'type': 'JobError'},
        'state': {'key': 'state', 'type': 'JobState'},
        'progress': {'key': 'progress', 'type': 'int'},
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'asset_name': {'key': 'assetName', 'type': 'str'},
    }
    def __init__(self, **kwargs):
        super(JobOutputAsset, self).__init__(**kwargs)
        self.asset_name = kwargs.get('asset_name', None)
        # OData discriminator identifying this JobOutput subtype on the wire.
        self.odatatype = '#Microsoft.Media.JobOutputAsset'
|
# https://open.kattis.com/problems/heartrate
# For each case: given b beats measured over p seconds, print the minimum,
# measured and maximum beats-per-minute (the count could be off by one beat).
cases = int(input())
for _ in range(cases):
    beats, seconds = map(float, input().split())
    bpm_low = 60/(seconds/(beats-1))
    bpm_measured = 60*beats/seconds
    bpm_high = 60/(seconds/(beats+1))
    print(float(round(bpm_low, 4)), float(round(bpm_measured, 4)), float(round(bpm_high, 4)))
|
import sys
from MainWidget import MainWidget
from PyQt5 import QtWidgets
def exception_hook(exctype, value, traceback):
    """Report the exception via the saved default hook, then exit with status 1.

    Installed as sys.excepthook by main(), which first stashes the original
    hook in sys._excepthook so it can be delegated to here.
    """
    sys._excepthook(exctype, value, traceback)
    sys.exit(1)
def main():
    """Launch the Qt application, routing uncaught exceptions to exception_hook."""
    # Preserve the default hook so exception_hook can delegate to it.
    sys._excepthook = sys.excepthook
    sys.excepthook = exception_hook
    app = QtWidgets.QApplication(sys.argv)
    widget = MainWidget()
    widget.show()
    try:
        app.exec_()  # blocks until the Qt event loop finishes
    except:
        print('exiting')
if __name__ == '__main__':
main() |
from django import forms
class CookieCutterForm(forms.Form):
    """Dynamic form for configuring a cookiecutter template.

    Builds one optional CharField per entry in ``cookie.options`` (when
    present), pre-filled with the template's default value.
    """

    repo_name = forms.CharField(label="Repo name")

    def __init__(self, cookie, *args, **kwargs):
        super(CookieCutterForm, self).__init__(*args, **kwargs)
        self.cookie = cookie
        # A cookie with no declared options contributes no extra fields.
        if cookie.options is None:
            return
        for opt, default in cookie.options.items():
            field = forms.CharField(initial=default, required=False,
                                    label=opt.replace('_', ' ').capitalize())
            self.fields[opt] = field

    def clean(self):
        """Substitute template defaults for fields whose cleaned value is None.

        Fix: guard the lookup so a cookie without options no longer raises
        AttributeError here (``__init__`` already tolerates ``options is None``).
        """
        data = super(CookieCutterForm, self).clean()
        if self.cookie.options:
            data.update({k: self.cookie.options.get(k)
                         for k in data if data[k] is None})
        return data

    @property
    def use_github(self):
        # True when the submitted payload carries the '_github' marker key.
        return '_github' in self.data
|
from skimage.filters import gabor_kernel
from skimage import io
from skimage.transform import resize
from matplotlib import pyplot as plt
import numpy as np
import math
def get_gabor_filters(inchannels, outchannels, kernel_size=(3, 3)):
    """Build a bank of real-valued Gabor kernels with random parameters.

    One kernel is produced per (output channel, input channel) pair, so the
    returned list holds ``outchannels * inchannels`` 2-D numpy arrays from
    skimage's ``gabor_kernel``.

    Fix: the original body also computed a Gabor kernel by hand on a
    ``kernel_size`` meshgrid, but immediately overwrote it with the
    ``gabor_kernel`` result — that dead computation has been removed.
    ``kernel_size`` is kept for interface compatibility, but (as before) the
    actual kernel size is chosen by skimage from the filter parameters.
    """
    # Random per-filter parameters: frequency, orientation, bandwidth, phase.
    freqs = (math.pi / 2) * math.sqrt(2) ** (-np.random.randint(0, 5, (outchannels, inchannels)))
    thetas = (math.pi / 8) * np.random.randint(0, 8, (outchannels, inchannels))
    sigmas = math.pi / freqs
    # psis is drawn only to preserve the original's random-number stream; the
    # phase was never used by the gabor_kernel call below.
    psis = math.pi * np.random.rand(outchannels, inchannels)
    filterbank = []
    for i in range(outchannels):
        for j in range(inchannels):
            # n_stds=0 and bandwidth=sigma match the original call exactly.
            g = gabor_kernel(frequency=freqs[i][j], bandwidth=sigmas[i][j],
                             theta=thetas[i][j], n_stds=0).real
            filterbank.append(g)
    return filterbank
# Visualise the 3x64 filter bank on a 12x16 grid of grayscale tiles.
filterbank = get_gabor_filters(3, 64, (3, 3))
fig = plt.subplots(12, 16, figsize=(22, 22))
for idx, kernel in enumerate(filterbank, start=1):
    plt.subplot(12, 16, idx)
    plt.imshow(kernel, cmap='gray')
    plt.axis('off')
import io
import time
import zipfile
from .zlibstream import ZlibStream
class _ZipInfoFacade(zipfile.ZipInfo):
    '''
    Proxy around a zipfile.ZipInfo that silently drops writes to the fields
    listed in __readonly__ and forwards all other attribute access to the
    wrapped instance.

    Used by ZipStream.store(), which pre-fills CRC/file_size/compress_size
    for an already-compressed payload; presumably ZipFile.writestr would
    otherwise overwrite them — confirm against the zipfile internals in use.
    '''
    # Attribute names whose writes are ignored (pre-computed by the caller).
    __readonly__ = (
        'CRC',
        'compress_size',
        'file_size',
    )
    def __init__(self, wrapped_info):
        # Write through __dict__ to bypass our own __setattr__, which would
        # recurse via __getattr__ before _wrapped exists.
        self.__dict__['_wrapped'] = wrapped_info
    def __getattr__(self, attr):
        # Only invoked for names not found normally: delegate to the target.
        return getattr(self._wrapped, attr)
    def __setattr__(self, attr, value):
        # Forward writes to the wrapped info, except for the read-only fields.
        if attr not in self.__readonly__:
            setattr(self._wrapped, attr, value)
class ZipStream(object):
    """In-memory ZIP builder fed by incrementally-compressed ZlibStream entries.

    Usage: write(name, data) any number of times per entry, then store(name)
    to finalize that entry into the archive, then read() for the ZIP bytes.
    Entry names are lower-cased.
    """
    def __init__(self, compression=6):
        # zlib compression level used for each per-entry ZlibStream.
        self.compression = compression
        self._files = {}
        self._buf = io.BufferedRandom(io.BytesIO())
        self._zip = zipfile.ZipFile(self._buf, mode='w')
        self._res = None # for cached result
    def write(self, name, data):
        """Append `data` to the (lazily created) compressed stream for `name`."""
        n = name.lower()
        self._files.setdefault(n, ZlibStream(self.compression)).write(data)
    def store(self, name):
        """Finalize the entry `name`: emit its pre-compressed bytes into the ZIP."""
        name = name.lower()
        zinfo = zipfile.ZipInfo(filename=name, date_time=time.localtime(time.time())[:6])
        zinfo.external_attr = 0o600 << 16 # from ZipFile.writestr
        zinfo.compress_type = zipfile.ZIP_DEFLATED
        zstream = self._files[name]
        self._files[name] = None # prevent name collision (instead just of .pop())
        # CRC and sizes are taken from the ZlibStream, since the data is
        # already compressed before it reaches ZipFile.
        zinfo.CRC = zstream.crc32
        zinfo.file_size = zstream.src_len
        data = zstream.read()
        zstream.close()
        zstream = None # make it ready for gc
        zinfo.compress_size = len(data)
        # Facade keeps writestr from clobbering the pre-computed fields above.
        ro_info = _ZipInfoFacade(zinfo)
        '''
        Monkey-patch _get_compressor, this is bad, i know.
        But this is only way to make ZipFile work with already
        compressed stream.
        '''
        # NOTE(review): this assumes zipfile._get_compressor takes a single
        # argument; newer CPython versions add a compresslevel parameter —
        # verify against the target Python version.
        fn = zipfile._get_compressor
        zipfile._get_compressor = lambda c: None
        self._zip.writestr(ro_info, data)
        '''
        Restore saved handler
        '''
        zipfile._get_compressor = fn
        # for debug purpose only
        return ro_info
    def read(self):
        """Close the archive (first call only) and return the full ZIP bytes."""
        if self._res is None:
            self._zip.close()
            b = self._buf
            b.seek(0, io.SEEK_SET)
            self._res = b.read()
            b.close()
        return self._res
|
import os, sys
#os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import pandas as pd
import numpy as np
# Target per-class prediction proportions; eps is the floor for rare classes.
eps = 0.004
desired = {
    0: 0.36239782,
    1: 0.043841336,
    2: 0.075268817,
    3: 0.059322034,
    4: 0.075268817,
    5: 0.075268817,
    6: 0.043841336,
    7: 0.075268817,
    8: eps,
    9: eps,
    10: eps,
    11: 0.043841336,
    12: 0.043841336,
    13: 0.014198783,
    14: 0.043841336,
    15: eps,
    16: 0.028806584,
    17: 0.014198783,
    18: 0.028806584,
    19: 0.059322034,
    20: eps,
    21: 0.126126126,
    22: 0.028806584,
    23: 0.075268817,
    24: eps,
    25: 0.222493888,
    26: 0.028806584,
    27: eps
}
# Decode the current best submission into a (samples, 28) binary matrix and
# overwrite `desired` with the per-class proportions actually observed there.
best_sub = pd.read_csv('best_sub.csv')
s0 = [s if isinstance(s, str) else '' for s in best_sub.Predicted]
p0 = [s.split() for s in s0]
y0 = np.zeros((best_sub.shape[0], 28)).astype(int)
for i in range(best_sub.shape[0]):
    for j in p0[i]: y0[i, int(j)] = 1
for i in range(28):
    desired[i] = y0[:,i].mean()
# Load the five per-crop out-of-fold prediction sets and average them.
MODEL_PATH = 'Christof/models/GAPNet/13_ext_512crop/'
oof_df = pd.read_csv(MODEL_PATH + 'oof_pred_ul.csv')
oof1 = pd.read_csv(MODEL_PATH + 'oof_pred_ul_40_40.csv')
oof2 = pd.read_csv(MODEL_PATH + 'oof_pred_ur_40_40.csv')
oof3 = pd.read_csv(MODEL_PATH + 'oof_pred_mm_40_40.csv')
oof4 = pd.read_csv(MODEL_PATH + 'oof_pred_bl_40_40.csv')
oof5 = pd.read_csv(MODEL_PATH + 'oof_pred_br_40_40.csv')
oof1 = oof1[oof1.columns[3:]].values
oof2 = oof2[oof2.columns[3:]].values
oof3 = oof3[oof3.columns[3:]].values
oof4 = oof4[oof4.columns[3:]].values
oof5 = oof5[oof5.columns[3:]].values
draw_predict1 = np.mean([oof1,oof2,oof3,oof4,oof5],axis = 0)
np.save(MODEL_PATH + f'oof.npy',draw_predict1)
# custom thresholds to match lb proportions: lower each class's threshold
# until the predicted-positive proportion reaches the desired share.
thresholds = np.linspace(0.95, 0.05, 101)
pred = draw_predict1.copy()
for j in range(pred.shape[1]):
    for t in thresholds:
        pred[:, j] = (draw_predict1[:, j] > t).astype(int)
        prop = np.mean(pred[:, j])
        if prop >= desired[j]: break
    print(j, '%3.2f' % t, '%6.4f' % desired[j], '%6.4f' % prop, j, )
ls = oof_df['Target'].values
def str2y(item):
    """Decode a space-separated label string (e.g. '0 2 27') into a
    28-dimensional binary indicator vector."""
    indices = [int(token) for token in item.split(' ')]
    encoded = np.zeros((28,))
    encoded[indices] = 1
    return encoded
# Decode the ground-truth label strings and score the thresholded predictions.
ls2 = [str2y(item) for item in ls]
ls2 = np.array(ls2)
from sklearn.metrics import f1_score
f1_score(ls2,pred, average='macro')
from keras.layers import Input, Dense, Flatten, Dropout, BatchNormalization, Lambda, Concatenate
import keras.backend as K
from keras.models import Model
def build_stacker():
    """Build the per-fold stacking model.

    Input: a (5, 28) matrix of class probabilities from the five crops;
    output: 28 sigmoid class probabilities. The flattened input is
    concatenated with mean/std/max statistics taken across the crop axis.
    """
    inp = Input((5,28,))
    x = Flatten()(inp)
    # Crop-wise statistics (axis=1 is the crop axis of the (batch, 5, 28) tensor).
    x2 = Lambda(lambda x: K.mean(x,axis=1))(inp)
    x3 = Lambda(lambda x: K.std(x, axis=1))(inp)
    x4 = Lambda(lambda x: K.max(x, axis=1))(inp)
    x = Concatenate()([x,x2,x3,x4])
    #x = BatchNormalization()(x)
    x = Dense(256, activation='relu')(x)
    #x = Dense(64, activation='relu')(x)
    #x = Dense(256, activation='relu')(x)
    x = Dropout(0.3)(x)
    out = Dense(28, activation='sigmoid')(x)
    m = Model(inp,out)
    m.compile(optimizer='adam',loss='binary_crossentropy')
    return m
# Stack the five crop predictions into (samples, 5 crops, 28 classes).
X = np.stack([oof1,oof2,oof3,oof4,oof5])
X = np.transpose(X, (1, 0, 2))
y = ls2
from sklearn.model_selection import KFold
kf = KFold(n_splits=5, random_state=23)
s_id = -1
# p2 accumulates out-of-fold predictions from the stacker models.
p2 = np.zeros((X.shape[0],28))
for tr_ind, val_ind in kf.split(X,y,):
    s_id +=1
    X_train = X[tr_ind]
    y_train = y[tr_ind]
    X_val = X[val_ind]
    y_val = y[val_ind]
    m = build_stacker()
    m.fit(X_train,y_train,validation_data=(X_val,y_val),epochs=20, batch_size=128)
    m.save(MODEL_PATH + f'stacker{s_id}_40_stats.hdf5')
    p2[val_ind] = m.predict(X_val)
# Baseline: plain mean over the 5 crops, thresholded to the desired proportions.
pred_val = np.mean(X, axis = 1)
thresholds = np.linspace(0.95, 0.05, 101)
pred = pred_val.copy()
for j in range(pred.shape[1]):
    for t in thresholds:
        pred[:, j] = (pred_val[:, j] > t).astype(int)
        prop = np.mean(pred[:, j])
        if prop >= desired[j]: break
    print(j, '%3.2f' % t, '%6.4f' % desired[j], '%6.4f' % prop, j, )
f1_score(y,pred,average='macro')
#p2 = m.predict(X_val)
# Stacker out-of-fold predictions, thresholded the same way, for comparison.
thresholds = np.linspace(0.95, 0.05, 101)
pred = p2.copy()
for j in range(pred.shape[1]):
    for t in thresholds:
        pred[:, j] = (p2[:, j] > t).astype(int)
        prop = np.mean(pred[:, j])
        if prop >= desired[j]: break
    print(j, '%3.2f' % t, '%6.4f' % desired[j], '%6.4f' % prop, j, )
f1_score(y,pred,average='macro')
|
# Raw option lists in entry order; the *T variants keep that original ordering.
TechT = ['TENNANT', 'MILLER', 'MAYER', 'WIESER', 'LORD-MAY', 'BISHOP', 'LAU', 'CHAN', 'MABIOR']
ElementT = ['Mo', 'Cu', 'B', 'U', 'Fe', 'Zn', 'Sr', 'S']
ContractT = ['RESEARCH', 'AGAT', 'GRASBY', 'SPENCER', 'MIRNA', 'MONCUR', 'LORENZ', 'OMID']
TypeT = ['SAMPLE', 'STANDARD', 'BLANK']
SpikeT = ['NONE', 'SINGLE', 'DOUBLE', 'E_DOPING']
Machine = ['NEPTUNE', 'TRITION']

# Alphabetised copies for presentation.
Tech = sorted(TechT)
Element = sorted(ElementT)
Contract = sorted(ContractT)
Type = sorted(TypeT)
Spike = sorted(SpikeT)

# 'UNKNOWN' must always be the last option, after the sorted entries.
Tech.append('UNKNOWN')
Contract.append('UNKNOWN')
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 14 22:51:27 2020
@author: python MATLAB & GIS
"""
import datetime
import subprocess
import os
import time
from moviepy.editor import VideoFileClip
# Record the launch directory; nameList_F_withExt later chdir()s into it.
current_dir = os.getcwd()
print(current_dir)
def main():
    """Split every *.wmv in the current directory into overlapping ~25-minute
    mp4 clips via ffmpeg, and write a per-video ffmpeg-concat .txt listing the
    jumpcut output names expected for later merging.
    """
    #define clipping information
    start_point = 0
    duration_sec = 1500          # nominal clip length (25 min)
    overlap_duration = 4         # seconds shared between consecutive clips
    clipped_length = duration_sec + overlap_duration
    #########################################################################################################
    #looping through all files and clip
    print('\n\n\n'+100*'#')
    #jumpcut_suffix = input('Input jumcut suffix (eg. _margin1- this have to be the same with output for jumpcut): ')
    #---------------------------------------------------------------------------------------------------
    #-------change jumpcut_suffix before running %%f_jcmg1.mp4
    jumpcut_suffix = '.mp4_jcmg1'
    # this will be use for merging later
    for idx,inFile in enumerate(nameList_F_withExt(current_dir,'*.wmv')):
        print(100*'-')
        #print(f'Input File Id No: {idx}, name: {inFile}')
        # inFile = '2_20120830_matlab3_ValSchmidt.mp4'
        in_name,in_ext = os.path.splitext(inFile)
        # Concat list for this video (one 'file ...' line per expected clip).
        out_ffmpeg_merge_txtf = open(in_name+'.txt','w')
        inFile_length = clip_duration(inFile)
        if inFile_length <= duration_sec+600:
            # Videos up to 35 min are left unsplit (the `pass` is a no-op leftover).
            pass
            print(f'This video is short, no clipping required, length: {inFile_length} sec or {get_strtime(inFile_length)}')
        else:
            clip_number = int(inFile_length/duration_sec)+1
            #%%
            # First pass: write the expected jumpcut clip names to the concat list.
            for i in range(clip_number):
                outFile = in_name+ '_split' + str(i+1) + '.mp4'
                msg = 'file '+ "'"+in_name+ '_split' + str(i+1) + jumpcut_suffix +'.mp4'+"'"
                out_ffmpeg_merge_txtf.write(msg+'\n')
            # Second pass: actually cut the clips (skipping ones already on disk).
            # NOTE(review): `msg` here uses str(i) while outFile uses str(i+1) —
            # msg is unused in this loop, but the inconsistency looks accidental.
            for i in range(clip_number):
                outFile = in_name+ '_split' + str(i+1) + '.mp4'
                msg = 'file '+ "'"+in_name+ '_split' + str(i) + jumpcut_suffix +'.mp4'+"'"
                print(f'\tClipping No{i}: {outFile}')
                if i == 0:
                    # First clip: no leading overlap, half the overlap trailing.
                    start_point = i*duration_sec
                    clipped_length_first = clipped_length-overlap_duration/2
                    end_point = start_point + clipped_length_first
                    if os.path.exists(outFile):
                        pass
                        print(f'\t\toutFile already exists: {outFile}')
                    else:
                        ffmpeg_split(inFile,outFile,start_point,clipped_length_first)
                        print(f'\t\t\tinFile:{inFile}\n\t\t\toutFile:{outFile}\n\t\t\tstart_point:{start_point}\n\t\t\tclipped_length:{clipped_length_first}')
                elif i == max(range(clip_number)):
                    # Last clip: half the overlap leading, runs to the end of the file.
                    start_point = i*duration_sec-overlap_duration/2
                    clipped_length_last = inFile_length%duration_sec+overlap_duration/2
                    end_point = start_point + clipped_length_last
                    if os.path.exists(outFile):
                        pass
                        print(f'\t\toutFile already exists: {outFile}')
                    else:
                        ffmpeg_split(inFile,outFile,start_point,clipped_length_last)
                        print(f'\t\t\tinFile:{inFile}\n\t\t\toutFile:{outFile}\n\t\t\tstart_point:{start_point}\n\t\t\tclipped_length:{clipped_length_last}')
                else:
                    # Middle clips: half the overlap on each side.
                    start_point = i*duration_sec-overlap_duration/2
                    end_point = start_point + clipped_length
                    if os.path.exists(outFile):
                        pass
                        print(f'\t\toutFile already exists: {outFile}')
                    else:
                        ffmpeg_split(inFile,outFile,start_point,clipped_length)
                        print(f'\t\t\tinFile:{inFile}\n\t\t\toutFile:{outFile}\n\t\t\tstart_point:{start_point}\n\t\t\tclipped_length:{clipped_length}')
            #plotting details only
            # print("\n################-plotting details only - ##########################")
            # for i in range(clip_number):
            #     outFile = in_name+ '_split' + str(i) + '.mp4'
            #     print(f'\tClip No{i}: {outFile}')
            #     if i == 0:
            #         start_point = i*duration_sec
            #         print('\t\t starting point:',start_point)
            #         clipped_length_first = clipped_length-overlap_duration/2
            #         print(f'\t\t clipped_length_first: {clipped_length_first}')
            #         end_point = start_point + clipped_length_first
            #         print('\t\t ending point:',end_point)
            #         # ffmpeg_split(inFile,outFile,start_point,clipped_length_first)
            #     if i == max(range(clip_number)):
            #         start_point = i*duration_sec-overlap_duration/2
            #         print('\t\t starting point:',start_point)
            #         clipped_length_last = inFile_length%duration_sec+overlap_duration/2
            #         print(f'\t\t clipped_length_last: {clipped_length_last}')
            #         end_point = start_point + clipped_length_last
            #         print('\t\t ending point:',end_point)
            #         # ffmpeg_split(inFile,outFile,start_point,clipped_length_last)
            #     else:
            #         start_point = i*duration_sec-overlap_duration/2
            #         print('\t\t starting point:',start_point)
            #         print(f'\t\t clipped_length: {clipped_length}')
            #         end_point = start_point + clipped_length
            #         print('\t\t ending point:',end_point)
            #         # ffmpeg_split(inFile,outFile,start_point,clipped_length)
            #
    print(100*'#'+'\n')
    # NOTE(review): only the last video's .txt handle is closed here — the
    # handles opened for earlier iterations are leaked; confirm if intentional.
    out_ffmpeg_merge_txtf.close()
#%
def clip_duration(invideoF):
    """Return the duration of a video file in seconds (float).

    Fix: the original never closed the VideoFileClip, leaking its reader
    process and file handles; the clip is now closed once the duration is read.
    """
    from moviepy.editor import VideoFileClip
    clip = VideoFileClip(invideoF)
    try:
        return clip.duration
    finally:
        clip.close()
#print(clip_duration('0_20120829_matlab_ValSchmidt_split1_cut1.mp4','hms'))
#print(clip_duration('0_20120829_matlab_ValSchmidt_split1_cut1.mp4','sec'))
#% time handling functions
def get_sec(time_str):
    """Convert an 'HH:MM:SS' string into a total number of seconds."""
    hours, minutes, seconds = (int(part) for part in time_str.split(':'))
    return hours * 3600 + minutes * 60 + seconds
#print(get_sec('00:30:40'))
#print(get_sec('00:04:15'))
#print(get_sec('00:00:25'))
def get_sec_datetime(time_str):
    """Convert an 'H:MM:SS' string to total seconds via datetime.timedelta."""
    hours, minutes, seconds = map(int, time_str.split(':'))
    delta = datetime.timedelta(hours=hours, minutes=minutes, seconds=seconds)
    return int(delta.total_seconds())
#print(get_sec_datetime('1:23:45'))
def get_strtime(time_sec):
    """Format a number of seconds as an 'HH:MM:SS' string (wraps past 24h)."""
    import time
    return time.strftime('%H:%M:%S', time.gmtime(time_sec))
#get_strtime(5025)
def ffmpeg_split(inFile, outFile, start_point, duration_sec,
                 ffmpegFile="D:/Downloads/youtube-dl/ffmpeg.exe"):
    """Cut `duration_sec` seconds from `inFile` starting at `start_point`.

    Generalization: the ffmpeg binary location is now a parameter, with the
    previously hard-coded path as its default, so the function is usable on
    machines with ffmpeg elsewhere without changing existing callers.
    """
    subprocess.call([ffmpegFile, '-i', inFile, '-ss', str(start_point),
                     '-t', str(duration_sec), outFile])
# ffmpeg -i t.mp4 -ss 00:00:00 -t 2440 -c:v h264_nvenc t_split1.mp4
#########################################################################################################
#%% list all file with extentions
def nameList_F_withExt(InputFolder,filterString="*"):
    '''
    pathList_F_ext(InputFolder,filterString="*")
    list all files and folders in InputFolder
    return a list of names for every file and folder matching filterString
    file includes extention (ext) information

    NOTE(review): this chdir()s into InputFolder and never restores the
    previous working directory. main() appears to rely on that side effect
    (it opens output files by bare name afterwards), so do not remove the
    chdir without updating the callers.
    '''
    import glob
    os.chdir(InputFolder) #change working folder
    return glob.glob(filterString)
# Script entry point.
if __name__ == "__main__":
    main()
|
import json
import urllib
import logging
import os.path
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define, options
from markov_by_char import CharacterMarkovGenerator
# Command-line options: --port for the HTTP port, --n for the n-gram length.
define("port", default = 8000, help = "Run on the given port ", type = int)
define("n", default = 5, help = "length of n-gram ", type = int)
class Application(tornado.web.Application):
    """Tornado application wiring the URL routes to their handlers."""
    def __init__(self):
        routes = [
            (r"/", MainHandler),
            (r"/markov", MarkovHandler),
            (r"/about", AboutHandler),
        ]
        # Templates and static assets live next to this module.
        config = {
            "template_path": os.path.join(os.path.dirname(__file__), "templates"),
            "static_path": os.path.join(os.path.dirname(__file__), "static"),
        }
        tornado.web.Application.__init__(self, routes, **config)
class MainHandler(tornado.web.RequestHandler):
    # Serves the landing page.
    def get(self):
        self.render('main.html')
class MarkovHandler(tornado.web.RequestHandler):
    """Generate Markov-chain text from a Twitter user's recent tweets.

    NOTE(review): uses the Python-2 urllib API and the retired Twitter
    v1 REST endpoint — this code predates both removals; verify before reuse.
    """
    def get(self):
        # ?screen_name=... selects whose timeline feeds the generator.
        screen_name = self.get_argument('screen_name')
        params = {'count':200, 'screen_name':screen_name}
        # # this is for live data fetching
        resp = urllib.urlopen(
            'http://api.twitter.com/1/statuses/user_timeline.json?' + \
            urllib.urlencode(params))
        rawjson = resp.read()
        # Character-level model of order n (CLI option), capped at 140 chars.
        generator = CharacterMarkovGenerator(options.n, 140)
        # below should be a backup for the performance
        # data = json.loads(open("dumpdump.json").read())
        # print data
        data = json.loads(rawjson)
        # print data
        for tweet in data:
            generator.feed(tweet['text']) # this shows the text content of the tweet
        self.write(generator.generate())
class AboutHandler(tornado.web.RequestHandler):
    """Serves the static about page."""
    def get(self):
        self.render('about.html')
# Entry point: parse --port/--n options, then start the HTTP server and IO loop.
if __name__ == "__main__":
    tornado.options.parse_command_line()
    http_server = tornado.httpserver.HTTPServer(Application())
    http_server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
|
"""Abstract Interface for a Ticket Provider"""
class TicketProvider():
    """Abstract interface for a ticket provider.

    Every operation raises NotImplementedError; concrete providers must
    override the methods they support.
    """

    def __init__(self):
        # Abstract base: not meant to be instantiated directly.
        raise NotImplementedError

    @classmethod
    def getTracker(cls, project_path):
        """ Get the details from the issue tracker at path """
        raise NotImplementedError

    @classmethod
    def getMembers(cls, project_path):
        """ Get the members associated to the project at path """
        raise NotImplementedError

    @classmethod
    def addMember(cls, project_path, user_id, level):
        """ Add a member to a tracker"""
        raise NotImplementedError

    @classmethod
    def getTickets(cls, project_path):
        """ Get all the tickets from a given project path """
        raise NotImplementedError

    @classmethod
    def getTicket(cls, project_path, ticket_id):
        """ Get details from a ticket given a project_path and ticket_id """
        raise NotImplementedError

    @classmethod
    def getTicketDiscussion(cls, project_path, ticket_id):
        """ Get the discussion thread associated to a ticket """
        raise NotImplementedError

    @classmethod
    def getUserByExternalId(cls, provider, external_id):
        """ Get a user by external_id """
        raise NotImplementedError

    @classmethod
    def getUserByEmail(cls, email):
        """ Get a user by email """
        raise NotImplementedError

    @classmethod
    def createTicket(cls,
                     project_path,
                     from_user,
                     to_user,
                     subject,
                     body,
                     labels=None):
        """ Create a ticket on the given project path.

        `labels` is an optional iterable of label strings; the previous
        mutable default (`[]`) was replaced with None so implementations
        do not inherit the shared-mutable-default pitfall.
        """
        raise NotImplementedError

    @classmethod
    def subscribeTicket(cls, project_path, ticket_id, user_id):
        """ Subscribe a user to a ticket """
        raise NotImplementedError

    @classmethod
    def unsubscribeTicket(cls, project_path, ticket_id, user_id):
        """ Unsubscribe a user from a ticket """
        raise NotImplementedError

    @classmethod
    def listTicketComments(cls, project_path, ticket_id):
        """ List all discussion on a ticket by ticket_id and project_path """
        raise NotImplementedError

    @classmethod
    def commentTicket(cls, project_path,
                      ticket_id,
                      comment_subject,
                      comment_body):
        """ Comment a ticket """
        raise NotImplementedError
|
#!/sv/venv/T-PLE/bin/python2.7
from __future__ import print_function
from scripts.baseController import baseELOCalculations as ELOCal
import winStreakUpdate
import updateVSResults
import historicELOUpdate
def main():
    """Recompute head-to-head ELOs, win/loss records and the processed game
    log from the spreadsheet's unprocessed entries, then push every updated
    table back through the ELOCal sheet API and run the follow-up updaters.
    """
    # When True, the per-player accumulated-diff column (index 2) is reset
    # and rebuilt while replaying games.
    accumulativeDiff = False
    currentLog, winsLosses, ELOs = ELOCal.getLog(), ELOCal.getWL(), ELOCal.getELOs()
    # Log rows not yet folded into the ELO tables.
    unLoggedIndexes = ELOCal.cleanEmptyIndexes(currentLog)
    if accumulativeDiff:
        for pIndex in ELOs:
            pIndex[2] = 0  # reset accumulated diff before replaying
    for index in unLoggedIndexes:
        # index[0]/index[1] are presumably winner/loser ids — TODO confirm
        # against baseController.calcHTHELO.
        ELOs = ELOCal.correctExpectedInt(
            ELOCal.calcHTHELO(
                ELOs,
                index[0],
                index[1],
                accumulativeDiff=accumulativeDiff
            )
        )
        winsLosses = ELOCal.updateWinLossTable(winsLosses, index[0], index[1])
    ELOCal.debugPrint("MU: Updated ELOs", ELOs)
    # First spreadsheet row for the newly processed games.
    logCount = int(ELOCal.getLogCount()[0][0]) + ELOCal.P_LOG_Range_Offset + 1
    if currentLog[0][0] != '':
        blankLog = [['', '']]*100
        writeToRange = ELOCal.UP_LOG_Range.format(str(logCount), str(len(unLoggedIndexes) + logCount))
        # BLANK LOG OVER UNPROCESSED GAMES
        ELOCal.updateCells(
            values=blankLog,
            sheetRange=ELOCal.LOG_Range,
            dataType="USER_ENTERED"
        )
        # WRITE NEW PROCESSED GAMES
        ELOCal.updateCells(
            values=unLoggedIndexes,
            sheetRange=writeToRange,
            dataType="USER_ENTERED"
        )
    # UPDATE WINS AND LOSSES IN PRIMARY SHEET
    ELOCal.updateCells(
        values=winsLosses,
        sheetRange=ELOCal.WL_Range
    )
    # UPDATE ELOs
    ELOCal.updateCells(
        values=ELOs,
        sheetRange=ELOCal.ELO_Range
    )
    # Downstream reports derived from the freshly written tables.
    updateVSResults.main()
    winStreakUpdate.main()
    historicELOUpdate.main()
    ELOCal.log("Complete")

if __name__ == '__main__':
    main()
|
# Extract skill description from data.grf
FILE = 'items'
# Track already-printed item ids in a set: membership tests are O(1) instead
# of the original list's O(n) per input line.
listItemsSee = set()
with open(FILE) as fp:
    for line in fp:
        info = line.split("#")
        if len(info) > 2 and info[0] not in listItemsSee:
            print("{}={}".format(info[0], info[1]))
            listItemsSee.add(info[0])
"""
Python makes performing file I/O simple. Take a look
at how to read and write to files here:
https://docs.python.org/3/tutorial/inputoutput.html#reading-and-writing-files
"""
# Open up the "foo.txt" file (which already exists) for reading.
# Print all the contents of the file; the context manager guarantees the
# file is closed even if printing raises.
with open('foo.txt', 'r') as f:
    for line_text in f:
        print(line_text)

# Open up a file called "bar.txt" (which doesn't exist yet) for
# writing. Write a few lines of arbitrary content to that file, then
# reopen "bar.txt" and print it to confirm the contents round-trip.
with open('bar.txt', 'w+') as fw:
    fw.write('Consulted perpetual of pronounce me delivered.\n')
    fw.write('Too months nay end change relied who beauty wishes matter.\n')
    fw.write('Shew of john real park so rest we on.\n')
    fw.write('Is we miles ready he might going.\n')

with open('bar.txt', 'r') as fr:
    for line_text in fr:
        print(line_text)
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MeasureDefinition(Model):
    """Represents a measure definition.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :param kind: Possible values include: 'entity', 'attribute'
    :type kind: str or ~microsoft.dynamics.customerinsights.api.models.enum
    :ivar is_scalar: Gets a value indicating whether the current measure is a
     scalar measure e doesn't have any dimensions
    :vartype is_scalar: bool
    :ivar linked_entities: Gets list of linked entities associated with the
     measure.
    :vartype linked_entities:
     list[~microsoft.dynamics.customerinsights.api.models.MeasureLinkedEntity]
    :ivar variables: Gets list of variables (computed columns) for the
     measure.
    :vartype variables:
     list[~microsoft.dynamics.customerinsights.api.models.MeasureVariable]
    :param filters:
    :type filters:
     ~microsoft.dynamics.customerinsights.api.models.MeasureExpression
    :param filtering_criteria:
    :type filtering_criteria:
     ~microsoft.dynamics.customerinsights.api.models.SegmentMembershipCriteria
    :ivar dimensions: Gets list of dimensions with the measure.
    :vartype dimensions:
     list[~microsoft.dynamics.customerinsights.api.models.MeasureDimension]
    :ivar aggregates: Gets list of aggregates of the measure.
    :vartype aggregates:
     list[~microsoft.dynamics.customerinsights.api.models.MeasureAggregate]
    :ivar is_profile: Gets a value indicating whether the current measure is a
     profile measure
    :vartype is_profile: bool
    :ivar measure_query_sql: Gets the user specified custom SQL query.
    :vartype measure_query_sql: str
    :param type: Possible values include: 'structured', 'manual'
    :type type: str or ~microsoft.dynamics.customerinsights.api.models.enum
    :ivar is_manual_query_scalar: Gets the indicating whether the Business
     Measure is Scalar or not.
    :vartype is_manual_query_scalar: bool
    :ivar dependencies: Gets the list of measures that this measure depends
     on.
    :vartype dependencies:
     list[~microsoft.dynamics.customerinsights.api.models.EntityDependency]
    """

    # Server-populated (read-only) fields; msrest ignores them on requests.
    _validation = {
        'is_scalar': {'readonly': True},
        'linked_entities': {'readonly': True},
        'variables': {'readonly': True},
        'dimensions': {'readonly': True},
        'aggregates': {'readonly': True},
        'is_profile': {'readonly': True},
        'measure_query_sql': {'readonly': True},
        'is_manual_query_scalar': {'readonly': True},
        'dependencies': {'readonly': True},
    }

    # Maps Python attribute names to JSON wire names and msrest type strings.
    _attribute_map = {
        'kind': {'key': 'kind', 'type': 'str'},
        'is_scalar': {'key': 'isScalar', 'type': 'bool'},
        'linked_entities': {'key': 'linkedEntities', 'type': '[MeasureLinkedEntity]'},
        'variables': {'key': 'variables', 'type': '[MeasureVariable]'},
        'filters': {'key': 'filters', 'type': 'MeasureExpression'},
        'filtering_criteria': {'key': 'filteringCriteria', 'type': 'SegmentMembershipCriteria'},
        'dimensions': {'key': 'dimensions', 'type': '[MeasureDimension]'},
        'aggregates': {'key': 'aggregates', 'type': '[MeasureAggregate]'},
        'is_profile': {'key': 'isProfile', 'type': 'bool'},
        'measure_query_sql': {'key': 'measureQuerySql', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'is_manual_query_scalar': {'key': 'isManualQueryScalar', 'type': 'bool'},
        'dependencies': {'key': 'dependencies', 'type': '[EntityDependency]'},
    }

    def __init__(self, **kwargs):
        super(MeasureDefinition, self).__init__(**kwargs)
        # Writable fields come from kwargs; read-only fields start as None
        # and are only populated by the server during deserialization.
        self.kind = kwargs.get('kind', None)
        self.is_scalar = None
        self.linked_entities = None
        self.variables = None
        self.filters = kwargs.get('filters', None)
        self.filtering_criteria = kwargs.get('filtering_criteria', None)
        self.dimensions = None
        self.aggregates = None
        self.is_profile = None
        self.measure_query_sql = None
        self.type = kwargs.get('type', None)
        self.is_manual_query_scalar = None
        self.dependencies = None
|
import requests
from bs4 import BeautifulSoup

# Collected trend names; renamed from `list`, which shadowed the builtin.
trend_names = []
url = "http://trends24.in/india/~cloud"
sc = requests.get(url)
soup = BeautifulSoup(sc.text, "lxml")
li = soup.find_all('li')
for data in li:
    anchor = data.find('a')
    # Guard: <li> elements without an <a> previously raised AttributeError.
    if anchor is not None:
        trend_names.append(anchor.text)
for name in trend_names:
    # The anchor text starts with a newline from the pretty-printed HTML,
    # so the '#' of a hashtag lands at index 1. Guard short strings to
    # avoid the original IndexError on names of length < 2.
    if len(name) > 1 and name[1] == '#':
        print(name)
        print('-----------')
#print(soup.prettify())
'''</h1>data.find('description').text)
<div class="page-content__tagcloud">
 <ol class="page-content__tagcloud__list" id="cloud-ol">
  <li>
   <a data-count="14" href="http://twitter.com/search?q=%23HappyBirthdayJacqueline" style="opacity:2.81;font-size:281%;">
    #HappyBirthdayJacqueline
   </a>
  </li>
  <li>
   <a data-count="14" href="http://twitter.com/search?q=%23IamWinvestor" style="opacity:2.81;font-size:281%;">
    #IamWinvestor
   </a>
  </li>
  <li>
   <a data-count="13" href="http://twitter.com/search?q=%23RSSMuktBharat" style="opacity:2.64;font-size:264%;">
    #RSSMuktBharat
   </a>
  </li>
  <li>'''
import imageio
import numpy as np
import scipy
import scipy.signal  # `import scipy` alone does not load the signal submodule
def make_gif(images, fname):
    """Write `images` (a sequence of frames) to `fname` via imageio.

    `subrectangles=True` reduces output size by encoding only changed regions.
    """
    imageio.mimwrite(fname, images, subrectangles=True)
    print("wrote gif")
def discount(x, gamma, terminal_array=None):
    """Compute discounted cumulative sums of `x` (reward-to-go style).

    Args:
        x: 1D (or 2D, axis 0 = time) array of values to discount.
        gamma: discount factor in [0, 1].
        terminal_array: optional array of terminal flags (0/1), one element
            longer than needed so terminal_array[1:] aligns with x; a terminal
            resets the running discounted sum.

    Returns:
        np.ndarray with the same leading length as x, where
        out[t] = x[t] + gamma * out[t+1] (reset at terminals).

    Note: requires `import scipy.signal` at module level — a bare
    `import scipy` does not guarantee the signal submodule is loaded,
    which made the fast path raise AttributeError.
    """
    if terminal_array is None:
        # lfilter on the reversed sequence implements the backward recursion
        # y[t] = x[t] + gamma * y[t+1] in C.
        return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]
    else:
        y, adv = 0, []
        # Align flags with x: terminal_array[1:] reversed matches reversed(x).
        terminals_reversed = terminal_array[1:][::-1]
        for step, dt in enumerate(reversed(x)):
            y = dt + gamma * y * (1 - terminals_reversed[step])
            adv.append(y)
        return np.array(adv)[::-1]
class RunningStats(object):
    """Running mean/variance over batches via the parallel-merge form of
    Welford's algorithm.

    References:
    https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
    https://github.com/openai/baselines/blob/master/baselines/common/running_mean_std.py
    """

    def __init__(self, epsilon=1e-4, shape=()):
        # epsilon-sized pseudo-count avoids division by zero on first update.
        self.mean = np.zeros(shape, 'float64')
        self.var = np.ones(shape, 'float64')
        self.std = np.ones(shape, 'float64')
        self.count = epsilon

    def update(self, x):
        """Fold a batch `x` (samples along axis 0) into the running moments."""
        self.update_from_moments(np.mean(x, axis=0),
                                 np.var(x, axis=0),
                                 x.shape[0])

    def update_from_moments(self, batch_mean, batch_var, batch_count):
        """Merge precomputed batch moments with the accumulated moments."""
        total = self.count + batch_count
        delta = batch_mean - self.mean
        self.mean = self.mean + delta * batch_count / total
        combined_m2 = (self.var * self.count
                       + batch_var * batch_count
                       + np.square(delta) * self.count * batch_count / total)
        self.var = combined_m2 / total
        # Floor the std so downstream normalization never divides by ~0.
        self.std = np.maximum(np.sqrt(self.var), 1e-6)
        self.count = total
#!/usr/bin/env python
#
# tournament.py -- implementation of a Swiss-system tournament
#
import psycopg2
def connect():
    """Connect to the PostgreSQL database. Returns a database connection."""
    # Database name is fixed by the project's tournament schema.
    return psycopg2.connect("dbname=tournament")
def deleteMatches():
    """Remove all the match records from the database."""
    db = connect()
    cursor = db.cursor()
    # CASCADE clears rows in tables that reference matches as well.
    cursor.execute("TRUNCATE matches CASCADE;")
    db.commit()
    db.close()
def deletePlayers():
    """Remove all the player records from the database."""
    db = connect()
    cursor = db.cursor()
    # CASCADE clears rows in tables that reference players as well.
    cursor.execute("TRUNCATE players CASCADE;")
    db.commit()
    db.close()
def countPlayers():
    """Returns the number of players currently registered."""
    db = connect()
    cursor = db.cursor()
    cursor.execute("SELECT count(*) FROM players;")
    (num_players,) = cursor.fetchone()
    db.close()
    return num_players
def registerPlayer(p_name):
    """Adds a player to the tournament database.

    The database assigns a unique serial id number for the player (handled
    by the SQL schema, not in Python).

    Args:
      p_name: the player's full name (need not be unique).
    """
    db = connect()
    cursor = db.cursor()
    # Parameterized query: never interpolate the name into the SQL string.
    cursor.execute("INSERT INTO players(player_name) VALUES (%s)", (p_name,))
    db.commit()
    db.close()
def playerStandings():
    """Returns a list of the players and their win records, sorted by wins.

    The first entry is the player in first place, or one tied for it.

    Returns:
      A list of tuples, each of which contains (id, name, wins, matches):
        id: the player's unique id (assigned by the database)
        name: the player's full name (as registered)
        wins: the number of matches the player has won
        matches: the number of matches the player has played
    """
    db = connect()
    cursor = db.cursor()
    # The ordering logic lives in the `standings` SQL view.
    cursor.execute("SELECT * FROM standings;")
    standings = cursor.fetchall()
    db.close()
    return standings
def reportMatch(winner, loser):
    """Records the outcome of a single match between two players.

    Args:
      winner: the id number of the player who won
      loser: the id number of the player who lost
    """
    db = connect()
    cursor = db.cursor()
    cursor.execute(
        "INSERT INTO matches(winner_id, loser_id) VALUES (%s, %s)",
        (winner, loser,))
    db.commit()
    db.close()
def swissPairings():
    """Returns a list of pairs of players for the next round of a match.

    Assuming an even number of registered players, each player appears
    exactly once, paired with the adjacent player in the standings (equal
    or nearly-equal win record).

    Returns:
      A list of tuples, each of which contains (id1, name1, id2, name2):
        id1/name1: the first player's unique id and name
        id2/name2: the second player's unique id and name
    """
    db = connect()
    cursor = db.cursor()
    cursor.execute("SELECT * FROM standings;")
    standings = cursor.fetchall()
    db.close()
    # Pair consecutive standings rows: (0,1), (2,3), ...
    # Columns 0 and 1 of each row are the player's id and name.
    return [
        (standings[i][0], standings[i][1],
         standings[i + 1][0], standings[i + 1][1])
        for i in range(0, len(standings) - 1, 2)
    ]
|
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse, JsonResponse
from django.template import loader
from django.views.decorators.csrf import csrf_exempt
from . import classifier
from . import prepare_data
from . import utils
import numpy as np
def index(request):
    """Render the reports landing page."""
    template = loader.get_template('reports/index.html')
    empty_context = {}
    return HttpResponse(template.render(empty_context, request))
@csrf_exempt
def classify(request):
    """Classify a POSTed question for a subject and return JSON with the
    knowledge-level and cognitive-level probability vectors plus their
    combined (outer-product) percentage matrix.
    """
    subject = request.POST.get("subject", "ADA")
    question = request.POST.get("question", "Why is the world round?")
    # Fix: removed the hard-coded placeholder `know`/`cog` lists that were
    # dead code — they were immediately overwritten by the classifier call.
    know, cog, combined = classifier.get_probabilities(question, subject)
    # Outer product of the two marginal distributions, scaled to percent.
    combined = 100 * np.dot(np.array(know).reshape(-1, 1), np.array(cog).reshape(1, -1))
    know = utils.convert_1darray_to_list(know)
    cog = utils.convert_1darray_to_list(cog)
    combined = utils.convert_2darray_to_list(combined)
    return JsonResponse({'question': question, 'know': know, 'cog': cog, 'combined': combined})
def analysis(request):
    """Render the analysis page for the requested subject (default 'ADA')."""
    subject = request.GET.get('subject', 'ADA')
    page = loader.get_template('reports/analysis.html')
    context = prepare_data.get_data(subject)
    context['subject'] = subject
    return HttpResponse(page.render(context, request))
# Generated by Django 3.1 on 2020-09-02 12:49
from decimal import Decimal
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.1: widens the payment `captured_amount`
    # and `total` fields and the transaction `amount` field to
    # max_digits=12 with 3 decimal places. Generated migrations should
    # normally not be edited by hand.

    dependencies = [
        ("payment", "0019_auto_20200812_1101"),
    ]

    operations = [
        migrations.AlterField(
            model_name="payment",
            name="captured_amount",
            field=models.DecimalField(
                decimal_places=3, default=Decimal("0.0"), max_digits=12
            ),
        ),
        migrations.AlterField(
            model_name="payment",
            name="total",
            field=models.DecimalField(
                decimal_places=3, default=Decimal("0.0"), max_digits=12
            ),
        ),
        migrations.AlterField(
            model_name="transaction",
            name="amount",
            field=models.DecimalField(
                decimal_places=3, default=Decimal("0.0"), max_digits=12
            ),
        ),
    ]
|
import json

if __name__ == '__main__':
    # Extract each trip's final (lat, lon) from the POLYLINE column of the
    # quoted test CSV and write a simplified TRIP_ID,LATITUDE,LONGITUDE file.
    with open('simple_base.csv', 'w') as w:
        w.write('TRIP_ID,LATITUDE,LONGITUDE\n')
        with open('../data/test.csv', 'r') as f:
            f.readline()  # skip the header row
            for line in f:
                # Fix: removed the leftover `print entries` debug statement
                # (Python-2-only syntax that also spammed one line per row).
                entries = line.strip().split('"')
                tid = entries[1].strip('"')
                polylines = json.loads(entries[-2].strip('"'))
                latitude = str(polylines[-1][0])
                longitude = str(polylines[-1][1])
                w.write(tid + ',' + latitude + ',' + longitude + '\n')
|
from django.conf.urls import patterns, url

from blog import views

# Fix: `patterns()` takes a view-prefix string as its FIRST argument; without
# it the first url() entry was consumed as the prefix and never routed.
# Django also matches paths with the leading "/" already stripped, so
# patterns anchored as r'/...' could never match; anchor with '^' instead.
urlpatterns = patterns(
    '',
    url(r'^$', views.index),
    url(r'^detail/(?P<blogid>\d+)/$', views.detail),
    url(r'^search/', views.search),
    url(r'^searchbycategory', views.searchbycategory),
)
|
import numpy as np
import pandas as pd
import sklearn.metrics as metrics
import inference
import utils
def evaluate_audio_tagging(y_true, y_pred, threshold=-1):
    """Evaluate audio tagging performance.

    Produces three kinds of scores: class-wise, macro-averaged, and
    micro-averaged. Both inputs are 2D arrays indexed [sample, class].

    Args:
        y_true (np.ndarray): 2D array of ground truth values.
        y_pred (np.ndarray): 2D array of predictions.
        threshold (number): Threshold used to binarize predictions.

    Returns:
        pd.DataFrame: Table of evaluation results.
    """
    binarized = inference.binarize_predictions(y_pred, threshold)
    per_class = compute_audio_tagging_scores(y_true, y_pred, binarized).T
    macro = np.mean(per_class, axis=0, keepdims=True)
    micro = compute_audio_tagging_scores(
        y_true, y_pred, binarized, average='micro')
    # Stack class rows, then the macro and micro summary rows.
    table = np.concatenate((per_class, macro, micro[None, :]))
    row_index = pd.Index(utils.LABELS + ['Macro Average', 'Micro Average'],
                         name='Class')
    return pd.DataFrame(table, row_index,
                        ['MAP@3', 'F-score', 'Precision', 'Recall'])
def compute_audio_tagging_scores(y_true, y_pred, y_pred_b, average=None):
    """Compute prediction scores using several performance metrics.

    Metrics: MAP@3, F1 score, precision, and recall.

    Args:
        y_true (np.ndarray): 2D array of ground truth values.
        y_pred (np.ndarray): 2D array of prediction probabilities.
        y_pred_b (np.ndarray): 2D array of binary predictions.
        average (str): ``'macro'``, ``'micro'``, or ``None`` (no averaging).

    Returns:
        np.ndarray: Scores in the order [MAP@3, F1, precision, recall].
    """
    per_class = average is None
    map_3 = compute_map(y_true, y_pred, k=3, class_wise=per_class)
    precision, recall, f1_score, _ = metrics.precision_recall_fscore_support(
        y_true, y_pred_b, average=average)
    return np.array([map_3, f1_score, precision, recall])
def compute_map(y_true, y_pred, k=3, class_wise=False):
    """Compute the mean average precision at k (MAP@k).

    Args:
        y_true (np.ndarray): 2D array of ground truth values.
        y_pred (np.ndarray): 2D array of predictions.
        k (int): The maximum number of predicted elements.
        class_wise (bool): Whether to compute a score for each class.

    Returns:
        float or np.ndarray: The MAP@k score(s).

    Note:
        Assumes the ground truths are single-label.
    """
    if class_wise:
        # Recurse per class over the subset of samples whose true label is
        # that class.
        true_class = np.nonzero(y_true)[1]
        scores = [compute_map(y_true[true_class == c],
                              y_pred[true_class == c], k)
                  for c in range(y_true.shape[1])]
        return np.array(scores)

    # Rank of each class per sample in descending-probability order (0-based),
    # then pick the rank of the true label (1-based).
    ranks = y_pred.argsort()[:, ::-1].argsort()
    true_rank = ranks[y_true.astype(bool)] + 1
    if len(true_rank) > len(y_true):
        raise Exception('Multi-label classification not supported')
    reciprocal = 1 / true_rank[true_rank <= k]
    return np.sum(reciprocal) / len(y_true)
|
# -*- coding: utf-8 -*-
# Main interface between Anki and this addon components
from anki.cards import Card
# This files is part of schedule-priority addon
# @author ricardo saturnino
# ------------------------------------------------
from .core import Feedback, AppHolder, Priority, InvalidConfiguration
from .prioritizer import get_prioritized_time, get_card_multiplier, PrioInterface
from .uicontrib import PriorityCardUiHandler
from .schedv3_interface import set_card_priority
import anki
from aqt import mw, gui_hooks
try:
from anki.scheduler.v2 import Scheduler
except ImportError:
from anki.schedv2 import Scheduler
from aqt.utils import showInfo, tooltip
from aqt.reviewer import Reviewer
# Bridge the prioritizer module's interface onto this addon's core helpers.
PrioInterface.priority_list = lambda: Priority.priorityList
PrioInterface.showInfo = Feedback.showInfo
PrioInterface.showError = Feedback.showError
class Controller:
    """
    The mediator/adapter between Anki with its components and this addon specific API
    """

    def __init__(self, mw):
        # Reference to Anki's main window, used for addon config access.
        self._ankiMw = mw

    def setupHooks(self, schedulerRef: Scheduler, reviewer: Reviewer):
        """Install the addon's UI hooks and wrap the scheduler's
        next-interval computation with the priority multiplier."""
        hooks = anki.hooks
        # Inject the addon's webview bootstrap into the reviewer's init.
        reviewer._initWeb = hooks.wrap(reviewer._initWeb, Controller.initWebAddon)
        hooks.addHook('EditorWebView.contextMenuEvent', PriorityCardUiHandler.onEditorCtxMenu)
        hooks.addHook('AnkiWebView.contextMenuEvent', PriorityCardUiHandler.onReviewCtxMenu)
        hooks.addHook('Reviewer.contextMenuEvent', PriorityCardUiHandler.onReviewCtxMenu)
        hooks.addHook('showQuestion', PriorityCardUiHandler.onShowQA)
        hooks.addHook('showAnswer', PriorityCardUiHandler.onShowQA)
        # 'around' wrapping: the original _nextRevIvl is passed as _old.
        schedulerRef._nextRevIvl = hooks.wrap(schedulerRef._nextRevIvl, Controller.get_next_review_interval, 'around')

    @staticmethod
    def initWebAddon():
        # Called in place of / around the reviewer's web initialization.
        PriorityCardUiHandler.prepareWebview()

    @staticmethod
    def get_next_review_interval(scheduleInstance, card, ease: int, fuzz: bool, **kargs) -> int:
        """Compute the stock interval, then scale it by the card's priority."""
        f = kargs['_old']  # the wrapped (original) _nextRevIvl
        res = f(scheduleInstance, card, ease, fuzz)
        return get_prioritized_time(card, res)

    def loadConfiguration(self):
        """Load priority values from the addon config; keeps defaults for
        missing/empty entries and only warns on invalid configuration."""
        Priority.load()
        try:
            config = self._ankiMw.addonManager.getConfig(__name__)
            for item in Priority.priorityList:
                if not item.configName:
                    continue
                confValue = config[item.configName]
                if not confValue:
                    continue
                Priority.setValue(item.configName, confValue)
        except InvalidConfiguration as ie:
            Feedback.showInfo(ie)
            print(ie)
        except Exception as e:
            print(e)
            Feedback.showInfo('It was not possible to read customized configuration. Using defaults...')
def prepare_v3():
    # v3-scheduler integration: apply per-card priority when a card is shown.
    gui_hooks.card_will_show.append(set_card_priority)
def setup():
    """Addon entry point: create the Controller singleton, route feedback
    through Anki's UI helpers, install hooks, and load user configuration."""
    global controller
    controller = Controller(mw)
    # global app
    AppHolder.app = mw
    Feedback.log('Setting schedule-priority controller')
    Feedback.showInfo = tooltip
    Feedback.showError = showInfo
    # NOTE(review): the Scheduler *class* (not mw's scheduler instance) is
    # passed, so _nextRevIvl is wrapped at class level — confirm intended.
    controller.setupHooks(Scheduler, mw.reviewer)
    controller.loadConfiguration()
    prepare_v3()
# Module-level singleton holding the Controller instance;
# None until setup() runs.
controller = None
|
# Read the election tallies from stdin; all values are whole numbers.
numero_eleitores = int(input("Numero eleitores: "))
votos_brancos = int(input("Numero votos brancos: "))
votos_nulos = int(input("Numero votos nulos: "))
votos_validos = int(input("Numero votos validos: "))
def regra_tres(numero_eleitores, TV):
    """Rule of three: return TV as a percentage of numero_eleitores."""
    percentage = (TV * 100) / numero_eleitores
    return percentage
# Print each category's share of the electorate, then the sum of the three
# percentages (should come to 100 if the counts are consistent).
print("",regra_tres(numero_eleitores,votos_brancos))
print("",regra_tres(numero_eleitores,votos_nulos))
print("",regra_tres(numero_eleitores,votos_validos))
print("{}".format(regra_tres(numero_eleitores,votos_brancos)+regra_tres(numero_eleitores,votos_nulos)+regra_tres(numero_eleitores,votos_validos)))
|
import pandas as pd
import numpy as np
import sqlite3
conn = sqlite3.connect('test_tran.db')
cur = conn.cursor()
# Query the `tran` table: per (month, client) spending totals of at least
# 100k rubles; lag() additionally returns each row's previous qualifying
# month per client (defaulting to 1 when there is none).
cur.execute("SELECT month, id, summary, lag(month, 1, 1)"
            "OVER (partition by id order by id, month) "
            "FROM (SELECT strftime('%m',day) as month, "
            "id_client id, sum(tran_sum) as summary FROM tran group by strftime('%m',day), id_client "
            "ORDER BY id_client, strftime('%m',day))"
            "WHERE summary >= 100000")
tran_sum_list = cur.fetchall()
cashback_month = []
# Accrue cashback according to the programme rules.
for rowid, i in enumerate(tran_sum_list):
    # Check that the current and next rows belong to the same client.
    # cashback_month collects the (month, id, ...) rows that satisfy all
    # programme conditions (consecutive qualifying months, month >= 3);
    # cashback is paid for these rows below.
    if rowid != len(tran_sum_list)-1 and (tran_sum_list[rowid][1] == tran_sum_list[rowid+1][1] or tran_sum_list[rowid][0] == '12')\
            and int(tran_sum_list[rowid][0])-1 == int(tran_sum_list[rowid][3]) and int(tran_sum_list[rowid][0]) >= 3:
        cashback_month.append(i)
for rowid, i in enumerate(cashback_month):
    # NOTE(review): this pops from cashback_month while enumerating it,
    # which skips the element after each removal — presumably intended to
    # drop back-to-back months for the same client, but verify the edge
    # cases (three consecutive qualifying months).
    if rowid != len(cashback_month)-1 and cashback_month[rowid][1] == cashback_month[rowid+1][1] \
            and int(cashback_month[rowid][0])+1 == int(cashback_month[rowid+1][0]):
        cashback_month.pop(rowid+1)
cashback_df = pd.DataFrame(cashback_month)
cashback_df.columns = ['Месяц', 'id', 'sum', 'prev_month']
cashback_df = cashback_df.drop(['prev_month', 'sum'], axis=1)
# Flat payout of 1000 per qualifying (month, client) row.
cashback_df['cashback'] = 1000
'Итого выплаты по программе кэшбэка, тыс. руб.'
cashback_summary = cashback_df[['Месяц', 'cashback']]
cashback_summary = cashback_summary.groupby(cashback_summary['Месяц']).sum()
cashback_summary['cashback'] = cashback_summary['cashback']/1000
cashback_summary.columns = ['Итого выплаты по программе кэшбэка, тыс. руб.']
df_temp3 = cashback_summary['Итого выплаты по программе кэшбэка, тыс. руб.']
cashback_summary = df_temp3.reset_index()
'Кол-во клиентов, получивших выплаты'
df_temp = cashback_df.groupby(['Месяц', 'cashback']).count()
df_temp.columns = ['Кол-во клиентов, получивших выплаты']
df_temp2 = df_temp['Кол-во клиентов, получивших выплаты']
clients_count = df_temp2.reset_index()
clients_count = clients_count.drop(['cashback'], axis=1)
'Программа выплат - Отчет'
# Join totals with client counts, reindex over all 12 months (filling
# missing months with 0), and replace month numbers with Russian names.
cashback_program = cashback_summary.merge(clients_count, how='inner', on='Месяц')
cashback_program = cashback_program.set_index('Месяц')
cashback_program.index.name = None
cashback_program['Месяц'] = cashback_program.index
cashback_program = cashback_program[['Месяц', 'Итого выплаты по программе кэшбэка, тыс. руб.',
                                     'Кол-во клиентов, получивших выплаты']]
new_index = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']
cashback_program = cashback_program.reindex(new_index)
cashback_program = cashback_program.replace(np.nan, 0)
cashback_program['Месяц'].update(pd.Series(["Январь", "Февраль",
                                            "Март", "Апрель",
                                            "Май", "Июнь",
                                            "Июль", "Август",
                                            "Сентябрь", "Октябрь",
                                            "Ноябрь", "Декабрь"], index=['01', '02', '03', '04', '05', '06', '07',
                                                                         '08', '09', '10', '11', '12']))
cashback_program.to_excel("cashback_program.xlsx", sheet_name='Программа лояльности', index=False)
|
import smtplib
from email.message import EmailMessage

# Build the message (replace the placeholders with real values before use).
email = EmailMessage()
email['from'] = '<Name>'
email['to'] = '<recievers_email>'
email['subject'] = '<email_subject>'
email.set_content('email_body')

# SMTP with STARTTLS on port 587 (Gmail); adjust host/port for your provider.
with smtplib.SMTP(host='smtp.gmail.com', port=587) as smtp:
    smtp.ehlo()
    smtp.starttls()
    smtp.login('<senders_email_id>', '<senders_password>')
    smtp.send_message(email)
    print('Message Sent')
    # Fix: removed the redundant smtp.quit() — SMTP.__exit__ already
    # sends QUIT when the `with` block exits.
#turn on less secure apps from google account
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.